/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes
 * to mind is support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *	rl = zfs_range_lock(zp, off, len, lock_type);
 *	zfs_range_unlock(rl);
 *	zfs_range_reduce(rl, off, len);
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for
 * locks. On entry to zfs_range_lock() a rl_t is allocated; the tree
 * is searched and no overlap found; then this rl_t is placed in the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The avl code only allows one node at a particular offset. Also it's very
 * inefficient to search through all previous entries looking for overlaps
 * (because the very 1st in the ordered list might be at offset 0 but
 * cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks created for the non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's
 * handle) and its offset and length are used when releasing the lock.
 *
 * Thread coordination
 * -------------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
 *
 * Append mode writes
 * ------------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, and the lock type converted from RL_APPEND to
 * RL_WRITER and the range locked.
 *
 * Grow block handling
 * -------------------
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce().
 */

#include <sys/zfs_rlock.h>
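
/*
 * Illustrative usage sketch (caller code, not part of this file):
 *
 *	rl_t *rl;
 *
 *	rl = zfs_range_lock(zp, off, len, RL_READER);
 *	... access file data in [off, off + len) ...
 *	zfs_range_unlock(rl);
 *
 * An append mode write passes RL_APPEND; the lock comes back converted
 * to RL_WRITER with rl->r_off set to the end-of-file offset that was
 * current when the lock was granted.
 */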

/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl;
	avl_index_t where;
	uint64_t end_size;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;
	int max_blksz = zp->z_zfsvfs->z_max_blksz;

	for (;;) {
		/*
		 * If in append mode pick up the current end of file.
		 * This is done under z_range_lock to avoid races.
		 */
		if (new->r_type == RL_APPEND)
			new->r_off = zp->z_phys->zp_size;

		/*
		 * If we need to grow the block size then grab the whole
		 * file range. This is also done under z_range_lock to
		 * avoid races.
		 */
		end_size = MAX(zp->z_phys->zp_size, new->r_off + len);
		if (end_size > zp->z_blksz &&
		    (!ISP2(zp->z_blksz) || zp->z_blksz < max_blksz)) {
			new->r_off = 0;
			new->r_len = UINT64_MAX;
		}

		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(tree) == 0) {
			new->r_type = RL_WRITER; /* convert to writer */
			avl_add(tree, new);
			return;
		}

		/*
		 * Look for any locks in the range.
		 */
		rl = avl_find(tree, new, &where);
		if (rl)
			goto wait; /* already locked at same offset */

		rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
		if (rl && (rl->r_off < new->r_off + new->r_len))
			goto wait;

		rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
		if (rl && rl->r_off + rl->r_len > new->r_off)
			goto wait;

		new->r_type = RL_WRITER; /* convert possible RL_APPEND */
		avl_insert(tree, new, where);
		return;
wait:
		if (!rl->r_write_wanted) {
			cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
			rl->r_write_wanted = B_TRUE;
		}
		cv_wait(&rl->r_wr_cv, &zp->z_range_lock);

		/* reset to original */
		new->r_off = off;
		new->r_len = len;
	}
}
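
/*
 * Worked example of the grow-block path above (hypothetical sizes):
 * with zp_size = 60K, z_blksz = 64K and z_max_blksz = 128K, a write of
 * 8K at offset 60K gives end_size = 68K > 64K; since 64K is a power of
 * two but below the 128K maximum, the whole file (off 0, len UINT64_MAX)
 * is write locked, and the caller later trims this back with
 * zfs_range_reduce().
 */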

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
	rl_t *proxy;

	if (rl->r_proxy)
		return (rl); /* already a proxy */

	ASSERT3U(rl->r_cnt, ==, 1);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);
	avl_remove(tree, rl);
	rl->r_cnt = 0;

	/* create a proxy range lock */
	proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	proxy->r_off = rl->r_off;
	proxy->r_len = rl->r_len;
	proxy->r_cnt = 1;
	proxy->r_type = RL_READER;
	proxy->r_proxy = B_TRUE;
	proxy->r_write_wanted = B_FALSE;
	proxy->r_read_wanted = B_FALSE;
	avl_add(tree, proxy);

	return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
	rl_t *front, *rear;

	ASSERT3U(rl->r_len, >, 1);
	ASSERT3U(off, >, rl->r_off);
	ASSERT3U(off, <, rl->r_off + rl->r_len);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);

	/* create the rear proxy range lock */
	rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rear->r_off = off;
	rear->r_len = rl->r_off + rl->r_len - off;
	rear->r_cnt = rl->r_cnt;
	rear->r_type = RL_READER;
	rear->r_proxy = B_TRUE;
	rear->r_write_wanted = B_FALSE;
	rear->r_read_wanted = B_FALSE;

	front = zfs_range_proxify(tree, rl);
	front->r_len = off - rl->r_off;

	avl_insert_here(tree, rear, front, AVL_AFTER);
	return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
	rl_t *rl;

	ASSERT(len);
	rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rl->r_off = off;
	rl->r_len = len;
	rl->r_cnt = 1;
	rl->r_type = RL_READER;
	rl->r_proxy = B_TRUE;
	rl->r_write_wanted = B_FALSE;
	rl->r_read_wanted = B_FALSE;
	avl_add(tree, rl);
}
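
/*
 * Worked example (hypothetical offsets, in bytes): an original reader
 * lock covers [0, 10) and a second reader then requests [5, 15).
 * zfs_range_add_reader() below splits the original at offset 5,
 * leaving a front proxy [0, 5) with r_cnt 1 and a rear proxy [5, 10)
 * whose r_cnt is bumped to 2, then adds a new proxy [10, 15) with
 * r_cnt 1 for the uncovered tail. Both callers' rl_t handles now have
 * r_cnt == 0 and map onto these proxies by offset and length.
 */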

static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev,
    avl_index_t where)
{
	rl_t *next;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * prev arrives either:
	 * - pointing to an entry at the same offset
	 * - pointing to the entry with the closest previous offset whose
	 *   range may overlap with the new range
	 * - null, if there were no ranges starting before the new one
	 */
	if (prev) {
		if (prev->r_off + prev->r_len <= off) {
			prev = NULL;
		} else if (prev->r_off != off) {
			/*
			 * convert to proxy if needed then
			 * split this entry and bump ref count
			 */
			prev = zfs_range_split(tree, prev, off);
			prev = AVL_NEXT(tree, prev); /* move to rear range */
		}
	}
	ASSERT((prev == NULL) || (prev->r_off == off));

	if (prev)
		next = prev;
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

	if (next == NULL || off + len <= next->r_off) {
		/* no overlaps, use the original new rl_t in the tree */
		avl_insert(tree, new, where);
		return;
	}

	if (off < next->r_off) {
		/* Add a proxy for initial range before the overlap */
		zfs_range_new_proxy(tree, off, next->r_off - off);
	}

	new->r_cnt = 0; /* will use proxies in tree */
	/*
	 * We now search forward through the ranges, until we go past the
	 * end of the new range. For each entry we make it a proxy if it
	 * isn't already, then bump its reference count. If there are any
	 * gaps between the ranges then we create a new proxy range.
	 */
	for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			break;
		if (prev && prev->r_off + prev->r_len < next->r_off) {
			/* there's a gap */
			ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
			zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
			    next->r_off - (prev->r_off + prev->r_len));
		}
		if (off + len == next->r_off + next->r_len) {
			/* exact overlap with end */
			next = zfs_range_proxify(tree, next);
			next->r_cnt++;
			return;
		}
		if (off + len < next->r_off + next->r_len) {
			/* new range ends in the middle of this block */
			next = zfs_range_split(tree, next, off + len);
			next->r_cnt++;
			return;
		}
		ASSERT3U(off + len, >, next->r_off + next->r_len);
		next = zfs_range_proxify(tree, next);
		next->r_cnt++;
	}

	/* Add the remaining end range. */
	zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
	    (off + len) - (prev->r_off + prev->r_len));
}

/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *prev, *next;
	avl_index_t where;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * Look for any writer locks in the range.
	 */
retry:
	prev = avl_find(tree, new, &where);
	if (prev == NULL)
		prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

	/*
	 * Check the previous range for a writer lock overlap.
	 */
	if (prev && (off < prev->r_off + prev->r_len)) {
		if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
			if (!prev->r_read_wanted) {
				cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
				prev->r_read_wanted = B_TRUE;
			}
			cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len < prev->r_off + prev->r_len)
			goto got_lock;
	}

	/*
	 * Search through the following ranges to see if there's
	 * any write lock overlap.
	 */
	if (prev)
		next = AVL_NEXT(tree, prev);
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
	for (; next; next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			goto got_lock;
		if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
			if (!next->r_read_wanted) {
				cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
				next->r_read_wanted = B_TRUE;
			}
			cv_wait(&next->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len <= next->r_off + next->r_len)
			goto got_lock;
	}

got_lock:
	/*
	 * Add the read lock, which may involve splitting existing
	 * locks and bumping ref counts (r_cnt).
	 */
	zfs_range_add_reader(tree, new, prev, where);
}
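
/*
 * Writer-priority example (hypothetical sequence): reader A holds
 * [0, 4K); a writer queues on it, setting r_write_wanted. A new
 * reader B for an overlapping range now blocks on r_rd_cv in the
 * checks above rather than sharing the range, so a continuous stream
 * of readers cannot starve the waiting writer.
 */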

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking or reducing the range (if the entire file
 * was previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
	rl_t *new;

	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

	new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	new->r_zp = zp;
	new->r_off = off;
	new->r_len = len;
	new->r_cnt = 1; /* assume it's going to be in the tree */
	new->r_type = type;
	new->r_proxy = B_FALSE;
	new->r_write_wanted = B_FALSE;
	new->r_read_wanted = B_FALSE;

	mutex_enter(&zp->z_range_lock);
	if (type == RL_READER) {
		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(&zp->z_range_avl) == 0)
			avl_add(&zp->z_range_avl, new);
		else
			zfs_range_lock_reader(zp, new);
	} else
		zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
	mutex_exit(&zp->z_range_lock);
	return (new);
}

/*
 * Unlock a reader lock
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl, *next;
	uint64_t len;

	/*
	 * The common case is when the remove entry is in the tree
	 * (cnt == 1) meaning there's been no other reader locks overlapping
	 * with this one. Otherwise the remove entry will have been
	 * removed from the tree and replaced by proxies (one or
	 * more ranges mapping to the entire range).
	 */
	if (remove->r_cnt == 1) {
		avl_remove(tree, remove);
		if (remove->r_write_wanted)
			cv_broadcast(&remove->r_wr_cv);
		if (remove->r_read_wanted)
			cv_broadcast(&remove->r_rd_cv);
	} else {
		ASSERT3U(remove->r_cnt, ==, 0);
		ASSERT3U(remove->r_write_wanted, ==, 0);
		ASSERT3U(remove->r_read_wanted, ==, 0);
		/*
		 * Find start proxy representing this reader lock,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
		rl = avl_find(tree, remove, NULL);
		ASSERT(rl);
		ASSERT(rl->r_cnt);
		ASSERT(rl->r_type == RL_READER);
		for (len = remove->r_len; len != 0; rl = next) {
			len -= rl->r_len;
			if (len) {
				next = AVL_NEXT(tree, rl);
				ASSERT(next);
				ASSERT(rl->r_off + rl->r_len == next->r_off);
				ASSERT(next->r_cnt);
				ASSERT(next->r_type == RL_READER);
			}
			rl->r_cnt--;
			if (rl->r_cnt == 0) {
				avl_remove(tree, rl);
				if (rl->r_write_wanted)
					cv_broadcast(&rl->r_wr_cv);
				if (rl->r_read_wanted)
					cv_broadcast(&rl->r_rd_cv);
				kmem_free(rl, sizeof (rl_t));
			}
		}
	}
	kmem_free(remove, sizeof (rl_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
	znode_t *zp = rl->r_zp;

	ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
	ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
	ASSERT(!rl->r_proxy);

	mutex_enter(&zp->z_range_lock);
	if (rl->r_type == RL_WRITER) {
		/* writer locks can't be shared or split */
		avl_remove(&zp->z_range_avl, rl);
		mutex_exit(&zp->z_range_lock);
		if (rl->r_write_wanted)
			cv_broadcast(&rl->r_wr_cv);
		if (rl->r_read_wanted)
			cv_broadcast(&rl->r_rd_cv);
		kmem_free(rl, sizeof (rl_t));
	} else {
		/*
		 * lock may be shared, let zfs_range_unlock_reader()
		 * release the lock and free the rl_t
		 */
		zfs_range_unlock_reader(zp, rl);
		mutex_exit(&zp->z_range_lock);
	}
}
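
/*
 * Continuing the earlier worked example: when the reader that locked
 * [5, 15) unlocks, its handle has r_cnt == 0, so the walk in
 * zfs_range_unlock_reader() starts at the proxy found at offset 5 and
 * covers 10 bytes: proxy [5, 10) drops to r_cnt 1 and stays; proxy
 * [10, 15) drops to r_cnt 0, any waiters on it are woken, and it is
 * freed.
 */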

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
	znode_t *zp = rl->r_zp;

	/* Ensure there are no other locks */
	ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
	ASSERT(rl->r_off == 0);
	ASSERT(rl->r_type == RL_WRITER);
	ASSERT(!rl->r_proxy);
	ASSERT3U(rl->r_len, ==, UINT64_MAX);
	ASSERT3U(rl->r_cnt, ==, 1);

	mutex_enter(&zp->z_range_lock);
	rl->r_off = off;
	rl->r_len = len;
	mutex_exit(&zp->z_range_lock);
	if (rl->r_write_wanted)
		cv_broadcast(&rl->r_wr_cv);
	if (rl->r_read_wanted)
		cv_broadcast(&rl->r_rd_cv);
}

/*
 * AVL comparison function used to order range locks
 * Locks are ordered on the start offset of the range.
 */
int
zfs_range_compare(const void *arg1, const void *arg2)
{
	const rl_t *rl1 = arg1;
	const rl_t *rl2 = arg2;

	if (rl1->r_off > rl2->r_off)
		return (1);
	if (rl1->r_off < rl2->r_off)
		return (-1);
	return (0);
}
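
/*
 * Illustrative setup sketch (assumed, done elsewhere in the znode
 * code rather than in this file): the tree and mutex this file
 * operates on would be initialised along the lines of:
 *
 *	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
 *	avl_create(&zp->z_range_avl, zfs_range_compare,
 *	    sizeof (rl_t), offsetof(rl_t, r_node));
 *
 * where r_node is the avl_node_t embedded in rl_t (field name assumed
 * from zfs_rlock.h).
 */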