/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>

static kmem_cache_t *space_seg_cache;

void
space_map_init(void)
{
	ASSERT(space_seg_cache == NULL);
	space_seg_cache = kmem_cache_create("space_seg_cache",
	    sizeof (space_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
space_map_fini(void)
{
	kmem_cache_destroy(space_seg_cache);
	space_seg_cache = NULL;
}

/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}

void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
    kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}

void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY0(sm->sm_space);
	avl_destroy(&sm->sm_root);
	cv_destroy(&sm->sm_load_cv);
}

void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(!sm->sm_condensing);
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ss = space_map_find(sm, start, size, &where);
	if (ss != NULL) {
		zfs_panic_recover("zfs: allocating allocated segment "
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/* Make sure we don't overlap with either of our neighbors */
	VERIFY(ss == NULL);
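
	/*
	 * Locate the segments immediately before and after the insertion
	 * point. The new segment is coalesced with a neighbor whenever it
	 * abuts that neighbor exactly, so the tree always holds one node
	 * per maximal contiguous run of space.
	 */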
	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		avl_remove(&sm->sm_root, ss_before);
		if (sm->sm_pp_root) {
			avl_remove(sm->sm_pp_root, ss_before);
			avl_remove(sm->sm_pp_root, ss_after);
		}
		ss_after->ss_start = ss_before->ss_start;
		kmem_cache_free(space_seg_cache, ss_before);
		ss = ss_after;
	} else if (merge_before) {
		ss_before->ss_end = end;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_before);
		ss = ss_before;
	} else if (merge_after) {
		ss_after->ss_start = start;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_after);
		ss = ss_after;
	} else {
		ss = kmem_cache_alloc(space_seg_cache, KM_SLEEP);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	if (sm->sm_pp_root)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space += size;
}

void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	VERIFY(!sm->sm_condensing);
	ss = space_map_find(sm, start, size, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size <= sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);
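
	/*
	 * Removing a range from an existing segment leaves zero, one, or
	 * two remnants: removing the middle splits the segment in two,
	 * removing a range flush with either end shrinks it, and removing
	 * an exact match frees the segment entirely.
	 */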
	if (sm->sm_pp_root)
		avl_remove(sm->sm_pp_root, ss);

	if (left_over && right_over) {
		newseg = kmem_cache_alloc(space_seg_cache, KM_SLEEP);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
		if (sm->sm_pp_root)
			avl_add(sm->sm_pp_root, newseg);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_cache_free(space_seg_cache, ss);
		ss = NULL;
	}

	if (sm->sm_pp_root && ss != NULL)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space -= size;
}

space_seg_t *
space_map_find(space_map_t *sm, uint64_t start, uint64_t size,
    avl_index_t *wherep)
{
	space_seg_t ssearch, *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = start + size;
	ss = avl_find(&sm->sm_root, &ssearch, wherep);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= start + size)
		return (ss);
	return (NULL);
}

boolean_t
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;

	return (space_map_find(sm, start, size, &where) != NULL);
}

void
space_map_swap(space_map_t **msrc, space_map_t **mdst)
{
	space_map_t *sm;

	ASSERT(MUTEX_HELD((*msrc)->sm_lock));
	ASSERT0((*mdst)->sm_space);
	ASSERT0(avl_numnodes(&(*mdst)->sm_root));

	sm = *msrc;
	*msrc = *mdst;
	*mdst = sm;
}

void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_cache_free(space_seg_cache, ss);
	}
	sm->sm_space = 0;
}

void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}

/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading) {
		ASSERT(!sm->sm_loaded);
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
	}
}

/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT(!sm->sm_loaded);
	ASSERT(!sm->sm_loading);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY0(sm->sm_space);

	if (maptype == SM_FREE) {
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map,
		    DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

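			/*
			 * Each non-debug entry packs an offset (in units
			 * of 1 << sm_shift, relative to sm_start), an
			 * alloc/free type bit, and a run length into a
			 * single 64-bit word; see the entry layout in
			 * sys/space_map.h. Entries matching maptype are
			 * added to the in-core map, entries of the
			 * opposite type are removed.
			 */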
			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}

void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}

uint64_t
space_map_maxsize(space_map_t *sm)
{
	ASSERT(sm->sm_ops != NULL);
	return (sm->sm_ops->smop_max(sm));
}

uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}

void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}

void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}

/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
void
space_map_sync(space_map_t *sm, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &sm->sm_root;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len, total, sm_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&sm->sm_root);
	sm_space = sm->sm_space;
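
	/*
	 * Emit each segment as one or more run-length-encoded entries.
	 * A run longer than SM_RUN_MAX is split across multiple entries,
	 * and the entry buffer is written out whenever it fills.
	 */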
	for (ss = avl_first(t); ss != NULL; ss = AVL_NEXT(t, ss)) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		total += size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object,
				    smo->smo_objsize, bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&sm->sm_root));
	VERIFY3U(sm->sm_space, ==, sm_space);
	VERIFY3U(sm->sm_space, ==, total);

	zio_buf_free(entry_map, bufsize);
}

void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}

/*
 * Space map reference trees.
 *
 * A space map is a collection of integers. Every integer is either
 * in the map, or it's not. A space map reference tree generalizes
 * the idea: it allows its members to have arbitrary reference counts,
 * as opposed to the implicit reference count of 0 or 1 in a space map.
 * This representation comes in handy when computing the union or
 * intersection of multiple space maps. For example, the union of
 * N space maps is the subset of the reference tree with refcnt >= 1.
 * The intersection of N space maps is the subset with refcnt >= N.
 *
 * [It's very much like a Fourier transform. Unions and intersections
 * are hard to perform in the 'space map domain', so we convert the maps
 * into the 'reference count domain', where it's trivial, then invert.]
 *
 * vdev_dtl_reassess() uses computations of this form to determine
 * DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
 * has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
 * has an outage wherever refcnt >= vdev_children.
 */
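/*
 * For example, given M1 = { [0, 10) } and M2 = { [5, 15) },
 * space_map_ref_add_map() records the deltas (0, +1), (10, -1) and
 * (5, +1), (15, -1). Walking the tree in offset order, the running
 * refcnt is 1 on [0, 5), 2 on [5, 10), 1 on [10, 15), and 0 after
 * that, so generating with minref = 1 yields the union { [0, 15) }
 * and minref = 2 yields the intersection { [5, 10) }.
 */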
static int
space_map_ref_compare(const void *x1, const void *x2)
{
	const space_ref_t *sr1 = x1;
	const space_ref_t *sr2 = x2;

	if (sr1->sr_offset < sr2->sr_offset)
		return (-1);
	if (sr1->sr_offset > sr2->sr_offset)
		return (1);

	if (sr1 < sr2)
		return (-1);
	if (sr1 > sr2)
		return (1);

	return (0);
}

void
space_map_ref_create(avl_tree_t *t)
{
	avl_create(t, space_map_ref_compare,
	    sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}

void
space_map_ref_destroy(avl_tree_t *t)
{
	space_ref_t *sr;
	void *cookie = NULL;

	while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(sr, sizeof (*sr));

	avl_destroy(t);
}

static void
space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
	space_ref_t *sr;

	sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
	sr->sr_offset = offset;
	sr->sr_refcnt = refcnt;

	avl_add(t, sr);
}

void
space_map_ref_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
    int64_t refcnt)
{
	space_map_ref_add_node(t, start, refcnt);
	space_map_ref_add_node(t, end, -refcnt);
}

/*
 * Convert (or add) a space map into a reference tree.
 */
void
space_map_ref_add_map(avl_tree_t *t, space_map_t *sm, int64_t refcnt)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt);
}

/*
 * Convert a reference tree into a space map. The space map will contain
 * all members of the reference tree for which refcnt >= minref.
 */
void
space_map_ref_generate_map(avl_tree_t *t, space_map_t *sm, int64_t minref)
{
	uint64_t start = -1ULL;
	int64_t refcnt = 0;
	space_ref_t *sr;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	space_map_vacate(sm, NULL, NULL);

	for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
		refcnt += sr->sr_refcnt;
		if (refcnt >= minref) {
			if (start == -1ULL) {
				start = sr->sr_offset;
			}
		} else {
			if (start != -1ULL) {
				uint64_t end = sr->sr_offset;
				ASSERT(start <= end);
				if (end > start)
					space_map_add(sm, start, end - start);
				start = -1ULL;
			}
		}
	}
	ASSERT(refcnt == 0);
	ASSERT(start == -1ULL);
}