/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>

static kmem_cache_t *space_seg_cache;

void
space_map_init(void)
{
	ASSERT(space_seg_cache == NULL);
	space_seg_cache = kmem_cache_create("space_seg_cache",
	    sizeof (space_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
space_map_fini(void)
{
	kmem_cache_destroy(space_seg_cache);
	space_seg_cache = NULL;
}

/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}

void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
    kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}

void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY0(sm->sm_space);
	avl_destroy(&sm->sm_root);
	cv_destroy(&sm->sm_load_cv);
}

void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) {
		zfs_panic_recover("zfs: allocating allocated segment "
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/* Make sure we don't overlap with either of our neighbors */
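	/*
	 * The comparator treats any overlapping segments as equal, so a
	 * non-NULL result from avl_find() that wasn't caught by the
	 * full-containment check above would mean a partial overlap.
	 */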
	VERIFY(ss == NULL);

	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		avl_remove(&sm->sm_root, ss_before);
		if (sm->sm_pp_root) {
			avl_remove(sm->sm_pp_root, ss_before);
			avl_remove(sm->sm_pp_root, ss_after);
		}
		ss_after->ss_start = ss_before->ss_start;
		kmem_cache_free(space_seg_cache, ss_before);
		ss = ss_after;
	} else if (merge_before) {
		ss_before->ss_end = end;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_before);
		ss = ss_before;
	} else if (merge_after) {
		ss_after->ss_start = start;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_after);
		ss = ss_after;
	} else {
		ss = kmem_cache_alloc(space_seg_cache, KM_SLEEP);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	if (sm->sm_pp_root)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space += size;
}

void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size <= sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	if (sm->sm_pp_root)
		avl_remove(sm->sm_pp_root, ss);

	if (left_over && right_over) {
		newseg = kmem_cache_alloc(space_seg_cache, KM_SLEEP);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
		if (sm->sm_pp_root)
			avl_add(sm->sm_pp_root, newseg);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_cache_free(space_seg_cache, ss);
		ss = NULL;
	}

	if (sm->sm_pp_root && ss != NULL)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space -= size;
}

boolean_t
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss;
	uint64_t end = start + size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end);
}

void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

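	/*
	 * Tear down the tree node by node: avl_destroy_nodes() returns
	 * each segment in turn and leaves the tree empty, so every
	 * segment is handed to the callback (if any) and then freed.
	 */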
	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_cache_free(space_seg_cache, ss);
	}
	sm->sm_space = 0;
}

void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}

/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading) {
		ASSERT(!sm->sm_loaded);
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
	}
}

/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT(!sm->sm_loaded);
	ASSERT(!sm->sm_loading);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY0(sm->sm_space);

	if (maptype == SM_FREE) {
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map,
		    DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

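			/*
			 * The on-disk map is a log of both alloc and free
			 * records: entries matching maptype add space to
			 * the in-core map, and entries of the opposite
			 * type remove it (an SM_FREE load starts from an
			 * entirely free map and replays allocations as
			 * holes).
			 */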
			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}

void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}

uint64_t
space_map_maxsize(space_map_t *sm)
{
	ASSERT(sm->sm_ops != NULL);
	return (sm->sm_ops->smop_max(sm));
}

uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}

void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}

void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}

/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
void
space_map_sync(space_map_t *sm, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	void *cookie = NULL;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len, delta, sm_space;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

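	/*
	 * Every segment in this map represents the same kind of
	 * transition, so syncing an SM_ALLOC map grows smo_alloc by the
	 * map's total space and syncing an SM_FREE map shrinks it.
	 */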
	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	delta = 0;
	sm_space = sm->sm_space;
	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		delta += size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object,
				    smo->smo_objsize, bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
		kmem_cache_free(space_seg_cache, ss);
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(sm->sm_space, ==, sm_space);

	zio_buf_free(entry_map, bufsize);

	sm->sm_space -= delta;
	VERIFY0(sm->sm_space);
}

void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}

/*
 * Space map reference trees.
 *
 * A space map is a collection of integers. Every integer is either
 * in the map, or it's not. A space map reference tree generalizes
 * the idea: it allows its members to have arbitrary reference counts,
 * as opposed to the implicit reference count of 0 or 1 in a space map.
 * This representation comes in handy when computing the union or
 * intersection of multiple space maps. For example, the union of
 * N space maps is the subset of the reference tree with refcnt >= 1.
 * The intersection of N space maps is the subset with refcnt >= N.
 *
 * [It's very much like a Fourier transform. Unions and intersections
 * are hard to perform in the 'space map domain', so we convert the maps
 * into the 'reference count domain', where it's trivial, then invert.]
 *
 * vdev_dtl_reassess() uses computations of this form to determine
 * DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
 * has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
 * has an outage wherever refcnt >= vdev_children.
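 *
 * For example, space_map_ref_add_seg(t, 10, 20, 1) inserts a +1 node
 * at offset 10 and a -1 node at offset 20. Walking the tree in offset
 * order while keeping a running sum gives the refcnt over every
 * interval, and space_map_ref_generate_map() emits a segment for each
 * maximal run where that sum >= minref.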
 */
static int
space_map_ref_compare(const void *x1, const void *x2)
{
	const space_ref_t *sr1 = x1;
	const space_ref_t *sr2 = x2;

	if (sr1->sr_offset < sr2->sr_offset)
		return (-1);
	if (sr1->sr_offset > sr2->sr_offset)
		return (1);

	if (sr1 < sr2)
		return (-1);
	if (sr1 > sr2)
		return (1);

	return (0);
}

void
space_map_ref_create(avl_tree_t *t)
{
	avl_create(t, space_map_ref_compare,
	    sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}

void
space_map_ref_destroy(avl_tree_t *t)
{
	space_ref_t *sr;
	void *cookie = NULL;

	while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(sr, sizeof (*sr));

	avl_destroy(t);
}

static void
space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
	space_ref_t *sr;

	sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
	sr->sr_offset = offset;
	sr->sr_refcnt = refcnt;

	avl_add(t, sr);
}

void
space_map_ref_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
    int64_t refcnt)
{
	space_map_ref_add_node(t, start, refcnt);
	space_map_ref_add_node(t, end, -refcnt);
}

/*
 * Convert (or add) a space map into a reference tree.
 */
void
space_map_ref_add_map(avl_tree_t *t, space_map_t *sm, int64_t refcnt)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt);
}

/*
 * Convert a reference tree into a space map. The space map will contain
 * all members of the reference tree for which refcnt >= minref.
 */
void
space_map_ref_generate_map(avl_tree_t *t, space_map_t *sm, int64_t minref)
{
	uint64_t start = -1ULL;
	int64_t refcnt = 0;
	space_ref_t *sr;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	space_map_vacate(sm, NULL, NULL);

	for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
		refcnt += sr->sr_refcnt;
		if (refcnt >= minref) {
			if (start == -1ULL) {
				start = sr->sr_offset;
			}
		} else {
			if (start != -1ULL) {
				uint64_t end = sr->sr_offset;
				ASSERT(start <= end);
				if (end > start)
					space_map_add(sm, start, end - start);
				start = -1ULL;
			}
		}
	}
	ASSERT(refcnt == 0);
	ASSERT(start == -1ULL);
}