/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>

/*
 * Range trees are tree-based data structures that can be used to
 * track free space or generally any space allocation information.
 * A range tree keeps track of individual segments and automatically
 * provides facilities such as adjacent extent merging and extent
 * splitting in response to range add/remove requests.
 *
 * A range tree starts out completely empty, with no segments in it.
 * Adding an allocation via range_tree_add to the range tree can either:
 * 1) create a new extent
 * 2) extend an adjacent extent
 * 3) merge two adjacent extents
 * Conversely, removing an allocation via range_tree_remove can:
 * 1) completely remove an extent
 * 2) shorten an extent (if the allocation was near one of its ends)
 * 3) split an extent into two extents, in effect punching a hole
 *
 * A range tree is also capable of 'bridging' gaps when adding
 * allocations. This is useful for cases when close proximity of
 * allocations is an important detail that needs to be represented
 * in the range tree. See range_tree_set_gap(). The default behavior
 * is not to bridge gaps (i.e. the maximum allowed gap size is 0).
 *
 * In order to traverse a range tree, use either the range_tree_walk()
 * or range_tree_vacate() functions.
 *
 * To obtain more accurate information on individual segment
 * operations that the range tree performs "under the hood", you can
 * specify a set of callbacks by passing a range_tree_ops_t structure
 * to the range_tree_create function. Any callbacks that are non-NULL
 * are then called at the appropriate times.
 *
 * The range tree code also supports a special variant of range trees
 * that can bridge small gaps between segments. This kind of tree is used
 * by the dsl scanning code to group I/Os into mostly sequential chunks to
 * optimize disk performance. The code here attempts to do this with as
 * little memory and computational overhead as possible. One limitation of
 * this implementation is that segments of range trees with gaps can only
 * support removing complete segments.
 */

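/*
 * Illustrative usage (a minimal sketch, not taken from this file's
 * callers; the offsets and sizes below are hypothetical):
 *
 *	range_tree_t *rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
 *	range_tree_add(rt, 0x1000, 0x1000);	creates [0x1000, 0x2000)
 *	range_tree_add(rt, 0x2000, 0x1000);	extends it to [0x1000, 0x3000)
 *	range_tree_remove(rt, 0x1800, 0x400);	punches a hole, leaving
 *						[0x1000, 0x1800) and
 *						[0x1c00, 0x3000)
 *	range_tree_vacate(rt, NULL, NULL);	empty the tree, then
 *	range_tree_destroy(rt);			free it
 */
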
static inline void
rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
{
	ASSERT3U(rt->rt_type, <, RANGE_SEG_NUM_TYPES);
	size_t size = 0;
	switch (rt->rt_type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		break;
	default:
		__builtin_unreachable();
	}
	memcpy(dest, src, size);
}

void
range_tree_stat_verify(range_tree_t *rt)
{
	range_seg_t *rs;
	zfs_btree_index_t where;
	uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
	int i;

	for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
	    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
		int idx = highbit64(size) - 1;

		hist[idx]++;
		ASSERT3U(hist[idx], !=, 0);
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		if (hist[i] != rt->rt_histogram[i]) {
			zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
			    i, hist, (u_longlong_t)hist[i],
			    (u_longlong_t)rt->rt_histogram[i]);
		}
		VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
	}
}

static void
range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
{
	uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	rt->rt_histogram[idx]++;
	ASSERT3U(rt->rt_histogram[idx], !=, 0);
}

static void
range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
{
	uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	ASSERT3U(rt->rt_histogram[idx], !=, 0);
	rt->rt_histogram[idx]--;
}

static int
range_tree_seg32_compare(const void *x1, const void *x2)
{
	const range_seg32_t *r1 = x1;
	const range_seg32_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

static int
range_tree_seg64_compare(const void *x1, const void *x2)
{
	const range_seg64_t *r1 = x1;
	const range_seg64_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

static int
range_tree_seg_gap_compare(const void *x1, const void *x2)
{
	const range_seg_gap_t *r1 = x1;
	const range_seg_gap_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

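/*
 * The comparators above return -1, 0, or 1 by subtracting two boolean
 * expressions: a segment entirely after the other yields 1, entirely
 * before yields -1, and any two segments that overlap compare as equal.
 * The overlap-as-equal property is what lets zfs_btree_find() locate an
 * existing segment that intersects a search range.
 */
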
range_tree_t *
range_tree_create_impl(const range_tree_ops_t *ops, range_seg_type_t type,
    void *arg, uint64_t start, uint64_t shift,
    int (*zfs_btree_compare) (const void *, const void *),
    uint64_t gap)
{
	range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP);

	ASSERT3U(shift, <, 64);
	ASSERT3U(type, <=, RANGE_SEG_NUM_TYPES);
	size_t size;
	int (*compare) (const void *, const void *);
	switch (type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		compare = range_tree_seg32_compare;
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		compare = range_tree_seg64_compare;
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		compare = range_tree_seg_gap_compare;
		break;
	default:
		panic("Invalid range seg type %d", type);
	}
	zfs_btree_create(&rt->rt_root, compare, size);

	rt->rt_ops = ops;
	rt->rt_gap = gap;
	rt->rt_arg = arg;
	rt->rt_type = type;
	rt->rt_start = start;
	rt->rt_shift = shift;
	rt->rt_btree_compare = zfs_btree_compare;

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
		rt->rt_ops->rtop_create(rt, rt->rt_arg);

	return (rt);
}

range_tree_t *
range_tree_create(const range_tree_ops_t *ops, range_seg_type_t type,
    void *arg, uint64_t start, uint64_t shift)
{
	return (range_tree_create_impl(ops, type, arg, start, shift, NULL, 0));
}

void
range_tree_destroy(range_tree_t *rt)
{
	VERIFY0(rt->rt_space);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
		rt->rt_ops->rtop_destroy(rt, rt->rt_arg);

	zfs_btree_destroy(&rt->rt_root);
	kmem_free(rt, sizeof (*rt));
}

void
range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta)
{
	if (delta < 0 && delta * -1 >= rs_get_fill(rs, rt)) {
		zfs_panic_recover("zfs: attempting to decrease fill to or "
		    "below 0; probable double remove in segment [%llx:%llx]",
		    (longlong_t)rs_get_start(rs, rt),
		    (longlong_t)rs_get_end(rs, rt));
	}
	if (rs_get_fill(rs, rt) + delta > rs_get_end(rs, rt) -
	    rs_get_start(rs, rt)) {
		zfs_panic_recover("zfs: attempting to increase fill beyond "
		    "max; probable double add in segment [%llx:%llx]",
		    (longlong_t)rs_get_start(rs, rt),
		    (longlong_t)rs_get_end(rs, rt));
	}

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
	rs_set_fill(rs, rt, rs_get_fill(rs, rt) + delta);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}

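/*
 * Add the range [start, start + size) with the given fill to the tree,
 * merging it with any existing segments that lie within rt_gap of it.
 */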
static void
range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{
	range_tree_t *rt = arg;
	zfs_btree_index_t where;
	range_seg_t *rs_before, *rs_after, *rs;
	range_seg_max_t tmp, rsearch;
	uint64_t end = start + size, gap = rt->rt_gap;
	uint64_t bridge_size = 0;
	boolean_t merge_before, merge_after;

	ASSERT3U(size, !=, 0);
	ASSERT3U(fill, <=, size);
	ASSERT3U(start + size, >, start);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);

	/*
	 * If this is a gap-supporting range tree, it is possible that we
	 * are inserting into an existing segment. In this case simply
	 * bump the fill count and call the remove / add callbacks. If the
	 * new range will extend an existing segment, we remove the
	 * existing one, apply the new extent to it and re-insert it using
	 * the normal code paths.
	 */
	if (rs != NULL) {
		if (gap == 0) {
			zfs_panic_recover("zfs: adding existent segment to "
			    "range tree (offset=%llx size=%llx)",
			    (longlong_t)start, (longlong_t)size);
			return;
		}
		uint64_t rstart = rs_get_start(rs, rt);
		uint64_t rend = rs_get_end(rs, rt);
		if (rstart <= start && rend >= end) {
			range_tree_adjust_fill(rt, rs, fill);
			return;
		}

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

		range_tree_stat_decr(rt, rs);
		rt->rt_space -= rend - rstart;

		fill += rs_get_fill(rs, rt);
		start = MIN(start, rstart);
		end = MAX(end, rend);
		size = end - start;

		zfs_btree_remove(&rt->rt_root, rs);
		range_tree_add_impl(rt, start, size, fill);
		return;
	}

	ASSERT3P(rs, ==, NULL);

	/*
	 * Determine whether or not we will have to merge with our neighbors.
	 * If gap != 0, we might need to merge with our neighbors even if we
	 * aren't directly touching.
	 */
	zfs_btree_index_t where_before, where_after;
	rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before);
	rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after);

	merge_before = (rs_before != NULL && rs_get_end(rs_before, rt) >=
	    start - gap);
	merge_after = (rs_after != NULL && rs_get_start(rs_after, rt) <= end +
	    gap);

	if (merge_before && gap != 0)
		bridge_size += start - rs_get_end(rs_before, rt);
	if (merge_after && gap != 0)
		bridge_size += rs_get_start(rs_after, rt) - end;

	if (merge_before && merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
		}

		range_tree_stat_decr(rt, rs_before);
		range_tree_stat_decr(rt, rs_after);

		rs_copy(rs_after, &tmp, rt);
		uint64_t before_start = rs_get_start_raw(rs_before, rt);
		uint64_t before_fill = rs_get_fill(rs_before, rt);
		uint64_t after_fill = rs_get_fill(rs_after, rt);
		zfs_btree_remove_idx(&rt->rt_root, &where_before);

		/*
		 * We have to re-find the node because our old reference is
		 * invalid as soon as we do any mutating btree operations.
		 */
		rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
		rs_set_start_raw(rs_after, rt, before_start);
		rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
		rs = rs_after;
	} else if (merge_before) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);

		range_tree_stat_decr(rt, rs_before);

		uint64_t before_fill = rs_get_fill(rs_before, rt);
		rs_set_end(rs_before, rt, end);
		rs_set_fill(rs_before, rt, before_fill + fill);
		rs = rs_before;
	} else if (merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);

		range_tree_stat_decr(rt, rs_after);

		uint64_t after_fill = rs_get_fill(rs_after, rt);
		rs_set_start(rs_after, rt, start);
		rs_set_fill(rs_after, rt, after_fill + fill);
		rs = rs_after;
	} else {
		rs = &tmp;

		rs_set_start(rs, rt, start);
		rs_set_end(rs, rt, end);
		rs_set_fill(rs, rt, fill);
		zfs_btree_add_idx(&rt->rt_root, rs, &where);
	}

	if (gap != 0) {
		ASSERT3U(rs_get_fill(rs, rt), <=, rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	} else {
		ASSERT3U(rs_get_fill(rs, rt), ==, rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	}

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	range_tree_stat_incr(rt, rs);
	rt->rt_space += size + bridge_size;
}

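/*
 * Worked example of the merge cases above (a sketch with hypothetical
 * offsets): in a tree holding [0x1000, 0x2000) and [0x3000, 0x4000),
 * adding [0x2000, 0x3000) takes the merge_before && merge_after path and
 * leaves the single segment [0x1000, 0x4000). With rt_gap == 0x200,
 * adding [0x2200, 0x2e00) instead bridges the 0x200-byte gaps on either
 * side, again yielding [0x1000, 0x4000); rt_space grows by the added size
 * plus the 0x400 bytes of bridged gap.
 */
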
void
range_tree_add(void *arg, uint64_t start, uint64_t size)
{
	range_tree_add_impl(arg, start, size, size);
}

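/*
 * Remove the range [start, start + size) from the tree. On a
 * gap-supporting tree with do_fill set, size is interpreted as an amount
 * of fill to subtract, unless it equals the segment's entire fill, in
 * which case the whole segment is removed.
 */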
static void
range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
    boolean_t do_fill)
{
	zfs_btree_index_t where;
	range_seg_t *rs;
	range_seg_max_t rsearch, rs_tmp;
	uint64_t end = start + size;
	boolean_t left_over, right_over;

	VERIFY3U(size, !=, 0);
	VERIFY3U(size, <=, rt->rt_space);
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);

	/* Make sure we completely overlap with someone */
	if (rs == NULL) {
		zfs_panic_recover("zfs: removing nonexistent segment from "
		    "range tree (offset=%llx size=%llx)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/*
	 * Range trees with gap support must only remove complete segments
	 * from the tree. This allows us to maintain accurate fill accounting
	 * and to ensure that bridged sections are not leaked. If we need to
	 * remove less than the full segment, we can only adjust the fill
	 * count.
	 */
	if (rt->rt_gap != 0) {
		if (do_fill) {
			if (rs_get_fill(rs, rt) == size) {
				start = rs_get_start(rs, rt);
				end = rs_get_end(rs, rt);
				size = end - start;
			} else {
				range_tree_adjust_fill(rt, rs, -size);
				return;
			}
		} else if (rs_get_start(rs, rt) != start ||
		    rs_get_end(rs, rt) != end) {
			zfs_panic_recover("zfs: freeing partial segment of "
			    "gap tree (offset=%llx size=%llx) of "
			    "(offset=%llx size=%llx)",
			    (longlong_t)start, (longlong_t)size,
			    (longlong_t)rs_get_start(rs, rt),
			    (longlong_t)rs_get_end(rs, rt) - rs_get_start(rs,
			    rt));
			return;
		}
	}

	VERIFY3U(rs_get_start(rs, rt), <=, start);
	VERIFY3U(rs_get_end(rs, rt), >=, end);

	left_over = (rs_get_start(rs, rt) != start);
	right_over = (rs_get_end(rs, rt) != end);

	range_tree_stat_decr(rt, rs);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	if (left_over && right_over) {
		range_seg_max_t newseg;
		rs_set_start(&newseg, rt, end);
		rs_set_end_raw(&newseg, rt, rs_get_end_raw(rs, rt));
		rs_set_fill(&newseg, rt, rs_get_end(rs, rt) - end);
		range_tree_stat_incr(rt, &newseg);

		/* This modifies the buffer already inside the range tree */
		rs_set_end(rs, rt, start);

		rs_copy(rs, &rs_tmp, rt);
		if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL)
			zfs_btree_add_idx(&rt->rt_root, &newseg, &where);
		else
			zfs_btree_add(&rt->rt_root, &newseg);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg);
	} else if (left_over) {
		/* This modifies the buffer already inside the range tree */
		rs_set_end(rs, rt, start);
		rs_copy(rs, &rs_tmp, rt);
	} else if (right_over) {
		/* This modifies the buffer already inside the range tree */
		rs_set_start(rs, rt, end);
		rs_copy(rs, &rs_tmp, rt);
	} else {
		zfs_btree_remove_idx(&rt->rt_root, &where);
		rs = NULL;
	}

	if (rs != NULL) {
		/*
		 * The fill of the leftover segment will always be equal to
		 * the size, since we do not support removing partial segments
		 * of range trees with gaps.
		 */
		rs_set_fill_raw(rs, rt, rs_get_end_raw(rs, rt) -
		    rs_get_start_raw(rs, rt));
		range_tree_stat_incr(rt, &rs_tmp);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg);
	}

	rt->rt_space -= size;
}

void
range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
	range_tree_remove_impl(arg, start, size, B_FALSE);
}

void
range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_tree_remove_impl(rt, start, size, B_TRUE);
}

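/*
 * Resize an existing segment in place to [newstart, newstart + newsize),
 * updating the histogram, the ops callbacks, and rt_space accordingly.
 */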
void
range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
    uint64_t newstart, uint64_t newsize)
{
	int64_t delta = newsize - (rs_get_end(rs, rt) - rs_get_start(rs, rt));

	range_tree_stat_decr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	rs_set_start(rs, rt, newstart);
	rs_set_end(rs, rt, newstart + newsize);

	range_tree_stat_incr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	rt->rt_space += delta;
}

static range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_max_t rsearch;
	uint64_t end = start + size;

	VERIFY(size != 0);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	return (zfs_btree_find(&rt->rt_root, &rsearch, NULL));
}

range_seg_t *
range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size)
{
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	range_seg_t *rs = range_tree_find_impl(rt, start, size);
	if (rs != NULL && rs_get_start(rs, rt) <= start &&
	    rs_get_end(rs, rt) >= start + size) {
		return (rs);
	}
	return (NULL);
}

void
range_tree_verify_not_present(range_tree_t *rt, uint64_t off, uint64_t size)
{
	range_seg_t *rs = range_tree_find(rt, off, size);
	if (rs != NULL)
		panic("segment already in tree; rs=%p", (void *)rs);
}

boolean_t
range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
{
	return (range_tree_find(rt, start, size) != NULL);
}

/*
 * Returns the first subset of the given range which overlaps with the range
 * tree. Returns true if there is a segment in the range, and false if there
 * isn't.
 */
boolean_t
range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
    uint64_t *ostart, uint64_t *osize)
{
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	range_seg_max_t rsearch;
	rs_set_start(&rsearch, rt, start);
	rs_set_end_raw(&rsearch, rt, rs_get_start_raw(&rsearch, rt) + 1);

	zfs_btree_index_t where;
	range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
	if (rs != NULL) {
		*ostart = start;
		*osize = MIN(size, rs_get_end(rs, rt) - start);
		return (B_TRUE);
	}

	rs = zfs_btree_next(&rt->rt_root, &where, &where);
	if (rs == NULL || rs_get_start(rs, rt) > start + size)
		return (B_FALSE);

	*ostart = rs_get_start(rs, rt);
	*osize = MIN(start + size, rs_get_end(rs, rt)) -
	    rs_get_start(rs, rt);
	return (B_TRUE);
}

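/*
 * Illustrative caller loop for range_tree_find_in() (a sketch; rt, start,
 * and size are hypothetical):
 *
 *	uint64_t ostart, osize;
 *	while (size > 0 && range_tree_find_in(rt, start, size,
 *	    &ostart, &osize)) {
 *		process the overlap [ostart, ostart + osize), then advance:
 *		size -= (ostart + osize) - start;
 *		start = ostart + osize;
 *	}
 */
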
/*
 * Ensure that this range is not in the tree, regardless of whether
 * it is currently in the tree.
 */
void
range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_t *rs;

	if (size == 0)
		return;

	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	while ((rs = range_tree_find_impl(rt, start, size)) != NULL) {
		uint64_t free_start = MAX(rs_get_start(rs, rt), start);
		uint64_t free_end = MIN(rs_get_end(rs, rt), start + size);
		range_tree_remove(rt, free_start, free_end - free_start);
	}
}

void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
{
	range_tree_t *rt;

	ASSERT0(range_tree_space(*rtdst));
	ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root));

	rt = *rtsrc;
	*rtsrc = *rtdst;
	*rtdst = rt;
}

void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
		rt->rt_ops->rtop_vacate(rt, rt->rt_arg);

	if (func != NULL) {
		range_seg_t *rs;
		zfs_btree_index_t *cookie = NULL;

		while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) !=
		    NULL) {
			func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
			    rs_get_start(rs, rt));
		}
	} else {
		zfs_btree_clear(&rt->rt_root);
	}

	memset(rt->rt_histogram, 0, sizeof (rt->rt_histogram));
	rt->rt_space = 0;
}

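/*
 * Invoke func(arg, start, size) on every segment, leaving the tree intact
 * (unlike range_tree_vacate(), which empties it).
 */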
void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
	    rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	}
}

range_seg_t *
range_tree_first(range_tree_t *rt)
{
	return (zfs_btree_first(&rt->rt_root, NULL));
}

uint64_t
range_tree_space(range_tree_t *rt)
{
	return (rt->rt_space);
}

uint64_t
range_tree_numsegs(range_tree_t *rt)
{
	return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root));
}

boolean_t
range_tree_is_empty(range_tree_t *rt)
{
	ASSERT(rt != NULL);
	return (range_tree_space(rt) == 0);
}

void
rt_btree_create(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;

	size_t size;
	switch (rt->rt_type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		break;
	default:
		panic("Invalid range seg type %d", rt->rt_type);
	}
	zfs_btree_create(size_tree, rt->rt_btree_compare, size);
}

void
rt_btree_destroy(range_tree_t *rt, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;
	ASSERT0(zfs_btree_numnodes(size_tree));

	zfs_btree_destroy(size_tree);
}

void
rt_btree_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;

	zfs_btree_add(size_tree, rs);
}

void
rt_btree_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;

	zfs_btree_remove(size_tree, rs);
}

void
rt_btree_vacate(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;
	zfs_btree_clear(size_tree);
	zfs_btree_destroy(size_tree);

	rt_btree_create(rt, arg);
}

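/*
 * An ops vector that mirrors every segment of a range tree into a second
 * btree, ordered by the rt_btree_compare function supplied to
 * range_tree_create_impl() (typically a size-based ordering, so that
 * callers can look segments up by size rather than by offset).
 */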
const range_tree_ops_t rt_btree_ops = {
	.rtop_create = rt_btree_create,
	.rtop_destroy = rt_btree_destroy,
	.rtop_add = rt_btree_add,
	.rtop_remove = rt_btree_remove,
	.rtop_vacate = rt_btree_vacate
};

/*
 * Remove any overlapping ranges between the given segment [start, end)
 * from removefrom. Add non-overlapping leftovers to addto.
 */
void
range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
    range_tree_t *removefrom, range_tree_t *addto)
{
	zfs_btree_index_t where;
	range_seg_max_t starting_rs;
	rs_set_start(&starting_rs, removefrom, start);
	rs_set_end_raw(&starting_rs, removefrom, rs_get_start_raw(&starting_rs,
	    removefrom) + 1);

	range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
	    &starting_rs, &where);

	if (curr == NULL)
		curr = zfs_btree_next(&removefrom->rt_root, &where, &where);

	range_seg_t *next;
	for (; curr != NULL; curr = next) {
		if (start == end)
			return;
		VERIFY3U(start, <, end);

		/* there is no overlap */
		if (end <= rs_get_start(curr, removefrom)) {
			range_tree_add(addto, start, end - start);
			return;
		}

		uint64_t overlap_start = MAX(rs_get_start(curr, removefrom),
		    start);
		uint64_t overlap_end = MIN(rs_get_end(curr, removefrom),
		    end);
		uint64_t overlap_size = overlap_end - overlap_start;
		ASSERT3S(overlap_size, >, 0);
		range_seg_max_t rs;
		rs_copy(curr, &rs, removefrom);

		range_tree_remove(removefrom, overlap_start, overlap_size);

		if (start < overlap_start)
			range_tree_add(addto, start, overlap_start - start);

		start = overlap_end;
		next = zfs_btree_find(&removefrom->rt_root, &rs, &where);
		/*
		 * If we find something here, we only removed part of the
		 * curr segment. Either there's some left at the end
		 * because we've reached the end of the range we're removing,
		 * or there's some left at the start because we started
		 * partway through the range. Either way, we continue with
		 * the loop. If it's the former, we'll return at the start of
		 * the loop, and if it's the latter we'll see if there is more
		 * area to process.
		 */
		if (next != NULL) {
			ASSERT(start == end || start == rs_get_end(&rs,
			    removefrom));
		}

		next = zfs_btree_next(&removefrom->rt_root, &where, &where);
	}
	VERIFY3P(curr, ==, NULL);

	if (start != end) {
		VERIFY3U(start, <, end);
		range_tree_add(addto, start, end - start);
	} else {
		VERIFY3U(start, ==, end);
	}
}

/*
 * For each entry in rt, if it exists in removefrom, remove it
 * from removefrom. Otherwise, add it to addto.
 */
void
range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
    range_tree_t *addto)
{
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
	    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		range_tree_remove_xor_add_segment(rs_get_start(rs, rt),
		    rs_get_end(rs, rt), removefrom, addto);
	}
}

uint64_t
range_tree_min(range_tree_t *rt)
{
	range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
	return (rs != NULL ? rs_get_start(rs, rt) : 0);
}

uint64_t
range_tree_max(range_tree_t *rt)
{
	range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
	return (rs != NULL ? rs_get_end(rs, rt) : 0);
}

uint64_t
range_tree_span(range_tree_t *rt)
{
	return (range_tree_max(rt) - range_tree_min(rt));
}