/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>

/*
 * Deadlist concurrency:
 *
 * Deadlists can only be modified from the syncing thread.
 *
 * Except for dsl_deadlist_insert(), it can only be modified with the
 * dp_config_rwlock held with RW_WRITER.
 *
 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
 * be called concurrently, from open context, with the dp_config_rwlock held
 * with RW_READER.
 *
 * Therefore, we only need to provide locking between dsl_deadlist_insert() and
 * the accessors, protecting:
 *     dl_phys->dl_used,comp,uncomp
 * and protecting the dl_tree from being loaded.
 * The locking is provided by dl_lock.  Note that the bpobj_t
 * provides its own locking, and dl_oldfmt is immutable.
 */
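
/*
 * Illustrative sketch (not part of the build): how an open-context
 * reader is expected to use the accessors under the rules above.
 * The dataset/pool variables here are hypothetical.
 *
 *	uint64_t used, comp, uncomp;
 *	dsl_pool_config_enter(dp, FTAG);	// dp_config_rwlock, RW_READER
 *	dsl_deadlist_space(&ds->ds_deadlist, &used, &comp, &uncomp);
 *	dsl_pool_config_exit(dp, FTAG);
 */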

/*
 * Livelist Overview
 * =================
 *
 * Livelists use the same 'deadlist_t' struct as deadlists and are also used
 * to track blkptrs over the lifetime of a dataset. Livelists, however, belong
 * to clones and track the blkptrs that are clone-specific (were born after
 * the clone's creation). The exception is embedded block pointers, which are
 * not included in livelists because they do not need to be freed.
 *
 * When it comes time to delete the clone, the livelist provides a quick
 * reference as to what needs to be freed. For this reason, livelists also track
 * when clone-specific blkptrs are freed before deletion to prevent double
 * frees. Each blkptr in a livelist is marked as a FREE or an ALLOC and the
 * deletion algorithm iterates backwards over the livelist, matching
 * FREE/ALLOC pairs and then freeing those ALLOCs which remain. Livelists
 * are also updated in the case when blkptrs are remapped: the old version
 * of the blkptr is cancelled out with a FREE and the new version is tracked
 * with an ALLOC.
 *
 * To bound the amount of memory required for deletion, livelists over a
 * certain size are spread over multiple entries. Entries are grouped by
 * birth txg so we can be sure the ALLOC/FREE pair for a given blkptr will
 * be in the same entry. This allows us to delete livelists incrementally
 * over multiple syncs, one entry at a time.
 *
 * During the lifetime of the clone, livelists can get extremely large.
 * Their size is managed by periodic condensing (preemptively cancelling out
 * FREE/ALLOC pairs). Livelists are disabled when a clone is promoted or when
 * the shared space between the clone and its origin is so small that it
 * doesn't make sense to use livelists anymore.
 */

/*
 * The threshold sublist size at which we create a new sub-livelist for the
 * next txg. However, since blkptrs of the same transaction group must be in
 * the same sub-list, the actual sublist size may exceed this. When picking the
 * size we had to balance the fact that larger sublists mean fewer sublists
 * (decreasing the cost of insertion) against the consideration that sublists
 * will be loaded into memory and shouldn't take up an inordinate amount of
 * space. We settled on ~500000 entries, corresponding to roughly 128M.
 */
uint64_t zfs_livelist_max_entries = 500000;

/*
 * We can approximate how much of a performance gain a livelist will give us
 * based on the percentage of blocks shared between the clone and its origin.
 * 0 percent shared means that the clone has completely diverged and that the
 * old method is maximally effective: every read from the block tree will
 * result in lots of frees. Livelists give us gains when they track blocks
 * scattered across the tree, when one read in the old method might only
 * result in a few frees. Once the clone has been overwritten enough,
 * writes are no longer sparse and we'll no longer get much of a benefit from
 * tracking them with a livelist. We chose a lower limit of 75 percent shared
 * (25 percent overwritten). This means that 1/4 of all block pointers will be
 * freed (e.g. each read frees 256, out of a max of 1024), so we expect
 * livelists to make deletion 4x faster. Once the amount of shared space drops
 * below this threshold, the clone will revert to the old deletion method.
 */
int zfs_livelist_min_percent_shared = 75;

static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_entry_t *dle1 = arg1;
	const dsl_deadlist_entry_t *dle2 = arg2;

	return (TREE_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
}

static int
dsl_deadlist_cache_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_cache_entry_t *dlce1 = arg1;
	const dsl_deadlist_cache_entry_t *dlce2 = arg2;

	return (TREE_CMP(dlce1->dlce_mintxg, dlce2->dlce_mintxg));
}
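
/*
 * Illustrative sketch (not part of the build): the comparators above
 * order entries by mintxg, so lookups throughout this file use a
 * stack-allocated key and fall back to the nearest neighbor, e.g.:
 *
 *	dsl_deadlist_entry_t dle_tofind = { .dle_mintxg = txg };
 *	avl_index_t where;
 *	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
 *	if (dle == NULL)	// no exact key; use the preceding entry
 *		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
 */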

/*
 * Load the deadlist's entries from the ZAP into dl_tree, discarding any
 * stale dl_cache first (the caller may be about to modify the tree).
 */
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache) {
		/*
		 * After loading the tree, the caller may modify the tree,
		 * e.g. to add or remove nodes, or to make a node no longer
		 * refer to the empty_bpobj.  These changes would make the
		 * dl_cache incorrect.  Therefore we discard the cache here,
		 * so that it can't become incorrect.
		 */
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL) {
			kmem_free(dlce, sizeof (*dlce));
		}
		avl_destroy(&dl->dl_cache);
		dl->dl_havecache = B_FALSE;
	}
	if (dl->dl_havetree)
		return;

	za = zap_attribute_alloc();
	avl_create(&dl->dl_tree, dsl_deadlist_compare,
	    sizeof (dsl_deadlist_entry_t),
	    offsetof(dsl_deadlist_entry_t, dle_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
		dle->dle_mintxg = zfs_strtonum(za->za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dle->dle_bpobj.bpo_object = za->za_first_integer;
		dmu_prefetch_dnode(dl->dl_os, dle->dle_bpobj.bpo_object,
		    ZIO_PRIORITY_SYNC_READ);

		avl_add(&dl->dl_tree, dle);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	for (dsl_deadlist_entry_t *dle = avl_first(&dl->dl_tree);
	    dle != NULL; dle = AVL_NEXT(&dl->dl_tree, dle)) {
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os,
		    dle->dle_bpobj.bpo_object));
	}
	dl->dl_havetree = B_TRUE;
}

/*
 * Load only the non-empty bpobj's into the dl_cache.  The cache is an analog
 * of the dl_tree, but contains only non-empty_bpobj nodes from the ZAP.  It
 * is used only for gathering space statistics.  The dl_cache has two
 * advantages over the dl_tree:
 *
 * 1. Loading the dl_cache is ~5x faster than loading the dl_tree (if it's
 * mostly empty_bpobj's), due to less CPU overhead to open the empty_bpobj
 * many times and to inquire about its (zero) space stats many times.
 *
 * 2. The dl_cache uses less memory than the dl_tree.  We only need to load
 * the dl_tree of snapshots when deleting a snapshot, after which we free the
 * dl_tree with dsl_deadlist_discard_tree().
 */
static void
dsl_deadlist_load_cache(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havecache)
		return;

	uint64_t empty_bpobj = dmu_objset_pool(dl->dl_os)->dp_empty_bpobj;

	avl_create(&dl->dl_cache, dsl_deadlist_cache_compare,
	    sizeof (dsl_deadlist_cache_entry_t),
	    offsetof(dsl_deadlist_cache_entry_t, dlce_node));
	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		if (za->za_first_integer == empty_bpobj)
			continue;
		dsl_deadlist_cache_entry_t *dlce =
		    kmem_zalloc(sizeof (*dlce), KM_SLEEP);
		dlce->dlce_mintxg = zfs_strtonum(za->za_name, NULL);

		/*
		 * Prefetch all the bpobj's so that we do that i/o
		 * in parallel.  Then open them all in a second pass.
		 */
		dlce->dlce_bpobj = za->za_first_integer;
		dmu_prefetch_dnode(dl->dl_os, dlce->dlce_bpobj,
		    ZIO_PRIORITY_SYNC_READ);
		avl_add(&dl->dl_cache, dlce);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	for (dsl_deadlist_cache_entry_t *dlce = avl_first(&dl->dl_cache);
	    dlce != NULL; dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, dlce->dlce_bpobj));

		VERIFY0(bpobj_space(&bpo,
		    &dlce->dlce_bytes, &dlce->dlce_comp, &dlce->dlce_uncomp));
		bpobj_close(&bpo);
	}
	dl->dl_havecache = B_TRUE;
}
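
/*
 * Illustrative sketch (not part of the build): the dl_cache exists so
 * that a space query like the one below can be answered from the
 * pre-computed per-entry totals without opening every bpobj; the txg
 * bounds here are hypothetical.
 *
 *	uint64_t used, comp, uncomp;
 *	dsl_deadlist_space_range(&ds->ds_deadlist,
 *	    prev_snap_txg, cur_snap_txg, &used, &comp, &uncomp);
 */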

/*
 * Discard the tree to save memory.
 */
void
dsl_deadlist_discard_tree(dsl_deadlist_t *dl)
{
	mutex_enter(&dl->dl_lock);

	if (!dl->dl_havetree) {
		mutex_exit(&dl->dl_lock);
		return;
	}
	dsl_deadlist_entry_t *dle;
	void *cookie = NULL;
	while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) {
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
	}
	avl_destroy(&dl->dl_tree);

	dl->dl_havetree = B_FALSE;
	mutex_exit(&dl->dl_lock);
}

/*
 * Call func on each entry, in mintxg order, stopping early if the
 * callback returns nonzero.
 */
void
dsl_deadlist_iterate(dsl_deadlist_t *dl, deadlist_iter_t func, void *args)
{
	dsl_deadlist_entry_t *dle;

	ASSERT(dsl_deadlist_is_open(dl));

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	mutex_exit(&dl->dl_lock);
	for (dle = avl_first(&dl->dl_tree); dle != NULL;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		if (func(args, dle) != 0)
			break;
	}
}

int
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;
	int err;

	ASSERT(!dsl_deadlist_is_open(dl));

	mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
	dl->dl_os = os;
	dl->dl_object = object;
	err = dmu_bonus_hold(os, object, dl, &dl->dl_dbuf);
	if (err != 0)
		return (err);
	dmu_object_info_from_db(dl->dl_dbuf, &doi);
	if (doi.doi_type == DMU_OT_BPOBJ) {
		dmu_buf_rele(dl->dl_dbuf, dl);
		dl->dl_dbuf = NULL;
		dl->dl_oldfmt = B_TRUE;
		return (bpobj_open(&dl->dl_bpobj, os, object));
	}

	dl->dl_oldfmt = B_FALSE;
	dl->dl_phys = dl->dl_dbuf->db_data;
	dl->dl_havetree = B_FALSE;
	dl->dl_havecache = B_FALSE;
	return (0);
}

boolean_t
dsl_deadlist_is_open(dsl_deadlist_t *dl)
{
	return (dl->dl_os != NULL);
}

void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
	ASSERT(dsl_deadlist_is_open(dl));
	mutex_destroy(&dl->dl_lock);

	if (dl->dl_oldfmt) {
		dl->dl_oldfmt = B_FALSE;
		bpobj_close(&dl->dl_bpobj);
		dl->dl_os = NULL;
		dl->dl_object = 0;
		return;
	}

	if (dl->dl_havetree) {
		dsl_deadlist_entry_t *dle;
		void *cookie = NULL;
		while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
		    != NULL) {
			bpobj_close(&dle->dle_bpobj);
			kmem_free(dle, sizeof (*dle));
		}
		avl_destroy(&dl->dl_tree);
	}
	if (dl->dl_havecache) {
		dsl_deadlist_cache_entry_t *dlce;
		void *cookie = NULL;
		while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
		    != NULL) {
			kmem_free(dlce, sizeof (*dlce));
		}
		avl_destroy(&dl->dl_cache);
	}
	dmu_buf_rele(dl->dl_dbuf, dl);
	dl->dl_dbuf = NULL;
	dl->dl_phys = NULL;
	dl->dl_os = NULL;
	dl->dl_object = 0;
}

uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
	return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
	    sizeof (dsl_deadlist_phys_t), tx));
}
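
/*
 * Illustrative sketch (not part of the build): the open/close
 * lifecycle.  dsl_deadlist_open() inspects the object type on disk:
 * a pre-SPA_VERSION_DEADLISTS pool stores a plain bpobj
 * (DMU_OT_BPOBJ, dl_oldfmt set), while newer pools store a ZAP of
 * mintxg -> bpobj (DMU_OT_DEADLIST).  "os" and "dlobj" here are
 * hypothetical.
 *
 *	dsl_deadlist_t dl = { 0 };
 *	VERIFY0(dsl_deadlist_open(&dl, os, dlobj));
 *	if (dl.dl_oldfmt) {
 *		// single bpobj; no per-txg entries available
 *	}
 *	dsl_deadlist_close(&dl);
 */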

void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
	dmu_object_info_t doi;
	zap_cursor_t zc;
	zap_attribute_t *za;
	int error;

	VERIFY0(dmu_object_info(os, dlobj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_free(os, dlobj, tx);
		return;
	}

	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, os, dlobj);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t obj = za->za_first_integer;
		if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
			bpobj_decr_empty(os, tx);
		else
			bpobj_free(os, obj, tx);
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	VERIFY0(dmu_object_free(os, dlobj, tx));
}

static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object ==
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		/*
		 * This entry still references the shared empty_bpobj;
		 * allocate a private bpobj for it before enqueueing.
		 */
		uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
	bpobj_enqueue(&dle->dle_bpobj, bp, bp_freed, tx);
}

static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(&dl->dl_lock));
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
	} else {
		/* The entry is still empty; adopt obj as its bpobj. */
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
}

/*
 * Prefetch metadata required for dle_enqueue_subobj().
 */
static void
dle_prefetch_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj)
{
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj)
		bpobj_prefetch_subobj(&dle->dle_bpobj, obj);
}
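
/*
 * Worked example (not part of the build) for dsl_deadlist_insert()
 * below: with entries keyed {0, 10, 20}, a block pointer whose logical
 * birth txg is 25 is found via avl_find(25) -> NULL, then
 * avl_nearest(AVL_BEFORE) -> the mintxg==20 entry; a bp born exactly
 * at txg 20 matches the 20 entry and steps back via AVL_PREV to the
 * mintxg==10 entry, since each entry covers births in
 * (dle_mintxg, next key].
 */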

void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		bpobj_enqueue(&dl->dl_bpobj, bp, bp_freed, tx);
		return;
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);

	int sign = bp_freed ? -1 : +1;
	dl->dl_phys->dl_used +=
	    sign * bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
	dl->dl_phys->dl_comp += sign * BP_GET_PSIZE(bp);
	dl->dl_phys->dl_uncomp += sign * BP_GET_UCSIZE(bp);

	dle_tofind.dle_mintxg = BP_GET_LOGICAL_BIRTH(bp);
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	else
		dle = AVL_PREV(&dl->dl_tree, dle);

	if (dle == NULL) {
		zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
		    bp, (longlong_t)BP_GET_LOGICAL_BIRTH(bp));
		dle = avl_first(&dl->dl_tree);
	}

	ASSERT3P(dle, !=, NULL);
	dle_enqueue(dl, dle, bp, bp_freed, tx);
	mutex_exit(&dl->dl_lock);
}

int
dsl_deadlist_insert_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_FALSE, tx);
	return (0);
}

int
dsl_deadlist_insert_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, B_TRUE, tx);
	return (0);
}

/*
 * Insert a new key in the deadlist, which must be > all current entries.
 * mintxg is not inclusive.
 */
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t obj;
	dsl_deadlist_entry_t *dle;

	if (dl->dl_oldfmt)
		return;

	dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
	dle->dle_mintxg = mintxg;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
	avl_add(&dl->dl_tree, dle);

	VERIFY0(zap_add_int_key(dl->dl_os, dl->dl_object,
	    mintxg, obj, tx));
	mutex_exit(&dl->dl_lock);
}
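
/*
 * Illustrative sketch (not part of the build): deadlist keys track
 * snapshot boundaries.  Taking snapshots at txgs 10 and 20 would add
 * keys so that later deaths are bucketed by birth txg as described
 * above; the dataset variable and txg values here are hypothetical.
 *
 *	dsl_deadlist_add_key(&ds->ds_deadlist, 10, tx);	// snap @a
 *	dsl_deadlist_add_key(&ds->ds_deadlist, 20, tx);	// snap @b
 *
 * Destroying @a later merges the 10-key entry into its predecessor
 * via dsl_deadlist_remove_key() below.
 */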

/*
 * Remove this key, merging its entries into the previous key.
 */
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *dle_prev;

	if (dl->dl_oldfmt)
		return;
	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	ASSERT3P(dle, !=, NULL);
	dle_prev = AVL_PREV(&dl->dl_tree, dle);
	ASSERT3P(dle_prev, !=, NULL);

	dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);

	avl_remove(&dl->dl_tree, dle);
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));

	VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
	mutex_exit(&dl->dl_lock);
}

/*
 * Remove a deadlist entry and all of its contents by removing the entry from
 * the deadlist's AVL tree, freeing the entry's bpobj and adjusting the
 * deadlist's space accounting accordingly.
 */
void
dsl_deadlist_remove_entry(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t used, comp, uncomp;
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	objset_t *os = dl->dl_os;

	if (dl->dl_oldfmt)
		return;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	VERIFY3P(dle, !=, NULL);

	avl_remove(&dl->dl_tree, dle);
	VERIFY0(zap_remove_int(os, dl->dl_object, mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj) {
		bpobj_decr_empty(os, tx);
	} else {
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	}
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));
	mutex_exit(&dl->dl_lock);
}

/*
 * Clear out the contents of a deadlist entry by freeing its bpobj,
 * replacing it with an empty bpobj and adjusting the deadlist's
 * space accounting.
 */
void
dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl,
    dmu_tx_t *tx)
{
	uint64_t new_obj, used, comp, uncomp;
	objset_t *os = dl->dl_os;

	mutex_enter(&dl->dl_lock);
	VERIFY0(zap_remove_int(os, dl->dl_object, dle->dle_mintxg, tx));
	VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used -= used;
	dl->dl_phys->dl_comp -= comp;
	dl->dl_phys->dl_uncomp -= uncomp;
	if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj)
		bpobj_decr_empty(os, tx);
	else
		bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
	bpobj_close(&dle->dle_bpobj);
	new_obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dle->dle_bpobj, os, new_obj));
	VERIFY0(zap_add_int_key(os, dl->dl_object, dle->dle_mintxg,
	    new_obj, tx));
	ASSERT(bpobj_is_empty(&dle->dle_bpobj));
	mutex_exit(&dl->dl_lock);
}

/*
 * Return the first entry in the deadlist's AVL tree.
 */
dsl_deadlist_entry_t *
dsl_deadlist_first(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_first(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}

/*
 * Return the last entry in the deadlist's AVL tree.
 */
dsl_deadlist_entry_t *
dsl_deadlist_last(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle = avl_last(&dl->dl_tree);
	mutex_exit(&dl->dl_lock);

	return (dle);
}

/*
 * Walk ds's snapshots to regenerate the ZAP & AVL.
 */
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_t dl = { 0 };
	dsl_pool_t *dp = dmu_objset_pool(os);

	VERIFY0(dsl_deadlist_open(&dl, os, dlobj));
	if (dl.dl_oldfmt) {
		dsl_deadlist_close(&dl);
		return;
	}

	while (mrs_obj != 0) {
		dsl_dataset_t *ds;
		VERIFY0(dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
		dsl_deadlist_add_key(&dl,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
		mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_deadlist_close(&dl);
}

/*
 * Create a new deadlist with the same keys as dl (up to maxtxg), but
 * with empty bpobjs.
 */
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t *dle;
	uint64_t newobj;

	newobj = dsl_deadlist_alloc(dl->dl_os, tx);

	if (dl->dl_oldfmt) {
		dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
		return (newobj);
	}

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t obj;

		if (dle->dle_mintxg >= maxtxg)
			break;

		obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY0(zap_add_int_key(dl->dl_os, newobj,
		    dle->dle_mintxg, obj, tx));
	}
	mutex_exit(&dl->dl_lock);
	return (newobj);
}

void
dsl_deadlist_space(dsl_deadlist_t *dl,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	ASSERT(dsl_deadlist_is_open(dl));
	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space(&dl->dl_bpobj,
		    usedp, compp, uncompp));
		return;
	}

	mutex_enter(&dl->dl_lock);
	*usedp = dl->dl_phys->dl_used;
	*compp = dl->dl_phys->dl_comp;
	*uncompp = dl->dl_phys->dl_uncomp;
	mutex_exit(&dl->dl_lock);
}

/*
 * Return space used in the range (mintxg, maxtxg].
 * Includes maxtxg, does not include mintxg.
 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
 * UINT64_MAX).
 */
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	dsl_deadlist_cache_entry_t *dlce;
	dsl_deadlist_cache_entry_t dlce_tofind;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		VERIFY0(bpobj_space_range(&dl->dl_bpobj,
		    mintxg, maxtxg, usedp, compp, uncompp));
		return;
	}

	*usedp = *compp = *uncompp = 0;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_cache(dl);
	dlce_tofind.dlce_mintxg = mintxg;
	dlce = avl_find(&dl->dl_cache, &dlce_tofind, &where);

	/*
	 * If this mintxg doesn't exist, it may be an empty_bpobj which
	 * is omitted from the sparse tree.  Start at the next non-empty
	 * entry.
	 */
	if (dlce == NULL)
		dlce = avl_nearest(&dl->dl_cache, where, AVL_AFTER);

	for (; dlce && dlce->dlce_mintxg < maxtxg;
	    dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
		*usedp += dlce->dlce_bytes;
		*compp += dlce->dlce_comp;
		*uncompp += dlce->dlce_uncomp;
	}

	mutex_exit(&dl->dl_lock);
}
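
/*
 * Worked example (not part of the build) for the range semantics
 * above: with cache entries keyed {0, 10, 20}, a call with mintxg == 10
 * and maxtxg == 20 sums only the entry whose dlce_mintxg == 10, i.e.
 * the blocks born in (10, 20]; maxtxg == UINT64_MAX instead sums every
 * entry from key 10 onward.
 */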

static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;
	uint64_t used, comp, uncomp;
	bpobj_t bpo;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
	VERIFY0(bpobj_space(&bpo, &used, &comp, &uncomp));
	bpobj_close(&bpo);

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dl->dl_phys->dl_used += used;
	dl->dl_phys->dl_comp += comp;
	dl->dl_phys->dl_uncomp += uncomp;

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_enqueue_subobj(dl, dle, obj, tx);
}

/*
 * Prefetch metadata required for dsl_deadlist_insert_bpobj().
 */
static void
dsl_deadlist_prefetch_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	ASSERT(MUTEX_HELD(&dl->dl_lock));

	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_prefetch_subobj(dl, dle, obj);
}

static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, bp_freed, tx);
	return (0);
}
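
/*
 * Illustrative sketch (not part of the build): dsl_deadlist_merge()
 * below pipelines two ZAP cursors so the metadata reads for entry
 * N+128 are in flight while entry N is being merged:
 *
 *	pzc (prefetch): [N+1 .. N+128] -> dsl_deadlist_prefetch_bpobj()
 *	zc  (work):     [N]            -> dsl_deadlist_insert_bpobj()
 */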

/*
 * Merge the deadlist pointed to by 'obj' into dl.  obj will be left as
 * an empty deadlist.
 */
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
	zap_cursor_t zc, pzc;
	zap_attribute_t *za, *pza;
	dmu_buf_t *bonus;
	dsl_deadlist_phys_t *dlp;
	dmu_object_info_t doi;
	int error, perror, i;

	VERIFY0(dmu_object_info(dl->dl_os, obj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_t bpo;
		VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
		VERIFY0(bpobj_iterate(&bpo, dsl_deadlist_insert_cb, dl, tx));
		bpobj_close(&bpo);
		return;
	}

	za = zap_attribute_alloc();
	pza = zap_attribute_alloc();

	mutex_enter(&dl->dl_lock);
	/*
	 * Prefetch up to 128 deadlists first and then more as we progress.
	 * The limit is a balance between ARC use and diminishing returns.
	 */
	for (zap_cursor_init(&pzc, dl->dl_os, obj), i = 0;
	    (perror = zap_cursor_retrieve(&pzc, pza)) == 0 && i < 128;
	    zap_cursor_advance(&pzc), i++) {
		dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
		    zfs_strtonum(pza->za_name, NULL));
	}
	for (zap_cursor_init(&zc, dl->dl_os, obj);
	    (error = zap_cursor_retrieve(&zc, za)) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_insert_bpobj(dl, za->za_first_integer,
		    zfs_strtonum(za->za_name, NULL), tx);
		VERIFY0(zap_remove(dl->dl_os, obj, za->za_name, tx));
		if (perror == 0) {
			dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
			    zfs_strtonum(pza->za_name, NULL));
			zap_cursor_advance(&pzc);
			perror = zap_cursor_retrieve(&pzc, pza);
		}
	}
	VERIFY3U(error, ==, ENOENT);
	zap_cursor_fini(&zc);
	zap_cursor_fini(&pzc);

	VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
	dlp = bonus->db_data;
	dmu_buf_will_dirty(bonus, tx);
	memset(dlp, 0, sizeof (*dlp));
	dmu_buf_rele(bonus, FTAG);
	mutex_exit(&dl->dl_lock);

	zap_attribute_free(za);
	zap_attribute_free(pza);
}

/*
 * Remove entries on dl that are born > mintxg, and put them on the bpobj.
 */
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *pdle;
	avl_index_t where;
	int i;

	ASSERT(!dl->dl_oldfmt);

	mutex_enter(&dl->dl_lock);
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
	/*
	 * Prefetch up to 128 deadlists first and then more as we progress.
	 * The limit is a balance between ARC use and diminishing returns.
	 */
	for (pdle = dle, i = 0; pdle && i < 128; i++) {
		bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
		pdle = AVL_NEXT(&dl->dl_tree, pdle);
	}
	while (dle) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_entry_t *dle_next;

		bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
		if (pdle) {
			bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
			pdle = AVL_NEXT(&dl->dl_tree, pdle);
		}

		VERIFY0(bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));
		ASSERT3U(dl->dl_phys->dl_used, >=, used);
		ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
		ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
		dl->dl_phys->dl_used -= used;
		dl->dl_phys->dl_comp -= comp;
		dl->dl_phys->dl_uncomp -= uncomp;

		VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, tx));

		dle_next = AVL_NEXT(&dl->dl_tree, dle);
		avl_remove(&dl->dl_tree, dle);
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
		dle = dle_next;
	}
	mutex_exit(&dl->dl_lock);
}
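
/*
 * Worked example (not part of the build) of the livelist matching
 * implemented below: iterating backwards, a FREE of bp X inserts X
 * with le_refcnt == 1; a second FREE of the same X (dedup or block
 * cloning) bumps le_refcnt to 2; each earlier ALLOC of X then
 * decrements the count, and when it reaches zero the node is removed,
 * so X is never handed to the to_free list.  An ALLOC with no tracked
 * FREE is still allocated and is appended to to_free.
 */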

typedef struct livelist_entry {
	blkptr_t le_bp;
	uint32_t le_refcnt;
	avl_node_t le_node;
} livelist_entry_t;

static int
livelist_compare(const void *larg, const void *rarg)
{
	const blkptr_t *l = &((livelist_entry_t *)larg)->le_bp;
	const blkptr_t *r = &((livelist_entry_t *)rarg)->le_bp;

	/* Sort them according to dva[0] */
	uint64_t l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
	uint64_t r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);

	if (l_dva0_vdev != r_dva0_vdev)
		return (TREE_CMP(l_dva0_vdev, r_dva0_vdev));

	/* if vdevs are equal, sort by offsets. */
	uint64_t l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
	uint64_t r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
	return (TREE_CMP(l_dva0_offset, r_dva0_offset));
}

struct livelist_iter_arg {
	avl_tree_t *avl;
	bplist_t *to_free;
	zthr_t *t;
};

/*
 * Expects an AVL tree which is incrementally filled with FREE blkptrs
 * and used to match up ALLOC/FREE pairs.  ALLOC'd blkptrs without a
 * corresponding FREE are stored in the supplied bplist.
 *
 * Note that multiple FREE and ALLOC entries for the same blkptr may be
 * encountered when dedup or block cloning is involved.  For this reason we
 * keep a refcount for all the FREE entries of each blkptr and ensure that
 * each of those FREE entries has a corresponding ALLOC preceding it.
 */
static int
dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	struct livelist_iter_arg *lia = arg;
	avl_tree_t *avl = lia->avl;
	bplist_t *to_free = lia->to_free;
	zthr_t *t = lia->t;
	ASSERT(tx == NULL);

	if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
		return (SET_ERROR(EINTR));

	livelist_entry_t node;
	node.le_bp = *bp;
	livelist_entry_t *found = avl_find(avl, &node, NULL);
	if (found) {
		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(&found->le_bp));
		ASSERT3U(BP_GET_CHECKSUM(bp), ==,
		    BP_GET_CHECKSUM(&found->le_bp));
		ASSERT3U(BP_GET_BIRTH(bp), ==, BP_GET_BIRTH(&found->le_bp));
	}
	if (bp_freed) {
		if (found == NULL) {
			/* first free entry for this blkptr */
			livelist_entry_t *e =
			    kmem_alloc(sizeof (livelist_entry_t), KM_SLEEP);
			e->le_bp = *bp;
			e->le_refcnt = 1;
			avl_add(avl, e);
		} else {
			/*
			 * Deduped or cloned block free.  We could assert D bit
			 * for dedup, but there is no such one for cloning.
			 */
			ASSERT3U(found->le_refcnt + 1, >, found->le_refcnt);
			found->le_refcnt++;
		}
	} else {
		if (found == NULL) {
			/* block is currently marked as allocated */
			bplist_append(to_free, bp);
		} else {
			/* alloc matches a free entry */
			ASSERT3U(found->le_refcnt, !=, 0);
			found->le_refcnt--;
			if (found->le_refcnt == 0) {
				/* all tracked free pairs have been matched */
				avl_remove(avl, found);
				kmem_free(found, sizeof (livelist_entry_t));
			}
		}
	}
	return (0);
}
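
/*
 * Illustrative sketch (not part of the build): a caller deleting a
 * clone feeds each sub-livelist bpobj through
 * dsl_process_sub_livelist() below and then frees what remains;
 * "bpo" and the NULL zthr here are hypothetical.
 *
 *	bplist_t to_free;
 *	uint64_t size = 0;
 *	bplist_create(&to_free);
 *	int err = dsl_process_sub_livelist(&bpo, &to_free, NULL, &size);
 *	// on success, to_free holds the still-allocated blkptrs
 */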

/*
 * Accepts a bpobj and a bplist.  Will insert into the bplist the blkptrs
 * which have an ALLOC entry but no matching FREE.
 */
int
dsl_process_sub_livelist(bpobj_t *bpobj, bplist_t *to_free, zthr_t *t,
    uint64_t *size)
{
	avl_tree_t avl;
	avl_create(&avl, livelist_compare, sizeof (livelist_entry_t),
	    offsetof(livelist_entry_t, le_node));

	/* process the sublist */
	struct livelist_iter_arg arg = {
		.avl = &avl,
		.to_free = to_free,
		.t = t
	};
	int err = bpobj_iterate_nofree(bpobj, dsl_livelist_iterate, &arg, size);
	VERIFY(err != 0 || avl_numnodes(&avl) == 0);

	void *cookie = NULL;
	livelist_entry_t *le = NULL;
	while ((le = avl_destroy_nodes(&avl, &cookie)) != NULL) {
		kmem_free(le, sizeof (livelist_entry_t));
	}
	avl_destroy(&avl);
	return (err);
}

ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, max_entries, U64, ZMOD_RW,
	"Size to start the next sub-livelist in a livelist");

ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, min_percent_shared, INT, ZMOD_RW,
	"Threshold at which livelist is disabled");