/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */

typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);
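
/*
 * Illustrative sketch (not part of the original file): a minimal callback
 * of the blkptr_cb_t shape that td_func expects, counting non-hole
 * level-0 blocks.  The function name, the counter argument, and the
 * DMU_TRAVERSE_EXAMPLE guard are hypothetical.
 */
#ifdef DMU_TRAVERSE_EXAMPLE
/* ARGSUSED */
static int
visit_count_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *countp = arg;

	/* traverse_dnode() passes bp == NULL for per-dnode callbacks */
	if (bp == NULL || BP_IS_HOLE(bp))
		return (0);
	if (BP_GET_LEVEL(bp) == 0)
		(*countp)++;
	return (0);	/* a nonzero return would pause/abort the traversal */
}
#endif	/* DMU_TRAVERSE_EXAMPLE */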

static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return (0);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
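 *
 * For example (added illustration): a paused traversal records a level-0
 * bookmark in td_resume; on resume, any block whose entire subtree was
 * visited before that bookmark is skipped outright, and once the saved
 * block itself is reached the bookmark is zeroed so the walk continues
 * normally from there.
 */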
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (B_FALSE);
	return (B_TRUE);
}

static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	zbookmark_phys_t czb;
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;
	boolean_t hard = td->td_flags & TRAVERSE_HARD;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated. However, we (and
		 * our callers) cannot necessarily tell when an object was
		 * allocated. Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it. We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
		if ((!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}
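
	/*
	 * Throttle against the prefetch thread: wait until it has issued
	 * at least this block's worth of reads, then consume that credit
	 * and wake it (it sleeps once zfs_pd_bytes_max bytes are
	 * outstanding).
	 */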
	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		cbp = buf->b_data;

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td, &cbp[i], &czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp, &cbp[i], &czb);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		dnode_phys_t *child_dnp = buf->b_data;

		for (i = 0; i < epb; i++) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			err = traverse_dnode(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		objset_phys_t *osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if (hard && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit. This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
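		/*
		 * Illustrative arithmetic (typical values): with 128K
		 * indirect blocks (dn_indblkshift == 17) and 128-byte
		 * block pointers (SPA_BLKPTRSHIFT == 7), each indirect
		 * level fans out 1024 ways, so a level-2 bookmark's
		 * zb_blkid becomes zb_blkid << 20 at level 0.
		 */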
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}

static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
	}
}

static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, NULL, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, NULL, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}
	return (err);
}

/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (bp == NULL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

	return (0);
}
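
/*
 * The prefetch thread runs the same traversal as the main thread, with
 * traverse_prefetcher() as its callback.  pd_bytes_fetched is the credit
 * linking the two: the prefetcher adds the size of each block it issues
 * (sleeping once zfs_pd_bytes_max bytes are outstanding), and
 * traverse_visitbp() subtracts as the main thread consumes blocks,
 * waking the prefetcher through pd_cv.
 */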
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t td;
	prefetch_data_t pd = { 0 };
	zbookmark_phys_t czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td.td_spa = spa;
	td.td_objset = objset;
	td.td_rootbp = rootbp;
	td.td_min_txg = txg_start;
	td.td_resume = resume;
	td.td_func = func;
	td.td_arg = arg;
	td.td_pfd = &pd;
	td.td_flags = flags;
	td.td_paused = B_FALSE;
	td.td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td.td_hole_birth_enabled_txg));
	} else {
		td.td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd.pd_flags = flags;
	if (resume != NULL)
		pd.pd_resume = *resume;
	mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, td.td_spa, rootbp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, NULL);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		traverse_zil(&td, &osp->os_zil_header);
		(void) arc_buf_remove_ref(buf, &buf);
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    &td, TQ_NOQUEUE))
		pd.pd_exited = B_TRUE;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	err = traverse_visitbp(&td, NULL, rootbp, &czb);

	mutex_enter(&pd.pd_mtx);
	pd.pd_cancel = B_TRUE;
	cv_broadcast(&pd.pd_cv);
	while (!pd.pd_exited)
		cv_wait(&pd.pd_cv, &pd.pd_mtx);
	mutex_exit(&pd.pd_mtx);

	mutex_destroy(&pd.pd_mtx);
	cv_destroy(&pd.pd_cv);

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}
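
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): a pre-order walk of a dataset using the visit_count_cb example
 * above.  A txg_start of 0 visits everything; a nonzero value visits
 * only blocks born after that txg.
 */
#ifdef DMU_TRAVERSE_EXAMPLE
static int
count_dataset_blocks(dsl_dataset_t *ds, uint64_t *countp)
{
	*countp = 0;
	return (traverse_dataset(ds, 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, visit_count_cb,
	    countp));
}
#endif	/* DMU_TRAVERSE_EXAMPLE */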

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}

/*
 * NB: pool must not be changing on-disk (e.g., from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (uint64_t obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}
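
/*
 * Illustrative sketch (hypothetical caller): a whole-pool walk of the kind
 * zdb performs, using TRAVERSE_HARD so isolated EIO/ECKSUM errors are
 * skipped rather than aborting the scan.
 */
#ifdef DMU_TRAVERSE_EXAMPLE
static int
count_pool_blocks(spa_t *spa, uint64_t *countp)
{
	*countp = 0;
	return (traverse_pool(spa, 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD,
	    visit_count_cb, countp));
}
#endif	/* DMU_TRAVERSE_EXAMPLE */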