/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

static int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
static int32_t send_holes_without_birth_time = 1;
static uint_t zfs_traverse_indirect_prefetch_limit = 32;

typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const blkptr_t *bp,
    const dnode_phys_t *dnp, uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

static int
traverse_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(td->td_spa))
		return (-1);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}
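
/*
 * Note that both ZIL callbacks above pass a NULL dnode_phys_t and a
 * bookmark at ZB_ZIL_LEVEL to td_func, which is how callbacks can tell
 * intent-log blocks apart from ordinary dataset blocks.
 */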

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus blocks that are already stable in read-only mode.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog_t *zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg, !(td->td_flags & TRAVERSE_NO_DECRYPT));
	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post-order traversal and we reach
 * the resume point. This indicates that this block should be visited but not
 * its children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (memcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			memset(td->td_resume, 0, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

/*
 * Returns B_TRUE if a prefetch read was issued, otherwise B_FALSE.
 */
static boolean_t
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return (B_FALSE);
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return (B_FALSE);
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return (B_FALSE);
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return (B_FALSE);
	ASSERT(!BP_IS_REDACTED(bp));

	if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
	return (B_TRUE);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG || BP_IS_REDACTED(bp))
		return (B_FALSE);
	return (B_TRUE);
}

static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated. However, we (and
		 * our callers) can not necessarily tell when an object was
		 * allocated. Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it. We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
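		/*
		 * For example, if an object number was freed and reused
		 * after td_min_txg, a birth==0 hole in the new object may
		 * cover a range that held data in the old object, so the
		 * hole must still be visited (td_realloc_possible).
		 */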
		if (!send_holes_without_birth_time &&
		    (!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i, ptidx, pidx;
		uint32_t prefetchlimit;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		ASSERT(!BP_IS_PROTECTED(bp));

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		/*
		 * When performing a traversal it is beneficial to
		 * asynchronously read-ahead the upcoming indirect
		 * blocks since they will be needed shortly. However,
		 * since a 128k indirect (non-L0) block may contain up
		 * to 1024 128-byte block pointers, it's preferable to not
		 * prefetch them all at once. Issuing a large number of
		 * async reads may affect performance, and the earlier
		 * the indirect blocks are prefetched the less likely
		 * they are to still be resident in the ARC when needed.
		 * Therefore, prefetching indirect blocks is limited to
		 * zfs_traverse_indirect_prefetch_limit=32 blocks by
		 * default.
		 *
		 * pidx: Index of the next prefetch to be issued.
		 * ptidx: Index at which the next prefetch is triggered.
		 */
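		/*
		 * E.g., with the default limit of 32 and every prefetch
		 * issuing: at i == 0 the blkptrs at indices 1-32 are
		 * prefetched and ptidx becomes 16; when i reaches 16,
		 * indices 33-64 are prefetched and ptidx becomes 48; and
		 * so on, keeping the read-ahead roughly 16-48 blkptrs
		 * ahead of i.
		 */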
		ptidx = 0;
		pidx = 1;
		prefetchlimit = zfs_traverse_indirect_prefetch_limit;
		for (i = 0; i < epb; i++) {
			if (prefetchlimit && i == ptidx) {
				ASSERT3S(ptidx, <=, pidx);
				for (uint32_t prefetched = 0; pidx < epb &&
				    prefetched < prefetchlimit; pidx++) {
					SET_BOOKMARK(czb, zb->zb_objset,
					    zb->zb_object, zb->zb_level - 1,
					    zb->zb_blkid * epb + pidx);
					if (traverse_prefetch_metadata(td,
					    &((blkptr_t *)buf->b_data)[pidx],
					    czb) == B_TRUE) {
						prefetched++;
						if (prefetched ==
						    MAX(prefetchlimit / 2, 1))
							ptidx = pidx;
					}
				}
			}

			/* recursively visitbp() blocks below this */
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));

	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_FLAG_WAIT;
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *child_dnp;

		/*
		 * dnode blocks might have their bonus buffers encrypted, so
		 * we must be careful to honor TRAVERSE_NO_DECRYPT
		 */
		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		child_dnp = buf->b_data;

		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			err = traverse_dnode(td, bp, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
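		/*
		 * (With 16K dnode blocks and 512-byte dnode slots,
		 * DNODES_PER_BLOCK is 16K / 512 = 32.)
		 */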
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				prefetch_dnode_metadata(td,
				    &osp->os_projectused_dnode,
				    zb->zb_objset, DMU_PROJECTUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, bp, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				err = traverse_dnode(td, bp,
				    &osp->os_projectused_dnode, zb->zb_objset,
				    DMU_PROJECTUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_groupused_dnode, zb->zb_objset,
				    DMU_GROUPUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_userused_dnode, zb->zb_objset,
				    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		arc_buf_destroy(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit. This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
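		/*
		 * For example, with 128K indirect blocks (dn_indblkshift of
		 * 17) each L1 block covers 2^(17 - SPA_BLKPTRSHIFT) = 1024
		 * L0 blocks, so stopping at L1 blkid B resumes at L0 blkid
		 * B << 10.
		 */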
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}

static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, DN_SPILL_BLKPTR(dnp), &czb);
	}
}

static int
traverse_dnode(traverse_data_t *td, const blkptr_t *bp, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}
	return (err);
}

static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	(void) zilog, (void) dnp;
	prefetch_data_t *pfd = arg;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (zb->zb_level == ZB_DNODE_LEVEL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	if ((pfd->pd_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    zio_flags, &aflags, zb);

	return (0);
}

static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
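	/*
	 * Use the prefetcher's own copy of the resume bookmark (made in
	 * traverse_impl): resume_skip_check() zeroes the bookmark in place
	 * on resume, so the prefetch thread must not share the main
	 * traversal's td_resume.
	 */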
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
	spl_fstrans_unmark(cookie);
}

/*
 * NB: dataset must not be changing on-disk (e.g., it is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;
	td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
	} else {
		td->td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd->pd_flags = flags;
	if (resume != NULL)
		pd->pd_resume = *resume;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
		uint32_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;
		ASSERT(!BP_IS_REDACTED(rootbp));

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) &&
		    BP_IS_PROTECTED(rootbp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, rootbp, arc_getbuf_func,
		    &buf, ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, czb);
		if (err != 0) {
			/*
			 * If both TRAVERSE_HARD and TRAVERSE_PRE are set,
			 * continue to visitbp so that td_func can be called
			 * in the pre stage, and err will be reset to zero.
			 */
			if (!(td->td_flags & TRAVERSE_HARD) ||
			    !(td->td_flags & TRAVERSE_PRE))
				goto out;
		} else {
			osp = buf->b_data;
			traverse_zil(td, &osp->os_zil_header);
			arc_buf_destroy(buf, &buf);
		}
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    taskq_dispatch(spa->spa_prefetch_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE) == TASKQID_INVALID)
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);
out:
	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (e.g., it is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}
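
/*
 * Example (illustrative sketch only; the callback name and its argument
 * are hypothetical): a minimal blkptr_cb_t that tallies the logical size
 * of every non-hole block in a dataset, skipping per-dnode visits so the
 * blocks containing the dnodes are not counted twice.
 *
 *	static int
 *	sum_lsize_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *sum = arg;
 *
 *		if (bp != NULL && !BP_IS_HOLE(bp) &&
 *		    zb->zb_level != ZB_DNODE_LEVEL)
 *			*sum += BP_GET_LSIZE(bp);
 *		return (0);
 *	}
 *
 *	uint64_t sum = 0;
 *	err = traverse_dataset(ds, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_DATA,
 *	    sum_lsize_cb, &sum);
 */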

/*
 * NB: pool must not be changing on-disk (e.g., from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (uint64_t obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}

EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW,
	"Max number of bytes to prefetch");

ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, UINT, ZMOD_RW,
	"Traverse prefetch number of blocks pointed by indirect block");

#if defined(_KERNEL)
module_param_named(ignore_hole_birth, send_holes_without_birth_time, int, 0644);
MODULE_PARM_DESC(ignore_hole_birth,
	"Alias for send_holes_without_birth_time");
#endif

/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , send_holes_without_birth_time, INT, ZMOD_RW,
	"Ignore hole_birth txg for zfs send");