/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

static int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
static int32_t send_holes_without_birth_time = 1;
static int32_t zfs_traverse_indirect_prefetch_limit = 32;

typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const blkptr_t *bp,
    const dnode_phys_t *dnp, uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

static int
traverse_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(td->td_spa))
		return (-1);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus blocks that are already stable in read-only mode.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog_t *zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg, !(td->td_flags & TRAVERSE_NO_DECRYPT));
	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post-order traversal and we reach
 * the resume point. This indicates that this block should be visited but not
 * its children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (memcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			memset(td->td_resume, 0, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}
/*
 * Returns B_TRUE if a prefetch read was issued; otherwise returns B_FALSE.
 */
static boolean_t
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return (B_FALSE);
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return (B_FALSE);
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return (B_FALSE);
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return (B_FALSE);
	ASSERT(!BP_IS_REDACTED(bp));

	if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
	return (B_TRUE);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG || BP_IS_REDACTED(bp))
		return (B_FALSE);
	return (B_TRUE);
}
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated. However, we (and
		 * our callers) cannot necessarily tell when an object was
		 * allocated. Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it. We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
		if (!send_holes_without_birth_time &&
		    (!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i, ptidx, pidx;
		uint32_t prefetchlimit;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		ASSERT(!BP_IS_PROTECTED(bp));

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		/*
		 * When performing a traversal it is beneficial to
		 * asynchronously read-ahead the upcoming indirect
		 * blocks since they will be needed shortly. However,
		 * since a 128k indirect (non-L0) block may contain up
		 * to 1024 128-byte block pointers, it's preferable to not
		 * prefetch them all at once. Issuing a large number of
		 * async reads may affect performance, and the earlier
		 * the indirect blocks are prefetched the less likely
		 * they are to still be resident in the ARC when needed.
		 * Therefore, prefetching indirect blocks is limited to
		 * zfs_traverse_indirect_prefetch_limit=32 blocks by
		 * default.
		 *
		 * pidx: Index of the next block pointer to be prefetched.
		 * ptidx: Index at which the next prefetch batch is triggered.
		 */
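		/*
		 * For example, with epb = 1024 and the default limit of
		 * 32 (assuming every prefetch issues): the first pass
		 * prefetches children 1-32 and records in ptidx the index
		 * at which the 16th (MAX(prefetchlimit / 2, 1)) prefetch
		 * was issued.  When the visitor reaches that child, the
		 * next batch of up to 32 is issued, so the read-ahead
		 * stays at least ~16 indirect blocks ahead of the
		 * traversal.
		 */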
		ptidx = 0;
		pidx = 1;
		prefetchlimit = zfs_traverse_indirect_prefetch_limit;
		for (i = 0; i < epb; i++) {
			if (prefetchlimit && i == ptidx) {
				ASSERT3S(ptidx, <=, pidx);
				for (uint32_t prefetched = 0; pidx < epb &&
				    prefetched < prefetchlimit; pidx++) {
					SET_BOOKMARK(czb, zb->zb_objset,
					    zb->zb_object, zb->zb_level - 1,
					    zb->zb_blkid * epb + pidx);
					if (traverse_prefetch_metadata(td,
					    &((blkptr_t *)buf->b_data)[pidx],
					    czb) == B_TRUE) {
						prefetched++;
						if (prefetched ==
						    MAX(prefetchlimit / 2, 1))
							ptidx = pidx;
					}
				}
			}

			/* recursively visitbp() blocks below this */
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));

	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_FLAG_WAIT;
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *child_dnp;

		/*
		 * dnode blocks might have their bonus buffers encrypted, so
		 * we must be careful to honor TRAVERSE_NO_DECRYPT
		 */
		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		child_dnp = buf->b_data;

		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			err = traverse_dnode(td, bp, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				prefetch_dnode_metadata(td,
				    &osp->os_projectused_dnode,
				    zb->zb_objset, DMU_PROJECTUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, bp, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				err = traverse_dnode(td, bp,
				    &osp->os_projectused_dnode, zb->zb_objset,
				    DMU_PROJECTUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_groupused_dnode, zb->zb_objset,
				    DMU_GROUPUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_userused_dnode, zb->zb_objset,
				    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		arc_buf_destroy(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit. This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
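		/*
		 * For example, with the common 128K indirect block size
		 * (dn_indblkshift = 17) each indirect block holds
		 * 2^(17 - SPA_BLKPTRSHIFT) = 1024 block pointers, so
		 * pausing on a level-2 block with zb_blkid = 3 resumes at
		 * level-0 blkid 3 << (2 * 10) = 3145728.
		 */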
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}

static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, DN_SPILL_BLKPTR(dnp), &czb);
	}
}

static int
traverse_dnode(traverse_data_t *td, const blkptr_t *bp, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}
	return (err);
}

static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	(void) zilog, (void) dnp;
	prefetch_data_t *pfd = arg;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (zb->zb_level == ZB_DNODE_LEVEL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	if ((pfd->pd_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    zio_flags, &aflags, zb);

	return (0);
}
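/*
 * The prefetch thread runs ahead of the main traversal over the same
 * on-disk structure, with traverse_prefetcher() as its callback.  Flow
 * control is byte-counted through pd_bytes_fetched: the prefetcher
 * blocks once it is zfs_pd_bytes_max bytes ahead, while
 * traverse_visitbp() waits for the prefetcher to get ahead of each
 * block it needs and then debits that block's size as it consumes it.
 */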
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
	spl_fstrans_unmark(cookie);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;
	td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
	} else {
		td->td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd->pd_flags = flags;
	if (resume != NULL)
		pd->pd_resume = *resume;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
		uint32_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;
		ASSERT(!BP_IS_REDACTED(rootbp));

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) &&
		    BP_IS_PROTECTED(rootbp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, rootbp, arc_getbuf_func,
		    &buf, ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, czb);
		if (err != 0) {
			/*
			 * If both TRAVERSE_HARD and TRAVERSE_PRE are set,
			 * continue on to traverse_visitbp() so that td_func
			 * can be called in the pre stage, and err will be
			 * reset to zero.
			 */
			if (!(td->td_flags & TRAVERSE_HARD) ||
			    !(td->td_flags & TRAVERSE_PRE))
				goto out;
		} else {
			osp = buf->b_data;
			traverse_zil(td, &osp->os_zil_header);
			arc_buf_destroy(buf, &buf);
		}
	}
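	/*
	 * Start the data prefetch thread unless data prefetching was not
	 * requested or no taskq thread is immediately available
	 * (TQ_NOQUEUE); in either case mark the prefetcher as already
	 * exited so the traversal below does not wait on it.
	 */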
	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    taskq_dispatch(spa->spa_prefetch_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE) == TASKQID_INVALID)
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);
out:
	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}
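/*
 * Example (illustrative sketch, not used by this file): a minimal
 * blkptr_cb_t callback that counts the allocated level-0 blocks of a
 * dataset.  The callback name and counter are hypothetical.
 *
 *	static int
 *	count_l0_blocks_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *count = arg;
 *
 *		if (BP_IS_HOLE(bp) || zb->zb_level != 0)
 *			return (0);
 *		(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	int err = traverse_dataset(ds, 0,
 *	    TRAVERSE_PRE | TRAVERSE_PREFETCH_DATA, count_l0_blocks_cb,
 *	    &count);
 */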
/*
 * NB: pool must not be changing on-disk (e.g., from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (uint64_t obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}

EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW,
	"Max number of bytes to prefetch");

ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, INT, ZMOD_RW,
	"Traverse prefetch number of blocks pointed by indirect block");

#if defined(_KERNEL)
module_param_named(ignore_hole_birth, send_holes_without_birth_time, int,
	0644);
MODULE_PARM_DESC(ignore_hole_birth,
	"Alias for send_holes_without_birth_time");
#endif

/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , send_holes_without_birth_time, INT, ZMOD_RW,
	"Ignore hole_birth txg for zfs send");