// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

static int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
static int32_t send_holes_without_birth_time = 1;
static uint_t zfs_traverse_indirect_prefetch_limit = 32;

typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const blkptr_t *bp,
    const dnode_phys_t *dnp, uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

static int
traverse_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 &&
	    BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(td->td_spa))
		return (-1);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}
static int
traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || BP_GET_LOGICAL_BIRTH(bp) < claim_txg)
			return (0);

		ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus blocks that are already stable in read-only mode.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog_t *zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg, !(td->td_flags & TRAVERSE_NO_DECRYPT));
	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post-order traversal and we reach
 * the resume point. This indicates that this block should be visited but not
 * its children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(const traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		if (memcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}
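/*
 * Example of the resume logic above (hypothetical bookmark): if a prior
 * post-order traversal paused at <objset, object, level 0, blkid N>, then
 * on resume every subtree that zbookmark_subtree_completed() shows was
 * finished before that bookmark returns RESUME_SKIP_ALL and is not re-read;
 * the bookmark block itself returns RESUME_SKIP_CHILDREN and is visited
 * without descending; later blocks return RESUME_SKIP_NONE and are
 * traversed normally.
 */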
/*
 * Returns B_TRUE if a prefetch read was issued; otherwise B_FALSE.
 */
static boolean_t
traverse_prefetch_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return (B_FALSE);
	/*
	 * If this bp is before the resume point, it may have already been
	 * freed.
	 */
	if (resume_skip_check(td, dnp, zb) != RESUME_SKIP_NONE)
		return (B_FALSE);
	if (BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) <= td->td_min_txg)
		return (B_FALSE);
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return (B_FALSE);
	ASSERT(!BP_IS_REDACTED(bp));

	if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
	return (B_TRUE);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG || BP_IS_REDACTED(bp))
		return (B_FALSE);
	return (B_TRUE);
}

static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (BP_GET_LOGICAL_BIRTH(bp) == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated. However, we (and
		 * our callers) cannot necessarily tell when an object was
		 * allocated. Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it. We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
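		/*
		 * Illustrative scenario (hypothetical numbers): object 12 is
		 * freed and its object number later reused, and we are
		 * traversing blocks born after td_min_txg == 100.  A hole in
		 * the new object 12 has birth time 0 <= 100, yet the
		 * receiving side of a send stream may still hold data at
		 * that offset from the old object, so the hole must be
		 * visited anyway.  Only when reuse is impossible
		 * (td_realloc_possible == B_FALSE, or the meta-dnode),
		 * SPA_FEATURE_HOLE_BIRTH was enabled no later than
		 * td_min_txg, and the send_holes_without_birth_time
		 * workaround is off can the hole be skipped.
		 */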
		if (!send_holes_without_birth_time &&
		    (!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (BP_GET_LOGICAL_BIRTH(bp) <= td->td_min_txg) {
		return (0);
	}

	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int32_t i, ptidx, pidx;
		uint32_t prefetchlimit;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		ASSERT(!BP_IS_PROTECTED(bp));

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		/*
		 * When performing a traversal it is beneficial to
		 * asynchronously read-ahead the upcoming indirect
		 * blocks since they will be needed shortly. However,
		 * since a 128k indirect (non-L0) block may contain up
		 * to 1024 128-byte block pointers, it's preferable to not
		 * prefetch them all at once. Issuing a large number of
		 * async reads may affect performance, and the earlier
		 * the indirect blocks are prefetched the less likely
		 * they are to still be resident in the ARC when needed.
		 * Therefore, prefetching indirect blocks is limited to
		 * zfs_traverse_indirect_prefetch_limit=32 blocks by
		 * default.
		 *
		 * pidx: Index of the next block pointer to be prefetched.
		 * ptidx: Index at which the next batch of prefetches is
		 * triggered.
		 */
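		/*
		 * Worked example with the defaults above (epb == 1024,
		 * prefetchlimit == 32): at i == 0 (== ptidx) the inner loop
		 * issues prefetches for pidx 1..32.  When the 16th
		 * (MAX(prefetchlimit / 2, 1)) read is actually issued, ptidx
		 * is set to that pidx, so once the visit loop reaches it the
		 * next batch is issued.  The traversal thus stays roughly
		 * prefetchlimit / 2 indirect blocks ahead of the reader
		 * instead of issuing all 1024 reads at once.
		 */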
		ptidx = 0;
		pidx = 1;
		prefetchlimit = zfs_traverse_indirect_prefetch_limit;
		for (i = 0; i < epb; i++) {
			if (prefetchlimit && i == ptidx) {
				ASSERT3S(ptidx, <=, pidx);
				for (uint32_t prefetched = 0; pidx < epb &&
				    prefetched < prefetchlimit; pidx++) {
					SET_BOOKMARK(czb, zb->zb_objset,
					    zb->zb_object, zb->zb_level - 1,
					    zb->zb_blkid * epb + pidx);
					if (traverse_prefetch_metadata(td, dnp,
					    &((blkptr_t *)buf->b_data)[pidx],
					    czb) == B_TRUE) {
						prefetched++;
						if (prefetched ==
						    MAX(prefetchlimit / 2, 1))
							ptidx = pidx;
					}
				}
			}

			/* recursively visitbp() blocks below this */
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));

	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *child_dnp;

		/*
		 * dnode blocks might have their bonus buffers encrypted, so
		 * we must be careful to honor TRAVERSE_NO_DECRYPT.
		 */
		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		child_dnp = buf->b_data;

		/* dn_extra_slots steps over slots consumed by large dnodes */
		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			err = traverse_dnode(td, bp, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				prefetch_dnode_metadata(td,
				    &osp->os_projectused_dnode,
				    zb->zb_objset, DMU_PROJECTUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, bp, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				err = traverse_dnode(td, bp,
				    &osp->os_projectused_dnode, zb->zb_objset,
				    DMU_PROJECTUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_groupused_dnode, zb->zb_objset,
				    DMU_GROUPUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_userused_dnode, zb->zb_objset,
				    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		arc_buf_destroy(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to an
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit. This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
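		/*
		 * Worked example (hypothetical values): pausing at a level-2
		 * indirect block with zb_blkid == 3, dn_indblkshift == 17
		 * (128K indirects), and SPA_BLKPTRSHIFT == 7 gives 1024
		 * block pointers per indirect, so zb_blkid is shifted by
		 * 2 * 10 bits and the bookmark becomes level-0 blkid
		 * 3 << 20 == 3145728, the first L0 block beneath that
		 * indirect.
		 */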
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}

static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, dnp, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}
}

static int
traverse_dnode(traverse_data_t *td, const blkptr_t *bp, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}
	return (err);
}

static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	(void) zilog, (void) dnp;
	prefetch_data_t *pfd = arg;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (zb->zb_level == ZB_DNODE_LEVEL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	if ((pfd->pd_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    zio_flags, &aflags, zb);

	return (0);
}
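/*
 * Flow-control sketch for the prefetcher above, assuming the 50MB
 * zfs_pd_bytes_max default: traverse_prefetcher() runs ahead of the main
 * traversal, adding each block's logical size to pd_bytes_fetched and
 * sleeping on pd_cv once the total reaches zfs_pd_bytes_max.  The main
 * thread, in traverse_visitbp(), waits until pd_bytes_fetched covers the
 * block it is about to read, subtracts that amount, and broadcasts, waking
 * the prefetcher to refill the window.  With 128K blocks the prefetcher can
 * run about 400 blocks ahead; each block the visitor consumes releases 128K
 * of budget and admits roughly one more prefetch.
 */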
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
	spl_fstrans_unmark(cookie);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;
	td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
	} else {
		td->td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd->pd_flags = flags;
	if (resume != NULL)
		pd->pd_resume = *resume;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;
		ASSERT(!BP_IS_REDACTED(rootbp));

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) &&
		    BP_IS_PROTECTED(rootbp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, rootbp, arc_getbuf_func,
		    &buf, ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, czb);
		if (err != 0) {
			/*
			 * If both TRAVERSE_HARD and TRAVERSE_PRE are set,
			 * continue to visitbp so that td_func can be called
			 * in the pre stage, and err will be reset to zero.
			 */
			if (!(td->td_flags & TRAVERSE_HARD) ||
			    !(td->td_flags & TRAVERSE_PRE))
				goto out;
		} else {
			osp = buf->b_data;
			traverse_zil(td, &osp->os_zil_header);
			arc_buf_destroy(buf, &buf);
		}
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    taskq_dispatch(spa->spa_prefetch_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE) == TASKQID_INVALID)
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);
out:
	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}
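/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * callback conforming to blkptr_cb_t that counts the non-hole blocks of a
 * held dataset.  The callback name and counter argument are made up for
 * illustration; the flags are the real ones from dmu_traverse.h.
 *
 *	static int
 *	count_blocks_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		(void) spa, (void) zilog, (void) zb, (void) dnp;
 *		uint64_t *count = arg;
 *
 *		if (bp != NULL && !BP_IS_HOLE(bp))
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	int err = traverse_dataset(ds, 0, TRAVERSE_PRE |
 *	    TRAVERSE_PREFETCH_METADATA, count_blocks_cb, &count);
 *
 * Returning a nonzero error from the callback pauses the traversal (and,
 * with traverse_dataset_resume(), records a resume bookmark); returning
 * TRAVERSE_VISIT_NO_CHILDREN from a TRAVERSE_PRE callback skips the
 * subtree below the current block.
 */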
/*
 * NB: pool must not be changing on-disk (e.g., from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (uint64_t obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}

EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW,
	"Max number of bytes to prefetch");

ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, UINT, ZMOD_RW,
	"Traverse prefetch number of blocks pointed to by indirect block");

ZFS_MODULE_PARAM(zfs, , send_holes_without_birth_time, INT, ZMOD_RW,
	"Ignore hole_birth txg for zfs send");
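/*
 * Tuning sketch (assumes a Linux build, where the declarations above
 * surface as module parameters under /sys/module/zfs/parameters):
 *
 *	# Let the traversal prefetcher run up to 100MB ahead.
 *	echo 104857600 > /sys/module/zfs/parameters/zfs_pd_bytes_max
 *
 *	# Disable batched indirect-block prefetch; with a value of 0 the
 *	# "prefetchlimit && i == ptidx" test in traverse_visitbp() never
 *	# fires.
 *	echo 0 > /sys/module/zfs/parameters/zfs_traverse_indirect_prefetch_limit
 *
 * The values shown are examples only, not recommendations.
 */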