/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

static int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
static int32_t send_holes_without_birth_time = 1;
static uint_t zfs_traverse_indirect_prefetch_limit = 32;

typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const blkptr_t *bp,
    const dnode_phys_t *dnp, uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

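/*
 * zil_parse() callback invoked for each block in the intent log.  If the
 * log has not been claimed (claim_txg == 0) and this block was born at or
 * after the minimum claim txg, it may not yet be stable on disk, so we
 * return nonzero to terminate the walk.
 */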
static int
traverse_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(td->td_spa))
		return (-1);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

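/*
 * zil_parse() callback invoked for each record in the intent log.  Only
 * TX_WRITE records carry a block pointer; visit it when the log has been
 * claimed and the block was born in or after the claiming txg.
 */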
static int
traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

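/*
 * Walk this dataset's intent log, visiting each log block and each block
 * referenced by a TX_WRITE record via the callbacks above.
 */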
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus blocks that are already stable in read-only mode.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog_t *zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg, !(td->td_flags & TRAVERSE_NO_DECRYPT));
	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(const traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		if (memcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

/*
 * Returns B_TRUE if a prefetch read was issued, otherwise B_FALSE.
 */
static boolean_t
traverse_prefetch_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return (B_FALSE);
	/*
	 * If this bp is before the resume point, it may have already been
	 * freed.
	 */
	if (resume_skip_check(td, dnp, zb) != RESUME_SKIP_NONE)
		return (B_FALSE);
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return (B_FALSE);
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return (B_FALSE);
	ASSERT(!BP_IS_REDACTED(bp));

	if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
	return (B_TRUE);
}

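/*
 * Returns B_TRUE if the block is worth a prefetch read.  Holes, embedded
 * blocks, redacted blocks, and intent log blocks are skipped, since there
 * is nothing to read ahead for them.
 */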
static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG || BP_IS_REDACTED(bp))
		return (B_FALSE);
	return (B_TRUE);
}

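/*
 * Visit the block pointed to by bp, then recurse into its children
 * (indirect blocks, dnode blocks, and objset blocks).  This is the core
 * of the traversal: td_func is invoked in pre and/or post order depending
 * on TRAVERSE_PRE/TRAVERSE_POST, and td_resume is updated if an error
 * pauses the traversal.
 */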
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated. However, we (and
		 * our callers) cannot necessarily tell when an object was
		 * allocated. Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it. We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
		if (!send_holes_without_birth_time &&
		    (!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i, ptidx, pidx;
		uint32_t prefetchlimit;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		ASSERT(!BP_IS_PROTECTED(bp));

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		/*
		 * When performing a traversal it is beneficial to
		 * asynchronously read-ahead the upcoming indirect
		 * blocks since they will be needed shortly. However,
		 * since a 128k indirect (non-L0) block may contain up
		 * to 1024 128-byte block pointers, it's preferable to not
		 * prefetch them all at once. Issuing a large number of
		 * async reads may affect performance, and the earlier
		 * the indirect blocks are prefetched the less likely
		 * they are to still be resident in the ARC when needed.
		 * Therefore, prefetching indirect blocks is limited to
		 * zfs_traverse_indirect_prefetch_limit=32 blocks by
		 * default.
		 *
		 * pidx: Index of the next block pointer to be prefetched.
		 * ptidx: Index at which the next prefetch batch is triggered.
		 */
		ptidx = 0;
		pidx = 1;
		prefetchlimit = zfs_traverse_indirect_prefetch_limit;
		for (i = 0; i < epb; i++) {
			if (prefetchlimit && i == ptidx) {
				ASSERT3S(ptidx, <=, pidx);
				for (uint32_t prefetched = 0; pidx < epb &&
				    prefetched < prefetchlimit; pidx++) {
					SET_BOOKMARK(czb, zb->zb_objset,
					    zb->zb_object, zb->zb_level - 1,
					    zb->zb_blkid * epb + pidx);
					if (traverse_prefetch_metadata(td, dnp,
					    &((blkptr_t *)buf->b_data)[pidx],
					    czb) == B_TRUE) {
						prefetched++;
						if (prefetched ==
						    MAX(prefetchlimit / 2, 1))
							ptidx = pidx;
					}
				}
			}

			/* recursively visitbp() blocks below this */
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));

	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_FLAG_WAIT;
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *child_dnp;

		/*
		 * dnode blocks might have their bonus buffers encrypted, so
		 * we must be careful to honor TRAVERSE_NO_DECRYPT
		 */
		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		child_dnp = buf->b_data;

		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			err = traverse_dnode(td, bp, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				prefetch_dnode_metadata(td,
				    &osp->os_projectused_dnode,
				    zb->zb_objset, DMU_PROJECTUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, bp, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				err = traverse_dnode(td, bp,
				    &osp->os_projectused_dnode, zb->zb_objset,
				    DMU_PROJECTUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_groupused_dnode, zb->zb_objset,
				    DMU_GROUPUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_userused_dnode, zb->zb_objset,
				    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		arc_buf_destroy(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit. This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}

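/*
 * Issue metadata prefetches for everything reachable from a dnode: its
 * top-level block pointers and, if present, its spill block.
 */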
static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, dnp, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}
}

static int
traverse_dnode(traverse_data_t *td, const blkptr_t *bp, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}
	return (err);
}

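/*
 * Callback used by the prefetch thread.  Issues an asynchronous,
 * speculative arc_read() for each block, throttled so that at most
 * zfs_pd_bytes_max bytes are in flight ahead of the main traversal.
 */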
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	(void) zilog, (void) dnp;
	prefetch_data_t *pfd = arg;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (zb->zb_level == ZB_DNODE_LEVEL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	if ((pfd->pd_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    zio_flags, &aflags, zb);

	return (0);
}

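/*
 * Body of the prefetch taskq thread: run a second traversal of the same
 * tree with traverse_prefetcher as the callback, so reads are issued
 * ahead of the main traversal.  Signals pd_exited when done.
 */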
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
	spl_fstrans_unmark(cookie);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;
	td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
	} else {
		td->td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd->pd_flags = flags;
	if (resume != NULL)
		pd->pd_resume = *resume;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
		uint32_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;
		ASSERT(!BP_IS_REDACTED(rootbp));

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) &&
		    BP_IS_PROTECTED(rootbp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, rootbp, arc_getbuf_func,
		    &buf, ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, czb);
		if (err != 0) {
			/*
			 * If both TRAVERSE_HARD and TRAVERSE_PRE are set,
			 * continue to visitbp so that td_func can be called
			 * in the pre stage, and err will be reset to zero.
			 */
			if (!(td->td_flags & TRAVERSE_HARD) ||
			    !(td->td_flags & TRAVERSE_PRE))
				goto out;
		} else {
			osp = buf->b_data;
			traverse_zil(td, &osp->os_zil_header);
			arc_buf_destroy(buf, &buf);
		}
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    taskq_dispatch(spa->spa_prefetch_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE) == TASKQID_INVALID)
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);
out:
	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}

/*
 * NB: pool must not be changing on-disk (e.g., from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (uint64_t obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}

EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW,
	"Max number of bytes to prefetch");

ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, UINT, ZMOD_RW,
	"Traverse prefetch number of blocks pointed by indirect block");

#if defined(_KERNEL)
module_param_named(ignore_hole_birth, send_holes_without_birth_time, int, 0644);
MODULE_PARM_DESC(ignore_hole_birth,
	"Alias for send_holes_without_birth_time");
#endif

/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , send_holes_without_birth_time, INT, ZMOD_RW,
	"Ignore hole_birth txg for zfs send");