1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 25 * Copyright (c) 2014 Integros [integros.com] 26 */ 27 28 #include <stdio.h> 29 #include <unistd.h> 30 #include <stdio_ext.h> 31 #include <stdlib.h> 32 #include <ctype.h> 33 #include <sys/zfs_context.h> 34 #include <sys/spa.h> 35 #include <sys/spa_impl.h> 36 #include <sys/dmu.h> 37 #include <sys/zap.h> 38 #include <sys/fs/zfs.h> 39 #include <sys/zfs_znode.h> 40 #include <sys/zfs_sa.h> 41 #include <sys/sa.h> 42 #include <sys/sa_impl.h> 43 #include <sys/vdev.h> 44 #include <sys/vdev_impl.h> 45 #include <sys/metaslab_impl.h> 46 #include <sys/dmu_objset.h> 47 #include <sys/dsl_dir.h> 48 #include <sys/dsl_dataset.h> 49 #include <sys/dsl_pool.h> 50 #include <sys/dbuf.h> 51 #include <sys/zil.h> 52 #include <sys/zil_impl.h> 53 #include <sys/stat.h> 54 #include <sys/resource.h> 55 #include <sys/dmu_traverse.h> 56 #include <sys/zio_checksum.h> 57 #include <sys/zio_compress.h> 58 #include <sys/zfs_fuid.h> 59 #include <sys/arc.h> 60 #include <sys/ddt.h> 61 #include <sys/zfeature.h> 62 #include <zfs_comutil.h> 63 #undef ZFS_MAXNAMELEN 64 #undef verify 65 #include <libzfs.h> 66 67 #define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \ 68 zio_compress_table[(idx)].ci_name : "UNKNOWN") 69 #define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \ 70 zio_checksum_table[(idx)].ci_name : "UNKNOWN") 71 #define ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ? \ 72 dmu_ot[(idx)].ot_name : DMU_OT_IS_VALID(idx) ? \ 73 dmu_ot_byteswap[DMU_OT_BYTESWAP(idx)].ob_name : "UNKNOWN") 74 #define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \ 75 (((idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA) ? \ 76 DMU_OT_ZAP_OTHER : DMU_OT_NUMTYPES)) 77 78 #ifndef lint 79 extern boolean_t zfs_recover; 80 extern uint64_t zfs_arc_max, zfs_arc_meta_limit; 81 extern int zfs_vdev_async_read_max_active; 82 #else 83 boolean_t zfs_recover; 84 uint64_t zfs_arc_max, zfs_arc_meta_limit; 85 int zfs_vdev_async_read_max_active; 86 #endif 87 88 const char cmdname[] = "zdb"; 89 uint8_t dump_opt[256]; 90 91 typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size); 92 93 extern void dump_intent_log(zilog_t *); 94 uint64_t *zopt_object = NULL; 95 int zopt_objects = 0; 96 libzfs_handle_t *g_zfs; 97 uint64_t max_inflight = 1000; 98 99 static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *); 100 101 /* 102 * These libumem hooks provide a reasonable set of defaults for the allocator's 103 * debugging facilities. 
104 */ 105 const char * 106 _umem_debug_init() 107 { 108 return ("default,verbose"); /* $UMEM_DEBUG setting */ 109 } 110 111 const char * 112 _umem_logging_init(void) 113 { 114 return ("fail,contents"); /* $UMEM_LOGGING setting */ 115 } 116 117 static void 118 usage(void) 119 { 120 (void) fprintf(stderr, 121 "Usage: %s [-CumMdibcsDvhLXFPA] [-t txg] [-e [-p path...]] " 122 "[-U config] [-I inflight I/Os] [-x dumpdir] poolname [object...]\n" 123 " %s [-divPA] [-e -p path...] [-U config] dataset " 124 "[object...]\n" 125 " %s -mM [-LXFPA] [-t txg] [-e [-p path...]] [-U config] " 126 "poolname [vdev [metaslab...]]\n" 127 " %s -R [-A] [-e [-p path...]] poolname " 128 "vdev:offset:size[:flags]\n" 129 " %s -S [-PA] [-e [-p path...]] [-U config] poolname\n" 130 " %s -l [-uA] device\n" 131 " %s -C [-A] [-U config]\n\n", 132 cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname); 133 134 (void) fprintf(stderr, " Dataset name must include at least one " 135 "separator character '/' or '@'\n"); 136 (void) fprintf(stderr, " If dataset name is specified, only that " 137 "dataset is dumped\n"); 138 (void) fprintf(stderr, " If object numbers are specified, only " 139 "those objects are dumped\n\n"); 140 (void) fprintf(stderr, " Options to control amount of output:\n"); 141 (void) fprintf(stderr, " -u uberblock\n"); 142 (void) fprintf(stderr, " -d dataset(s)\n"); 143 (void) fprintf(stderr, " -i intent logs\n"); 144 (void) fprintf(stderr, " -C config (or cachefile if alone)\n"); 145 (void) fprintf(stderr, " -h pool history\n"); 146 (void) fprintf(stderr, " -b block statistics\n"); 147 (void) fprintf(stderr, " -m metaslabs\n"); 148 (void) fprintf(stderr, " -M metaslab groups\n"); 149 (void) fprintf(stderr, " -c checksum all metadata (twice for " 150 "all data) blocks\n"); 151 (void) fprintf(stderr, " -s report stats on zdb's I/O\n"); 152 (void) fprintf(stderr, " -D dedup statistics\n"); 153 (void) fprintf(stderr, " -S simulate dedup to measure effect\n"); 154 (void) fprintf(stderr, " -v verbose (applies to all others)\n"); 155 (void) fprintf(stderr, " -l dump label contents\n"); 156 (void) fprintf(stderr, " -L disable leak tracking (do not " 157 "load spacemaps)\n"); 158 (void) fprintf(stderr, " -R read and display block from a " 159 "device\n\n"); 160 (void) fprintf(stderr, " Below options are intended for use " 161 "with other options:\n"); 162 (void) fprintf(stderr, " -A ignore assertions (-A), enable " 163 "panic recovery (-AA) or both (-AAA)\n"); 164 (void) fprintf(stderr, " -F attempt automatic rewind within " 165 "safe range of transaction groups\n"); 166 (void) fprintf(stderr, " -U <cachefile_path> -- use alternate " 167 "cachefile\n"); 168 (void) fprintf(stderr, " -X attempt extreme rewind (does not " 169 "work with dataset)\n"); 170 (void) fprintf(stderr, " -e pool is exported/destroyed/" 171 "has altroot/not in a cachefile\n"); 172 (void) fprintf(stderr, " -p <path> -- use one or more with " 173 "-e to specify path to vdev dir\n"); 174 (void) fprintf(stderr, " -x <dumpdir> -- " 175 "dump all read blocks into specified directory\n"); 176 (void) fprintf(stderr, " -P print numbers in parseable form\n"); 177 (void) fprintf(stderr, " -t <txg> -- highest txg to use when " 178 "searching for uberblocks\n"); 179 (void) fprintf(stderr, " -I <number of inflight I/Os> -- " 180 "specify the maximum number of " 181 "checksumming I/Os [default is 200]\n"); 182 (void) fprintf(stderr, "Specify an option more than once (e.g. 
-bb) " 183 "to make only that option verbose\n"); 184 (void) fprintf(stderr, "Default is to dump everything non-verbosely\n"); 185 exit(1); 186 } 187 188 /* 189 * Called for usage errors that are discovered after a call to spa_open(), 190 * dmu_bonus_hold(), or pool_match(). abort() is called for other errors. 191 */ 192 193 static void 194 fatal(const char *fmt, ...) 195 { 196 va_list ap; 197 198 va_start(ap, fmt); 199 (void) fprintf(stderr, "%s: ", cmdname); 200 (void) vfprintf(stderr, fmt, ap); 201 va_end(ap); 202 (void) fprintf(stderr, "\n"); 203 204 exit(1); 205 } 206 207 /* ARGSUSED */ 208 static void 209 dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size) 210 { 211 nvlist_t *nv; 212 size_t nvsize = *(uint64_t *)data; 213 char *packed = umem_alloc(nvsize, UMEM_NOFAIL); 214 215 VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH)); 216 217 VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0); 218 219 umem_free(packed, nvsize); 220 221 dump_nvlist(nv, 8); 222 223 nvlist_free(nv); 224 } 225 226 /* ARGSUSED */ 227 static void 228 dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size) 229 { 230 spa_history_phys_t *shp = data; 231 232 if (shp == NULL) 233 return; 234 235 (void) printf("\t\tpool_create_len = %llu\n", 236 (u_longlong_t)shp->sh_pool_create_len); 237 (void) printf("\t\tphys_max_off = %llu\n", 238 (u_longlong_t)shp->sh_phys_max_off); 239 (void) printf("\t\tbof = %llu\n", 240 (u_longlong_t)shp->sh_bof); 241 (void) printf("\t\teof = %llu\n", 242 (u_longlong_t)shp->sh_eof); 243 (void) printf("\t\trecords_lost = %llu\n", 244 (u_longlong_t)shp->sh_records_lost); 245 } 246 247 static void 248 zdb_nicenum(uint64_t num, char *buf) 249 { 250 if (dump_opt['P']) 251 (void) sprintf(buf, "%llu", (longlong_t)num); 252 else 253 nicenum(num, buf); 254 } 255 256 const char histo_stars[] = "****************************************"; 257 const int histo_width = sizeof (histo_stars) - 1; 258 259 static void 260 dump_histogram(const uint64_t *histo, int size, int offset) 261 { 262 int i; 263 int minidx = size - 1; 264 int maxidx = 0; 265 uint64_t max = 0; 266 267 for (i = 0; i < size; i++) { 268 if (histo[i] > max) 269 max = histo[i]; 270 if (histo[i] > 0 && i > maxidx) 271 maxidx = i; 272 if (histo[i] > 0 && i < minidx) 273 minidx = i; 274 } 275 276 if (max < histo_width) 277 max = histo_width; 278 279 for (i = minidx; i <= maxidx; i++) { 280 (void) printf("\t\t\t%3u: %6llu %s\n", 281 i + offset, (u_longlong_t)histo[i], 282 &histo_stars[(max - histo[i]) * histo_width / max]); 283 } 284 } 285 286 static void 287 dump_zap_stats(objset_t *os, uint64_t object) 288 { 289 int error; 290 zap_stats_t zs; 291 292 error = zap_get_stats(os, object, &zs); 293 if (error) 294 return; 295 296 if (zs.zs_ptrtbl_len == 0) { 297 ASSERT(zs.zs_num_blocks == 1); 298 (void) printf("\tmicrozap: %llu bytes, %llu entries\n", 299 (u_longlong_t)zs.zs_blocksize, 300 (u_longlong_t)zs.zs_num_entries); 301 return; 302 } 303 304 (void) printf("\tFat ZAP stats:\n"); 305 306 (void) printf("\t\tPointer table:\n"); 307 (void) printf("\t\t\t%llu elements\n", 308 (u_longlong_t)zs.zs_ptrtbl_len); 309 (void) printf("\t\t\tzt_blk: %llu\n", 310 (u_longlong_t)zs.zs_ptrtbl_zt_blk); 311 (void) printf("\t\t\tzt_numblks: %llu\n", 312 (u_longlong_t)zs.zs_ptrtbl_zt_numblks); 313 (void) printf("\t\t\tzt_shift: %llu\n", 314 (u_longlong_t)zs.zs_ptrtbl_zt_shift); 315 (void) printf("\t\t\tzt_blks_copied: %llu\n", 316 (u_longlong_t)zs.zs_ptrtbl_blks_copied); 317 (void) 
printf("\t\t\tzt_nextblk: %llu\n", 318 (u_longlong_t)zs.zs_ptrtbl_nextblk); 319 320 (void) printf("\t\tZAP entries: %llu\n", 321 (u_longlong_t)zs.zs_num_entries); 322 (void) printf("\t\tLeaf blocks: %llu\n", 323 (u_longlong_t)zs.zs_num_leafs); 324 (void) printf("\t\tTotal blocks: %llu\n", 325 (u_longlong_t)zs.zs_num_blocks); 326 (void) printf("\t\tzap_block_type: 0x%llx\n", 327 (u_longlong_t)zs.zs_block_type); 328 (void) printf("\t\tzap_magic: 0x%llx\n", 329 (u_longlong_t)zs.zs_magic); 330 (void) printf("\t\tzap_salt: 0x%llx\n", 331 (u_longlong_t)zs.zs_salt); 332 333 (void) printf("\t\tLeafs with 2^n pointers:\n"); 334 dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0); 335 336 (void) printf("\t\tBlocks with n*5 entries:\n"); 337 dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0); 338 339 (void) printf("\t\tBlocks n/10 full:\n"); 340 dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0); 341 342 (void) printf("\t\tEntries with n chunks:\n"); 343 dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0); 344 345 (void) printf("\t\tBuckets with n entries:\n"); 346 dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0); 347 } 348 349 /*ARGSUSED*/ 350 static void 351 dump_none(objset_t *os, uint64_t object, void *data, size_t size) 352 { 353 } 354 355 /*ARGSUSED*/ 356 static void 357 dump_unknown(objset_t *os, uint64_t object, void *data, size_t size) 358 { 359 (void) printf("\tUNKNOWN OBJECT TYPE\n"); 360 } 361 362 /*ARGSUSED*/ 363 void 364 dump_uint8(objset_t *os, uint64_t object, void *data, size_t size) 365 { 366 } 367 368 /*ARGSUSED*/ 369 static void 370 dump_uint64(objset_t *os, uint64_t object, void *data, size_t size) 371 { 372 } 373 374 /*ARGSUSED*/ 375 static void 376 dump_zap(objset_t *os, uint64_t object, void *data, size_t size) 377 { 378 zap_cursor_t zc; 379 zap_attribute_t attr; 380 void *prop; 381 int i; 382 383 dump_zap_stats(os, object); 384 (void) printf("\n"); 385 386 for (zap_cursor_init(&zc, os, object); 387 zap_cursor_retrieve(&zc, &attr) == 0; 388 zap_cursor_advance(&zc)) { 389 (void) printf("\t\t%s = ", attr.za_name); 390 if (attr.za_num_integers == 0) { 391 (void) printf("\n"); 392 continue; 393 } 394 prop = umem_zalloc(attr.za_num_integers * 395 attr.za_integer_length, UMEM_NOFAIL); 396 (void) zap_lookup(os, object, attr.za_name, 397 attr.za_integer_length, attr.za_num_integers, prop); 398 if (attr.za_integer_length == 1) { 399 (void) printf("%s", (char *)prop); 400 } else { 401 for (i = 0; i < attr.za_num_integers; i++) { 402 switch (attr.za_integer_length) { 403 case 2: 404 (void) printf("%u ", 405 ((uint16_t *)prop)[i]); 406 break; 407 case 4: 408 (void) printf("%u ", 409 ((uint32_t *)prop)[i]); 410 break; 411 case 8: 412 (void) printf("%lld ", 413 (u_longlong_t)((int64_t *)prop)[i]); 414 break; 415 } 416 } 417 } 418 (void) printf("\n"); 419 umem_free(prop, attr.za_num_integers * attr.za_integer_length); 420 } 421 zap_cursor_fini(&zc); 422 } 423 424 static void 425 dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size) 426 { 427 bpobj_phys_t *bpop = data; 428 char bytes[32], comp[32], uncomp[32]; 429 430 if (bpop == NULL) 431 return; 432 433 zdb_nicenum(bpop->bpo_bytes, bytes); 434 zdb_nicenum(bpop->bpo_comp, comp); 435 zdb_nicenum(bpop->bpo_uncomp, uncomp); 436 437 (void) printf("\t\tnum_blkptrs = %llu\n", 438 (u_longlong_t)bpop->bpo_num_blkptrs); 439 (void) printf("\t\tbytes = %s\n", bytes); 440 if (size >= BPOBJ_SIZE_V1) { 441 (void) printf("\t\tcomp = %s\n", comp); 442 (void) 
printf("\t\tuncomp = %s\n", uncomp); 443 } 444 if (size >= sizeof (*bpop)) { 445 (void) printf("\t\tsubobjs = %llu\n", 446 (u_longlong_t)bpop->bpo_subobjs); 447 (void) printf("\t\tnum_subobjs = %llu\n", 448 (u_longlong_t)bpop->bpo_num_subobjs); 449 } 450 451 if (dump_opt['d'] < 5) 452 return; 453 454 for (uint64_t i = 0; i < bpop->bpo_num_blkptrs; i++) { 455 char blkbuf[BP_SPRINTF_LEN]; 456 blkptr_t bp; 457 458 int err = dmu_read(os, object, 459 i * sizeof (bp), sizeof (bp), &bp, 0); 460 if (err != 0) { 461 (void) printf("got error %u from dmu_read\n", err); 462 break; 463 } 464 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp); 465 (void) printf("\t%s\n", blkbuf); 466 } 467 } 468 469 /* ARGSUSED */ 470 static void 471 dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size) 472 { 473 dmu_object_info_t doi; 474 475 VERIFY0(dmu_object_info(os, object, &doi)); 476 uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP); 477 478 int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0); 479 if (err != 0) { 480 (void) printf("got error %u from dmu_read\n", err); 481 kmem_free(subobjs, doi.doi_max_offset); 482 return; 483 } 484 485 int64_t last_nonzero = -1; 486 for (uint64_t i = 0; i < doi.doi_max_offset / 8; i++) { 487 if (subobjs[i] != 0) 488 last_nonzero = i; 489 } 490 491 for (int64_t i = 0; i <= last_nonzero; i++) { 492 (void) printf("\t%llu\n", (longlong_t)subobjs[i]); 493 } 494 kmem_free(subobjs, doi.doi_max_offset); 495 } 496 497 /*ARGSUSED*/ 498 static void 499 dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size) 500 { 501 dump_zap_stats(os, object); 502 /* contents are printed elsewhere, properly decoded */ 503 } 504 505 /*ARGSUSED*/ 506 static void 507 dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size) 508 { 509 zap_cursor_t zc; 510 zap_attribute_t attr; 511 512 dump_zap_stats(os, object); 513 (void) printf("\n"); 514 515 for (zap_cursor_init(&zc, os, object); 516 zap_cursor_retrieve(&zc, &attr) == 0; 517 zap_cursor_advance(&zc)) { 518 (void) printf("\t\t%s = ", attr.za_name); 519 if (attr.za_num_integers == 0) { 520 (void) printf("\n"); 521 continue; 522 } 523 (void) printf(" %llx : [%d:%d:%d]\n", 524 (u_longlong_t)attr.za_first_integer, 525 (int)ATTR_LENGTH(attr.za_first_integer), 526 (int)ATTR_BSWAP(attr.za_first_integer), 527 (int)ATTR_NUM(attr.za_first_integer)); 528 } 529 zap_cursor_fini(&zc); 530 } 531 532 /*ARGSUSED*/ 533 static void 534 dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size) 535 { 536 zap_cursor_t zc; 537 zap_attribute_t attr; 538 uint16_t *layout_attrs; 539 int i; 540 541 dump_zap_stats(os, object); 542 (void) printf("\n"); 543 544 for (zap_cursor_init(&zc, os, object); 545 zap_cursor_retrieve(&zc, &attr) == 0; 546 zap_cursor_advance(&zc)) { 547 (void) printf("\t\t%s = [", attr.za_name); 548 if (attr.za_num_integers == 0) { 549 (void) printf("\n"); 550 continue; 551 } 552 553 VERIFY(attr.za_integer_length == 2); 554 layout_attrs = umem_zalloc(attr.za_num_integers * 555 attr.za_integer_length, UMEM_NOFAIL); 556 557 VERIFY(zap_lookup(os, object, attr.za_name, 558 attr.za_integer_length, 559 attr.za_num_integers, layout_attrs) == 0); 560 561 for (i = 0; i != attr.za_num_integers; i++) 562 (void) printf(" %d ", (int)layout_attrs[i]); 563 (void) printf("]\n"); 564 umem_free(layout_attrs, 565 attr.za_num_integers * attr.za_integer_length); 566 } 567 zap_cursor_fini(&zc); 568 } 569 570 /*ARGSUSED*/ 571 static void 572 dump_zpldir(objset_t *os, uint64_t object, void *data, size_t 
size) 573 { 574 zap_cursor_t zc; 575 zap_attribute_t attr; 576 const char *typenames[] = { 577 /* 0 */ "not specified", 578 /* 1 */ "FIFO", 579 /* 2 */ "Character Device", 580 /* 3 */ "3 (invalid)", 581 /* 4 */ "Directory", 582 /* 5 */ "5 (invalid)", 583 /* 6 */ "Block Device", 584 /* 7 */ "7 (invalid)", 585 /* 8 */ "Regular File", 586 /* 9 */ "9 (invalid)", 587 /* 10 */ "Symbolic Link", 588 /* 11 */ "11 (invalid)", 589 /* 12 */ "Socket", 590 /* 13 */ "Door", 591 /* 14 */ "Event Port", 592 /* 15 */ "15 (invalid)", 593 }; 594 595 dump_zap_stats(os, object); 596 (void) printf("\n"); 597 598 for (zap_cursor_init(&zc, os, object); 599 zap_cursor_retrieve(&zc, &attr) == 0; 600 zap_cursor_advance(&zc)) { 601 (void) printf("\t\t%s = %lld (type: %s)\n", 602 attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer), 603 typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]); 604 } 605 zap_cursor_fini(&zc); 606 } 607 608 int 609 get_dtl_refcount(vdev_t *vd) 610 { 611 int refcount = 0; 612 613 if (vd->vdev_ops->vdev_op_leaf) { 614 space_map_t *sm = vd->vdev_dtl_sm; 615 616 if (sm != NULL && 617 sm->sm_dbuf->db_size == sizeof (space_map_phys_t)) 618 return (1); 619 return (0); 620 } 621 622 for (int c = 0; c < vd->vdev_children; c++) 623 refcount += get_dtl_refcount(vd->vdev_child[c]); 624 return (refcount); 625 } 626 627 int 628 get_metaslab_refcount(vdev_t *vd) 629 { 630 int refcount = 0; 631 632 if (vd->vdev_top == vd && !vd->vdev_removing) { 633 for (int m = 0; m < vd->vdev_ms_count; m++) { 634 space_map_t *sm = vd->vdev_ms[m]->ms_sm; 635 636 if (sm != NULL && 637 sm->sm_dbuf->db_size == sizeof (space_map_phys_t)) 638 refcount++; 639 } 640 } 641 for (int c = 0; c < vd->vdev_children; c++) 642 refcount += get_metaslab_refcount(vd->vdev_child[c]); 643 644 return (refcount); 645 } 646 647 static int 648 verify_spacemap_refcounts(spa_t *spa) 649 { 650 uint64_t expected_refcount = 0; 651 uint64_t actual_refcount; 652 653 (void) feature_get_refcount(spa, 654 &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM], 655 &expected_refcount); 656 actual_refcount = get_dtl_refcount(spa->spa_root_vdev); 657 actual_refcount += get_metaslab_refcount(spa->spa_root_vdev); 658 659 if (expected_refcount != actual_refcount) { 660 (void) printf("space map refcount mismatch: expected %lld != " 661 "actual %lld\n", 662 (longlong_t)expected_refcount, 663 (longlong_t)actual_refcount); 664 return (2); 665 } 666 return (0); 667 } 668 669 static void 670 dump_spacemap(objset_t *os, space_map_t *sm) 671 { 672 uint64_t alloc, offset, entry; 673 char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID", 674 "INVALID", "INVALID", "INVALID", "INVALID" }; 675 676 if (sm == NULL) 677 return; 678 679 /* 680 * Print out the freelist entries in both encoded and decoded form. 681 */ 682 alloc = 0; 683 for (offset = 0; offset < space_map_length(sm); 684 offset += sizeof (entry)) { 685 uint8_t mapshift = sm->sm_shift; 686 687 VERIFY0(dmu_read(os, space_map_object(sm), offset, 688 sizeof (entry), &entry, DMU_READ_PREFETCH)); 689 if (SM_DEBUG_DECODE(entry)) { 690 691 (void) printf("\t [%6llu] %s: txg %llu, pass %llu\n", 692 (u_longlong_t)(offset / sizeof (entry)), 693 ddata[SM_DEBUG_ACTION_DECODE(entry)], 694 (u_longlong_t)SM_DEBUG_TXG_DECODE(entry), 695 (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(entry)); 696 } else { 697 (void) printf("\t [%6llu] %c range:" 698 " %010llx-%010llx size: %06llx\n", 699 (u_longlong_t)(offset / sizeof (entry)), 700 SM_TYPE_DECODE(entry) == SM_ALLOC ? 
'A' : 'F', 701 (u_longlong_t)((SM_OFFSET_DECODE(entry) << 702 mapshift) + sm->sm_start), 703 (u_longlong_t)((SM_OFFSET_DECODE(entry) << 704 mapshift) + sm->sm_start + 705 (SM_RUN_DECODE(entry) << mapshift)), 706 (u_longlong_t)(SM_RUN_DECODE(entry) << mapshift)); 707 if (SM_TYPE_DECODE(entry) == SM_ALLOC) 708 alloc += SM_RUN_DECODE(entry) << mapshift; 709 else 710 alloc -= SM_RUN_DECODE(entry) << mapshift; 711 } 712 } 713 if (alloc != space_map_allocated(sm)) { 714 (void) printf("space_map_object alloc (%llu) INCONSISTENT " 715 "with space map summary (%llu)\n", 716 (u_longlong_t)space_map_allocated(sm), (u_longlong_t)alloc); 717 } 718 } 719 720 static void 721 dump_metaslab_stats(metaslab_t *msp) 722 { 723 char maxbuf[32]; 724 range_tree_t *rt = msp->ms_tree; 725 avl_tree_t *t = &msp->ms_size_tree; 726 int free_pct = range_tree_space(rt) * 100 / msp->ms_size; 727 728 zdb_nicenum(metaslab_block_maxsize(msp), maxbuf); 729 730 (void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n", 731 "segments", avl_numnodes(t), "maxsize", maxbuf, 732 "freepct", free_pct); 733 (void) printf("\tIn-memory histogram:\n"); 734 dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 735 } 736 737 static void 738 dump_metaslab(metaslab_t *msp) 739 { 740 vdev_t *vd = msp->ms_group->mg_vd; 741 spa_t *spa = vd->vdev_spa; 742 space_map_t *sm = msp->ms_sm; 743 char freebuf[32]; 744 745 zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf); 746 747 (void) printf( 748 "\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n", 749 (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start, 750 (u_longlong_t)space_map_object(sm), freebuf); 751 752 if (dump_opt['m'] > 2 && !dump_opt['L']) { 753 mutex_enter(&msp->ms_lock); 754 metaslab_load_wait(msp); 755 if (!msp->ms_loaded) { 756 VERIFY0(metaslab_load(msp)); 757 range_tree_stat_verify(msp->ms_tree); 758 } 759 dump_metaslab_stats(msp); 760 metaslab_unload(msp); 761 mutex_exit(&msp->ms_lock); 762 } 763 764 if (dump_opt['m'] > 1 && sm != NULL && 765 spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) { 766 /* 767 * The space map histogram represents free space in chunks 768 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift). 
769 */ 770 (void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n", 771 (u_longlong_t)msp->ms_fragmentation); 772 dump_histogram(sm->sm_phys->smp_histogram, 773 SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift); 774 } 775 776 if (dump_opt['d'] > 5 || dump_opt['m'] > 3) { 777 ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift)); 778 779 mutex_enter(&msp->ms_lock); 780 dump_spacemap(spa->spa_meta_objset, msp->ms_sm); 781 mutex_exit(&msp->ms_lock); 782 } 783 } 784 785 static void 786 print_vdev_metaslab_header(vdev_t *vd) 787 { 788 (void) printf("\tvdev %10llu\n\t%-10s%5llu %-19s %-15s %-10s\n", 789 (u_longlong_t)vd->vdev_id, 790 "metaslabs", (u_longlong_t)vd->vdev_ms_count, 791 "offset", "spacemap", "free"); 792 (void) printf("\t%15s %19s %15s %10s\n", 793 "---------------", "-------------------", 794 "---------------", "-------------"); 795 } 796 797 static void 798 dump_metaslab_groups(spa_t *spa) 799 { 800 vdev_t *rvd = spa->spa_root_vdev; 801 metaslab_class_t *mc = spa_normal_class(spa); 802 uint64_t fragmentation; 803 804 metaslab_class_histogram_verify(mc); 805 806 for (int c = 0; c < rvd->vdev_children; c++) { 807 vdev_t *tvd = rvd->vdev_child[c]; 808 metaslab_group_t *mg = tvd->vdev_mg; 809 810 if (mg->mg_class != mc) 811 continue; 812 813 metaslab_group_histogram_verify(mg); 814 mg->mg_fragmentation = metaslab_group_fragmentation(mg); 815 816 (void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t" 817 "fragmentation", 818 (u_longlong_t)tvd->vdev_id, 819 (u_longlong_t)tvd->vdev_ms_count); 820 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) { 821 (void) printf("%3s\n", "-"); 822 } else { 823 (void) printf("%3llu%%\n", 824 (u_longlong_t)mg->mg_fragmentation); 825 } 826 dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 827 } 828 829 (void) printf("\tpool %s\tfragmentation", spa_name(spa)); 830 fragmentation = metaslab_class_fragmentation(mc); 831 if (fragmentation == ZFS_FRAG_INVALID) 832 (void) printf("\t%3s\n", "-"); 833 else 834 (void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation); 835 dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 836 } 837 838 static void 839 dump_metaslabs(spa_t *spa) 840 { 841 vdev_t *vd, *rvd = spa->spa_root_vdev; 842 uint64_t m, c = 0, children = rvd->vdev_children; 843 844 (void) printf("\nMetaslabs:\n"); 845 846 if (!dump_opt['d'] && zopt_objects > 0) { 847 c = zopt_object[0]; 848 849 if (c >= children) 850 (void) fatal("bad vdev id: %llu", (u_longlong_t)c); 851 852 if (zopt_objects > 1) { 853 vd = rvd->vdev_child[c]; 854 print_vdev_metaslab_header(vd); 855 856 for (m = 1; m < zopt_objects; m++) { 857 if (zopt_object[m] < vd->vdev_ms_count) 858 dump_metaslab( 859 vd->vdev_ms[zopt_object[m]]); 860 else 861 (void) fprintf(stderr, "bad metaslab " 862 "number %llu\n", 863 (u_longlong_t)zopt_object[m]); 864 } 865 (void) printf("\n"); 866 return; 867 } 868 children = c + 1; 869 } 870 for (; c < children; c++) { 871 vd = rvd->vdev_child[c]; 872 print_vdev_metaslab_header(vd); 873 874 for (m = 0; m < vd->vdev_ms_count; m++) 875 dump_metaslab(vd->vdev_ms[m]); 876 (void) printf("\n"); 877 } 878 } 879 880 static void 881 dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index) 882 { 883 const ddt_phys_t *ddp = dde->dde_phys; 884 const ddt_key_t *ddk = &dde->dde_key; 885 char *types[4] = { "ditto", "single", "double", "triple" }; 886 char blkbuf[BP_SPRINTF_LEN]; 887 blkptr_t blk; 888 889 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 890 if (ddp->ddp_phys_birth == 0) 891 continue; 892 ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk); 
893 snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk); 894 (void) printf("index %llx refcnt %llu %s %s\n", 895 (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt, 896 types[p], blkbuf); 897 } 898 } 899 900 static void 901 dump_dedup_ratio(const ddt_stat_t *dds) 902 { 903 double rL, rP, rD, D, dedup, compress, copies; 904 905 if (dds->dds_blocks == 0) 906 return; 907 908 rL = (double)dds->dds_ref_lsize; 909 rP = (double)dds->dds_ref_psize; 910 rD = (double)dds->dds_ref_dsize; 911 D = (double)dds->dds_dsize; 912 913 dedup = rD / D; 914 compress = rL / rP; 915 copies = rD / rP; 916 917 (void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, " 918 "dedup * compress / copies = %.2f\n\n", 919 dedup, compress, copies, dedup * compress / copies); 920 } 921 922 static void 923 dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class) 924 { 925 char name[DDT_NAMELEN]; 926 ddt_entry_t dde; 927 uint64_t walk = 0; 928 dmu_object_info_t doi; 929 uint64_t count, dspace, mspace; 930 int error; 931 932 error = ddt_object_info(ddt, type, class, &doi); 933 934 if (error == ENOENT) 935 return; 936 ASSERT(error == 0); 937 938 if ((count = ddt_object_count(ddt, type, class)) == 0) 939 return; 940 941 dspace = doi.doi_physical_blocks_512 << 9; 942 mspace = doi.doi_fill_count * doi.doi_data_block_size; 943 944 ddt_object_name(ddt, type, class, name); 945 946 (void) printf("%s: %llu entries, size %llu on disk, %llu in core\n", 947 name, 948 (u_longlong_t)count, 949 (u_longlong_t)(dspace / count), 950 (u_longlong_t)(mspace / count)); 951 952 if (dump_opt['D'] < 3) 953 return; 954 955 zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]); 956 957 if (dump_opt['D'] < 4) 958 return; 959 960 if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE) 961 return; 962 963 (void) printf("%s contents:\n\n", name); 964 965 while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0) 966 dump_dde(ddt, &dde, walk); 967 968 ASSERT(error == ENOENT); 969 970 (void) printf("\n"); 971 } 972 973 static void 974 dump_all_ddts(spa_t *spa) 975 { 976 ddt_histogram_t ddh_total = { 0 }; 977 ddt_stat_t dds_total = { 0 }; 978 979 for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { 980 ddt_t *ddt = spa->spa_ddt[c]; 981 for (enum ddt_type type = 0; type < DDT_TYPES; type++) { 982 for (enum ddt_class class = 0; class < DDT_CLASSES; 983 class++) { 984 dump_ddt(ddt, type, class); 985 } 986 } 987 } 988 989 ddt_get_dedup_stats(spa, &dds_total); 990 991 if (dds_total.dds_blocks == 0) { 992 (void) printf("All DDTs are empty\n"); 993 return; 994 } 995 996 (void) printf("\n"); 997 998 if (dump_opt['D'] > 1) { 999 (void) printf("DDT histogram (aggregated over all DDTs):\n"); 1000 ddt_get_dedup_histogram(spa, &ddh_total); 1001 zpool_dump_ddt(&dds_total, &ddh_total); 1002 } 1003 1004 dump_dedup_ratio(&dds_total); 1005 } 1006 1007 static void 1008 dump_dtl_seg(void *arg, uint64_t start, uint64_t size) 1009 { 1010 char *prefix = arg; 1011 1012 (void) printf("%s [%llu,%llu) length %llu\n", 1013 prefix, 1014 (u_longlong_t)start, 1015 (u_longlong_t)(start + size), 1016 (u_longlong_t)(size)); 1017 } 1018 1019 static void 1020 dump_dtl(vdev_t *vd, int indent) 1021 { 1022 spa_t *spa = vd->vdev_spa; 1023 boolean_t required; 1024 char *name[DTL_TYPES] = { "missing", "partial", "scrub", "outage" }; 1025 char prefix[256]; 1026 1027 spa_vdev_state_enter(spa, SCL_NONE); 1028 required = vdev_dtl_required(vd); 1029 (void) spa_vdev_state_exit(spa, NULL, 0); 1030 1031 if (indent == 0) 1032 (void) printf("\nDirty time logs:\n\n"); 1033 1034 (void) 
printf("\t%*s%s [%s]\n", indent, "", 1035 vd->vdev_path ? vd->vdev_path : 1036 vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa), 1037 required ? "DTL-required" : "DTL-expendable"); 1038 1039 for (int t = 0; t < DTL_TYPES; t++) { 1040 range_tree_t *rt = vd->vdev_dtl[t]; 1041 if (range_tree_space(rt) == 0) 1042 continue; 1043 (void) snprintf(prefix, sizeof (prefix), "\t%*s%s", 1044 indent + 2, "", name[t]); 1045 mutex_enter(rt->rt_lock); 1046 range_tree_walk(rt, dump_dtl_seg, prefix); 1047 mutex_exit(rt->rt_lock); 1048 if (dump_opt['d'] > 5 && vd->vdev_children == 0) 1049 dump_spacemap(spa->spa_meta_objset, vd->vdev_dtl_sm); 1050 } 1051 1052 for (int c = 0; c < vd->vdev_children; c++) 1053 dump_dtl(vd->vdev_child[c], indent + 4); 1054 } 1055 1056 static void 1057 dump_history(spa_t *spa) 1058 { 1059 nvlist_t **events = NULL; 1060 uint64_t resid, len, off = 0; 1061 uint_t num = 0; 1062 int error; 1063 time_t tsec; 1064 struct tm t; 1065 char tbuf[30]; 1066 char internalstr[MAXPATHLEN]; 1067 1068 char *buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 1069 do { 1070 len = SPA_MAXBLOCKSIZE; 1071 1072 if ((error = spa_history_get(spa, &off, &len, buf)) != 0) { 1073 (void) fprintf(stderr, "Unable to read history: " 1074 "error %d\n", error); 1075 umem_free(buf, SPA_MAXBLOCKSIZE); 1076 return; 1077 } 1078 1079 if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0) 1080 break; 1081 1082 off -= resid; 1083 } while (len != 0); 1084 umem_free(buf, SPA_MAXBLOCKSIZE); 1085 1086 (void) printf("\nHistory:\n"); 1087 for (int i = 0; i < num; i++) { 1088 uint64_t time, txg, ievent; 1089 char *cmd, *intstr; 1090 boolean_t printed = B_FALSE; 1091 1092 if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME, 1093 &time) != 0) 1094 goto next; 1095 if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD, 1096 &cmd) != 0) { 1097 if (nvlist_lookup_uint64(events[i], 1098 ZPOOL_HIST_INT_EVENT, &ievent) != 0) 1099 goto next; 1100 verify(nvlist_lookup_uint64(events[i], 1101 ZPOOL_HIST_TXG, &txg) == 0); 1102 verify(nvlist_lookup_string(events[i], 1103 ZPOOL_HIST_INT_STR, &intstr) == 0); 1104 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) 1105 goto next; 1106 1107 (void) snprintf(internalstr, 1108 sizeof (internalstr), 1109 "[internal %s txg:%lld] %s", 1110 zfs_history_event_names[ievent], txg, 1111 intstr); 1112 cmd = internalstr; 1113 } 1114 tsec = time; 1115 (void) localtime_r(&tsec, &t); 1116 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 1117 (void) printf("%s %s\n", tbuf, cmd); 1118 printed = B_TRUE; 1119 1120 next: 1121 if (dump_opt['h'] > 1) { 1122 if (!printed) 1123 (void) printf("unrecognized record:\n"); 1124 dump_nvlist(events[i], 2); 1125 } 1126 } 1127 } 1128 1129 /*ARGSUSED*/ 1130 static void 1131 dump_dnode(objset_t *os, uint64_t object, void *data, size_t size) 1132 { 1133 } 1134 1135 static uint64_t 1136 blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp, 1137 const zbookmark_phys_t *zb) 1138 { 1139 if (dnp == NULL) { 1140 ASSERT(zb->zb_level < 0); 1141 if (zb->zb_object == 0) 1142 return (zb->zb_blkid); 1143 return (zb->zb_blkid * BP_GET_LSIZE(bp)); 1144 } 1145 1146 ASSERT(zb->zb_level >= 0); 1147 1148 return ((zb->zb_blkid << 1149 (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) * 1150 dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); 1151 } 1152 1153 static void 1154 snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp) 1155 { 1156 const dva_t *dva = bp->blk_dva; 1157 int ndvas = dump_opt['d'] > 5 ? 
BP_GET_NDVAS(bp) : 1; 1158 1159 if (dump_opt['b'] >= 6) { 1160 snprintf_blkptr(blkbuf, buflen, bp); 1161 return; 1162 } 1163 1164 if (BP_IS_EMBEDDED(bp)) { 1165 (void) sprintf(blkbuf, 1166 "EMBEDDED et=%u %llxL/%llxP B=%llu", 1167 (int)BPE_GET_ETYPE(bp), 1168 (u_longlong_t)BPE_GET_LSIZE(bp), 1169 (u_longlong_t)BPE_GET_PSIZE(bp), 1170 (u_longlong_t)bp->blk_birth); 1171 return; 1172 } 1173 1174 blkbuf[0] = '\0'; 1175 for (int i = 0; i < ndvas; i++) 1176 (void) snprintf(blkbuf + strlen(blkbuf), 1177 buflen - strlen(blkbuf), "%llu:%llx:%llx ", 1178 (u_longlong_t)DVA_GET_VDEV(&dva[i]), 1179 (u_longlong_t)DVA_GET_OFFSET(&dva[i]), 1180 (u_longlong_t)DVA_GET_ASIZE(&dva[i])); 1181 1182 if (BP_IS_HOLE(bp)) { 1183 (void) snprintf(blkbuf + strlen(blkbuf), 1184 buflen - strlen(blkbuf), 1185 "%llxL B=%llu", 1186 (u_longlong_t)BP_GET_LSIZE(bp), 1187 (u_longlong_t)bp->blk_birth); 1188 } else { 1189 (void) snprintf(blkbuf + strlen(blkbuf), 1190 buflen - strlen(blkbuf), 1191 "%llxL/%llxP F=%llu B=%llu/%llu", 1192 (u_longlong_t)BP_GET_LSIZE(bp), 1193 (u_longlong_t)BP_GET_PSIZE(bp), 1194 (u_longlong_t)BP_GET_FILL(bp), 1195 (u_longlong_t)bp->blk_birth, 1196 (u_longlong_t)BP_PHYSICAL_BIRTH(bp)); 1197 } 1198 } 1199 1200 static void 1201 print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb, 1202 const dnode_phys_t *dnp) 1203 { 1204 char blkbuf[BP_SPRINTF_LEN]; 1205 int l; 1206 1207 if (!BP_IS_EMBEDDED(bp)) { 1208 ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type); 1209 ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level); 1210 } 1211 1212 (void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb)); 1213 1214 ASSERT(zb->zb_level >= 0); 1215 1216 for (l = dnp->dn_nlevels - 1; l >= -1; l--) { 1217 if (l == zb->zb_level) { 1218 (void) printf("L%llx", (u_longlong_t)zb->zb_level); 1219 } else { 1220 (void) printf(" "); 1221 } 1222 } 1223 1224 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp); 1225 (void) printf("%s\n", blkbuf); 1226 } 1227 1228 static int 1229 visit_indirect(spa_t *spa, const dnode_phys_t *dnp, 1230 blkptr_t *bp, const zbookmark_phys_t *zb) 1231 { 1232 int err = 0; 1233 1234 if (bp->blk_birth == 0) 1235 return (0); 1236 1237 print_indirect(bp, zb, dnp); 1238 1239 if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) { 1240 arc_flags_t flags = ARC_FLAG_WAIT; 1241 int i; 1242 blkptr_t *cbp; 1243 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1244 arc_buf_t *buf; 1245 uint64_t fill = 0; 1246 1247 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 1248 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); 1249 if (err) 1250 return (err); 1251 ASSERT(buf->b_data); 1252 1253 /* recursively visit blocks below this */ 1254 cbp = buf->b_data; 1255 for (i = 0; i < epb; i++, cbp++) { 1256 zbookmark_phys_t czb; 1257 1258 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 1259 zb->zb_level - 1, 1260 zb->zb_blkid * epb + i); 1261 err = visit_indirect(spa, dnp, cbp, &czb); 1262 if (err) 1263 break; 1264 fill += BP_GET_FILL(cbp); 1265 } 1266 if (!err) 1267 ASSERT3U(fill, ==, BP_GET_FILL(bp)); 1268 (void) arc_buf_remove_ref(buf, &buf); 1269 } 1270 1271 return (err); 1272 } 1273 1274 /*ARGSUSED*/ 1275 static void 1276 dump_indirect(dnode_t *dn) 1277 { 1278 dnode_phys_t *dnp = dn->dn_phys; 1279 int j; 1280 zbookmark_phys_t czb; 1281 1282 (void) printf("Indirect blocks:\n"); 1283 1284 SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset), 1285 dn->dn_object, dnp->dn_nlevels - 1, 0); 1286 for (j = 0; j < dnp->dn_nblkptr; j++) { 1287 czb.zb_blkid = j; 1288 (void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp, 1289 &dnp->dn_blkptr[j], 
&czb); 1290 } 1291 1292 (void) printf("\n"); 1293 } 1294 1295 /*ARGSUSED*/ 1296 static void 1297 dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size) 1298 { 1299 dsl_dir_phys_t *dd = data; 1300 time_t crtime; 1301 char nice[32]; 1302 1303 if (dd == NULL) 1304 return; 1305 1306 ASSERT3U(size, >=, sizeof (dsl_dir_phys_t)); 1307 1308 crtime = dd->dd_creation_time; 1309 (void) printf("\t\tcreation_time = %s", ctime(&crtime)); 1310 (void) printf("\t\thead_dataset_obj = %llu\n", 1311 (u_longlong_t)dd->dd_head_dataset_obj); 1312 (void) printf("\t\tparent_dir_obj = %llu\n", 1313 (u_longlong_t)dd->dd_parent_obj); 1314 (void) printf("\t\torigin_obj = %llu\n", 1315 (u_longlong_t)dd->dd_origin_obj); 1316 (void) printf("\t\tchild_dir_zapobj = %llu\n", 1317 (u_longlong_t)dd->dd_child_dir_zapobj); 1318 zdb_nicenum(dd->dd_used_bytes, nice); 1319 (void) printf("\t\tused_bytes = %s\n", nice); 1320 zdb_nicenum(dd->dd_compressed_bytes, nice); 1321 (void) printf("\t\tcompressed_bytes = %s\n", nice); 1322 zdb_nicenum(dd->dd_uncompressed_bytes, nice); 1323 (void) printf("\t\tuncompressed_bytes = %s\n", nice); 1324 zdb_nicenum(dd->dd_quota, nice); 1325 (void) printf("\t\tquota = %s\n", nice); 1326 zdb_nicenum(dd->dd_reserved, nice); 1327 (void) printf("\t\treserved = %s\n", nice); 1328 (void) printf("\t\tprops_zapobj = %llu\n", 1329 (u_longlong_t)dd->dd_props_zapobj); 1330 (void) printf("\t\tdeleg_zapobj = %llu\n", 1331 (u_longlong_t)dd->dd_deleg_zapobj); 1332 (void) printf("\t\tflags = %llx\n", 1333 (u_longlong_t)dd->dd_flags); 1334 1335 #define DO(which) \ 1336 zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice); \ 1337 (void) printf("\t\tused_breakdown[" #which "] = %s\n", nice) 1338 DO(HEAD); 1339 DO(SNAP); 1340 DO(CHILD); 1341 DO(CHILD_RSRV); 1342 DO(REFRSRV); 1343 #undef DO 1344 } 1345 1346 /*ARGSUSED*/ 1347 static void 1348 dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size) 1349 { 1350 dsl_dataset_phys_t *ds = data; 1351 time_t crtime; 1352 char used[32], compressed[32], uncompressed[32], unique[32]; 1353 char blkbuf[BP_SPRINTF_LEN]; 1354 1355 if (ds == NULL) 1356 return; 1357 1358 ASSERT(size == sizeof (*ds)); 1359 crtime = ds->ds_creation_time; 1360 zdb_nicenum(ds->ds_referenced_bytes, used); 1361 zdb_nicenum(ds->ds_compressed_bytes, compressed); 1362 zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed); 1363 zdb_nicenum(ds->ds_unique_bytes, unique); 1364 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp); 1365 1366 (void) printf("\t\tdir_obj = %llu\n", 1367 (u_longlong_t)ds->ds_dir_obj); 1368 (void) printf("\t\tprev_snap_obj = %llu\n", 1369 (u_longlong_t)ds->ds_prev_snap_obj); 1370 (void) printf("\t\tprev_snap_txg = %llu\n", 1371 (u_longlong_t)ds->ds_prev_snap_txg); 1372 (void) printf("\t\tnext_snap_obj = %llu\n", 1373 (u_longlong_t)ds->ds_next_snap_obj); 1374 (void) printf("\t\tsnapnames_zapobj = %llu\n", 1375 (u_longlong_t)ds->ds_snapnames_zapobj); 1376 (void) printf("\t\tnum_children = %llu\n", 1377 (u_longlong_t)ds->ds_num_children); 1378 (void) printf("\t\tuserrefs_obj = %llu\n", 1379 (u_longlong_t)ds->ds_userrefs_obj); 1380 (void) printf("\t\tcreation_time = %s", ctime(&crtime)); 1381 (void) printf("\t\tcreation_txg = %llu\n", 1382 (u_longlong_t)ds->ds_creation_txg); 1383 (void) printf("\t\tdeadlist_obj = %llu\n", 1384 (u_longlong_t)ds->ds_deadlist_obj); 1385 (void) printf("\t\tused_bytes = %s\n", used); 1386 (void) printf("\t\tcompressed_bytes = %s\n", compressed); 1387 (void) printf("\t\tuncompressed_bytes = %s\n", uncompressed); 1388 (void) 
printf("\t\tunique = %s\n", unique); 1389 (void) printf("\t\tfsid_guid = %llu\n", 1390 (u_longlong_t)ds->ds_fsid_guid); 1391 (void) printf("\t\tguid = %llu\n", 1392 (u_longlong_t)ds->ds_guid); 1393 (void) printf("\t\tflags = %llx\n", 1394 (u_longlong_t)ds->ds_flags); 1395 (void) printf("\t\tnext_clones_obj = %llu\n", 1396 (u_longlong_t)ds->ds_next_clones_obj); 1397 (void) printf("\t\tprops_obj = %llu\n", 1398 (u_longlong_t)ds->ds_props_obj); 1399 (void) printf("\t\tbp = %s\n", blkbuf); 1400 } 1401 1402 /* ARGSUSED */ 1403 static int 1404 dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1405 { 1406 char blkbuf[BP_SPRINTF_LEN]; 1407 1408 if (bp->blk_birth != 0) { 1409 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 1410 (void) printf("\t%s\n", blkbuf); 1411 } 1412 return (0); 1413 } 1414 1415 static void 1416 dump_bptree(objset_t *os, uint64_t obj, char *name) 1417 { 1418 char bytes[32]; 1419 bptree_phys_t *bt; 1420 dmu_buf_t *db; 1421 1422 if (dump_opt['d'] < 3) 1423 return; 1424 1425 VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db)); 1426 bt = db->db_data; 1427 zdb_nicenum(bt->bt_bytes, bytes); 1428 (void) printf("\n %s: %llu datasets, %s\n", 1429 name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes); 1430 dmu_buf_rele(db, FTAG); 1431 1432 if (dump_opt['d'] < 5) 1433 return; 1434 1435 (void) printf("\n"); 1436 1437 (void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL); 1438 } 1439 1440 /* ARGSUSED */ 1441 static int 1442 dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1443 { 1444 char blkbuf[BP_SPRINTF_LEN]; 1445 1446 ASSERT(bp->blk_birth != 0); 1447 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp); 1448 (void) printf("\t%s\n", blkbuf); 1449 return (0); 1450 } 1451 1452 static void 1453 dump_full_bpobj(bpobj_t *bpo, char *name, int indent) 1454 { 1455 char bytes[32]; 1456 char comp[32]; 1457 char uncomp[32]; 1458 1459 if (dump_opt['d'] < 3) 1460 return; 1461 1462 zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes); 1463 if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) { 1464 zdb_nicenum(bpo->bpo_phys->bpo_comp, comp); 1465 zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp); 1466 (void) printf(" %*s: object %llu, %llu local blkptrs, " 1467 "%llu subobjs in object %llu, %s (%s/%s comp)\n", 1468 indent * 8, name, 1469 (u_longlong_t)bpo->bpo_object, 1470 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, 1471 (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs, 1472 (u_longlong_t)bpo->bpo_phys->bpo_subobjs, 1473 bytes, comp, uncomp); 1474 1475 for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) { 1476 uint64_t subobj; 1477 bpobj_t subbpo; 1478 int error; 1479 VERIFY0(dmu_read(bpo->bpo_os, 1480 bpo->bpo_phys->bpo_subobjs, 1481 i * sizeof (subobj), sizeof (subobj), &subobj, 0)); 1482 error = bpobj_open(&subbpo, bpo->bpo_os, subobj); 1483 if (error != 0) { 1484 (void) printf("ERROR %u while trying to open " 1485 "subobj id %llu\n", 1486 error, (u_longlong_t)subobj); 1487 continue; 1488 } 1489 dump_full_bpobj(&subbpo, "subobj", indent + 1); 1490 bpobj_close(&subbpo); 1491 } 1492 } else { 1493 (void) printf(" %*s: object %llu, %llu blkptrs, %s\n", 1494 indent * 8, name, 1495 (u_longlong_t)bpo->bpo_object, 1496 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, 1497 bytes); 1498 } 1499 1500 if (dump_opt['d'] < 5) 1501 return; 1502 1503 1504 if (indent == 0) { 1505 (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL); 1506 (void) printf("\n"); 1507 } 1508 } 1509 1510 static void 1511 dump_deadlist(dsl_deadlist_t *dl) 1512 { 1513 dsl_deadlist_entry_t 
*dle; 1514 uint64_t unused; 1515 char bytes[32]; 1516 char comp[32]; 1517 char uncomp[32]; 1518 1519 if (dump_opt['d'] < 3) 1520 return; 1521 1522 if (dl->dl_oldfmt) { 1523 dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0); 1524 return; 1525 } 1526 1527 zdb_nicenum(dl->dl_phys->dl_used, bytes); 1528 zdb_nicenum(dl->dl_phys->dl_comp, comp); 1529 zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp); 1530 (void) printf("\n Deadlist: %s (%s/%s comp)\n", 1531 bytes, comp, uncomp); 1532 1533 if (dump_opt['d'] < 4) 1534 return; 1535 1536 (void) printf("\n"); 1537 1538 /* force the tree to be loaded */ 1539 dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused); 1540 1541 for (dle = avl_first(&dl->dl_tree); dle; 1542 dle = AVL_NEXT(&dl->dl_tree, dle)) { 1543 if (dump_opt['d'] >= 5) { 1544 char buf[128]; 1545 (void) snprintf(buf, sizeof (buf), "mintxg %llu -> ", 1546 (longlong_t)dle->dle_mintxg, 1547 (longlong_t)dle->dle_bpobj.bpo_object); 1548 1549 dump_full_bpobj(&dle->dle_bpobj, buf, 0); 1550 } else { 1551 (void) printf("mintxg %llu -> obj %llu\n", 1552 (longlong_t)dle->dle_mintxg, 1553 (longlong_t)dle->dle_bpobj.bpo_object); 1554 1555 } 1556 } 1557 } 1558 1559 static avl_tree_t idx_tree; 1560 static avl_tree_t domain_tree; 1561 static boolean_t fuid_table_loaded; 1562 static boolean_t sa_loaded; 1563 sa_attr_type_t *sa_attr_table; 1564 1565 static void 1566 fuid_table_destroy() 1567 { 1568 if (fuid_table_loaded) { 1569 zfs_fuid_table_destroy(&idx_tree, &domain_tree); 1570 fuid_table_loaded = B_FALSE; 1571 } 1572 } 1573 1574 /* 1575 * print uid or gid information. 1576 * For normal POSIX id just the id is printed in decimal format. 1577 * For CIFS files with FUID the fuid is printed in hex followed by 1578 * the domain-rid string. 1579 */ 1580 static void 1581 print_idstr(uint64_t id, const char *id_type) 1582 { 1583 if (FUID_INDEX(id)) { 1584 char *domain; 1585 1586 domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id)); 1587 (void) printf("\t%s %llx [%s-%d]\n", id_type, 1588 (u_longlong_t)id, domain, (int)FUID_RID(id)); 1589 } else { 1590 (void) printf("\t%s %llu\n", id_type, (u_longlong_t)id); 1591 } 1592 1593 } 1594 1595 static void 1596 dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid) 1597 { 1598 uint32_t uid_idx, gid_idx; 1599 1600 uid_idx = FUID_INDEX(uid); 1601 gid_idx = FUID_INDEX(gid); 1602 1603 /* Load domain table, if not already loaded */ 1604 if (!fuid_table_loaded && (uid_idx || gid_idx)) { 1605 uint64_t fuid_obj; 1606 1607 /* first find the fuid object. 
It lives in the master node */ 1608 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 1609 8, 1, &fuid_obj) == 0); 1610 zfs_fuid_avl_tree_create(&idx_tree, &domain_tree); 1611 (void) zfs_fuid_table_load(os, fuid_obj, 1612 &idx_tree, &domain_tree); 1613 fuid_table_loaded = B_TRUE; 1614 } 1615 1616 print_idstr(uid, "uid"); 1617 print_idstr(gid, "gid"); 1618 } 1619 1620 /*ARGSUSED*/ 1621 static void 1622 dump_znode(objset_t *os, uint64_t object, void *data, size_t size) 1623 { 1624 char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */ 1625 sa_handle_t *hdl; 1626 uint64_t xattr, rdev, gen; 1627 uint64_t uid, gid, mode, fsize, parent, links; 1628 uint64_t pflags; 1629 uint64_t acctm[2], modtm[2], chgtm[2], crtm[2]; 1630 time_t z_crtime, z_atime, z_mtime, z_ctime; 1631 sa_bulk_attr_t bulk[12]; 1632 int idx = 0; 1633 int error; 1634 1635 if (!sa_loaded) { 1636 uint64_t sa_attrs = 0; 1637 uint64_t version; 1638 1639 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 1640 8, 1, &version) == 0); 1641 if (version >= ZPL_VERSION_SA) { 1642 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 1643 8, 1, &sa_attrs) == 0); 1644 } 1645 if ((error = sa_setup(os, sa_attrs, zfs_attr_table, 1646 ZPL_END, &sa_attr_table)) != 0) { 1647 (void) printf("sa_setup failed errno %d, can't " 1648 "display znode contents\n", error); 1649 return; 1650 } 1651 sa_loaded = B_TRUE; 1652 } 1653 1654 if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) { 1655 (void) printf("Failed to get handle for SA znode\n"); 1656 return; 1657 } 1658 1659 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8); 1660 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8); 1661 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL, 1662 &links, 8); 1663 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8); 1664 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL, 1665 &mode, 8); 1666 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT], 1667 NULL, &parent, 8); 1668 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL, 1669 &fsize, 8); 1670 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL, 1671 acctm, 16); 1672 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL, 1673 modtm, 16); 1674 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL, 1675 crtm, 16); 1676 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL, 1677 chgtm, 16); 1678 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL, 1679 &pflags, 8); 1680 1681 if (sa_bulk_lookup(hdl, bulk, idx)) { 1682 (void) sa_handle_destroy(hdl); 1683 return; 1684 } 1685 1686 error = zfs_obj_to_path(os, object, path, sizeof (path)); 1687 if (error != 0) { 1688 (void) snprintf(path, sizeof (path), "\?\?\?<object#%llu>", 1689 (u_longlong_t)object); 1690 } 1691 if (dump_opt['d'] < 3) { 1692 (void) printf("\t%s\n", path); 1693 (void) sa_handle_destroy(hdl); 1694 return; 1695 } 1696 1697 z_crtime = (time_t)crtm[0]; 1698 z_atime = (time_t)acctm[0]; 1699 z_mtime = (time_t)modtm[0]; 1700 z_ctime = (time_t)chgtm[0]; 1701 1702 (void) printf("\tpath %s\n", path); 1703 dump_uidgid(os, uid, gid); 1704 (void) printf("\tatime %s", ctime(&z_atime)); 1705 (void) printf("\tmtime %s", ctime(&z_mtime)); 1706 (void) printf("\tctime %s", ctime(&z_ctime)); 1707 (void) printf("\tcrtime %s", ctime(&z_crtime)); 1708 (void) printf("\tgen %llu\n", (u_longlong_t)gen); 1709 (void) printf("\tmode %llo\n", (u_longlong_t)mode); 1710 (void) printf("\tsize %llu\n", (u_longlong_t)fsize); 1711 (void) printf("\tparent 
%llu\n", (u_longlong_t)parent); 1712 (void) printf("\tlinks %llu\n", (u_longlong_t)links); 1713 (void) printf("\tpflags %llx\n", (u_longlong_t)pflags); 1714 if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr, 1715 sizeof (uint64_t)) == 0) 1716 (void) printf("\txattr %llu\n", (u_longlong_t)xattr); 1717 if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev, 1718 sizeof (uint64_t)) == 0) 1719 (void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev); 1720 sa_handle_destroy(hdl); 1721 } 1722 1723 /*ARGSUSED*/ 1724 static void 1725 dump_acl(objset_t *os, uint64_t object, void *data, size_t size) 1726 { 1727 } 1728 1729 /*ARGSUSED*/ 1730 static void 1731 dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size) 1732 { 1733 } 1734 1735 static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = { 1736 dump_none, /* unallocated */ 1737 dump_zap, /* object directory */ 1738 dump_uint64, /* object array */ 1739 dump_none, /* packed nvlist */ 1740 dump_packed_nvlist, /* packed nvlist size */ 1741 dump_none, /* bpobj */ 1742 dump_bpobj, /* bpobj header */ 1743 dump_none, /* SPA space map header */ 1744 dump_none, /* SPA space map */ 1745 dump_none, /* ZIL intent log */ 1746 dump_dnode, /* DMU dnode */ 1747 dump_dmu_objset, /* DMU objset */ 1748 dump_dsl_dir, /* DSL directory */ 1749 dump_zap, /* DSL directory child map */ 1750 dump_zap, /* DSL dataset snap map */ 1751 dump_zap, /* DSL props */ 1752 dump_dsl_dataset, /* DSL dataset */ 1753 dump_znode, /* ZFS znode */ 1754 dump_acl, /* ZFS V0 ACL */ 1755 dump_uint8, /* ZFS plain file */ 1756 dump_zpldir, /* ZFS directory */ 1757 dump_zap, /* ZFS master node */ 1758 dump_zap, /* ZFS delete queue */ 1759 dump_uint8, /* zvol object */ 1760 dump_zap, /* zvol prop */ 1761 dump_uint8, /* other uint8[] */ 1762 dump_uint64, /* other uint64[] */ 1763 dump_zap, /* other ZAP */ 1764 dump_zap, /* persistent error log */ 1765 dump_uint8, /* SPA history */ 1766 dump_history_offsets, /* SPA history offsets */ 1767 dump_zap, /* Pool properties */ 1768 dump_zap, /* DSL permissions */ 1769 dump_acl, /* ZFS ACL */ 1770 dump_uint8, /* ZFS SYSACL */ 1771 dump_none, /* FUID nvlist */ 1772 dump_packed_nvlist, /* FUID nvlist size */ 1773 dump_zap, /* DSL dataset next clones */ 1774 dump_zap, /* DSL scrub queue */ 1775 dump_zap, /* ZFS user/group used */ 1776 dump_zap, /* ZFS user/group quota */ 1777 dump_zap, /* snapshot refcount tags */ 1778 dump_ddt_zap, /* DDT ZAP object */ 1779 dump_zap, /* DDT statistics */ 1780 dump_znode, /* SA object */ 1781 dump_zap, /* SA Master Node */ 1782 dump_sa_attrs, /* SA attribute registration */ 1783 dump_sa_layouts, /* SA attribute layouts */ 1784 dump_zap, /* DSL scrub translations */ 1785 dump_none, /* fake dedup BP */ 1786 dump_zap, /* deadlist */ 1787 dump_none, /* deadlist hdr */ 1788 dump_zap, /* dsl clones */ 1789 dump_bpobj_subobjs, /* bpobj subobjs */ 1790 dump_unknown, /* Unknown type, must be last */ 1791 }; 1792 1793 static void 1794 dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header) 1795 { 1796 dmu_buf_t *db = NULL; 1797 dmu_object_info_t doi; 1798 dnode_t *dn; 1799 void *bonus = NULL; 1800 size_t bsize = 0; 1801 char iblk[32], dblk[32], lsize[32], asize[32], fill[32]; 1802 char bonus_size[32]; 1803 char aux[50]; 1804 int error; 1805 1806 if (*print_header) { 1807 (void) printf("\n%10s %3s %5s %5s %5s %5s %6s %s\n", 1808 "Object", "lvl", "iblk", "dblk", "dsize", "lsize", 1809 "%full", "type"); 1810 *print_header = 0; 1811 } 1812 1813 if (object == 0) { 1814 dn = DMU_META_DNODE(os); 1815 } 
else { 1816 error = dmu_bonus_hold(os, object, FTAG, &db); 1817 if (error) 1818 fatal("dmu_bonus_hold(%llu) failed, errno %u", 1819 object, error); 1820 bonus = db->db_data; 1821 bsize = db->db_size; 1822 dn = DB_DNODE((dmu_buf_impl_t *)db); 1823 } 1824 dmu_object_info_from_dnode(dn, &doi); 1825 1826 zdb_nicenum(doi.doi_metadata_block_size, iblk); 1827 zdb_nicenum(doi.doi_data_block_size, dblk); 1828 zdb_nicenum(doi.doi_max_offset, lsize); 1829 zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize); 1830 zdb_nicenum(doi.doi_bonus_size, bonus_size); 1831 (void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count * 1832 doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) / 1833 doi.doi_max_offset); 1834 1835 aux[0] = '\0'; 1836 1837 if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) { 1838 (void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)", 1839 ZDB_CHECKSUM_NAME(doi.doi_checksum)); 1840 } 1841 1842 if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) { 1843 (void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)", 1844 ZDB_COMPRESS_NAME(doi.doi_compress)); 1845 } 1846 1847 (void) printf("%10lld %3u %5s %5s %5s %5s %6s %s%s\n", 1848 (u_longlong_t)object, doi.doi_indirection, iblk, dblk, 1849 asize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux); 1850 1851 if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) { 1852 (void) printf("%10s %3s %5s %5s %5s %5s %6s %s\n", 1853 "", "", "", "", "", bonus_size, "bonus", 1854 ZDB_OT_NAME(doi.doi_bonus_type)); 1855 } 1856 1857 if (verbosity >= 4) { 1858 (void) printf("\tdnode flags: %s%s%s\n", 1859 (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ? 1860 "USED_BYTES " : "", 1861 (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ? 1862 "USERUSED_ACCOUNTED " : "", 1863 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? 1864 "SPILL_BLKPTR" : ""); 1865 (void) printf("\tdnode maxblkid: %llu\n", 1866 (longlong_t)dn->dn_phys->dn_maxblkid); 1867 1868 object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os, object, 1869 bonus, bsize); 1870 object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object, NULL, 0); 1871 *print_header = 1; 1872 } 1873 1874 if (verbosity >= 5) 1875 dump_indirect(dn); 1876 1877 if (verbosity >= 5) { 1878 /* 1879 * Report the list of segments that comprise the object. 
1880 */ 1881 uint64_t start = 0; 1882 uint64_t end; 1883 uint64_t blkfill = 1; 1884 int minlvl = 1; 1885 1886 if (dn->dn_type == DMU_OT_DNODE) { 1887 minlvl = 0; 1888 blkfill = DNODES_PER_BLOCK; 1889 } 1890 1891 for (;;) { 1892 char segsize[32]; 1893 error = dnode_next_offset(dn, 1894 0, &start, minlvl, blkfill, 0); 1895 if (error) 1896 break; 1897 end = start; 1898 error = dnode_next_offset(dn, 1899 DNODE_FIND_HOLE, &end, minlvl, blkfill, 0); 1900 zdb_nicenum(end - start, segsize); 1901 (void) printf("\t\tsegment [%016llx, %016llx)" 1902 " size %5s\n", (u_longlong_t)start, 1903 (u_longlong_t)end, segsize); 1904 if (error) 1905 break; 1906 start = end; 1907 } 1908 } 1909 1910 if (db != NULL) 1911 dmu_buf_rele(db, FTAG); 1912 } 1913 1914 static char *objset_types[DMU_OST_NUMTYPES] = { 1915 "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" }; 1916 1917 static void 1918 dump_dir(objset_t *os) 1919 { 1920 dmu_objset_stats_t dds; 1921 uint64_t object, object_count; 1922 uint64_t refdbytes, usedobjs, scratch; 1923 char numbuf[32]; 1924 char blkbuf[BP_SPRINTF_LEN + 20]; 1925 char osname[MAXNAMELEN]; 1926 char *type = "UNKNOWN"; 1927 int verbosity = dump_opt['d']; 1928 int print_header = 1; 1929 int i, error; 1930 1931 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 1932 dmu_objset_fast_stat(os, &dds); 1933 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 1934 1935 if (dds.dds_type < DMU_OST_NUMTYPES) 1936 type = objset_types[dds.dds_type]; 1937 1938 if (dds.dds_type == DMU_OST_META) { 1939 dds.dds_creation_txg = TXG_INITIAL; 1940 usedobjs = BP_GET_FILL(os->os_rootbp); 1941 refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)-> 1942 dd_used_bytes; 1943 } else { 1944 dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch); 1945 } 1946 1947 ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp)); 1948 1949 zdb_nicenum(refdbytes, numbuf); 1950 1951 if (verbosity >= 4) { 1952 (void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp "); 1953 (void) snprintf_blkptr(blkbuf + strlen(blkbuf), 1954 sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp); 1955 } else { 1956 blkbuf[0] = '\0'; 1957 } 1958 1959 dmu_objset_name(os, osname); 1960 1961 (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, " 1962 "%s, %llu objects%s\n", 1963 osname, type, (u_longlong_t)dmu_objset_id(os), 1964 (u_longlong_t)dds.dds_creation_txg, 1965 numbuf, (u_longlong_t)usedobjs, blkbuf); 1966 1967 if (zopt_objects != 0) { 1968 for (i = 0; i < zopt_objects; i++) 1969 dump_object(os, zopt_object[i], verbosity, 1970 &print_header); 1971 (void) printf("\n"); 1972 return; 1973 } 1974 1975 if (dump_opt['i'] != 0 || verbosity >= 2) 1976 dump_intent_log(dmu_objset_zil(os)); 1977 1978 if (dmu_objset_ds(os) != NULL) 1979 dump_deadlist(&dmu_objset_ds(os)->ds_deadlist); 1980 1981 if (verbosity < 2) 1982 return; 1983 1984 if (BP_IS_HOLE(os->os_rootbp)) 1985 return; 1986 1987 dump_object(os, 0, verbosity, &print_header); 1988 object_count = 0; 1989 if (DMU_USERUSED_DNODE(os) != NULL && 1990 DMU_USERUSED_DNODE(os)->dn_type != 0) { 1991 dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header); 1992 dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header); 1993 } 1994 1995 object = 0; 1996 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) { 1997 dump_object(os, object, verbosity, &print_header); 1998 object_count++; 1999 } 2000 2001 ASSERT3U(object_count, ==, usedobjs); 2002 2003 (void) printf("\n"); 2004 2005 if (error != ESRCH) { 2006 (void) fprintf(stderr, "dmu_object_next() = %d\n", error); 2007 abort(); 2008 } 2009 } 
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
	time_t timestamp = ub->ub_timestamp;

	(void) printf(header ? header : "");
	(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
	(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
	(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
	(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
	(void) printf("\ttimestamp = %llu UTC = %s",
	    (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
	if (dump_opt['u'] >= 3) {
		char blkbuf[BP_SPRINTF_LEN];
		snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
		(void) printf("\trootbp = %s\n", blkbuf);
	}
	(void) printf(footer ? footer : "");
}

static void
dump_config(spa_t *spa)
{
	dmu_buf_t *db;
	size_t nvsize = 0;
	int error = 0;

	error = dmu_bonus_hold(spa->spa_meta_objset,
	    spa->spa_config_object, FTAG, &db);

	if (error == 0) {
		nvsize = *(uint64_t *)db->db_data;
		dmu_buf_rele(db, FTAG);

		(void) printf("\nMOS Configuration:\n");
		dump_packed_nvlist(spa->spa_meta_objset,
		    spa->spa_config_object, (void *)&nvsize, 1);
	} else {
		(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
		    (u_longlong_t)spa->spa_config_object, error);
	}
}

static void
dump_cachefile(const char *cachefile)
{
	int fd;
	struct stat64 statbuf;
	char *buf;
	nvlist_t *config;

	if ((fd = open64(cachefile, O_RDONLY)) < 0) {
		(void) printf("cannot open '%s': %s\n", cachefile,
		    strerror(errno));
		exit(1);
	}

	if (fstat64(fd, &statbuf) != 0) {
		(void) printf("failed to stat '%s': %s\n", cachefile,
		    strerror(errno));
		exit(1);
	}

	if ((buf = malloc(statbuf.st_size)) == NULL) {
		(void) fprintf(stderr, "failed to allocate %llu bytes\n",
		    (u_longlong_t)statbuf.st_size);
		exit(1);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) fprintf(stderr, "failed to read %llu bytes\n",
		    (u_longlong_t)statbuf.st_size);
		exit(1);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
		(void) fprintf(stderr, "failed to unpack nvlist\n");
		exit(1);
	}

	free(buf);

	dump_nvlist(config, 0);

	nvlist_free(config);
}

#define	ZDB_MAX_UB_HEADER_SIZE 32

static void
dump_label_uberblocks(vdev_label_t *lbl, uint64_t ashift)
{
	vdev_t vd;
	vdev_t *vdp = &vd;
	char header[ZDB_MAX_UB_HEADER_SIZE];

	vd.vdev_ashift = ashift;
	vdp->vdev_top = vdp;

	for (int i = 0; i < VDEV_UBERBLOCK_COUNT(vdp); i++) {
		uint64_t uoff = VDEV_UBERBLOCK_OFFSET(vdp, i);
		uberblock_t *ub = (void *)((char *)lbl + uoff);

		if (uberblock_verify(ub))
			continue;
		(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
		    "Uberblock[%d]\n", i);
		dump_uberblock(ub, header, "");
	}
}

static void
dump_label(const char *dev)
{
	int fd;
	vdev_label_t label;
	char *path, *buf = label.vl_vdev_phys.vp_nvlist;
	size_t buflen = sizeof (label.vl_vdev_phys.vp_nvlist);
	struct stat64 statbuf;
	uint64_t psize, ashift;
	int len = strlen(dev) + 1;

	if (strncmp(dev, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) {
		len++;
		path
= malloc(len); 2139 (void) snprintf(path, len, "%s%s", ZFS_RDISK_ROOTD, 2140 dev + strlen(ZFS_DISK_ROOTD)); 2141 } else { 2142 path = strdup(dev); 2143 } 2144 2145 if ((fd = open64(path, O_RDONLY)) < 0) { 2146 (void) printf("cannot open '%s': %s\n", path, strerror(errno)); 2147 free(path); 2148 exit(1); 2149 } 2150 2151 if (fstat64(fd, &statbuf) != 0) { 2152 (void) printf("failed to stat '%s': %s\n", path, 2153 strerror(errno)); 2154 free(path); 2155 (void) close(fd); 2156 exit(1); 2157 } 2158 2159 if (S_ISBLK(statbuf.st_mode)) { 2160 (void) printf("cannot use '%s': character device required\n", 2161 path); 2162 free(path); 2163 (void) close(fd); 2164 exit(1); 2165 } 2166 2167 psize = statbuf.st_size; 2168 psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t)); 2169 2170 for (int l = 0; l < VDEV_LABELS; l++) { 2171 nvlist_t *config = NULL; 2172 2173 (void) printf("--------------------------------------------\n"); 2174 (void) printf("LABEL %d\n", l); 2175 (void) printf("--------------------------------------------\n"); 2176 2177 if (pread64(fd, &label, sizeof (label), 2178 vdev_label_offset(psize, l, 0)) != sizeof (label)) { 2179 (void) printf("failed to read label %d\n", l); 2180 continue; 2181 } 2182 2183 if (nvlist_unpack(buf, buflen, &config, 0) != 0) { 2184 (void) printf("failed to unpack label %d\n", l); 2185 ashift = SPA_MINBLOCKSHIFT; 2186 } else { 2187 nvlist_t *vdev_tree = NULL; 2188 2189 dump_nvlist(config, 4); 2190 if ((nvlist_lookup_nvlist(config, 2191 ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) || 2192 (nvlist_lookup_uint64(vdev_tree, 2193 ZPOOL_CONFIG_ASHIFT, &ashift) != 0)) 2194 ashift = SPA_MINBLOCKSHIFT; 2195 nvlist_free(config); 2196 } 2197 if (dump_opt['u']) 2198 dump_label_uberblocks(&label, ashift); 2199 } 2200 2201 free(path); 2202 (void) close(fd); 2203 } 2204 2205 static uint64_t dataset_feature_count[SPA_FEATURES]; 2206 2207 /*ARGSUSED*/ 2208 static int 2209 dump_one_dir(const char *dsname, void *arg) 2210 { 2211 int error; 2212 objset_t *os; 2213 2214 error = dmu_objset_own(dsname, DMU_OST_ANY, B_TRUE, FTAG, &os); 2215 if (error) { 2216 (void) printf("Could not open %s, error %d\n", dsname, error); 2217 return (0); 2218 } 2219 2220 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 2221 if (!dmu_objset_ds(os)->ds_feature_inuse[f]) 2222 continue; 2223 ASSERT(spa_feature_table[f].fi_flags & 2224 ZFEATURE_FLAG_PER_DATASET); 2225 dataset_feature_count[f]++; 2226 } 2227 2228 dump_dir(os); 2229 dmu_objset_disown(os, FTAG); 2230 fuid_table_destroy(); 2231 sa_loaded = B_FALSE; 2232 return (0); 2233 } 2234 2235 /* 2236 * Block statistics. 2237 */ 2238 #define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2) 2239 typedef struct zdb_blkstats { 2240 uint64_t zb_asize; 2241 uint64_t zb_lsize; 2242 uint64_t zb_psize; 2243 uint64_t zb_count; 2244 uint64_t zb_gangs; 2245 uint64_t zb_ditto_samevdev; 2246 uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE]; 2247 } zdb_blkstats_t; 2248 2249 /* 2250 * Extended object types to report deferred frees and dedup auto-ditto blocks. 
2251 */ 2252 #define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0) 2253 #define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1) 2254 #define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2) 2255 #define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3) 2256 2257 static char *zdb_ot_extname[] = { 2258 "deferred free", 2259 "dedup ditto", 2260 "other", 2261 "Total", 2262 }; 2263 2264 #define ZB_TOTAL DN_MAX_LEVELS 2265 2266 typedef struct zdb_cb { 2267 zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1]; 2268 uint64_t zcb_dedup_asize; 2269 uint64_t zcb_dedup_blocks; 2270 uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES]; 2271 uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES] 2272 [BPE_PAYLOAD_SIZE]; 2273 uint64_t zcb_start; 2274 uint64_t zcb_lastprint; 2275 uint64_t zcb_totalasize; 2276 uint64_t zcb_errors[256]; 2277 int zcb_readfails; 2278 int zcb_haderrors; 2279 spa_t *zcb_spa; 2280 } zdb_cb_t; 2281 2282 static void 2283 zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp, 2284 dmu_object_type_t type) 2285 { 2286 uint64_t refcnt = 0; 2287 2288 ASSERT(type < ZDB_OT_TOTAL); 2289 2290 if (zilog && zil_bp_tree_add(zilog, bp) != 0) 2291 return; 2292 2293 for (int i = 0; i < 4; i++) { 2294 int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL; 2295 int t = (i & 1) ? type : ZDB_OT_TOTAL; 2296 int equal; 2297 zdb_blkstats_t *zb = &zcb->zcb_type[l][t]; 2298 2299 zb->zb_asize += BP_GET_ASIZE(bp); 2300 zb->zb_lsize += BP_GET_LSIZE(bp); 2301 zb->zb_psize += BP_GET_PSIZE(bp); 2302 zb->zb_count++; 2303 2304 /* 2305 * The histogram is only big enough to record blocks up to 2306 * SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last, 2307 * "other", bucket. 2308 */ 2309 int idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT; 2310 idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1); 2311 zb->zb_psize_histogram[idx]++; 2312 2313 zb->zb_gangs += BP_COUNT_GANG(bp); 2314 2315 switch (BP_GET_NDVAS(bp)) { 2316 case 2: 2317 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 2318 DVA_GET_VDEV(&bp->blk_dva[1])) 2319 zb->zb_ditto_samevdev++; 2320 break; 2321 case 3: 2322 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 2323 DVA_GET_VDEV(&bp->blk_dva[1])) + 2324 (DVA_GET_VDEV(&bp->blk_dva[0]) == 2325 DVA_GET_VDEV(&bp->blk_dva[2])) + 2326 (DVA_GET_VDEV(&bp->blk_dva[1]) == 2327 DVA_GET_VDEV(&bp->blk_dva[2])); 2328 if (equal != 0) 2329 zb->zb_ditto_samevdev++; 2330 break; 2331 } 2332 2333 } 2334 2335 if (BP_IS_EMBEDDED(bp)) { 2336 zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++; 2337 zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)] 2338 [BPE_GET_PSIZE(bp)]++; 2339 return; 2340 } 2341 2342 if (dump_opt['L']) 2343 return; 2344 2345 if (BP_GET_DEDUP(bp)) { 2346 ddt_t *ddt; 2347 ddt_entry_t *dde; 2348 2349 ddt = ddt_select(zcb->zcb_spa, bp); 2350 ddt_enter(ddt); 2351 dde = ddt_lookup(ddt, bp, B_FALSE); 2352 2353 if (dde == NULL) { 2354 refcnt = 0; 2355 } else { 2356 ddt_phys_t *ddp = ddt_phys_select(dde, bp); 2357 ddt_phys_decref(ddp); 2358 refcnt = ddp->ddp_refcnt; 2359 if (ddt_phys_total_refcnt(dde) == 0) 2360 ddt_remove(ddt, dde); 2361 } 2362 ddt_exit(ddt); 2363 } 2364 2365 VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa, 2366 refcnt ? 
0 : spa_first_txg(zcb->zcb_spa), 2367 bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0); 2368 } 2369 2370 static void 2371 zdb_blkptr_done(zio_t *zio) 2372 { 2373 spa_t *spa = zio->io_spa; 2374 blkptr_t *bp = zio->io_bp; 2375 int ioerr = zio->io_error; 2376 zdb_cb_t *zcb = zio->io_private; 2377 zbookmark_phys_t *zb = &zio->io_bookmark; 2378 2379 zio_data_buf_free(zio->io_data, zio->io_size); 2380 2381 mutex_enter(&spa->spa_scrub_lock); 2382 spa->spa_scrub_inflight--; 2383 cv_broadcast(&spa->spa_scrub_io_cv); 2384 2385 if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2386 char blkbuf[BP_SPRINTF_LEN]; 2387 2388 zcb->zcb_haderrors = 1; 2389 zcb->zcb_errors[ioerr]++; 2390 2391 if (dump_opt['b'] >= 2) 2392 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2393 else 2394 blkbuf[0] = '\0'; 2395 2396 (void) printf("zdb_blkptr_cb: " 2397 "Got error %d reading " 2398 "<%llu, %llu, %lld, %llx> %s -- skipping\n", 2399 ioerr, 2400 (u_longlong_t)zb->zb_objset, 2401 (u_longlong_t)zb->zb_object, 2402 (u_longlong_t)zb->zb_level, 2403 (u_longlong_t)zb->zb_blkid, 2404 blkbuf); 2405 } 2406 mutex_exit(&spa->spa_scrub_lock); 2407 } 2408 2409 static int 2410 zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2411 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2412 { 2413 zdb_cb_t *zcb = arg; 2414 dmu_object_type_t type; 2415 boolean_t is_metadata; 2416 2417 if (bp == NULL) 2418 return (0); 2419 2420 if (dump_opt['b'] >= 5 && bp->blk_birth > 0) { 2421 char blkbuf[BP_SPRINTF_LEN]; 2422 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2423 (void) printf("objset %llu object %llu " 2424 "level %lld offset 0x%llx %s\n", 2425 (u_longlong_t)zb->zb_objset, 2426 (u_longlong_t)zb->zb_object, 2427 (longlong_t)zb->zb_level, 2428 (u_longlong_t)blkid2offset(dnp, bp, zb), 2429 blkbuf); 2430 } 2431 2432 if (BP_IS_HOLE(bp)) 2433 return (0); 2434 2435 type = BP_GET_TYPE(bp); 2436 2437 zdb_count_block(zcb, zilog, bp, 2438 (type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type); 2439 2440 is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)); 2441 2442 if (!BP_IS_EMBEDDED(bp) && 2443 (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) { 2444 size_t size = BP_GET_PSIZE(bp); 2445 void *data = zio_data_buf_alloc(size); 2446 int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW; 2447 2448 /* If it's an intent log block, failure is expected. 
*/ 2449 if (zb->zb_level == ZB_ZIL_LEVEL) 2450 flags |= ZIO_FLAG_SPECULATIVE; 2451 2452 mutex_enter(&spa->spa_scrub_lock); 2453 while (spa->spa_scrub_inflight > max_inflight) 2454 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2455 spa->spa_scrub_inflight++; 2456 mutex_exit(&spa->spa_scrub_lock); 2457 2458 zio_nowait(zio_read(NULL, spa, bp, data, size, 2459 zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb)); 2460 } 2461 2462 zcb->zcb_readfails = 0; 2463 2464 /* only call gethrtime() every 100 blocks */ 2465 static int iters; 2466 if (++iters > 100) 2467 iters = 0; 2468 else 2469 return (0); 2470 2471 if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) { 2472 uint64_t now = gethrtime(); 2473 char buf[10]; 2474 uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize; 2475 int kb_per_sec = 2476 1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000)); 2477 int sec_remaining = 2478 (zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec; 2479 2480 zfs_nicenum(bytes, buf, sizeof (buf)); 2481 (void) fprintf(stderr, 2482 "\r%5s completed (%4dMB/s) " 2483 "estimated time remaining: %uhr %02umin %02usec ", 2484 buf, kb_per_sec / 1024, 2485 sec_remaining / 60 / 60, 2486 sec_remaining / 60 % 60, 2487 sec_remaining % 60); 2488 2489 zcb->zcb_lastprint = now; 2490 } 2491 2492 return (0); 2493 } 2494 2495 static void 2496 zdb_leak(void *arg, uint64_t start, uint64_t size) 2497 { 2498 vdev_t *vd = arg; 2499 2500 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n", 2501 (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size); 2502 } 2503 2504 static metaslab_ops_t zdb_metaslab_ops = { 2505 NULL /* alloc */ 2506 }; 2507 2508 static void 2509 zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb) 2510 { 2511 ddt_bookmark_t ddb = { 0 }; 2512 ddt_entry_t dde; 2513 int error; 2514 2515 while ((error = ddt_walk(spa, &ddb, &dde)) == 0) { 2516 blkptr_t blk; 2517 ddt_phys_t *ddp = dde.dde_phys; 2518 2519 if (ddb.ddb_class == DDT_CLASS_UNIQUE) 2520 return; 2521 2522 ASSERT(ddt_phys_total_refcnt(&dde) > 1); 2523 2524 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2525 if (ddp->ddp_phys_birth == 0) 2526 continue; 2527 ddt_bp_create(ddb.ddb_checksum, 2528 &dde.dde_key, ddp, &blk); 2529 if (p == DDT_PHYS_DITTO) { 2530 zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO); 2531 } else { 2532 zcb->zcb_dedup_asize += 2533 BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1); 2534 zcb->zcb_dedup_blocks++; 2535 } 2536 } 2537 if (!dump_opt['L']) { 2538 ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum]; 2539 ddt_enter(ddt); 2540 VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL); 2541 ddt_exit(ddt); 2542 } 2543 } 2544 2545 ASSERT(error == ENOENT); 2546 } 2547 2548 static void 2549 zdb_leak_init(spa_t *spa, zdb_cb_t *zcb) 2550 { 2551 zcb->zcb_spa = spa; 2552 2553 if (!dump_opt['L']) { 2554 vdev_t *rvd = spa->spa_root_vdev; 2555 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2556 vdev_t *vd = rvd->vdev_child[c]; 2557 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 2558 metaslab_t *msp = vd->vdev_ms[m]; 2559 mutex_enter(&msp->ms_lock); 2560 metaslab_unload(msp); 2561 2562 /* 2563 * For leak detection, we overload the metaslab 2564 * ms_tree to contain allocated segments 2565 * instead of free segments. As a result, 2566 * we can't use the normal metaslab_load/unload 2567 * interfaces. 
2568 */ 2569 if (msp->ms_sm != NULL) { 2570 (void) fprintf(stderr, 2571 "\rloading space map for " 2572 "vdev %llu of %llu, " 2573 "metaslab %llu of %llu ...", 2574 (longlong_t)c, 2575 (longlong_t)rvd->vdev_children, 2576 (longlong_t)m, 2577 (longlong_t)vd->vdev_ms_count); 2578 2579 msp->ms_ops = &zdb_metaslab_ops; 2580 2581 /* 2582 * We don't want to spend the CPU 2583 * manipulating the size-ordered 2584 * tree, so clear the range_tree 2585 * ops. 2586 */ 2587 msp->ms_tree->rt_ops = NULL; 2588 VERIFY0(space_map_load(msp->ms_sm, 2589 msp->ms_tree, SM_ALLOC)); 2590 msp->ms_loaded = B_TRUE; 2591 } 2592 mutex_exit(&msp->ms_lock); 2593 } 2594 } 2595 (void) fprintf(stderr, "\n"); 2596 } 2597 2598 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 2599 2600 zdb_ddt_leak_init(spa, zcb); 2601 2602 spa_config_exit(spa, SCL_CONFIG, FTAG); 2603 } 2604 2605 static void 2606 zdb_leak_fini(spa_t *spa) 2607 { 2608 if (!dump_opt['L']) { 2609 vdev_t *rvd = spa->spa_root_vdev; 2610 for (int c = 0; c < rvd->vdev_children; c++) { 2611 vdev_t *vd = rvd->vdev_child[c]; 2612 for (int m = 0; m < vd->vdev_ms_count; m++) { 2613 metaslab_t *msp = vd->vdev_ms[m]; 2614 mutex_enter(&msp->ms_lock); 2615 2616 /* 2617 * The ms_tree has been overloaded to 2618 * contain allocated segments. Now that we 2619 * finished traversing all blocks, any 2620 * block that remains in the ms_tree 2621 * represents an allocated block that we 2622 * did not claim during the traversal. 2623 * Claimed blocks would have been removed 2624 * from the ms_tree. 2625 */ 2626 range_tree_vacate(msp->ms_tree, zdb_leak, vd); 2627 msp->ms_loaded = B_FALSE; 2628 2629 mutex_exit(&msp->ms_lock); 2630 } 2631 } 2632 } 2633 } 2634 2635 /* ARGSUSED */ 2636 static int 2637 count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2638 { 2639 zdb_cb_t *zcb = arg; 2640 2641 if (dump_opt['b'] >= 5) { 2642 char blkbuf[BP_SPRINTF_LEN]; 2643 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2644 (void) printf("[%s] %s\n", 2645 "deferred free", blkbuf); 2646 } 2647 zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED); 2648 return (0); 2649 } 2650 2651 static int 2652 dump_block_stats(spa_t *spa) 2653 { 2654 zdb_cb_t zcb = { 0 }; 2655 zdb_blkstats_t *zb, *tzb; 2656 uint64_t norm_alloc, norm_space, total_alloc, total_found; 2657 int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD; 2658 boolean_t leaks = B_FALSE; 2659 2660 (void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n", 2661 (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", 2662 (dump_opt['c'] == 1) ? "metadata " : "", 2663 dump_opt['c'] ? "checksums " : "", 2664 (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", 2665 !dump_opt['L'] ? "nothing leaked " : ""); 2666 2667 /* 2668 * Load all space maps as SM_ALLOC maps, then traverse the pool 2669 * claiming each block we discover. If the pool is perfectly 2670 * consistent, the space maps will be empty when we're done. 2671 * Anything left over is a leak; any block we can't claim (because 2672 * it's not part of any space map) is a double allocation, 2673 * reference to a freed block, or an unclaimed log block. 2674 */ 2675 zdb_leak_init(spa, &zcb); 2676 2677 /* 2678 * If there's a deferred-free bplist, process that first. 
2679 */ 2680 (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj, 2681 count_block_cb, &zcb, NULL); 2682 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 2683 (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj, 2684 count_block_cb, &zcb, NULL); 2685 } 2686 if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 2687 VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset, 2688 spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb, 2689 &zcb, NULL)); 2690 } 2691 2692 if (dump_opt['c'] > 1) 2693 flags |= TRAVERSE_PREFETCH_DATA; 2694 2695 zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa)); 2696 zcb.zcb_start = zcb.zcb_lastprint = gethrtime(); 2697 zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb); 2698 2699 /* 2700 * If we've traversed the data blocks then we need to wait for those 2701 * I/Os to complete. We leverage "The Godfather" zio to wait on 2702 * all async I/Os to complete. 2703 */ 2704 if (dump_opt['c']) { 2705 for (int i = 0; i < max_ncpus; i++) { 2706 (void) zio_wait(spa->spa_async_zio_root[i]); 2707 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 2708 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2709 ZIO_FLAG_GODFATHER); 2710 } 2711 } 2712 2713 if (zcb.zcb_haderrors) { 2714 (void) printf("\nError counts:\n\n"); 2715 (void) printf("\t%5s %s\n", "errno", "count"); 2716 for (int e = 0; e < 256; e++) { 2717 if (zcb.zcb_errors[e] != 0) { 2718 (void) printf("\t%5d %llu\n", 2719 e, (u_longlong_t)zcb.zcb_errors[e]); 2720 } 2721 } 2722 } 2723 2724 /* 2725 * Report any leaked segments. 2726 */ 2727 zdb_leak_fini(spa); 2728 2729 tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL]; 2730 2731 norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 2732 norm_space = metaslab_class_get_space(spa_normal_class(spa)); 2733 2734 total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa)); 2735 total_found = tzb->zb_asize - zcb.zcb_dedup_asize; 2736 2737 if (total_found == total_alloc) { 2738 if (!dump_opt['L']) 2739 (void) printf("\n\tNo leaks (block sum matches space" 2740 " maps exactly)\n"); 2741 } else { 2742 (void) printf("block traversal size %llu != alloc %llu " 2743 "(%s %lld)\n", 2744 (u_longlong_t)total_found, 2745 (u_longlong_t)total_alloc, 2746 (dump_opt['L']) ? 
"unreachable" : "leaked", 2747 (longlong_t)(total_alloc - total_found)); 2748 leaks = B_TRUE; 2749 } 2750 2751 if (tzb->zb_count == 0) 2752 return (2); 2753 2754 (void) printf("\n"); 2755 (void) printf("\tbp count: %10llu\n", 2756 (u_longlong_t)tzb->zb_count); 2757 (void) printf("\tganged count: %10llu\n", 2758 (longlong_t)tzb->zb_gangs); 2759 (void) printf("\tbp logical: %10llu avg: %6llu\n", 2760 (u_longlong_t)tzb->zb_lsize, 2761 (u_longlong_t)(tzb->zb_lsize / tzb->zb_count)); 2762 (void) printf("\tbp physical: %10llu avg:" 2763 " %6llu compression: %6.2f\n", 2764 (u_longlong_t)tzb->zb_psize, 2765 (u_longlong_t)(tzb->zb_psize / tzb->zb_count), 2766 (double)tzb->zb_lsize / tzb->zb_psize); 2767 (void) printf("\tbp allocated: %10llu avg:" 2768 " %6llu compression: %6.2f\n", 2769 (u_longlong_t)tzb->zb_asize, 2770 (u_longlong_t)(tzb->zb_asize / tzb->zb_count), 2771 (double)tzb->zb_lsize / tzb->zb_asize); 2772 (void) printf("\tbp deduped: %10llu ref>1:" 2773 " %6llu deduplication: %6.2f\n", 2774 (u_longlong_t)zcb.zcb_dedup_asize, 2775 (u_longlong_t)zcb.zcb_dedup_blocks, 2776 (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0); 2777 (void) printf("\tSPA allocated: %10llu used: %5.2f%%\n", 2778 (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space); 2779 2780 for (bp_embedded_type_t i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) { 2781 if (zcb.zcb_embedded_blocks[i] == 0) 2782 continue; 2783 (void) printf("\n"); 2784 (void) printf("\tadditional, non-pointer bps of type %u: " 2785 "%10llu\n", 2786 i, (u_longlong_t)zcb.zcb_embedded_blocks[i]); 2787 2788 if (dump_opt['b'] >= 3) { 2789 (void) printf("\t number of (compressed) bytes: " 2790 "number of bps\n"); 2791 dump_histogram(zcb.zcb_embedded_histogram[i], 2792 sizeof (zcb.zcb_embedded_histogram[i]) / 2793 sizeof (zcb.zcb_embedded_histogram[i][0]), 0); 2794 } 2795 } 2796 2797 if (tzb->zb_ditto_samevdev != 0) { 2798 (void) printf("\tDittoed blocks on same vdev: %llu\n", 2799 (longlong_t)tzb->zb_ditto_samevdev); 2800 } 2801 2802 if (dump_opt['b'] >= 2) { 2803 int l, t, level; 2804 (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE" 2805 "\t avg\t comp\t%%Total\tType\n"); 2806 2807 for (t = 0; t <= ZDB_OT_TOTAL; t++) { 2808 char csize[32], lsize[32], psize[32], asize[32]; 2809 char avg[32], gang[32]; 2810 char *typename; 2811 2812 if (t < DMU_OT_NUMTYPES) 2813 typename = dmu_ot[t].ot_name; 2814 else 2815 typename = zdb_ot_extname[t - DMU_OT_NUMTYPES]; 2816 2817 if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) { 2818 (void) printf("%6s\t%5s\t%5s\t%5s" 2819 "\t%5s\t%5s\t%6s\t%s\n", 2820 "-", 2821 "-", 2822 "-", 2823 "-", 2824 "-", 2825 "-", 2826 "-", 2827 typename); 2828 continue; 2829 } 2830 2831 for (l = ZB_TOTAL - 1; l >= -1; l--) { 2832 level = (l == -1 ? 
ZB_TOTAL : l); 2833 zb = &zcb.zcb_type[level][t]; 2834 2835 if (zb->zb_asize == 0) 2836 continue; 2837 2838 if (dump_opt['b'] < 3 && level != ZB_TOTAL) 2839 continue; 2840 2841 if (level == 0 && zb->zb_asize == 2842 zcb.zcb_type[ZB_TOTAL][t].zb_asize) 2843 continue; 2844 2845 zdb_nicenum(zb->zb_count, csize); 2846 zdb_nicenum(zb->zb_lsize, lsize); 2847 zdb_nicenum(zb->zb_psize, psize); 2848 zdb_nicenum(zb->zb_asize, asize); 2849 zdb_nicenum(zb->zb_asize / zb->zb_count, avg); 2850 zdb_nicenum(zb->zb_gangs, gang); 2851 2852 (void) printf("%6s\t%5s\t%5s\t%5s\t%5s" 2853 "\t%5.2f\t%6.2f\t", 2854 csize, lsize, psize, asize, avg, 2855 (double)zb->zb_lsize / zb->zb_psize, 2856 100.0 * zb->zb_asize / tzb->zb_asize); 2857 2858 if (level == ZB_TOTAL) 2859 (void) printf("%s\n", typename); 2860 else 2861 (void) printf(" L%d %s\n", 2862 level, typename); 2863 2864 if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) { 2865 (void) printf("\t number of ganged " 2866 "blocks: %s\n", gang); 2867 } 2868 2869 if (dump_opt['b'] >= 4) { 2870 (void) printf("psize " 2871 "(in 512-byte sectors): " 2872 "number of blocks\n"); 2873 dump_histogram(zb->zb_psize_histogram, 2874 PSIZE_HISTO_SIZE, 0); 2875 } 2876 } 2877 } 2878 } 2879 2880 (void) printf("\n"); 2881 2882 if (leaks) 2883 return (2); 2884 2885 if (zcb.zcb_haderrors) 2886 return (3); 2887 2888 return (0); 2889 } 2890 2891 typedef struct zdb_ddt_entry { 2892 ddt_key_t zdde_key; 2893 uint64_t zdde_ref_blocks; 2894 uint64_t zdde_ref_lsize; 2895 uint64_t zdde_ref_psize; 2896 uint64_t zdde_ref_dsize; 2897 avl_node_t zdde_node; 2898 } zdb_ddt_entry_t; 2899 2900 /* ARGSUSED */ 2901 static int 2902 zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2903 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2904 { 2905 avl_tree_t *t = arg; 2906 avl_index_t where; 2907 zdb_ddt_entry_t *zdde, zdde_search; 2908 2909 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2910 return (0); 2911 2912 if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) { 2913 (void) printf("traversing objset %llu, %llu objects, " 2914 "%lu blocks so far\n", 2915 (u_longlong_t)zb->zb_objset, 2916 (u_longlong_t)BP_GET_FILL(bp), 2917 avl_numnodes(t)); 2918 } 2919 2920 if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF || 2921 BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp))) 2922 return (0); 2923 2924 ddt_key_fill(&zdde_search.zdde_key, bp); 2925 2926 zdde = avl_find(t, &zdde_search, &where); 2927 2928 if (zdde == NULL) { 2929 zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL); 2930 zdde->zdde_key = zdde_search.zdde_key; 2931 avl_insert(t, zdde, where); 2932 } 2933 2934 zdde->zdde_ref_blocks += 1; 2935 zdde->zdde_ref_lsize += BP_GET_LSIZE(bp); 2936 zdde->zdde_ref_psize += BP_GET_PSIZE(bp); 2937 zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp); 2938 2939 return (0); 2940 } 2941 2942 static void 2943 dump_simulated_ddt(spa_t *spa) 2944 { 2945 avl_tree_t t; 2946 void *cookie = NULL; 2947 zdb_ddt_entry_t *zdde; 2948 ddt_histogram_t ddh_total = { 0 }; 2949 ddt_stat_t dds_total = { 0 }; 2950 2951 avl_create(&t, ddt_entry_compare, 2952 sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node)); 2953 2954 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 2955 2956 (void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, 2957 zdb_ddt_add_cb, &t); 2958 2959 spa_config_exit(spa, SCL_CONFIG, FTAG); 2960 2961 while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) { 2962 ddt_stat_t dds; 2963 uint64_t refcnt = zdde->zdde_ref_blocks; 2964 
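		/*
		 * Each simulated DDT entry was seen zdde_ref_blocks times
		 * during traversal, so the dds_* fields below describe the
		 * single unique block (referenced totals divided by refcnt)
		 * while the dds_ref_* fields keep the full referenced totals;
		 * the entry then lands in histogram bucket
		 * highbit64(refcnt) - 1, i.e. it is grouped by power-of-two
		 * reference count.
		 */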
ASSERT(refcnt != 0); 2965 2966 dds.dds_blocks = zdde->zdde_ref_blocks / refcnt; 2967 dds.dds_lsize = zdde->zdde_ref_lsize / refcnt; 2968 dds.dds_psize = zdde->zdde_ref_psize / refcnt; 2969 dds.dds_dsize = zdde->zdde_ref_dsize / refcnt; 2970 2971 dds.dds_ref_blocks = zdde->zdde_ref_blocks; 2972 dds.dds_ref_lsize = zdde->zdde_ref_lsize; 2973 dds.dds_ref_psize = zdde->zdde_ref_psize; 2974 dds.dds_ref_dsize = zdde->zdde_ref_dsize; 2975 2976 ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1], 2977 &dds, 0); 2978 2979 umem_free(zdde, sizeof (*zdde)); 2980 } 2981 2982 avl_destroy(&t); 2983 2984 ddt_histogram_stat(&dds_total, &ddh_total); 2985 2986 (void) printf("Simulated DDT histogram:\n"); 2987 2988 zpool_dump_ddt(&dds_total, &ddh_total); 2989 2990 dump_dedup_ratio(&dds_total); 2991 } 2992 2993 static void 2994 dump_zpool(spa_t *spa) 2995 { 2996 dsl_pool_t *dp = spa_get_dsl(spa); 2997 int rc = 0; 2998 2999 if (dump_opt['S']) { 3000 dump_simulated_ddt(spa); 3001 return; 3002 } 3003 3004 if (!dump_opt['e'] && dump_opt['C'] > 1) { 3005 (void) printf("\nCached configuration:\n"); 3006 dump_nvlist(spa->spa_config, 8); 3007 } 3008 3009 if (dump_opt['C']) 3010 dump_config(spa); 3011 3012 if (dump_opt['u']) 3013 dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n"); 3014 3015 if (dump_opt['D']) 3016 dump_all_ddts(spa); 3017 3018 if (dump_opt['d'] > 2 || dump_opt['m']) 3019 dump_metaslabs(spa); 3020 if (dump_opt['M']) 3021 dump_metaslab_groups(spa); 3022 3023 if (dump_opt['d'] || dump_opt['i']) { 3024 dump_dir(dp->dp_meta_objset); 3025 if (dump_opt['d'] >= 3) { 3026 dump_full_bpobj(&spa->spa_deferred_bpobj, 3027 "Deferred frees", 0); 3028 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 3029 dump_full_bpobj( 3030 &spa->spa_dsl_pool->dp_free_bpobj, 3031 "Pool snapshot frees", 0); 3032 } 3033 3034 if (spa_feature_is_active(spa, 3035 SPA_FEATURE_ASYNC_DESTROY)) { 3036 dump_bptree(spa->spa_meta_objset, 3037 spa->spa_dsl_pool->dp_bptree_obj, 3038 "Pool dataset frees"); 3039 } 3040 dump_dtl(spa->spa_root_vdev, 0); 3041 } 3042 (void) dmu_objset_find(spa_name(spa), dump_one_dir, 3043 NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 3044 3045 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 3046 uint64_t refcount; 3047 3048 if (!(spa_feature_table[f].fi_flags & 3049 ZFEATURE_FLAG_PER_DATASET) || 3050 !spa_feature_is_enabled(spa, f)) { 3051 ASSERT0(dataset_feature_count[f]); 3052 continue; 3053 } 3054 (void) feature_get_refcount(spa, 3055 &spa_feature_table[f], &refcount); 3056 if (dataset_feature_count[f] != refcount) { 3057 (void) printf("%s feature refcount mismatch: " 3058 "%lld datasets != %lld refcount\n", 3059 spa_feature_table[f].fi_uname, 3060 (longlong_t)dataset_feature_count[f], 3061 (longlong_t)refcount); 3062 rc = 2; 3063 } else { 3064 (void) printf("Verified %s feature refcount " 3065 "of %llu is correct\n", 3066 spa_feature_table[f].fi_uname, 3067 (longlong_t)refcount); 3068 } 3069 } 3070 } 3071 if (rc == 0 && (dump_opt['b'] || dump_opt['c'])) 3072 rc = dump_block_stats(spa); 3073 3074 if (rc == 0) 3075 rc = verify_spacemap_refcounts(spa); 3076 3077 if (dump_opt['s']) 3078 show_pool_stats(spa); 3079 3080 if (dump_opt['h']) 3081 dump_history(spa); 3082 3083 if (rc != 0) 3084 exit(rc); 3085 } 3086 3087 #define ZDB_FLAG_CHECKSUM 0x0001 3088 #define ZDB_FLAG_DECOMPRESS 0x0002 3089 #define ZDB_FLAG_BSWAP 0x0004 3090 #define ZDB_FLAG_GBH 0x0008 3091 #define ZDB_FLAG_INDIRECT 0x0010 3092 #define ZDB_FLAG_PHYS 0x0020 3093 #define ZDB_FLAG_RAW 0x0040 3094 #define ZDB_FLAG_PRINT_BLKPTR 0x0080 3095 
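
/*
 * flagbits[] maps the single-character flags that may follow a -R block
 * descriptor (vdev:offset:size[:flags]) onto the ZDB_FLAG_* bits above; the
 * table is filled in from main().  A hypothetical invocation such as
 *
 *	zdb -R tank 0:400000:20000:dr
 *
 * would read 0x20000 bytes at offset 0x400000 of top-level vdev 0 in pool
 * 'tank', attempt to decompress them (d), and dump the raw result to
 * stdout (r).
 */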
3096 int flagbits[256]; 3097 3098 static void 3099 zdb_print_blkptr(blkptr_t *bp, int flags) 3100 { 3101 char blkbuf[BP_SPRINTF_LEN]; 3102 3103 if (flags & ZDB_FLAG_BSWAP) 3104 byteswap_uint64_array((void *)bp, sizeof (blkptr_t)); 3105 3106 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 3107 (void) printf("%s\n", blkbuf); 3108 } 3109 3110 static void 3111 zdb_dump_indirect(blkptr_t *bp, int nbps, int flags) 3112 { 3113 int i; 3114 3115 for (i = 0; i < nbps; i++) 3116 zdb_print_blkptr(&bp[i], flags); 3117 } 3118 3119 static void 3120 zdb_dump_gbh(void *buf, int flags) 3121 { 3122 zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags); 3123 } 3124 3125 static void 3126 zdb_dump_block_raw(void *buf, uint64_t size, int flags) 3127 { 3128 if (flags & ZDB_FLAG_BSWAP) 3129 byteswap_uint64_array(buf, size); 3130 (void) write(1, buf, size); 3131 } 3132 3133 static void 3134 zdb_dump_block(char *label, void *buf, uint64_t size, int flags) 3135 { 3136 uint64_t *d = (uint64_t *)buf; 3137 int nwords = size / sizeof (uint64_t); 3138 int do_bswap = !!(flags & ZDB_FLAG_BSWAP); 3139 int i, j; 3140 char *hdr, *c; 3141 3142 3143 if (do_bswap) 3144 hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8"; 3145 else 3146 hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f"; 3147 3148 (void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr); 3149 3150 for (i = 0; i < nwords; i += 2) { 3151 (void) printf("%06llx: %016llx %016llx ", 3152 (u_longlong_t)(i * sizeof (uint64_t)), 3153 (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]), 3154 (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1])); 3155 3156 c = (char *)&d[i]; 3157 for (j = 0; j < 2 * sizeof (uint64_t); j++) 3158 (void) printf("%c", isprint(c[j]) ? c[j] : '.'); 3159 (void) printf("\n"); 3160 } 3161 } 3162 3163 /* 3164 * There are two acceptable formats: 3165 * leaf_name - For example: c1t0d0 or /tmp/ztest.0a 3166 * child[.child]* - For example: 0.1.1 3167 * 3168 * The second form can be used to specify arbitrary vdevs anywhere 3169 * in the heirarchy. For example, in a pool with a mirror of 3170 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 . 3171 */ 3172 static vdev_t * 3173 zdb_vdev_lookup(vdev_t *vdev, char *path) 3174 { 3175 char *s, *p, *q; 3176 int i; 3177 3178 if (vdev == NULL) 3179 return (NULL); 3180 3181 /* First, assume the x.x.x.x format */ 3182 i = (int)strtoul(path, &s, 10); 3183 if (s == path || (s && *s != '.' && *s != '\0')) 3184 goto name; 3185 if (i < 0 || i >= vdev->vdev_children) 3186 return (NULL); 3187 3188 vdev = vdev->vdev_child[i]; 3189 if (*s == '\0') 3190 return (vdev); 3191 return (zdb_vdev_lookup(vdev, s+1)); 3192 3193 name: 3194 for (i = 0; i < vdev->vdev_children; i++) { 3195 vdev_t *vc = vdev->vdev_child[i]; 3196 3197 if (vc->vdev_path == NULL) { 3198 vc = zdb_vdev_lookup(vc, path); 3199 if (vc == NULL) 3200 continue; 3201 else 3202 return (vc); 3203 } 3204 3205 p = strrchr(vc->vdev_path, '/'); 3206 p = p ? p + 1 : vc->vdev_path; 3207 q = &vc->vdev_path[strlen(vc->vdev_path) - 2]; 3208 3209 if (strcmp(vc->vdev_path, path) == 0) 3210 return (vc); 3211 if (strcmp(p, path) == 0) 3212 return (vc); 3213 if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0) 3214 return (vc); 3215 } 3216 3217 return (NULL); 3218 } 3219 3220 /* 3221 * Read a block from a pool and print it out. 
The syntax of the 3222 * block descriptor is: 3223 * 3224 * pool:vdev_specifier:offset:size[:flags] 3225 * 3226 * pool - The name of the pool you wish to read from 3227 * vdev_specifier - Which vdev (see comment for zdb_vdev_lookup) 3228 * offset - offset, in hex, in bytes 3229 * size - Amount of data to read, in hex, in bytes 3230 * flags - A string of characters specifying options 3231 * b: Decode a blkptr at given offset within block 3232 * *c: Calculate and display checksums 3233 * d: Decompress data before dumping 3234 * e: Byteswap data before dumping 3235 * g: Display data as a gang block header 3236 * i: Display as an indirect block 3237 * p: Do I/O to physical offset 3238 * r: Dump raw data to stdout 3239 * 3240 * * = not yet implemented 3241 */ 3242 static void 3243 zdb_read_block(char *thing, spa_t *spa) 3244 { 3245 blkptr_t blk, *bp = &blk; 3246 dva_t *dva = bp->blk_dva; 3247 int flags = 0; 3248 uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0; 3249 zio_t *zio; 3250 vdev_t *vd; 3251 void *pbuf, *lbuf, *buf; 3252 char *s, *p, *dup, *vdev, *flagstr; 3253 int i, error; 3254 3255 dup = strdup(thing); 3256 s = strtok(dup, ":"); 3257 vdev = s ? s : ""; 3258 s = strtok(NULL, ":"); 3259 offset = strtoull(s ? s : "", NULL, 16); 3260 s = strtok(NULL, ":"); 3261 size = strtoull(s ? s : "", NULL, 16); 3262 s = strtok(NULL, ":"); 3263 flagstr = s ? s : ""; 3264 3265 s = NULL; 3266 if (size == 0) 3267 s = "size must not be zero"; 3268 if (!IS_P2ALIGNED(size, DEV_BSIZE)) 3269 s = "size must be a multiple of sector size"; 3270 if (!IS_P2ALIGNED(offset, DEV_BSIZE)) 3271 s = "offset must be a multiple of sector size"; 3272 if (s) { 3273 (void) printf("Invalid block specifier: %s - %s\n", thing, s); 3274 free(dup); 3275 return; 3276 } 3277 3278 for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) { 3279 for (i = 0; flagstr[i]; i++) { 3280 int bit = flagbits[(uchar_t)flagstr[i]]; 3281 3282 if (bit == 0) { 3283 (void) printf("***Invalid flag: %c\n", 3284 flagstr[i]); 3285 continue; 3286 } 3287 flags |= bit; 3288 3289 /* If it's not something with an argument, keep going */ 3290 if ((bit & (ZDB_FLAG_CHECKSUM | 3291 ZDB_FLAG_PRINT_BLKPTR)) == 0) 3292 continue; 3293 3294 p = &flagstr[i + 1]; 3295 if (bit == ZDB_FLAG_PRINT_BLKPTR) 3296 blkptr_offset = strtoull(p, &p, 16); 3297 if (*p != ':' && *p != '\0') { 3298 (void) printf("***Invalid flag arg: '%s'\n", s); 3299 free(dup); 3300 return; 3301 } 3302 } 3303 } 3304 3305 vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev); 3306 if (vd == NULL) { 3307 (void) printf("***Invalid vdev: %s\n", vdev); 3308 free(dup); 3309 return; 3310 } else { 3311 if (vd->vdev_path) 3312 (void) fprintf(stderr, "Found vdev: %s\n", 3313 vd->vdev_path); 3314 else 3315 (void) fprintf(stderr, "Found vdev type: %s\n", 3316 vd->vdev_ops->vdev_op_type); 3317 } 3318 3319 psize = size; 3320 lsize = size; 3321 3322 pbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3323 lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3324 3325 BP_ZERO(bp); 3326 3327 DVA_SET_VDEV(&dva[0], vd->vdev_id); 3328 DVA_SET_OFFSET(&dva[0], offset); 3329 DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH)); 3330 DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize)); 3331 3332 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL); 3333 3334 BP_SET_LSIZE(bp, lsize); 3335 BP_SET_PSIZE(bp, psize); 3336 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF); 3337 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF); 3338 BP_SET_TYPE(bp, DMU_OT_NONE); 3339 BP_SET_LEVEL(bp, 0); 3340 BP_SET_DEDUP(bp, 0); 3341 BP_SET_BYTEORDER(bp, 
ZFS_HOST_BYTEORDER); 3342 3343 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3344 zio = zio_root(spa, NULL, NULL, 0); 3345 3346 if (vd == vd->vdev_top) { 3347 /* 3348 * Treat this as a normal block read. 3349 */ 3350 zio_nowait(zio_read(zio, spa, bp, pbuf, psize, NULL, NULL, 3351 ZIO_PRIORITY_SYNC_READ, 3352 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL)); 3353 } else { 3354 /* 3355 * Treat this as a vdev child I/O. 3356 */ 3357 zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pbuf, psize, 3358 ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ, 3359 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE | 3360 ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY | 3361 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL, NULL)); 3362 } 3363 3364 error = zio_wait(zio); 3365 spa_config_exit(spa, SCL_STATE, FTAG); 3366 3367 if (error) { 3368 (void) printf("Read of %s failed, error: %d\n", thing, error); 3369 goto out; 3370 } 3371 3372 if (flags & ZDB_FLAG_DECOMPRESS) { 3373 /* 3374 * We don't know how the data was compressed, so just try 3375 * every decompress function at every inflated blocksize. 3376 */ 3377 enum zio_compress c; 3378 void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3379 void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3380 3381 bcopy(pbuf, pbuf2, psize); 3382 3383 VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf + psize, 3384 SPA_MAXBLOCKSIZE - psize) == 0); 3385 3386 VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize, 3387 SPA_MAXBLOCKSIZE - psize) == 0); 3388 3389 for (lsize = SPA_MAXBLOCKSIZE; lsize > psize; 3390 lsize -= SPA_MINBLOCKSIZE) { 3391 for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) { 3392 if (zio_decompress_data(c, pbuf, lbuf, 3393 psize, lsize) == 0 && 3394 zio_decompress_data(c, pbuf2, lbuf2, 3395 psize, lsize) == 0 && 3396 bcmp(lbuf, lbuf2, lsize) == 0) 3397 break; 3398 } 3399 if (c != ZIO_COMPRESS_FUNCTIONS) 3400 break; 3401 lsize -= SPA_MINBLOCKSIZE; 3402 } 3403 3404 umem_free(pbuf2, SPA_MAXBLOCKSIZE); 3405 umem_free(lbuf2, SPA_MAXBLOCKSIZE); 3406 3407 if (lsize <= psize) { 3408 (void) printf("Decompress of %s failed\n", thing); 3409 goto out; 3410 } 3411 buf = lbuf; 3412 size = lsize; 3413 } else { 3414 buf = pbuf; 3415 size = psize; 3416 } 3417 3418 if (flags & ZDB_FLAG_PRINT_BLKPTR) 3419 zdb_print_blkptr((blkptr_t *)(void *) 3420 ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags); 3421 else if (flags & ZDB_FLAG_RAW) 3422 zdb_dump_block_raw(buf, size, flags); 3423 else if (flags & ZDB_FLAG_INDIRECT) 3424 zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t), 3425 flags); 3426 else if (flags & ZDB_FLAG_GBH) 3427 zdb_dump_gbh(buf, flags); 3428 else 3429 zdb_dump_block(thing, buf, size, flags); 3430 3431 out: 3432 umem_free(pbuf, SPA_MAXBLOCKSIZE); 3433 umem_free(lbuf, SPA_MAXBLOCKSIZE); 3434 free(dup); 3435 } 3436 3437 static boolean_t 3438 pool_match(nvlist_t *cfg, char *tgt) 3439 { 3440 uint64_t v, guid = strtoull(tgt, NULL, 0); 3441 char *s; 3442 3443 if (guid != 0) { 3444 if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0) 3445 return (v == guid); 3446 } else { 3447 if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0) 3448 return (strcmp(s, tgt) == 0); 3449 } 3450 return (B_FALSE); 3451 } 3452 3453 static char * 3454 find_zpool(char **target, nvlist_t **configp, int dirc, char **dirv) 3455 { 3456 nvlist_t *pools; 3457 nvlist_t *match = NULL; 3458 char *name = NULL; 3459 char *sepp = NULL; 3460 char sep; 3461 int count = 0; 3462 importargs_t args = { 0 }; 3463 3464 args.paths = dirc; 3465 args.path = dirv; 3466 args.can_be_active = B_TRUE; 3467 3468 if 
((sepp = strpbrk(*target, "/@")) != NULL) { 3469 sep = *sepp; 3470 *sepp = '\0'; 3471 } 3472 3473 pools = zpool_search_import(g_zfs, &args); 3474 3475 if (pools != NULL) { 3476 nvpair_t *elem = NULL; 3477 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 3478 verify(nvpair_value_nvlist(elem, configp) == 0); 3479 if (pool_match(*configp, *target)) { 3480 count++; 3481 if (match != NULL) { 3482 /* print previously found config */ 3483 if (name != NULL) { 3484 (void) printf("%s\n", name); 3485 dump_nvlist(match, 8); 3486 name = NULL; 3487 } 3488 (void) printf("%s\n", 3489 nvpair_name(elem)); 3490 dump_nvlist(*configp, 8); 3491 } else { 3492 match = *configp; 3493 name = nvpair_name(elem); 3494 } 3495 } 3496 } 3497 } 3498 if (count > 1) 3499 (void) fatal("\tMatched %d pools - use pool GUID " 3500 "instead of pool name or \n" 3501 "\tpool name part of a dataset name to select pool", count); 3502 3503 if (sepp) 3504 *sepp = sep; 3505 /* 3506 * If pool GUID was specified for pool id, replace it with pool name 3507 */ 3508 if (name && (strstr(*target, name) != *target)) { 3509 int sz = 1 + strlen(name) + ((sepp) ? strlen(sepp) : 0); 3510 3511 *target = umem_alloc(sz, UMEM_NOFAIL); 3512 (void) snprintf(*target, sz, "%s%s", name, sepp ? sepp : ""); 3513 } 3514 3515 *configp = name ? match : NULL; 3516 3517 return (name); 3518 } 3519 3520 int 3521 main(int argc, char **argv) 3522 { 3523 int i, c; 3524 struct rlimit rl = { 1024, 1024 }; 3525 spa_t *spa = NULL; 3526 objset_t *os = NULL; 3527 int dump_all = 1; 3528 int verbose = 0; 3529 int error = 0; 3530 char **searchdirs = NULL; 3531 int nsearch = 0; 3532 char *target; 3533 nvlist_t *policy = NULL; 3534 uint64_t max_txg = UINT64_MAX; 3535 int rewind = ZPOOL_NEVER_REWIND; 3536 char *spa_config_path_env; 3537 3538 (void) setrlimit(RLIMIT_NOFILE, &rl); 3539 (void) enable_extended_FILE_stdio(-1, -1); 3540 3541 dprintf_setup(&argc, argv); 3542 3543 /* 3544 * If there is an environment variable SPA_CONFIG_PATH it overrides 3545 * default spa_config_path setting. If -U flag is specified it will 3546 * override this environment variable settings once again. 
3547 */ 3548 spa_config_path_env = getenv("SPA_CONFIG_PATH"); 3549 if (spa_config_path_env != NULL) 3550 spa_config_path = spa_config_path_env; 3551 3552 while ((c = getopt(argc, argv, 3553 "bcdhilmMI:suCDRSAFLXx:evp:t:U:P")) != -1) { 3554 switch (c) { 3555 case 'b': 3556 case 'c': 3557 case 'd': 3558 case 'h': 3559 case 'i': 3560 case 'l': 3561 case 'm': 3562 case 's': 3563 case 'u': 3564 case 'C': 3565 case 'D': 3566 case 'M': 3567 case 'R': 3568 case 'S': 3569 dump_opt[c]++; 3570 dump_all = 0; 3571 break; 3572 case 'A': 3573 case 'F': 3574 case 'L': 3575 case 'X': 3576 case 'e': 3577 case 'P': 3578 dump_opt[c]++; 3579 break; 3580 case 'I': 3581 max_inflight = strtoull(optarg, NULL, 0); 3582 if (max_inflight == 0) { 3583 (void) fprintf(stderr, "maximum number " 3584 "of inflight I/Os must be greater " 3585 "than 0\n"); 3586 usage(); 3587 } 3588 break; 3589 case 'p': 3590 if (searchdirs == NULL) { 3591 searchdirs = umem_alloc(sizeof (char *), 3592 UMEM_NOFAIL); 3593 } else { 3594 char **tmp = umem_alloc((nsearch + 1) * 3595 sizeof (char *), UMEM_NOFAIL); 3596 bcopy(searchdirs, tmp, nsearch * 3597 sizeof (char *)); 3598 umem_free(searchdirs, 3599 nsearch * sizeof (char *)); 3600 searchdirs = tmp; 3601 } 3602 searchdirs[nsearch++] = optarg; 3603 break; 3604 case 't': 3605 max_txg = strtoull(optarg, NULL, 0); 3606 if (max_txg < TXG_INITIAL) { 3607 (void) fprintf(stderr, "incorrect txg " 3608 "specified: %s\n", optarg); 3609 usage(); 3610 } 3611 break; 3612 case 'U': 3613 spa_config_path = optarg; 3614 break; 3615 case 'v': 3616 verbose++; 3617 break; 3618 case 'x': 3619 vn_dumpdir = optarg; 3620 break; 3621 default: 3622 usage(); 3623 break; 3624 } 3625 } 3626 3627 if (!dump_opt['e'] && searchdirs != NULL) { 3628 (void) fprintf(stderr, "-p option requires use of -e\n"); 3629 usage(); 3630 } 3631 3632 /* 3633 * ZDB does not typically re-read blocks; therefore limit the ARC 3634 * to 256 MB, which can be used entirely for metadata. 3635 */ 3636 zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024; 3637 3638 /* 3639 * "zdb -c" uses checksum-verifying scrub i/os which are async reads. 3640 * "zdb -b" uses traversal prefetch which uses async reads. 3641 * For good performance, let several of them be active at once. 3642 */ 3643 zfs_vdev_async_read_max_active = 10; 3644 3645 kernel_init(FREAD); 3646 g_zfs = libzfs_init(); 3647 ASSERT(g_zfs != NULL); 3648 3649 if (dump_all) 3650 verbose = MAX(verbose, 1); 3651 3652 for (c = 0; c < 256; c++) { 3653 if (dump_all && !strchr("elAFLRSXP", c)) 3654 dump_opt[c] = 1; 3655 if (dump_opt[c]) 3656 dump_opt[c] += verbose; 3657 } 3658 3659 aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2); 3660 zfs_recover = (dump_opt['A'] > 1); 3661 3662 argc -= optind; 3663 argv += optind; 3664 3665 if (argc < 2 && dump_opt['R']) 3666 usage(); 3667 if (argc < 1) { 3668 if (!dump_opt['e'] && dump_opt['C']) { 3669 dump_cachefile(spa_config_path); 3670 return (0); 3671 } 3672 usage(); 3673 } 3674 3675 if (dump_opt['l']) { 3676 dump_label(argv[0]); 3677 return (0); 3678 } 3679 3680 if (dump_opt['X'] || dump_opt['F']) 3681 rewind = ZPOOL_DO_REWIND | 3682 (dump_opt['X'] ? 
ZPOOL_EXTREME_REWIND : 0); 3683 3684 if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 || 3685 nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg) != 0 || 3686 nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind) != 0) 3687 fatal("internal error: %s", strerror(ENOMEM)); 3688 3689 error = 0; 3690 target = argv[0]; 3691 3692 if (dump_opt['e']) { 3693 nvlist_t *cfg = NULL; 3694 char *name = find_zpool(&target, &cfg, nsearch, searchdirs); 3695 3696 error = ENOENT; 3697 if (name) { 3698 if (dump_opt['C'] > 1) { 3699 (void) printf("\nConfiguration for import:\n"); 3700 dump_nvlist(cfg, 8); 3701 } 3702 if (nvlist_add_nvlist(cfg, 3703 ZPOOL_REWIND_POLICY, policy) != 0) { 3704 fatal("can't open '%s': %s", 3705 target, strerror(ENOMEM)); 3706 } 3707 if ((error = spa_import(name, cfg, NULL, 3708 ZFS_IMPORT_MISSING_LOG)) != 0) { 3709 error = spa_import(name, cfg, NULL, 3710 ZFS_IMPORT_VERBATIM); 3711 } 3712 } 3713 } 3714 3715 if (error == 0) { 3716 if (strpbrk(target, "/@") == NULL || dump_opt['R']) { 3717 error = spa_open_rewind(target, &spa, FTAG, policy, 3718 NULL); 3719 if (error) { 3720 /* 3721 * If we're missing the log device then 3722 * try opening the pool after clearing the 3723 * log state. 3724 */ 3725 mutex_enter(&spa_namespace_lock); 3726 if ((spa = spa_lookup(target)) != NULL && 3727 spa->spa_log_state == SPA_LOG_MISSING) { 3728 spa->spa_log_state = SPA_LOG_CLEAR; 3729 error = 0; 3730 } 3731 mutex_exit(&spa_namespace_lock); 3732 3733 if (!error) { 3734 error = spa_open_rewind(target, &spa, 3735 FTAG, policy, NULL); 3736 } 3737 } 3738 } else { 3739 error = dmu_objset_own(target, DMU_OST_ANY, 3740 B_TRUE, FTAG, &os); 3741 } 3742 } 3743 nvlist_free(policy); 3744 3745 if (error) 3746 fatal("can't open '%s': %s", target, strerror(error)); 3747 3748 argv++; 3749 argc--; 3750 if (!dump_opt['R']) { 3751 if (argc > 0) { 3752 zopt_objects = argc; 3753 zopt_object = calloc(zopt_objects, sizeof (uint64_t)); 3754 for (i = 0; i < zopt_objects; i++) { 3755 errno = 0; 3756 zopt_object[i] = strtoull(argv[i], NULL, 0); 3757 if (zopt_object[i] == 0 && errno != 0) 3758 fatal("bad number %s: %s", 3759 argv[i], strerror(errno)); 3760 } 3761 } 3762 if (os != NULL) { 3763 dump_dir(os); 3764 } else if (zopt_objects > 0 && !dump_opt['m']) { 3765 dump_dir(spa->spa_meta_objset); 3766 } else { 3767 dump_zpool(spa); 3768 } 3769 } else { 3770 flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR; 3771 flagbits['c'] = ZDB_FLAG_CHECKSUM; 3772 flagbits['d'] = ZDB_FLAG_DECOMPRESS; 3773 flagbits['e'] = ZDB_FLAG_BSWAP; 3774 flagbits['g'] = ZDB_FLAG_GBH; 3775 flagbits['i'] = ZDB_FLAG_INDIRECT; 3776 flagbits['p'] = ZDB_FLAG_PHYS; 3777 flagbits['r'] = ZDB_FLAG_RAW; 3778 3779 for (i = 0; i < argc; i++) 3780 zdb_read_block(argv[i], spa); 3781 } 3782 3783 (os != NULL) ? dmu_objset_disown(os, FTAG) : spa_close(spa, FTAG); 3784 3785 fuid_table_destroy(); 3786 sa_loaded = B_FALSE; 3787 3788 libzfs_fini(g_zfs); 3789 kernel_fini(); 3790 3791 return (0); 3792 } 3793