/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <stdio.h>
#include <unistd.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <zfs_comutil.h>
#include <libcmdutils.h>
#undef verify
#include <libzfs.h>

#define	ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ?	\
	zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ?	\
	zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ?	\
	dmu_ot[(idx)].ot_name : DMU_OT_IS_VALID(idx) ?	\
	dmu_ot_byteswap[DMU_OT_BYTESWAP(idx)].ob_name : "UNKNOWN")
#define	ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) :	\
	(((idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA) ?	\
	DMU_OT_ZAP_OTHER : DMU_OT_NUMTYPES))

#ifndef lint
extern boolean_t zfs_recover;
extern uint64_t zfs_arc_max, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
#else
boolean_t zfs_recover;
uint64_t zfs_arc_max, zfs_arc_meta_limit;
int zfs_vdev_async_read_max_active;
#endif

const char cmdname[] = "zdb";
uint8_t dump_opt[256];

typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);

extern void dump_intent_log(zilog_t *);
uint64_t *zopt_object = NULL;
int zopt_objects = 0;
libzfs_handle_t *g_zfs;
uint64_t max_inflight = 1000;

static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *);

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init()
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}

static void
usage(void)
{
	(void) fprintf(stderr,
	    "Usage: %s [-CumMdibcsDvhLXFPA] [-t txg] [-e [-p path...]] "
	    "[-U config] [-I inflight I/Os] [-x dumpdir] poolname [object...]\n"
	    "       %s [-divPA] [-e -p path...] [-U config] dataset "
	    "[object...]\n"
	    "       %s -mM [-LXFPA] [-t txg] [-e [-p path...]] [-U config] "
	    "poolname [vdev [metaslab...]]\n"
	    "       %s -R [-A] [-e [-p path...]] poolname "
	    "vdev:offset:size[:flags]\n"
	    "       %s -S [-PA] [-e [-p path...]] [-U config] poolname\n"
	    "       %s -l [-uA] device\n"
	    "       %s -C [-A] [-U config]\n\n",
	    cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname);

	(void) fprintf(stderr, "    Dataset name must include at least one "
	    "separator character '/' or '@'\n");
	(void) fprintf(stderr, "    If dataset name is specified, only that "
	    "dataset is dumped\n");
	(void) fprintf(stderr, "    If object numbers are specified, only "
	    "those objects are dumped\n\n");
	(void) fprintf(stderr, "    Options to control amount of output:\n");
	(void) fprintf(stderr, "        -u uberblock\n");
	(void) fprintf(stderr, "        -d dataset(s)\n");
	(void) fprintf(stderr, "        -i intent logs\n");
	(void) fprintf(stderr, "        -C config (or cachefile if alone)\n");
	(void) fprintf(stderr, "        -h pool history\n");
	(void) fprintf(stderr, "        -b block statistics\n");
	(void) fprintf(stderr, "        -m metaslabs\n");
	(void) fprintf(stderr, "        -M metaslab groups\n");
	(void) fprintf(stderr, "        -c checksum all metadata (twice for "
	    "all data) blocks\n");
	(void) fprintf(stderr, "        -s report stats on zdb's I/O\n");
	(void) fprintf(stderr, "        -D dedup statistics\n");
	(void) fprintf(stderr, "        -S simulate dedup to measure effect\n");
	(void) fprintf(stderr, "        -v verbose (applies to all others)\n");
	(void) fprintf(stderr, "        -l dump label contents\n");
	(void) fprintf(stderr, "        -L disable leak tracking (do not "
	    "load spacemaps)\n");
	(void) fprintf(stderr, "        -R read and display block from a "
	    "device\n\n");
	(void) fprintf(stderr, "    Below options are intended for use "
	    "with other options:\n");
	(void) fprintf(stderr, "        -A ignore assertions (-A), enable "
	    "panic recovery (-AA) or both (-AAA)\n");
	(void) fprintf(stderr, "        -F attempt automatic rewind within "
	    "safe range of transaction groups\n");
	(void) fprintf(stderr, "        -U <cachefile_path> -- use alternate "
	    "cachefile\n");
	(void) fprintf(stderr, "        -X attempt extreme rewind (does not "
	    "work with dataset)\n");
	(void) fprintf(stderr, "        -e pool is exported/destroyed/"
	    "has altroot/not in a cachefile\n");
	(void) fprintf(stderr, "        -p <path> -- use one or more with "
	    "-e to specify path to vdev dir\n");
	(void) fprintf(stderr, "        -x <dumpdir> -- "
	    "dump all read blocks into specified directory\n");
	(void) fprintf(stderr, "        -P print numbers in parseable form\n");
	(void) fprintf(stderr, "        -t <txg> -- highest txg to use when "
	    "searching for uberblocks\n");
	(void) fprintf(stderr, "        -I <number of inflight I/Os> -- "
	    "specify the maximum number of "
	    "checksumming I/Os [default is 1000]\n");
	(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
	    "to make only that option verbose\n");
	(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
	exit(1);
}

/*
 * Called for usage errors that are discovered after a call to spa_open(),
 * dmu_bonus_hold(), or pool_match().  abort() is called for other errors.
 */

static void
fatal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void) fprintf(stderr, "%s: ", cmdname);
	(void) vfprintf(stderr, fmt, ap);
	va_end(ap);
	(void) fprintf(stderr, "\n");

	exit(1);
}

/* ARGSUSED */
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
	nvlist_t *nv;
	size_t nvsize = *(uint64_t *)data;
	char *packed = umem_alloc(nvsize, UMEM_NOFAIL);

	VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));

	VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);

	umem_free(packed, nvsize);

	dump_nvlist(nv, 8);

	nvlist_free(nv);
}

/* ARGSUSED */
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
	spa_history_phys_t *shp = data;

	if (shp == NULL)
		return;

	(void) printf("\t\tpool_create_len = %llu\n",
	    (u_longlong_t)shp->sh_pool_create_len);
	(void) printf("\t\tphys_max_off = %llu\n",
	    (u_longlong_t)shp->sh_phys_max_off);
	(void) printf("\t\tbof = %llu\n",
	    (u_longlong_t)shp->sh_bof);
	(void) printf("\t\teof = %llu\n",
	    (u_longlong_t)shp->sh_eof);
	(void) printf("\t\trecords_lost = %llu\n",
	    (u_longlong_t)shp->sh_records_lost);
}

static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
	if (dump_opt['P'])
		(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
	else
		nicenum(num, buf, buflen);
}

const char histo_stars[] = "****************************************";
const int histo_width = sizeof (histo_stars) - 1;

static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
	int i;
	int minidx = size - 1;
	int maxidx = 0;
	uint64_t max = 0;

	for (i = 0; i < size; i++) {
		if (histo[i] > max)
			max = histo[i];
		if (histo[i] > 0 && i > maxidx)
			maxidx = i;
		if (histo[i] > 0 && i < minidx)
			minidx = i;
	}

	if (max < histo_width)
		max = histo_width;

	for (i = minidx; i <= maxidx; i++) {
		(void) printf("\t\t\t%3u: %6llu %s\n",
		    i + offset, (u_longlong_t)histo[i],
		    &histo_stars[(max - histo[i]) * histo_width / max]);
	}
}

static void
dump_zap_stats(objset_t *os, uint64_t object)
{
	int error;
	zap_stats_t zs;

	error = zap_get_stats(os, object, &zs);
	if (error)
		return;

	if (zs.zs_ptrtbl_len == 0) {
		ASSERT(zs.zs_num_blocks == 1);
		(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
		    (u_longlong_t)zs.zs_blocksize,
		    (u_longlong_t)zs.zs_num_entries);
		return;
	}

	(void) printf("\tFat ZAP stats:\n");

	(void) printf("\t\tPointer table:\n");
	(void) printf("\t\t\t%llu elements\n",
	    (u_longlong_t)zs.zs_ptrtbl_len);
	(void) printf("\t\t\tzt_blk: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_blk);
	(void) printf("\t\t\tzt_numblks: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_numblks);
	(void) printf("\t\t\tzt_shift: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_shift);
	(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied); 317 (void) printf("\t\t\tzt_nextblk: %llu\n", 318 (u_longlong_t)zs.zs_ptrtbl_nextblk); 319 320 (void) printf("\t\tZAP entries: %llu\n", 321 (u_longlong_t)zs.zs_num_entries); 322 (void) printf("\t\tLeaf blocks: %llu\n", 323 (u_longlong_t)zs.zs_num_leafs); 324 (void) printf("\t\tTotal blocks: %llu\n", 325 (u_longlong_t)zs.zs_num_blocks); 326 (void) printf("\t\tzap_block_type: 0x%llx\n", 327 (u_longlong_t)zs.zs_block_type); 328 (void) printf("\t\tzap_magic: 0x%llx\n", 329 (u_longlong_t)zs.zs_magic); 330 (void) printf("\t\tzap_salt: 0x%llx\n", 331 (u_longlong_t)zs.zs_salt); 332 333 (void) printf("\t\tLeafs with 2^n pointers:\n"); 334 dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0); 335 336 (void) printf("\t\tBlocks with n*5 entries:\n"); 337 dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0); 338 339 (void) printf("\t\tBlocks n/10 full:\n"); 340 dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0); 341 342 (void) printf("\t\tEntries with n chunks:\n"); 343 dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0); 344 345 (void) printf("\t\tBuckets with n entries:\n"); 346 dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0); 347 } 348 349 /*ARGSUSED*/ 350 static void 351 dump_none(objset_t *os, uint64_t object, void *data, size_t size) 352 { 353 } 354 355 /*ARGSUSED*/ 356 static void 357 dump_unknown(objset_t *os, uint64_t object, void *data, size_t size) 358 { 359 (void) printf("\tUNKNOWN OBJECT TYPE\n"); 360 } 361 362 /*ARGSUSED*/ 363 void 364 dump_uint8(objset_t *os, uint64_t object, void *data, size_t size) 365 { 366 } 367 368 /*ARGSUSED*/ 369 static void 370 dump_uint64(objset_t *os, uint64_t object, void *data, size_t size) 371 { 372 } 373 374 /*ARGSUSED*/ 375 static void 376 dump_zap(objset_t *os, uint64_t object, void *data, size_t size) 377 { 378 zap_cursor_t zc; 379 zap_attribute_t attr; 380 void *prop; 381 int i; 382 383 dump_zap_stats(os, object); 384 (void) printf("\n"); 385 386 for (zap_cursor_init(&zc, os, object); 387 zap_cursor_retrieve(&zc, &attr) == 0; 388 zap_cursor_advance(&zc)) { 389 (void) printf("\t\t%s = ", attr.za_name); 390 if (attr.za_num_integers == 0) { 391 (void) printf("\n"); 392 continue; 393 } 394 prop = umem_zalloc(attr.za_num_integers * 395 attr.za_integer_length, UMEM_NOFAIL); 396 (void) zap_lookup(os, object, attr.za_name, 397 attr.za_integer_length, attr.za_num_integers, prop); 398 if (attr.za_integer_length == 1) { 399 (void) printf("%s", (char *)prop); 400 } else { 401 for (i = 0; i < attr.za_num_integers; i++) { 402 switch (attr.za_integer_length) { 403 case 2: 404 (void) printf("%u ", 405 ((uint16_t *)prop)[i]); 406 break; 407 case 4: 408 (void) printf("%u ", 409 ((uint32_t *)prop)[i]); 410 break; 411 case 8: 412 (void) printf("%lld ", 413 (u_longlong_t)((int64_t *)prop)[i]); 414 break; 415 } 416 } 417 } 418 (void) printf("\n"); 419 umem_free(prop, attr.za_num_integers * attr.za_integer_length); 420 } 421 zap_cursor_fini(&zc); 422 } 423 424 static void 425 dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size) 426 { 427 bpobj_phys_t *bpop = data; 428 char bytes[32], comp[32], uncomp[32]; 429 430 /* make sure the output won't get truncated */ 431 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ); 432 CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ); 433 CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ); 434 435 if (bpop == NULL) 436 return; 437 438 zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes)); 439 zdb_nicenum(bpop->bpo_comp, comp, sizeof 
(comp)); 440 zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp)); 441 442 (void) printf("\t\tnum_blkptrs = %llu\n", 443 (u_longlong_t)bpop->bpo_num_blkptrs); 444 (void) printf("\t\tbytes = %s\n", bytes); 445 if (size >= BPOBJ_SIZE_V1) { 446 (void) printf("\t\tcomp = %s\n", comp); 447 (void) printf("\t\tuncomp = %s\n", uncomp); 448 } 449 if (size >= sizeof (*bpop)) { 450 (void) printf("\t\tsubobjs = %llu\n", 451 (u_longlong_t)bpop->bpo_subobjs); 452 (void) printf("\t\tnum_subobjs = %llu\n", 453 (u_longlong_t)bpop->bpo_num_subobjs); 454 } 455 456 if (dump_opt['d'] < 5) 457 return; 458 459 for (uint64_t i = 0; i < bpop->bpo_num_blkptrs; i++) { 460 char blkbuf[BP_SPRINTF_LEN]; 461 blkptr_t bp; 462 463 int err = dmu_read(os, object, 464 i * sizeof (bp), sizeof (bp), &bp, 0); 465 if (err != 0) { 466 (void) printf("got error %u from dmu_read\n", err); 467 break; 468 } 469 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp); 470 (void) printf("\t%s\n", blkbuf); 471 } 472 } 473 474 /* ARGSUSED */ 475 static void 476 dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size) 477 { 478 dmu_object_info_t doi; 479 480 VERIFY0(dmu_object_info(os, object, &doi)); 481 uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP); 482 483 int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0); 484 if (err != 0) { 485 (void) printf("got error %u from dmu_read\n", err); 486 kmem_free(subobjs, doi.doi_max_offset); 487 return; 488 } 489 490 int64_t last_nonzero = -1; 491 for (uint64_t i = 0; i < doi.doi_max_offset / 8; i++) { 492 if (subobjs[i] != 0) 493 last_nonzero = i; 494 } 495 496 for (int64_t i = 0; i <= last_nonzero; i++) { 497 (void) printf("\t%llu\n", (longlong_t)subobjs[i]); 498 } 499 kmem_free(subobjs, doi.doi_max_offset); 500 } 501 502 /*ARGSUSED*/ 503 static void 504 dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size) 505 { 506 dump_zap_stats(os, object); 507 /* contents are printed elsewhere, properly decoded */ 508 } 509 510 /*ARGSUSED*/ 511 static void 512 dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size) 513 { 514 zap_cursor_t zc; 515 zap_attribute_t attr; 516 517 dump_zap_stats(os, object); 518 (void) printf("\n"); 519 520 for (zap_cursor_init(&zc, os, object); 521 zap_cursor_retrieve(&zc, &attr) == 0; 522 zap_cursor_advance(&zc)) { 523 (void) printf("\t\t%s = ", attr.za_name); 524 if (attr.za_num_integers == 0) { 525 (void) printf("\n"); 526 continue; 527 } 528 (void) printf(" %llx : [%d:%d:%d]\n", 529 (u_longlong_t)attr.za_first_integer, 530 (int)ATTR_LENGTH(attr.za_first_integer), 531 (int)ATTR_BSWAP(attr.za_first_integer), 532 (int)ATTR_NUM(attr.za_first_integer)); 533 } 534 zap_cursor_fini(&zc); 535 } 536 537 /*ARGSUSED*/ 538 static void 539 dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size) 540 { 541 zap_cursor_t zc; 542 zap_attribute_t attr; 543 uint16_t *layout_attrs; 544 int i; 545 546 dump_zap_stats(os, object); 547 (void) printf("\n"); 548 549 for (zap_cursor_init(&zc, os, object); 550 zap_cursor_retrieve(&zc, &attr) == 0; 551 zap_cursor_advance(&zc)) { 552 (void) printf("\t\t%s = [", attr.za_name); 553 if (attr.za_num_integers == 0) { 554 (void) printf("\n"); 555 continue; 556 } 557 558 VERIFY(attr.za_integer_length == 2); 559 layout_attrs = umem_zalloc(attr.za_num_integers * 560 attr.za_integer_length, UMEM_NOFAIL); 561 562 VERIFY(zap_lookup(os, object, attr.za_name, 563 attr.za_integer_length, 564 attr.za_num_integers, layout_attrs) == 0); 565 566 for (i = 0; i != attr.za_num_integers; i++) 
567 (void) printf(" %d ", (int)layout_attrs[i]); 568 (void) printf("]\n"); 569 umem_free(layout_attrs, 570 attr.za_num_integers * attr.za_integer_length); 571 } 572 zap_cursor_fini(&zc); 573 } 574 575 /*ARGSUSED*/ 576 static void 577 dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size) 578 { 579 zap_cursor_t zc; 580 zap_attribute_t attr; 581 const char *typenames[] = { 582 /* 0 */ "not specified", 583 /* 1 */ "FIFO", 584 /* 2 */ "Character Device", 585 /* 3 */ "3 (invalid)", 586 /* 4 */ "Directory", 587 /* 5 */ "5 (invalid)", 588 /* 6 */ "Block Device", 589 /* 7 */ "7 (invalid)", 590 /* 8 */ "Regular File", 591 /* 9 */ "9 (invalid)", 592 /* 10 */ "Symbolic Link", 593 /* 11 */ "11 (invalid)", 594 /* 12 */ "Socket", 595 /* 13 */ "Door", 596 /* 14 */ "Event Port", 597 /* 15 */ "15 (invalid)", 598 }; 599 600 dump_zap_stats(os, object); 601 (void) printf("\n"); 602 603 for (zap_cursor_init(&zc, os, object); 604 zap_cursor_retrieve(&zc, &attr) == 0; 605 zap_cursor_advance(&zc)) { 606 (void) printf("\t\t%s = %lld (type: %s)\n", 607 attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer), 608 typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]); 609 } 610 zap_cursor_fini(&zc); 611 } 612 613 int 614 get_dtl_refcount(vdev_t *vd) 615 { 616 int refcount = 0; 617 618 if (vd->vdev_ops->vdev_op_leaf) { 619 space_map_t *sm = vd->vdev_dtl_sm; 620 621 if (sm != NULL && 622 sm->sm_dbuf->db_size == sizeof (space_map_phys_t)) 623 return (1); 624 return (0); 625 } 626 627 for (int c = 0; c < vd->vdev_children; c++) 628 refcount += get_dtl_refcount(vd->vdev_child[c]); 629 return (refcount); 630 } 631 632 int 633 get_metaslab_refcount(vdev_t *vd) 634 { 635 int refcount = 0; 636 637 if (vd->vdev_top == vd && !vd->vdev_removing) { 638 for (int m = 0; m < vd->vdev_ms_count; m++) { 639 space_map_t *sm = vd->vdev_ms[m]->ms_sm; 640 641 if (sm != NULL && 642 sm->sm_dbuf->db_size == sizeof (space_map_phys_t)) 643 refcount++; 644 } 645 } 646 for (int c = 0; c < vd->vdev_children; c++) 647 refcount += get_metaslab_refcount(vd->vdev_child[c]); 648 649 return (refcount); 650 } 651 652 static int 653 verify_spacemap_refcounts(spa_t *spa) 654 { 655 uint64_t expected_refcount = 0; 656 uint64_t actual_refcount; 657 658 (void) feature_get_refcount(spa, 659 &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM], 660 &expected_refcount); 661 actual_refcount = get_dtl_refcount(spa->spa_root_vdev); 662 actual_refcount += get_metaslab_refcount(spa->spa_root_vdev); 663 664 if (expected_refcount != actual_refcount) { 665 (void) printf("space map refcount mismatch: expected %lld != " 666 "actual %lld\n", 667 (longlong_t)expected_refcount, 668 (longlong_t)actual_refcount); 669 return (2); 670 } 671 return (0); 672 } 673 674 static void 675 dump_spacemap(objset_t *os, space_map_t *sm) 676 { 677 uint64_t alloc, offset, entry; 678 char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID", 679 "INVALID", "INVALID", "INVALID", "INVALID" }; 680 681 if (sm == NULL) 682 return; 683 684 /* 685 * Print out the freelist entries in both encoded and decoded form. 
686 */ 687 alloc = 0; 688 for (offset = 0; offset < space_map_length(sm); 689 offset += sizeof (entry)) { 690 uint8_t mapshift = sm->sm_shift; 691 692 VERIFY0(dmu_read(os, space_map_object(sm), offset, 693 sizeof (entry), &entry, DMU_READ_PREFETCH)); 694 if (SM_DEBUG_DECODE(entry)) { 695 696 (void) printf("\t [%6llu] %s: txg %llu, pass %llu\n", 697 (u_longlong_t)(offset / sizeof (entry)), 698 ddata[SM_DEBUG_ACTION_DECODE(entry)], 699 (u_longlong_t)SM_DEBUG_TXG_DECODE(entry), 700 (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(entry)); 701 } else { 702 (void) printf("\t [%6llu] %c range:" 703 " %010llx-%010llx size: %06llx\n", 704 (u_longlong_t)(offset / sizeof (entry)), 705 SM_TYPE_DECODE(entry) == SM_ALLOC ? 'A' : 'F', 706 (u_longlong_t)((SM_OFFSET_DECODE(entry) << 707 mapshift) + sm->sm_start), 708 (u_longlong_t)((SM_OFFSET_DECODE(entry) << 709 mapshift) + sm->sm_start + 710 (SM_RUN_DECODE(entry) << mapshift)), 711 (u_longlong_t)(SM_RUN_DECODE(entry) << mapshift)); 712 if (SM_TYPE_DECODE(entry) == SM_ALLOC) 713 alloc += SM_RUN_DECODE(entry) << mapshift; 714 else 715 alloc -= SM_RUN_DECODE(entry) << mapshift; 716 } 717 } 718 if (alloc != space_map_allocated(sm)) { 719 (void) printf("space_map_object alloc (%llu) INCONSISTENT " 720 "with space map summary (%llu)\n", 721 (u_longlong_t)space_map_allocated(sm), (u_longlong_t)alloc); 722 } 723 } 724 725 static void 726 dump_metaslab_stats(metaslab_t *msp) 727 { 728 char maxbuf[32]; 729 range_tree_t *rt = msp->ms_tree; 730 avl_tree_t *t = &msp->ms_size_tree; 731 int free_pct = range_tree_space(rt) * 100 / msp->ms_size; 732 733 /* max sure nicenum has enough space */ 734 CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ); 735 736 zdb_nicenum(metaslab_block_maxsize(msp), maxbuf, sizeof (maxbuf)); 737 738 (void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n", 739 "segments", avl_numnodes(t), "maxsize", maxbuf, 740 "freepct", free_pct); 741 (void) printf("\tIn-memory histogram:\n"); 742 dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 743 } 744 745 static void 746 dump_metaslab(metaslab_t *msp) 747 { 748 vdev_t *vd = msp->ms_group->mg_vd; 749 spa_t *spa = vd->vdev_spa; 750 space_map_t *sm = msp->ms_sm; 751 char freebuf[32]; 752 753 zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf, 754 sizeof (freebuf)); 755 756 (void) printf( 757 "\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n", 758 (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start, 759 (u_longlong_t)space_map_object(sm), freebuf); 760 761 if (dump_opt['m'] > 2 && !dump_opt['L']) { 762 mutex_enter(&msp->ms_lock); 763 metaslab_load_wait(msp); 764 if (!msp->ms_loaded) { 765 VERIFY0(metaslab_load(msp)); 766 range_tree_stat_verify(msp->ms_tree); 767 } 768 dump_metaslab_stats(msp); 769 metaslab_unload(msp); 770 mutex_exit(&msp->ms_lock); 771 } 772 773 if (dump_opt['m'] > 1 && sm != NULL && 774 spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) { 775 /* 776 * The space map histogram represents free space in chunks 777 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift). 
778 */ 779 (void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n", 780 (u_longlong_t)msp->ms_fragmentation); 781 dump_histogram(sm->sm_phys->smp_histogram, 782 SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift); 783 } 784 785 if (dump_opt['d'] > 5 || dump_opt['m'] > 3) { 786 ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift)); 787 788 mutex_enter(&msp->ms_lock); 789 dump_spacemap(spa->spa_meta_objset, msp->ms_sm); 790 mutex_exit(&msp->ms_lock); 791 } 792 } 793 794 static void 795 print_vdev_metaslab_header(vdev_t *vd) 796 { 797 (void) printf("\tvdev %10llu\n\t%-10s%5llu %-19s %-15s %-10s\n", 798 (u_longlong_t)vd->vdev_id, 799 "metaslabs", (u_longlong_t)vd->vdev_ms_count, 800 "offset", "spacemap", "free"); 801 (void) printf("\t%15s %19s %15s %10s\n", 802 "---------------", "-------------------", 803 "---------------", "-------------"); 804 } 805 806 static void 807 dump_metaslab_groups(spa_t *spa) 808 { 809 vdev_t *rvd = spa->spa_root_vdev; 810 metaslab_class_t *mc = spa_normal_class(spa); 811 uint64_t fragmentation; 812 813 metaslab_class_histogram_verify(mc); 814 815 for (int c = 0; c < rvd->vdev_children; c++) { 816 vdev_t *tvd = rvd->vdev_child[c]; 817 metaslab_group_t *mg = tvd->vdev_mg; 818 819 if (mg->mg_class != mc) 820 continue; 821 822 metaslab_group_histogram_verify(mg); 823 mg->mg_fragmentation = metaslab_group_fragmentation(mg); 824 825 (void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t" 826 "fragmentation", 827 (u_longlong_t)tvd->vdev_id, 828 (u_longlong_t)tvd->vdev_ms_count); 829 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) { 830 (void) printf("%3s\n", "-"); 831 } else { 832 (void) printf("%3llu%%\n", 833 (u_longlong_t)mg->mg_fragmentation); 834 } 835 dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 836 } 837 838 (void) printf("\tpool %s\tfragmentation", spa_name(spa)); 839 fragmentation = metaslab_class_fragmentation(mc); 840 if (fragmentation == ZFS_FRAG_INVALID) 841 (void) printf("\t%3s\n", "-"); 842 else 843 (void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation); 844 dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 845 } 846 847 static void 848 dump_metaslabs(spa_t *spa) 849 { 850 vdev_t *vd, *rvd = spa->spa_root_vdev; 851 uint64_t m, c = 0, children = rvd->vdev_children; 852 853 (void) printf("\nMetaslabs:\n"); 854 855 if (!dump_opt['d'] && zopt_objects > 0) { 856 c = zopt_object[0]; 857 858 if (c >= children) 859 (void) fatal("bad vdev id: %llu", (u_longlong_t)c); 860 861 if (zopt_objects > 1) { 862 vd = rvd->vdev_child[c]; 863 print_vdev_metaslab_header(vd); 864 865 for (m = 1; m < zopt_objects; m++) { 866 if (zopt_object[m] < vd->vdev_ms_count) 867 dump_metaslab( 868 vd->vdev_ms[zopt_object[m]]); 869 else 870 (void) fprintf(stderr, "bad metaslab " 871 "number %llu\n", 872 (u_longlong_t)zopt_object[m]); 873 } 874 (void) printf("\n"); 875 return; 876 } 877 children = c + 1; 878 } 879 for (; c < children; c++) { 880 vd = rvd->vdev_child[c]; 881 print_vdev_metaslab_header(vd); 882 883 for (m = 0; m < vd->vdev_ms_count; m++) 884 dump_metaslab(vd->vdev_ms[m]); 885 (void) printf("\n"); 886 } 887 } 888 889 static void 890 dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index) 891 { 892 const ddt_phys_t *ddp = dde->dde_phys; 893 const ddt_key_t *ddk = &dde->dde_key; 894 char *types[4] = { "ditto", "single", "double", "triple" }; 895 char blkbuf[BP_SPRINTF_LEN]; 896 blkptr_t blk; 897 898 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 899 if (ddp->ddp_phys_birth == 0) 900 continue; 901 ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk); 
902 snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk); 903 (void) printf("index %llx refcnt %llu %s %s\n", 904 (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt, 905 types[p], blkbuf); 906 } 907 } 908 909 static void 910 dump_dedup_ratio(const ddt_stat_t *dds) 911 { 912 double rL, rP, rD, D, dedup, compress, copies; 913 914 if (dds->dds_blocks == 0) 915 return; 916 917 rL = (double)dds->dds_ref_lsize; 918 rP = (double)dds->dds_ref_psize; 919 rD = (double)dds->dds_ref_dsize; 920 D = (double)dds->dds_dsize; 921 922 dedup = rD / D; 923 compress = rL / rP; 924 copies = rD / rP; 925 926 (void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, " 927 "dedup * compress / copies = %.2f\n\n", 928 dedup, compress, copies, dedup * compress / copies); 929 } 930 931 static void 932 dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class) 933 { 934 char name[DDT_NAMELEN]; 935 ddt_entry_t dde; 936 uint64_t walk = 0; 937 dmu_object_info_t doi; 938 uint64_t count, dspace, mspace; 939 int error; 940 941 error = ddt_object_info(ddt, type, class, &doi); 942 943 if (error == ENOENT) 944 return; 945 ASSERT(error == 0); 946 947 if ((count = ddt_object_count(ddt, type, class)) == 0) 948 return; 949 950 dspace = doi.doi_physical_blocks_512 << 9; 951 mspace = doi.doi_fill_count * doi.doi_data_block_size; 952 953 ddt_object_name(ddt, type, class, name); 954 955 (void) printf("%s: %llu entries, size %llu on disk, %llu in core\n", 956 name, 957 (u_longlong_t)count, 958 (u_longlong_t)(dspace / count), 959 (u_longlong_t)(mspace / count)); 960 961 if (dump_opt['D'] < 3) 962 return; 963 964 zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]); 965 966 if (dump_opt['D'] < 4) 967 return; 968 969 if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE) 970 return; 971 972 (void) printf("%s contents:\n\n", name); 973 974 while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0) 975 dump_dde(ddt, &dde, walk); 976 977 ASSERT(error == ENOENT); 978 979 (void) printf("\n"); 980 } 981 982 static void 983 dump_all_ddts(spa_t *spa) 984 { 985 ddt_histogram_t ddh_total = { 0 }; 986 ddt_stat_t dds_total = { 0 }; 987 988 for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { 989 ddt_t *ddt = spa->spa_ddt[c]; 990 for (enum ddt_type type = 0; type < DDT_TYPES; type++) { 991 for (enum ddt_class class = 0; class < DDT_CLASSES; 992 class++) { 993 dump_ddt(ddt, type, class); 994 } 995 } 996 } 997 998 ddt_get_dedup_stats(spa, &dds_total); 999 1000 if (dds_total.dds_blocks == 0) { 1001 (void) printf("All DDTs are empty\n"); 1002 return; 1003 } 1004 1005 (void) printf("\n"); 1006 1007 if (dump_opt['D'] > 1) { 1008 (void) printf("DDT histogram (aggregated over all DDTs):\n"); 1009 ddt_get_dedup_histogram(spa, &ddh_total); 1010 zpool_dump_ddt(&dds_total, &ddh_total); 1011 } 1012 1013 dump_dedup_ratio(&dds_total); 1014 } 1015 1016 static void 1017 dump_dtl_seg(void *arg, uint64_t start, uint64_t size) 1018 { 1019 char *prefix = arg; 1020 1021 (void) printf("%s [%llu,%llu) length %llu\n", 1022 prefix, 1023 (u_longlong_t)start, 1024 (u_longlong_t)(start + size), 1025 (u_longlong_t)(size)); 1026 } 1027 1028 static void 1029 dump_dtl(vdev_t *vd, int indent) 1030 { 1031 spa_t *spa = vd->vdev_spa; 1032 boolean_t required; 1033 char *name[DTL_TYPES] = { "missing", "partial", "scrub", "outage" }; 1034 char prefix[256]; 1035 1036 spa_vdev_state_enter(spa, SCL_NONE); 1037 required = vdev_dtl_required(vd); 1038 (void) spa_vdev_state_exit(spa, NULL, 0); 1039 1040 if (indent == 0) 1041 (void) printf("\nDirty time logs:\n\n"); 1042 1043 
(void) printf("\t%*s%s [%s]\n", indent, "", 1044 vd->vdev_path ? vd->vdev_path : 1045 vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa), 1046 required ? "DTL-required" : "DTL-expendable"); 1047 1048 for (int t = 0; t < DTL_TYPES; t++) { 1049 range_tree_t *rt = vd->vdev_dtl[t]; 1050 if (range_tree_space(rt) == 0) 1051 continue; 1052 (void) snprintf(prefix, sizeof (prefix), "\t%*s%s", 1053 indent + 2, "", name[t]); 1054 mutex_enter(rt->rt_lock); 1055 range_tree_walk(rt, dump_dtl_seg, prefix); 1056 mutex_exit(rt->rt_lock); 1057 if (dump_opt['d'] > 5 && vd->vdev_children == 0) 1058 dump_spacemap(spa->spa_meta_objset, vd->vdev_dtl_sm); 1059 } 1060 1061 for (int c = 0; c < vd->vdev_children; c++) 1062 dump_dtl(vd->vdev_child[c], indent + 4); 1063 } 1064 1065 static void 1066 dump_history(spa_t *spa) 1067 { 1068 nvlist_t **events = NULL; 1069 uint64_t resid, len, off = 0; 1070 uint_t num = 0; 1071 int error; 1072 time_t tsec; 1073 struct tm t; 1074 char tbuf[30]; 1075 char internalstr[MAXPATHLEN]; 1076 1077 char *buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 1078 do { 1079 len = SPA_MAXBLOCKSIZE; 1080 1081 if ((error = spa_history_get(spa, &off, &len, buf)) != 0) { 1082 (void) fprintf(stderr, "Unable to read history: " 1083 "error %d\n", error); 1084 umem_free(buf, SPA_MAXBLOCKSIZE); 1085 return; 1086 } 1087 1088 if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0) 1089 break; 1090 1091 off -= resid; 1092 } while (len != 0); 1093 umem_free(buf, SPA_MAXBLOCKSIZE); 1094 1095 (void) printf("\nHistory:\n"); 1096 for (int i = 0; i < num; i++) { 1097 uint64_t time, txg, ievent; 1098 char *cmd, *intstr; 1099 boolean_t printed = B_FALSE; 1100 1101 if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME, 1102 &time) != 0) 1103 goto next; 1104 if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD, 1105 &cmd) != 0) { 1106 if (nvlist_lookup_uint64(events[i], 1107 ZPOOL_HIST_INT_EVENT, &ievent) != 0) 1108 goto next; 1109 verify(nvlist_lookup_uint64(events[i], 1110 ZPOOL_HIST_TXG, &txg) == 0); 1111 verify(nvlist_lookup_string(events[i], 1112 ZPOOL_HIST_INT_STR, &intstr) == 0); 1113 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) 1114 goto next; 1115 1116 (void) snprintf(internalstr, 1117 sizeof (internalstr), 1118 "[internal %s txg:%lld] %s", 1119 zfs_history_event_names[ievent], txg, 1120 intstr); 1121 cmd = internalstr; 1122 } 1123 tsec = time; 1124 (void) localtime_r(&tsec, &t); 1125 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 1126 (void) printf("%s %s\n", tbuf, cmd); 1127 printed = B_TRUE; 1128 1129 next: 1130 if (dump_opt['h'] > 1) { 1131 if (!printed) 1132 (void) printf("unrecognized record:\n"); 1133 dump_nvlist(events[i], 2); 1134 } 1135 } 1136 } 1137 1138 /*ARGSUSED*/ 1139 static void 1140 dump_dnode(objset_t *os, uint64_t object, void *data, size_t size) 1141 { 1142 } 1143 1144 static uint64_t 1145 blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp, 1146 const zbookmark_phys_t *zb) 1147 { 1148 if (dnp == NULL) { 1149 ASSERT(zb->zb_level < 0); 1150 if (zb->zb_object == 0) 1151 return (zb->zb_blkid); 1152 return (zb->zb_blkid * BP_GET_LSIZE(bp)); 1153 } 1154 1155 ASSERT(zb->zb_level >= 0); 1156 1157 return ((zb->zb_blkid << 1158 (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) * 1159 dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); 1160 } 1161 1162 static void 1163 snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp) 1164 { 1165 const dva_t *dva = bp->blk_dva; 1166 int ndvas = dump_opt['d'] > 5 ? 
BP_GET_NDVAS(bp) : 1; 1167 1168 if (dump_opt['b'] >= 6) { 1169 snprintf_blkptr(blkbuf, buflen, bp); 1170 return; 1171 } 1172 1173 if (BP_IS_EMBEDDED(bp)) { 1174 (void) sprintf(blkbuf, 1175 "EMBEDDED et=%u %llxL/%llxP B=%llu", 1176 (int)BPE_GET_ETYPE(bp), 1177 (u_longlong_t)BPE_GET_LSIZE(bp), 1178 (u_longlong_t)BPE_GET_PSIZE(bp), 1179 (u_longlong_t)bp->blk_birth); 1180 return; 1181 } 1182 1183 blkbuf[0] = '\0'; 1184 for (int i = 0; i < ndvas; i++) 1185 (void) snprintf(blkbuf + strlen(blkbuf), 1186 buflen - strlen(blkbuf), "%llu:%llx:%llx ", 1187 (u_longlong_t)DVA_GET_VDEV(&dva[i]), 1188 (u_longlong_t)DVA_GET_OFFSET(&dva[i]), 1189 (u_longlong_t)DVA_GET_ASIZE(&dva[i])); 1190 1191 if (BP_IS_HOLE(bp)) { 1192 (void) snprintf(blkbuf + strlen(blkbuf), 1193 buflen - strlen(blkbuf), 1194 "%llxL B=%llu", 1195 (u_longlong_t)BP_GET_LSIZE(bp), 1196 (u_longlong_t)bp->blk_birth); 1197 } else { 1198 (void) snprintf(blkbuf + strlen(blkbuf), 1199 buflen - strlen(blkbuf), 1200 "%llxL/%llxP F=%llu B=%llu/%llu", 1201 (u_longlong_t)BP_GET_LSIZE(bp), 1202 (u_longlong_t)BP_GET_PSIZE(bp), 1203 (u_longlong_t)BP_GET_FILL(bp), 1204 (u_longlong_t)bp->blk_birth, 1205 (u_longlong_t)BP_PHYSICAL_BIRTH(bp)); 1206 } 1207 } 1208 1209 static void 1210 print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb, 1211 const dnode_phys_t *dnp) 1212 { 1213 char blkbuf[BP_SPRINTF_LEN]; 1214 int l; 1215 1216 if (!BP_IS_EMBEDDED(bp)) { 1217 ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type); 1218 ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level); 1219 } 1220 1221 (void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb)); 1222 1223 ASSERT(zb->zb_level >= 0); 1224 1225 for (l = dnp->dn_nlevels - 1; l >= -1; l--) { 1226 if (l == zb->zb_level) { 1227 (void) printf("L%llx", (u_longlong_t)zb->zb_level); 1228 } else { 1229 (void) printf(" "); 1230 } 1231 } 1232 1233 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp); 1234 (void) printf("%s\n", blkbuf); 1235 } 1236 1237 static int 1238 visit_indirect(spa_t *spa, const dnode_phys_t *dnp, 1239 blkptr_t *bp, const zbookmark_phys_t *zb) 1240 { 1241 int err = 0; 1242 1243 if (bp->blk_birth == 0) 1244 return (0); 1245 1246 print_indirect(bp, zb, dnp); 1247 1248 if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) { 1249 arc_flags_t flags = ARC_FLAG_WAIT; 1250 int i; 1251 blkptr_t *cbp; 1252 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1253 arc_buf_t *buf; 1254 uint64_t fill = 0; 1255 1256 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 1257 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); 1258 if (err) 1259 return (err); 1260 ASSERT(buf->b_data); 1261 1262 /* recursively visit blocks below this */ 1263 cbp = buf->b_data; 1264 for (i = 0; i < epb; i++, cbp++) { 1265 zbookmark_phys_t czb; 1266 1267 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 1268 zb->zb_level - 1, 1269 zb->zb_blkid * epb + i); 1270 err = visit_indirect(spa, dnp, cbp, &czb); 1271 if (err) 1272 break; 1273 fill += BP_GET_FILL(cbp); 1274 } 1275 if (!err) 1276 ASSERT3U(fill, ==, BP_GET_FILL(bp)); 1277 (void) arc_buf_remove_ref(buf, &buf); 1278 } 1279 1280 return (err); 1281 } 1282 1283 /*ARGSUSED*/ 1284 static void 1285 dump_indirect(dnode_t *dn) 1286 { 1287 dnode_phys_t *dnp = dn->dn_phys; 1288 int j; 1289 zbookmark_phys_t czb; 1290 1291 (void) printf("Indirect blocks:\n"); 1292 1293 SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset), 1294 dn->dn_object, dnp->dn_nlevels - 1, 0); 1295 for (j = 0; j < dnp->dn_nblkptr; j++) { 1296 czb.zb_blkid = j; 1297 (void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp, 1298 &dnp->dn_blkptr[j], 
&czb); 1299 } 1300 1301 (void) printf("\n"); 1302 } 1303 1304 /*ARGSUSED*/ 1305 static void 1306 dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size) 1307 { 1308 dsl_dir_phys_t *dd = data; 1309 time_t crtime; 1310 char nice[32]; 1311 1312 /* make sure nicenum has enough space */ 1313 CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ); 1314 1315 if (dd == NULL) 1316 return; 1317 1318 ASSERT3U(size, >=, sizeof (dsl_dir_phys_t)); 1319 1320 crtime = dd->dd_creation_time; 1321 (void) printf("\t\tcreation_time = %s", ctime(&crtime)); 1322 (void) printf("\t\thead_dataset_obj = %llu\n", 1323 (u_longlong_t)dd->dd_head_dataset_obj); 1324 (void) printf("\t\tparent_dir_obj = %llu\n", 1325 (u_longlong_t)dd->dd_parent_obj); 1326 (void) printf("\t\torigin_obj = %llu\n", 1327 (u_longlong_t)dd->dd_origin_obj); 1328 (void) printf("\t\tchild_dir_zapobj = %llu\n", 1329 (u_longlong_t)dd->dd_child_dir_zapobj); 1330 zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice)); 1331 (void) printf("\t\tused_bytes = %s\n", nice); 1332 zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice)); 1333 (void) printf("\t\tcompressed_bytes = %s\n", nice); 1334 zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice)); 1335 (void) printf("\t\tuncompressed_bytes = %s\n", nice); 1336 zdb_nicenum(dd->dd_quota, nice, sizeof (nice)); 1337 (void) printf("\t\tquota = %s\n", nice); 1338 zdb_nicenum(dd->dd_reserved, nice, sizeof (nice)); 1339 (void) printf("\t\treserved = %s\n", nice); 1340 (void) printf("\t\tprops_zapobj = %llu\n", 1341 (u_longlong_t)dd->dd_props_zapobj); 1342 (void) printf("\t\tdeleg_zapobj = %llu\n", 1343 (u_longlong_t)dd->dd_deleg_zapobj); 1344 (void) printf("\t\tflags = %llx\n", 1345 (u_longlong_t)dd->dd_flags); 1346 1347 #define DO(which) \ 1348 zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \ 1349 sizeof (nice)); \ 1350 (void) printf("\t\tused_breakdown[" #which "] = %s\n", nice) 1351 DO(HEAD); 1352 DO(SNAP); 1353 DO(CHILD); 1354 DO(CHILD_RSRV); 1355 DO(REFRSRV); 1356 #undef DO 1357 } 1358 1359 /*ARGSUSED*/ 1360 static void 1361 dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size) 1362 { 1363 dsl_dataset_phys_t *ds = data; 1364 time_t crtime; 1365 char used[32], compressed[32], uncompressed[32], unique[32]; 1366 char blkbuf[BP_SPRINTF_LEN]; 1367 1368 /* make sure nicenum has enough space */ 1369 CTASSERT(sizeof (used) >= NN_NUMBUF_SZ); 1370 CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ); 1371 CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ); 1372 CTASSERT(sizeof (unique) >= NN_NUMBUF_SZ); 1373 1374 if (ds == NULL) 1375 return; 1376 1377 ASSERT(size == sizeof (*ds)); 1378 crtime = ds->ds_creation_time; 1379 zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used)); 1380 zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed)); 1381 zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed, 1382 sizeof (uncompressed)); 1383 zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique)); 1384 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp); 1385 1386 (void) printf("\t\tdir_obj = %llu\n", 1387 (u_longlong_t)ds->ds_dir_obj); 1388 (void) printf("\t\tprev_snap_obj = %llu\n", 1389 (u_longlong_t)ds->ds_prev_snap_obj); 1390 (void) printf("\t\tprev_snap_txg = %llu\n", 1391 (u_longlong_t)ds->ds_prev_snap_txg); 1392 (void) printf("\t\tnext_snap_obj = %llu\n", 1393 (u_longlong_t)ds->ds_next_snap_obj); 1394 (void) printf("\t\tsnapnames_zapobj = %llu\n", 1395 (u_longlong_t)ds->ds_snapnames_zapobj); 1396 (void) printf("\t\tnum_children = %llu\n", 1397 (u_longlong_t)ds->ds_num_children); 
1398 (void) printf("\t\tuserrefs_obj = %llu\n", 1399 (u_longlong_t)ds->ds_userrefs_obj); 1400 (void) printf("\t\tcreation_time = %s", ctime(&crtime)); 1401 (void) printf("\t\tcreation_txg = %llu\n", 1402 (u_longlong_t)ds->ds_creation_txg); 1403 (void) printf("\t\tdeadlist_obj = %llu\n", 1404 (u_longlong_t)ds->ds_deadlist_obj); 1405 (void) printf("\t\tused_bytes = %s\n", used); 1406 (void) printf("\t\tcompressed_bytes = %s\n", compressed); 1407 (void) printf("\t\tuncompressed_bytes = %s\n", uncompressed); 1408 (void) printf("\t\tunique = %s\n", unique); 1409 (void) printf("\t\tfsid_guid = %llu\n", 1410 (u_longlong_t)ds->ds_fsid_guid); 1411 (void) printf("\t\tguid = %llu\n", 1412 (u_longlong_t)ds->ds_guid); 1413 (void) printf("\t\tflags = %llx\n", 1414 (u_longlong_t)ds->ds_flags); 1415 (void) printf("\t\tnext_clones_obj = %llu\n", 1416 (u_longlong_t)ds->ds_next_clones_obj); 1417 (void) printf("\t\tprops_obj = %llu\n", 1418 (u_longlong_t)ds->ds_props_obj); 1419 (void) printf("\t\tbp = %s\n", blkbuf); 1420 } 1421 1422 /* ARGSUSED */ 1423 static int 1424 dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1425 { 1426 char blkbuf[BP_SPRINTF_LEN]; 1427 1428 if (bp->blk_birth != 0) { 1429 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 1430 (void) printf("\t%s\n", blkbuf); 1431 } 1432 return (0); 1433 } 1434 1435 static void 1436 dump_bptree(objset_t *os, uint64_t obj, char *name) 1437 { 1438 char bytes[32]; 1439 bptree_phys_t *bt; 1440 dmu_buf_t *db; 1441 1442 /* make sure nicenum has enough space */ 1443 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ); 1444 1445 if (dump_opt['d'] < 3) 1446 return; 1447 1448 VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db)); 1449 bt = db->db_data; 1450 zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes)); 1451 (void) printf("\n %s: %llu datasets, %s\n", 1452 name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes); 1453 dmu_buf_rele(db, FTAG); 1454 1455 if (dump_opt['d'] < 5) 1456 return; 1457 1458 (void) printf("\n"); 1459 1460 (void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL); 1461 } 1462 1463 /* ARGSUSED */ 1464 static int 1465 dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1466 { 1467 char blkbuf[BP_SPRINTF_LEN]; 1468 1469 ASSERT(bp->blk_birth != 0); 1470 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp); 1471 (void) printf("\t%s\n", blkbuf); 1472 return (0); 1473 } 1474 1475 static void 1476 dump_full_bpobj(bpobj_t *bpo, char *name, int indent) 1477 { 1478 char bytes[32]; 1479 char comp[32]; 1480 char uncomp[32]; 1481 1482 /* make sure nicenum has enough space */ 1483 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ); 1484 CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ); 1485 CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ); 1486 1487 if (dump_opt['d'] < 3) 1488 return; 1489 1490 zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes)); 1491 if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) { 1492 zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp)); 1493 zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp)); 1494 (void) printf(" %*s: object %llu, %llu local blkptrs, " 1495 "%llu subobjs in object %llu, %s (%s/%s comp)\n", 1496 indent * 8, name, 1497 (u_longlong_t)bpo->bpo_object, 1498 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, 1499 (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs, 1500 (u_longlong_t)bpo->bpo_phys->bpo_subobjs, 1501 bytes, comp, uncomp); 1502 1503 for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) { 1504 uint64_t subobj; 1505 bpobj_t subbpo; 1506 int error; 1507 VERIFY0(dmu_read(bpo->bpo_os, 
1508 bpo->bpo_phys->bpo_subobjs, 1509 i * sizeof (subobj), sizeof (subobj), &subobj, 0)); 1510 error = bpobj_open(&subbpo, bpo->bpo_os, subobj); 1511 if (error != 0) { 1512 (void) printf("ERROR %u while trying to open " 1513 "subobj id %llu\n", 1514 error, (u_longlong_t)subobj); 1515 continue; 1516 } 1517 dump_full_bpobj(&subbpo, "subobj", indent + 1); 1518 bpobj_close(&subbpo); 1519 } 1520 } else { 1521 (void) printf(" %*s: object %llu, %llu blkptrs, %s\n", 1522 indent * 8, name, 1523 (u_longlong_t)bpo->bpo_object, 1524 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, 1525 bytes); 1526 } 1527 1528 if (dump_opt['d'] < 5) 1529 return; 1530 1531 1532 if (indent == 0) { 1533 (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL); 1534 (void) printf("\n"); 1535 } 1536 } 1537 1538 static void 1539 dump_deadlist(dsl_deadlist_t *dl) 1540 { 1541 dsl_deadlist_entry_t *dle; 1542 uint64_t unused; 1543 char bytes[32]; 1544 char comp[32]; 1545 char uncomp[32]; 1546 1547 /* make sure nicenum has enough space */ 1548 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ); 1549 CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ); 1550 CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ); 1551 1552 if (dump_opt['d'] < 3) 1553 return; 1554 1555 if (dl->dl_oldfmt) { 1556 dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0); 1557 return; 1558 } 1559 1560 zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes)); 1561 zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp)); 1562 zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp)); 1563 (void) printf("\n Deadlist: %s (%s/%s comp)\n", 1564 bytes, comp, uncomp); 1565 1566 if (dump_opt['d'] < 4) 1567 return; 1568 1569 (void) printf("\n"); 1570 1571 /* force the tree to be loaded */ 1572 dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused); 1573 1574 for (dle = avl_first(&dl->dl_tree); dle; 1575 dle = AVL_NEXT(&dl->dl_tree, dle)) { 1576 if (dump_opt['d'] >= 5) { 1577 char buf[128]; 1578 (void) snprintf(buf, sizeof (buf), "mintxg %llu -> ", 1579 (longlong_t)dle->dle_mintxg, 1580 (longlong_t)dle->dle_bpobj.bpo_object); 1581 1582 dump_full_bpobj(&dle->dle_bpobj, buf, 0); 1583 } else { 1584 (void) printf("mintxg %llu -> obj %llu\n", 1585 (longlong_t)dle->dle_mintxg, 1586 (longlong_t)dle->dle_bpobj.bpo_object); 1587 1588 } 1589 } 1590 } 1591 1592 static avl_tree_t idx_tree; 1593 static avl_tree_t domain_tree; 1594 static boolean_t fuid_table_loaded; 1595 static boolean_t sa_loaded; 1596 sa_attr_type_t *sa_attr_table; 1597 1598 static void 1599 fuid_table_destroy() 1600 { 1601 if (fuid_table_loaded) { 1602 zfs_fuid_table_destroy(&idx_tree, &domain_tree); 1603 fuid_table_loaded = B_FALSE; 1604 } 1605 } 1606 1607 /* 1608 * print uid or gid information. 1609 * For normal POSIX id just the id is printed in decimal format. 1610 * For CIFS files with FUID the fuid is printed in hex followed by 1611 * the domain-rid string. 
1612 */ 1613 static void 1614 print_idstr(uint64_t id, const char *id_type) 1615 { 1616 if (FUID_INDEX(id)) { 1617 char *domain; 1618 1619 domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id)); 1620 (void) printf("\t%s %llx [%s-%d]\n", id_type, 1621 (u_longlong_t)id, domain, (int)FUID_RID(id)); 1622 } else { 1623 (void) printf("\t%s %llu\n", id_type, (u_longlong_t)id); 1624 } 1625 1626 } 1627 1628 static void 1629 dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid) 1630 { 1631 uint32_t uid_idx, gid_idx; 1632 1633 uid_idx = FUID_INDEX(uid); 1634 gid_idx = FUID_INDEX(gid); 1635 1636 /* Load domain table, if not already loaded */ 1637 if (!fuid_table_loaded && (uid_idx || gid_idx)) { 1638 uint64_t fuid_obj; 1639 1640 /* first find the fuid object. It lives in the master node */ 1641 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 1642 8, 1, &fuid_obj) == 0); 1643 zfs_fuid_avl_tree_create(&idx_tree, &domain_tree); 1644 (void) zfs_fuid_table_load(os, fuid_obj, 1645 &idx_tree, &domain_tree); 1646 fuid_table_loaded = B_TRUE; 1647 } 1648 1649 print_idstr(uid, "uid"); 1650 print_idstr(gid, "gid"); 1651 } 1652 1653 /*ARGSUSED*/ 1654 static void 1655 dump_znode(objset_t *os, uint64_t object, void *data, size_t size) 1656 { 1657 char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */ 1658 sa_handle_t *hdl; 1659 uint64_t xattr, rdev, gen; 1660 uint64_t uid, gid, mode, fsize, parent, links; 1661 uint64_t pflags; 1662 uint64_t acctm[2], modtm[2], chgtm[2], crtm[2]; 1663 time_t z_crtime, z_atime, z_mtime, z_ctime; 1664 sa_bulk_attr_t bulk[12]; 1665 int idx = 0; 1666 int error; 1667 1668 if (!sa_loaded) { 1669 uint64_t sa_attrs = 0; 1670 uint64_t version; 1671 1672 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 1673 8, 1, &version) == 0); 1674 if (version >= ZPL_VERSION_SA) { 1675 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 1676 8, 1, &sa_attrs) == 0); 1677 } 1678 if ((error = sa_setup(os, sa_attrs, zfs_attr_table, 1679 ZPL_END, &sa_attr_table)) != 0) { 1680 (void) printf("sa_setup failed errno %d, can't " 1681 "display znode contents\n", error); 1682 return; 1683 } 1684 sa_loaded = B_TRUE; 1685 } 1686 1687 if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) { 1688 (void) printf("Failed to get handle for SA znode\n"); 1689 return; 1690 } 1691 1692 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8); 1693 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8); 1694 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL, 1695 &links, 8); 1696 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8); 1697 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL, 1698 &mode, 8); 1699 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT], 1700 NULL, &parent, 8); 1701 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL, 1702 &fsize, 8); 1703 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL, 1704 acctm, 16); 1705 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL, 1706 modtm, 16); 1707 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL, 1708 crtm, 16); 1709 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL, 1710 chgtm, 16); 1711 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL, 1712 &pflags, 8); 1713 1714 if (sa_bulk_lookup(hdl, bulk, idx)) { 1715 (void) sa_handle_destroy(hdl); 1716 return; 1717 } 1718 1719 error = zfs_obj_to_path(os, object, path, sizeof (path)); 1720 if (error != 0) { 1721 (void) snprintf(path, sizeof (path), "\?\?\?<object#%llu>", 1722 
(u_longlong_t)object); 1723 } 1724 if (dump_opt['d'] < 3) { 1725 (void) printf("\t%s\n", path); 1726 (void) sa_handle_destroy(hdl); 1727 return; 1728 } 1729 1730 z_crtime = (time_t)crtm[0]; 1731 z_atime = (time_t)acctm[0]; 1732 z_mtime = (time_t)modtm[0]; 1733 z_ctime = (time_t)chgtm[0]; 1734 1735 (void) printf("\tpath %s\n", path); 1736 dump_uidgid(os, uid, gid); 1737 (void) printf("\tatime %s", ctime(&z_atime)); 1738 (void) printf("\tmtime %s", ctime(&z_mtime)); 1739 (void) printf("\tctime %s", ctime(&z_ctime)); 1740 (void) printf("\tcrtime %s", ctime(&z_crtime)); 1741 (void) printf("\tgen %llu\n", (u_longlong_t)gen); 1742 (void) printf("\tmode %llo\n", (u_longlong_t)mode); 1743 (void) printf("\tsize %llu\n", (u_longlong_t)fsize); 1744 (void) printf("\tparent %llu\n", (u_longlong_t)parent); 1745 (void) printf("\tlinks %llu\n", (u_longlong_t)links); 1746 (void) printf("\tpflags %llx\n", (u_longlong_t)pflags); 1747 if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr, 1748 sizeof (uint64_t)) == 0) 1749 (void) printf("\txattr %llu\n", (u_longlong_t)xattr); 1750 if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev, 1751 sizeof (uint64_t)) == 0) 1752 (void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev); 1753 sa_handle_destroy(hdl); 1754 } 1755 1756 /*ARGSUSED*/ 1757 static void 1758 dump_acl(objset_t *os, uint64_t object, void *data, size_t size) 1759 { 1760 } 1761 1762 /*ARGSUSED*/ 1763 static void 1764 dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size) 1765 { 1766 } 1767 1768 static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = { 1769 dump_none, /* unallocated */ 1770 dump_zap, /* object directory */ 1771 dump_uint64, /* object array */ 1772 dump_none, /* packed nvlist */ 1773 dump_packed_nvlist, /* packed nvlist size */ 1774 dump_none, /* bpobj */ 1775 dump_bpobj, /* bpobj header */ 1776 dump_none, /* SPA space map header */ 1777 dump_none, /* SPA space map */ 1778 dump_none, /* ZIL intent log */ 1779 dump_dnode, /* DMU dnode */ 1780 dump_dmu_objset, /* DMU objset */ 1781 dump_dsl_dir, /* DSL directory */ 1782 dump_zap, /* DSL directory child map */ 1783 dump_zap, /* DSL dataset snap map */ 1784 dump_zap, /* DSL props */ 1785 dump_dsl_dataset, /* DSL dataset */ 1786 dump_znode, /* ZFS znode */ 1787 dump_acl, /* ZFS V0 ACL */ 1788 dump_uint8, /* ZFS plain file */ 1789 dump_zpldir, /* ZFS directory */ 1790 dump_zap, /* ZFS master node */ 1791 dump_zap, /* ZFS delete queue */ 1792 dump_uint8, /* zvol object */ 1793 dump_zap, /* zvol prop */ 1794 dump_uint8, /* other uint8[] */ 1795 dump_uint64, /* other uint64[] */ 1796 dump_zap, /* other ZAP */ 1797 dump_zap, /* persistent error log */ 1798 dump_uint8, /* SPA history */ 1799 dump_history_offsets, /* SPA history offsets */ 1800 dump_zap, /* Pool properties */ 1801 dump_zap, /* DSL permissions */ 1802 dump_acl, /* ZFS ACL */ 1803 dump_uint8, /* ZFS SYSACL */ 1804 dump_none, /* FUID nvlist */ 1805 dump_packed_nvlist, /* FUID nvlist size */ 1806 dump_zap, /* DSL dataset next clones */ 1807 dump_zap, /* DSL scrub queue */ 1808 dump_zap, /* ZFS user/group used */ 1809 dump_zap, /* ZFS user/group quota */ 1810 dump_zap, /* snapshot refcount tags */ 1811 dump_ddt_zap, /* DDT ZAP object */ 1812 dump_zap, /* DDT statistics */ 1813 dump_znode, /* SA object */ 1814 dump_zap, /* SA Master Node */ 1815 dump_sa_attrs, /* SA attribute registration */ 1816 dump_sa_layouts, /* SA attribute layouts */ 1817 dump_zap, /* DSL scrub translations */ 1818 dump_none, /* fake dedup BP */ 1819 dump_zap, /* deadlist */ 1820 dump_none, /* 
deadlist hdr */ 1821 dump_zap, /* dsl clones */ 1822 dump_bpobj_subobjs, /* bpobj subobjs */ 1823 dump_unknown, /* Unknown type, must be last */ 1824 }; 1825 1826 static void 1827 dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header) 1828 { 1829 dmu_buf_t *db = NULL; 1830 dmu_object_info_t doi; 1831 dnode_t *dn; 1832 void *bonus = NULL; 1833 size_t bsize = 0; 1834 char iblk[32], dblk[32], lsize[32], asize[32], fill[32]; 1835 char bonus_size[32]; 1836 char aux[50]; 1837 int error; 1838 1839 /* make sure nicenum has enough space */ 1840 CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ); 1841 CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ); 1842 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ); 1843 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ); 1844 CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ); 1845 1846 if (*print_header) { 1847 (void) printf("\n%10s %3s %5s %5s %5s %5s %6s %s\n", 1848 "Object", "lvl", "iblk", "dblk", "dsize", "lsize", 1849 "%full", "type"); 1850 *print_header = 0; 1851 } 1852 1853 if (object == 0) { 1854 dn = DMU_META_DNODE(os); 1855 } else { 1856 error = dmu_bonus_hold(os, object, FTAG, &db); 1857 if (error) 1858 fatal("dmu_bonus_hold(%llu) failed, errno %u", 1859 object, error); 1860 bonus = db->db_data; 1861 bsize = db->db_size; 1862 dn = DB_DNODE((dmu_buf_impl_t *)db); 1863 } 1864 dmu_object_info_from_dnode(dn, &doi); 1865 1866 zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk)); 1867 zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk)); 1868 zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize)); 1869 zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize)); 1870 zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size)); 1871 (void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count * 1872 doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) / 1873 doi.doi_max_offset); 1874 1875 aux[0] = '\0'; 1876 1877 if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) { 1878 (void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)", 1879 ZDB_CHECKSUM_NAME(doi.doi_checksum)); 1880 } 1881 1882 if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) { 1883 (void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)", 1884 ZDB_COMPRESS_NAME(doi.doi_compress)); 1885 } 1886 1887 (void) printf("%10lld %3u %5s %5s %5s %5s %6s %s%s\n", 1888 (u_longlong_t)object, doi.doi_indirection, iblk, dblk, 1889 asize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux); 1890 1891 if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) { 1892 (void) printf("%10s %3s %5s %5s %5s %5s %6s %s\n", 1893 "", "", "", "", "", bonus_size, "bonus", 1894 ZDB_OT_NAME(doi.doi_bonus_type)); 1895 } 1896 1897 if (verbosity >= 4) { 1898 (void) printf("\tdnode flags: %s%s%s\n", 1899 (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ? 1900 "USED_BYTES " : "", 1901 (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ? 1902 "USERUSED_ACCOUNTED " : "", 1903 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? 1904 "SPILL_BLKPTR" : ""); 1905 (void) printf("\tdnode maxblkid: %llu\n", 1906 (longlong_t)dn->dn_phys->dn_maxblkid); 1907 1908 object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os, object, 1909 bonus, bsize); 1910 object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object, NULL, 0); 1911 *print_header = 1; 1912 } 1913 1914 if (verbosity >= 5) 1915 dump_indirect(dn); 1916 1917 if (verbosity >= 5) { 1918 /* 1919 * Report the list of segments that comprise the object. 
1920 */ 1921 uint64_t start = 0; 1922 uint64_t end; 1923 uint64_t blkfill = 1; 1924 int minlvl = 1; 1925 1926 if (dn->dn_type == DMU_OT_DNODE) { 1927 minlvl = 0; 1928 blkfill = DNODES_PER_BLOCK; 1929 } 1930 1931 for (;;) { 1932 char segsize[32]; 1933 /* make sure nicenum has enough space */ 1934 CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ); 1935 error = dnode_next_offset(dn, 1936 0, &start, minlvl, blkfill, 0); 1937 if (error) 1938 break; 1939 end = start; 1940 error = dnode_next_offset(dn, 1941 DNODE_FIND_HOLE, &end, minlvl, blkfill, 0); 1942 zdb_nicenum(end - start, segsize, sizeof (segsize)); 1943 (void) printf("\t\tsegment [%016llx, %016llx)" 1944 " size %5s\n", (u_longlong_t)start, 1945 (u_longlong_t)end, segsize); 1946 if (error) 1947 break; 1948 start = end; 1949 } 1950 } 1951 1952 if (db != NULL) 1953 dmu_buf_rele(db, FTAG); 1954 } 1955 1956 static char *objset_types[DMU_OST_NUMTYPES] = { 1957 "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" }; 1958 1959 static void 1960 dump_dir(objset_t *os) 1961 { 1962 dmu_objset_stats_t dds; 1963 uint64_t object, object_count; 1964 uint64_t refdbytes, usedobjs, scratch; 1965 char numbuf[32]; 1966 char blkbuf[BP_SPRINTF_LEN + 20]; 1967 char osname[ZFS_MAX_DATASET_NAME_LEN]; 1968 char *type = "UNKNOWN"; 1969 int verbosity = dump_opt['d']; 1970 int print_header = 1; 1971 int i, error; 1972 1973 /* make sure nicenum has enough space */ 1974 CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ); 1975 1976 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 1977 dmu_objset_fast_stat(os, &dds); 1978 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 1979 1980 if (dds.dds_type < DMU_OST_NUMTYPES) 1981 type = objset_types[dds.dds_type]; 1982 1983 if (dds.dds_type == DMU_OST_META) { 1984 dds.dds_creation_txg = TXG_INITIAL; 1985 usedobjs = BP_GET_FILL(os->os_rootbp); 1986 refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)-> 1987 dd_used_bytes; 1988 } else { 1989 dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch); 1990 } 1991 1992 ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp)); 1993 1994 zdb_nicenum(refdbytes, numbuf, sizeof (numbuf)); 1995 1996 if (verbosity >= 4) { 1997 (void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp "); 1998 (void) snprintf_blkptr(blkbuf + strlen(blkbuf), 1999 sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp); 2000 } else { 2001 blkbuf[0] = '\0'; 2002 } 2003 2004 dmu_objset_name(os, osname); 2005 2006 (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, " 2007 "%s, %llu objects%s\n", 2008 osname, type, (u_longlong_t)dmu_objset_id(os), 2009 (u_longlong_t)dds.dds_creation_txg, 2010 numbuf, (u_longlong_t)usedobjs, blkbuf); 2011 2012 if (zopt_objects != 0) { 2013 for (i = 0; i < zopt_objects; i++) 2014 dump_object(os, zopt_object[i], verbosity, 2015 &print_header); 2016 (void) printf("\n"); 2017 return; 2018 } 2019 2020 if (dump_opt['i'] != 0 || verbosity >= 2) 2021 dump_intent_log(dmu_objset_zil(os)); 2022 2023 if (dmu_objset_ds(os) != NULL) 2024 dump_deadlist(&dmu_objset_ds(os)->ds_deadlist); 2025 2026 if (verbosity < 2) 2027 return; 2028 2029 if (BP_IS_HOLE(os->os_rootbp)) 2030 return; 2031 2032 dump_object(os, 0, verbosity, &print_header); 2033 object_count = 0; 2034 if (DMU_USERUSED_DNODE(os) != NULL && 2035 DMU_USERUSED_DNODE(os)->dn_type != 0) { 2036 dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header); 2037 dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header); 2038 } 2039 2040 object = 0; 2041 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) { 2042 dump_object(os, object, verbosity, 
&print_header);
2043 object_count++;
2044 }
2045
2046 ASSERT3U(object_count, ==, usedobjs);
2047
2048 (void) printf("\n");
2049
2050 if (error != ESRCH) {
2051 (void) fprintf(stderr, "dmu_object_next() = %d\n", error);
2052 abort();
2053 }
2054 }
2055
2056 static void
2057 dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
2058 {
2059 time_t timestamp = ub->ub_timestamp;
2060
2061 (void) printf(header ? header : "");
2062 (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
2063 (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
2064 (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
2065 (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
2066 (void) printf("\ttimestamp = %llu UTC = %s",
2067 (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
2068 if (dump_opt['u'] >= 3) {
2069 char blkbuf[BP_SPRINTF_LEN];
2070 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
2071 (void) printf("\trootbp = %s\n", blkbuf);
2072 }
2073 (void) printf(footer ? footer : "");
2074 }
2075
2076 static void
2077 dump_config(spa_t *spa)
2078 {
2079 dmu_buf_t *db;
2080 size_t nvsize = 0;
2081 int error = 0;
2082
2083
2084 error = dmu_bonus_hold(spa->spa_meta_objset,
2085 spa->spa_config_object, FTAG, &db);
2086
2087 if (error == 0) {
2088 nvsize = *(uint64_t *)db->db_data;
2089 dmu_buf_rele(db, FTAG);
2090
2091 (void) printf("\nMOS Configuration:\n");
2092 dump_packed_nvlist(spa->spa_meta_objset,
2093 spa->spa_config_object, (void *)&nvsize, 1);
2094 } else {
2095 (void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
2096 (u_longlong_t)spa->spa_config_object, error);
2097 }
2098 }
2099
2100 static void
2101 dump_cachefile(const char *cachefile)
2102 {
2103 int fd;
2104 struct stat64 statbuf;
2105 char *buf;
2106 nvlist_t *config;
2107
2108 if ((fd = open64(cachefile, O_RDONLY)) < 0) {
2109 (void) printf("cannot open '%s': %s\n", cachefile,
2110 strerror(errno));
2111 exit(1);
2112 }
2113
2114 if (fstat64(fd, &statbuf) != 0) {
2115 (void) printf("failed to stat '%s': %s\n", cachefile,
2116 strerror(errno));
2117 exit(1);
2118 }
2119
2120 if ((buf = malloc(statbuf.st_size)) == NULL) {
2121 (void) fprintf(stderr, "failed to allocate %llu bytes\n",
2122 (u_longlong_t)statbuf.st_size);
2123 exit(1);
2124 }
2125
2126 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
2127 (void) fprintf(stderr, "failed to read %llu bytes\n",
2128 (u_longlong_t)statbuf.st_size);
2129 exit(1);
2130 }
2131
2132 (void) close(fd);
2133
2134 if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
2135 (void) fprintf(stderr, "failed to unpack nvlist\n");
2136 exit(1);
2137 }
2138
2139 free(buf);
2140
2141 dump_nvlist(config, 0);
2142
2143 nvlist_free(config);
2144 }
2145
2146 #define ZDB_MAX_UB_HEADER_SIZE 32
2147
2148 static void
2149 dump_label_uberblocks(vdev_label_t *lbl, uint64_t ashift)
2150 {
2151 vdev_t vd;
2152 vdev_t *vdp = &vd;
2153 char header[ZDB_MAX_UB_HEADER_SIZE];
2154
2155 vd.vdev_ashift = ashift;
2156 vdp->vdev_top = vdp;
2157
2158 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(vdp); i++) {
2159 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(vdp, i);
2160 uberblock_t *ub = (void *)((char *)lbl + uoff);
2161
2162 if (uberblock_verify(ub))
2163 continue;
2164 (void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
2165 "Uberblock[%d]\n", i);
2166 dump_uberblock(ub, header, "");
2167 }
2168 }
2169
2170 static void
2171 dump_label(const char *dev)
2172 {
2173 int fd;
2174 vdev_label_t label;
2175 char *path, *buf =
label.vl_vdev_phys.vp_nvlist; 2176 size_t buflen = sizeof (label.vl_vdev_phys.vp_nvlist); 2177 struct stat64 statbuf; 2178 uint64_t psize, ashift; 2179 int len = strlen(dev) + 1; 2180 2181 if (strncmp(dev, "/dev/dsk/", 9) == 0) { 2182 len++; 2183 path = malloc(len); 2184 (void) snprintf(path, len, "%s%s", "/dev/rdsk/", dev + 9); 2185 } else { 2186 path = strdup(dev); 2187 } 2188 2189 if ((fd = open64(path, O_RDONLY)) < 0) { 2190 (void) printf("cannot open '%s': %s\n", path, strerror(errno)); 2191 free(path); 2192 exit(1); 2193 } 2194 2195 if (fstat64(fd, &statbuf) != 0) { 2196 (void) printf("failed to stat '%s': %s\n", path, 2197 strerror(errno)); 2198 free(path); 2199 (void) close(fd); 2200 exit(1); 2201 } 2202 2203 if (S_ISBLK(statbuf.st_mode)) { 2204 (void) printf("cannot use '%s': character device required\n", 2205 path); 2206 free(path); 2207 (void) close(fd); 2208 exit(1); 2209 } 2210 2211 psize = statbuf.st_size; 2212 psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t)); 2213 2214 for (int l = 0; l < VDEV_LABELS; l++) { 2215 nvlist_t *config = NULL; 2216 2217 (void) printf("--------------------------------------------\n"); 2218 (void) printf("LABEL %d\n", l); 2219 (void) printf("--------------------------------------------\n"); 2220 2221 if (pread64(fd, &label, sizeof (label), 2222 vdev_label_offset(psize, l, 0)) != sizeof (label)) { 2223 (void) printf("failed to read label %d\n", l); 2224 continue; 2225 } 2226 2227 if (nvlist_unpack(buf, buflen, &config, 0) != 0) { 2228 (void) printf("failed to unpack label %d\n", l); 2229 ashift = SPA_MINBLOCKSHIFT; 2230 } else { 2231 nvlist_t *vdev_tree = NULL; 2232 2233 dump_nvlist(config, 4); 2234 if ((nvlist_lookup_nvlist(config, 2235 ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) || 2236 (nvlist_lookup_uint64(vdev_tree, 2237 ZPOOL_CONFIG_ASHIFT, &ashift) != 0)) 2238 ashift = SPA_MINBLOCKSHIFT; 2239 nvlist_free(config); 2240 } 2241 if (dump_opt['u']) 2242 dump_label_uberblocks(&label, ashift); 2243 } 2244 2245 free(path); 2246 (void) close(fd); 2247 } 2248 2249 static uint64_t dataset_feature_count[SPA_FEATURES]; 2250 2251 /*ARGSUSED*/ 2252 static int 2253 dump_one_dir(const char *dsname, void *arg) 2254 { 2255 int error; 2256 objset_t *os; 2257 2258 error = dmu_objset_own(dsname, DMU_OST_ANY, B_TRUE, FTAG, &os); 2259 if (error) { 2260 (void) printf("Could not open %s, error %d\n", dsname, error); 2261 return (0); 2262 } 2263 2264 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 2265 if (!dmu_objset_ds(os)->ds_feature_inuse[f]) 2266 continue; 2267 ASSERT(spa_feature_table[f].fi_flags & 2268 ZFEATURE_FLAG_PER_DATASET); 2269 dataset_feature_count[f]++; 2270 } 2271 2272 dump_dir(os); 2273 dmu_objset_disown(os, FTAG); 2274 fuid_table_destroy(); 2275 sa_loaded = B_FALSE; 2276 return (0); 2277 } 2278 2279 /* 2280 * Block statistics. 2281 */ 2282 #define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2) 2283 typedef struct zdb_blkstats { 2284 uint64_t zb_asize; 2285 uint64_t zb_lsize; 2286 uint64_t zb_psize; 2287 uint64_t zb_count; 2288 uint64_t zb_gangs; 2289 uint64_t zb_ditto_samevdev; 2290 uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE]; 2291 } zdb_blkstats_t; 2292 2293 /* 2294 * Extended object types to report deferred frees and dedup auto-ditto blocks. 
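 * These values index past DMU_OT_NUMTYPES into zcb_type[][]; the
 * zdb_ot_extname[] table below supplies their display names in the
 * per-type breakdown printed by dump_block_stats().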
2295 */ 2296 #define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0) 2297 #define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1) 2298 #define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2) 2299 #define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3) 2300 2301 static char *zdb_ot_extname[] = { 2302 "deferred free", 2303 "dedup ditto", 2304 "other", 2305 "Total", 2306 }; 2307 2308 #define ZB_TOTAL DN_MAX_LEVELS 2309 2310 typedef struct zdb_cb { 2311 zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1]; 2312 uint64_t zcb_dedup_asize; 2313 uint64_t zcb_dedup_blocks; 2314 uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES]; 2315 uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES] 2316 [BPE_PAYLOAD_SIZE]; 2317 uint64_t zcb_start; 2318 uint64_t zcb_lastprint; 2319 uint64_t zcb_totalasize; 2320 uint64_t zcb_errors[256]; 2321 int zcb_readfails; 2322 int zcb_haderrors; 2323 spa_t *zcb_spa; 2324 } zdb_cb_t; 2325 2326 static void 2327 zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp, 2328 dmu_object_type_t type) 2329 { 2330 uint64_t refcnt = 0; 2331 2332 ASSERT(type < ZDB_OT_TOTAL); 2333 2334 if (zilog && zil_bp_tree_add(zilog, bp) != 0) 2335 return; 2336 2337 for (int i = 0; i < 4; i++) { 2338 int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL; 2339 int t = (i & 1) ? type : ZDB_OT_TOTAL; 2340 int equal; 2341 zdb_blkstats_t *zb = &zcb->zcb_type[l][t]; 2342 2343 zb->zb_asize += BP_GET_ASIZE(bp); 2344 zb->zb_lsize += BP_GET_LSIZE(bp); 2345 zb->zb_psize += BP_GET_PSIZE(bp); 2346 zb->zb_count++; 2347 2348 /* 2349 * The histogram is only big enough to record blocks up to 2350 * SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last, 2351 * "other", bucket. 2352 */ 2353 int idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT; 2354 idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1); 2355 zb->zb_psize_histogram[idx]++; 2356 2357 zb->zb_gangs += BP_COUNT_GANG(bp); 2358 2359 switch (BP_GET_NDVAS(bp)) { 2360 case 2: 2361 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 2362 DVA_GET_VDEV(&bp->blk_dva[1])) 2363 zb->zb_ditto_samevdev++; 2364 break; 2365 case 3: 2366 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 2367 DVA_GET_VDEV(&bp->blk_dva[1])) + 2368 (DVA_GET_VDEV(&bp->blk_dva[0]) == 2369 DVA_GET_VDEV(&bp->blk_dva[2])) + 2370 (DVA_GET_VDEV(&bp->blk_dva[1]) == 2371 DVA_GET_VDEV(&bp->blk_dva[2])); 2372 if (equal != 0) 2373 zb->zb_ditto_samevdev++; 2374 break; 2375 } 2376 2377 } 2378 2379 if (BP_IS_EMBEDDED(bp)) { 2380 zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++; 2381 zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)] 2382 [BPE_GET_PSIZE(bp)]++; 2383 return; 2384 } 2385 2386 if (dump_opt['L']) 2387 return; 2388 2389 if (BP_GET_DEDUP(bp)) { 2390 ddt_t *ddt; 2391 ddt_entry_t *dde; 2392 2393 ddt = ddt_select(zcb->zcb_spa, bp); 2394 ddt_enter(ddt); 2395 dde = ddt_lookup(ddt, bp, B_FALSE); 2396 2397 if (dde == NULL) { 2398 refcnt = 0; 2399 } else { 2400 ddt_phys_t *ddp = ddt_phys_select(dde, bp); 2401 ddt_phys_decref(ddp); 2402 refcnt = ddp->ddp_refcnt; 2403 if (ddt_phys_total_refcnt(dde) == 0) 2404 ddt_remove(ddt, dde); 2405 } 2406 ddt_exit(ddt); 2407 } 2408 2409 VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa, 2410 refcnt ? 
0 : spa_first_txg(zcb->zcb_spa), 2411 bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0); 2412 } 2413 2414 static void 2415 zdb_blkptr_done(zio_t *zio) 2416 { 2417 spa_t *spa = zio->io_spa; 2418 blkptr_t *bp = zio->io_bp; 2419 int ioerr = zio->io_error; 2420 zdb_cb_t *zcb = zio->io_private; 2421 zbookmark_phys_t *zb = &zio->io_bookmark; 2422 2423 zio_data_buf_free(zio->io_data, zio->io_size); 2424 2425 mutex_enter(&spa->spa_scrub_lock); 2426 spa->spa_scrub_inflight--; 2427 cv_broadcast(&spa->spa_scrub_io_cv); 2428 2429 if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2430 char blkbuf[BP_SPRINTF_LEN]; 2431 2432 zcb->zcb_haderrors = 1; 2433 zcb->zcb_errors[ioerr]++; 2434 2435 if (dump_opt['b'] >= 2) 2436 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2437 else 2438 blkbuf[0] = '\0'; 2439 2440 (void) printf("zdb_blkptr_cb: " 2441 "Got error %d reading " 2442 "<%llu, %llu, %lld, %llx> %s -- skipping\n", 2443 ioerr, 2444 (u_longlong_t)zb->zb_objset, 2445 (u_longlong_t)zb->zb_object, 2446 (u_longlong_t)zb->zb_level, 2447 (u_longlong_t)zb->zb_blkid, 2448 blkbuf); 2449 } 2450 mutex_exit(&spa->spa_scrub_lock); 2451 } 2452 2453 static int 2454 zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2455 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2456 { 2457 zdb_cb_t *zcb = arg; 2458 dmu_object_type_t type; 2459 boolean_t is_metadata; 2460 2461 if (bp == NULL) 2462 return (0); 2463 2464 if (dump_opt['b'] >= 5 && bp->blk_birth > 0) { 2465 char blkbuf[BP_SPRINTF_LEN]; 2466 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2467 (void) printf("objset %llu object %llu " 2468 "level %lld offset 0x%llx %s\n", 2469 (u_longlong_t)zb->zb_objset, 2470 (u_longlong_t)zb->zb_object, 2471 (longlong_t)zb->zb_level, 2472 (u_longlong_t)blkid2offset(dnp, bp, zb), 2473 blkbuf); 2474 } 2475 2476 if (BP_IS_HOLE(bp)) 2477 return (0); 2478 2479 type = BP_GET_TYPE(bp); 2480 2481 zdb_count_block(zcb, zilog, bp, 2482 (type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type); 2483 2484 is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)); 2485 2486 if (!BP_IS_EMBEDDED(bp) && 2487 (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) { 2488 size_t size = BP_GET_PSIZE(bp); 2489 void *data = zio_data_buf_alloc(size); 2490 int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW; 2491 2492 /* If it's an intent log block, failure is expected. 
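 * The ZIO_FLAG_SPECULATIVE flag set below makes zdb_blkptr_done()
 * skip both the error message and the error counters for such blocks.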
*/ 2493 if (zb->zb_level == ZB_ZIL_LEVEL) 2494 flags |= ZIO_FLAG_SPECULATIVE; 2495 2496 mutex_enter(&spa->spa_scrub_lock); 2497 while (spa->spa_scrub_inflight > max_inflight) 2498 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2499 spa->spa_scrub_inflight++; 2500 mutex_exit(&spa->spa_scrub_lock); 2501 2502 zio_nowait(zio_read(NULL, spa, bp, data, size, 2503 zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb)); 2504 } 2505 2506 zcb->zcb_readfails = 0; 2507 2508 /* only call gethrtime() every 100 blocks */ 2509 static int iters; 2510 if (++iters > 100) 2511 iters = 0; 2512 else 2513 return (0); 2514 2515 if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) { 2516 uint64_t now = gethrtime(); 2517 char buf[10]; 2518 uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize; 2519 int kb_per_sec = 2520 1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000)); 2521 int sec_remaining = 2522 (zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec; 2523 2524 /* make sure nicenum has enough space */ 2525 CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ); 2526 2527 zfs_nicenum(bytes, buf, sizeof (buf)); 2528 (void) fprintf(stderr, 2529 "\r%5s completed (%4dMB/s) " 2530 "estimated time remaining: %uhr %02umin %02usec ", 2531 buf, kb_per_sec / 1024, 2532 sec_remaining / 60 / 60, 2533 sec_remaining / 60 % 60, 2534 sec_remaining % 60); 2535 2536 zcb->zcb_lastprint = now; 2537 } 2538 2539 return (0); 2540 } 2541 2542 static void 2543 zdb_leak(void *arg, uint64_t start, uint64_t size) 2544 { 2545 vdev_t *vd = arg; 2546 2547 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n", 2548 (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size); 2549 } 2550 2551 static metaslab_ops_t zdb_metaslab_ops = { 2552 NULL /* alloc */ 2553 }; 2554 2555 static void 2556 zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb) 2557 { 2558 ddt_bookmark_t ddb = { 0 }; 2559 ddt_entry_t dde; 2560 int error; 2561 2562 while ((error = ddt_walk(spa, &ddb, &dde)) == 0) { 2563 blkptr_t blk; 2564 ddt_phys_t *ddp = dde.dde_phys; 2565 2566 if (ddb.ddb_class == DDT_CLASS_UNIQUE) 2567 return; 2568 2569 ASSERT(ddt_phys_total_refcnt(&dde) > 1); 2570 2571 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2572 if (ddp->ddp_phys_birth == 0) 2573 continue; 2574 ddt_bp_create(ddb.ddb_checksum, 2575 &dde.dde_key, ddp, &blk); 2576 if (p == DDT_PHYS_DITTO) { 2577 zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO); 2578 } else { 2579 zcb->zcb_dedup_asize += 2580 BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1); 2581 zcb->zcb_dedup_blocks++; 2582 } 2583 } 2584 if (!dump_opt['L']) { 2585 ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum]; 2586 ddt_enter(ddt); 2587 VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL); 2588 ddt_exit(ddt); 2589 } 2590 } 2591 2592 ASSERT(error == ENOENT); 2593 } 2594 2595 static void 2596 zdb_leak_init(spa_t *spa, zdb_cb_t *zcb) 2597 { 2598 zcb->zcb_spa = spa; 2599 2600 if (!dump_opt['L']) { 2601 vdev_t *rvd = spa->spa_root_vdev; 2602 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2603 vdev_t *vd = rvd->vdev_child[c]; 2604 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 2605 metaslab_t *msp = vd->vdev_ms[m]; 2606 mutex_enter(&msp->ms_lock); 2607 metaslab_unload(msp); 2608 2609 /* 2610 * For leak detection, we overload the metaslab 2611 * ms_tree to contain allocated segments 2612 * instead of free segments. As a result, 2613 * we can't use the normal metaslab_load/unload 2614 * interfaces. 
2615 */ 2616 if (msp->ms_sm != NULL) { 2617 (void) fprintf(stderr, 2618 "\rloading space map for " 2619 "vdev %llu of %llu, " 2620 "metaslab %llu of %llu ...", 2621 (longlong_t)c, 2622 (longlong_t)rvd->vdev_children, 2623 (longlong_t)m, 2624 (longlong_t)vd->vdev_ms_count); 2625 2626 msp->ms_ops = &zdb_metaslab_ops; 2627 2628 /* 2629 * We don't want to spend the CPU 2630 * manipulating the size-ordered 2631 * tree, so clear the range_tree 2632 * ops. 2633 */ 2634 msp->ms_tree->rt_ops = NULL; 2635 VERIFY0(space_map_load(msp->ms_sm, 2636 msp->ms_tree, SM_ALLOC)); 2637 msp->ms_loaded = B_TRUE; 2638 } 2639 mutex_exit(&msp->ms_lock); 2640 } 2641 } 2642 (void) fprintf(stderr, "\n"); 2643 } 2644 2645 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 2646 2647 zdb_ddt_leak_init(spa, zcb); 2648 2649 spa_config_exit(spa, SCL_CONFIG, FTAG); 2650 } 2651 2652 static void 2653 zdb_leak_fini(spa_t *spa) 2654 { 2655 if (!dump_opt['L']) { 2656 vdev_t *rvd = spa->spa_root_vdev; 2657 for (int c = 0; c < rvd->vdev_children; c++) { 2658 vdev_t *vd = rvd->vdev_child[c]; 2659 for (int m = 0; m < vd->vdev_ms_count; m++) { 2660 metaslab_t *msp = vd->vdev_ms[m]; 2661 mutex_enter(&msp->ms_lock); 2662 2663 /* 2664 * The ms_tree has been overloaded to 2665 * contain allocated segments. Now that we 2666 * finished traversing all blocks, any 2667 * block that remains in the ms_tree 2668 * represents an allocated block that we 2669 * did not claim during the traversal. 2670 * Claimed blocks would have been removed 2671 * from the ms_tree. 2672 */ 2673 range_tree_vacate(msp->ms_tree, zdb_leak, vd); 2674 msp->ms_loaded = B_FALSE; 2675 2676 mutex_exit(&msp->ms_lock); 2677 } 2678 } 2679 } 2680 } 2681 2682 /* ARGSUSED */ 2683 static int 2684 count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2685 { 2686 zdb_cb_t *zcb = arg; 2687 2688 if (dump_opt['b'] >= 5) { 2689 char blkbuf[BP_SPRINTF_LEN]; 2690 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2691 (void) printf("[%s] %s\n", 2692 "deferred free", blkbuf); 2693 } 2694 zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED); 2695 return (0); 2696 } 2697 2698 static int 2699 dump_block_stats(spa_t *spa) 2700 { 2701 zdb_cb_t zcb = { 0 }; 2702 zdb_blkstats_t *zb, *tzb; 2703 uint64_t norm_alloc, norm_space, total_alloc, total_found; 2704 int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD; 2705 boolean_t leaks = B_FALSE; 2706 2707 (void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n", 2708 (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", 2709 (dump_opt['c'] == 1) ? "metadata " : "", 2710 dump_opt['c'] ? "checksums " : "", 2711 (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", 2712 !dump_opt['L'] ? "nothing leaked " : ""); 2713 2714 /* 2715 * Load all space maps as SM_ALLOC maps, then traverse the pool 2716 * claiming each block we discover. If the pool is perfectly 2717 * consistent, the space maps will be empty when we're done. 2718 * Anything left over is a leak; any block we can't claim (because 2719 * it's not part of any space map) is a double allocation, 2720 * reference to a freed block, or an unclaimed log block. 2721 */ 2722 zdb_leak_init(spa, &zcb); 2723 2724 /* 2725 * If there's a deferred-free bplist, process that first. 
2726 */ 2727 (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj, 2728 count_block_cb, &zcb, NULL); 2729 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 2730 (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj, 2731 count_block_cb, &zcb, NULL); 2732 } 2733 if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 2734 VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset, 2735 spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb, 2736 &zcb, NULL)); 2737 } 2738 2739 if (dump_opt['c'] > 1) 2740 flags |= TRAVERSE_PREFETCH_DATA; 2741 2742 zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa)); 2743 zcb.zcb_start = zcb.zcb_lastprint = gethrtime(); 2744 zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb); 2745 2746 /* 2747 * If we've traversed the data blocks then we need to wait for those 2748 * I/Os to complete. We leverage "The Godfather" zio to wait on 2749 * all async I/Os to complete. 2750 */ 2751 if (dump_opt['c']) { 2752 for (int i = 0; i < max_ncpus; i++) { 2753 (void) zio_wait(spa->spa_async_zio_root[i]); 2754 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 2755 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2756 ZIO_FLAG_GODFATHER); 2757 } 2758 } 2759 2760 if (zcb.zcb_haderrors) { 2761 (void) printf("\nError counts:\n\n"); 2762 (void) printf("\t%5s %s\n", "errno", "count"); 2763 for (int e = 0; e < 256; e++) { 2764 if (zcb.zcb_errors[e] != 0) { 2765 (void) printf("\t%5d %llu\n", 2766 e, (u_longlong_t)zcb.zcb_errors[e]); 2767 } 2768 } 2769 } 2770 2771 /* 2772 * Report any leaked segments. 2773 */ 2774 zdb_leak_fini(spa); 2775 2776 tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL]; 2777 2778 norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 2779 norm_space = metaslab_class_get_space(spa_normal_class(spa)); 2780 2781 total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa)); 2782 total_found = tzb->zb_asize - zcb.zcb_dedup_asize; 2783 2784 if (total_found == total_alloc) { 2785 if (!dump_opt['L']) 2786 (void) printf("\n\tNo leaks (block sum matches space" 2787 " maps exactly)\n"); 2788 } else { 2789 (void) printf("block traversal size %llu != alloc %llu " 2790 "(%s %lld)\n", 2791 (u_longlong_t)total_found, 2792 (u_longlong_t)total_alloc, 2793 (dump_opt['L']) ? 
"unreachable" : "leaked", 2794 (longlong_t)(total_alloc - total_found)); 2795 leaks = B_TRUE; 2796 } 2797 2798 if (tzb->zb_count == 0) 2799 return (2); 2800 2801 (void) printf("\n"); 2802 (void) printf("\tbp count: %10llu\n", 2803 (u_longlong_t)tzb->zb_count); 2804 (void) printf("\tganged count: %10llu\n", 2805 (longlong_t)tzb->zb_gangs); 2806 (void) printf("\tbp logical: %10llu avg: %6llu\n", 2807 (u_longlong_t)tzb->zb_lsize, 2808 (u_longlong_t)(tzb->zb_lsize / tzb->zb_count)); 2809 (void) printf("\tbp physical: %10llu avg:" 2810 " %6llu compression: %6.2f\n", 2811 (u_longlong_t)tzb->zb_psize, 2812 (u_longlong_t)(tzb->zb_psize / tzb->zb_count), 2813 (double)tzb->zb_lsize / tzb->zb_psize); 2814 (void) printf("\tbp allocated: %10llu avg:" 2815 " %6llu compression: %6.2f\n", 2816 (u_longlong_t)tzb->zb_asize, 2817 (u_longlong_t)(tzb->zb_asize / tzb->zb_count), 2818 (double)tzb->zb_lsize / tzb->zb_asize); 2819 (void) printf("\tbp deduped: %10llu ref>1:" 2820 " %6llu deduplication: %6.2f\n", 2821 (u_longlong_t)zcb.zcb_dedup_asize, 2822 (u_longlong_t)zcb.zcb_dedup_blocks, 2823 (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0); 2824 (void) printf("\tSPA allocated: %10llu used: %5.2f%%\n", 2825 (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space); 2826 2827 for (bp_embedded_type_t i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) { 2828 if (zcb.zcb_embedded_blocks[i] == 0) 2829 continue; 2830 (void) printf("\n"); 2831 (void) printf("\tadditional, non-pointer bps of type %u: " 2832 "%10llu\n", 2833 i, (u_longlong_t)zcb.zcb_embedded_blocks[i]); 2834 2835 if (dump_opt['b'] >= 3) { 2836 (void) printf("\t number of (compressed) bytes: " 2837 "number of bps\n"); 2838 dump_histogram(zcb.zcb_embedded_histogram[i], 2839 sizeof (zcb.zcb_embedded_histogram[i]) / 2840 sizeof (zcb.zcb_embedded_histogram[i][0]), 0); 2841 } 2842 } 2843 2844 if (tzb->zb_ditto_samevdev != 0) { 2845 (void) printf("\tDittoed blocks on same vdev: %llu\n", 2846 (longlong_t)tzb->zb_ditto_samevdev); 2847 } 2848 2849 if (dump_opt['b'] >= 2) { 2850 int l, t, level; 2851 (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE" 2852 "\t avg\t comp\t%%Total\tType\n"); 2853 2854 for (t = 0; t <= ZDB_OT_TOTAL; t++) { 2855 char csize[32], lsize[32], psize[32], asize[32]; 2856 char avg[32], gang[32]; 2857 char *typename; 2858 2859 /* make sure nicenum has enough space */ 2860 CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ); 2861 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ); 2862 CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ); 2863 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ); 2864 CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ); 2865 CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ); 2866 2867 if (t < DMU_OT_NUMTYPES) 2868 typename = dmu_ot[t].ot_name; 2869 else 2870 typename = zdb_ot_extname[t - DMU_OT_NUMTYPES]; 2871 2872 if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) { 2873 (void) printf("%6s\t%5s\t%5s\t%5s" 2874 "\t%5s\t%5s\t%6s\t%s\n", 2875 "-", 2876 "-", 2877 "-", 2878 "-", 2879 "-", 2880 "-", 2881 "-", 2882 typename); 2883 continue; 2884 } 2885 2886 for (l = ZB_TOTAL - 1; l >= -1; l--) { 2887 level = (l == -1 ? 
ZB_TOTAL : l); 2888 zb = &zcb.zcb_type[level][t]; 2889 2890 if (zb->zb_asize == 0) 2891 continue; 2892 2893 if (dump_opt['b'] < 3 && level != ZB_TOTAL) 2894 continue; 2895 2896 if (level == 0 && zb->zb_asize == 2897 zcb.zcb_type[ZB_TOTAL][t].zb_asize) 2898 continue; 2899 2900 zdb_nicenum(zb->zb_count, csize, 2901 sizeof (csize)); 2902 zdb_nicenum(zb->zb_lsize, lsize, 2903 sizeof (lsize)); 2904 zdb_nicenum(zb->zb_psize, psize, 2905 sizeof (psize)); 2906 zdb_nicenum(zb->zb_asize, asize, 2907 sizeof (asize)); 2908 zdb_nicenum(zb->zb_asize / zb->zb_count, avg, 2909 sizeof (avg)); 2910 zdb_nicenum(zb->zb_gangs, gang, sizeof (gang)); 2911 2912 (void) printf("%6s\t%5s\t%5s\t%5s\t%5s" 2913 "\t%5.2f\t%6.2f\t", 2914 csize, lsize, psize, asize, avg, 2915 (double)zb->zb_lsize / zb->zb_psize, 2916 100.0 * zb->zb_asize / tzb->zb_asize); 2917 2918 if (level == ZB_TOTAL) 2919 (void) printf("%s\n", typename); 2920 else 2921 (void) printf(" L%d %s\n", 2922 level, typename); 2923 2924 if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) { 2925 (void) printf("\t number of ganged " 2926 "blocks: %s\n", gang); 2927 } 2928 2929 if (dump_opt['b'] >= 4) { 2930 (void) printf("psize " 2931 "(in 512-byte sectors): " 2932 "number of blocks\n"); 2933 dump_histogram(zb->zb_psize_histogram, 2934 PSIZE_HISTO_SIZE, 0); 2935 } 2936 } 2937 } 2938 } 2939 2940 (void) printf("\n"); 2941 2942 if (leaks) 2943 return (2); 2944 2945 if (zcb.zcb_haderrors) 2946 return (3); 2947 2948 return (0); 2949 } 2950 2951 typedef struct zdb_ddt_entry { 2952 ddt_key_t zdde_key; 2953 uint64_t zdde_ref_blocks; 2954 uint64_t zdde_ref_lsize; 2955 uint64_t zdde_ref_psize; 2956 uint64_t zdde_ref_dsize; 2957 avl_node_t zdde_node; 2958 } zdb_ddt_entry_t; 2959 2960 /* ARGSUSED */ 2961 static int 2962 zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2963 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2964 { 2965 avl_tree_t *t = arg; 2966 avl_index_t where; 2967 zdb_ddt_entry_t *zdde, zdde_search; 2968 2969 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2970 return (0); 2971 2972 if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) { 2973 (void) printf("traversing objset %llu, %llu objects, " 2974 "%lu blocks so far\n", 2975 (u_longlong_t)zb->zb_objset, 2976 (u_longlong_t)BP_GET_FILL(bp), 2977 avl_numnodes(t)); 2978 } 2979 2980 if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF || 2981 BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp))) 2982 return (0); 2983 2984 ddt_key_fill(&zdde_search.zdde_key, bp); 2985 2986 zdde = avl_find(t, &zdde_search, &where); 2987 2988 if (zdde == NULL) { 2989 zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL); 2990 zdde->zdde_key = zdde_search.zdde_key; 2991 avl_insert(t, zdde, where); 2992 } 2993 2994 zdde->zdde_ref_blocks += 1; 2995 zdde->zdde_ref_lsize += BP_GET_LSIZE(bp); 2996 zdde->zdde_ref_psize += BP_GET_PSIZE(bp); 2997 zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp); 2998 2999 return (0); 3000 } 3001 3002 static void 3003 dump_simulated_ddt(spa_t *spa) 3004 { 3005 avl_tree_t t; 3006 void *cookie = NULL; 3007 zdb_ddt_entry_t *zdde; 3008 ddt_histogram_t ddh_total = { 0 }; 3009 ddt_stat_t dds_total = { 0 }; 3010 3011 avl_create(&t, ddt_entry_compare, 3012 sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node)); 3013 3014 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3015 3016 (void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, 3017 zdb_ddt_add_cb, &t); 3018 3019 spa_config_exit(spa, SCL_CONFIG, FTAG); 3020 3021 while ((zdde = 
avl_destroy_nodes(&t, &cookie)) != NULL) { 3022 ddt_stat_t dds; 3023 uint64_t refcnt = zdde->zdde_ref_blocks; 3024 ASSERT(refcnt != 0); 3025 3026 dds.dds_blocks = zdde->zdde_ref_blocks / refcnt; 3027 dds.dds_lsize = zdde->zdde_ref_lsize / refcnt; 3028 dds.dds_psize = zdde->zdde_ref_psize / refcnt; 3029 dds.dds_dsize = zdde->zdde_ref_dsize / refcnt; 3030 3031 dds.dds_ref_blocks = zdde->zdde_ref_blocks; 3032 dds.dds_ref_lsize = zdde->zdde_ref_lsize; 3033 dds.dds_ref_psize = zdde->zdde_ref_psize; 3034 dds.dds_ref_dsize = zdde->zdde_ref_dsize; 3035 3036 ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1], 3037 &dds, 0); 3038 3039 umem_free(zdde, sizeof (*zdde)); 3040 } 3041 3042 avl_destroy(&t); 3043 3044 ddt_histogram_stat(&dds_total, &ddh_total); 3045 3046 (void) printf("Simulated DDT histogram:\n"); 3047 3048 zpool_dump_ddt(&dds_total, &ddh_total); 3049 3050 dump_dedup_ratio(&dds_total); 3051 } 3052 3053 static void 3054 dump_zpool(spa_t *spa) 3055 { 3056 dsl_pool_t *dp = spa_get_dsl(spa); 3057 int rc = 0; 3058 3059 if (dump_opt['S']) { 3060 dump_simulated_ddt(spa); 3061 return; 3062 } 3063 3064 if (!dump_opt['e'] && dump_opt['C'] > 1) { 3065 (void) printf("\nCached configuration:\n"); 3066 dump_nvlist(spa->spa_config, 8); 3067 } 3068 3069 if (dump_opt['C']) 3070 dump_config(spa); 3071 3072 if (dump_opt['u']) 3073 dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n"); 3074 3075 if (dump_opt['D']) 3076 dump_all_ddts(spa); 3077 3078 if (dump_opt['d'] > 2 || dump_opt['m']) 3079 dump_metaslabs(spa); 3080 if (dump_opt['M']) 3081 dump_metaslab_groups(spa); 3082 3083 if (dump_opt['d'] || dump_opt['i']) { 3084 dump_dir(dp->dp_meta_objset); 3085 if (dump_opt['d'] >= 3) { 3086 dump_full_bpobj(&spa->spa_deferred_bpobj, 3087 "Deferred frees", 0); 3088 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 3089 dump_full_bpobj( 3090 &spa->spa_dsl_pool->dp_free_bpobj, 3091 "Pool snapshot frees", 0); 3092 } 3093 3094 if (spa_feature_is_active(spa, 3095 SPA_FEATURE_ASYNC_DESTROY)) { 3096 dump_bptree(spa->spa_meta_objset, 3097 spa->spa_dsl_pool->dp_bptree_obj, 3098 "Pool dataset frees"); 3099 } 3100 dump_dtl(spa->spa_root_vdev, 0); 3101 } 3102 (void) dmu_objset_find(spa_name(spa), dump_one_dir, 3103 NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 3104 3105 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 3106 uint64_t refcount; 3107 3108 if (!(spa_feature_table[f].fi_flags & 3109 ZFEATURE_FLAG_PER_DATASET)) { 3110 ASSERT0(dataset_feature_count[f]); 3111 continue; 3112 } 3113 (void) feature_get_refcount(spa, 3114 &spa_feature_table[f], &refcount); 3115 if (dataset_feature_count[f] != refcount) { 3116 (void) printf("%s feature refcount mismatch: " 3117 "%lld datasets != %lld refcount\n", 3118 spa_feature_table[f].fi_uname, 3119 (longlong_t)dataset_feature_count[f], 3120 (longlong_t)refcount); 3121 rc = 2; 3122 } else { 3123 (void) printf("Verified %s feature refcount " 3124 "of %llu is correct\n", 3125 spa_feature_table[f].fi_uname, 3126 (longlong_t)refcount); 3127 } 3128 } 3129 } 3130 if (rc == 0 && (dump_opt['b'] || dump_opt['c'])) 3131 rc = dump_block_stats(spa); 3132 3133 if (rc == 0) 3134 rc = verify_spacemap_refcounts(spa); 3135 3136 if (dump_opt['s']) 3137 show_pool_stats(spa); 3138 3139 if (dump_opt['h']) 3140 dump_history(spa); 3141 3142 if (rc != 0) 3143 exit(rc); 3144 } 3145 3146 #define ZDB_FLAG_CHECKSUM 0x0001 3147 #define ZDB_FLAG_DECOMPRESS 0x0002 3148 #define ZDB_FLAG_BSWAP 0x0004 3149 #define ZDB_FLAG_GBH 0x0008 3150 #define ZDB_FLAG_INDIRECT 0x0010 3151 #define ZDB_FLAG_PHYS 0x0020 3152 
#define ZDB_FLAG_RAW 0x0040
3153 #define ZDB_FLAG_PRINT_BLKPTR 0x0080
3154
3155 int flagbits[256];
3156
3157 static void
3158 zdb_print_blkptr(blkptr_t *bp, int flags)
3159 {
3160 char blkbuf[BP_SPRINTF_LEN];
3161
3162 if (flags & ZDB_FLAG_BSWAP)
3163 byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
3164
3165 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
3166 (void) printf("%s\n", blkbuf);
3167 }
3168
3169 static void
3170 zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
3171 {
3172 int i;
3173
3174 for (i = 0; i < nbps; i++)
3175 zdb_print_blkptr(&bp[i], flags);
3176 }
3177
3178 static void
3179 zdb_dump_gbh(void *buf, int flags)
3180 {
3181 zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
3182 }
3183
3184 static void
3185 zdb_dump_block_raw(void *buf, uint64_t size, int flags)
3186 {
3187 if (flags & ZDB_FLAG_BSWAP)
3188 byteswap_uint64_array(buf, size);
3189 (void) write(1, buf, size);
3190 }
3191
3192 static void
3193 zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
3194 {
3195 uint64_t *d = (uint64_t *)buf;
3196 int nwords = size / sizeof (uint64_t);
3197 int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
3198 int i, j;
3199 char *hdr, *c;
3200
3201
3202 if (do_bswap)
3203 hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
3204 else
3205 hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
3206
3207 (void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
3208
3209 for (i = 0; i < nwords; i += 2) {
3210 (void) printf("%06llx: %016llx %016llx ",
3211 (u_longlong_t)(i * sizeof (uint64_t)),
3212 (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
3213 (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
3214
3215 c = (char *)&d[i];
3216 for (j = 0; j < 2 * sizeof (uint64_t); j++)
3217 (void) printf("%c", isprint(c[j]) ? c[j] : '.');
3218 (void) printf("\n");
3219 }
3220 }
3221
3222 /*
3223 * There are two acceptable formats:
3224 * leaf_name - For example: c1t0d0 or /tmp/ztest.0a
3225 * child[.child]* - For example: 0.1.1
3226 *
3227 * The second form can be used to specify arbitrary vdevs anywhere
3228 * in the hierarchy. For example, in a pool with a mirror of
3229 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
3230 */
3231 static vdev_t *
3232 zdb_vdev_lookup(vdev_t *vdev, char *path)
3233 {
3234 char *s, *p, *q;
3235 int i;
3236
3237 if (vdev == NULL)
3238 return (NULL);
3239
3240 /* First, assume the x.x.x.x format */
3241 i = (int)strtoul(path, &s, 10);
3242 if (s == path || (s && *s != '.' && *s != '\0'))
3243 goto name;
3244 if (i < 0 || i >= vdev->vdev_children)
3245 return (NULL);
3246
3247 vdev = vdev->vdev_child[i];
3248 if (*s == '\0')
3249 return (vdev);
3250 return (zdb_vdev_lookup(vdev, s+1));
3251
3252 name:
3253 for (i = 0; i < vdev->vdev_children; i++) {
3254 vdev_t *vc = vdev->vdev_child[i];
3255
3256 if (vc->vdev_path == NULL) {
3257 vc = zdb_vdev_lookup(vc, path);
3258 if (vc == NULL)
3259 continue;
3260 else
3261 return (vc);
3262 }
3263
3264 p = strrchr(vc->vdev_path, '/');
3265 p = p ? p + 1 : vc->vdev_path;
3266 q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
3267
3268 if (strcmp(vc->vdev_path, path) == 0)
3269 return (vc);
3270 if (strcmp(p, path) == 0)
3271 return (vc);
3272 if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
3273 return (vc);
3274 }
3275
3276 return (NULL);
3277 }
3278
3279 /*
3280 * Read a block from a pool and print it out.
The syntax of the 3281 * block descriptor is: 3282 * 3283 * pool:vdev_specifier:offset:size[:flags] 3284 * 3285 * pool - The name of the pool you wish to read from 3286 * vdev_specifier - Which vdev (see comment for zdb_vdev_lookup) 3287 * offset - offset, in hex, in bytes 3288 * size - Amount of data to read, in hex, in bytes 3289 * flags - A string of characters specifying options 3290 * b: Decode a blkptr at given offset within block 3291 * *c: Calculate and display checksums 3292 * d: Decompress data before dumping 3293 * e: Byteswap data before dumping 3294 * g: Display data as a gang block header 3295 * i: Display as an indirect block 3296 * p: Do I/O to physical offset 3297 * r: Dump raw data to stdout 3298 * 3299 * * = not yet implemented 3300 */ 3301 static void 3302 zdb_read_block(char *thing, spa_t *spa) 3303 { 3304 blkptr_t blk, *bp = &blk; 3305 dva_t *dva = bp->blk_dva; 3306 int flags = 0; 3307 uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0; 3308 zio_t *zio; 3309 vdev_t *vd; 3310 void *pbuf, *lbuf, *buf; 3311 char *s, *p, *dup, *vdev, *flagstr; 3312 int i, error; 3313 3314 dup = strdup(thing); 3315 s = strtok(dup, ":"); 3316 vdev = s ? s : ""; 3317 s = strtok(NULL, ":"); 3318 offset = strtoull(s ? s : "", NULL, 16); 3319 s = strtok(NULL, ":"); 3320 size = strtoull(s ? s : "", NULL, 16); 3321 s = strtok(NULL, ":"); 3322 flagstr = s ? s : ""; 3323 3324 s = NULL; 3325 if (size == 0) 3326 s = "size must not be zero"; 3327 if (!IS_P2ALIGNED(size, DEV_BSIZE)) 3328 s = "size must be a multiple of sector size"; 3329 if (!IS_P2ALIGNED(offset, DEV_BSIZE)) 3330 s = "offset must be a multiple of sector size"; 3331 if (s) { 3332 (void) printf("Invalid block specifier: %s - %s\n", thing, s); 3333 free(dup); 3334 return; 3335 } 3336 3337 for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) { 3338 for (i = 0; flagstr[i]; i++) { 3339 int bit = flagbits[(uchar_t)flagstr[i]]; 3340 3341 if (bit == 0) { 3342 (void) printf("***Invalid flag: %c\n", 3343 flagstr[i]); 3344 continue; 3345 } 3346 flags |= bit; 3347 3348 /* If it's not something with an argument, keep going */ 3349 if ((bit & (ZDB_FLAG_CHECKSUM | 3350 ZDB_FLAG_PRINT_BLKPTR)) == 0) 3351 continue; 3352 3353 p = &flagstr[i + 1]; 3354 if (bit == ZDB_FLAG_PRINT_BLKPTR) 3355 blkptr_offset = strtoull(p, &p, 16); 3356 if (*p != ':' && *p != '\0') { 3357 (void) printf("***Invalid flag arg: '%s'\n", s); 3358 free(dup); 3359 return; 3360 } 3361 } 3362 } 3363 3364 vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev); 3365 if (vd == NULL) { 3366 (void) printf("***Invalid vdev: %s\n", vdev); 3367 free(dup); 3368 return; 3369 } else { 3370 if (vd->vdev_path) 3371 (void) fprintf(stderr, "Found vdev: %s\n", 3372 vd->vdev_path); 3373 else 3374 (void) fprintf(stderr, "Found vdev type: %s\n", 3375 vd->vdev_ops->vdev_op_type); 3376 } 3377 3378 psize = size; 3379 lsize = size; 3380 3381 pbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3382 lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3383 3384 BP_ZERO(bp); 3385 3386 DVA_SET_VDEV(&dva[0], vd->vdev_id); 3387 DVA_SET_OFFSET(&dva[0], offset); 3388 DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH)); 3389 DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize)); 3390 3391 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL); 3392 3393 BP_SET_LSIZE(bp, lsize); 3394 BP_SET_PSIZE(bp, psize); 3395 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF); 3396 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF); 3397 BP_SET_TYPE(bp, DMU_OT_NONE); 3398 BP_SET_LEVEL(bp, 0); 3399 BP_SET_DEDUP(bp, 0); 3400 BP_SET_BYTEORDER(bp, 
ZFS_HOST_BYTEORDER); 3401 3402 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3403 zio = zio_root(spa, NULL, NULL, 0); 3404 3405 if (vd == vd->vdev_top) { 3406 /* 3407 * Treat this as a normal block read. 3408 */ 3409 zio_nowait(zio_read(zio, spa, bp, pbuf, psize, NULL, NULL, 3410 ZIO_PRIORITY_SYNC_READ, 3411 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL)); 3412 } else { 3413 /* 3414 * Treat this as a vdev child I/O. 3415 */ 3416 zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pbuf, psize, 3417 ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ, 3418 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE | 3419 ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY | 3420 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL, NULL)); 3421 } 3422 3423 error = zio_wait(zio); 3424 spa_config_exit(spa, SCL_STATE, FTAG); 3425 3426 if (error) { 3427 (void) printf("Read of %s failed, error: %d\n", thing, error); 3428 goto out; 3429 } 3430 3431 if (flags & ZDB_FLAG_DECOMPRESS) { 3432 /* 3433 * We don't know how the data was compressed, so just try 3434 * every decompress function at every inflated blocksize. 3435 */ 3436 enum zio_compress c; 3437 void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3438 void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 3439 3440 bcopy(pbuf, pbuf2, psize); 3441 3442 VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf + psize, 3443 SPA_MAXBLOCKSIZE - psize) == 0); 3444 3445 VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize, 3446 SPA_MAXBLOCKSIZE - psize) == 0); 3447 3448 for (lsize = SPA_MAXBLOCKSIZE; lsize > psize; 3449 lsize -= SPA_MINBLOCKSIZE) { 3450 for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) { 3451 if (zio_decompress_data(c, pbuf, lbuf, 3452 psize, lsize) == 0 && 3453 zio_decompress_data(c, pbuf2, lbuf2, 3454 psize, lsize) == 0 && 3455 bcmp(lbuf, lbuf2, lsize) == 0) 3456 break; 3457 } 3458 if (c != ZIO_COMPRESS_FUNCTIONS) 3459 break; 3460 lsize -= SPA_MINBLOCKSIZE; 3461 } 3462 3463 umem_free(pbuf2, SPA_MAXBLOCKSIZE); 3464 umem_free(lbuf2, SPA_MAXBLOCKSIZE); 3465 3466 if (lsize <= psize) { 3467 (void) printf("Decompress of %s failed\n", thing); 3468 goto out; 3469 } 3470 buf = lbuf; 3471 size = lsize; 3472 } else { 3473 buf = pbuf; 3474 size = psize; 3475 } 3476 3477 if (flags & ZDB_FLAG_PRINT_BLKPTR) 3478 zdb_print_blkptr((blkptr_t *)(void *) 3479 ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags); 3480 else if (flags & ZDB_FLAG_RAW) 3481 zdb_dump_block_raw(buf, size, flags); 3482 else if (flags & ZDB_FLAG_INDIRECT) 3483 zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t), 3484 flags); 3485 else if (flags & ZDB_FLAG_GBH) 3486 zdb_dump_gbh(buf, flags); 3487 else 3488 zdb_dump_block(thing, buf, size, flags); 3489 3490 out: 3491 umem_free(pbuf, SPA_MAXBLOCKSIZE); 3492 umem_free(lbuf, SPA_MAXBLOCKSIZE); 3493 free(dup); 3494 } 3495 3496 static boolean_t 3497 pool_match(nvlist_t *cfg, char *tgt) 3498 { 3499 uint64_t v, guid = strtoull(tgt, NULL, 0); 3500 char *s; 3501 3502 if (guid != 0) { 3503 if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0) 3504 return (v == guid); 3505 } else { 3506 if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0) 3507 return (strcmp(s, tgt) == 0); 3508 } 3509 return (B_FALSE); 3510 } 3511 3512 static char * 3513 find_zpool(char **target, nvlist_t **configp, int dirc, char **dirv) 3514 { 3515 nvlist_t *pools; 3516 nvlist_t *match = NULL; 3517 char *name = NULL; 3518 char *sepp = NULL; 3519 char sep = '\0'; 3520 int count = 0; 3521 importargs_t args = { 0 }; 3522 3523 args.paths = dirc; 3524 args.path = dirv; 3525 args.can_be_active = B_TRUE; 3526 
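	/*
	 * The target may be "pool", "pool/dataset" or "pool@snap"; cut it
	 * at the first '/' or '@' so that only the pool name is matched
	 * against the import search results.  The separator is restored
	 * further below once the lookup is done.
	 */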
3527 if ((sepp = strpbrk(*target, "/@")) != NULL) { 3528 sep = *sepp; 3529 *sepp = '\0'; 3530 } 3531 3532 pools = zpool_search_import(g_zfs, &args); 3533 3534 if (pools != NULL) { 3535 nvpair_t *elem = NULL; 3536 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 3537 verify(nvpair_value_nvlist(elem, configp) == 0); 3538 if (pool_match(*configp, *target)) { 3539 count++; 3540 if (match != NULL) { 3541 /* print previously found config */ 3542 if (name != NULL) { 3543 (void) printf("%s\n", name); 3544 dump_nvlist(match, 8); 3545 name = NULL; 3546 } 3547 (void) printf("%s\n", 3548 nvpair_name(elem)); 3549 dump_nvlist(*configp, 8); 3550 } else { 3551 match = *configp; 3552 name = nvpair_name(elem); 3553 } 3554 } 3555 } 3556 } 3557 if (count > 1) 3558 (void) fatal("\tMatched %d pools - use pool GUID " 3559 "instead of pool name or \n" 3560 "\tpool name part of a dataset name to select pool", count); 3561 3562 if (sepp) 3563 *sepp = sep; 3564 /* 3565 * If pool GUID was specified for pool id, replace it with pool name 3566 */ 3567 if (name && (strstr(*target, name) != *target)) { 3568 int sz = 1 + strlen(name) + ((sepp) ? strlen(sepp) : 0); 3569 3570 *target = umem_alloc(sz, UMEM_NOFAIL); 3571 (void) snprintf(*target, sz, "%s%s", name, sepp ? sepp : ""); 3572 } 3573 3574 *configp = name ? match : NULL; 3575 3576 return (name); 3577 } 3578 3579 int 3580 main(int argc, char **argv) 3581 { 3582 int i, c; 3583 struct rlimit rl = { 1024, 1024 }; 3584 spa_t *spa = NULL; 3585 objset_t *os = NULL; 3586 int dump_all = 1; 3587 int verbose = 0; 3588 int error = 0; 3589 char **searchdirs = NULL; 3590 int nsearch = 0; 3591 char *target; 3592 nvlist_t *policy = NULL; 3593 uint64_t max_txg = UINT64_MAX; 3594 int rewind = ZPOOL_NEVER_REWIND; 3595 char *spa_config_path_env; 3596 3597 (void) setrlimit(RLIMIT_NOFILE, &rl); 3598 (void) enable_extended_FILE_stdio(-1, -1); 3599 3600 dprintf_setup(&argc, argv); 3601 3602 /* 3603 * If there is an environment variable SPA_CONFIG_PATH it overrides 3604 * default spa_config_path setting. If -U flag is specified it will 3605 * override this environment variable settings once again. 
3606 */ 3607 spa_config_path_env = getenv("SPA_CONFIG_PATH"); 3608 if (spa_config_path_env != NULL) 3609 spa_config_path = spa_config_path_env; 3610 3611 while ((c = getopt(argc, argv, 3612 "bcdhilmMI:suCDRSAFLXx:evp:t:U:P")) != -1) { 3613 switch (c) { 3614 case 'b': 3615 case 'c': 3616 case 'd': 3617 case 'h': 3618 case 'i': 3619 case 'l': 3620 case 'm': 3621 case 's': 3622 case 'u': 3623 case 'C': 3624 case 'D': 3625 case 'M': 3626 case 'R': 3627 case 'S': 3628 dump_opt[c]++; 3629 dump_all = 0; 3630 break; 3631 case 'A': 3632 case 'F': 3633 case 'L': 3634 case 'X': 3635 case 'e': 3636 case 'P': 3637 dump_opt[c]++; 3638 break; 3639 case 'I': 3640 max_inflight = strtoull(optarg, NULL, 0); 3641 if (max_inflight == 0) { 3642 (void) fprintf(stderr, "maximum number " 3643 "of inflight I/Os must be greater " 3644 "than 0\n"); 3645 usage(); 3646 } 3647 break; 3648 case 'p': 3649 if (searchdirs == NULL) { 3650 searchdirs = umem_alloc(sizeof (char *), 3651 UMEM_NOFAIL); 3652 } else { 3653 char **tmp = umem_alloc((nsearch + 1) * 3654 sizeof (char *), UMEM_NOFAIL); 3655 bcopy(searchdirs, tmp, nsearch * 3656 sizeof (char *)); 3657 umem_free(searchdirs, 3658 nsearch * sizeof (char *)); 3659 searchdirs = tmp; 3660 } 3661 searchdirs[nsearch++] = optarg; 3662 break; 3663 case 't': 3664 max_txg = strtoull(optarg, NULL, 0); 3665 if (max_txg < TXG_INITIAL) { 3666 (void) fprintf(stderr, "incorrect txg " 3667 "specified: %s\n", optarg); 3668 usage(); 3669 } 3670 break; 3671 case 'U': 3672 spa_config_path = optarg; 3673 break; 3674 case 'v': 3675 verbose++; 3676 break; 3677 case 'x': 3678 vn_dumpdir = optarg; 3679 break; 3680 default: 3681 usage(); 3682 break; 3683 } 3684 } 3685 3686 if (!dump_opt['e'] && searchdirs != NULL) { 3687 (void) fprintf(stderr, "-p option requires use of -e\n"); 3688 usage(); 3689 } 3690 3691 /* 3692 * ZDB does not typically re-read blocks; therefore limit the ARC 3693 * to 256 MB, which can be used entirely for metadata. 3694 */ 3695 zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024; 3696 3697 /* 3698 * "zdb -c" uses checksum-verifying scrub i/os which are async reads. 3699 * "zdb -b" uses traversal prefetch which uses async reads. 3700 * For good performance, let several of them be active at once. 3701 */ 3702 zfs_vdev_async_read_max_active = 10; 3703 3704 kernel_init(FREAD); 3705 g_zfs = libzfs_init(); 3706 ASSERT(g_zfs != NULL); 3707 3708 if (dump_all) 3709 verbose = MAX(verbose, 1); 3710 3711 for (c = 0; c < 256; c++) { 3712 if (dump_all && !strchr("elAFLRSXP", c)) 3713 dump_opt[c] = 1; 3714 if (dump_opt[c]) 3715 dump_opt[c] += verbose; 3716 } 3717 3718 aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2); 3719 zfs_recover = (dump_opt['A'] > 1); 3720 3721 argc -= optind; 3722 argv += optind; 3723 3724 if (argc < 2 && dump_opt['R']) 3725 usage(); 3726 if (argc < 1) { 3727 if (!dump_opt['e'] && dump_opt['C']) { 3728 dump_cachefile(spa_config_path); 3729 return (0); 3730 } 3731 usage(); 3732 } 3733 3734 if (dump_opt['l']) { 3735 dump_label(argv[0]); 3736 return (0); 3737 } 3738 3739 if (dump_opt['X'] || dump_opt['F']) 3740 rewind = ZPOOL_DO_REWIND | 3741 (dump_opt['X'] ? 
ZPOOL_EXTREME_REWIND : 0); 3742 3743 if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 || 3744 nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg) != 0 || 3745 nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind) != 0) 3746 fatal("internal error: %s", strerror(ENOMEM)); 3747 3748 error = 0; 3749 target = argv[0]; 3750 3751 if (dump_opt['e']) { 3752 nvlist_t *cfg = NULL; 3753 char *name = find_zpool(&target, &cfg, nsearch, searchdirs); 3754 3755 error = ENOENT; 3756 if (name) { 3757 if (dump_opt['C'] > 1) { 3758 (void) printf("\nConfiguration for import:\n"); 3759 dump_nvlist(cfg, 8); 3760 } 3761 if (nvlist_add_nvlist(cfg, 3762 ZPOOL_REWIND_POLICY, policy) != 0) { 3763 fatal("can't open '%s': %s", 3764 target, strerror(ENOMEM)); 3765 } 3766 if ((error = spa_import(name, cfg, NULL, 3767 ZFS_IMPORT_MISSING_LOG)) != 0) { 3768 error = spa_import(name, cfg, NULL, 3769 ZFS_IMPORT_VERBATIM); 3770 } 3771 } 3772 } 3773 3774 if (error == 0) { 3775 if (strpbrk(target, "/@") == NULL || dump_opt['R']) { 3776 error = spa_open_rewind(target, &spa, FTAG, policy, 3777 NULL); 3778 if (error) { 3779 /* 3780 * If we're missing the log device then 3781 * try opening the pool after clearing the 3782 * log state. 3783 */ 3784 mutex_enter(&spa_namespace_lock); 3785 if ((spa = spa_lookup(target)) != NULL && 3786 spa->spa_log_state == SPA_LOG_MISSING) { 3787 spa->spa_log_state = SPA_LOG_CLEAR; 3788 error = 0; 3789 } 3790 mutex_exit(&spa_namespace_lock); 3791 3792 if (!error) { 3793 error = spa_open_rewind(target, &spa, 3794 FTAG, policy, NULL); 3795 } 3796 } 3797 } else { 3798 error = dmu_objset_own(target, DMU_OST_ANY, 3799 B_TRUE, FTAG, &os); 3800 } 3801 } 3802 nvlist_free(policy); 3803 3804 if (error) 3805 fatal("can't open '%s': %s", target, strerror(error)); 3806 3807 argv++; 3808 argc--; 3809 if (!dump_opt['R']) { 3810 if (argc > 0) { 3811 zopt_objects = argc; 3812 zopt_object = calloc(zopt_objects, sizeof (uint64_t)); 3813 for (i = 0; i < zopt_objects; i++) { 3814 errno = 0; 3815 zopt_object[i] = strtoull(argv[i], NULL, 0); 3816 if (zopt_object[i] == 0 && errno != 0) 3817 fatal("bad number %s: %s", 3818 argv[i], strerror(errno)); 3819 } 3820 } 3821 if (os != NULL) { 3822 dump_dir(os); 3823 } else if (zopt_objects > 0 && !dump_opt['m']) { 3824 dump_dir(spa->spa_meta_objset); 3825 } else { 3826 dump_zpool(spa); 3827 } 3828 } else { 3829 flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR; 3830 flagbits['c'] = ZDB_FLAG_CHECKSUM; 3831 flagbits['d'] = ZDB_FLAG_DECOMPRESS; 3832 flagbits['e'] = ZDB_FLAG_BSWAP; 3833 flagbits['g'] = ZDB_FLAG_GBH; 3834 flagbits['i'] = ZDB_FLAG_INDIRECT; 3835 flagbits['p'] = ZDB_FLAG_PHYS; 3836 flagbits['r'] = ZDB_FLAG_RAW; 3837 3838 for (i = 0; i < argc; i++) 3839 zdb_read_block(argv[i], spa); 3840 } 3841 3842 (os != NULL) ? dmu_objset_disown(os, FTAG) : spa_close(spa, FTAG); 3843 3844 fuid_table_destroy(); 3845 sa_loaded = B_FALSE; 3846 3847 libzfs_fini(g_zfs); 3848 kernel_fini(); 3849 3850 return (0); 3851 } 3852
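/*
 * A minimal usage sketch for the -R path above (the pool name, vdev index,
 * offset and size are hypothetical; offset and size are parsed as hex and
 * must be multiples of the 512-byte sector size):
 *
 *	zdb -R tank 0.0:400000:20000:r	dump 0x20000 raw bytes to stdout
 *	zdb -R tank 0.0:400000:20000:d	try every decompressor on the block
 *
 * The flag letters map through flagbits[], initialized just above, and the
 * descriptor string itself is parsed by zdb_read_block().
 */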