/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright 2017 RackTop Systems.
 */

#include <stdio.h>
#include <unistd.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <zfs_comutil.h>
#include <libcmdutils.h>
#undef verify
#include <libzfs.h>

#include "zdb.h"

#define	ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ?	\
	zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ?	\
	zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ?	\
	dmu_ot[(idx)].ot_name : DMU_OT_IS_VALID(idx) ?	\
	dmu_ot_byteswap[DMU_OT_BYTESWAP(idx)].ob_name : "UNKNOWN")
#define	ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) :	\
	(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ?	\
	DMU_OT_ZAP_OTHER :	\
	(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ?	\
	DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
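
/*
 * Note: these declarations normally bind to the libzpool copies of the
 * kernel tunables (so options like -A and -o can adjust them); under lint
 * there is nothing to link against, so local definitions stand in.
 */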
#ifndef lint
extern int reference_tracking_enable;
extern boolean_t zfs_recover;
extern uint64_t zfs_arc_max, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
extern int aok;
extern boolean_t spa_load_verify_dryrun;
#else
int reference_tracking_enable;
boolean_t zfs_recover;
uint64_t zfs_arc_max, zfs_arc_meta_limit;
int zfs_vdev_async_read_max_active;
int aok;
boolean_t spa_load_verify_dryrun;
#endif

static const char cmdname[] = "zdb";
uint8_t dump_opt[256];

typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);

uint64_t *zopt_object = NULL;
static unsigned zopt_objects = 0;
libzfs_handle_t *g_zfs;
uint64_t max_inflight = 1000;
static int leaked_objects = 0;

static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *);

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}

static void
usage(void)
{
	(void) fprintf(stderr,
	    "Usage:\t%s [-AbcdDFGhikLMPsvX] [-e [-V] [-p <path> ...]] "
	    "[-I <inflight I/Os>]\n"
	    "\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
	    "\t\t[<poolname> [<object> ...]]\n"
	    "\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] <dataset> "
	    "[<object> ...]\n"
	    "\t%s -C [-A] [-U <cache>]\n"
	    "\t%s -l [-Aqu] <device>\n"
	    "\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
	    "[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
	    "\t%s -O <dataset> <path>\n"
	    "\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
	    "\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
	    "\t%s -E [-A] word0:word1:...:word15\n"
	    "\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
	    "<poolname>\n\n",
	    cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
	    cmdname, cmdname);

	(void) fprintf(stderr, "    Dataset name must include at least one "
	    "separator character '/' or '@'\n");
	(void) fprintf(stderr, "    If dataset name is specified, only that "
	    "dataset is dumped\n");
	(void) fprintf(stderr, "    If object numbers are specified, only "
	    "those objects are dumped\n\n");
	(void) fprintf(stderr, "    Options to control amount of output:\n");
	(void) fprintf(stderr, "        -b block statistics\n");
	(void) fprintf(stderr, "        -c checksum all metadata (twice for "
	    "all data) blocks\n");
	(void) fprintf(stderr, "        -C config (or cachefile if alone)\n");
	(void) fprintf(stderr, "        -d dataset(s)\n");
	(void) fprintf(stderr, "        -D dedup statistics\n");
	(void) fprintf(stderr, "        -E decode and display block from an "
	    "embedded block pointer\n");
	(void) fprintf(stderr, "        -h pool history\n");
	(void) fprintf(stderr, "        -i intent logs\n");
	(void) fprintf(stderr, "        -l read label contents\n");
	(void) fprintf(stderr, "        -k examine the checkpointed state "
	    "of the pool\n");
	(void) fprintf(stderr, "        -L disable leak tracking (do not "
	    "load spacemaps)\n");
	(void) fprintf(stderr, "        -m metaslabs\n");
	(void) fprintf(stderr, "        -M metaslab groups\n");
	(void) fprintf(stderr, "        -O perform object lookups by path\n");
	(void) fprintf(stderr, "        -R read and display block from a "
	    "device\n");
	(void) fprintf(stderr, "        -s report stats on zdb's I/O\n");
	(void) fprintf(stderr, "        -S simulate dedup to measure effect\n");
	(void) fprintf(stderr, "        -v verbose (applies to all "
	    "others)\n\n");
	(void) fprintf(stderr, "    Below options are intended for use "
	    "with other options:\n");
	(void) fprintf(stderr, "        -A ignore assertions (-A), enable "
	    "panic recovery (-AA) or both (-AAA)\n");
	(void) fprintf(stderr, "        -e pool is exported/destroyed/"
	    "has altroot/not in a cachefile\n");
	(void) fprintf(stderr, "        -F attempt automatic rewind within "
	    "safe range of transaction groups\n");
	(void) fprintf(stderr, "        -G dump zfs_dbgmsg buffer before "
	    "exiting\n");
	(void) fprintf(stderr, "        -I <number of inflight I/Os> -- "
	    "specify the maximum number of "
	    "checksumming I/Os [default is 1000]\n");
	(void) fprintf(stderr, "        -o <variable>=<value> set global "
	    "variable to an unsigned 32-bit integer value\n");
	(void) fprintf(stderr, "        -p <path> -- use one or more with "
	    "-e to specify path to vdev dir\n");
	(void) fprintf(stderr, "        -P print numbers in parseable form\n");
	(void) fprintf(stderr, "        -q don't print label contents\n");
	(void) fprintf(stderr, "        -t <txg> -- highest txg to use when "
	    "searching for uberblocks\n");
	(void) fprintf(stderr, "        -u uberblock\n");
	(void) fprintf(stderr, "        -U <cachefile_path> -- use alternate "
	    "cachefile\n");
	(void) fprintf(stderr, "        -V do verbatim import\n");
	(void) fprintf(stderr, "        -x <dumpdir> -- "
	    "dump all read blocks into specified directory\n");
	(void) fprintf(stderr, "        -X attempt extreme rewind (does not "
	    "work with dataset)\n\n");
	(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
	    "to make only that option verbose\n");
	(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
	exit(1);
}
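
/*
 * When -G was given, flush the accumulated zfs_dbgmsg buffer to stdout;
 * fatal() calls this so the debug log is emitted even on an error exit.
 */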
static void
dump_debug_buffer()
{
	if (dump_opt['G']) {
		(void) printf("\n");
		zfs_dbgmsg_print("zdb");
	}
}

/*
 * Called for usage errors that are discovered after a call to spa_open(),
 * dmu_bonus_hold(), or pool_match().  abort() is called for other errors.
 */

static void
fatal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void) fprintf(stderr, "%s: ", cmdname);
	(void) vfprintf(stderr, fmt, ap);
	va_end(ap);
	(void) fprintf(stderr, "\n");

	dump_debug_buffer();

	exit(1);
}
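
/*
 * Object viewer for a packed-nvlist-size object: the bonus buffer gives the
 * packed nvlist's length, so read that many bytes from the companion
 * object, unpack, and pretty-print the result.
 */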
/* ARGSUSED */
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
	nvlist_t *nv;
	size_t nvsize = *(uint64_t *)data;
	char *packed = umem_alloc(nvsize, UMEM_NOFAIL);

	VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));

	VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);

	umem_free(packed, nvsize);

	dump_nvlist(nv, 8);

	nvlist_free(nv);
}

/* ARGSUSED */
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
	spa_history_phys_t *shp = data;

	if (shp == NULL)
		return;

	(void) printf("\t\tpool_create_len = %llu\n",
	    (u_longlong_t)shp->sh_pool_create_len);
	(void) printf("\t\tphys_max_off = %llu\n",
	    (u_longlong_t)shp->sh_phys_max_off);
	(void) printf("\t\tbof = %llu\n",
	    (u_longlong_t)shp->sh_bof);
	(void) printf("\t\teof = %llu\n",
	    (u_longlong_t)shp->sh_eof);
	(void) printf("\t\trecords_lost = %llu\n",
	    (u_longlong_t)shp->sh_records_lost);
}

static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
	if (dump_opt['P'])
		(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
	else
		nicenum(num, buf, buflen);
}

static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;

static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
	int i;
	int minidx = size - 1;
	int maxidx = 0;
	uint64_t max = 0;

	for (i = 0; i < size; i++) {
		if (histo[i] > max)
			max = histo[i];
		if (histo[i] > 0 && i > maxidx)
			maxidx = i;
		if (histo[i] > 0 && i < minidx)
			minidx = i;
	}

	if (max < histo_width)
		max = histo_width;

	for (i = minidx; i <= maxidx; i++) {
		(void) printf("\t\t\t%3u: %6llu %s\n",
		    i + offset, (u_longlong_t)histo[i],
		    &histo_stars[(max - histo[i]) * histo_width / max]);
	}
}
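
/*
 * Print ZAP bookkeeping: a one-line summary for a microzap, or the pointer
 * table, entry counts, and 2^n histograms for a fat ZAP.
 */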
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
	int error;
	zap_stats_t zs;

	error = zap_get_stats(os, object, &zs);
	if (error)
		return;

	if (zs.zs_ptrtbl_len == 0) {
		ASSERT(zs.zs_num_blocks == 1);
		(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
		    (u_longlong_t)zs.zs_blocksize,
		    (u_longlong_t)zs.zs_num_entries);
		return;
	}

	(void) printf("\tFat ZAP stats:\n");

	(void) printf("\t\tPointer table:\n");
	(void) printf("\t\t\t%llu elements\n",
	    (u_longlong_t)zs.zs_ptrtbl_len);
	(void) printf("\t\t\tzt_blk: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_blk);
	(void) printf("\t\t\tzt_numblks: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_numblks);
	(void) printf("\t\t\tzt_shift: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_shift);
	(void) printf("\t\t\tzt_blks_copied: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_blks_copied);
	(void) printf("\t\t\tzt_nextblk: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_nextblk);

	(void) printf("\t\tZAP entries: %llu\n",
	    (u_longlong_t)zs.zs_num_entries);
	(void) printf("\t\tLeaf blocks: %llu\n",
	    (u_longlong_t)zs.zs_num_leafs);
	(void) printf("\t\tTotal blocks: %llu\n",
	    (u_longlong_t)zs.zs_num_blocks);
	(void) printf("\t\tzap_block_type: 0x%llx\n",
	    (u_longlong_t)zs.zs_block_type);
	(void) printf("\t\tzap_magic: 0x%llx\n",
	    (u_longlong_t)zs.zs_magic);
	(void) printf("\t\tzap_salt: 0x%llx\n",
	    (u_longlong_t)zs.zs_salt);

	(void) printf("\t\tLeafs with 2^n pointers:\n");
	dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBlocks with n*5 entries:\n");
	dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBlocks n/10 full:\n");
	dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tEntries with n chunks:\n");
	dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBuckets with n entries:\n");
	dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}

/*ARGSUSED*/
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/*ARGSUSED*/
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
	(void) printf("\tUNKNOWN OBJECT TYPE\n");
}

/*ARGSUSED*/
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/*ARGSUSED*/
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/*ARGSUSED*/
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	void *prop;
	unsigned i;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = ", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}
		prop = umem_zalloc(attr.za_num_integers *
		    attr.za_integer_length, UMEM_NOFAIL);
		(void) zap_lookup(os, object, attr.za_name,
		    attr.za_integer_length, attr.za_num_integers, prop);
		if (attr.za_integer_length == 1) {
			(void) printf("%s", (char *)prop);
		} else {
			for (i = 0; i < attr.za_num_integers; i++) {
				switch (attr.za_integer_length) {
				case 2:
					(void) printf("%u ",
					    ((uint16_t *)prop)[i]);
					break;
				case 4:
					(void) printf("%u ",
					    ((uint32_t *)prop)[i]);
					break;
				case 8:
					(void) printf("%lld ",
					    (u_longlong_t)((int64_t *)prop)[i]);
					break;
				}
			}
		}
		(void) printf("\n");
		umem_free(prop, attr.za_num_integers * attr.za_integer_length);
	}
	zap_cursor_fini(&zc);
}
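
/*
 * Object viewer for a bpobj header: print the block-pointer count and space
 * totals, the subobj fields when the phys structure is new enough to have
 * them, and (at -ddddd) each stored block pointer.
 */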
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
	bpobj_phys_t *bpop = data;
	char bytes[32], comp[32], uncomp[32];

	/* make sure the output won't get truncated */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);

	if (bpop == NULL)
		return;

	zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
	zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
	zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));

	(void) printf("\t\tnum_blkptrs = %llu\n",
	    (u_longlong_t)bpop->bpo_num_blkptrs);
	(void) printf("\t\tbytes = %s\n", bytes);
	if (size >= BPOBJ_SIZE_V1) {
		(void) printf("\t\tcomp = %s\n", comp);
		(void) printf("\t\tuncomp = %s\n", uncomp);
	}
	if (size >= sizeof (*bpop)) {
		(void) printf("\t\tsubobjs = %llu\n",
		    (u_longlong_t)bpop->bpo_subobjs);
		(void) printf("\t\tnum_subobjs = %llu\n",
		    (u_longlong_t)bpop->bpo_num_subobjs);
	}

	if (dump_opt['d'] < 5)
		return;

	for (uint64_t i = 0; i < bpop->bpo_num_blkptrs; i++) {
		char blkbuf[BP_SPRINTF_LEN];
		blkptr_t bp;

		int err = dmu_read(os, object,
		    i * sizeof (bp), sizeof (bp), &bp, 0);
		if (err != 0) {
			(void) printf("got error %u from dmu_read\n", err);
			break;
		}
		snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp);
		(void) printf("\t%s\n", blkbuf);
	}
}

/* ARGSUSED */
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
	dmu_object_info_t doi;

	VERIFY0(dmu_object_info(os, object, &doi));
	uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);

	int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
	if (err != 0) {
		(void) printf("got error %u from dmu_read\n", err);
		kmem_free(subobjs, doi.doi_max_offset);
		return;
	}

	int64_t last_nonzero = -1;
	for (uint64_t i = 0; i < doi.doi_max_offset / 8; i++) {
		if (subobjs[i] != 0)
			last_nonzero = i;
	}

	for (int64_t i = 0; i <= last_nonzero; i++) {
		(void) printf("\t%llu\n", (longlong_t)subobjs[i]);
	}
	kmem_free(subobjs, doi.doi_max_offset);
}

/*ARGSUSED*/
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
	dump_zap_stats(os, object);
	/* contents are printed elsewhere, properly decoded */
}

/*ARGSUSED*/
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = ", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}
		(void) printf(" %llx : [%d:%d:%d]\n",
		    (u_longlong_t)attr.za_first_integer,
		    (int)ATTR_LENGTH(attr.za_first_integer),
		    (int)ATTR_BSWAP(attr.za_first_integer),
		    (int)ATTR_NUM(attr.za_first_integer));
	}
	zap_cursor_fini(&zc);
}

/*ARGSUSED*/
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	uint16_t *layout_attrs;
	unsigned i;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = [", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}

		VERIFY(attr.za_integer_length == 2);
		layout_attrs = umem_zalloc(attr.za_num_integers *
		    attr.za_integer_length, UMEM_NOFAIL);

		VERIFY(zap_lookup(os, object, attr.za_name,
		    attr.za_integer_length,
		    attr.za_num_integers, layout_attrs) == 0);

		for (i = 0; i != attr.za_num_integers; i++)
			(void) printf(" %d ", (int)layout_attrs[i]);
		(void) printf("]\n");
		umem_free(layout_attrs,
		    attr.za_num_integers * attr.za_integer_length);
	}
	zap_cursor_fini(&zc);
}
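
/*
 * Object viewer for a ZPL directory ZAP: each entry's value packs the child
 * object number and its file type into za_first_integer, so decode both
 * before printing.
 */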
/*ARGSUSED*/
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	const char *typenames[] = {
		/* 0 */ "not specified",
		/* 1 */ "FIFO",
		/* 2 */ "Character Device",
		/* 3 */ "3 (invalid)",
		/* 4 */ "Directory",
		/* 5 */ "5 (invalid)",
		/* 6 */ "Block Device",
		/* 7 */ "7 (invalid)",
		/* 8 */ "Regular File",
		/* 9 */ "9 (invalid)",
		/* 10 */ "Symbolic Link",
		/* 11 */ "11 (invalid)",
		/* 12 */ "Socket",
		/* 13 */ "Door",
		/* 14 */ "Event Port",
		/* 15 */ "15 (invalid)",
	};

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = %lld (type: %s)\n",
		    attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
		    typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
	}
	zap_cursor_fini(&zc);
}

static int
get_dtl_refcount(vdev_t *vd)
{
	int refcount = 0;

	if (vd->vdev_ops->vdev_op_leaf) {
		space_map_t *sm = vd->vdev_dtl_sm;

		if (sm != NULL &&
		    sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
			return (1);
		return (0);
	}

	for (unsigned c = 0; c < vd->vdev_children; c++)
		refcount += get_dtl_refcount(vd->vdev_child[c]);
	return (refcount);
}

static int
get_metaslab_refcount(vdev_t *vd)
{
	int refcount = 0;

	if (vd->vdev_top == vd) {
		for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
			space_map_t *sm = vd->vdev_ms[m]->ms_sm;

			if (sm != NULL &&
			    sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
				refcount++;
		}
	}
	for (unsigned c = 0; c < vd->vdev_children; c++)
		refcount += get_metaslab_refcount(vd->vdev_child[c]);

	return (refcount);
}

static int
get_obsolete_refcount(vdev_t *vd)
{
	int refcount = 0;

	uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
	if (vd->vdev_top == vd && obsolete_sm_obj != 0) {
		dmu_object_info_t doi;
		VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
		    obsolete_sm_obj, &doi));
		if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
			refcount++;
		}
	} else {
		ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
		ASSERT3U(obsolete_sm_obj, ==, 0);
	}
	for (unsigned c = 0; c < vd->vdev_children; c++) {
		refcount += get_obsolete_refcount(vd->vdev_child[c]);
	}

	return (refcount);
}

static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
	uint64_t prev_obj =
	    spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
	if (prev_obj != 0) {
		dmu_object_info_t doi;
		VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
		if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
			return (1);
		}
	}
	return (0);
}

static int
get_checkpoint_refcount(vdev_t *vd)
{
	int refcount = 0;

	if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
	    zap_contains(spa_meta_objset(vd->vdev_spa),
	    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
		refcount++;

	for (uint64_t c = 0; c < vd->vdev_children; c++)
		refcount += get_checkpoint_refcount(vd->vdev_child[c]);

	return (refcount);
}
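
/*
 * Cross-check the SPA_FEATURE_SPACEMAP_HISTOGRAM feature refcount against
 * the number of space maps (DTL, metaslab, obsolete, and checkpoint) whose
 * on-disk phys is large enough to hold a histogram; returns 2 on mismatch.
 */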
static int
verify_spacemap_refcounts(spa_t *spa)
{
	uint64_t expected_refcount = 0;
	uint64_t actual_refcount;

	(void) feature_get_refcount(spa,
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
	    &expected_refcount);
	actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
	actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
	actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
	actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
	actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);

	if (expected_refcount != actual_refcount) {
		(void) printf("space map refcount mismatch: expected %lld != "
		    "actual %lld\n",
		    (longlong_t)expected_refcount,
		    (longlong_t)actual_refcount);
		return (2);
	}
	return (0);
}

static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
	char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
	    "INVALID", "INVALID", "INVALID", "INVALID" };

	if (sm == NULL)
		return;

	(void) printf("space map object %llu:\n",
	    (longlong_t)sm->sm_phys->smp_object);
	(void) printf("  smp_objsize = 0x%llx\n",
	    (longlong_t)sm->sm_phys->smp_objsize);
	(void) printf("  smp_alloc = 0x%llx\n",
	    (longlong_t)sm->sm_phys->smp_alloc);

	/*
	 * Print out the freelist entries in both encoded and decoded form.
	 */
	uint8_t mapshift = sm->sm_shift;
	int64_t alloc = 0;
	uint64_t word;
	for (uint64_t offset = 0; offset < space_map_length(sm);
	    offset += sizeof (word)) {

		VERIFY0(dmu_read(os, space_map_object(sm), offset,
		    sizeof (word), &word, DMU_READ_PREFETCH));

		if (sm_entry_is_debug(word)) {
			(void) printf("\t    [%6llu] %s: txg %llu, pass %llu\n",
			    (u_longlong_t)(offset / sizeof (word)),
			    ddata[SM_DEBUG_ACTION_DECODE(word)],
			    (u_longlong_t)SM_DEBUG_TXG_DECODE(word),
			    (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(word));
			continue;
		}

		uint8_t words;
		char entry_type;
		uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;

		if (sm_entry_is_single_word(word)) {
			entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
			    'A' : 'F';
			entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
			    sm->sm_start;
			entry_run = SM_RUN_DECODE(word) << mapshift;
			words = 1;
		} else {
			/* it is a two-word entry so we read another word */
			ASSERT(sm_entry_is_double_word(word));

			uint64_t extra_word;
			offset += sizeof (extra_word);
			VERIFY0(dmu_read(os, space_map_object(sm), offset,
			    sizeof (extra_word), &extra_word,
			    DMU_READ_PREFETCH));

			ASSERT3U(offset, <=, space_map_length(sm));

			entry_run = SM2_RUN_DECODE(word) << mapshift;
			entry_vdev = SM2_VDEV_DECODE(word);
			entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
			    'A' : 'F';
			entry_off = (SM2_OFFSET_DECODE(extra_word) <<
			    mapshift) + sm->sm_start;
			words = 2;
		}

		(void) printf("\t    [%6llu]    %c  range:"
		    " %010llx-%010llx  size: %06llx vdev: %06llu words: %u\n",
		    (u_longlong_t)(offset / sizeof (word)),
		    entry_type, (u_longlong_t)entry_off,
		    (u_longlong_t)(entry_off + entry_run),
		    (u_longlong_t)entry_run,
		    (u_longlong_t)entry_vdev, words);

		if (entry_type == 'A')
			alloc += entry_run;
		else
			alloc -= entry_run;
	}
	if ((uint64_t)alloc != space_map_allocated(sm)) {
		(void) printf("space_map_object alloc (%lld) INCONSISTENT "
		    "with space map summary (%lld)\n",
		    (longlong_t)space_map_allocated(sm), (longlong_t)alloc);
	}
}
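
/*
 * Summarize a loaded metaslab's in-core range tree: segment count, largest
 * free segment, percent free, and the in-memory free-space histogram.
 */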
static void
dump_metaslab_stats(metaslab_t *msp)
{
	char maxbuf[32];
	range_tree_t *rt = msp->ms_allocatable;
	avl_tree_t *t = &msp->ms_allocatable_by_size;
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ);

	zdb_nicenum(metaslab_block_maxsize(msp), maxbuf, sizeof (maxbuf));

	(void) printf("\t %25s %10lu   %7s  %6s   %4s %4d%%\n",
	    "segments", avl_numnodes(t), "maxsize", maxbuf,
	    "freepct", free_pct);
	(void) printf("\tIn-memory histogram:\n");
	dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}

static void
dump_metaslab(metaslab_t *msp)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	space_map_t *sm = msp->ms_sm;
	char freebuf[32];

	zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
	    sizeof (freebuf));

	(void) printf(
	    "\tmetaslab %6llu   offset %12llx   spacemap %6llu   free %5s\n",
	    (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
	    (u_longlong_t)space_map_object(sm), freebuf);

	if (dump_opt['m'] > 2 && !dump_opt['L']) {
		mutex_enter(&msp->ms_lock);
		metaslab_load_wait(msp);
		if (!msp->ms_loaded) {
			VERIFY0(metaslab_load(msp));
			range_tree_stat_verify(msp->ms_allocatable);
		}
		dump_metaslab_stats(msp);
		metaslab_unload(msp);
		mutex_exit(&msp->ms_lock);
	}

	if (dump_opt['m'] > 1 && sm != NULL &&
	    spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		/*
		 * The space map histogram represents free space in chunks
		 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
		 */
		(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
		    (u_longlong_t)msp->ms_fragmentation);
		dump_histogram(sm->sm_phys->smp_histogram,
		    SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
	}

	if (dump_opt['d'] > 5 || dump_opt['m'] > 3) {
		ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));

		dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
	}
}

static void
print_vdev_metaslab_header(vdev_t *vd)
{
	(void) printf("\tvdev %10llu\n\t%-10s%5llu   %-19s   %-15s   %-10s\n",
	    (u_longlong_t)vd->vdev_id,
	    "metaslabs", (u_longlong_t)vd->vdev_ms_count,
	    "offset", "spacemap", "free");
	(void) printf("\t%15s   %19s   %15s   %10s\n",
	    "---------------", "-------------------",
	    "---------------", "-------------");
}
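
/*
 * For -M: verify the metaslab class and group histograms, then print each
 * top-level vdev's fragmentation (or "-" when it cannot be computed) and
 * histogram, followed by the pool-wide totals.
 */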
static void
dump_metaslab_groups(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	metaslab_class_t *mc = spa_normal_class(spa);
	uint64_t fragmentation;

	metaslab_class_histogram_verify(mc);

	for (unsigned c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (mg->mg_class != mc)
			continue;

		metaslab_group_histogram_verify(mg);
		mg->mg_fragmentation = metaslab_group_fragmentation(mg);

		(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
		    "fragmentation",
		    (u_longlong_t)tvd->vdev_id,
		    (u_longlong_t)tvd->vdev_ms_count);
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			(void) printf("%3s\n", "-");
		} else {
			(void) printf("%3llu%%\n",
			    (u_longlong_t)mg->mg_fragmentation);
		}
		dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
	}

	(void) printf("\tpool %s\tfragmentation", spa_name(spa));
	fragmentation = metaslab_class_fragmentation(mc);
	if (fragmentation == ZFS_FRAG_INVALID)
		(void) printf("\t%3s\n", "-");
	else
		(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
	dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}

static void
print_vdev_indirect(vdev_t *vd)
{
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	vdev_indirect_births_t *vib = vd->vdev_indirect_births;

	if (vim == NULL) {
		ASSERT3P(vib, ==, NULL);
		return;
	}

	ASSERT3U(vdev_indirect_mapping_object(vim), ==,
	    vic->vic_mapping_object);
	ASSERT3U(vdev_indirect_births_object(vib), ==,
	    vic->vic_births_object);

	(void) printf("indirect births obj %llu:\n",
	    (longlong_t)vic->vic_births_object);
	(void) printf("    vib_count = %llu\n",
	    (longlong_t)vdev_indirect_births_count(vib));
	for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
		vdev_indirect_birth_entry_phys_t *cur_vibe =
		    &vib->vib_entries[i];
		(void) printf("\toffset %llx -> txg %llu\n",
		    (longlong_t)cur_vibe->vibe_offset,
		    (longlong_t)cur_vibe->vibe_phys_birth_txg);
	}
	(void) printf("\n");

	(void) printf("indirect mapping obj %llu:\n",
	    (longlong_t)vic->vic_mapping_object);
	(void) printf("    vim_max_offset = 0x%llx\n",
	    (longlong_t)vdev_indirect_mapping_max_offset(vim));
	(void) printf("    vim_bytes_mapped = 0x%llx\n",
	    (longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
	(void) printf("    vim_count = %llu\n",
	    (longlong_t)vdev_indirect_mapping_num_entries(vim));

	if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
		return;

	uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);

	for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
		vdev_indirect_mapping_entry_phys_t *vimep =
		    &vim->vim_entries[i];
		(void) printf("\t<%llx:%llx:%llx> -> "
		    "<%llx:%llx:%llx> (%x obsolete)\n",
		    (longlong_t)vd->vdev_id,
		    (longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
		    (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
		    counts[i]);
	}
	(void) printf("\n");

	uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
	if (obsolete_sm_object != 0) {
		objset_t *mos = vd->vdev_spa->spa_meta_objset;
		(void) printf("obsolete space map object %llu:\n",
		    (u_longlong_t)obsolete_sm_object);
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
		    obsolete_sm_object);
		dump_spacemap(mos, vd->vdev_obsolete_sm);
		(void) printf("\n");
	}
}
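
/*
 * Entry point for -m: dump metaslabs for every top-level vdev, or, when a
 * vdev id (and optional metaslab numbers) was supplied as an extra argument,
 * restrict the output to those.
 */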
static void
dump_metaslabs(spa_t *spa)
{
	vdev_t *vd, *rvd = spa->spa_root_vdev;
	uint64_t m, c = 0, children = rvd->vdev_children;

	(void) printf("\nMetaslabs:\n");

	if (!dump_opt['d'] && zopt_objects > 0) {
		c = zopt_object[0];

		if (c >= children)
			(void) fatal("bad vdev id: %llu", (u_longlong_t)c);

		if (zopt_objects > 1) {
			vd = rvd->vdev_child[c];
			print_vdev_metaslab_header(vd);

			for (m = 1; m < zopt_objects; m++) {
				if (zopt_object[m] < vd->vdev_ms_count)
					dump_metaslab(
					    vd->vdev_ms[zopt_object[m]]);
				else
					(void) fprintf(stderr, "bad metaslab "
					    "number %llu\n",
					    (u_longlong_t)zopt_object[m]);
			}
			(void) printf("\n");
			return;
		}
		children = c + 1;
	}
	for (; c < children; c++) {
		vd = rvd->vdev_child[c];
		print_vdev_metaslab_header(vd);

		print_vdev_indirect(vd);

		for (m = 0; m < vd->vdev_ms_count; m++)
			dump_metaslab(vd->vdev_ms[m]);
		(void) printf("\n");
	}
}

static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
	const ddt_phys_t *ddp = dde->dde_phys;
	const ddt_key_t *ddk = &dde->dde_key;
	const char *types[4] = { "ditto", "single", "double", "triple" };
	char blkbuf[BP_SPRINTF_LEN];
	blkptr_t blk;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0)
			continue;
		ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
		snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
		(void) printf("index %llx refcnt %llu %s %s\n",
		    (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
		    types[p], blkbuf);
	}
}

static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
	double rL, rP, rD, D, dedup, compress, copies;

	if (dds->dds_blocks == 0)
		return;

	rL = (double)dds->dds_ref_lsize;
	rP = (double)dds->dds_ref_psize;
	rD = (double)dds->dds_ref_dsize;
	D = (double)dds->dds_dsize;

	dedup = rD / D;
	compress = rL / rP;
	copies = rD / rP;

	(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
	    "dedup * compress / copies = %.2f\n\n",
	    dedup, compress, copies, dedup * compress / copies);
}
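
/*
 * Print one DDT object: the summary line always, the histogram at -DDD, and
 * the raw entries at -DDDD (the unique-entry class only at -DDDDD).
 */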
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
	char name[DDT_NAMELEN];
	ddt_entry_t dde;
	uint64_t walk = 0;
	dmu_object_info_t doi;
	uint64_t count, dspace, mspace;
	int error;

	error = ddt_object_info(ddt, type, class, &doi);

	if (error == ENOENT)
		return;
	ASSERT(error == 0);

	if ((count = ddt_object_count(ddt, type, class)) == 0)
		return;

	dspace = doi.doi_physical_blocks_512 << 9;
	mspace = doi.doi_fill_count * doi.doi_data_block_size;

	ddt_object_name(ddt, type, class, name);

	(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
	    name,
	    (u_longlong_t)count,
	    (u_longlong_t)(dspace / count),
	    (u_longlong_t)(mspace / count));

	if (dump_opt['D'] < 3)
		return;

	zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);

	if (dump_opt['D'] < 4)
		return;

	if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
		return;

	(void) printf("%s contents:\n\n", name);

	while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
		dump_dde(ddt, &dde, walk);

	ASSERT3U(error, ==, ENOENT);

	(void) printf("\n");
}

static void
dump_all_ddts(spa_t *spa)
{
	ddt_histogram_t ddh_total;
	ddt_stat_t dds_total;

	bzero(&ddh_total, sizeof (ddh_total));
	bzero(&dds_total, sizeof (dds_total));

	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
			for (enum ddt_class class = 0; class < DDT_CLASSES;
			    class++) {
				dump_ddt(ddt, type, class);
			}
		}
	}

	ddt_get_dedup_stats(spa, &dds_total);

	if (dds_total.dds_blocks == 0) {
		(void) printf("All DDTs are empty\n");
		return;
	}

	(void) printf("\n");

	if (dump_opt['D'] > 1) {
		(void) printf("DDT histogram (aggregated over all DDTs):\n");
		ddt_get_dedup_histogram(spa, &ddh_total);
		zpool_dump_ddt(&dds_total, &ddh_total);
	}

	dump_dedup_ratio(&dds_total);
}

static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
	char *prefix = arg;

	(void) printf("%s [%llu,%llu) length %llu\n",
	    prefix,
	    (u_longlong_t)start,
	    (u_longlong_t)(start + size),
	    (u_longlong_t)(size));
}
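
/*
 * Recursively print the dirty time logs (missing, partial, scrub, and
 * outage range trees) for a vdev subtree, noting whether each vdev's DTL is
 * required for pool integrity.
 */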
"DTL-required" : "DTL-expendable"); 1262 1263 for (int t = 0; t < DTL_TYPES; t++) { 1264 range_tree_t *rt = vd->vdev_dtl[t]; 1265 if (range_tree_space(rt) == 0) 1266 continue; 1267 (void) snprintf(prefix, sizeof (prefix), "\t%*s%s", 1268 indent + 2, "", name[t]); 1269 range_tree_walk(rt, dump_dtl_seg, prefix); 1270 if (dump_opt['d'] > 5 && vd->vdev_children == 0) 1271 dump_spacemap(spa->spa_meta_objset, vd->vdev_dtl_sm); 1272 } 1273 1274 for (unsigned c = 0; c < vd->vdev_children; c++) 1275 dump_dtl(vd->vdev_child[c], indent + 4); 1276 } 1277 1278 static void 1279 dump_history(spa_t *spa) 1280 { 1281 nvlist_t **events = NULL; 1282 uint64_t resid, len, off = 0; 1283 uint_t num = 0; 1284 int error; 1285 time_t tsec; 1286 struct tm t; 1287 char tbuf[30]; 1288 char internalstr[MAXPATHLEN]; 1289 1290 char *buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 1291 do { 1292 len = SPA_MAXBLOCKSIZE; 1293 1294 if ((error = spa_history_get(spa, &off, &len, buf)) != 0) { 1295 (void) fprintf(stderr, "Unable to read history: " 1296 "error %d\n", error); 1297 umem_free(buf, SPA_MAXBLOCKSIZE); 1298 return; 1299 } 1300 1301 if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0) 1302 break; 1303 1304 off -= resid; 1305 } while (len != 0); 1306 umem_free(buf, SPA_MAXBLOCKSIZE); 1307 1308 (void) printf("\nHistory:\n"); 1309 for (unsigned i = 0; i < num; i++) { 1310 uint64_t time, txg, ievent; 1311 char *cmd, *intstr; 1312 boolean_t printed = B_FALSE; 1313 1314 if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME, 1315 &time) != 0) 1316 goto next; 1317 if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD, 1318 &cmd) != 0) { 1319 if (nvlist_lookup_uint64(events[i], 1320 ZPOOL_HIST_INT_EVENT, &ievent) != 0) 1321 goto next; 1322 verify(nvlist_lookup_uint64(events[i], 1323 ZPOOL_HIST_TXG, &txg) == 0); 1324 verify(nvlist_lookup_string(events[i], 1325 ZPOOL_HIST_INT_STR, &intstr) == 0); 1326 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) 1327 goto next; 1328 1329 (void) snprintf(internalstr, 1330 sizeof (internalstr), 1331 "[internal %s txg:%ju] %s", 1332 zfs_history_event_names[ievent], (uintmax_t)txg, 1333 intstr); 1334 cmd = internalstr; 1335 } 1336 tsec = time; 1337 (void) localtime_r(&tsec, &t); 1338 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 1339 (void) printf("%s %s\n", tbuf, cmd); 1340 printed = B_TRUE; 1341 1342 next: 1343 if (dump_opt['h'] > 1) { 1344 if (!printed) 1345 (void) printf("unrecognized record:\n"); 1346 dump_nvlist(events[i], 2); 1347 } 1348 } 1349 } 1350 1351 /*ARGSUSED*/ 1352 static void 1353 dump_dnode(objset_t *os, uint64_t object, void *data, size_t size) 1354 { 1355 } 1356 1357 static uint64_t 1358 blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp, 1359 const zbookmark_phys_t *zb) 1360 { 1361 if (dnp == NULL) { 1362 ASSERT(zb->zb_level < 0); 1363 if (zb->zb_object == 0) 1364 return (zb->zb_blkid); 1365 return (zb->zb_blkid * BP_GET_LSIZE(bp)); 1366 } 1367 1368 ASSERT(zb->zb_level >= 0); 1369 1370 return ((zb->zb_blkid << 1371 (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) * 1372 dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); 1373 } 1374 1375 static void 1376 snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp) 1377 { 1378 const dva_t *dva = bp->blk_dva; 1379 int ndvas = dump_opt['d'] > 5 ? 
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;

	if (dump_opt['b'] >= 6) {
		snprintf_blkptr(blkbuf, buflen, bp);
		return;
	}

	if (BP_IS_EMBEDDED(bp)) {
		(void) sprintf(blkbuf,
		    "EMBEDDED et=%u %llxL/%llxP B=%llu",
		    (int)BPE_GET_ETYPE(bp),
		    (u_longlong_t)BPE_GET_LSIZE(bp),
		    (u_longlong_t)BPE_GET_PSIZE(bp),
		    (u_longlong_t)bp->blk_birth);
		return;
	}

	blkbuf[0] = '\0';
	for (int i = 0; i < ndvas; i++)
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf), "%llu:%llx:%llx ",
		    (u_longlong_t)DVA_GET_VDEV(&dva[i]),
		    (u_longlong_t)DVA_GET_OFFSET(&dva[i]),
		    (u_longlong_t)DVA_GET_ASIZE(&dva[i]));

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf),
		    "%llxL B=%llu",
		    (u_longlong_t)BP_GET_LSIZE(bp),
		    (u_longlong_t)bp->blk_birth);
	} else {
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf),
		    "%llxL/%llxP F=%llu B=%llu/%llu",
		    (u_longlong_t)BP_GET_LSIZE(bp),
		    (u_longlong_t)BP_GET_PSIZE(bp),
		    (u_longlong_t)BP_GET_FILL(bp),
		    (u_longlong_t)bp->blk_birth,
		    (u_longlong_t)BP_PHYSICAL_BIRTH(bp));
	}
}

static void
print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb,
    const dnode_phys_t *dnp)
{
	char blkbuf[BP_SPRINTF_LEN];
	int l;

	if (!BP_IS_EMBEDDED(bp)) {
		ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
		ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
	}

	(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));

	ASSERT(zb->zb_level >= 0);

	for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
		if (l == zb->zb_level) {
			(void) printf("L%llx", (u_longlong_t)zb->zb_level);
		} else {
			(void) printf(" ");
		}
	}

	snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
	(void) printf("%s\n", blkbuf);
}

static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
    blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;

	if (bp->blk_birth == 0)
		return (0);

	print_indirect(bp, zb, dnp);

	if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;
		uint64_t fill = 0;

		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err)
			return (err);
		ASSERT(buf->b_data);

		/* recursively visit blocks below this */
		cbp = buf->b_data;
		for (i = 0; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = visit_indirect(spa, dnp, cbp, &czb);
			if (err)
				break;
			fill += BP_GET_FILL(cbp);
		}
		if (!err)
			ASSERT3U(fill, ==, BP_GET_FILL(bp));
		arc_buf_destroy(buf, &buf);
	}

	return (err);
}
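
/*
 * Walk every indirect block below a dnode via visit_indirect(), printing
 * one line per block pointer, indented by level; used at verbosity >= 5.
 */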
/*ARGSUSED*/
static void
dump_indirect(dnode_t *dn)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int j;
	zbookmark_phys_t czb;

	(void) printf("Indirect blocks:\n");

	SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
	    dn->dn_object, dnp->dn_nlevels - 1, 0);
	for (j = 0; j < dnp->dn_nblkptr; j++) {
		czb.zb_blkid = j;
		(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
		    &dnp->dn_blkptr[j], &czb);
	}

	(void) printf("\n");
}

/*ARGSUSED*/
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
	dsl_dir_phys_t *dd = data;
	time_t crtime;
	char nice[32];

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ);

	if (dd == NULL)
		return;

	ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));

	crtime = dd->dd_creation_time;
	(void) printf("\t\tcreation_time = %s", ctime(&crtime));
	(void) printf("\t\thead_dataset_obj = %llu\n",
	    (u_longlong_t)dd->dd_head_dataset_obj);
	(void) printf("\t\tparent_dir_obj = %llu\n",
	    (u_longlong_t)dd->dd_parent_obj);
	(void) printf("\t\torigin_obj = %llu\n",
	    (u_longlong_t)dd->dd_origin_obj);
	(void) printf("\t\tchild_dir_zapobj = %llu\n",
	    (u_longlong_t)dd->dd_child_dir_zapobj);
	zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
	(void) printf("\t\tused_bytes = %s\n", nice);
	zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
	(void) printf("\t\tcompressed_bytes = %s\n", nice);
	zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
	(void) printf("\t\tuncompressed_bytes = %s\n", nice);
	zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
	(void) printf("\t\tquota = %s\n", nice);
	zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
	(void) printf("\t\treserved = %s\n", nice);
	(void) printf("\t\tprops_zapobj = %llu\n",
	    (u_longlong_t)dd->dd_props_zapobj);
	(void) printf("\t\tdeleg_zapobj = %llu\n",
	    (u_longlong_t)dd->dd_deleg_zapobj);
	(void) printf("\t\tflags = %llx\n",
	    (u_longlong_t)dd->dd_flags);

#define	DO(which) \
	zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
	    sizeof (nice)); \
	(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
	DO(HEAD);
	DO(SNAP);
	DO(CHILD);
	DO(CHILD_RSRV);
	DO(REFRSRV);
#undef DO
}
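
/*
 * Object viewer for a DSL dataset phys: object references, space
 * accounting, creation info, flags, and the dataset's root block pointer.
 */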
/*ARGSUSED*/
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
	dsl_dataset_phys_t *ds = data;
	time_t crtime;
	char used[32], compressed[32], uncompressed[32], unique[32];
	char blkbuf[BP_SPRINTF_LEN];

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (used) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (unique) >= NN_NUMBUF_SZ);

	if (ds == NULL)
		return;

	ASSERT(size == sizeof (*ds));
	crtime = ds->ds_creation_time;
	zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
	zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
	zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
	    sizeof (uncompressed));
	zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
	snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);

	(void) printf("\t\tdir_obj = %llu\n",
	    (u_longlong_t)ds->ds_dir_obj);
	(void) printf("\t\tprev_snap_obj = %llu\n",
	    (u_longlong_t)ds->ds_prev_snap_obj);
	(void) printf("\t\tprev_snap_txg = %llu\n",
	    (u_longlong_t)ds->ds_prev_snap_txg);
	(void) printf("\t\tnext_snap_obj = %llu\n",
	    (u_longlong_t)ds->ds_next_snap_obj);
	(void) printf("\t\tsnapnames_zapobj = %llu\n",
	    (u_longlong_t)ds->ds_snapnames_zapobj);
	(void) printf("\t\tnum_children = %llu\n",
	    (u_longlong_t)ds->ds_num_children);
	(void) printf("\t\tuserrefs_obj = %llu\n",
	    (u_longlong_t)ds->ds_userrefs_obj);
	(void) printf("\t\tcreation_time = %s", ctime(&crtime));
	(void) printf("\t\tcreation_txg = %llu\n",
	    (u_longlong_t)ds->ds_creation_txg);
	(void) printf("\t\tdeadlist_obj = %llu\n",
	    (u_longlong_t)ds->ds_deadlist_obj);
	(void) printf("\t\tused_bytes = %s\n", used);
	(void) printf("\t\tcompressed_bytes = %s\n", compressed);
	(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
	(void) printf("\t\tunique = %s\n", unique);
	(void) printf("\t\tfsid_guid = %llu\n",
	    (u_longlong_t)ds->ds_fsid_guid);
	(void) printf("\t\tguid = %llu\n",
	    (u_longlong_t)ds->ds_guid);
	(void) printf("\t\tflags = %llx\n",
	    (u_longlong_t)ds->ds_flags);
	(void) printf("\t\tnext_clones_obj = %llu\n",
	    (u_longlong_t)ds->ds_next_clones_obj);
	(void) printf("\t\tprops_obj = %llu\n",
	    (u_longlong_t)ds->ds_props_obj);
	(void) printf("\t\tbp = %s\n", blkbuf);
}

/* ARGSUSED */
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	char blkbuf[BP_SPRINTF_LEN];

	if (bp->blk_birth != 0) {
		snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
		(void) printf("\t%s\n", blkbuf);
	}
	return (0);
}

static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
	char bytes[32];
	bptree_phys_t *bt;
	dmu_buf_t *db;

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);

	if (dump_opt['d'] < 3)
		return;

	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
	bt = db->db_data;
	zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
	(void) printf("\n    %s: %llu datasets, %s\n",
	    name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
	dmu_buf_rele(db, FTAG);

	if (dump_opt['d'] < 5)
		return;

	(void) printf("\n");

	(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}

/* ARGSUSED */
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	char blkbuf[BP_SPRINTF_LEN];

	ASSERT(bp->blk_birth != 0);
	snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
	(void) printf("\t%s\n", blkbuf);
	return (0);
}
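
/*
 * Print a bpobj's summary line and, for a composite bpobj, recurse into
 * each sub-bpobj; at -ddddd the top-level call also iterates the individual
 * block pointers.
 */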
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
	char bytes[32];
	char comp[32];
	char uncomp[32];

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);

	if (dump_opt['d'] < 3)
		return;

	zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
	if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
		zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
		zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
		(void) printf("    %*s: object %llu, %llu local blkptrs, "
		    "%llu subobjs in object %llu, %s (%s/%s comp)\n",
		    indent * 8, name,
		    (u_longlong_t)bpo->bpo_object,
		    (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
		    (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
		    (u_longlong_t)bpo->bpo_phys->bpo_subobjs,
		    bytes, comp, uncomp);

		for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
			uint64_t subobj;
			bpobj_t subbpo;
			int error;
			VERIFY0(dmu_read(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs,
			    i * sizeof (subobj), sizeof (subobj), &subobj, 0));
			error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
			if (error != 0) {
				(void) printf("ERROR %u while trying to open "
				    "subobj id %llu\n",
				    error, (u_longlong_t)subobj);
				continue;
			}
			dump_full_bpobj(&subbpo, "subobj", indent + 1);
			bpobj_close(&subbpo);
		}
	} else {
		(void) printf("    %*s: object %llu, %llu blkptrs, %s\n",
		    indent * 8, name,
		    (u_longlong_t)bpo->bpo_object,
		    (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
		    bytes);
	}

	if (dump_opt['d'] < 5)
		return;

	if (indent == 0) {
		(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
		(void) printf("\n");
	}
}

static void
dump_deadlist(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;
	uint64_t unused;
	char bytes[32];
	char comp[32];
	char uncomp[32];

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);

	if (dump_opt['d'] < 3)
		return;

	if (dl->dl_oldfmt) {
		dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
		return;
	}

	zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
	zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
	zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
	(void) printf("\n    Deadlist: %s (%s/%s comp)\n",
	    bytes, comp, uncomp);

	if (dump_opt['d'] < 4)
		return;

	(void) printf("\n");

	/* force the tree to be loaded */
	dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused);

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		if (dump_opt['d'] >= 5) {
			char buf[128];
			(void) snprintf(buf, sizeof (buf),
			    "mintxg %llu -> obj %llu",
			    (longlong_t)dle->dle_mintxg,
			    (longlong_t)dle->dle_bpobj.bpo_object);

			dump_full_bpobj(&dle->dle_bpobj, buf, 0);
		} else {
			(void) printf("mintxg %llu -> obj %llu\n",
			    (longlong_t)dle->dle_mintxg,
			    (longlong_t)dle->dle_bpobj.bpo_object);
		}
	}
}

static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;

static int
open_objset(const char *path, dmu_objset_type_t type, void *tag, objset_t **osp)
{
	int err;
	uint64_t sa_attrs = 0;
	uint64_t version = 0;

	VERIFY3P(sa_os, ==, NULL);
	err = dmu_objset_own(path, type, B_TRUE, tag, osp);
	if (err != 0) {
		(void) fprintf(stderr, "failed to own dataset '%s': %s\n", path,
		    strerror(err));
		return (err);
	}

	if (dmu_objset_type(*osp) == DMU_OST_ZFS) {
		(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
		    8, 1, &version);
		if (version >= ZPL_VERSION_SA) {
			(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
			    8, 1, &sa_attrs);
		}
		err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
		    &sa_attr_table);
		if (err != 0) {
			(void) fprintf(stderr, "sa_setup failed: %s\n",
			    strerror(err));
			dmu_objset_disown(*osp, tag);
			*osp = NULL;
		}
	}
	sa_os = *osp;

	return (err);
}
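
/*
 * Release the objset acquired by open_objset(), tearing down the SA state
 * first when it was set up.
 */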
static void
close_objset(objset_t *os, void *tag)
{
	VERIFY3P(os, ==, sa_os);
	if (os->os_sa != NULL)
		sa_tear_down(os);
	dmu_objset_disown(os, tag);
	sa_attr_table = NULL;
	sa_os = NULL;
}

static void
fuid_table_destroy()
{
	if (fuid_table_loaded) {
		zfs_fuid_table_destroy(&idx_tree, &domain_tree);
		fuid_table_loaded = B_FALSE;
	}
}

/*
 * print uid or gid information.
 * For normal POSIX id just the id is printed in decimal format.
 * For CIFS files with FUID the fuid is printed in hex followed by
 * the domain-rid string.
 */
static void
print_idstr(uint64_t id, const char *id_type)
{
	if (FUID_INDEX(id)) {
		char *domain;

		domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
		(void) printf("\t%s     %llx [%s-%d]\n", id_type,
		    (u_longlong_t)id, domain, (int)FUID_RID(id));
	} else {
		(void) printf("\t%s     %llu\n", id_type, (u_longlong_t)id);
	}
}

static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
	uint32_t uid_idx, gid_idx;

	uid_idx = FUID_INDEX(uid);
	gid_idx = FUID_INDEX(gid);

	/* Load domain table, if not already loaded */
	if (!fuid_table_loaded && (uid_idx || gid_idx)) {
		uint64_t fuid_obj;

		/* first find the fuid object.  It lives in the master node */
		VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
		    8, 1, &fuid_obj) == 0);
		zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
		(void) zfs_fuid_table_load(os, fuid_obj,
		    &idx_tree, &domain_tree);
		fuid_table_loaded = B_TRUE;
	}

	print_idstr(uid, "uid");
	print_idstr(gid, "gid");
}
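
/*
 * Object viewer for a ZPL znode: pull the common system attributes through
 * the SA layer and print path, ownership, times, mode, size, and any xattr
 * or rdev attributes present; when -d verbosity exceeds 4, objects whose
 * paths cannot be resolved are counted as possibly leaked.
 */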
(time_t)chgtm[0]; 1966 1967 if (dump_opt['d'] > 4) { 1968 error = zfs_obj_to_path(os, object, path, sizeof (path)); 1969 if (error == ESTALE) { 1970 (void) snprintf(path, sizeof (path), "on delete queue"); 1971 } else if (error != 0) { 1972 leaked_objects++; 1973 (void) snprintf(path, sizeof (path), 1974 "path not found, possibly leaked"); 1975 } 1976 (void) printf("\tpath %s\n", path); 1977 } 1978 dump_uidgid(os, uid, gid); 1979 (void) printf("\tatime %s", ctime(&z_atime)); 1980 (void) printf("\tmtime %s", ctime(&z_mtime)); 1981 (void) printf("\tctime %s", ctime(&z_ctime)); 1982 (void) printf("\tcrtime %s", ctime(&z_crtime)); 1983 (void) printf("\tgen %llu\n", (u_longlong_t)gen); 1984 (void) printf("\tmode %llo\n", (u_longlong_t)mode); 1985 (void) printf("\tsize %llu\n", (u_longlong_t)fsize); 1986 (void) printf("\tparent %llu\n", (u_longlong_t)parent); 1987 (void) printf("\tlinks %llu\n", (u_longlong_t)links); 1988 (void) printf("\tpflags %llx\n", (u_longlong_t)pflags); 1989 if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr, 1990 sizeof (uint64_t)) == 0) 1991 (void) printf("\txattr %llu\n", (u_longlong_t)xattr); 1992 if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev, 1993 sizeof (uint64_t)) == 0) 1994 (void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev); 1995 sa_handle_destroy(hdl); 1996 } 1997 1998 /*ARGSUSED*/ 1999 static void 2000 dump_acl(objset_t *os, uint64_t object, void *data, size_t size) 2001 { 2002 } 2003 2004 /*ARGSUSED*/ 2005 static void 2006 dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size) 2007 { 2008 } 2009 2010 static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = { 2011 dump_none, /* unallocated */ 2012 dump_zap, /* object directory */ 2013 dump_uint64, /* object array */ 2014 dump_none, /* packed nvlist */ 2015 dump_packed_nvlist, /* packed nvlist size */ 2016 dump_none, /* bpobj */ 2017 dump_bpobj, /* bpobj header */ 2018 dump_none, /* SPA space map header */ 2019 dump_none, /* SPA space map */ 2020 dump_none, /* ZIL intent log */ 2021 dump_dnode, /* DMU dnode */ 2022 dump_dmu_objset, /* DMU objset */ 2023 dump_dsl_dir, /* DSL directory */ 2024 dump_zap, /* DSL directory child map */ 2025 dump_zap, /* DSL dataset snap map */ 2026 dump_zap, /* DSL props */ 2027 dump_dsl_dataset, /* DSL dataset */ 2028 dump_znode, /* ZFS znode */ 2029 dump_acl, /* ZFS V0 ACL */ 2030 dump_uint8, /* ZFS plain file */ 2031 dump_zpldir, /* ZFS directory */ 2032 dump_zap, /* ZFS master node */ 2033 dump_zap, /* ZFS delete queue */ 2034 dump_uint8, /* zvol object */ 2035 dump_zap, /* zvol prop */ 2036 dump_uint8, /* other uint8[] */ 2037 dump_uint64, /* other uint64[] */ 2038 dump_zap, /* other ZAP */ 2039 dump_zap, /* persistent error log */ 2040 dump_uint8, /* SPA history */ 2041 dump_history_offsets, /* SPA history offsets */ 2042 dump_zap, /* Pool properties */ 2043 dump_zap, /* DSL permissions */ 2044 dump_acl, /* ZFS ACL */ 2045 dump_uint8, /* ZFS SYSACL */ 2046 dump_none, /* FUID nvlist */ 2047 dump_packed_nvlist, /* FUID nvlist size */ 2048 dump_zap, /* DSL dataset next clones */ 2049 dump_zap, /* DSL scrub queue */ 2050 dump_zap, /* ZFS user/group used */ 2051 dump_zap, /* ZFS user/group quota */ 2052 dump_zap, /* snapshot refcount tags */ 2053 dump_ddt_zap, /* DDT ZAP object */ 2054 dump_zap, /* DDT statistics */ 2055 dump_znode, /* SA object */ 2056 dump_zap, /* SA Master Node */ 2057 dump_sa_attrs, /* SA attribute registration */ 2058 dump_sa_layouts, /* SA attribute layouts */ 2059 dump_zap, /* DSL scrub translations */ 2060 dump_none, /* fake 
dedup BP */
2061 dump_zap, /* deadlist */
2062 dump_none, /* deadlist hdr */
2063 dump_zap, /* dsl clones */
2064 dump_bpobj_subobjs, /* bpobj subobjs */
2065 dump_unknown, /* Unknown type, must be last */
2066 };
2067
2068 static void
2069 dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header)
2070 {
2071 dmu_buf_t *db = NULL;
2072 dmu_object_info_t doi;
2073 dnode_t *dn;
2074 void *bonus = NULL;
2075 size_t bsize = 0;
2076 char iblk[32], dblk[32], lsize[32], asize[32], fill[32];
2077 char bonus_size[32];
2078 char aux[50];
2079 int error;
2080
2081 /* make sure nicenum has enough space */
2082 CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ);
2083 CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ);
2084 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
2085 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
2086 CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ);
2087
2088 if (*print_header) {
2089 (void) printf("\n%10s %3s %5s %5s %5s %5s %6s %s\n",
2090 "Object", "lvl", "iblk", "dblk", "dsize", "lsize",
2091 "%full", "type");
2092 *print_header = 0;
2093 }
2094
2095 if (object == 0) {
2096 dn = DMU_META_DNODE(os);
2097 } else {
2098 error = dmu_bonus_hold(os, object, FTAG, &db);
2099 if (error)
2100 fatal("dmu_bonus_hold(%llu) failed, errno %u",
2101 object, error);
2102 bonus = db->db_data;
2103 bsize = db->db_size;
2104 dn = DB_DNODE((dmu_buf_impl_t *)db);
2105 }
2106 dmu_object_info_from_dnode(dn, &doi);
2107
2108 zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
2109 zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
2110 zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
2111 zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
2112 zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
2113 (void) snprintf(fill, sizeof (fill), "%6.2f", 100.0 * doi.doi_fill_count *
2114 doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
2115 doi.doi_max_offset);
2116
2117 aux[0] = '\0';
2118
2119 if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
2120 (void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
2121 " (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
2122 }
2123
2124 if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
2125 (void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
2126 " (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
2127 }
2128
2129 (void) printf("%10lld %3u %5s %5s %5s %5s %6s %s%s\n",
2130 (u_longlong_t)object, doi.doi_indirection, iblk, dblk,
2131 asize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux);
2132
2133 if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
2134 (void) printf("%10s %3s %5s %5s %5s %5s %6s %s\n",
2135 "", "", "", "", "", bonus_size, "bonus",
2136 ZDB_OT_NAME(doi.doi_bonus_type));
2137 }
2138
2139 if (verbosity >= 4) {
2140 (void) printf("\tdnode flags: %s%s%s\n",
2141 (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
2142 "USED_BYTES " : "",
2143 (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
2144 "USERUSED_ACCOUNTED " : "",
2145 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
2146 "SPILL_BLKPTR" : "");
2147 (void) printf("\tdnode maxblkid: %llu\n",
2148 (longlong_t)dn->dn_phys->dn_maxblkid);
2149
2150 object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os, object,
2151 bonus, bsize);
2152 object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object, NULL, 0);
2153 *print_header = 1;
2154 }
2155
2156 if (verbosity >= 5)
2157 dump_indirect(dn);
2158
2159 if (verbosity >= 5) {
2160 /*
2161 * Report the list of segments that comprise the object.
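 *
 * A segment is a maximal run of allocated (non-hole) blocks, found by
 * alternating dnode_next_offset() calls: one to find the next data
 * block, one with DNODE_FIND_HOLE to find where the run ends. For
 * example, a sparse file with data only at [0, 128K) and [1M, 1M+128K)
 * would be reported as two segments (sizes here are illustrative).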
2162 */ 2163 uint64_t start = 0; 2164 uint64_t end; 2165 uint64_t blkfill = 1; 2166 int minlvl = 1; 2167 2168 if (dn->dn_type == DMU_OT_DNODE) { 2169 minlvl = 0; 2170 blkfill = DNODES_PER_BLOCK; 2171 } 2172 2173 for (;;) { 2174 char segsize[32]; 2175 /* make sure nicenum has enough space */ 2176 CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ); 2177 error = dnode_next_offset(dn, 2178 0, &start, minlvl, blkfill, 0); 2179 if (error) 2180 break; 2181 end = start; 2182 error = dnode_next_offset(dn, 2183 DNODE_FIND_HOLE, &end, minlvl, blkfill, 0); 2184 zdb_nicenum(end - start, segsize, sizeof (segsize)); 2185 (void) printf("\t\tsegment [%016llx, %016llx)" 2186 " size %5s\n", (u_longlong_t)start, 2187 (u_longlong_t)end, segsize); 2188 if (error) 2189 break; 2190 start = end; 2191 } 2192 } 2193 2194 if (db != NULL) 2195 dmu_buf_rele(db, FTAG); 2196 } 2197 2198 static const char *objset_types[DMU_OST_NUMTYPES] = { 2199 "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" }; 2200 2201 static void 2202 dump_dir(objset_t *os) 2203 { 2204 dmu_objset_stats_t dds; 2205 uint64_t object, object_count; 2206 uint64_t refdbytes, usedobjs, scratch; 2207 char numbuf[32]; 2208 char blkbuf[BP_SPRINTF_LEN + 20]; 2209 char osname[ZFS_MAX_DATASET_NAME_LEN]; 2210 const char *type = "UNKNOWN"; 2211 int verbosity = dump_opt['d']; 2212 int print_header = 1; 2213 unsigned i; 2214 int error; 2215 2216 /* make sure nicenum has enough space */ 2217 CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ); 2218 2219 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 2220 dmu_objset_fast_stat(os, &dds); 2221 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 2222 2223 if (dds.dds_type < DMU_OST_NUMTYPES) 2224 type = objset_types[dds.dds_type]; 2225 2226 if (dds.dds_type == DMU_OST_META) { 2227 dds.dds_creation_txg = TXG_INITIAL; 2228 usedobjs = BP_GET_FILL(os->os_rootbp); 2229 refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)-> 2230 dd_used_bytes; 2231 } else { 2232 dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch); 2233 } 2234 2235 ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp)); 2236 2237 zdb_nicenum(refdbytes, numbuf, sizeof (numbuf)); 2238 2239 if (verbosity >= 4) { 2240 (void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp "); 2241 (void) snprintf_blkptr(blkbuf + strlen(blkbuf), 2242 sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp); 2243 } else { 2244 blkbuf[0] = '\0'; 2245 } 2246 2247 dmu_objset_name(os, osname); 2248 2249 (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, " 2250 "%s, %llu objects%s\n", 2251 osname, type, (u_longlong_t)dmu_objset_id(os), 2252 (u_longlong_t)dds.dds_creation_txg, 2253 numbuf, (u_longlong_t)usedobjs, blkbuf); 2254 2255 if (zopt_objects != 0) { 2256 for (i = 0; i < zopt_objects; i++) 2257 dump_object(os, zopt_object[i], verbosity, 2258 &print_header); 2259 (void) printf("\n"); 2260 return; 2261 } 2262 2263 if (dump_opt['i'] != 0 || verbosity >= 2) 2264 dump_intent_log(dmu_objset_zil(os)); 2265 2266 if (dmu_objset_ds(os) != NULL) { 2267 dsl_dataset_t *ds = dmu_objset_ds(os); 2268 dump_deadlist(&ds->ds_deadlist); 2269 2270 if (dsl_dataset_remap_deadlist_exists(ds)) { 2271 (void) printf("ds_remap_deadlist:\n"); 2272 dump_deadlist(&ds->ds_remap_deadlist); 2273 } 2274 } 2275 2276 if (verbosity < 2) 2277 return; 2278 2279 if (BP_IS_HOLE(os->os_rootbp)) 2280 return; 2281 2282 dump_object(os, 0, verbosity, &print_header); 2283 object_count = 0; 2284 if (DMU_USERUSED_DNODE(os) != NULL && 2285 DMU_USERUSED_DNODE(os)->dn_type != 0) { 2286 dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header); 
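/*
 * The per-group space accounting object is dumped alongside the
 * per-user one; both are special objects outside the range that
 * dmu_object_next() visits below.
 */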
2287 dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header);
2288 }
2289
2290 object = 0;
2291 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
2292 dump_object(os, object, verbosity, &print_header);
2293 object_count++;
2294 }
2295
2296 ASSERT3U(object_count, ==, usedobjs);
2297
2298 (void) printf("\n");
2299
2300 if (error != ESRCH) {
2301 (void) fprintf(stderr, "dmu_object_next() = %d\n", error);
2302 abort();
2303 }
2304 if (leaked_objects != 0) {
2305 (void) printf("%d potentially leaked objects detected\n",
2306 leaked_objects);
2307 leaked_objects = 0;
2308 }
2309 }
2310
2311 static void
2312 dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
2313 {
2314 time_t timestamp = ub->ub_timestamp;
2315
2316 (void) printf("%s", header ? header : "");
2317 (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
2318 (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
2319 (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
2320 (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
2321 (void) printf("\ttimestamp = %llu UTC = %s",
2322 (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
2323 if (dump_opt['u'] >= 3) {
2324 char blkbuf[BP_SPRINTF_LEN];
2325 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
2326 (void) printf("\trootbp = %s\n", blkbuf);
2327 }
2328 (void) printf("\tcheckpoint_txg = %llu\n",
2329 (u_longlong_t)ub->ub_checkpoint_txg);
2330 (void) printf("%s", footer ? footer : "");
2331 }
2332
2333 static void
2334 dump_config(spa_t *spa)
2335 {
2336 dmu_buf_t *db;
2337 size_t nvsize = 0;
2338 int error = 0;
2339
2340
2341 error = dmu_bonus_hold(spa->spa_meta_objset,
2342 spa->spa_config_object, FTAG, &db);
2343
2344 if (error == 0) {
2345 nvsize = *(uint64_t *)db->db_data;
2346 dmu_buf_rele(db, FTAG);
2347
2348 (void) printf("\nMOS Configuration:\n");
2349 dump_packed_nvlist(spa->spa_meta_objset,
2350 spa->spa_config_object, (void *)&nvsize, 1);
2351 } else {
2352 (void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d\n",
2353 (u_longlong_t)spa->spa_config_object, error);
2354 }
2355 }
2356
2357 static void
2358 dump_cachefile(const char *cachefile)
2359 {
2360 int fd;
2361 struct stat64 statbuf;
2362 char *buf;
2363 nvlist_t *config;
2364
2365 if ((fd = open64(cachefile, O_RDONLY)) < 0) {
2366 (void) printf("cannot open '%s': %s\n", cachefile,
2367 strerror(errno));
2368 exit(1);
2369 }
2370
2371 if (fstat64(fd, &statbuf) != 0) {
2372 (void) printf("failed to stat '%s': %s\n", cachefile,
2373 strerror(errno));
2374 exit(1);
2375 }
2376
2377 if ((buf = malloc(statbuf.st_size)) == NULL) {
2378 (void) fprintf(stderr, "failed to allocate %llu bytes\n",
2379 (u_longlong_t)statbuf.st_size);
2380 exit(1);
2381 }
2382
2383 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
2384 (void) fprintf(stderr, "failed to read %llu bytes\n",
2385 (u_longlong_t)statbuf.st_size);
2386 exit(1);
2387 }
2388
2389 (void) close(fd);
2390
2391 if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
2392 (void) fprintf(stderr, "failed to unpack nvlist\n");
2393 exit(1);
2394 }
2395
2396 free(buf);
2397
2398 dump_nvlist(config, 0);
2399
2400 nvlist_free(config);
2401 }
2402
2403 #define ZDB_MAX_UB_HEADER_SIZE 32
2404
2405 static void
2406 dump_label_uberblocks(vdev_label_t *lbl, uint64_t ashift)
2407 {
2408 vdev_t vd;
2409 vdev_t *vdp = &vd;
2410 char header[ZDB_MAX_UB_HEADER_SIZE];
2411
2412 vd.vdev_ashift = ashift;
2413 vdp->vdev_top = vdp;
2414
2415 for (int i = 0; i <
VDEV_UBERBLOCK_COUNT(vdp); i++) { 2416 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(vdp, i); 2417 uberblock_t *ub = (void *)((char *)lbl + uoff); 2418 2419 if (uberblock_verify(ub)) 2420 continue; 2421 (void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE, 2422 "Uberblock[%d]\n", i); 2423 dump_uberblock(ub, header, ""); 2424 } 2425 } 2426 2427 static char curpath[PATH_MAX]; 2428 2429 /* 2430 * Iterate through the path components, recursively passing 2431 * current one's obj and remaining path until we find the obj 2432 * for the last one. 2433 */ 2434 static int 2435 dump_path_impl(objset_t *os, uint64_t obj, char *name) 2436 { 2437 int err; 2438 int header = 1; 2439 uint64_t child_obj; 2440 char *s; 2441 dmu_buf_t *db; 2442 dmu_object_info_t doi; 2443 2444 if ((s = strchr(name, '/')) != NULL) 2445 *s = '\0'; 2446 err = zap_lookup(os, obj, name, 8, 1, &child_obj); 2447 2448 (void) strlcat(curpath, name, sizeof (curpath)); 2449 2450 if (err != 0) { 2451 (void) fprintf(stderr, "failed to lookup %s: %s\n", 2452 curpath, strerror(err)); 2453 return (err); 2454 } 2455 2456 child_obj = ZFS_DIRENT_OBJ(child_obj); 2457 err = sa_buf_hold(os, child_obj, FTAG, &db); 2458 if (err != 0) { 2459 (void) fprintf(stderr, 2460 "failed to get SA dbuf for obj %llu: %s\n", 2461 (u_longlong_t)child_obj, strerror(err)); 2462 return (EINVAL); 2463 } 2464 dmu_object_info_from_db(db, &doi); 2465 sa_buf_rele(db, FTAG); 2466 2467 if (doi.doi_bonus_type != DMU_OT_SA && 2468 doi.doi_bonus_type != DMU_OT_ZNODE) { 2469 (void) fprintf(stderr, "invalid bonus type %d for obj %llu\n", 2470 doi.doi_bonus_type, (u_longlong_t)child_obj); 2471 return (EINVAL); 2472 } 2473 2474 if (dump_opt['v'] > 6) { 2475 (void) printf("obj=%llu %s type=%d bonustype=%d\n", 2476 (u_longlong_t)child_obj, curpath, doi.doi_type, 2477 doi.doi_bonus_type); 2478 } 2479 2480 (void) strlcat(curpath, "/", sizeof (curpath)); 2481 2482 switch (doi.doi_type) { 2483 case DMU_OT_DIRECTORY_CONTENTS: 2484 if (s != NULL && *(s + 1) != '\0') 2485 return (dump_path_impl(os, child_obj, s + 1)); 2486 /*FALLTHROUGH*/ 2487 case DMU_OT_PLAIN_FILE_CONTENTS: 2488 dump_object(os, child_obj, dump_opt['v'], &header); 2489 return (0); 2490 default: 2491 (void) fprintf(stderr, "object %llu has non-file/directory " 2492 "type %d\n", (u_longlong_t)obj, doi.doi_type); 2493 break; 2494 } 2495 2496 return (EINVAL); 2497 } 2498 2499 /* 2500 * Dump the blocks for the object specified by path inside the dataset. 
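 *
 * For example (hypothetical names), "zdb -O tank/fs var/log/messages"
 * should arrive here with ds = "tank/fs" and path = "var/log/messages"
 * and dump the object backing that file.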
2501 */
2502 static int
2503 dump_path(char *ds, char *path)
2504 {
2505 int err;
2506 objset_t *os;
2507 uint64_t root_obj;
2508
2509 err = open_objset(ds, DMU_OST_ZFS, FTAG, &os);
2510 if (err != 0)
2511 return (err);
2512
2513 err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
2514 if (err != 0) {
2515 (void) fprintf(stderr, "can't lookup root znode: %s\n",
2516 strerror(err));
2517 close_objset(os, FTAG);
2518 return (EINVAL);
2519 }
2520
2521 (void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
2522
2523 err = dump_path_impl(os, root_obj, path);
2524
2525 close_objset(os, FTAG);
2526 return (err);
2527 }
2528
2529 static int
2530 dump_label(const char *dev)
2531 {
2532 int fd;
2533 vdev_label_t label;
2534 char path[MAXPATHLEN];
2535 char *buf = label.vl_vdev_phys.vp_nvlist;
2536 size_t buflen = sizeof (label.vl_vdev_phys.vp_nvlist);
2537 struct stat64 statbuf;
2538 uint64_t psize, ashift;
2539 boolean_t label_found = B_FALSE;
2540
2541 (void) strlcpy(path, dev, sizeof (path));
2542 if (dev[0] == '/') {
2543 if (strncmp(dev, ZFS_DISK_ROOTD,
2544 strlen(ZFS_DISK_ROOTD)) == 0) {
2545 (void) snprintf(path, sizeof (path), "%s%s",
2546 ZFS_RDISK_ROOTD, dev + strlen(ZFS_DISK_ROOTD));
2547 }
2548 } else if (stat64(path, &statbuf) != 0) {
2549 char *s;
2550
2551 (void) snprintf(path, sizeof (path), "%s%s", ZFS_RDISK_ROOTD,
2552 dev);
2553 if (((s = strrchr(dev, 's')) == NULL &&
2554 (s = strchr(dev, 'p')) == NULL) ||
2555 !isdigit(*(s + 1)))
2556 (void) strlcat(path, "s0", sizeof (path));
2557 }
2558
2559 if ((fd = open64(path, O_RDONLY)) < 0) {
2560 (void) fprintf(stderr, "cannot open '%s': %s\n", path,
2561 strerror(errno));
2562 exit(1);
2563 }
2564
2565 if (fstat64(fd, &statbuf) != 0) {
2566 (void) fprintf(stderr, "failed to stat '%s': %s\n", path,
2567 strerror(errno));
2568 (void) close(fd);
2569 exit(1);
2570 }
2571
2572 if (S_ISBLK(statbuf.st_mode)) {
2573 (void) fprintf(stderr,
2574 "cannot use '%s': character device required\n", path);
2575 (void) close(fd);
2576 exit(1);
2577 }
2578
2579 psize = statbuf.st_size;
2580 psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
2581
2582 for (int l = 0; l < VDEV_LABELS; l++) {
2583 nvlist_t *config = NULL;
2584
2585 if (!dump_opt['q']) {
2586 (void) printf("------------------------------------\n");
2587 (void) printf("LABEL %d\n", l);
2588 (void) printf("------------------------------------\n");
2589 }
2590
2591 if (pread64(fd, &label, sizeof (label),
2592 vdev_label_offset(psize, l, 0)) != sizeof (label)) {
2593 if (!dump_opt['q'])
2594 (void) printf("failed to read label %d\n", l);
2595 continue;
2596 }
2597
2598 if (nvlist_unpack(buf, buflen, &config, 0) != 0) {
2599 if (!dump_opt['q'])
2600 (void) printf("failed to unpack label %d\n", l);
2601 ashift = SPA_MINBLOCKSHIFT;
2602 } else {
2603 nvlist_t *vdev_tree = NULL;
2604
2605 if (!dump_opt['q'])
2606 dump_nvlist(config, 4);
2607 if ((nvlist_lookup_nvlist(config,
2608 ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
2609 (nvlist_lookup_uint64(vdev_tree,
2610 ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
2611 ashift = SPA_MINBLOCKSHIFT;
2612 nvlist_free(config);
2613 label_found = B_TRUE;
2614 }
2615 if (dump_opt['u'])
2616 dump_label_uberblocks(&label, ashift);
2617 }
2618
2619 (void) close(fd);
2620
2621 return (label_found ?
0 : 2); 2622 } 2623 2624 static uint64_t dataset_feature_count[SPA_FEATURES]; 2625 static uint64_t remap_deadlist_count = 0; 2626 2627 /*ARGSUSED*/ 2628 static int 2629 dump_one_dir(const char *dsname, void *arg) 2630 { 2631 int error; 2632 objset_t *os; 2633 2634 error = open_objset(dsname, DMU_OST_ANY, FTAG, &os); 2635 if (error != 0) 2636 return (0); 2637 2638 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 2639 if (!dmu_objset_ds(os)->ds_feature_inuse[f]) 2640 continue; 2641 ASSERT(spa_feature_table[f].fi_flags & 2642 ZFEATURE_FLAG_PER_DATASET); 2643 dataset_feature_count[f]++; 2644 } 2645 2646 if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) { 2647 remap_deadlist_count++; 2648 } 2649 2650 dump_dir(os); 2651 close_objset(os, FTAG); 2652 fuid_table_destroy(); 2653 return (0); 2654 } 2655 2656 /* 2657 * Block statistics. 2658 */ 2659 #define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2) 2660 typedef struct zdb_blkstats { 2661 uint64_t zb_asize; 2662 uint64_t zb_lsize; 2663 uint64_t zb_psize; 2664 uint64_t zb_count; 2665 uint64_t zb_gangs; 2666 uint64_t zb_ditto_samevdev; 2667 uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE]; 2668 } zdb_blkstats_t; 2669 2670 /* 2671 * Extended object types to report deferred frees and dedup auto-ditto blocks. 2672 */ 2673 #define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0) 2674 #define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1) 2675 #define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2) 2676 #define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3) 2677 2678 static const char *zdb_ot_extname[] = { 2679 "deferred free", 2680 "dedup ditto", 2681 "other", 2682 "Total", 2683 }; 2684 2685 #define ZB_TOTAL DN_MAX_LEVELS 2686 2687 typedef struct zdb_cb { 2688 zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1]; 2689 uint64_t zcb_removing_size; 2690 uint64_t zcb_checkpoint_size; 2691 uint64_t zcb_dedup_asize; 2692 uint64_t zcb_dedup_blocks; 2693 uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES]; 2694 uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES] 2695 [BPE_PAYLOAD_SIZE]; 2696 uint64_t zcb_start; 2697 hrtime_t zcb_lastprint; 2698 uint64_t zcb_totalasize; 2699 uint64_t zcb_errors[256]; 2700 int zcb_readfails; 2701 int zcb_haderrors; 2702 spa_t *zcb_spa; 2703 uint32_t **zcb_vd_obsolete_counts; 2704 } zdb_cb_t; 2705 2706 static void 2707 zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp, 2708 dmu_object_type_t type) 2709 { 2710 uint64_t refcnt = 0; 2711 2712 ASSERT(type < ZDB_OT_TOTAL); 2713 2714 if (zilog && zil_bp_tree_add(zilog, bp) != 0) 2715 return; 2716 2717 for (int i = 0; i < 4; i++) { 2718 int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL; 2719 int t = (i & 1) ? type : ZDB_OT_TOTAL; 2720 int equal; 2721 zdb_blkstats_t *zb = &zcb->zcb_type[l][t]; 2722 2723 zb->zb_asize += BP_GET_ASIZE(bp); 2724 zb->zb_lsize += BP_GET_LSIZE(bp); 2725 zb->zb_psize += BP_GET_PSIZE(bp); 2726 zb->zb_count++; 2727 2728 /* 2729 * The histogram is only big enough to record blocks up to 2730 * SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last, 2731 * "other", bucket. 
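 *
 * For example, with SPA_MINBLOCKSHIFT == 9 and SPA_OLD_MAXBLOCKSIZE ==
 * 128K (their illumos values), a 4K block maps to bucket 4096 >> 9 == 8,
 * a 128K block to bucket 256, and anything larger is clamped to bucket
 * 257 ("other") by the MIN() below.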
2732 */ 2733 unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT; 2734 idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1); 2735 zb->zb_psize_histogram[idx]++; 2736 2737 zb->zb_gangs += BP_COUNT_GANG(bp); 2738 2739 switch (BP_GET_NDVAS(bp)) { 2740 case 2: 2741 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 2742 DVA_GET_VDEV(&bp->blk_dva[1])) 2743 zb->zb_ditto_samevdev++; 2744 break; 2745 case 3: 2746 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 2747 DVA_GET_VDEV(&bp->blk_dva[1])) + 2748 (DVA_GET_VDEV(&bp->blk_dva[0]) == 2749 DVA_GET_VDEV(&bp->blk_dva[2])) + 2750 (DVA_GET_VDEV(&bp->blk_dva[1]) == 2751 DVA_GET_VDEV(&bp->blk_dva[2])); 2752 if (equal != 0) 2753 zb->zb_ditto_samevdev++; 2754 break; 2755 } 2756 2757 } 2758 2759 if (BP_IS_EMBEDDED(bp)) { 2760 zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++; 2761 zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)] 2762 [BPE_GET_PSIZE(bp)]++; 2763 return; 2764 } 2765 2766 if (dump_opt['L']) 2767 return; 2768 2769 if (BP_GET_DEDUP(bp)) { 2770 ddt_t *ddt; 2771 ddt_entry_t *dde; 2772 2773 ddt = ddt_select(zcb->zcb_spa, bp); 2774 ddt_enter(ddt); 2775 dde = ddt_lookup(ddt, bp, B_FALSE); 2776 2777 if (dde == NULL) { 2778 refcnt = 0; 2779 } else { 2780 ddt_phys_t *ddp = ddt_phys_select(dde, bp); 2781 ddt_phys_decref(ddp); 2782 refcnt = ddp->ddp_refcnt; 2783 if (ddt_phys_total_refcnt(dde) == 0) 2784 ddt_remove(ddt, dde); 2785 } 2786 ddt_exit(ddt); 2787 } 2788 2789 VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa, 2790 refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa), 2791 bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0); 2792 } 2793 2794 static void 2795 zdb_blkptr_done(zio_t *zio) 2796 { 2797 spa_t *spa = zio->io_spa; 2798 blkptr_t *bp = zio->io_bp; 2799 int ioerr = zio->io_error; 2800 zdb_cb_t *zcb = zio->io_private; 2801 zbookmark_phys_t *zb = &zio->io_bookmark; 2802 2803 abd_free(zio->io_abd); 2804 2805 mutex_enter(&spa->spa_scrub_lock); 2806 spa->spa_scrub_inflight--; 2807 cv_broadcast(&spa->spa_scrub_io_cv); 2808 2809 if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2810 char blkbuf[BP_SPRINTF_LEN]; 2811 2812 zcb->zcb_haderrors = 1; 2813 zcb->zcb_errors[ioerr]++; 2814 2815 if (dump_opt['b'] >= 2) 2816 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2817 else 2818 blkbuf[0] = '\0'; 2819 2820 (void) printf("zdb_blkptr_cb: " 2821 "Got error %d reading " 2822 "<%llu, %llu, %lld, %llx> %s -- skipping\n", 2823 ioerr, 2824 (u_longlong_t)zb->zb_objset, 2825 (u_longlong_t)zb->zb_object, 2826 (u_longlong_t)zb->zb_level, 2827 (u_longlong_t)zb->zb_blkid, 2828 blkbuf); 2829 } 2830 mutex_exit(&spa->spa_scrub_lock); 2831 } 2832 2833 static int 2834 zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2835 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2836 { 2837 zdb_cb_t *zcb = arg; 2838 dmu_object_type_t type; 2839 boolean_t is_metadata; 2840 2841 if (bp == NULL) 2842 return (0); 2843 2844 if (dump_opt['b'] >= 5 && bp->blk_birth > 0) { 2845 char blkbuf[BP_SPRINTF_LEN]; 2846 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 2847 (void) printf("objset %llu object %llu " 2848 "level %lld offset 0x%llx %s\n", 2849 (u_longlong_t)zb->zb_objset, 2850 (u_longlong_t)zb->zb_object, 2851 (longlong_t)zb->zb_level, 2852 (u_longlong_t)blkid2offset(dnp, bp, zb), 2853 blkbuf); 2854 } 2855 2856 if (BP_IS_HOLE(bp)) 2857 return (0); 2858 2859 type = BP_GET_TYPE(bp); 2860 2861 zdb_count_block(zcb, zilog, bp, 2862 (type & DMU_OT_NEWTYPE) ? 
ZDB_OT_OTHER : type); 2863 2864 is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)); 2865 2866 if (!BP_IS_EMBEDDED(bp) && 2867 (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) { 2868 size_t size = BP_GET_PSIZE(bp); 2869 abd_t *abd = abd_alloc(size, B_FALSE); 2870 int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW; 2871 2872 /* If it's an intent log block, failure is expected. */ 2873 if (zb->zb_level == ZB_ZIL_LEVEL) 2874 flags |= ZIO_FLAG_SPECULATIVE; 2875 2876 mutex_enter(&spa->spa_scrub_lock); 2877 while (spa->spa_scrub_inflight > max_inflight) 2878 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2879 spa->spa_scrub_inflight++; 2880 mutex_exit(&spa->spa_scrub_lock); 2881 2882 zio_nowait(zio_read(NULL, spa, bp, abd, size, 2883 zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb)); 2884 } 2885 2886 zcb->zcb_readfails = 0; 2887 2888 /* only call gethrtime() every 100 blocks */ 2889 static int iters; 2890 if (++iters > 100) 2891 iters = 0; 2892 else 2893 return (0); 2894 2895 if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) { 2896 uint64_t now = gethrtime(); 2897 char buf[10]; 2898 uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize; 2899 int kb_per_sec = 2900 1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000)); 2901 int sec_remaining = 2902 (zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec; 2903 2904 /* make sure nicenum has enough space */ 2905 CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ); 2906 2907 zfs_nicenum(bytes, buf, sizeof (buf)); 2908 (void) fprintf(stderr, 2909 "\r%5s completed (%4dMB/s) " 2910 "estimated time remaining: %uhr %02umin %02usec ", 2911 buf, kb_per_sec / 1024, 2912 sec_remaining / 60 / 60, 2913 sec_remaining / 60 % 60, 2914 sec_remaining % 60); 2915 2916 zcb->zcb_lastprint = now; 2917 } 2918 2919 return (0); 2920 } 2921 2922 static void 2923 zdb_leak(void *arg, uint64_t start, uint64_t size) 2924 { 2925 vdev_t *vd = arg; 2926 2927 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n", 2928 (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size); 2929 } 2930 2931 static metaslab_ops_t zdb_metaslab_ops = { 2932 NULL /* alloc */ 2933 }; 2934 2935 static void 2936 zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb) 2937 { 2938 ddt_bookmark_t ddb; 2939 ddt_entry_t dde; 2940 int error; 2941 2942 bzero(&ddb, sizeof (ddb)); 2943 while ((error = ddt_walk(spa, &ddb, &dde)) == 0) { 2944 blkptr_t blk; 2945 ddt_phys_t *ddp = dde.dde_phys; 2946 2947 if (ddb.ddb_class == DDT_CLASS_UNIQUE) 2948 return; 2949 2950 ASSERT(ddt_phys_total_refcnt(&dde) > 1); 2951 2952 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2953 if (ddp->ddp_phys_birth == 0) 2954 continue; 2955 ddt_bp_create(ddb.ddb_checksum, 2956 &dde.dde_key, ddp, &blk); 2957 if (p == DDT_PHYS_DITTO) { 2958 zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO); 2959 } else { 2960 zcb->zcb_dedup_asize += 2961 BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1); 2962 zcb->zcb_dedup_blocks++; 2963 } 2964 } 2965 if (!dump_opt['L']) { 2966 ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum]; 2967 ddt_enter(ddt); 2968 VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL); 2969 ddt_exit(ddt); 2970 } 2971 } 2972 2973 ASSERT(error == ENOENT); 2974 } 2975 2976 /* ARGSUSED */ 2977 static void 2978 claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 2979 uint64_t size, void *arg) 2980 { 2981 /* 2982 * This callback was called through a remap from 2983 * a device being removed. 
Therefore, the vdev that 2984 * this callback is applied to is a concrete 2985 * vdev. 2986 */ 2987 ASSERT(vdev_is_concrete(vd)); 2988 2989 VERIFY0(metaslab_claim_impl(vd, offset, size, 2990 spa_min_claim_txg(vd->vdev_spa))); 2991 } 2992 2993 static void 2994 claim_segment_cb(void *arg, uint64_t offset, uint64_t size) 2995 { 2996 vdev_t *vd = arg; 2997 2998 vdev_indirect_ops.vdev_op_remap(vd, offset, size, 2999 claim_segment_impl_cb, NULL); 3000 } 3001 3002 /* 3003 * After accounting for all allocated blocks that are directly referenced, 3004 * we might have missed a reference to a block from a partially complete 3005 * (and thus unused) indirect mapping object. We perform a secondary pass 3006 * through the metaslabs we have already mapped and claim the destination 3007 * blocks. 3008 */ 3009 static void 3010 zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb) 3011 { 3012 if (spa->spa_vdev_removal == NULL) 3013 return; 3014 3015 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3016 3017 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 3018 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 3019 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 3020 3021 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) { 3022 metaslab_t *msp = vd->vdev_ms[msi]; 3023 3024 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim)) 3025 break; 3026 3027 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 3028 3029 if (msp->ms_sm != NULL) { 3030 VERIFY0(space_map_load(msp->ms_sm, 3031 svr->svr_allocd_segs, SM_ALLOC)); 3032 3033 /* 3034 * Clear everything past what has been synced unless 3035 * it's past the spacemap, because we have not allocated 3036 * mappings for it yet. 3037 */ 3038 uint64_t vim_max_offset = 3039 vdev_indirect_mapping_max_offset(vim); 3040 uint64_t sm_end = msp->ms_sm->sm_start + 3041 msp->ms_sm->sm_size; 3042 if (sm_end > vim_max_offset) 3043 range_tree_clear(svr->svr_allocd_segs, 3044 vim_max_offset, sm_end - vim_max_offset); 3045 } 3046 3047 zcb->zcb_removing_size += 3048 range_tree_space(svr->svr_allocd_segs); 3049 range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd); 3050 } 3051 3052 spa_config_exit(spa, SCL_CONFIG, FTAG); 3053 } 3054 3055 /* ARGSUSED */ 3056 static int 3057 increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 3058 { 3059 zdb_cb_t *zcb = arg; 3060 spa_t *spa = zcb->zcb_spa; 3061 vdev_t *vd; 3062 const dva_t *dva = &bp->blk_dva[0]; 3063 3064 ASSERT(!dump_opt['L']); 3065 ASSERT3U(BP_GET_NDVAS(bp), ==, 1); 3066 3067 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3068 vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva)); 3069 ASSERT3P(vd, !=, NULL); 3070 spa_config_exit(spa, SCL_VDEV, FTAG); 3071 3072 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 3073 ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL); 3074 3075 vdev_indirect_mapping_increment_obsolete_count( 3076 vd->vdev_indirect_mapping, 3077 DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva), 3078 zcb->zcb_vd_obsolete_counts[vd->vdev_id]); 3079 3080 return (0); 3081 } 3082 3083 static uint32_t * 3084 zdb_load_obsolete_counts(vdev_t *vd) 3085 { 3086 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 3087 spa_t *spa = vd->vdev_spa; 3088 spa_condensing_indirect_phys_t *scip = 3089 &spa->spa_condensing_indirect_phys; 3090 uint32_t *counts; 3091 3092 EQUIV(vdev_obsolete_sm_object(vd) != 0, vd->vdev_obsolete_sm != NULL); 3093 counts = vdev_indirect_mapping_load_obsolete_counts(vim); 3094 if (vd->vdev_obsolete_sm != NULL) { 3095 
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
3096 vd->vdev_obsolete_sm);
3097 }
/*
 * Also fold in the obsolete space map left over from an in-progress
 * condense of this vdev's indirect mapping, if there is one.
 */
3098 if (scip->scip_vdev == vd->vdev_id &&
3099 scip->scip_prev_obsolete_sm_object != 0) {
3100 space_map_t *prev_obsolete_sm = NULL;
3101 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
3102 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
3103 space_map_update(prev_obsolete_sm);
3104 vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
3105 prev_obsolete_sm);
3106 space_map_close(prev_obsolete_sm);
3107 }
3108 return (counts);
3109 }
3110
3111 typedef struct checkpoint_sm_exclude_entry_arg {
3112 vdev_t *cseea_vd;
3113 uint64_t cseea_checkpoint_size;
3114 } checkpoint_sm_exclude_entry_arg_t;
3115
3116 static int
3117 checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
3118 {
3119 checkpoint_sm_exclude_entry_arg_t *cseea = arg;
3120 vdev_t *vd = cseea->cseea_vd;
3121 metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
3122 uint64_t end = sme->sme_offset + sme->sme_run;
3123
3124 ASSERT(sme->sme_type == SM_FREE);
3125
3126 /*
3127 * Since the vdev_checkpoint_sm exists at the vdev level
3128 * and the ms_sm space maps exist at the metaslab level,
3129 * an entry in the checkpoint space map could theoretically
3130 * cross the boundaries of the metaslab that it belongs to.
3131 *
3132 * In reality, because of the way that we populate and
3133 * manipulate the checkpoint's space maps currently,
3134 * there shouldn't be any entries that cross metaslabs.
3135 * Hence the assertion below.
3136 *
3137 * That said, there is no fundamental requirement that
3138 * the checkpoint's space map entries should not cross
3139 * metaslab boundaries. So if needed we could add code
3140 * that handles metaslab-crossing segments in the future.
3141 */
3142 VERIFY3U(sme->sme_offset, >=, ms->ms_start);
3143 VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
3144
3145 /*
3146 * By removing the entry from the allocated segments we
3147 * also verify that the entry is there to begin with.
3148 */
3149 mutex_enter(&ms->ms_lock);
3150 range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
3151 mutex_exit(&ms->ms_lock);
3152
3153 cseea->cseea_checkpoint_size += sme->sme_run;
3154 return (0);
3155 }
3156
3157 static void
3158 zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
3159 {
3160 spa_t *spa = vd->vdev_spa;
3161 space_map_t *checkpoint_sm = NULL;
3162 uint64_t checkpoint_sm_obj;
3163
3164 /*
3165 * If there is no vdev_top_zap, we are in a pool whose
3166 * version predates the pool checkpoint feature.
3167 */
3168 if (vd->vdev_top_zap == 0)
3169 return;
3170
3171 /*
3172 * If there is no reference to the vdev_checkpoint_sm in
3173 * the vdev_top_zap, then one of the following scenarios
3174 * is true:
3175 *
3176 * 1] There is no checkpoint
3177 * 2] There is a checkpoint, but no checkpointed blocks
3178 * have been freed yet
3179 * 3] The current vdev is indirect
3180 *
3181 * In these cases we return immediately.
3182 */ 3183 if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap, 3184 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0) 3185 return; 3186 3187 VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap, 3188 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, 3189 &checkpoint_sm_obj)); 3190 3191 checkpoint_sm_exclude_entry_arg_t cseea; 3192 cseea.cseea_vd = vd; 3193 cseea.cseea_checkpoint_size = 0; 3194 3195 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa), 3196 checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift)); 3197 space_map_update(checkpoint_sm); 3198 3199 VERIFY0(space_map_iterate(checkpoint_sm, 3200 checkpoint_sm_exclude_entry_cb, &cseea)); 3201 space_map_close(checkpoint_sm); 3202 3203 zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size; 3204 } 3205 3206 static void 3207 zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb) 3208 { 3209 vdev_t *rvd = spa->spa_root_vdev; 3210 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 3211 ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id); 3212 zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb); 3213 } 3214 } 3215 3216 static void 3217 load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype) 3218 { 3219 vdev_t *rvd = spa->spa_root_vdev; 3220 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 3221 vdev_t *vd = rvd->vdev_child[i]; 3222 3223 ASSERT3U(i, ==, vd->vdev_id); 3224 3225 if (vd->vdev_ops == &vdev_indirect_ops) 3226 continue; 3227 3228 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 3229 metaslab_t *msp = vd->vdev_ms[m]; 3230 3231 (void) fprintf(stderr, 3232 "\rloading concrete vdev %llu, " 3233 "metaslab %llu of %llu ...", 3234 (longlong_t)vd->vdev_id, 3235 (longlong_t)msp->ms_id, 3236 (longlong_t)vd->vdev_ms_count); 3237 3238 mutex_enter(&msp->ms_lock); 3239 metaslab_unload(msp); 3240 3241 /* 3242 * We don't want to spend the CPU manipulating the 3243 * size-ordered tree, so clear the range_tree ops. 3244 */ 3245 msp->ms_allocatable->rt_ops = NULL; 3246 3247 if (msp->ms_sm != NULL) { 3248 VERIFY0(space_map_load(msp->ms_sm, 3249 msp->ms_allocatable, maptype)); 3250 } 3251 if (!msp->ms_loaded) 3252 msp->ms_loaded = B_TRUE; 3253 mutex_exit(&msp->ms_lock); 3254 } 3255 } 3256 } 3257 3258 /* 3259 * vm_idxp is an in-out parameter which (for indirect vdevs) is the 3260 * index in vim_entries that has the first entry in this metaslab. 3261 * On return, it will be set to the first entry after this metaslab. 3262 */ 3263 static void 3264 load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp, 3265 uint64_t *vim_idxp) 3266 { 3267 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 3268 3269 mutex_enter(&msp->ms_lock); 3270 metaslab_unload(msp); 3271 3272 /* 3273 * We don't want to spend the CPU manipulating the 3274 * size-ordered tree, so clear the range_tree ops. 3275 */ 3276 msp->ms_allocatable->rt_ops = NULL; 3277 3278 for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim); 3279 (*vim_idxp)++) { 3280 vdev_indirect_mapping_entry_phys_t *vimep = 3281 &vim->vim_entries[*vim_idxp]; 3282 uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep); 3283 uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst); 3284 ASSERT3U(ent_offset, >=, msp->ms_start); 3285 if (ent_offset >= msp->ms_start + msp->ms_size) 3286 break; 3287 3288 /* 3289 * Mappings do not cross metaslab boundaries, 3290 * because we create them by walking the metaslabs. 
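 * Each entry can therefore be added to this metaslab's ms_allocatable
 * tree in one piece, with no need to split it across neighboring
 * metaslabs.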
3291 */ 3292 ASSERT3U(ent_offset + ent_len, <=, 3293 msp->ms_start + msp->ms_size); 3294 range_tree_add(msp->ms_allocatable, ent_offset, ent_len); 3295 } 3296 3297 if (!msp->ms_loaded) 3298 msp->ms_loaded = B_TRUE; 3299 mutex_exit(&msp->ms_lock); 3300 } 3301 3302 static void 3303 zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb) 3304 { 3305 vdev_t *rvd = spa->spa_root_vdev; 3306 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 3307 vdev_t *vd = rvd->vdev_child[c]; 3308 3309 ASSERT3U(c, ==, vd->vdev_id); 3310 3311 if (vd->vdev_ops != &vdev_indirect_ops) 3312 continue; 3313 3314 /* 3315 * Note: we don't check for mapping leaks on 3316 * removing vdevs because their ms_allocatable's 3317 * are used to look for leaks in allocated space. 3318 */ 3319 zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd); 3320 3321 /* 3322 * Normally, indirect vdevs don't have any 3323 * metaslabs. We want to set them up for 3324 * zio_claim(). 3325 */ 3326 VERIFY0(vdev_metaslab_init(vd, 0)); 3327 3328 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 3329 uint64_t vim_idx = 0; 3330 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 3331 3332 (void) fprintf(stderr, 3333 "\rloading indirect vdev %llu, " 3334 "metaslab %llu of %llu ...", 3335 (longlong_t)vd->vdev_id, 3336 (longlong_t)vd->vdev_ms[m]->ms_id, 3337 (longlong_t)vd->vdev_ms_count); 3338 3339 load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m], 3340 &vim_idx); 3341 } 3342 ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim)); 3343 } 3344 } 3345 3346 static void 3347 zdb_leak_init(spa_t *spa, zdb_cb_t *zcb) 3348 { 3349 zcb->zcb_spa = spa; 3350 3351 if (!dump_opt['L']) { 3352 dsl_pool_t *dp = spa->spa_dsl_pool; 3353 vdev_t *rvd = spa->spa_root_vdev; 3354 3355 /* 3356 * We are going to be changing the meaning of the metaslab's 3357 * ms_allocatable. Ensure that the allocator doesn't try to 3358 * use the tree. 3359 */ 3360 spa->spa_normal_class->mc_ops = &zdb_metaslab_ops; 3361 spa->spa_log_class->mc_ops = &zdb_metaslab_ops; 3362 3363 zcb->zcb_vd_obsolete_counts = 3364 umem_zalloc(rvd->vdev_children * sizeof (uint32_t *), 3365 UMEM_NOFAIL); 3366 3367 /* 3368 * For leak detection, we overload the ms_allocatable trees 3369 * to contain allocated segments instead of free segments. 3370 * As a result, we can't use the normal metaslab_load/unload 3371 * interfaces. 3372 */ 3373 zdb_leak_init_prepare_indirect_vdevs(spa, zcb); 3374 load_concrete_ms_allocatable_trees(spa, SM_ALLOC); 3375 3376 /* 3377 * On load_concrete_ms_allocatable_trees() we loaded all the 3378 * allocated entries from the ms_sm to the ms_allocatable for 3379 * each metaslab. If the pool has a checkpoint or is in the 3380 * middle of discarding a checkpoint, some of these blocks 3381 * may have been freed but their ms_sm may not have been 3382 * updated because they are referenced by the checkpoint. In 3383 * order to avoid false-positives during leak-detection, we 3384 * go through the vdev's checkpoint space map and exclude all 3385 * its entries from their relevant ms_allocatable. 3386 * 3387 * We also aggregate the space held by the checkpoint and add 3388 * it to zcb_checkpoint_size. 3389 * 3390 * Note that at this point we are also verifying that all the 3391 * entries on the checkpoint_sm are marked as allocated in 3392 * the ms_sm of their relevant metaslab. 
3393 * [see comment in checkpoint_sm_exclude_entry_cb()]
3394 */
3395 zdb_leak_init_exclude_checkpoint(spa, zcb);
3396
3397 /* for cleaner progress output */
3398 (void) fprintf(stderr, "\n");
3399
3400 if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
3401 ASSERT(spa_feature_is_enabled(spa,
3402 SPA_FEATURE_DEVICE_REMOVAL));
3403 (void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
3404 increment_indirect_mapping_cb, zcb, NULL);
3405 }
3406 } else {
3407 /*
3408 * If leak tracing is disabled, we still need to consider
3409 * any checkpointed space in our space verification.
3410 */
3411 zcb->zcb_checkpoint_size += spa_get_checkpoint_space(spa);
3412 }
3413
3414 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3415 zdb_ddt_leak_init(spa, zcb);
3416 spa_config_exit(spa, SCL_CONFIG, FTAG);
3417 }
3418
3419 static boolean_t
3420 zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
3421 {
3422 boolean_t leaks = B_FALSE;
3423 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
3424 uint64_t total_leaked = 0;
3425
3426 ASSERT(vim != NULL);
3427
3428 for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
3429 vdev_indirect_mapping_entry_phys_t *vimep =
3430 &vim->vim_entries[i];
3431 uint64_t obsolete_bytes = 0;
3432 uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
3433 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3434
3435 /*
3436 * This is not very efficient but it's easy to
3437 * verify correctness.
3438 */
3439 for (uint64_t inner_offset = 0;
3440 inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
3441 inner_offset += 1 << vd->vdev_ashift) {
3442 if (range_tree_contains(msp->ms_allocatable,
3443 offset + inner_offset, 1 << vd->vdev_ashift)) {
3444 obsolete_bytes += 1 << vd->vdev_ashift;
3445 }
3446 }
3447
3448 int64_t bytes_leaked = obsolete_bytes -
3449 zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
3450 ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
3451 zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
3452 if (bytes_leaked != 0 &&
3453 (vdev_obsolete_counts_are_precise(vd) ||
3454 dump_opt['d'] >= 5)) {
3455 (void) printf("obsolete indirect mapping count "
3456 "mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
3457 (u_longlong_t)vd->vdev_id,
3458 (u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
3459 (u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
3460 (u_longlong_t)bytes_leaked);
3461 }
3462 total_leaked += ABS(bytes_leaked);
3463 }
3464
3465 if (!vdev_obsolete_counts_are_precise(vd) && total_leaked > 0) {
3466 int pct_leaked = total_leaked * 100 /
3467 vdev_indirect_mapping_bytes_mapped(vim);
3468 (void) printf("cannot verify obsolete indirect mapping "
3469 "counts of vdev %llu because precise feature was not "
3470 "enabled when it was removed: %d%% (%llx bytes) of mapping "
3471 "unreferenced\n",
3472 (u_longlong_t)vd->vdev_id, pct_leaked,
3473 (u_longlong_t)total_leaked);
3474 } else if (total_leaked > 0) {
3475 (void) printf("obsolete indirect mapping count mismatch "
3476 "for vdev %llu -- %llx total bytes mismatched\n",
3477 (u_longlong_t)vd->vdev_id,
3478 (u_longlong_t)total_leaked);
3479 leaks |= B_TRUE;
3480 }
3481
3482 vdev_indirect_mapping_free_obsolete_counts(vim,
3483 zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
3484 zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
3485
3486 return (leaks);
3487 }
3488
3489 static boolean_t
3490 zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
3491 {
3492 boolean_t leaks = B_FALSE;
3493 if (!dump_opt['L']) {
3494 vdev_t *rvd = spa->spa_root_vdev;
3495 for (unsigned c = 0; c < rvd->vdev_children; c++) {
3496 vdev_t
*vd = rvd->vdev_child[c]; 3497 metaslab_group_t *mg = vd->vdev_mg; 3498 3499 if (zcb->zcb_vd_obsolete_counts[c] != NULL) { 3500 leaks |= zdb_check_for_obsolete_leaks(vd, zcb); 3501 } 3502 3503 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 3504 metaslab_t *msp = vd->vdev_ms[m]; 3505 ASSERT3P(mg, ==, msp->ms_group); 3506 3507 /* 3508 * ms_allocatable has been overloaded 3509 * to contain allocated segments. Now that 3510 * we finished traversing all blocks, any 3511 * block that remains in the ms_allocatable 3512 * represents an allocated block that we 3513 * did not claim during the traversal. 3514 * Claimed blocks would have been removed 3515 * from the ms_allocatable. For indirect 3516 * vdevs, space remaining in the tree 3517 * represents parts of the mapping that are 3518 * not referenced, which is not a bug. 3519 */ 3520 if (vd->vdev_ops == &vdev_indirect_ops) { 3521 range_tree_vacate(msp->ms_allocatable, 3522 NULL, NULL); 3523 } else { 3524 range_tree_vacate(msp->ms_allocatable, 3525 zdb_leak, vd); 3526 } 3527 3528 if (msp->ms_loaded) { 3529 msp->ms_loaded = B_FALSE; 3530 } 3531 } 3532 } 3533 3534 umem_free(zcb->zcb_vd_obsolete_counts, 3535 rvd->vdev_children * sizeof (uint32_t *)); 3536 zcb->zcb_vd_obsolete_counts = NULL; 3537 } 3538 return (leaks); 3539 } 3540 3541 /* ARGSUSED */ 3542 static int 3543 count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 3544 { 3545 zdb_cb_t *zcb = arg; 3546 3547 if (dump_opt['b'] >= 5) { 3548 char blkbuf[BP_SPRINTF_LEN]; 3549 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 3550 (void) printf("[%s] %s\n", 3551 "deferred free", blkbuf); 3552 } 3553 zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED); 3554 return (0); 3555 } 3556 3557 static int 3558 dump_block_stats(spa_t *spa) 3559 { 3560 zdb_cb_t zcb; 3561 zdb_blkstats_t *zb, *tzb; 3562 uint64_t norm_alloc, norm_space, total_alloc, total_found; 3563 int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD; 3564 boolean_t leaks = B_FALSE; 3565 3566 bzero(&zcb, sizeof (zcb)); 3567 (void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n", 3568 (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", 3569 (dump_opt['c'] == 1) ? "metadata " : "", 3570 dump_opt['c'] ? "checksums " : "", 3571 (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", 3572 !dump_opt['L'] ? "nothing leaked " : ""); 3573 3574 /* 3575 * Load all space maps as SM_ALLOC maps, then traverse the pool 3576 * claiming each block we discover. If the pool is perfectly 3577 * consistent, the space maps will be empty when we're done. 3578 * Anything left over is a leak; any block we can't claim (because 3579 * it's not part of any space map) is a double allocation, 3580 * reference to a freed block, or an unclaimed log block. 3581 */ 3582 zdb_leak_init(spa, &zcb); 3583 3584 /* 3585 * If there's a deferred-free bplist, process that first. 
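 * Deferred frees are still charged as allocated in the space maps, so
 * counting them (as ZDB_OT_DEFERRED, via count_block_cb) keeps the
 * block-accounting totals consistent with the space maps.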
3586 */ 3587 (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj, 3588 count_block_cb, &zcb, NULL); 3589 3590 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 3591 (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj, 3592 count_block_cb, &zcb, NULL); 3593 } 3594 3595 zdb_claim_removing(spa, &zcb); 3596 3597 if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 3598 VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset, 3599 spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb, 3600 &zcb, NULL)); 3601 } 3602 3603 if (dump_opt['c'] > 1) 3604 flags |= TRAVERSE_PREFETCH_DATA; 3605 3606 zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa)); 3607 zcb.zcb_start = zcb.zcb_lastprint = gethrtime(); 3608 zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb); 3609 3610 /* 3611 * If we've traversed the data blocks then we need to wait for those 3612 * I/Os to complete. We leverage "The Godfather" zio to wait on 3613 * all async I/Os to complete. 3614 */ 3615 if (dump_opt['c']) { 3616 for (int i = 0; i < max_ncpus; i++) { 3617 (void) zio_wait(spa->spa_async_zio_root[i]); 3618 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 3619 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3620 ZIO_FLAG_GODFATHER); 3621 } 3622 } 3623 3624 if (zcb.zcb_haderrors) { 3625 (void) printf("\nError counts:\n\n"); 3626 (void) printf("\t%5s %s\n", "errno", "count"); 3627 for (int e = 0; e < 256; e++) { 3628 if (zcb.zcb_errors[e] != 0) { 3629 (void) printf("\t%5d %llu\n", 3630 e, (u_longlong_t)zcb.zcb_errors[e]); 3631 } 3632 } 3633 } 3634 3635 /* 3636 * Report any leaked segments. 3637 */ 3638 leaks |= zdb_leak_fini(spa, &zcb); 3639 3640 tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL]; 3641 3642 norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 3643 norm_space = metaslab_class_get_space(spa_normal_class(spa)); 3644 3645 total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa)); 3646 total_found = tzb->zb_asize - zcb.zcb_dedup_asize + 3647 zcb.zcb_removing_size + zcb.zcb_checkpoint_size; 3648 3649 if (total_found == total_alloc) { 3650 if (!dump_opt['L']) 3651 (void) printf("\n\tNo leaks (block sum matches space" 3652 " maps exactly)\n"); 3653 } else { 3654 (void) printf("block traversal size %llu != alloc %llu " 3655 "(%s %lld)\n", 3656 (u_longlong_t)total_found, 3657 (u_longlong_t)total_alloc, 3658 (dump_opt['L']) ? 
"unreachable" : "leaked", 3659 (longlong_t)(total_alloc - total_found)); 3660 leaks = B_TRUE; 3661 } 3662 3663 if (tzb->zb_count == 0) 3664 return (2); 3665 3666 (void) printf("\n"); 3667 (void) printf("\tbp count: %10llu\n", 3668 (u_longlong_t)tzb->zb_count); 3669 (void) printf("\tganged count: %10llu\n", 3670 (longlong_t)tzb->zb_gangs); 3671 (void) printf("\tbp logical: %10llu avg: %6llu\n", 3672 (u_longlong_t)tzb->zb_lsize, 3673 (u_longlong_t)(tzb->zb_lsize / tzb->zb_count)); 3674 (void) printf("\tbp physical: %10llu avg:" 3675 " %6llu compression: %6.2f\n", 3676 (u_longlong_t)tzb->zb_psize, 3677 (u_longlong_t)(tzb->zb_psize / tzb->zb_count), 3678 (double)tzb->zb_lsize / tzb->zb_psize); 3679 (void) printf("\tbp allocated: %10llu avg:" 3680 " %6llu compression: %6.2f\n", 3681 (u_longlong_t)tzb->zb_asize, 3682 (u_longlong_t)(tzb->zb_asize / tzb->zb_count), 3683 (double)tzb->zb_lsize / tzb->zb_asize); 3684 (void) printf("\tbp deduped: %10llu ref>1:" 3685 " %6llu deduplication: %6.2f\n", 3686 (u_longlong_t)zcb.zcb_dedup_asize, 3687 (u_longlong_t)zcb.zcb_dedup_blocks, 3688 (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0); 3689 (void) printf("\tSPA allocated: %10llu used: %5.2f%%\n", 3690 (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space); 3691 3692 for (bp_embedded_type_t i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) { 3693 if (zcb.zcb_embedded_blocks[i] == 0) 3694 continue; 3695 (void) printf("\n"); 3696 (void) printf("\tadditional, non-pointer bps of type %u: " 3697 "%10llu\n", 3698 i, (u_longlong_t)zcb.zcb_embedded_blocks[i]); 3699 3700 if (dump_opt['b'] >= 3) { 3701 (void) printf("\t number of (compressed) bytes: " 3702 "number of bps\n"); 3703 dump_histogram(zcb.zcb_embedded_histogram[i], 3704 sizeof (zcb.zcb_embedded_histogram[i]) / 3705 sizeof (zcb.zcb_embedded_histogram[i][0]), 0); 3706 } 3707 } 3708 3709 if (tzb->zb_ditto_samevdev != 0) { 3710 (void) printf("\tDittoed blocks on same vdev: %llu\n", 3711 (longlong_t)tzb->zb_ditto_samevdev); 3712 } 3713 3714 for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) { 3715 vdev_t *vd = spa->spa_root_vdev->vdev_child[v]; 3716 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 3717 3718 if (vim == NULL) { 3719 continue; 3720 } 3721 3722 char mem[32]; 3723 zdb_nicenum(vdev_indirect_mapping_num_entries(vim), 3724 mem, vdev_indirect_mapping_size(vim)); 3725 3726 (void) printf("\tindirect vdev id %llu has %llu segments " 3727 "(%s in memory)\n", 3728 (longlong_t)vd->vdev_id, 3729 (longlong_t)vdev_indirect_mapping_num_entries(vim), mem); 3730 } 3731 3732 if (dump_opt['b'] >= 2) { 3733 int l, t, level; 3734 (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE" 3735 "\t avg\t comp\t%%Total\tType\n"); 3736 3737 for (t = 0; t <= ZDB_OT_TOTAL; t++) { 3738 char csize[32], lsize[32], psize[32], asize[32]; 3739 char avg[32], gang[32]; 3740 const char *typename; 3741 3742 /* make sure nicenum has enough space */ 3743 CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ); 3744 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ); 3745 CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ); 3746 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ); 3747 CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ); 3748 CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ); 3749 3750 if (t < DMU_OT_NUMTYPES) 3751 typename = dmu_ot[t].ot_name; 3752 else 3753 typename = zdb_ot_extname[t - DMU_OT_NUMTYPES]; 3754 3755 if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) { 3756 (void) printf("%6s\t%5s\t%5s\t%5s" 3757 "\t%5s\t%5s\t%6s\t%s\n", 3758 "-", 3759 "-", 3760 "-", 3761 "-", 3762 "-", 3763 "-", 3764 "-", 3765 typename); 3766 continue; 
3767 } 3768 3769 for (l = ZB_TOTAL - 1; l >= -1; l--) { 3770 level = (l == -1 ? ZB_TOTAL : l); 3771 zb = &zcb.zcb_type[level][t]; 3772 3773 if (zb->zb_asize == 0) 3774 continue; 3775 3776 if (dump_opt['b'] < 3 && level != ZB_TOTAL) 3777 continue; 3778 3779 if (level == 0 && zb->zb_asize == 3780 zcb.zcb_type[ZB_TOTAL][t].zb_asize) 3781 continue; 3782 3783 zdb_nicenum(zb->zb_count, csize, 3784 sizeof (csize)); 3785 zdb_nicenum(zb->zb_lsize, lsize, 3786 sizeof (lsize)); 3787 zdb_nicenum(zb->zb_psize, psize, 3788 sizeof (psize)); 3789 zdb_nicenum(zb->zb_asize, asize, 3790 sizeof (asize)); 3791 zdb_nicenum(zb->zb_asize / zb->zb_count, avg, 3792 sizeof (avg)); 3793 zdb_nicenum(zb->zb_gangs, gang, sizeof (gang)); 3794 3795 (void) printf("%6s\t%5s\t%5s\t%5s\t%5s" 3796 "\t%5.2f\t%6.2f\t", 3797 csize, lsize, psize, asize, avg, 3798 (double)zb->zb_lsize / zb->zb_psize, 3799 100.0 * zb->zb_asize / tzb->zb_asize); 3800 3801 if (level == ZB_TOTAL) 3802 (void) printf("%s\n", typename); 3803 else 3804 (void) printf(" L%d %s\n", 3805 level, typename); 3806 3807 if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) { 3808 (void) printf("\t number of ganged " 3809 "blocks: %s\n", gang); 3810 } 3811 3812 if (dump_opt['b'] >= 4) { 3813 (void) printf("psize " 3814 "(in 512-byte sectors): " 3815 "number of blocks\n"); 3816 dump_histogram(zb->zb_psize_histogram, 3817 PSIZE_HISTO_SIZE, 0); 3818 } 3819 } 3820 } 3821 } 3822 3823 (void) printf("\n"); 3824 3825 if (leaks) 3826 return (2); 3827 3828 if (zcb.zcb_haderrors) 3829 return (3); 3830 3831 return (0); 3832 } 3833 3834 typedef struct zdb_ddt_entry { 3835 ddt_key_t zdde_key; 3836 uint64_t zdde_ref_blocks; 3837 uint64_t zdde_ref_lsize; 3838 uint64_t zdde_ref_psize; 3839 uint64_t zdde_ref_dsize; 3840 avl_node_t zdde_node; 3841 } zdb_ddt_entry_t; 3842 3843 /* ARGSUSED */ 3844 static int 3845 zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 3846 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 3847 { 3848 avl_tree_t *t = arg; 3849 avl_index_t where; 3850 zdb_ddt_entry_t *zdde, zdde_search; 3851 3852 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 3853 return (0); 3854 3855 if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) { 3856 (void) printf("traversing objset %llu, %llu objects, " 3857 "%lu blocks so far\n", 3858 (u_longlong_t)zb->zb_objset, 3859 (u_longlong_t)BP_GET_FILL(bp), 3860 avl_numnodes(t)); 3861 } 3862 3863 if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF || 3864 BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp))) 3865 return (0); 3866 3867 ddt_key_fill(&zdde_search.zdde_key, bp); 3868 3869 zdde = avl_find(t, &zdde_search, &where); 3870 3871 if (zdde == NULL) { 3872 zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL); 3873 zdde->zdde_key = zdde_search.zdde_key; 3874 avl_insert(t, zdde, where); 3875 } 3876 3877 zdde->zdde_ref_blocks += 1; 3878 zdde->zdde_ref_lsize += BP_GET_LSIZE(bp); 3879 zdde->zdde_ref_psize += BP_GET_PSIZE(bp); 3880 zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp); 3881 3882 return (0); 3883 } 3884 3885 static void 3886 dump_simulated_ddt(spa_t *spa) 3887 { 3888 avl_tree_t t; 3889 void *cookie = NULL; 3890 zdb_ddt_entry_t *zdde; 3891 ddt_histogram_t ddh_total; 3892 ddt_stat_t dds_total; 3893 3894 bzero(&ddh_total, sizeof (ddh_total)); 3895 bzero(&dds_total, sizeof (dds_total)); 3896 avl_create(&t, ddt_entry_compare, 3897 sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node)); 3898 3899 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3900 3901 (void) traverse_pool(spa, 0, 
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, 3902 zdb_ddt_add_cb, &t); 3903 3904 spa_config_exit(spa, SCL_CONFIG, FTAG); 3905 3906 while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) { 3907 ddt_stat_t dds; 3908 uint64_t refcnt = zdde->zdde_ref_blocks; 3909 ASSERT(refcnt != 0); 3910 3911 dds.dds_blocks = zdde->zdde_ref_blocks / refcnt; 3912 dds.dds_lsize = zdde->zdde_ref_lsize / refcnt; 3913 dds.dds_psize = zdde->zdde_ref_psize / refcnt; 3914 dds.dds_dsize = zdde->zdde_ref_dsize / refcnt; 3915 3916 dds.dds_ref_blocks = zdde->zdde_ref_blocks; 3917 dds.dds_ref_lsize = zdde->zdde_ref_lsize; 3918 dds.dds_ref_psize = zdde->zdde_ref_psize; 3919 dds.dds_ref_dsize = zdde->zdde_ref_dsize; 3920 3921 ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1], 3922 &dds, 0); 3923 3924 umem_free(zdde, sizeof (*zdde)); 3925 } 3926 3927 avl_destroy(&t); 3928 3929 ddt_histogram_stat(&dds_total, &ddh_total); 3930 3931 (void) printf("Simulated DDT histogram:\n"); 3932 3933 zpool_dump_ddt(&dds_total, &ddh_total); 3934 3935 dump_dedup_ratio(&dds_total); 3936 } 3937 3938 static int 3939 verify_device_removal_feature_counts(spa_t *spa) 3940 { 3941 uint64_t dr_feature_refcount = 0; 3942 uint64_t oc_feature_refcount = 0; 3943 uint64_t indirect_vdev_count = 0; 3944 uint64_t precise_vdev_count = 0; 3945 uint64_t obsolete_counts_object_count = 0; 3946 uint64_t obsolete_sm_count = 0; 3947 uint64_t obsolete_counts_count = 0; 3948 uint64_t scip_count = 0; 3949 uint64_t obsolete_bpobj_count = 0; 3950 int ret = 0; 3951 3952 spa_condensing_indirect_phys_t *scip = 3953 &spa->spa_condensing_indirect_phys; 3954 if (scip->scip_next_mapping_object != 0) { 3955 vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev]; 3956 ASSERT(scip->scip_prev_obsolete_sm_object != 0); 3957 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 3958 3959 (void) printf("Condensing indirect vdev %llu: new mapping " 3960 "object %llu, prev obsolete sm %llu\n", 3961 (u_longlong_t)scip->scip_vdev, 3962 (u_longlong_t)scip->scip_next_mapping_object, 3963 (u_longlong_t)scip->scip_prev_obsolete_sm_object); 3964 if (scip->scip_prev_obsolete_sm_object != 0) { 3965 space_map_t *prev_obsolete_sm = NULL; 3966 VERIFY0(space_map_open(&prev_obsolete_sm, 3967 spa->spa_meta_objset, 3968 scip->scip_prev_obsolete_sm_object, 3969 0, vd->vdev_asize, 0)); 3970 space_map_update(prev_obsolete_sm); 3971 dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm); 3972 (void) printf("\n"); 3973 space_map_close(prev_obsolete_sm); 3974 } 3975 3976 scip_count += 2; 3977 } 3978 3979 for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 3980 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 3981 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 3982 3983 if (vic->vic_mapping_object != 0) { 3984 ASSERT(vd->vdev_ops == &vdev_indirect_ops || 3985 vd->vdev_removing); 3986 indirect_vdev_count++; 3987 3988 if (vd->vdev_indirect_mapping->vim_havecounts) { 3989 obsolete_counts_count++; 3990 } 3991 } 3992 if (vdev_obsolete_counts_are_precise(vd)) { 3993 ASSERT(vic->vic_mapping_object != 0); 3994 precise_vdev_count++; 3995 } 3996 if (vdev_obsolete_sm_object(vd) != 0) { 3997 ASSERT(vic->vic_mapping_object != 0); 3998 obsolete_sm_count++; 3999 } 4000 } 4001 4002 (void) feature_get_refcount(spa, 4003 &spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL], 4004 &dr_feature_refcount); 4005 (void) feature_get_refcount(spa, 4006 &spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS], 4007 &oc_feature_refcount); 4008 4009 if (dr_feature_refcount != indirect_vdev_count) { 4010 ret = 1; 4011 (void) 
printf("Number of indirect vdevs (%llu) " \ 4012 "does not match feature count (%llu)\n", 4013 (u_longlong_t)indirect_vdev_count, 4014 (u_longlong_t)dr_feature_refcount); 4015 } else { 4016 (void) printf("Verified device_removal feature refcount " \ 4017 "of %llu is correct\n", 4018 (u_longlong_t)dr_feature_refcount); 4019 } 4020 4021 if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT, 4022 DMU_POOL_OBSOLETE_BPOBJ) == 0) { 4023 obsolete_bpobj_count++; 4024 } 4025 4026 4027 obsolete_counts_object_count = precise_vdev_count; 4028 obsolete_counts_object_count += obsolete_sm_count; 4029 obsolete_counts_object_count += obsolete_counts_count; 4030 obsolete_counts_object_count += scip_count; 4031 obsolete_counts_object_count += obsolete_bpobj_count; 4032 obsolete_counts_object_count += remap_deadlist_count; 4033 4034 if (oc_feature_refcount != obsolete_counts_object_count) { 4035 ret = 1; 4036 (void) printf("Number of obsolete counts objects (%llu) " \ 4037 "does not match feature count (%llu)\n", 4038 (u_longlong_t)obsolete_counts_object_count, 4039 (u_longlong_t)oc_feature_refcount); 4040 (void) printf("pv:%llu os:%llu oc:%llu sc:%llu " 4041 "ob:%llu rd:%llu\n", 4042 (u_longlong_t)precise_vdev_count, 4043 (u_longlong_t)obsolete_sm_count, 4044 (u_longlong_t)obsolete_counts_count, 4045 (u_longlong_t)scip_count, 4046 (u_longlong_t)obsolete_bpobj_count, 4047 (u_longlong_t)remap_deadlist_count); 4048 } else { 4049 (void) printf("Verified indirect_refcount feature refcount " \ 4050 "of %llu is correct\n", 4051 (u_longlong_t)oc_feature_refcount); 4052 } 4053 return (ret); 4054 } 4055 4056 #define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE" 4057 /* 4058 * Import the checkpointed state of the pool specified by the target 4059 * parameter as readonly. The function also accepts a pool config 4060 * as an optional parameter, else it attempts to infer the config by 4061 * the name of the target pool. 4062 * 4063 * Note that the checkpointed state's pool name will be the name of 4064 * the original pool with the above suffix appened to it. In addition, 4065 * if the target is not a pool name (e.g. a path to a dataset) then 4066 * the new_path parameter is populated with the updated path to 4067 * reflect the fact that we are looking into the checkpointed state. 4068 * 4069 * The function returns a newly-allocated copy of the name of the 4070 * pool containing the checkpointed state. When this copy is no 4071 * longer needed it should be freed with free(3C). Same thing 4072 * applies to the new_path parameter if allocated. 
4073 */ 4074 static char * 4075 import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path) 4076 { 4077 int error = 0; 4078 char *poolname, *bogus_name; 4079 4080 /* If the target is not a pool, then extract the pool name */ 4081 char *path_start = strchr(target, '/'); 4082 if (path_start != NULL) { 4083 size_t poolname_len = path_start - target; 4084 poolname = strndup(target, poolname_len); 4085 } else { 4086 poolname = target; 4087 } 4088 4089 if (cfg == NULL) { 4090 error = spa_get_stats(poolname, &cfg, NULL, 0); 4091 if (error != 0) { 4092 fatal("Tried to read config of pool \"%s\" but " 4093 "spa_get_stats() failed with error %d\n", 4094 poolname, error); 4095 } 4096 } 4097 4098 (void) asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX); 4099 fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name); 4100 4101 error = spa_import(bogus_name, cfg, NULL, 4102 ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT); 4103 if (error != 0) { 4104 fatal("Tried to import pool \"%s\" but spa_import() failed " 4105 "with error %d\n", bogus_name, error); 4106 } 4107 4108 if (new_path != NULL && path_start != NULL) 4109 (void) asprintf(new_path, "%s%s", bogus_name, path_start); 4110 4111 if (target != poolname) 4112 free(poolname); 4113 4114 return (bogus_name); 4115 } 4116 4117 typedef struct verify_checkpoint_sm_entry_cb_arg { 4118 vdev_t *vcsec_vd; 4119 4120 /* the following fields are only used for printing progress */ 4121 uint64_t vcsec_entryid; 4122 uint64_t vcsec_num_entries; 4123 } verify_checkpoint_sm_entry_cb_arg_t; 4124 4125 #define ENTRIES_PER_PROGRESS_UPDATE 10000 4126 4127 static int 4128 verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg) 4129 { 4130 verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg; 4131 vdev_t *vd = vcsec->vcsec_vd; 4132 metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift]; 4133 uint64_t end = sme->sme_offset + sme->sme_run; 4134 4135 ASSERT(sme->sme_type == SM_FREE); 4136 4137 if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) { 4138 (void) fprintf(stderr, 4139 "\rverifying vdev %llu, space map entry %llu of %llu ...", 4140 (longlong_t)vd->vdev_id, 4141 (longlong_t)vcsec->vcsec_entryid, 4142 (longlong_t)vcsec->vcsec_num_entries); 4143 } 4144 vcsec->vcsec_entryid++; 4145 4146 /* 4147 * See comment in checkpoint_sm_exclude_entry_cb() 4148 */ 4149 VERIFY3U(sme->sme_offset, >=, ms->ms_start); 4150 VERIFY3U(end, <=, ms->ms_start + ms->ms_size); 4151 4152 /* 4153 * The entries in the vdev_checkpoint_sm should be marked as 4154 * allocated in the checkpointed state of the pool, therefore 4155 * their respective ms_allocatable trees should not contain them. 4156 */ 4157 mutex_enter(&ms->ms_lock); 4158 range_tree_verify(ms->ms_allocatable, sme->sme_offset, sme->sme_run); 4159 mutex_exit(&ms->ms_lock); 4160 4161 return (0); 4162 } 4163 4164 /* 4165 * Verify that all segments in the vdev_checkpoint_sm are allocated 4166 * according to the checkpoint's ms_sm (i.e. are not in the checkpoint's 4167 * ms_allocatable). 4168 * 4169 * Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of 4170 * each vdev in the current state of the pool to the metaslab space maps 4171 * (ms_sm) of the checkpointed state of the pool. 4172 * 4173 * Note that the function changes the state of the ms_allocatable 4174 * trees of the checkpointed spa_t. The entries of these ms_allocatable 4175 * trees are cleared out and then repopulated with the free 4176 * entries of their respective ms_sm space maps.
4177 */ 4178 static void 4179 verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current) 4180 { 4181 vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev; 4182 vdev_t *current_rvd = current->spa_root_vdev; 4183 4184 load_concrete_ms_allocatable_trees(checkpoint, SM_FREE); 4185 4186 for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) { 4187 vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c]; 4188 vdev_t *current_vd = current_rvd->vdev_child[c]; 4189 4190 space_map_t *checkpoint_sm = NULL; 4191 uint64_t checkpoint_sm_obj; 4192 4193 if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) { 4194 /* 4195 * Since we don't allow device removal in a pool 4196 * that has a checkpoint, we expect that all removed 4197 * vdevs were removed from the pool before the 4198 * checkpoint. 4199 */ 4200 ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops); 4201 continue; 4202 } 4203 4204 /* 4205 * If the checkpoint space map doesn't exist, then nothing 4206 * here is checkpointed so there's nothing to verify. 4207 */ 4208 if (current_vd->vdev_top_zap == 0 || 4209 zap_contains(spa_meta_objset(current), 4210 current_vd->vdev_top_zap, 4211 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0) 4212 continue; 4213 4214 VERIFY0(zap_lookup(spa_meta_objset(current), 4215 current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, 4216 sizeof (uint64_t), 1, &checkpoint_sm_obj)); 4217 4218 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current), 4219 checkpoint_sm_obj, 0, current_vd->vdev_asize, 4220 current_vd->vdev_ashift)); 4221 space_map_update(checkpoint_sm); 4222 4223 verify_checkpoint_sm_entry_cb_arg_t vcsec; 4224 vcsec.vcsec_vd = ckpoint_vd; 4225 vcsec.vcsec_entryid = 0; 4226 vcsec.vcsec_num_entries = 4227 space_map_length(checkpoint_sm) / sizeof (uint64_t); 4228 VERIFY0(space_map_iterate(checkpoint_sm, 4229 verify_checkpoint_sm_entry_cb, &vcsec)); 4230 dump_spacemap(current->spa_meta_objset, checkpoint_sm); 4231 space_map_close(checkpoint_sm); 4232 } 4233 4234 /* 4235 * If we've added vdevs since we took the checkpoint, ensure 4236 * that their checkpoint space maps are empty. 4237 */ 4238 if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) { 4239 for (uint64_t c = ckpoint_rvd->vdev_children; 4240 c < current_rvd->vdev_children; c++) { 4241 vdev_t *current_vd = current_rvd->vdev_child[c]; 4242 ASSERT3P(current_vd->vdev_checkpoint_sm, ==, NULL); 4243 } 4244 } 4245 4246 /* for cleaner progress output */ 4247 (void) fprintf(stderr, "\n"); 4248 } 4249 4250 /* 4251 * Verifies that all space that's allocated in the checkpoint is 4252 * still allocated in the current version, by checking that everything 4253 * in checkpoint's ms_allocatable (which is actually allocated, not 4254 * allocatable/free) is not present in current's ms_allocatable. 4255 * 4256 * Note that the function changes the state of the ms_allocatable 4257 * trees of both spas when called. The entries of all ms_allocatable 4258 * trees are cleared out and then repopulated from their respective 4259 * ms_sm space maps. In the checkpointed state we load the allocated 4260 * entries, and in the current state we load the free entries. 
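 *
 * In other words, a range that is allocated in the checkpoint must
 * never appear in a current ms_allocatable (free) tree; such an
 * overlap would mean a checkpointed block was freed and possibly
 * overwritten, and range_tree_verify() catches exactly that case.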
4261 */ 4262 static void 4263 verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current) 4264 { 4265 vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev; 4266 vdev_t *current_rvd = current->spa_root_vdev; 4267 4268 load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC); 4269 load_concrete_ms_allocatable_trees(current, SM_FREE); 4270 4271 for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) { 4272 vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i]; 4273 vdev_t *current_vd = current_rvd->vdev_child[i]; 4274 4275 if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) { 4276 /* 4277 * See comment in verify_checkpoint_vdev_spacemaps() 4278 */ 4279 ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops); 4280 continue; 4281 } 4282 4283 for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) { 4284 metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m]; 4285 metaslab_t *current_msp = current_vd->vdev_ms[m]; 4286 4287 (void) fprintf(stderr, 4288 "\rverifying vdev %llu of %llu, " 4289 "metaslab %llu of %llu ...", 4290 (longlong_t)current_vd->vdev_id, 4291 (longlong_t)current_rvd->vdev_children, 4292 (longlong_t)current_vd->vdev_ms[m]->ms_id, 4293 (longlong_t)current_vd->vdev_ms_count); 4294 4295 /* 4296 * We walk through the ms_allocatable trees that 4297 * are loaded with the allocated blocks from the 4298 * ms_sm spacemaps of the checkpoint. For each 4299 * one of these ranges we ensure that none of them 4300 * exists in the ms_allocatable trees of the 4301 * current state which are loaded with the ranges 4302 * that are currently free. 4303 * 4304 * This way we ensure that none of the blocks that 4305 * are part of the checkpoint were freed by mistake. 4306 */ 4307 range_tree_walk(ckpoint_msp->ms_allocatable, 4308 (range_tree_func_t *)range_tree_verify, 4309 current_msp->ms_allocatable); 4310 } 4311 } 4312 4313 /* for cleaner progress output */ 4314 (void) fprintf(stderr, "\n"); 4315 } 4316 4317 static void 4318 verify_checkpoint_blocks(spa_t *spa) 4319 { 4320 spa_t *checkpoint_spa; 4321 char *checkpoint_pool; 4322 nvlist_t *config = NULL; 4323 int error = 0; 4324 4325 /* 4326 * We import the checkpointed state of the pool (under a different 4327 * name) so we can do verification on it against the current state 4328 * of the pool. 4329 */ 4330 checkpoint_pool = import_checkpointed_state(spa->spa_name, config, 4331 NULL); 4332 ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0); 4333 4334 error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG); 4335 if (error != 0) { 4336 fatal("Tried to open pool \"%s\" but spa_open() failed with " 4337 "error %d\n", checkpoint_pool, error); 4338 } 4339 4340 /* 4341 * Ensure that ranges in the checkpoint space maps of each vdev 4342 * are allocated according to the checkpointed state's metaslab 4343 * space maps. 4344 */ 4345 verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa); 4346 4347 /* 4348 * Ensure that allocated ranges in the checkpoint's metaslab 4349 * space maps remain allocated in the metaslab space maps of 4350 * the current state. 4351 */ 4352 verify_checkpoint_ms_spacemaps(checkpoint_spa, spa); 4353 4354 /* 4355 * Once we are done, we get rid of the checkpointed state. 
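 * spa_close() drops the reference taken by spa_open() above, and
 * free() releases the name returned by import_checkpointed_state().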
4356 */ 4357 spa_close(checkpoint_spa, FTAG); 4358 free(checkpoint_pool); 4359 } 4360 4361 static void 4362 dump_leftover_checkpoint_blocks(spa_t *spa) 4363 { 4364 vdev_t *rvd = spa->spa_root_vdev; 4365 4366 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 4367 vdev_t *vd = rvd->vdev_child[i]; 4368 4369 space_map_t *checkpoint_sm = NULL; 4370 uint64_t checkpoint_sm_obj; 4371 4372 if (vd->vdev_top_zap == 0) 4373 continue; 4374 4375 if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap, 4376 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0) 4377 continue; 4378 4379 VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap, 4380 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, 4381 sizeof (uint64_t), 1, &checkpoint_sm_obj)); 4382 4383 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa), 4384 checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift)); 4385 space_map_update(checkpoint_sm); 4386 dump_spacemap(spa->spa_meta_objset, checkpoint_sm); 4387 space_map_close(checkpoint_sm); 4388 } 4389 } 4390 4391 static int 4392 verify_checkpoint(spa_t *spa) 4393 { 4394 uberblock_t checkpoint; 4395 int error; 4396 4397 if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) 4398 return (0); 4399 4400 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4401 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 4402 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 4403 4404 if (error == ENOENT && !dump_opt['L']) { 4405 /* 4406 * If the feature is active but the uberblock is missing 4407 * then we must be in the middle of discarding the 4408 * checkpoint. 4409 */ 4410 (void) printf("\nPartially discarded checkpoint " 4411 "state found:\n"); 4412 dump_leftover_checkpoint_blocks(spa); 4413 return (0); 4414 } else if (error != 0) { 4415 (void) printf("lookup error %d when looking for " 4416 "checkpointed uberblock in MOS\n", error); 4417 return (error); 4418 } 4419 dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n"); 4420 4421 if (checkpoint.ub_checkpoint_txg == 0) { 4422 (void) printf("\nub_checkpoint_txg not set in checkpointed " 4423 "uberblock\n"); 4424 error = 3; 4425 } 4426 4427 if (error == 0 && !dump_opt['L']) 4428 verify_checkpoint_blocks(spa); 4429 4430 return (error); 4431 } 4432 4433 static void 4434 dump_zpool(spa_t *spa) 4435 { 4436 dsl_pool_t *dp = spa_get_dsl(spa); 4437 int rc = 0; 4438 4439 if (dump_opt['S']) { 4440 dump_simulated_ddt(spa); 4441 return; 4442 } 4443 4444 if (!dump_opt['e'] && dump_opt['C'] > 1) { 4445 (void) printf("\nCached configuration:\n"); 4446 dump_nvlist(spa->spa_config, 8); 4447 } 4448 4449 if (dump_opt['C']) 4450 dump_config(spa); 4451 4452 if (dump_opt['u']) 4453 dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n"); 4454 4455 if (dump_opt['D']) 4456 dump_all_ddts(spa); 4457 4458 if (dump_opt['d'] > 2 || dump_opt['m']) 4459 dump_metaslabs(spa); 4460 if (dump_opt['M']) 4461 dump_metaslab_groups(spa); 4462 4463 if (dump_opt['d'] || dump_opt['i']) { 4464 dump_dir(dp->dp_meta_objset); 4465 if (dump_opt['d'] >= 3) { 4466 dsl_pool_t *dp = spa->spa_dsl_pool; 4467 dump_full_bpobj(&spa->spa_deferred_bpobj, 4468 "Deferred frees", 0); 4469 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 4470 dump_full_bpobj(&dp->dp_free_bpobj, 4471 "Pool snapshot frees", 0); 4472 } 4473 if (bpobj_is_open(&dp->dp_obsolete_bpobj)) { 4474 ASSERT(spa_feature_is_enabled(spa, 4475 SPA_FEATURE_DEVICE_REMOVAL)); 4476 dump_full_bpobj(&dp->dp_obsolete_bpobj, 4477 "Pool obsolete blocks", 0); 4478 } 4479 4480 if (spa_feature_is_active(spa, 4481 SPA_FEATURE_ASYNC_DESTROY)) { 
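/* With async_destroy, blocks of destroyed datasets are freed in the background by walking this bptree. */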
4482 dump_bptree(spa->spa_meta_objset, 4483 dp->dp_bptree_obj, 4484 "Pool dataset frees"); 4485 } 4486 dump_dtl(spa->spa_root_vdev, 0); 4487 } 4488 (void) dmu_objset_find(spa_name(spa), dump_one_dir, 4489 NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 4490 4491 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 4492 uint64_t refcount; 4493 4494 if (!(spa_feature_table[f].fi_flags & 4495 ZFEATURE_FLAG_PER_DATASET) || 4496 !spa_feature_is_enabled(spa, f)) { 4497 ASSERT0(dataset_feature_count[f]); 4498 continue; 4499 } 4500 (void) feature_get_refcount(spa, 4501 &spa_feature_table[f], &refcount); 4502 if (dataset_feature_count[f] != refcount) { 4503 (void) printf("%s feature refcount mismatch: " 4504 "%lld datasets != %lld refcount\n", 4505 spa_feature_table[f].fi_uname, 4506 (longlong_t)dataset_feature_count[f], 4507 (longlong_t)refcount); 4508 rc = 2; 4509 } else { 4510 (void) printf("Verified %s feature refcount " 4511 "of %llu is correct\n", 4512 spa_feature_table[f].fi_uname, 4513 (longlong_t)refcount); 4514 } 4515 } 4516 4517 if (rc == 0) { 4518 rc = verify_device_removal_feature_counts(spa); 4519 } 4520 } 4521 if (rc == 0 && (dump_opt['b'] || dump_opt['c'])) 4522 rc = dump_block_stats(spa); 4523 4524 if (rc == 0) 4525 rc = verify_spacemap_refcounts(spa); 4526 4527 if (dump_opt['s']) 4528 show_pool_stats(spa); 4529 4530 if (dump_opt['h']) 4531 dump_history(spa); 4532 4533 if (rc == 0) 4534 rc = verify_checkpoint(spa); 4535 4536 if (rc != 0) { 4537 dump_debug_buffer(); 4538 exit(rc); 4539 } 4540 } 4541 4542 #define ZDB_FLAG_CHECKSUM 0x0001 4543 #define ZDB_FLAG_DECOMPRESS 0x0002 4544 #define ZDB_FLAG_BSWAP 0x0004 4545 #define ZDB_FLAG_GBH 0x0008 4546 #define ZDB_FLAG_INDIRECT 0x0010 4547 #define ZDB_FLAG_PHYS 0x0020 4548 #define ZDB_FLAG_RAW 0x0040 4549 #define ZDB_FLAG_PRINT_BLKPTR 0x0080 4550 4551 static int flagbits[256]; 4552 4553 static void 4554 zdb_print_blkptr(blkptr_t *bp, int flags) 4555 { 4556 char blkbuf[BP_SPRINTF_LEN]; 4557 4558 if (flags & ZDB_FLAG_BSWAP) 4559 byteswap_uint64_array((void *)bp, sizeof (blkptr_t)); 4560 4561 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 4562 (void) printf("%s\n", blkbuf); 4563 } 4564 4565 static void 4566 zdb_dump_indirect(blkptr_t *bp, int nbps, int flags) 4567 { 4568 int i; 4569 4570 for (i = 0; i < nbps; i++) 4571 zdb_print_blkptr(&bp[i], flags); 4572 } 4573 4574 static void 4575 zdb_dump_gbh(void *buf, int flags) 4576 { 4577 zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags); 4578 } 4579 4580 static void 4581 zdb_dump_block_raw(void *buf, uint64_t size, int flags) 4582 { 4583 if (flags & ZDB_FLAG_BSWAP) 4584 byteswap_uint64_array(buf, size); 4585 (void) write(1, buf, size); 4586 } 4587 4588 static void 4589 zdb_dump_block(char *label, void *buf, uint64_t size, int flags) 4590 { 4591 uint64_t *d = (uint64_t *)buf; 4592 unsigned nwords = size / sizeof (uint64_t); 4593 int do_bswap = !!(flags & ZDB_FLAG_BSWAP); 4594 unsigned i, j; 4595 const char *hdr; 4596 char *c; 4597 4598 4599 if (do_bswap) 4600 hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8"; 4601 else 4602 hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f"; 4603 4604 (void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr); 4605 4606 for (i = 0; i < nwords; i += 2) { 4607 (void) printf("%06llx: %016llx %016llx ", 4608 (u_longlong_t)(i * sizeof (uint64_t)), 4609 (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]), 4610 (u_longlong_t)(do_bswap ? 
BSWAP_64(d[i + 1]) : d[i + 1])); 4611 4612 c = (char *)&d[i]; 4613 for (j = 0; j < 2 * sizeof (uint64_t); j++) 4614 (void) printf("%c", isprint(c[j]) ? c[j] : '.'); 4615 (void) printf("\n"); 4616 } 4617 } 4618 4619 /* 4620 * There are two acceptable formats: 4621 * leaf_name - For example: c1t0d0 or /tmp/ztest.0a 4622 * child[.child]* - For example: 0.1.1 4623 * 4624 * The second form can be used to specify arbitrary vdevs anywhere 4625 * in the hierarchy. For example, in a pool with a mirror of 4626 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 . 4627 */ 4628 static vdev_t * 4629 zdb_vdev_lookup(vdev_t *vdev, const char *path) 4630 { 4631 char *s, *p, *q; 4632 unsigned i; 4633 4634 if (vdev == NULL) 4635 return (NULL); 4636 4637 /* First, assume the x.x.x.x format */ 4638 i = strtoul(path, &s, 10); 4639 if (s == path || (s && *s != '.' && *s != '\0')) 4640 goto name; 4641 if (i >= vdev->vdev_children) 4642 return (NULL); 4643 4644 vdev = vdev->vdev_child[i]; 4645 if (*s == '\0') 4646 return (vdev); 4647 return (zdb_vdev_lookup(vdev, s+1)); 4648 4649 name: 4650 for (i = 0; i < vdev->vdev_children; i++) { 4651 vdev_t *vc = vdev->vdev_child[i]; 4652 4653 if (vc->vdev_path == NULL) { 4654 vc = zdb_vdev_lookup(vc, path); 4655 if (vc == NULL) 4656 continue; 4657 else 4658 return (vc); 4659 } 4660 4661 p = strrchr(vc->vdev_path, '/'); 4662 p = p ? p + 1 : vc->vdev_path; 4663 q = &vc->vdev_path[strlen(vc->vdev_path) - 2]; 4664 4665 if (strcmp(vc->vdev_path, path) == 0) 4666 return (vc); 4667 if (strcmp(p, path) == 0) 4668 return (vc); 4669 if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0) 4670 return (vc); 4671 } 4672 4673 return (NULL); 4674 } 4675 4676 /* ARGSUSED */ 4677 static int 4678 random_get_pseudo_bytes_cb(void *buf, size_t len, void *unused) 4679 { 4680 return (random_get_pseudo_bytes(buf, len)); 4681 } 4682 4683 /* 4684 * Read a block from a pool and print it out. The syntax of the 4685 * block descriptor is: 4686 * 4687 * pool:vdev_specifier:offset:size[:flags] 4688 * 4689 * pool - The name of the pool you wish to read from 4690 * vdev_specifier - Which vdev (see comment for zdb_vdev_lookup) 4691 * offset - offset, in hex, in bytes 4692 * size - Amount of data to read, in hex, in bytes 4693 * flags - A string of characters specifying options 4694 * b: Decode a blkptr at given offset within block 4695 * *c: Calculate and display checksums 4696 * d: Decompress data before dumping 4697 * e: Byteswap data before dumping 4698 * g: Display data as a gang block header 4699 * i: Display as an indirect block 4700 * p: Do I/O to physical offset 4701 * r: Dump raw data to stdout 4702 * 4703 * * = not yet implemented 4704 */ 4705 static void 4706 zdb_read_block(char *thing, spa_t *spa) 4707 { 4708 blkptr_t blk, *bp = &blk; 4709 dva_t *dva = bp->blk_dva; 4710 int flags = 0; 4711 uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0; 4712 zio_t *zio; 4713 vdev_t *vd; 4714 abd_t *pabd; 4715 void *lbuf, *buf; 4716 const char *s, *vdev; 4717 char *p, *dup, *flagstr; 4718 int i, error; 4719 4720 dup = strdup(thing); 4721 s = strtok(dup, ":"); 4722 vdev = s ? s : ""; 4723 s = strtok(NULL, ":"); 4724 offset = strtoull(s ? s : "", NULL, 16); 4725 s = strtok(NULL, ":"); 4726 size = strtoull(s ?
s : "", NULL, 16); 4727 s = strtok(NULL, ":"); 4728 if (s) 4729 flagstr = strdup(s); 4730 else 4731 flagstr = strdup(""); 4732 4733 s = NULL; 4734 if (size == 0) 4735 s = "size must not be zero"; 4736 if (!IS_P2ALIGNED(size, DEV_BSIZE)) 4737 s = "size must be a multiple of sector size"; 4738 if (!IS_P2ALIGNED(offset, DEV_BSIZE)) 4739 s = "offset must be a multiple of sector size"; 4740 if (s) { 4741 (void) printf("Invalid block specifier: %s - %s\n", thing, s); 4742 free(dup); 4743 return; 4744 } 4745 4746 for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) { 4747 for (i = 0; flagstr[i]; i++) { 4748 int bit = flagbits[(uchar_t)flagstr[i]]; 4749 4750 if (bit == 0) { 4751 (void) printf("***Invalid flag: %c\n", 4752 flagstr[i]); 4753 continue; 4754 } 4755 flags |= bit; 4756 4757 /* If it's not something with an argument, keep going */ 4758 if ((bit & (ZDB_FLAG_CHECKSUM | 4759 ZDB_FLAG_PRINT_BLKPTR)) == 0) 4760 continue; 4761 4762 p = &flagstr[i + 1]; 4763 if (bit == ZDB_FLAG_PRINT_BLKPTR) 4764 blkptr_offset = strtoull(p, &p, 16); 4765 if (*p != ':' && *p != '\0') { 4766 (void) printf("***Invalid flag arg: '%s'\n", s); 4767 free(dup); 4768 return; 4769 } 4770 } 4771 } 4772 free(flagstr); 4773 4774 vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev); 4775 if (vd == NULL) { 4776 (void) printf("***Invalid vdev: %s\n", vdev); 4777 free(dup); 4778 return; 4779 } else { 4780 if (vd->vdev_path) 4781 (void) fprintf(stderr, "Found vdev: %s\n", 4782 vd->vdev_path); 4783 else 4784 (void) fprintf(stderr, "Found vdev type: %s\n", 4785 vd->vdev_ops->vdev_op_type); 4786 } 4787 4788 psize = size; 4789 lsize = size; 4790 4791 pabd = abd_alloc_linear(SPA_MAXBLOCKSIZE, B_FALSE); 4792 lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 4793 4794 BP_ZERO(bp); 4795 4796 DVA_SET_VDEV(&dva[0], vd->vdev_id); 4797 DVA_SET_OFFSET(&dva[0], offset); 4798 DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH)); 4799 DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize)); 4800 4801 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL); 4802 4803 BP_SET_LSIZE(bp, lsize); 4804 BP_SET_PSIZE(bp, psize); 4805 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF); 4806 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF); 4807 BP_SET_TYPE(bp, DMU_OT_NONE); 4808 BP_SET_LEVEL(bp, 0); 4809 BP_SET_DEDUP(bp, 0); 4810 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 4811 4812 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4813 zio = zio_root(spa, NULL, NULL, 0); 4814 4815 if (vd == vd->vdev_top) { 4816 /* 4817 * Treat this as a normal block read. 4818 */ 4819 zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL, 4820 ZIO_PRIORITY_SYNC_READ, 4821 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL)); 4822 } else { 4823 /* 4824 * Treat this as a vdev child I/O. 4825 */ 4826 zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd, 4827 psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ, 4828 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE | 4829 ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY | 4830 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL, 4831 NULL, NULL)); 4832 } 4833 4834 error = zio_wait(zio); 4835 spa_config_exit(spa, SCL_STATE, FTAG); 4836 4837 if (error) { 4838 (void) printf("Read of %s failed, error: %d\n", thing, error); 4839 goto out; 4840 } 4841 4842 if (flags & ZDB_FLAG_DECOMPRESS) { 4843 /* 4844 * We don't know how the data was compressed, so just try 4845 * every decompress function at every inflated blocksize. 
4846 */ 4847 enum zio_compress c; 4848 void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 4849 void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 4850 4851 abd_copy_to_buf(pbuf2, pabd, psize); 4852 4853 VERIFY0(abd_iterate_func(pabd, psize, SPA_MAXBLOCKSIZE - psize, 4854 random_get_pseudo_bytes_cb, NULL)); 4855 4856 VERIFY0(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize, 4857 SPA_MAXBLOCKSIZE - psize)); 4858 4859 for (lsize = SPA_MAXBLOCKSIZE; lsize > psize; 4860 lsize -= SPA_MINBLOCKSIZE) { 4861 for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) { 4862 if (zio_decompress_data(c, pabd, 4863 lbuf, psize, lsize) == 0 && 4864 zio_decompress_data_buf(c, pbuf2, 4865 lbuf2, psize, lsize) == 0 && 4866 bcmp(lbuf, lbuf2, lsize) == 0) 4867 break; 4868 } 4869 if (c != ZIO_COMPRESS_FUNCTIONS) 4870 break; 4871 4872 } 4873 4874 umem_free(pbuf2, SPA_MAXBLOCKSIZE); 4875 umem_free(lbuf2, SPA_MAXBLOCKSIZE); 4876 4877 if (lsize <= psize) { 4878 (void) printf("Decompress of %s failed\n", thing); 4879 goto out; 4880 } 4881 buf = lbuf; 4882 size = lsize; 4883 } else { 4884 buf = abd_to_buf(pabd); 4885 size = psize; 4886 } 4887 4888 if (flags & ZDB_FLAG_PRINT_BLKPTR) 4889 zdb_print_blkptr((blkptr_t *)(void *) 4890 ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags); 4891 else if (flags & ZDB_FLAG_RAW) 4892 zdb_dump_block_raw(buf, size, flags); 4893 else if (flags & ZDB_FLAG_INDIRECT) 4894 zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t), 4895 flags); 4896 else if (flags & ZDB_FLAG_GBH) 4897 zdb_dump_gbh(buf, flags); 4898 else 4899 zdb_dump_block(thing, buf, size, flags); 4900 4901 out: 4902 abd_free(pabd); 4903 umem_free(lbuf, SPA_MAXBLOCKSIZE); 4904 free(dup); 4905 } 4906 4907 static void 4908 zdb_embedded_block(char *thing) 4909 { 4910 blkptr_t bp; 4911 unsigned long long *words = (void *)&bp; 4912 char *buf; 4913 int err; 4914 4915 bzero(&bp, sizeof (bp)); 4916 err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:" 4917 "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx", 4918 words + 0, words + 1, words + 2, words + 3, 4919 words + 4, words + 5, words + 6, words + 7, 4920 words + 8, words + 9, words + 10, words + 11, 4921 words + 12, words + 13, words + 14, words + 15); 4922 if (err != 16) { 4923 (void) fprintf(stderr, "invalid input format\n"); 4924 exit(1); 4925 } 4926 ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE); 4927 buf = malloc(SPA_MAXBLOCKSIZE); 4928 if (buf == NULL) { 4929 (void) fprintf(stderr, "out of memory\n"); 4930 exit(1); 4931 } 4932 err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp)); 4933 if (err != 0) { 4934 (void) fprintf(stderr, "decode failed: %u\n", err); 4935 exit(1); 4936 } 4937 zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0); 4938 free(buf); 4939 } 4940 4941 static boolean_t 4942 pool_match(nvlist_t *cfg, char *tgt) 4943 { 4944 uint64_t v, guid = strtoull(tgt, NULL, 0); 4945 char *s; 4946 4947 if (guid != 0) { 4948 if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0) 4949 return (v == guid); 4950 } else { 4951 if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0) 4952 return (strcmp(s, tgt) == 0); 4953 } 4954 return (B_FALSE); 4955 } 4956 4957 static char * 4958 find_zpool(char **target, nvlist_t **configp, int dirc, char **dirv) 4959 { 4960 nvlist_t *pools; 4961 nvlist_t *match = NULL; 4962 char *name = NULL; 4963 char *sepp = NULL; 4964 char sep = '\0'; 4965 int count = 0; 4966 importargs_t args; 4967 4968 bzero(&args, sizeof (args)); 4969 args.paths = dirc; 4970 args.path = dirv; 4971 args.can_be_active =
B_TRUE; 4972 4973 if ((sepp = strpbrk(*target, "/@")) != NULL) { 4974 sep = *sepp; 4975 *sepp = '\0'; 4976 } 4977 4978 pools = zpool_search_import(g_zfs, &args); 4979 4980 if (pools != NULL) { 4981 nvpair_t *elem = NULL; 4982 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 4983 verify(nvpair_value_nvlist(elem, configp) == 0); 4984 if (pool_match(*configp, *target)) { 4985 count++; 4986 if (match != NULL) { 4987 /* print previously found config */ 4988 if (name != NULL) { 4989 (void) printf("%s\n", name); 4990 dump_nvlist(match, 8); 4991 name = NULL; 4992 } 4993 (void) printf("%s\n", 4994 nvpair_name(elem)); 4995 dump_nvlist(*configp, 8); 4996 } else { 4997 match = *configp; 4998 name = nvpair_name(elem); 4999 } 5000 } 5001 } 5002 } 5003 if (count > 1) 5004 (void) fatal("\tMatched %d pools - use pool GUID " 5005 "instead of pool name or \n" 5006 "\tpool name part of a dataset name to select pool", count); 5007 5008 if (sepp) 5009 *sepp = sep; 5010 /* 5011 * If pool GUID was specified for pool id, replace it with pool name 5012 */ 5013 if (name && (strstr(*target, name) != *target)) { 5014 int sz = 1 + strlen(name) + ((sepp) ? strlen(sepp) : 0); 5015 5016 *target = umem_alloc(sz, UMEM_NOFAIL); 5017 (void) snprintf(*target, sz, "%s%s", name, sepp ? sepp : ""); 5018 } 5019 5020 *configp = name ? match : NULL; 5021 5022 return (name); 5023 } 5024 5025 int 5026 main(int argc, char **argv) 5027 { 5028 int c; 5029 struct rlimit rl = { 1024, 1024 }; 5030 spa_t *spa = NULL; 5031 objset_t *os = NULL; 5032 int dump_all = 1; 5033 int verbose = 0; 5034 int error = 0; 5035 char **searchdirs = NULL; 5036 int nsearch = 0; 5037 char *target; 5038 nvlist_t *policy = NULL; 5039 uint64_t max_txg = UINT64_MAX; 5040 int flags = ZFS_IMPORT_MISSING_LOG; 5041 int rewind = ZPOOL_NEVER_REWIND; 5042 char *spa_config_path_env; 5043 boolean_t target_is_spa = B_TRUE; 5044 nvlist_t *cfg = NULL; 5045 5046 (void) setrlimit(RLIMIT_NOFILE, &rl); 5047 (void) enable_extended_FILE_stdio(-1, -1); 5048 5049 dprintf_setup(&argc, argv); 5050 5051 /* 5052 * If there is an environment variable SPA_CONFIG_PATH it overrides 5053 * default spa_config_path setting. If -U flag is specified it will 5054 * override this environment variable settings once again. 5055 */ 5056 spa_config_path_env = getenv("SPA_CONFIG_PATH"); 5057 if (spa_config_path_env != NULL) 5058 spa_config_path = spa_config_path_env; 5059 5060 while ((c = getopt(argc, argv, 5061 "AbcCdDeEFGhiI:klLmMo:Op:PqRsSt:uU:vVx:X")) != -1) { 5062 switch (c) { 5063 case 'b': 5064 case 'c': 5065 case 'C': 5066 case 'd': 5067 case 'D': 5068 case 'E': 5069 case 'G': 5070 case 'h': 5071 case 'i': 5072 case 'l': 5073 case 'm': 5074 case 'M': 5075 case 'O': 5076 case 'R': 5077 case 's': 5078 case 'S': 5079 case 'u': 5080 dump_opt[c]++; 5081 dump_all = 0; 5082 break; 5083 case 'A': 5084 case 'e': 5085 case 'F': 5086 case 'k': 5087 case 'L': 5088 case 'P': 5089 case 'q': 5090 case 'X': 5091 dump_opt[c]++; 5092 break; 5093 /* NB: Sort single match options below. 
*/ 5094 case 'I': 5095 max_inflight = strtoull(optarg, NULL, 0); 5096 if (max_inflight == 0) { 5097 (void) fprintf(stderr, "maximum number " 5098 "of inflight I/Os must be greater " 5099 "than 0\n"); 5100 usage(); 5101 } 5102 break; 5103 case 'o': 5104 error = set_global_var(optarg); 5105 if (error != 0) 5106 usage(); 5107 break; 5108 case 'p': 5109 if (searchdirs == NULL) { 5110 searchdirs = umem_alloc(sizeof (char *), 5111 UMEM_NOFAIL); 5112 } else { 5113 char **tmp = umem_alloc((nsearch + 1) * 5114 sizeof (char *), UMEM_NOFAIL); 5115 bcopy(searchdirs, tmp, nsearch * 5116 sizeof (char *)); 5117 umem_free(searchdirs, 5118 nsearch * sizeof (char *)); 5119 searchdirs = tmp; 5120 } 5121 searchdirs[nsearch++] = optarg; 5122 break; 5123 case 't': 5124 max_txg = strtoull(optarg, NULL, 0); 5125 if (max_txg < TXG_INITIAL) { 5126 (void) fprintf(stderr, "incorrect txg " 5127 "specified: %s\n", optarg); 5128 usage(); 5129 } 5130 break; 5131 case 'U': 5132 spa_config_path = optarg; 5133 if (spa_config_path[0] != '/') { 5134 (void) fprintf(stderr, 5135 "cachefile must be an absolute path " 5136 "(i.e. start with a slash)\n"); 5137 usage(); 5138 } 5139 break; 5140 case 'v': 5141 verbose++; 5142 break; 5143 case 'V': 5144 flags = ZFS_IMPORT_VERBATIM; 5145 break; 5146 case 'x': 5147 vn_dumpdir = optarg; 5148 break; 5149 default: 5150 usage(); 5151 break; 5152 } 5153 } 5154 5155 if (!dump_opt['e'] && searchdirs != NULL) { 5156 (void) fprintf(stderr, "-p option requires use of -e\n"); 5157 usage(); 5158 } 5159 5160 /* 5161 * ZDB does not typically re-read blocks; therefore limit the ARC 5162 * to 256 MB, which can be used entirely for metadata. 5163 */ 5164 zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024; 5165 5166 /* 5167 * "zdb -c" uses checksum-verifying scrub i/os which are async reads. 5168 * "zdb -b" uses traversal prefetch which uses async reads. 5169 * For good performance, let several of them be active at once. 5170 */ 5171 zfs_vdev_async_read_max_active = 10; 5172 5173 /* 5174 * Disable reference tracking for better performance. 5175 */ 5176 reference_tracking_enable = B_FALSE; 5177 5178 /* 5179 * Do not fail spa_load when spa_load_verify fails. This is needed 5180 * to load non-idle pools. 5181 */ 5182 spa_load_verify_dryrun = B_TRUE; 5183 5184 kernel_init(FREAD); 5185 g_zfs = libzfs_init(); 5186 ASSERT(g_zfs != NULL); 5187 5188 if (dump_all) 5189 verbose = MAX(verbose, 1); 5190 5191 for (c = 0; c < 256; c++) { 5192 if (dump_all && strchr("AeEFklLOPRSX", c) == NULL) 5193 dump_opt[c] = 1; 5194 if (dump_opt[c]) 5195 dump_opt[c] += verbose; 5196 } 5197 5198 aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2); 5199 zfs_recover = (dump_opt['A'] > 1); 5200 5201 argc -= optind; 5202 argv += optind; 5203 5204 if (argc < 2 && dump_opt['R']) 5205 usage(); 5206 5207 if (dump_opt['E']) { 5208 if (argc != 1) 5209 usage(); 5210 zdb_embedded_block(argv[0]); 5211 return (0); 5212 } 5213 5214 if (argc < 1) { 5215 if (!dump_opt['e'] && dump_opt['C']) { 5216 dump_cachefile(spa_config_path); 5217 return (0); 5218 } 5219 usage(); 5220 } 5221 5222 if (dump_opt['l']) 5223 return (dump_label(argv[0])); 5224 5225 if (dump_opt['O']) { 5226 if (argc != 2) 5227 usage(); 5228 dump_opt['v'] = verbose + 3; 5229 return (dump_path(argv[0], argv[1])); 5230 } 5231 5232 if (dump_opt['X'] || dump_opt['F']) 5233 rewind = ZPOOL_DO_REWIND | 5234 (dump_opt['X'] ? 
ZPOOL_EXTREME_REWIND : 0); 5235 5236 if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 || 5237 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 || 5238 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0) 5239 fatal("internal error: %s", strerror(ENOMEM)); 5240 5241 error = 0; 5242 target = argv[0]; 5243 5244 if (dump_opt['e']) { 5245 char *name = find_zpool(&target, &cfg, nsearch, searchdirs); 5246 5247 error = ENOENT; 5248 if (name) { 5249 if (dump_opt['C'] > 1) { 5250 (void) printf("\nConfiguration for import:\n"); 5251 dump_nvlist(cfg, 8); 5252 } 5253 5254 if (nvlist_add_nvlist(cfg, 5255 ZPOOL_LOAD_POLICY, policy) != 0) { 5256 fatal("can't open '%s': %s", 5257 target, strerror(ENOMEM)); 5258 } 5259 error = spa_import(name, cfg, NULL, flags); 5260 } 5261 } 5262 5263 char *checkpoint_pool = NULL; 5264 char *checkpoint_target = NULL; 5265 if (dump_opt['k']) { 5266 checkpoint_pool = import_checkpointed_state(target, cfg, 5267 &checkpoint_target); 5268 5269 if (checkpoint_target != NULL) 5270 target = checkpoint_target; 5271 5272 } 5273 5274 if (strpbrk(target, "/@") != NULL) { 5275 size_t targetlen; 5276 5277 target_is_spa = B_FALSE; 5278 /* 5279 * Remove any trailing slash. Later code would get confused 5280 * by it, but we want to allow it so that "pool/" can 5281 * indicate that we want to dump the topmost filesystem, 5282 * rather than the whole pool. 5283 */ 5284 targetlen = strlen(target); 5285 if (targetlen != 0 && target[targetlen - 1] == '/') 5286 target[targetlen - 1] = '\0'; 5287 } 5288 5289 if (error == 0) { 5290 if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) { 5291 ASSERT(checkpoint_pool != NULL); 5292 ASSERT(checkpoint_target == NULL); 5293 5294 error = spa_open(checkpoint_pool, &spa, FTAG); 5295 if (error != 0) { 5296 fatal("Tried to open pool \"%s\" but " 5297 "spa_open() failed with error %d\n", 5298 checkpoint_pool, error); 5299 } 5300 5301 } else if (target_is_spa || dump_opt['R']) { 5302 error = spa_open_rewind(target, &spa, FTAG, policy, 5303 NULL); 5304 if (error) { 5305 /* 5306 * If we're missing the log device then 5307 * try opening the pool after clearing the 5308 * log state. 
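 * Setting spa_log_state to SPA_LOG_CLEAR tells the retried open
 * to clear the (missing) log state instead of failing again.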
5309 */ 5310 mutex_enter(&spa_namespace_lock); 5311 if ((spa = spa_lookup(target)) != NULL && 5312 spa->spa_log_state == SPA_LOG_MISSING) { 5313 spa->spa_log_state = SPA_LOG_CLEAR; 5314 error = 0; 5315 } 5316 mutex_exit(&spa_namespace_lock); 5317 5318 if (!error) { 5319 error = spa_open_rewind(target, &spa, 5320 FTAG, policy, NULL); 5321 } 5322 } 5323 } else { 5324 error = open_objset(target, DMU_OST_ANY, FTAG, &os); 5325 } 5326 } 5327 nvlist_free(policy); 5328 5329 if (error) 5330 fatal("can't open '%s': %s", target, strerror(error)); 5331 5332 argv++; 5333 argc--; 5334 if (!dump_opt['R']) { 5335 if (argc > 0) { 5336 zopt_objects = argc; 5337 zopt_object = calloc(zopt_objects, sizeof (uint64_t)); 5338 for (unsigned i = 0; i < zopt_objects; i++) { 5339 errno = 0; 5340 zopt_object[i] = strtoull(argv[i], NULL, 0); 5341 if (zopt_object[i] == 0 && errno != 0) 5342 fatal("bad number %s: %s", 5343 argv[i], strerror(errno)); 5344 } 5345 } 5346 if (os != NULL) { 5347 dump_dir(os); 5348 } else if (zopt_objects > 0 && !dump_opt['m']) { 5349 dump_dir(spa->spa_meta_objset); 5350 } else { 5351 dump_zpool(spa); 5352 } 5353 } else { 5354 flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR; 5355 flagbits['c'] = ZDB_FLAG_CHECKSUM; 5356 flagbits['d'] = ZDB_FLAG_DECOMPRESS; 5357 flagbits['e'] = ZDB_FLAG_BSWAP; 5358 flagbits['g'] = ZDB_FLAG_GBH; 5359 flagbits['i'] = ZDB_FLAG_INDIRECT; 5360 flagbits['p'] = ZDB_FLAG_PHYS; 5361 flagbits['r'] = ZDB_FLAG_RAW; 5362 5363 for (int i = 0; i < argc; i++) 5364 zdb_read_block(argv[i], spa); 5365 } 5366 5367 if (dump_opt['k']) { 5368 free(checkpoint_pool); 5369 if (!target_is_spa) 5370 free(checkpoint_target); 5371 } 5372 5373 if (os != NULL) 5374 close_objset(os, FTAG); 5375 else 5376 spa_close(spa, FTAG); 5377 5378 fuid_table_destroy(); 5379 5380 dump_debug_buffer(); 5381 5382 libzfs_fini(g_zfs); 5383 kernel_fini(); 5384 5385 return (error); 5386 } 5387