/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
 * Copyright 2017 RackTop Systems.
 */

#include <stdio.h>
#include <unistd.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_crypt.h>
#include <zfs_comutil.h>
#include <libcmdutils.h>
#undef verify
#include <libzfs.h>

#include <libnvpair.h>
#include <libzutil.h>

#include "zdb.h"

#define	ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ?	\
	zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ?	\
	zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ?	\
	dmu_ot[(idx)].ot_name : DMU_OT_IS_VALID(idx) ?	\
	dmu_ot_byteswap[DMU_OT_BYTESWAP(idx)].ob_name : "UNKNOWN")
#define	ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) :	\
	(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ?	\
	DMU_OT_ZAP_OTHER : \
	(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ?	\
	DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)

extern int reference_tracking_enable;
extern boolean_t zfs_recover;
extern uint64_t zfs_arc_max, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
extern int aok;
extern boolean_t spa_load_verify_dryrun;
extern int zfs_btree_verify_intensity;

static const char cmdname[] = "zdb";
uint8_t dump_opt[256];

typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);

uint64_t *zopt_object = NULL;
static unsigned zopt_objects = 0;
uint64_t max_inflight = 1000;
static int leaked_objects = 0;

static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *);
static void mos_obj_refd(uint64_t);

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init()
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}

static void
usage(void)
{
	(void) fprintf(stderr,
	    "Usage:\t%s [-AbcdDFGhikLMPsvX] [-e [-V] [-p <path> ...]] "
	    "[-I <inflight I/Os>]\n"
	    "\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
	    "\t\t[<poolname> [<object> ...]]\n"
	    "\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] <dataset> "
	    "[<object> ...]\n"
	    "\t%s -C [-A] [-U <cache>]\n"
	    "\t%s -l [-Aqu] <device>\n"
	    "\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
	    "[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
	    "\t%s -O <dataset> <path>\n"
	    "\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
	    "\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
	    "\t%s -E [-A] word0:word1:...:word15\n"
	    "\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
	    "<poolname>\n\n",
	    cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
	    cmdname, cmdname);

	(void) fprintf(stderr, " Dataset name must include at least one "
	    "separator character '/' or '@'\n");
	(void) fprintf(stderr, " If dataset name is specified, only that "
	    "dataset is dumped\n");
	(void) fprintf(stderr, " If object numbers are specified, only "
	    "those objects are dumped\n\n");
	(void) fprintf(stderr, " Options to control amount of output:\n");
	(void) fprintf(stderr, " -b block statistics\n");
	(void) fprintf(stderr, " -c checksum all metadata (twice for "
	    "all data) blocks\n");
	(void) fprintf(stderr, " -C config (or cachefile if alone)\n");
	(void) fprintf(stderr, " -d dataset(s)\n");
	(void) fprintf(stderr, " -D dedup statistics\n");
	(void) fprintf(stderr, " -E decode and display block from an "
	    "embedded block pointer\n");
	(void) fprintf(stderr, " -h pool history\n");
	(void) fprintf(stderr, " -i intent logs\n");
	(void) fprintf(stderr, " -l read label contents\n");
	(void) fprintf(stderr, " -k examine the checkpointed state "
	    "of the pool\n");
	(void) fprintf(stderr, " -L disable leak tracking (do not "
	    "load spacemaps)\n");
	(void) fprintf(stderr, " -m metaslabs\n");
	(void) fprintf(stderr, " -M metaslab groups\n");
	(void) fprintf(stderr, " -O perform object lookups by path\n");
	(void) fprintf(stderr, " -R read and display block from a "
	    "device\n");
	(void) fprintf(stderr, " -s report stats on zdb's I/O\n");
	(void) fprintf(stderr, " -S simulate dedup to measure effect\n");
	(void) fprintf(stderr, " -v verbose (applies to all "
	    "others)\n\n");
	(void) fprintf(stderr, " Below options are intended for use "
	    "with other options:\n");
	(void) fprintf(stderr, " -A ignore assertions (-A), enable "
	    "panic recovery (-AA) or both (-AAA)\n");
	(void) fprintf(stderr, " -e pool is exported/destroyed/"
	    "has altroot/not in a cachefile\n");
	(void) fprintf(stderr, " -F attempt automatic rewind within "
	    "safe range of transaction groups\n");
	(void) fprintf(stderr, " -G dump zfs_dbgmsg buffer before "
	    "exiting\n");
	(void) fprintf(stderr, " -I <number of inflight I/Os> -- "
	    "specify the maximum number of "
	    "checksumming I/Os [default is 1000]\n");
	(void) fprintf(stderr, " -o <variable>=<value> set global "
	    "variable to an unsigned 32-bit integer value\n");
	(void) fprintf(stderr, " -p <path> -- use one or more with "
	    "-e to specify path to vdev dir\n");
	(void) fprintf(stderr, " -P print numbers in parseable form\n");
	(void) fprintf(stderr, " -q don't print label contents\n");
	(void) fprintf(stderr, " -t <txg> -- highest txg to use when "
	    "searching for uberblocks\n");
	(void) fprintf(stderr, " -u uberblock\n");
	(void) fprintf(stderr, " -U <cachefile_path> -- use alternate "
	    "cachefile\n");
	(void) fprintf(stderr, " -V do verbatim import\n");
	(void) fprintf(stderr, " -x <dumpdir> -- "
	    "dump all read blocks into specified directory\n");
	(void) fprintf(stderr, " -X attempt extreme rewind (does not "
	    "work with dataset)\n\n");
	(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
	    "to make only that option verbose\n");
	(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
	exit(1);
}

static void
dump_debug_buffer(void)
{
	if (dump_opt['G']) {
		(void) printf("\n");
		zfs_dbgmsg_print("zdb");
	}
}

/*
 * Called for usage errors that are discovered after a call to spa_open(),
 * dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
 */

static void
fatal(const char *fmt, ...)
236 { 237 va_list ap; 238 239 va_start(ap, fmt); 240 (void) fprintf(stderr, "%s: ", cmdname); 241 (void) vfprintf(stderr, fmt, ap); 242 va_end(ap); 243 (void) fprintf(stderr, "\n"); 244 245 dump_debug_buffer(); 246 247 exit(1); 248 } 249 250 /* ARGSUSED */ 251 static void 252 dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size) 253 { 254 nvlist_t *nv; 255 size_t nvsize = *(uint64_t *)data; 256 char *packed = umem_alloc(nvsize, UMEM_NOFAIL); 257 258 VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH)); 259 260 VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0); 261 262 umem_free(packed, nvsize); 263 264 dump_nvlist(nv, 8); 265 266 nvlist_free(nv); 267 } 268 269 /* ARGSUSED */ 270 static void 271 dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size) 272 { 273 spa_history_phys_t *shp = data; 274 275 if (shp == NULL) 276 return; 277 278 (void) printf("\t\tpool_create_len = %llu\n", 279 (u_longlong_t)shp->sh_pool_create_len); 280 (void) printf("\t\tphys_max_off = %llu\n", 281 (u_longlong_t)shp->sh_phys_max_off); 282 (void) printf("\t\tbof = %llu\n", 283 (u_longlong_t)shp->sh_bof); 284 (void) printf("\t\teof = %llu\n", 285 (u_longlong_t)shp->sh_eof); 286 (void) printf("\t\trecords_lost = %llu\n", 287 (u_longlong_t)shp->sh_records_lost); 288 } 289 290 static void 291 zdb_nicenum(uint64_t num, char *buf, size_t buflen) 292 { 293 if (dump_opt['P']) 294 (void) snprintf(buf, buflen, "%llu", (longlong_t)num); 295 else 296 nicenum(num, buf, sizeof (buf)); 297 } 298 299 static const char histo_stars[] = "****************************************"; 300 static const uint64_t histo_width = sizeof (histo_stars) - 1; 301 302 static void 303 dump_histogram(const uint64_t *histo, int size, int offset) 304 { 305 int i; 306 int minidx = size - 1; 307 int maxidx = 0; 308 uint64_t max = 0; 309 310 for (i = 0; i < size; i++) { 311 if (histo[i] > max) 312 max = histo[i]; 313 if (histo[i] > 0 && i > maxidx) 314 maxidx = i; 315 if (histo[i] > 0 && i < minidx) 316 minidx = i; 317 } 318 319 if (max < histo_width) 320 max = histo_width; 321 322 for (i = minidx; i <= maxidx; i++) { 323 (void) printf("\t\t\t%3u: %6llu %s\n", 324 i + offset, (u_longlong_t)histo[i], 325 &histo_stars[(max - histo[i]) * histo_width / max]); 326 } 327 } 328 329 static void 330 dump_zap_stats(objset_t *os, uint64_t object) 331 { 332 int error; 333 zap_stats_t zs; 334 335 error = zap_get_stats(os, object, &zs); 336 if (error) 337 return; 338 339 if (zs.zs_ptrtbl_len == 0) { 340 ASSERT(zs.zs_num_blocks == 1); 341 (void) printf("\tmicrozap: %llu bytes, %llu entries\n", 342 (u_longlong_t)zs.zs_blocksize, 343 (u_longlong_t)zs.zs_num_entries); 344 return; 345 } 346 347 (void) printf("\tFat ZAP stats:\n"); 348 349 (void) printf("\t\tPointer table:\n"); 350 (void) printf("\t\t\t%llu elements\n", 351 (u_longlong_t)zs.zs_ptrtbl_len); 352 (void) printf("\t\t\tzt_blk: %llu\n", 353 (u_longlong_t)zs.zs_ptrtbl_zt_blk); 354 (void) printf("\t\t\tzt_numblks: %llu\n", 355 (u_longlong_t)zs.zs_ptrtbl_zt_numblks); 356 (void) printf("\t\t\tzt_shift: %llu\n", 357 (u_longlong_t)zs.zs_ptrtbl_zt_shift); 358 (void) printf("\t\t\tzt_blks_copied: %llu\n", 359 (u_longlong_t)zs.zs_ptrtbl_blks_copied); 360 (void) printf("\t\t\tzt_nextblk: %llu\n", 361 (u_longlong_t)zs.zs_ptrtbl_nextblk); 362 363 (void) printf("\t\tZAP entries: %llu\n", 364 (u_longlong_t)zs.zs_num_entries); 365 (void) printf("\t\tLeaf blocks: %llu\n", 366 (u_longlong_t)zs.zs_num_leafs); 367 (void) printf("\t\tTotal blocks: %llu\n", 368 
(u_longlong_t)zs.zs_num_blocks); 369 (void) printf("\t\tzap_block_type: 0x%llx\n", 370 (u_longlong_t)zs.zs_block_type); 371 (void) printf("\t\tzap_magic: 0x%llx\n", 372 (u_longlong_t)zs.zs_magic); 373 (void) printf("\t\tzap_salt: 0x%llx\n", 374 (u_longlong_t)zs.zs_salt); 375 376 (void) printf("\t\tLeafs with 2^n pointers:\n"); 377 dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0); 378 379 (void) printf("\t\tBlocks with n*5 entries:\n"); 380 dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0); 381 382 (void) printf("\t\tBlocks n/10 full:\n"); 383 dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0); 384 385 (void) printf("\t\tEntries with n chunks:\n"); 386 dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0); 387 388 (void) printf("\t\tBuckets with n entries:\n"); 389 dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0); 390 } 391 392 /*ARGSUSED*/ 393 static void 394 dump_none(objset_t *os, uint64_t object, void *data, size_t size) 395 { 396 } 397 398 /*ARGSUSED*/ 399 static void 400 dump_unknown(objset_t *os, uint64_t object, void *data, size_t size) 401 { 402 (void) printf("\tUNKNOWN OBJECT TYPE\n"); 403 } 404 405 /*ARGSUSED*/ 406 static void 407 dump_uint8(objset_t *os, uint64_t object, void *data, size_t size) 408 { 409 } 410 411 /*ARGSUSED*/ 412 static void 413 dump_uint64(objset_t *os, uint64_t object, void *data, size_t size) 414 { 415 } 416 417 /*ARGSUSED*/ 418 static void 419 dump_zap(objset_t *os, uint64_t object, void *data, size_t size) 420 { 421 zap_cursor_t zc; 422 zap_attribute_t attr; 423 void *prop; 424 unsigned i; 425 426 dump_zap_stats(os, object); 427 (void) printf("\n"); 428 429 for (zap_cursor_init(&zc, os, object); 430 zap_cursor_retrieve(&zc, &attr) == 0; 431 zap_cursor_advance(&zc)) { 432 (void) printf("\t\t%s = ", attr.za_name); 433 if (attr.za_num_integers == 0) { 434 (void) printf("\n"); 435 continue; 436 } 437 prop = umem_zalloc(attr.za_num_integers * 438 attr.za_integer_length, UMEM_NOFAIL); 439 (void) zap_lookup(os, object, attr.za_name, 440 attr.za_integer_length, attr.za_num_integers, prop); 441 if (attr.za_integer_length == 1) { 442 if (strcmp(attr.za_name, 443 DSL_CRYPTO_KEY_MASTER_KEY) == 0 || 444 strcmp(attr.za_name, 445 DSL_CRYPTO_KEY_HMAC_KEY) == 0 || 446 strcmp(attr.za_name, DSL_CRYPTO_KEY_IV) == 0 || 447 strcmp(attr.za_name, DSL_CRYPTO_KEY_MAC) == 0 || 448 strcmp(attr.za_name, DMU_POOL_CHECKSUM_SALT) == 0) { 449 uint8_t *u8 = prop; 450 451 for (i = 0; i < attr.za_num_integers; i++) { 452 (void) printf("%02x", u8[i]); 453 } 454 } else { 455 (void) printf("%s", (char *)prop); 456 } 457 } else { 458 for (i = 0; i < attr.za_num_integers; i++) { 459 switch (attr.za_integer_length) { 460 case 2: 461 (void) printf("%u ", 462 ((uint16_t *)prop)[i]); 463 break; 464 case 4: 465 (void) printf("%u ", 466 ((uint32_t *)prop)[i]); 467 break; 468 case 8: 469 (void) printf("%lld ", 470 (u_longlong_t)((int64_t *)prop)[i]); 471 break; 472 } 473 } 474 } 475 (void) printf("\n"); 476 umem_free(prop, attr.za_num_integers * attr.za_integer_length); 477 } 478 zap_cursor_fini(&zc); 479 } 480 481 static void 482 dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size) 483 { 484 bpobj_phys_t *bpop = data; 485 char bytes[32], comp[32], uncomp[32]; 486 487 /* make sure the output won't get truncated */ 488 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ); 489 CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ); 490 CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ); 491 492 if (bpop == NULL) 493 return; 494 495 
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes)); 496 zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp)); 497 zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp)); 498 499 (void) printf("\t\tnum_blkptrs = %llu\n", 500 (u_longlong_t)bpop->bpo_num_blkptrs); 501 (void) printf("\t\tbytes = %s\n", bytes); 502 if (size >= BPOBJ_SIZE_V1) { 503 (void) printf("\t\tcomp = %s\n", comp); 504 (void) printf("\t\tuncomp = %s\n", uncomp); 505 } 506 if (size >= sizeof (*bpop)) { 507 (void) printf("\t\tsubobjs = %llu\n", 508 (u_longlong_t)bpop->bpo_subobjs); 509 (void) printf("\t\tnum_subobjs = %llu\n", 510 (u_longlong_t)bpop->bpo_num_subobjs); 511 } 512 513 if (dump_opt['d'] < 5) 514 return; 515 516 for (uint64_t i = 0; i < bpop->bpo_num_blkptrs; i++) { 517 char blkbuf[BP_SPRINTF_LEN]; 518 blkptr_t bp; 519 520 int err = dmu_read(os, object, 521 i * sizeof (bp), sizeof (bp), &bp, 0); 522 if (err != 0) { 523 (void) printf("got error %u from dmu_read\n", err); 524 break; 525 } 526 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp); 527 (void) printf("\t%s\n", blkbuf); 528 } 529 } 530 531 /* ARGSUSED */ 532 static void 533 dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size) 534 { 535 dmu_object_info_t doi; 536 537 VERIFY0(dmu_object_info(os, object, &doi)); 538 uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP); 539 540 int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0); 541 if (err != 0) { 542 (void) printf("got error %u from dmu_read\n", err); 543 kmem_free(subobjs, doi.doi_max_offset); 544 return; 545 } 546 547 int64_t last_nonzero = -1; 548 for (uint64_t i = 0; i < doi.doi_max_offset / 8; i++) { 549 if (subobjs[i] != 0) 550 last_nonzero = i; 551 } 552 553 for (int64_t i = 0; i <= last_nonzero; i++) { 554 (void) printf("\t%llu\n", (longlong_t)subobjs[i]); 555 } 556 kmem_free(subobjs, doi.doi_max_offset); 557 } 558 559 /*ARGSUSED*/ 560 static void 561 dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size) 562 { 563 dump_zap_stats(os, object); 564 /* contents are printed elsewhere, properly decoded */ 565 } 566 567 /*ARGSUSED*/ 568 static void 569 dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size) 570 { 571 zap_cursor_t zc; 572 zap_attribute_t attr; 573 574 dump_zap_stats(os, object); 575 (void) printf("\n"); 576 577 for (zap_cursor_init(&zc, os, object); 578 zap_cursor_retrieve(&zc, &attr) == 0; 579 zap_cursor_advance(&zc)) { 580 (void) printf("\t\t%s = ", attr.za_name); 581 if (attr.za_num_integers == 0) { 582 (void) printf("\n"); 583 continue; 584 } 585 (void) printf(" %llx : [%d:%d:%d]\n", 586 (u_longlong_t)attr.za_first_integer, 587 (int)ATTR_LENGTH(attr.za_first_integer), 588 (int)ATTR_BSWAP(attr.za_first_integer), 589 (int)ATTR_NUM(attr.za_first_integer)); 590 } 591 zap_cursor_fini(&zc); 592 } 593 594 /*ARGSUSED*/ 595 static void 596 dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size) 597 { 598 zap_cursor_t zc; 599 zap_attribute_t attr; 600 uint16_t *layout_attrs; 601 unsigned i; 602 603 dump_zap_stats(os, object); 604 (void) printf("\n"); 605 606 for (zap_cursor_init(&zc, os, object); 607 zap_cursor_retrieve(&zc, &attr) == 0; 608 zap_cursor_advance(&zc)) { 609 (void) printf("\t\t%s = [", attr.za_name); 610 if (attr.za_num_integers == 0) { 611 (void) printf("\n"); 612 continue; 613 } 614 615 VERIFY(attr.za_integer_length == 2); 616 layout_attrs = umem_zalloc(attr.za_num_integers * 617 attr.za_integer_length, UMEM_NOFAIL); 618 619 VERIFY(zap_lookup(os, object, attr.za_name, 620 
attr.za_integer_length, 621 attr.za_num_integers, layout_attrs) == 0); 622 623 for (i = 0; i != attr.za_num_integers; i++) 624 (void) printf(" %d ", (int)layout_attrs[i]); 625 (void) printf("]\n"); 626 umem_free(layout_attrs, 627 attr.za_num_integers * attr.za_integer_length); 628 } 629 zap_cursor_fini(&zc); 630 } 631 632 /*ARGSUSED*/ 633 static void 634 dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size) 635 { 636 zap_cursor_t zc; 637 zap_attribute_t attr; 638 const char *typenames[] = { 639 /* 0 */ "not specified", 640 /* 1 */ "FIFO", 641 /* 2 */ "Character Device", 642 /* 3 */ "3 (invalid)", 643 /* 4 */ "Directory", 644 /* 5 */ "5 (invalid)", 645 /* 6 */ "Block Device", 646 /* 7 */ "7 (invalid)", 647 /* 8 */ "Regular File", 648 /* 9 */ "9 (invalid)", 649 /* 10 */ "Symbolic Link", 650 /* 11 */ "11 (invalid)", 651 /* 12 */ "Socket", 652 /* 13 */ "Door", 653 /* 14 */ "Event Port", 654 /* 15 */ "15 (invalid)", 655 }; 656 657 dump_zap_stats(os, object); 658 (void) printf("\n"); 659 660 for (zap_cursor_init(&zc, os, object); 661 zap_cursor_retrieve(&zc, &attr) == 0; 662 zap_cursor_advance(&zc)) { 663 (void) printf("\t\t%s = %lld (type: %s)\n", 664 attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer), 665 typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]); 666 } 667 zap_cursor_fini(&zc); 668 } 669 670 static int 671 get_dtl_refcount(vdev_t *vd) 672 { 673 int refcount = 0; 674 675 if (vd->vdev_ops->vdev_op_leaf) { 676 space_map_t *sm = vd->vdev_dtl_sm; 677 678 if (sm != NULL && 679 sm->sm_dbuf->db_size == sizeof (space_map_phys_t)) 680 return (1); 681 return (0); 682 } 683 684 for (unsigned c = 0; c < vd->vdev_children; c++) 685 refcount += get_dtl_refcount(vd->vdev_child[c]); 686 return (refcount); 687 } 688 689 static int 690 get_metaslab_refcount(vdev_t *vd) 691 { 692 int refcount = 0; 693 694 if (vd->vdev_top == vd) { 695 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 696 space_map_t *sm = vd->vdev_ms[m]->ms_sm; 697 698 if (sm != NULL && 699 sm->sm_dbuf->db_size == sizeof (space_map_phys_t)) 700 refcount++; 701 } 702 } 703 for (unsigned c = 0; c < vd->vdev_children; c++) 704 refcount += get_metaslab_refcount(vd->vdev_child[c]); 705 706 return (refcount); 707 } 708 709 static int 710 get_obsolete_refcount(vdev_t *vd) 711 { 712 int refcount = 0; 713 714 uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd); 715 if (vd->vdev_top == vd && obsolete_sm_obj != 0) { 716 dmu_object_info_t doi; 717 VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset, 718 obsolete_sm_obj, &doi)); 719 if (doi.doi_bonus_size == sizeof (space_map_phys_t)) { 720 refcount++; 721 } 722 } else { 723 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL); 724 ASSERT3U(obsolete_sm_obj, ==, 0); 725 } 726 for (unsigned c = 0; c < vd->vdev_children; c++) { 727 refcount += get_obsolete_refcount(vd->vdev_child[c]); 728 } 729 730 return (refcount); 731 } 732 733 static int 734 get_prev_obsolete_spacemap_refcount(spa_t *spa) 735 { 736 uint64_t prev_obj = 737 spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object; 738 if (prev_obj != 0) { 739 dmu_object_info_t doi; 740 VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi)); 741 if (doi.doi_bonus_size == sizeof (space_map_phys_t)) { 742 return (1); 743 } 744 } 745 return (0); 746 } 747 748 static int 749 get_checkpoint_refcount(vdev_t *vd) 750 { 751 int refcount = 0; 752 753 if (vd->vdev_top == vd && vd->vdev_top_zap != 0 && 754 zap_contains(spa_meta_objset(vd->vdev_spa), 755 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0) 756 refcount++; 757 758 for 
(uint64_t c = 0; c < vd->vdev_children; c++) 759 refcount += get_checkpoint_refcount(vd->vdev_child[c]); 760 761 return (refcount); 762 } 763 764 static int 765 get_log_spacemap_refcount(spa_t *spa) 766 { 767 return (avl_numnodes(&spa->spa_sm_logs_by_txg)); 768 } 769 770 static int 771 verify_spacemap_refcounts(spa_t *spa) 772 { 773 uint64_t expected_refcount = 0; 774 uint64_t actual_refcount; 775 776 (void) feature_get_refcount(spa, 777 &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM], 778 &expected_refcount); 779 actual_refcount = get_dtl_refcount(spa->spa_root_vdev); 780 actual_refcount += get_metaslab_refcount(spa->spa_root_vdev); 781 actual_refcount += get_obsolete_refcount(spa->spa_root_vdev); 782 actual_refcount += get_prev_obsolete_spacemap_refcount(spa); 783 actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev); 784 actual_refcount += get_log_spacemap_refcount(spa); 785 786 if (expected_refcount != actual_refcount) { 787 (void) printf("space map refcount mismatch: expected %lld != " 788 "actual %lld\n", 789 (longlong_t)expected_refcount, 790 (longlong_t)actual_refcount); 791 return (2); 792 } 793 return (0); 794 } 795 796 static void 797 dump_spacemap(objset_t *os, space_map_t *sm) 798 { 799 char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID", 800 "INVALID", "INVALID", "INVALID", "INVALID" }; 801 802 if (sm == NULL) 803 return; 804 805 (void) printf("space map object %llu:\n", 806 (longlong_t)sm->sm_object); 807 (void) printf(" smp_length = 0x%llx\n", 808 (longlong_t)sm->sm_phys->smp_length); 809 (void) printf(" smp_alloc = 0x%llx\n", 810 (longlong_t)sm->sm_phys->smp_alloc); 811 812 if (dump_opt['d'] < 6 && dump_opt['m'] < 4) 813 return; 814 815 /* 816 * Print out the freelist entries in both encoded and decoded form. 817 */ 818 uint8_t mapshift = sm->sm_shift; 819 int64_t alloc = 0; 820 uint64_t word, entry_id = 0; 821 for (uint64_t offset = 0; offset < space_map_length(sm); 822 offset += sizeof (word)) { 823 824 VERIFY0(dmu_read(os, space_map_object(sm), offset, 825 sizeof (word), &word, DMU_READ_PREFETCH)); 826 827 if (sm_entry_is_debug(word)) { 828 (void) printf("\t [%6llu] %s: txg %llu pass %llu\n", 829 (u_longlong_t)entry_id, 830 ddata[SM_DEBUG_ACTION_DECODE(word)], 831 (u_longlong_t)SM_DEBUG_TXG_DECODE(word), 832 (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(word)); 833 entry_id++; 834 continue; 835 } 836 837 uint8_t words; 838 char entry_type; 839 uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID; 840 841 if (sm_entry_is_single_word(word)) { 842 entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ? 843 'A' : 'F'; 844 entry_off = (SM_OFFSET_DECODE(word) << mapshift) + 845 sm->sm_start; 846 entry_run = SM_RUN_DECODE(word) << mapshift; 847 words = 1; 848 } else { 849 /* it is a two-word entry so we read another word */ 850 ASSERT(sm_entry_is_double_word(word)); 851 852 uint64_t extra_word; 853 offset += sizeof (extra_word); 854 VERIFY0(dmu_read(os, space_map_object(sm), offset, 855 sizeof (extra_word), &extra_word, 856 DMU_READ_PREFETCH)); 857 858 ASSERT3U(offset, <=, space_map_length(sm)); 859 860 entry_run = SM2_RUN_DECODE(word) << mapshift; 861 entry_vdev = SM2_VDEV_DECODE(word); 862 entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ? 
			    'A' : 'F';
			entry_off = (SM2_OFFSET_DECODE(extra_word) <<
			    mapshift) + sm->sm_start;
			words = 2;
		}

		(void) printf("\t [%6llu] %c range:"
		    " %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
		    (u_longlong_t)entry_id,
		    entry_type, (u_longlong_t)entry_off,
		    (u_longlong_t)(entry_off + entry_run),
		    (u_longlong_t)entry_run,
		    (u_longlong_t)entry_vdev, words);

		if (entry_type == 'A')
			alloc += entry_run;
		else
			alloc -= entry_run;
		entry_id++;
	}
	if (alloc != space_map_allocated(sm)) {
		(void) printf("space_map_object alloc (%lld) INCONSISTENT "
		    "with space map summary (%lld)\n",
		    (longlong_t)space_map_allocated(sm), (longlong_t)alloc);
	}
}

static void
dump_metaslab_stats(metaslab_t *msp)
{
	char maxbuf[32];
	range_tree_t *rt = msp->ms_allocatable;
	zfs_btree_t *t = &msp->ms_allocatable_by_size;
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ);

	zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));

	(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
	    "segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
	    "freepct", free_pct);
	(void) printf("\tIn-memory histogram:\n");
	dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}

static void
dump_metaslab(metaslab_t *msp)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	space_map_t *sm = msp->ms_sm;
	char freebuf[32];

	zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
	    sizeof (freebuf));

	(void) printf(
	    "\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
	    (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
	    (u_longlong_t)space_map_object(sm), freebuf);

	if (dump_opt['m'] > 2 && !dump_opt['L']) {
		mutex_enter(&msp->ms_lock);
		VERIFY0(metaslab_load(msp));
		range_tree_stat_verify(msp->ms_allocatable);
		dump_metaslab_stats(msp);
		metaslab_unload(msp);
		mutex_exit(&msp->ms_lock);
	}

	if (dump_opt['m'] > 1 && sm != NULL &&
	    spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		/*
		 * The space map histogram represents free space in chunks
		 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
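		 * Each successive bucket doubles that size, so bucket i
		 * counts free segments of roughly 2^(sm_shift + i) bytes.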
940 */ 941 (void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n", 942 (u_longlong_t)msp->ms_fragmentation); 943 dump_histogram(sm->sm_phys->smp_histogram, 944 SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift); 945 } 946 947 ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift)); 948 dump_spacemap(spa->spa_meta_objset, msp->ms_sm); 949 950 if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) { 951 (void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n", 952 (u_longlong_t)metaslab_unflushed_txg(msp)); 953 } 954 } 955 956 static void 957 print_vdev_metaslab_header(vdev_t *vd) 958 { 959 vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias; 960 const char *bias_str = ""; 961 962 if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) { 963 bias_str = VDEV_ALLOC_BIAS_LOG; 964 } else if (alloc_bias == VDEV_BIAS_SPECIAL) { 965 bias_str = VDEV_ALLOC_BIAS_SPECIAL; 966 } else if (alloc_bias == VDEV_BIAS_DEDUP) { 967 bias_str = VDEV_ALLOC_BIAS_DEDUP; 968 } 969 970 uint64_t ms_flush_data_obj = 0; 971 if (vd->vdev_top_zap != 0) { 972 int error = zap_lookup(spa_meta_objset(vd->vdev_spa), 973 vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, 974 sizeof (uint64_t), 1, &ms_flush_data_obj); 975 if (error != ENOENT) { 976 ASSERT0(error); 977 } 978 } 979 980 (void) printf("\tvdev %10llu %s", 981 (u_longlong_t)vd->vdev_id, bias_str); 982 983 if (ms_flush_data_obj != 0) { 984 (void) printf(" ms_unflushed_phys object %llu", 985 (u_longlong_t)ms_flush_data_obj); 986 } 987 988 (void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n", 989 "metaslabs", (u_longlong_t)vd->vdev_ms_count, 990 "offset", "spacemap", "free"); 991 (void) printf("\t%15s %19s %15s %12s\n", 992 "---------------", "-------------------", 993 "---------------", "------------"); 994 } 995 996 static void 997 dump_metaslab_groups(spa_t *spa) 998 { 999 vdev_t *rvd = spa->spa_root_vdev; 1000 metaslab_class_t *mc = spa_normal_class(spa); 1001 uint64_t fragmentation; 1002 1003 metaslab_class_histogram_verify(mc); 1004 1005 for (unsigned c = 0; c < rvd->vdev_children; c++) { 1006 vdev_t *tvd = rvd->vdev_child[c]; 1007 metaslab_group_t *mg = tvd->vdev_mg; 1008 1009 if (mg == NULL || mg->mg_class != mc) 1010 continue; 1011 1012 metaslab_group_histogram_verify(mg); 1013 mg->mg_fragmentation = metaslab_group_fragmentation(mg); 1014 1015 (void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t" 1016 "fragmentation", 1017 (u_longlong_t)tvd->vdev_id, 1018 (u_longlong_t)tvd->vdev_ms_count); 1019 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) { 1020 (void) printf("%3s\n", "-"); 1021 } else { 1022 (void) printf("%3llu%%\n", 1023 (u_longlong_t)mg->mg_fragmentation); 1024 } 1025 dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 1026 } 1027 1028 (void) printf("\tpool %s\tfragmentation", spa_name(spa)); 1029 fragmentation = metaslab_class_fragmentation(mc); 1030 if (fragmentation == ZFS_FRAG_INVALID) 1031 (void) printf("\t%3s\n", "-"); 1032 else 1033 (void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation); 1034 dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0); 1035 } 1036 1037 static void 1038 print_vdev_indirect(vdev_t *vd) 1039 { 1040 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 1041 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1042 vdev_indirect_births_t *vib = vd->vdev_indirect_births; 1043 1044 if (vim == NULL) { 1045 ASSERT3P(vib, ==, NULL); 1046 return; 1047 } 1048 1049 ASSERT3U(vdev_indirect_mapping_object(vim), ==, 1050 vic->vic_mapping_object); 1051 ASSERT3U(vdev_indirect_births_object(vib), ==, 1052 
vic->vic_births_object); 1053 1054 (void) printf("indirect births obj %llu:\n", 1055 (longlong_t)vic->vic_births_object); 1056 (void) printf(" vib_count = %llu\n", 1057 (longlong_t)vdev_indirect_births_count(vib)); 1058 for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) { 1059 vdev_indirect_birth_entry_phys_t *cur_vibe = 1060 &vib->vib_entries[i]; 1061 (void) printf("\toffset %llx -> txg %llu\n", 1062 (longlong_t)cur_vibe->vibe_offset, 1063 (longlong_t)cur_vibe->vibe_phys_birth_txg); 1064 } 1065 (void) printf("\n"); 1066 1067 (void) printf("indirect mapping obj %llu:\n", 1068 (longlong_t)vic->vic_mapping_object); 1069 (void) printf(" vim_max_offset = 0x%llx\n", 1070 (longlong_t)vdev_indirect_mapping_max_offset(vim)); 1071 (void) printf(" vim_bytes_mapped = 0x%llx\n", 1072 (longlong_t)vdev_indirect_mapping_bytes_mapped(vim)); 1073 (void) printf(" vim_count = %llu\n", 1074 (longlong_t)vdev_indirect_mapping_num_entries(vim)); 1075 1076 if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3) 1077 return; 1078 1079 uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim); 1080 1081 for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) { 1082 vdev_indirect_mapping_entry_phys_t *vimep = 1083 &vim->vim_entries[i]; 1084 (void) printf("\t<%llx:%llx:%llx> -> " 1085 "<%llx:%llx:%llx> (%x obsolete)\n", 1086 (longlong_t)vd->vdev_id, 1087 (longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep), 1088 (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst), 1089 (longlong_t)DVA_GET_VDEV(&vimep->vimep_dst), 1090 (longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst), 1091 (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst), 1092 counts[i]); 1093 } 1094 (void) printf("\n"); 1095 1096 uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd); 1097 if (obsolete_sm_object != 0) { 1098 objset_t *mos = vd->vdev_spa->spa_meta_objset; 1099 (void) printf("obsolete space map object %llu:\n", 1100 (u_longlong_t)obsolete_sm_object); 1101 ASSERT(vd->vdev_obsolete_sm != NULL); 1102 ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==, 1103 obsolete_sm_object); 1104 dump_spacemap(mos, vd->vdev_obsolete_sm); 1105 (void) printf("\n"); 1106 } 1107 } 1108 1109 static void 1110 dump_metaslabs(spa_t *spa) 1111 { 1112 vdev_t *vd, *rvd = spa->spa_root_vdev; 1113 uint64_t m, c = 0, children = rvd->vdev_children; 1114 1115 (void) printf("\nMetaslabs:\n"); 1116 1117 if (!dump_opt['d'] && zopt_objects > 0) { 1118 c = zopt_object[0]; 1119 1120 if (c >= children) 1121 (void) fatal("bad vdev id: %llu", (u_longlong_t)c); 1122 1123 if (zopt_objects > 1) { 1124 vd = rvd->vdev_child[c]; 1125 print_vdev_metaslab_header(vd); 1126 1127 for (m = 1; m < zopt_objects; m++) { 1128 if (zopt_object[m] < vd->vdev_ms_count) 1129 dump_metaslab( 1130 vd->vdev_ms[zopt_object[m]]); 1131 else 1132 (void) fprintf(stderr, "bad metaslab " 1133 "number %llu\n", 1134 (u_longlong_t)zopt_object[m]); 1135 } 1136 (void) printf("\n"); 1137 return; 1138 } 1139 children = c + 1; 1140 } 1141 for (; c < children; c++) { 1142 vd = rvd->vdev_child[c]; 1143 print_vdev_metaslab_header(vd); 1144 1145 print_vdev_indirect(vd); 1146 1147 for (m = 0; m < vd->vdev_ms_count; m++) 1148 dump_metaslab(vd->vdev_ms[m]); 1149 (void) printf("\n"); 1150 } 1151 } 1152 1153 static void 1154 dump_log_spacemaps(spa_t *spa) 1155 { 1156 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 1157 return; 1158 1159 (void) printf("\nLog Space Maps in Pool:\n"); 1160 for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg); 1161 sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) { 1162 space_map_t 
*sm = NULL; 1163 VERIFY0(space_map_open(&sm, spa_meta_objset(spa), 1164 sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT)); 1165 1166 (void) printf("Log Spacemap object %llu txg %llu\n", 1167 (u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg); 1168 dump_spacemap(spa->spa_meta_objset, sm); 1169 space_map_close(sm); 1170 } 1171 (void) printf("\n"); 1172 } 1173 1174 static void 1175 dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index) 1176 { 1177 const ddt_phys_t *ddp = dde->dde_phys; 1178 const ddt_key_t *ddk = &dde->dde_key; 1179 const char *types[4] = { "ditto", "single", "double", "triple" }; 1180 char blkbuf[BP_SPRINTF_LEN]; 1181 blkptr_t blk; 1182 1183 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 1184 if (ddp->ddp_phys_birth == 0) 1185 continue; 1186 ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk); 1187 snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk); 1188 (void) printf("index %llx refcnt %llu %s %s\n", 1189 (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt, 1190 types[p], blkbuf); 1191 } 1192 } 1193 1194 static void 1195 dump_dedup_ratio(const ddt_stat_t *dds) 1196 { 1197 double rL, rP, rD, D, dedup, compress, copies; 1198 1199 if (dds->dds_blocks == 0) 1200 return; 1201 1202 rL = (double)dds->dds_ref_lsize; 1203 rP = (double)dds->dds_ref_psize; 1204 rD = (double)dds->dds_ref_dsize; 1205 D = (double)dds->dds_dsize; 1206 1207 dedup = rD / D; 1208 compress = rL / rP; 1209 copies = rD / rP; 1210 1211 (void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, " 1212 "dedup * compress / copies = %.2f\n\n", 1213 dedup, compress, copies, dedup * compress / copies); 1214 } 1215 1216 static void 1217 dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class) 1218 { 1219 char name[DDT_NAMELEN]; 1220 ddt_entry_t dde; 1221 uint64_t walk = 0; 1222 dmu_object_info_t doi; 1223 uint64_t count, dspace, mspace; 1224 int error; 1225 1226 error = ddt_object_info(ddt, type, class, &doi); 1227 1228 if (error == ENOENT) 1229 return; 1230 ASSERT(error == 0); 1231 1232 if ((count = ddt_object_count(ddt, type, class)) == 0) 1233 return; 1234 1235 dspace = doi.doi_physical_blocks_512 << 9; 1236 mspace = doi.doi_fill_count * doi.doi_data_block_size; 1237 1238 ddt_object_name(ddt, type, class, name); 1239 1240 (void) printf("%s: %llu entries, size %llu on disk, %llu in core\n", 1241 name, 1242 (u_longlong_t)count, 1243 (u_longlong_t)(dspace / count), 1244 (u_longlong_t)(mspace / count)); 1245 1246 if (dump_opt['D'] < 3) 1247 return; 1248 1249 zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]); 1250 1251 if (dump_opt['D'] < 4) 1252 return; 1253 1254 if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE) 1255 return; 1256 1257 (void) printf("%s contents:\n\n", name); 1258 1259 while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0) 1260 dump_dde(ddt, &dde, walk); 1261 1262 ASSERT3U(error, ==, ENOENT); 1263 1264 (void) printf("\n"); 1265 } 1266 1267 static void 1268 dump_all_ddts(spa_t *spa) 1269 { 1270 ddt_histogram_t ddh_total; 1271 ddt_stat_t dds_total; 1272 1273 bzero(&ddh_total, sizeof (ddh_total)); 1274 bzero(&dds_total, sizeof (dds_total)); 1275 1276 for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { 1277 ddt_t *ddt = spa->spa_ddt[c]; 1278 for (enum ddt_type type = 0; type < DDT_TYPES; type++) { 1279 for (enum ddt_class class = 0; class < DDT_CLASSES; 1280 class++) { 1281 dump_ddt(ddt, type, class); 1282 } 1283 } 1284 } 1285 1286 ddt_get_dedup_stats(spa, &dds_total); 1287 1288 if (dds_total.dds_blocks == 0) { 1289 (void) printf("All DDTs are 
empty\n"); 1290 return; 1291 } 1292 1293 (void) printf("\n"); 1294 1295 if (dump_opt['D'] > 1) { 1296 (void) printf("DDT histogram (aggregated over all DDTs):\n"); 1297 ddt_get_dedup_histogram(spa, &ddh_total); 1298 zpool_dump_ddt(&dds_total, &ddh_total); 1299 } 1300 1301 dump_dedup_ratio(&dds_total); 1302 } 1303 1304 static void 1305 dump_dtl_seg(void *arg, uint64_t start, uint64_t size) 1306 { 1307 char *prefix = arg; 1308 1309 (void) printf("%s [%llu,%llu) length %llu\n", 1310 prefix, 1311 (u_longlong_t)start, 1312 (u_longlong_t)(start + size), 1313 (u_longlong_t)(size)); 1314 } 1315 1316 static void 1317 dump_dtl(vdev_t *vd, int indent) 1318 { 1319 spa_t *spa = vd->vdev_spa; 1320 boolean_t required; 1321 const char *name[DTL_TYPES] = { "missing", "partial", "scrub", 1322 "outage" }; 1323 char prefix[256]; 1324 1325 spa_vdev_state_enter(spa, SCL_NONE); 1326 required = vdev_dtl_required(vd); 1327 (void) spa_vdev_state_exit(spa, NULL, 0); 1328 1329 if (indent == 0) 1330 (void) printf("\nDirty time logs:\n\n"); 1331 1332 (void) printf("\t%*s%s [%s]\n", indent, "", 1333 vd->vdev_path ? vd->vdev_path : 1334 vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa), 1335 required ? "DTL-required" : "DTL-expendable"); 1336 1337 for (int t = 0; t < DTL_TYPES; t++) { 1338 range_tree_t *rt = vd->vdev_dtl[t]; 1339 if (range_tree_space(rt) == 0) 1340 continue; 1341 (void) snprintf(prefix, sizeof (prefix), "\t%*s%s", 1342 indent + 2, "", name[t]); 1343 range_tree_walk(rt, dump_dtl_seg, prefix); 1344 if (dump_opt['d'] > 5 && vd->vdev_children == 0) 1345 dump_spacemap(spa->spa_meta_objset, vd->vdev_dtl_sm); 1346 } 1347 1348 for (unsigned c = 0; c < vd->vdev_children; c++) 1349 dump_dtl(vd->vdev_child[c], indent + 4); 1350 } 1351 1352 static void 1353 dump_history(spa_t *spa) 1354 { 1355 nvlist_t **events = NULL; 1356 uint64_t resid, len, off = 0; 1357 uint_t num = 0; 1358 int error; 1359 time_t tsec; 1360 struct tm t; 1361 char tbuf[30]; 1362 char internalstr[MAXPATHLEN]; 1363 1364 char *buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); 1365 do { 1366 len = SPA_MAXBLOCKSIZE; 1367 1368 if ((error = spa_history_get(spa, &off, &len, buf)) != 0) { 1369 (void) fprintf(stderr, "Unable to read history: " 1370 "error %d\n", error); 1371 umem_free(buf, SPA_MAXBLOCKSIZE); 1372 return; 1373 } 1374 1375 if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0) 1376 break; 1377 1378 off -= resid; 1379 } while (len != 0); 1380 umem_free(buf, SPA_MAXBLOCKSIZE); 1381 1382 (void) printf("\nHistory:\n"); 1383 for (unsigned i = 0; i < num; i++) { 1384 uint64_t time, txg, ievent; 1385 char *cmd, *intstr; 1386 boolean_t printed = B_FALSE; 1387 1388 if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME, 1389 &time) != 0) 1390 goto next; 1391 if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD, 1392 &cmd) != 0) { 1393 if (nvlist_lookup_uint64(events[i], 1394 ZPOOL_HIST_INT_EVENT, &ievent) != 0) 1395 goto next; 1396 verify(nvlist_lookup_uint64(events[i], 1397 ZPOOL_HIST_TXG, &txg) == 0); 1398 verify(nvlist_lookup_string(events[i], 1399 ZPOOL_HIST_INT_STR, &intstr) == 0); 1400 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) 1401 goto next; 1402 1403 (void) snprintf(internalstr, 1404 sizeof (internalstr), 1405 "[internal %s txg:%ju] %s", 1406 zfs_history_event_names[ievent], (uintmax_t)txg, 1407 intstr); 1408 cmd = internalstr; 1409 } 1410 tsec = time; 1411 (void) localtime_r(&tsec, &t); 1412 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 1413 (void) printf("%s %s\n", tbuf, cmd); 1414 printed = B_TRUE; 1415 1416 next: 
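		/* with -hh (or more), also dump the raw nvlist of each record */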
1417 if (dump_opt['h'] > 1) { 1418 if (!printed) 1419 (void) printf("unrecognized record:\n"); 1420 dump_nvlist(events[i], 2); 1421 } 1422 } 1423 } 1424 1425 /*ARGSUSED*/ 1426 static void 1427 dump_dnode(objset_t *os, uint64_t object, void *data, size_t size) 1428 { 1429 } 1430 1431 static uint64_t 1432 blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp, 1433 const zbookmark_phys_t *zb) 1434 { 1435 if (dnp == NULL) { 1436 ASSERT(zb->zb_level < 0); 1437 if (zb->zb_object == 0) 1438 return (zb->zb_blkid); 1439 return (zb->zb_blkid * BP_GET_LSIZE(bp)); 1440 } 1441 1442 ASSERT(zb->zb_level >= 0); 1443 1444 return ((zb->zb_blkid << 1445 (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) * 1446 dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); 1447 } 1448 1449 static void 1450 snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp) 1451 { 1452 const dva_t *dva = bp->blk_dva; 1453 unsigned int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1; 1454 1455 if (dump_opt['b'] >= 6) { 1456 snprintf_blkptr(blkbuf, buflen, bp); 1457 return; 1458 } 1459 1460 if (BP_IS_EMBEDDED(bp)) { 1461 (void) sprintf(blkbuf, 1462 "EMBEDDED et=%u %llxL/%llxP B=%llu", 1463 (int)BPE_GET_ETYPE(bp), 1464 (u_longlong_t)BPE_GET_LSIZE(bp), 1465 (u_longlong_t)BPE_GET_PSIZE(bp), 1466 (u_longlong_t)bp->blk_birth); 1467 return; 1468 } 1469 1470 blkbuf[0] = '\0'; 1471 for (unsigned int i = 0; i < ndvas; i++) 1472 (void) snprintf(blkbuf + strlen(blkbuf), 1473 buflen - strlen(blkbuf), "%llu:%llx:%llx ", 1474 (u_longlong_t)DVA_GET_VDEV(&dva[i]), 1475 (u_longlong_t)DVA_GET_OFFSET(&dva[i]), 1476 (u_longlong_t)DVA_GET_ASIZE(&dva[i])); 1477 1478 if (BP_IS_HOLE(bp)) { 1479 (void) snprintf(blkbuf + strlen(blkbuf), 1480 buflen - strlen(blkbuf), 1481 "%llxL B=%llu", 1482 (u_longlong_t)BP_GET_LSIZE(bp), 1483 (u_longlong_t)bp->blk_birth); 1484 } else { 1485 (void) snprintf(blkbuf + strlen(blkbuf), 1486 buflen - strlen(blkbuf), 1487 "%llxL/%llxP F=%llu B=%llu/%llu", 1488 (u_longlong_t)BP_GET_LSIZE(bp), 1489 (u_longlong_t)BP_GET_PSIZE(bp), 1490 (u_longlong_t)BP_GET_FILL(bp), 1491 (u_longlong_t)bp->blk_birth, 1492 (u_longlong_t)BP_PHYSICAL_BIRTH(bp)); 1493 } 1494 } 1495 1496 static void 1497 print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb, 1498 const dnode_phys_t *dnp) 1499 { 1500 char blkbuf[BP_SPRINTF_LEN]; 1501 int l; 1502 1503 if (!BP_IS_EMBEDDED(bp)) { 1504 ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type); 1505 ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level); 1506 } 1507 1508 (void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb)); 1509 1510 ASSERT(zb->zb_level >= 0); 1511 1512 for (l = dnp->dn_nlevels - 1; l >= -1; l--) { 1513 if (l == zb->zb_level) { 1514 (void) printf("L%llx", (u_longlong_t)zb->zb_level); 1515 } else { 1516 (void) printf(" "); 1517 } 1518 } 1519 1520 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp); 1521 (void) printf("%s\n", blkbuf); 1522 } 1523 1524 static int 1525 visit_indirect(spa_t *spa, const dnode_phys_t *dnp, 1526 blkptr_t *bp, const zbookmark_phys_t *zb) 1527 { 1528 int err = 0; 1529 1530 if (bp->blk_birth == 0) 1531 return (0); 1532 1533 print_indirect(bp, zb, dnp); 1534 1535 if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) { 1536 arc_flags_t flags = ARC_FLAG_WAIT; 1537 int i; 1538 blkptr_t *cbp; 1539 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1540 arc_buf_t *buf; 1541 uint64_t fill = 0; 1542 1543 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 1544 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); 1545 if (err) 1546 return (err); 1547 ASSERT(buf->b_data); 1548 
1549 /* recursively visit blocks below this */ 1550 cbp = buf->b_data; 1551 for (i = 0; i < epb; i++, cbp++) { 1552 zbookmark_phys_t czb; 1553 1554 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 1555 zb->zb_level - 1, 1556 zb->zb_blkid * epb + i); 1557 err = visit_indirect(spa, dnp, cbp, &czb); 1558 if (err) 1559 break; 1560 fill += BP_GET_FILL(cbp); 1561 } 1562 if (!err) 1563 ASSERT3U(fill, ==, BP_GET_FILL(bp)); 1564 arc_buf_destroy(buf, &buf); 1565 } 1566 1567 return (err); 1568 } 1569 1570 /*ARGSUSED*/ 1571 static void 1572 dump_indirect(dnode_t *dn) 1573 { 1574 dnode_phys_t *dnp = dn->dn_phys; 1575 int j; 1576 zbookmark_phys_t czb; 1577 1578 (void) printf("Indirect blocks:\n"); 1579 1580 SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset), 1581 dn->dn_object, dnp->dn_nlevels - 1, 0); 1582 for (j = 0; j < dnp->dn_nblkptr; j++) { 1583 czb.zb_blkid = j; 1584 (void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp, 1585 &dnp->dn_blkptr[j], &czb); 1586 } 1587 1588 (void) printf("\n"); 1589 } 1590 1591 /*ARGSUSED*/ 1592 static void 1593 dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size) 1594 { 1595 dsl_dir_phys_t *dd = data; 1596 time_t crtime; 1597 char nice[32]; 1598 1599 /* make sure nicenum has enough space */ 1600 CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ); 1601 1602 if (dd == NULL) 1603 return; 1604 1605 ASSERT3U(size, >=, sizeof (dsl_dir_phys_t)); 1606 1607 crtime = dd->dd_creation_time; 1608 (void) printf("\t\tcreation_time = %s", ctime(&crtime)); 1609 (void) printf("\t\thead_dataset_obj = %llu\n", 1610 (u_longlong_t)dd->dd_head_dataset_obj); 1611 (void) printf("\t\tparent_dir_obj = %llu\n", 1612 (u_longlong_t)dd->dd_parent_obj); 1613 (void) printf("\t\torigin_obj = %llu\n", 1614 (u_longlong_t)dd->dd_origin_obj); 1615 (void) printf("\t\tchild_dir_zapobj = %llu\n", 1616 (u_longlong_t)dd->dd_child_dir_zapobj); 1617 zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice)); 1618 (void) printf("\t\tused_bytes = %s\n", nice); 1619 zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice)); 1620 (void) printf("\t\tcompressed_bytes = %s\n", nice); 1621 zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice)); 1622 (void) printf("\t\tuncompressed_bytes = %s\n", nice); 1623 zdb_nicenum(dd->dd_quota, nice, sizeof (nice)); 1624 (void) printf("\t\tquota = %s\n", nice); 1625 zdb_nicenum(dd->dd_reserved, nice, sizeof (nice)); 1626 (void) printf("\t\treserved = %s\n", nice); 1627 (void) printf("\t\tprops_zapobj = %llu\n", 1628 (u_longlong_t)dd->dd_props_zapobj); 1629 (void) printf("\t\tdeleg_zapobj = %llu\n", 1630 (u_longlong_t)dd->dd_deleg_zapobj); 1631 (void) printf("\t\tflags = %llx\n", 1632 (u_longlong_t)dd->dd_flags); 1633 1634 #define DO(which) \ 1635 zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \ 1636 sizeof (nice)); \ 1637 (void) printf("\t\tused_breakdown[" #which "] = %s\n", nice) 1638 DO(HEAD); 1639 DO(SNAP); 1640 DO(CHILD); 1641 DO(CHILD_RSRV); 1642 DO(REFRSRV); 1643 #undef DO 1644 (void) printf("\t\tclones = %llu\n", 1645 (u_longlong_t)dd->dd_clones); 1646 } 1647 1648 /*ARGSUSED*/ 1649 static void 1650 dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size) 1651 { 1652 dsl_dataset_phys_t *ds = data; 1653 time_t crtime; 1654 char used[32], compressed[32], uncompressed[32], unique[32]; 1655 char blkbuf[BP_SPRINTF_LEN]; 1656 1657 /* make sure nicenum has enough space */ 1658 CTASSERT(sizeof (used) >= NN_NUMBUF_SZ); 1659 CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ); 1660 CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ); 1661 CTASSERT(sizeof 
(unique) >= NN_NUMBUF_SZ); 1662 1663 if (ds == NULL) 1664 return; 1665 1666 ASSERT(size == sizeof (*ds)); 1667 crtime = ds->ds_creation_time; 1668 zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used)); 1669 zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed)); 1670 zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed, 1671 sizeof (uncompressed)); 1672 zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique)); 1673 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp); 1674 1675 (void) printf("\t\tdir_obj = %llu\n", 1676 (u_longlong_t)ds->ds_dir_obj); 1677 (void) printf("\t\tprev_snap_obj = %llu\n", 1678 (u_longlong_t)ds->ds_prev_snap_obj); 1679 (void) printf("\t\tprev_snap_txg = %llu\n", 1680 (u_longlong_t)ds->ds_prev_snap_txg); 1681 (void) printf("\t\tnext_snap_obj = %llu\n", 1682 (u_longlong_t)ds->ds_next_snap_obj); 1683 (void) printf("\t\tsnapnames_zapobj = %llu\n", 1684 (u_longlong_t)ds->ds_snapnames_zapobj); 1685 (void) printf("\t\tnum_children = %llu\n", 1686 (u_longlong_t)ds->ds_num_children); 1687 (void) printf("\t\tuserrefs_obj = %llu\n", 1688 (u_longlong_t)ds->ds_userrefs_obj); 1689 (void) printf("\t\tcreation_time = %s", ctime(&crtime)); 1690 (void) printf("\t\tcreation_txg = %llu\n", 1691 (u_longlong_t)ds->ds_creation_txg); 1692 (void) printf("\t\tdeadlist_obj = %llu\n", 1693 (u_longlong_t)ds->ds_deadlist_obj); 1694 (void) printf("\t\tused_bytes = %s\n", used); 1695 (void) printf("\t\tcompressed_bytes = %s\n", compressed); 1696 (void) printf("\t\tuncompressed_bytes = %s\n", uncompressed); 1697 (void) printf("\t\tunique = %s\n", unique); 1698 (void) printf("\t\tfsid_guid = %llu\n", 1699 (u_longlong_t)ds->ds_fsid_guid); 1700 (void) printf("\t\tguid = %llu\n", 1701 (u_longlong_t)ds->ds_guid); 1702 (void) printf("\t\tflags = %llx\n", 1703 (u_longlong_t)ds->ds_flags); 1704 (void) printf("\t\tnext_clones_obj = %llu\n", 1705 (u_longlong_t)ds->ds_next_clones_obj); 1706 (void) printf("\t\tprops_obj = %llu\n", 1707 (u_longlong_t)ds->ds_props_obj); 1708 (void) printf("\t\tbp = %s\n", blkbuf); 1709 } 1710 1711 /* ARGSUSED */ 1712 static int 1713 dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1714 { 1715 char blkbuf[BP_SPRINTF_LEN]; 1716 1717 if (bp->blk_birth != 0) { 1718 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 1719 (void) printf("\t%s\n", blkbuf); 1720 } 1721 return (0); 1722 } 1723 1724 static void 1725 dump_bptree(objset_t *os, uint64_t obj, const char *name) 1726 { 1727 char bytes[32]; 1728 bptree_phys_t *bt; 1729 dmu_buf_t *db; 1730 1731 /* make sure nicenum has enough space */ 1732 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ); 1733 1734 if (dump_opt['d'] < 3) 1735 return; 1736 1737 VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db)); 1738 bt = db->db_data; 1739 zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes)); 1740 (void) printf("\n %s: %llu datasets, %s\n", 1741 name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes); 1742 dmu_buf_rele(db, FTAG); 1743 1744 if (dump_opt['d'] < 5) 1745 return; 1746 1747 (void) printf("\n"); 1748 1749 (void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL); 1750 } 1751 1752 /* ARGSUSED */ 1753 static int 1754 dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1755 { 1756 char blkbuf[BP_SPRINTF_LEN]; 1757 1758 ASSERT(bp->blk_birth != 0); 1759 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp); 1760 (void) printf("\t%s\n", blkbuf); 1761 return (0); 1762 } 1763 1764 static void 1765 dump_full_bpobj(bpobj_t *bpo, const char *name, int indent) 1766 { 1767 char bytes[32]; 1768 char comp[32]; 1769 
char uncomp[32]; 1770 1771 /* make sure nicenum has enough space */ 1772 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ); 1773 CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ); 1774 CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ); 1775 1776 if (dump_opt['d'] < 3) 1777 return; 1778 1779 zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes)); 1780 if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) { 1781 zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp)); 1782 zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp)); 1783 (void) printf(" %*s: object %llu, %llu local blkptrs, " 1784 "%llu subobjs in object %llu, %s (%s/%s comp)\n", 1785 indent * 8, name, 1786 (u_longlong_t)bpo->bpo_object, 1787 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, 1788 (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs, 1789 (u_longlong_t)bpo->bpo_phys->bpo_subobjs, 1790 bytes, comp, uncomp); 1791 1792 for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) { 1793 uint64_t subobj; 1794 bpobj_t subbpo; 1795 int error; 1796 VERIFY0(dmu_read(bpo->bpo_os, 1797 bpo->bpo_phys->bpo_subobjs, 1798 i * sizeof (subobj), sizeof (subobj), &subobj, 0)); 1799 error = bpobj_open(&subbpo, bpo->bpo_os, subobj); 1800 if (error != 0) { 1801 (void) printf("ERROR %u while trying to open " 1802 "subobj id %llu\n", 1803 error, (u_longlong_t)subobj); 1804 continue; 1805 } 1806 dump_full_bpobj(&subbpo, "subobj", indent + 1); 1807 bpobj_close(&subbpo); 1808 } 1809 } else { 1810 (void) printf(" %*s: object %llu, %llu blkptrs, %s\n", 1811 indent * 8, name, 1812 (u_longlong_t)bpo->bpo_object, 1813 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, 1814 bytes); 1815 } 1816 1817 if (dump_opt['d'] < 5) 1818 return; 1819 1820 1821 if (indent == 0) { 1822 (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL); 1823 (void) printf("\n"); 1824 } 1825 } 1826 1827 static void 1828 bpobj_count_refd(bpobj_t *bpo) 1829 { 1830 mos_obj_refd(bpo->bpo_object); 1831 1832 if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) { 1833 mos_obj_refd(bpo->bpo_phys->bpo_subobjs); 1834 for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) { 1835 uint64_t subobj; 1836 bpobj_t subbpo; 1837 int error; 1838 VERIFY0(dmu_read(bpo->bpo_os, 1839 bpo->bpo_phys->bpo_subobjs, 1840 i * sizeof (subobj), sizeof (subobj), &subobj, 0)); 1841 error = bpobj_open(&subbpo, bpo->bpo_os, subobj); 1842 if (error != 0) { 1843 (void) printf("ERROR %u while trying to open " 1844 "subobj id %llu\n", 1845 error, (u_longlong_t)subobj); 1846 continue; 1847 } 1848 bpobj_count_refd(&subbpo); 1849 bpobj_close(&subbpo); 1850 } 1851 } 1852 } 1853 1854 static void 1855 dump_deadlist(dsl_deadlist_t *dl) 1856 { 1857 dsl_deadlist_entry_t *dle; 1858 uint64_t unused; 1859 char bytes[32]; 1860 char comp[32]; 1861 char uncomp[32]; 1862 uint64_t empty_bpobj = 1863 dmu_objset_spa(dl->dl_os)->spa_dsl_pool->dp_empty_bpobj; 1864 1865 /* force the tree to be loaded */ 1866 dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused); 1867 1868 if (dl->dl_oldfmt) { 1869 if (dl->dl_bpobj.bpo_object != empty_bpobj) 1870 bpobj_count_refd(&dl->dl_bpobj); 1871 } else { 1872 mos_obj_refd(dl->dl_object); 1873 for (dle = avl_first(&dl->dl_tree); dle; 1874 dle = AVL_NEXT(&dl->dl_tree, dle)) { 1875 if (dle->dle_bpobj.bpo_object != empty_bpobj) 1876 bpobj_count_refd(&dle->dle_bpobj); 1877 } 1878 } 1879 1880 /* make sure nicenum has enough space */ 1881 CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ); 1882 CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ); 1883 CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ); 1884 1885 if 
(dump_opt['d'] < 3) 1886 return; 1887 1888 if (dl->dl_oldfmt) { 1889 dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0); 1890 return; 1891 } 1892 1893 zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes)); 1894 zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp)); 1895 zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp)); 1896 (void) printf("\n Deadlist: %s (%s/%s comp)\n", 1897 bytes, comp, uncomp); 1898 1899 if (dump_opt['d'] < 4) 1900 return; 1901 1902 (void) printf("\n"); 1903 1904 for (dle = avl_first(&dl->dl_tree); dle; 1905 dle = AVL_NEXT(&dl->dl_tree, dle)) { 1906 if (dump_opt['d'] >= 5) { 1907 char buf[128]; 1908 (void) snprintf(buf, sizeof (buf), 1909 "mintxg %llu -> obj %llu", 1910 (longlong_t)dle->dle_mintxg, 1911 (longlong_t)dle->dle_bpobj.bpo_object); 1912 1913 dump_full_bpobj(&dle->dle_bpobj, buf, 0); 1914 } else { 1915 (void) printf("mintxg %llu -> obj %llu\n", 1916 (longlong_t)dle->dle_mintxg, 1917 (longlong_t)dle->dle_bpobj.bpo_object); 1918 } 1919 } 1920 } 1921 1922 static avl_tree_t idx_tree; 1923 static avl_tree_t domain_tree; 1924 static boolean_t fuid_table_loaded; 1925 static objset_t *sa_os = NULL; 1926 static sa_attr_type_t *sa_attr_table = NULL; 1927 1928 static int 1929 open_objset(const char *path, dmu_objset_type_t type, void *tag, objset_t **osp) 1930 { 1931 int err; 1932 uint64_t sa_attrs = 0; 1933 uint64_t version = 0; 1934 1935 VERIFY3P(sa_os, ==, NULL); 1936 err = dmu_objset_own(path, type, B_TRUE, B_FALSE, tag, osp); 1937 if (err != 0) { 1938 (void) fprintf(stderr, "failed to own dataset '%s': %s\n", path, 1939 strerror(err)); 1940 return (err); 1941 } 1942 1943 if (dmu_objset_type(*osp) == DMU_OST_ZFS && !(*osp)->os_encrypted) { 1944 (void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR, 1945 8, 1, &version); 1946 if (version >= ZPL_VERSION_SA) { 1947 (void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 1948 8, 1, &sa_attrs); 1949 } 1950 err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END, 1951 &sa_attr_table); 1952 if (err != 0) { 1953 (void) fprintf(stderr, "sa_setup failed: %s\n", 1954 strerror(err)); 1955 dmu_objset_disown(*osp, B_FALSE, tag); 1956 *osp = NULL; 1957 } 1958 } 1959 sa_os = *osp; 1960 1961 return (0); 1962 } 1963 1964 static void 1965 close_objset(objset_t *os, void *tag) 1966 { 1967 VERIFY3P(os, ==, sa_os); 1968 if (os->os_sa != NULL) 1969 sa_tear_down(os); 1970 dmu_objset_disown(os, B_FALSE, tag); 1971 sa_attr_table = NULL; 1972 sa_os = NULL; 1973 } 1974 1975 static void 1976 fuid_table_destroy() 1977 { 1978 if (fuid_table_loaded) { 1979 zfs_fuid_table_destroy(&idx_tree, &domain_tree); 1980 fuid_table_loaded = B_FALSE; 1981 } 1982 } 1983 1984 /* 1985 * print uid or gid information. 1986 * For normal POSIX id just the id is printed in decimal format. 1987 * For CIFS files with FUID the fuid is printed in hex followed by 1988 * the domain-rid string. 
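 * (i.e. print_idstr() below emits either "<uid|gid> <id-in-decimal>" or,
 * for the FUID case, "<uid|gid> <fuid-in-hex> [<domain>-<rid>]".)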
1989 */ 1990 static void 1991 print_idstr(uint64_t id, const char *id_type) 1992 { 1993 if (FUID_INDEX(id)) { 1994 char *domain; 1995 1996 domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id)); 1997 (void) printf("\t%s %llx [%s-%d]\n", id_type, 1998 (u_longlong_t)id, domain, (int)FUID_RID(id)); 1999 } else { 2000 (void) printf("\t%s %llu\n", id_type, (u_longlong_t)id); 2001 } 2002 2003 } 2004 2005 static void 2006 dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid) 2007 { 2008 uint32_t uid_idx, gid_idx; 2009 2010 uid_idx = FUID_INDEX(uid); 2011 gid_idx = FUID_INDEX(gid); 2012 2013 /* Load domain table, if not already loaded */ 2014 if (!fuid_table_loaded && (uid_idx || gid_idx)) { 2015 uint64_t fuid_obj; 2016 2017 /* first find the fuid object. It lives in the master node */ 2018 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 2019 8, 1, &fuid_obj) == 0); 2020 zfs_fuid_avl_tree_create(&idx_tree, &domain_tree); 2021 (void) zfs_fuid_table_load(os, fuid_obj, 2022 &idx_tree, &domain_tree); 2023 fuid_table_loaded = B_TRUE; 2024 } 2025 2026 print_idstr(uid, "uid"); 2027 print_idstr(gid, "gid"); 2028 } 2029 2030 /*ARGSUSED*/ 2031 static void 2032 dump_znode(objset_t *os, uint64_t object, void *data, size_t size) 2033 { 2034 char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */ 2035 sa_handle_t *hdl; 2036 uint64_t xattr, rdev, gen; 2037 uint64_t uid, gid, mode, fsize, parent, links; 2038 uint64_t pflags; 2039 uint64_t acctm[2], modtm[2], chgtm[2], crtm[2]; 2040 time_t z_crtime, z_atime, z_mtime, z_ctime; 2041 sa_bulk_attr_t bulk[12]; 2042 int idx = 0; 2043 int error; 2044 2045 VERIFY3P(os, ==, sa_os); 2046 if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) { 2047 (void) printf("Failed to get handle for SA znode\n"); 2048 return; 2049 } 2050 2051 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8); 2052 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8); 2053 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL, 2054 &links, 8); 2055 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8); 2056 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL, 2057 &mode, 8); 2058 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT], 2059 NULL, &parent, 8); 2060 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL, 2061 &fsize, 8); 2062 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL, 2063 acctm, 16); 2064 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL, 2065 modtm, 16); 2066 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL, 2067 crtm, 16); 2068 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL, 2069 chgtm, 16); 2070 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL, 2071 &pflags, 8); 2072 2073 if (sa_bulk_lookup(hdl, bulk, idx)) { 2074 (void) sa_handle_destroy(hdl); 2075 return; 2076 } 2077 2078 z_crtime = (time_t)crtm[0]; 2079 z_atime = (time_t)acctm[0]; 2080 z_mtime = (time_t)modtm[0]; 2081 z_ctime = (time_t)chgtm[0]; 2082 2083 if (dump_opt['d'] > 4) { 2084 error = zfs_obj_to_path(os, object, path, sizeof (path)); 2085 if (error == ESTALE) { 2086 (void) snprintf(path, sizeof (path), "on delete queue"); 2087 } else if (error != 0) { 2088 leaked_objects++; 2089 (void) snprintf(path, sizeof (path), 2090 "path not found, possibly leaked"); 2091 } 2092 (void) printf("\tpath %s\n", path); 2093 } 2094 dump_uidgid(os, uid, gid); 2095 (void) printf("\tatime %s", ctime(&z_atime)); 2096 (void) printf("\tmtime %s", ctime(&z_mtime)); 2097 (void) printf("\tctime %s", 
ctime(&z_ctime)); 2098 (void) printf("\tcrtime %s", ctime(&z_crtime)); 2099 (void) printf("\tgen %llu\n", (u_longlong_t)gen); 2100 (void) printf("\tmode %llo\n", (u_longlong_t)mode); 2101 (void) printf("\tsize %llu\n", (u_longlong_t)fsize); 2102 (void) printf("\tparent %llu\n", (u_longlong_t)parent); 2103 (void) printf("\tlinks %llu\n", (u_longlong_t)links); 2104 (void) printf("\tpflags %llx\n", (u_longlong_t)pflags); 2105 if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) { 2106 uint64_t projid; 2107 2108 if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid, 2109 sizeof (uint64_t)) == 0) 2110 (void) printf("\tprojid %llu\n", (u_longlong_t)projid); 2111 } 2112 if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr, 2113 sizeof (uint64_t)) == 0) 2114 (void) printf("\txattr %llu\n", (u_longlong_t)xattr); 2115 if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev, 2116 sizeof (uint64_t)) == 0) 2117 (void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev); 2118 sa_handle_destroy(hdl); 2119 } 2120 2121 /*ARGSUSED*/ 2122 static void 2123 dump_acl(objset_t *os, uint64_t object, void *data, size_t size) 2124 { 2125 } 2126 2127 /*ARGSUSED*/ 2128 static void 2129 dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size) 2130 { 2131 } 2132 2133 2134 static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = { 2135 dump_none, /* unallocated */ 2136 dump_zap, /* object directory */ 2137 dump_uint64, /* object array */ 2138 dump_none, /* packed nvlist */ 2139 dump_packed_nvlist, /* packed nvlist size */ 2140 dump_none, /* bpobj */ 2141 dump_bpobj, /* bpobj header */ 2142 dump_none, /* SPA space map header */ 2143 dump_none, /* SPA space map */ 2144 dump_none, /* ZIL intent log */ 2145 dump_dnode, /* DMU dnode */ 2146 dump_dmu_objset, /* DMU objset */ 2147 dump_dsl_dir, /* DSL directory */ 2148 dump_zap, /* DSL directory child map */ 2149 dump_zap, /* DSL dataset snap map */ 2150 dump_zap, /* DSL props */ 2151 dump_dsl_dataset, /* DSL dataset */ 2152 dump_znode, /* ZFS znode */ 2153 dump_acl, /* ZFS V0 ACL */ 2154 dump_uint8, /* ZFS plain file */ 2155 dump_zpldir, /* ZFS directory */ 2156 dump_zap, /* ZFS master node */ 2157 dump_zap, /* ZFS delete queue */ 2158 dump_uint8, /* zvol object */ 2159 dump_zap, /* zvol prop */ 2160 dump_uint8, /* other uint8[] */ 2161 dump_uint64, /* other uint64[] */ 2162 dump_zap, /* other ZAP */ 2163 dump_zap, /* persistent error log */ 2164 dump_uint8, /* SPA history */ 2165 dump_history_offsets, /* SPA history offsets */ 2166 dump_zap, /* Pool properties */ 2167 dump_zap, /* DSL permissions */ 2168 dump_acl, /* ZFS ACL */ 2169 dump_uint8, /* ZFS SYSACL */ 2170 dump_none, /* FUID nvlist */ 2171 dump_packed_nvlist, /* FUID nvlist size */ 2172 dump_zap, /* DSL dataset next clones */ 2173 dump_zap, /* DSL scrub queue */ 2174 dump_zap, /* ZFS user/group/project used */ 2175 dump_zap, /* ZFS user/group/project quota */ 2176 dump_zap, /* snapshot refcount tags */ 2177 dump_ddt_zap, /* DDT ZAP object */ 2178 dump_zap, /* DDT statistics */ 2179 dump_znode, /* SA object */ 2180 dump_zap, /* SA Master Node */ 2181 dump_sa_attrs, /* SA attribute registration */ 2182 dump_sa_layouts, /* SA attribute layouts */ 2183 dump_zap, /* DSL scrub translations */ 2184 dump_none, /* fake dedup BP */ 2185 dump_zap, /* deadlist */ 2186 dump_none, /* deadlist hdr */ 2187 dump_zap, /* dsl clones */ 2188 dump_bpobj_subobjs, /* bpobj subobjs */ 2189 dump_unknown, /* Unknown type, must be last */ 2190 }; 2191 2192 static void 2193 dump_object(objset_t *os, uint64_t object, int 
verbosity, int *print_header, 2194 uint64_t *dnode_slots_used) 2195 { 2196 dmu_buf_t *db = NULL; 2197 dmu_object_info_t doi; 2198 dnode_t *dn; 2199 boolean_t dnode_held = B_FALSE; 2200 void *bonus = NULL; 2201 size_t bsize = 0; 2202 char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32]; 2203 char bonus_size[32]; 2204 char aux[50]; 2205 int error; 2206 2207 /* make sure nicenum has enough space */ 2208 CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ); 2209 CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ); 2210 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ); 2211 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ); 2212 CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ); 2213 2214 if (*print_header) { 2215 (void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n", 2216 "Object", "lvl", "iblk", "dblk", "dsize", "dnsize", 2217 "lsize", "%full", "type"); 2218 *print_header = 0; 2219 } 2220 2221 if (object == 0) { 2222 dn = DMU_META_DNODE(os); 2223 dmu_object_info_from_dnode(dn, &doi); 2224 } else { 2225 /* 2226 * Encrypted datasets will have sensitive bonus buffers 2227 * encrypted. Therefore we cannot hold the bonus buffer and 2228 * must hold the dnode itself instead. 2229 */ 2230 error = dmu_object_info(os, object, &doi); 2231 if (error) 2232 fatal("dmu_object_info() failed, errno %u", error); 2233 2234 if (os->os_encrypted && 2235 DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) { 2236 error = dnode_hold(os, object, FTAG, &dn); 2237 if (error) 2238 fatal("dnode_hold() failed, errno %u", error); 2239 dnode_held = B_TRUE; 2240 } else { 2241 error = dmu_bonus_hold(os, object, FTAG, &db); 2242 if (error) 2243 fatal("dmu_bonus_hold(%llu) failed, errno %u", 2244 object, error); 2245 bonus = db->db_data; 2246 bsize = db->db_size; 2247 dn = DB_DNODE((dmu_buf_impl_t *)db); 2248 } 2249 } 2250 2251 if (dnode_slots_used != NULL) 2252 *dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE; 2253 2254 zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk)); 2255 zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk)); 2256 zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize)); 2257 zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize)); 2258 zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size)); 2259 zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize)); 2260 (void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count * 2261 doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) / 2262 doi.doi_max_offset); 2263 2264 aux[0] = '\0'; 2265 2266 if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) { 2267 (void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)", 2268 ZDB_CHECKSUM_NAME(doi.doi_checksum)); 2269 } 2270 2271 if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) { 2272 (void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)", 2273 ZDB_COMPRESS_NAME(doi.doi_compress)); 2274 } 2275 2276 (void) printf("%10" PRIu64 2277 " %3u %5s %5s %5s %5s %5s %6s %s%s\n", 2278 object, doi.doi_indirection, iblk, dblk, 2279 asize, dnsize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux); 2280 2281 if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) { 2282 (void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n", 2283 "", "", "", "", "", "", bonus_size, "bonus", 2284 ZDB_OT_NAME(doi.doi_bonus_type)); 2285 } 2286 2287 if (verbosity >= 4) { 2288 (void) printf("\tdnode flags: %s%s%s%s\n", 2289 (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ? 2290 "USED_BYTES " : "", 2291 (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ? 
2292 "USERUSED_ACCOUNTED " : "", 2293 (dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ? 2294 "USEROBJUSED_ACCOUNTED " : "", 2295 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? 2296 "SPILL_BLKPTR" : ""); 2297 (void) printf("\tdnode maxblkid: %llu\n", 2298 (longlong_t)dn->dn_phys->dn_maxblkid); 2299 2300 if (!dnode_held) { 2301 object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os, 2302 object, bonus, bsize); 2303 } else { 2304 (void) printf("\t\t(bonus encrypted)\n"); 2305 } 2306 2307 if (!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type)) { 2308 object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object, 2309 NULL, 0); 2310 } else { 2311 (void) printf("\t\t(object encrypted)\n"); 2312 } 2313 2314 *print_header = 1; 2315 } 2316 2317 if (verbosity >= 5) 2318 dump_indirect(dn); 2319 2320 if (verbosity >= 5) { 2321 /* 2322 * Report the list of segments that comprise the object. 2323 */ 2324 uint64_t start = 0; 2325 uint64_t end; 2326 uint64_t blkfill = 1; 2327 int minlvl = 1; 2328 2329 if (dn->dn_type == DMU_OT_DNODE) { 2330 minlvl = 0; 2331 blkfill = DNODES_PER_BLOCK; 2332 } 2333 2334 for (;;) { 2335 char segsize[32]; 2336 /* make sure nicenum has enough space */ 2337 CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ); 2338 error = dnode_next_offset(dn, 2339 0, &start, minlvl, blkfill, 0); 2340 if (error) 2341 break; 2342 end = start; 2343 error = dnode_next_offset(dn, 2344 DNODE_FIND_HOLE, &end, minlvl, blkfill, 0); 2345 zdb_nicenum(end - start, segsize, sizeof (segsize)); 2346 (void) printf("\t\tsegment [%016llx, %016llx)" 2347 " size %5s\n", (u_longlong_t)start, 2348 (u_longlong_t)end, segsize); 2349 if (error) 2350 break; 2351 start = end; 2352 } 2353 } 2354 2355 if (db != NULL) 2356 dmu_buf_rele(db, FTAG); 2357 if (dnode_held) 2358 dnode_rele(dn, FTAG); 2359 } 2360 2361 static void 2362 count_dir_mos_objects(dsl_dir_t *dd) 2363 { 2364 mos_obj_refd(dd->dd_object); 2365 mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj); 2366 mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj); 2367 mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj); 2368 mos_obj_refd(dsl_dir_phys(dd)->dd_clones); 2369 } 2370 2371 static void 2372 count_ds_mos_objects(dsl_dataset_t *ds) 2373 { 2374 mos_obj_refd(ds->ds_object); 2375 mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj); 2376 mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj); 2377 mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj); 2378 mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj); 2379 2380 if (!dsl_dataset_is_snapshot(ds)) { 2381 count_dir_mos_objects(ds->ds_dir); 2382 } 2383 } 2384 2385 static const char *objset_types[DMU_OST_NUMTYPES] = { 2386 "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" }; 2387 2388 static void 2389 dump_dir(objset_t *os) 2390 { 2391 dmu_objset_stats_t dds; 2392 uint64_t object, object_count; 2393 uint64_t refdbytes, usedobjs, scratch; 2394 char numbuf[32]; 2395 char blkbuf[BP_SPRINTF_LEN + 20]; 2396 char osname[ZFS_MAX_DATASET_NAME_LEN]; 2397 const char *type = "UNKNOWN"; 2398 int verbosity = dump_opt['d']; 2399 int print_header = 1; 2400 unsigned i; 2401 int error; 2402 uint64_t total_slots_used = 0; 2403 uint64_t max_slot_used = 0; 2404 uint64_t dnode_slots; 2405 2406 /* make sure nicenum has enough space */ 2407 CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ); 2408 2409 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 2410 dmu_objset_fast_stat(os, &dds); 2411 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 2412 2413 if (dds.dds_type < DMU_OST_NUMTYPES) 2414 type = objset_types[dds.dds_type]; 2415 2416 if (dds.dds_type == 
DMU_OST_META) { 2417 dds.dds_creation_txg = TXG_INITIAL; 2418 usedobjs = BP_GET_FILL(os->os_rootbp); 2419 refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)-> 2420 dd_used_bytes; 2421 } else { 2422 dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch); 2423 } 2424 2425 ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp)); 2426 2427 zdb_nicenum(refdbytes, numbuf, sizeof (numbuf)); 2428 2429 if (verbosity >= 4) { 2430 (void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp "); 2431 (void) snprintf_blkptr(blkbuf + strlen(blkbuf), 2432 sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp); 2433 } else { 2434 blkbuf[0] = '\0'; 2435 } 2436 2437 dmu_objset_name(os, osname); 2438 2439 (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, " 2440 "%s, %llu objects%s%s\n", 2441 osname, type, (u_longlong_t)dmu_objset_id(os), 2442 (u_longlong_t)dds.dds_creation_txg, 2443 numbuf, (u_longlong_t)usedobjs, blkbuf, 2444 (dds.dds_inconsistent) ? " (inconsistent)" : ""); 2445 2446 if (zopt_objects != 0) { 2447 for (i = 0; i < zopt_objects; i++) 2448 dump_object(os, zopt_object[i], verbosity, 2449 &print_header, NULL); 2450 (void) printf("\n"); 2451 return; 2452 } 2453 2454 if (dump_opt['i'] != 0 || verbosity >= 2) 2455 dump_intent_log(dmu_objset_zil(os)); 2456 2457 if (dmu_objset_ds(os) != NULL) { 2458 dsl_dataset_t *ds = dmu_objset_ds(os); 2459 dump_deadlist(&ds->ds_deadlist); 2460 2461 if (dsl_dataset_remap_deadlist_exists(ds)) { 2462 (void) printf("ds_remap_deadlist:\n"); 2463 dump_deadlist(&ds->ds_remap_deadlist); 2464 } 2465 count_ds_mos_objects(ds); 2466 } 2467 2468 if (verbosity < 2) 2469 return; 2470 2471 if (BP_IS_HOLE(os->os_rootbp)) 2472 return; 2473 2474 dump_object(os, 0, verbosity, &print_header, NULL); 2475 object_count = 0; 2476 if (DMU_USERUSED_DNODE(os) != NULL && 2477 DMU_USERUSED_DNODE(os)->dn_type != 0) { 2478 dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header, 2479 NULL); 2480 dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header, 2481 NULL); 2482 } 2483 2484 if (DMU_PROJECTUSED_DNODE(os) != NULL && 2485 DMU_PROJECTUSED_DNODE(os)->dn_type != 0) 2486 dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity, 2487 &print_header, NULL); 2488 2489 object = 0; 2490 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) { 2491 dump_object(os, object, verbosity, &print_header, &dnode_slots); 2492 object_count++; 2493 total_slots_used += dnode_slots; 2494 max_slot_used = object + dnode_slots - 1; 2495 } 2496 2497 (void) printf("\n"); 2498 2499 (void) printf(" Dnode slots:\n"); 2500 (void) printf("\tTotal used: %10llu\n", 2501 (u_longlong_t)total_slots_used); 2502 (void) printf("\tMax used: %10llu\n", 2503 (u_longlong_t)max_slot_used); 2504 (void) printf("\tPercent empty: %10lf\n", 2505 (double)(max_slot_used - total_slots_used)*100 / 2506 (double)max_slot_used); 2507 2508 (void) printf("\n"); 2509 2510 if (error != ESRCH) { 2511 (void) fprintf(stderr, "dmu_object_next() = %d\n", error); 2512 abort(); 2513 } 2514 if (leaked_objects != 0) { 2515 (void) printf("%d potentially leaked objects detected\n", 2516 leaked_objects); 2517 leaked_objects = 0; 2518 } 2519 2520 ASSERT3U(object_count, ==, usedobjs); 2521 } 2522 2523 static void 2524 dump_uberblock(uberblock_t *ub, const char *header, const char *footer) 2525 { 2526 time_t timestamp = ub->ub_timestamp; 2527 2528 (void) printf("%s", header ? 
header : ""); 2529 (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic); 2530 (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version); 2531 (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg); 2532 (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum); 2533 (void) printf("\ttimestamp = %llu UTC = %s", 2534 (u_longlong_t)ub->ub_timestamp, asctime(localtime(×tamp))); 2535 2536 (void) printf("\tmmp_magic = %016llx\n", 2537 (u_longlong_t)ub->ub_mmp_magic); 2538 if (MMP_VALID(ub)) { 2539 (void) printf("\tmmp_delay = %0llu\n", 2540 (u_longlong_t)ub->ub_mmp_delay); 2541 if (MMP_SEQ_VALID(ub)) 2542 (void) printf("\tmmp_seq = %u\n", 2543 (unsigned int) MMP_SEQ(ub)); 2544 if (MMP_FAIL_INT_VALID(ub)) 2545 (void) printf("\tmmp_fail = %u\n", 2546 (unsigned int) MMP_FAIL_INT(ub)); 2547 if (MMP_INTERVAL_VALID(ub)) 2548 (void) printf("\tmmp_write = %u\n", 2549 (unsigned int) MMP_INTERVAL(ub)); 2550 /* After MMP_* to make summarize_uberblock_mmp cleaner */ 2551 (void) printf("\tmmp_valid = %x\n", 2552 (unsigned int) ub->ub_mmp_config & 0xFF); 2553 } 2554 2555 if (dump_opt['u'] >= 4) { 2556 char blkbuf[BP_SPRINTF_LEN]; 2557 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp); 2558 (void) printf("\trootbp = %s\n", blkbuf); 2559 } 2560 (void) printf("\tcheckpoint_txg = %llu\n", 2561 (u_longlong_t)ub->ub_checkpoint_txg); 2562 (void) printf("%s", footer ? footer : ""); 2563 } 2564 2565 static void 2566 dump_config(spa_t *spa) 2567 { 2568 dmu_buf_t *db; 2569 size_t nvsize = 0; 2570 int error = 0; 2571 2572 2573 error = dmu_bonus_hold(spa->spa_meta_objset, 2574 spa->spa_config_object, FTAG, &db); 2575 2576 if (error == 0) { 2577 nvsize = *(uint64_t *)db->db_data; 2578 dmu_buf_rele(db, FTAG); 2579 2580 (void) printf("\nMOS Configuration:\n"); 2581 dump_packed_nvlist(spa->spa_meta_objset, 2582 spa->spa_config_object, (void *)&nvsize, 1); 2583 } else { 2584 (void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d", 2585 (u_longlong_t)spa->spa_config_object, error); 2586 } 2587 } 2588 2589 static void 2590 dump_cachefile(const char *cachefile) 2591 { 2592 int fd; 2593 struct stat64 statbuf; 2594 char *buf; 2595 nvlist_t *config; 2596 2597 if ((fd = open64(cachefile, O_RDONLY)) < 0) { 2598 (void) printf("cannot open '%s': %s\n", cachefile, 2599 strerror(errno)); 2600 exit(1); 2601 } 2602 2603 if (fstat64(fd, &statbuf) != 0) { 2604 (void) printf("failed to stat '%s': %s\n", cachefile, 2605 strerror(errno)); 2606 exit(1); 2607 } 2608 2609 if ((buf = malloc(statbuf.st_size)) == NULL) { 2610 (void) fprintf(stderr, "failed to allocate %llu bytes\n", 2611 (u_longlong_t)statbuf.st_size); 2612 exit(1); 2613 } 2614 2615 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { 2616 (void) fprintf(stderr, "failed to read %llu bytes\n", 2617 (u_longlong_t)statbuf.st_size); 2618 exit(1); 2619 } 2620 2621 (void) close(fd); 2622 2623 if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) { 2624 (void) fprintf(stderr, "failed to unpack nvlist\n"); 2625 exit(1); 2626 } 2627 2628 free(buf); 2629 2630 dump_nvlist(config, 0); 2631 2632 nvlist_free(config); 2633 } 2634 2635 static void 2636 print_l2arc_header(void) 2637 { 2638 (void) printf("------------------------------------\n"); 2639 (void) printf("L2ARC device header\n"); 2640 (void) printf("------------------------------------\n"); 2641 } 2642 2643 static void 2644 print_l2arc_log_blocks(void) 2645 { 2646 (void) printf("------------------------------------\n"); 2647 (void) printf("L2ARC device log blocks\n"); 2648 (void) 
printf("------------------------------------\n"); 2649 } 2650 2651 static void 2652 dump_l2arc_log_entries(uint64_t log_entries, 2653 l2arc_log_ent_phys_t *le, uint64_t i) 2654 { 2655 for (uint64_t j = 0; j < log_entries; j++) { 2656 dva_t dva = le[j].le_dva; 2657 (void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, " 2658 "vdev: %llu, offset: %llu\n", 2659 (u_longlong_t)i, j + 1, 2660 (u_longlong_t)DVA_GET_ASIZE(&dva), 2661 (u_longlong_t)DVA_GET_VDEV(&dva), 2662 (u_longlong_t)DVA_GET_OFFSET(&dva)); 2663 (void) printf("|\t\t\t\tbirth: %llu\n", 2664 (u_longlong_t)le[j].le_birth); 2665 (void) printf("|\t\t\t\tlsize: %llu\n", 2666 (u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop)); 2667 (void) printf("|\t\t\t\tpsize: %llu\n", 2668 (u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop)); 2669 (void) printf("|\t\t\t\tcompr: %llu\n", 2670 (u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop)); 2671 (void) printf("|\t\t\t\ttype: %llu\n", 2672 (u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop)); 2673 (void) printf("|\t\t\t\tprotected: %llu\n", 2674 (u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop)); 2675 (void) printf("|\t\t\t\tprefetch: %llu\n", 2676 (u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop)); 2677 (void) printf("|\t\t\t\taddress: %llu\n", 2678 (u_longlong_t)le[j].le_daddr); 2679 (void) printf("|\n"); 2680 } 2681 (void) printf("\n"); 2682 } 2683 2684 static void 2685 dump_l2arc_log_blkptr(l2arc_log_blkptr_t lbps) 2686 { 2687 (void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps.lbp_daddr); 2688 (void) printf("|\t\tpayload_asize: %llu\n", 2689 (u_longlong_t)lbps.lbp_payload_asize); 2690 (void) printf("|\t\tpayload_start: %llu\n", 2691 (u_longlong_t)lbps.lbp_payload_start); 2692 (void) printf("|\t\tlsize: %llu\n", 2693 (u_longlong_t)L2BLK_GET_LSIZE((&lbps)->lbp_prop)); 2694 (void) printf("|\t\tasize: %llu\n", 2695 (u_longlong_t)L2BLK_GET_PSIZE((&lbps)->lbp_prop)); 2696 (void) printf("|\t\tcompralgo: %llu\n", 2697 (u_longlong_t)L2BLK_GET_COMPRESS((&lbps)->lbp_prop)); 2698 (void) printf("|\t\tcksumalgo: %llu\n", 2699 (u_longlong_t)L2BLK_GET_CHECKSUM((&lbps)->lbp_prop)); 2700 (void) printf("|\n\n"); 2701 } 2702 2703 static void 2704 dump_l2arc_log_blocks(int fd, l2arc_dev_hdr_phys_t l2dhdr, 2705 l2arc_dev_hdr_phys_t *rebuild) 2706 { 2707 l2arc_log_blk_phys_t this_lb; 2708 uint64_t asize; 2709 l2arc_log_blkptr_t lbps[2]; 2710 abd_t *abd; 2711 zio_cksum_t cksum; 2712 int failed = 0; 2713 l2arc_dev_t dev; 2714 2715 if (!dump_opt['q']) 2716 print_l2arc_log_blocks(); 2717 bcopy((&l2dhdr)->dh_start_lbps, lbps, sizeof (lbps)); 2718 2719 dev.l2ad_evict = l2dhdr.dh_evict; 2720 dev.l2ad_start = l2dhdr.dh_start; 2721 dev.l2ad_end = l2dhdr.dh_end; 2722 2723 if (l2dhdr.dh_start_lbps[0].lbp_daddr == 0) { 2724 /* no log blocks to read */ 2725 if (!dump_opt['q']) { 2726 (void) printf("No log blocks to read\n"); 2727 (void) printf("\n"); 2728 } 2729 return; 2730 } else { 2731 dev.l2ad_hand = lbps[0].lbp_daddr + 2732 L2BLK_GET_PSIZE((&lbps[0])->lbp_prop); 2733 } 2734 2735 dev.l2ad_first = !!(l2dhdr.dh_flags & L2ARC_DEV_HDR_EVICT_FIRST); 2736 2737 for (;;) { 2738 if (!l2arc_log_blkptr_valid(&dev, &lbps[0])) 2739 break; 2740 2741 /* L2BLK_GET_PSIZE returns aligned size for log blocks */ 2742 asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop); 2743 if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) != 2744 (ssize_t)asize) { 2745 if (!dump_opt['q']) { 2746 (void) printf("Error while reading next log " 2747 "block\n\n"); 2748 } 2749 break; 2750 } 2751 2752 fletcher_4_native(&this_lb, asize, NULL, &cksum); 2753 if 
(!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) { 2754 failed++; 2755 if (!dump_opt['q']) { 2756 (void) printf("Invalid cksum\n"); 2757 dump_l2arc_log_blkptr(lbps[0]); 2758 } 2759 break; 2760 } 2761 2762 switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) { 2763 case ZIO_COMPRESS_OFF: 2764 break; 2765 case ZIO_COMPRESS_LZ4: 2766 abd = abd_alloc_for_io(asize, B_TRUE); 2767 abd_copy_from_buf_off(abd, &this_lb, 0, asize); 2768 zio_decompress_data(L2BLK_GET_COMPRESS( 2769 (&lbps[0])->lbp_prop), abd, &this_lb, 2770 asize, sizeof (this_lb)); 2771 abd_free(abd); 2772 break; 2773 default: 2774 break; 2775 } 2776 2777 if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC)) 2778 byteswap_uint64_array(&this_lb, sizeof (this_lb)); 2779 if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) { 2780 if (!dump_opt['q']) 2781 (void) printf("Invalid log block magic\n\n"); 2782 break; 2783 } 2784 2785 rebuild->dh_lb_count++; 2786 rebuild->dh_lb_asize += asize; 2787 if (dump_opt['l'] > 1 && !dump_opt['q']) { 2788 (void) printf("lb[%4llu]\tmagic: %llu\n", 2789 (u_longlong_t)rebuild->dh_lb_count, 2790 (u_longlong_t)this_lb.lb_magic); 2791 dump_l2arc_log_blkptr(lbps[0]); 2792 } 2793 2794 if (dump_opt['l'] > 2 && !dump_opt['q']) 2795 dump_l2arc_log_entries(l2dhdr.dh_log_entries, 2796 this_lb.lb_entries, 2797 rebuild->dh_lb_count); 2798 2799 if (l2arc_range_check_overlap(lbps[1].lbp_payload_start, 2800 lbps[0].lbp_payload_start, dev.l2ad_evict) && 2801 !dev.l2ad_first) 2802 break; 2803 2804 lbps[0] = lbps[1]; 2805 lbps[1] = this_lb.lb_prev_lbp; 2806 } 2807 2808 if (!dump_opt['q']) { 2809 (void) printf("log_blk_count:\t %llu with valid cksum\n", 2810 (u_longlong_t)rebuild->dh_lb_count); 2811 (void) printf("\t\t %d with invalid cksum\n", failed); 2812 (void) printf("log_blk_asize:\t %llu\n\n", 2813 (u_longlong_t)rebuild->dh_lb_asize); 2814 } 2815 } 2816 2817 static int 2818 dump_l2arc_header(int fd) 2819 { 2820 l2arc_dev_hdr_phys_t l2dhdr, rebuild; 2821 int error = B_FALSE; 2822 2823 bzero(&l2dhdr, sizeof (l2dhdr)); 2824 bzero(&rebuild, sizeof (rebuild)); 2825 2826 if (pread64(fd, &l2dhdr, sizeof (l2dhdr), 2827 VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) { 2828 error = B_TRUE; 2829 } else { 2830 if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC)) 2831 byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr)); 2832 2833 if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC) 2834 error = B_TRUE; 2835 } 2836 2837 if (error) { 2838 (void) printf("L2ARC device header not found\n\n"); 2839 /* Do not return an error here for backward compatibility */ 2840 return (0); 2841 } else if (!dump_opt['q']) { 2842 print_l2arc_header(); 2843 2844 (void) printf(" magic: %llu\n", 2845 (u_longlong_t)l2dhdr.dh_magic); 2846 (void) printf(" version: %llu\n", 2847 (u_longlong_t)l2dhdr.dh_version); 2848 (void) printf(" pool_guid: %llu\n", 2849 (u_longlong_t)l2dhdr.dh_spa_guid); 2850 (void) printf(" flags: %llu\n", 2851 (u_longlong_t)l2dhdr.dh_flags); 2852 (void) printf(" start_lbps[0]: %llu\n", 2853 (u_longlong_t) 2854 l2dhdr.dh_start_lbps[0].lbp_daddr); 2855 (void) printf(" start_lbps[1]: %llu\n", 2856 (u_longlong_t) 2857 l2dhdr.dh_start_lbps[1].lbp_daddr); 2858 (void) printf(" log_blk_ent: %llu\n", 2859 (u_longlong_t)l2dhdr.dh_log_entries); 2860 (void) printf(" start: %llu\n", 2861 (u_longlong_t)l2dhdr.dh_start); 2862 (void) printf(" end: %llu\n", 2863 (u_longlong_t)l2dhdr.dh_end); 2864 (void) printf(" evict: %llu\n", 2865 (u_longlong_t)l2dhdr.dh_evict); 2866 (void) printf(" lb_asize_refcount: %llu\n", 2867 (u_longlong_t)l2dhdr.dh_lb_asize); 2868 (void) printf(" 
lb_count_refcount: %llu\n\n", 2869 (u_longlong_t)l2dhdr.dh_lb_count); 2870 } 2871 2872 dump_l2arc_log_blocks(fd, l2dhdr, &rebuild); 2873 /* 2874 * The total aligned size of log blocks and the number of log blocks 2875 * reported in the header of the device may be less than what zdb 2876 * reports by dump_l2arc_log_blocks() which emulates l2arc_rebuild(). 2877 * This happens because dump_l2arc_log_blocks() lacks the memory 2878 * pressure valve that l2arc_rebuild() has. Thus, if we are on a system 2879 * with low memory, l2arc_rebuild will exit prematurely and dh_lb_asize 2880 * and dh_lb_count will be lower to begin with than what exists on the 2881 * device. This is normal and zdb should not exit with an error. The 2882 * opposite case should never happen though, the values reported in the 2883 * header should never be higher than what dump_l2arc_log_blocks() and 2884 * l2arc_rebuild() report. If this happens there is a leak in the 2885 * accounting of log blocks. 2886 */ 2887 if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize || 2888 l2dhdr.dh_lb_count > rebuild.dh_lb_count) 2889 return (1); 2890 2891 return (0); 2892 } 2893 2894 static char curpath[PATH_MAX]; 2895 2896 /* 2897 * Iterate through the path components, recursively passing 2898 * current one's obj and remaining path until we find the obj 2899 * for the last one. 2900 */ 2901 static int 2902 dump_path_impl(objset_t *os, uint64_t obj, char *name) 2903 { 2904 int err; 2905 int header = 1; 2906 uint64_t child_obj; 2907 char *s; 2908 dmu_buf_t *db; 2909 dmu_object_info_t doi; 2910 2911 if ((s = strchr(name, '/')) != NULL) 2912 *s = '\0'; 2913 err = zap_lookup(os, obj, name, 8, 1, &child_obj); 2914 2915 (void) strlcat(curpath, name, sizeof (curpath)); 2916 2917 if (err != 0) { 2918 (void) fprintf(stderr, "failed to lookup %s: %s\n", 2919 curpath, strerror(err)); 2920 return (err); 2921 } 2922 2923 child_obj = ZFS_DIRENT_OBJ(child_obj); 2924 err = sa_buf_hold(os, child_obj, FTAG, &db); 2925 if (err != 0) { 2926 (void) fprintf(stderr, 2927 "failed to get SA dbuf for obj %llu: %s\n", 2928 (u_longlong_t)child_obj, strerror(err)); 2929 return (EINVAL); 2930 } 2931 dmu_object_info_from_db(db, &doi); 2932 sa_buf_rele(db, FTAG); 2933 2934 if (doi.doi_bonus_type != DMU_OT_SA && 2935 doi.doi_bonus_type != DMU_OT_ZNODE) { 2936 (void) fprintf(stderr, "invalid bonus type %d for obj %llu\n", 2937 doi.doi_bonus_type, (u_longlong_t)child_obj); 2938 return (EINVAL); 2939 } 2940 2941 if (dump_opt['v'] > 6) { 2942 (void) printf("obj=%llu %s type=%d bonustype=%d\n", 2943 (u_longlong_t)child_obj, curpath, doi.doi_type, 2944 doi.doi_bonus_type); 2945 } 2946 2947 (void) strlcat(curpath, "/", sizeof (curpath)); 2948 2949 switch (doi.doi_type) { 2950 case DMU_OT_DIRECTORY_CONTENTS: 2951 if (s != NULL && *(s + 1) != '\0') 2952 return (dump_path_impl(os, child_obj, s + 1)); 2953 /*FALLTHROUGH*/ 2954 case DMU_OT_PLAIN_FILE_CONTENTS: 2955 dump_object(os, child_obj, dump_opt['v'], &header, NULL); 2956 return (0); 2957 default: 2958 (void) fprintf(stderr, "object %llu has non-file/directory " 2959 "type %d\n", (u_longlong_t)obj, doi.doi_type); 2960 break; 2961 } 2962 2963 return (EINVAL); 2964 } 2965 2966 /* 2967 * Dump the blocks for the object specified by path inside the dataset. 
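 * This backs the "zdb -O <dataset> <path>" invocation listed in usage();
 * e.g. "zdb -O tank/fs dir/file" (a hypothetical pool/dataset) walks the
 * directory ZAPs from the root znode down to "file" via dump_path_impl()
 * and then dumps that object with dump_object().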
2968 */ 2969 static int 2970 dump_path(char *ds, char *path) 2971 { 2972 int err; 2973 objset_t *os; 2974 uint64_t root_obj; 2975 2976 err = open_objset(ds, DMU_OST_ZFS, FTAG, &os); 2977 if (err != 0) 2978 return (err); 2979 2980 err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj); 2981 if (err != 0) { 2982 (void) fprintf(stderr, "can't lookup root znode: %s\n", 2983 strerror(err)); 2984 dmu_objset_disown(os, B_FALSE, FTAG); 2985 return (EINVAL); 2986 } 2987 2988 (void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds); 2989 2990 err = dump_path_impl(os, root_obj, path); 2991 2992 close_objset(os, FTAG); 2993 return (err); 2994 } 2995 2996 typedef struct cksum_record { 2997 zio_cksum_t cksum; 2998 boolean_t labels[VDEV_LABELS]; 2999 avl_node_t link; 3000 } cksum_record_t; 3001 3002 static int 3003 cksum_record_compare(const void *x1, const void *x2) 3004 { 3005 const cksum_record_t *l = (cksum_record_t *)x1; 3006 const cksum_record_t *r = (cksum_record_t *)x2; 3007 int arraysize = ARRAY_SIZE(l->cksum.zc_word); 3008 int difference; 3009 3010 for (int i = 0; i < arraysize; i++) { 3011 difference = AVL_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]); 3012 if (difference) 3013 break; 3014 } 3015 3016 return (difference); 3017 } 3018 3019 static cksum_record_t * 3020 cksum_record_alloc(zio_cksum_t *cksum, int l) 3021 { 3022 cksum_record_t *rec; 3023 3024 rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL); 3025 rec->cksum = *cksum; 3026 rec->labels[l] = B_TRUE; 3027 3028 return (rec); 3029 } 3030 3031 static cksum_record_t * 3032 cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum) 3033 { 3034 cksum_record_t lookup = { .cksum = *cksum }; 3035 avl_index_t where; 3036 3037 return (avl_find(tree, &lookup, &where)); 3038 } 3039 3040 static cksum_record_t * 3041 cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l) 3042 { 3043 cksum_record_t *rec; 3044 3045 rec = cksum_record_lookup(tree, cksum); 3046 if (rec) { 3047 rec->labels[l] = B_TRUE; 3048 } else { 3049 rec = cksum_record_alloc(cksum, l); 3050 avl_add(tree, rec); 3051 } 3052 3053 return (rec); 3054 } 3055 3056 static int 3057 first_label(cksum_record_t *rec) 3058 { 3059 for (int i = 0; i < VDEV_LABELS; i++) 3060 if (rec->labels[i]) 3061 return (i); 3062 3063 return (-1); 3064 } 3065 3066 static void 3067 print_label_numbers(char *prefix, cksum_record_t *rec) 3068 { 3069 printf("%s", prefix); 3070 for (int i = 0; i < VDEV_LABELS; i++) 3071 if (rec->labels[i] == B_TRUE) 3072 printf("%d ", i); 3073 printf("\n"); 3074 } 3075 3076 #define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT) 3077 3078 typedef struct zdb_label { 3079 vdev_label_t label; 3080 nvlist_t *config_nv; 3081 cksum_record_t *config; 3082 cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT]; 3083 boolean_t header_printed; 3084 boolean_t read_failed; 3085 } zdb_label_t; 3086 3087 static void 3088 print_label_header(zdb_label_t *label, int l) 3089 { 3090 3091 if (dump_opt['q']) 3092 return; 3093 3094 if (label->header_printed == B_TRUE) 3095 return; 3096 3097 (void) printf("------------------------------------\n"); 3098 (void) printf("LABEL %d\n", l); 3099 (void) printf("------------------------------------\n"); 3100 3101 label->header_printed = B_TRUE; 3102 } 3103 3104 static void 3105 dump_config_from_label(zdb_label_t *label, size_t buflen, int l) 3106 { 3107 if (dump_opt['q']) 3108 return; 3109 3110 if ((dump_opt['l'] < 3) && (first_label(label->config) != l)) 3111 return; 3112 3113 print_label_header(label, l); 3114 
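	/* config_nv was unpacked from this label's vp_nvlist in dump_label() */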
dump_nvlist(label->config_nv, 4); 3115 print_label_numbers(" labels = ", label->config); 3116 } 3117 3118 #define ZDB_MAX_UB_HEADER_SIZE 32 3119 3120 static void 3121 dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num) 3122 { 3123 vdev_t vd; 3124 char header[ZDB_MAX_UB_HEADER_SIZE]; 3125 3126 vd.vdev_ashift = ashift; 3127 vd.vdev_top = &vd; 3128 3129 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) { 3130 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i); 3131 uberblock_t *ub = (void *)((char *)&label->label + uoff); 3132 cksum_record_t *rec = label->uberblocks[i]; 3133 3134 if (rec == NULL) { 3135 if (dump_opt['u'] >= 2) { 3136 print_label_header(label, label_num); 3137 (void) printf(" Uberblock[%d] invalid\n", i); 3138 } 3139 continue; 3140 } 3141 3142 if ((dump_opt['u'] < 3) && (first_label(rec) != label_num)) 3143 continue; 3144 3145 print_label_header(label, label_num); 3146 (void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE, 3147 " Uberblock[%d]\n", i); 3148 dump_uberblock(ub, header, ""); 3149 print_label_numbers(" labels = ", rec); 3150 } 3151 } 3152 3153 static int 3154 dump_label(const char *dev) 3155 { 3156 char path[MAXPATHLEN]; 3157 zdb_label_t labels[VDEV_LABELS]; 3158 uint64_t psize, ashift, l2cache; 3159 struct stat64 statbuf; 3160 boolean_t config_found = B_FALSE; 3161 boolean_t error = B_FALSE; 3162 boolean_t read_l2arc_header = B_FALSE; 3163 avl_tree_t config_tree; 3164 avl_tree_t uberblock_tree; 3165 void *node, *cookie; 3166 int fd; 3167 3168 bzero(labels, sizeof (labels)); 3169 3170 (void) strlcpy(path, dev, sizeof (path)); 3171 if (dev[0] == '/') { 3172 if (strncmp(dev, ZFS_DISK_ROOTD, 3173 strlen(ZFS_DISK_ROOTD)) == 0) { 3174 (void) snprintf(path, sizeof (path), "%s%s", 3175 ZFS_RDISK_ROOTD, dev + strlen(ZFS_DISK_ROOTD)); 3176 } 3177 } else if (stat64(path, &statbuf) != 0) { 3178 char *s; 3179 3180 (void) snprintf(path, sizeof (path), "%s%s", ZFS_RDISK_ROOTD, 3181 dev); 3182 if (((s = strrchr(dev, 's')) == NULL && 3183 (s = strchr(dev, 'p')) == NULL) || 3184 !isdigit(*(s + 1))) 3185 (void) strlcat(path, "s0", sizeof (path)); 3186 } 3187 3188 if ((fd = open64(path, O_RDONLY)) < 0) { 3189 (void) fprintf(stderr, "cannot open '%s': %s\n", path, 3190 strerror(errno)); 3191 exit(1); 3192 } 3193 3194 if (fstat64(fd, &statbuf) != 0) { 3195 (void) fprintf(stderr, "failed to stat '%s': %s\n", path, 3196 strerror(errno)); 3197 (void) close(fd); 3198 exit(1); 3199 } 3200 3201 if (S_ISBLK(statbuf.st_mode)) { 3202 (void) fprintf(stderr, 3203 "cannot use '%s': character device required\n", path); 3204 (void) close(fd); 3205 exit(1); 3206 } 3207 3208 avl_create(&config_tree, cksum_record_compare, 3209 sizeof (cksum_record_t), offsetof(cksum_record_t, link)); 3210 avl_create(&uberblock_tree, cksum_record_compare, 3211 sizeof (cksum_record_t), offsetof(cksum_record_t, link)); 3212 3213 psize = statbuf.st_size; 3214 psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t)); 3215 ashift = SPA_MINBLOCKSHIFT; 3216 3217 /* 3218 * 1. Read the label from disk 3219 * 2. Unpack the configuration and insert in config tree. 3220 * 3. Traverse all uberblocks and insert in uberblock tree. 
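 *
 * Identical configs or uberblocks found in different labels hash to the
 * same cksum_record_t, so when dumping below each unique one is printed
 * only under the first label that carries it (unless -l/-u is given three
 * or more times), with print_label_numbers() listing all labels that
 * contain it.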
3221 */ 3222 for (int l = 0; l < VDEV_LABELS; l++) { 3223 zdb_label_t *label = &labels[l]; 3224 char *buf = label->label.vl_vdev_phys.vp_nvlist; 3225 size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist); 3226 nvlist_t *config; 3227 cksum_record_t *rec; 3228 zio_cksum_t cksum; 3229 vdev_t vd; 3230 3231 if (pread64(fd, &label->label, sizeof (label->label), 3232 vdev_label_offset(psize, l, 0)) != sizeof (label->label)) { 3233 if (!dump_opt['q']) 3234 (void) printf("failed to read label %d\n", l); 3235 label->read_failed = B_TRUE; 3236 error = B_TRUE; 3237 continue; 3238 } 3239 3240 label->read_failed = B_FALSE; 3241 3242 if (nvlist_unpack(buf, buflen, &config, 0) == 0) { 3243 nvlist_t *vdev_tree = NULL; 3244 size_t size; 3245 3246 if ((nvlist_lookup_nvlist(config, 3247 ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) || 3248 (nvlist_lookup_uint64(vdev_tree, 3249 ZPOOL_CONFIG_ASHIFT, &ashift) != 0)) 3250 ashift = SPA_MINBLOCKSHIFT; 3251 3252 /* If the device is a cache device clear the header. */ 3253 if (!read_l2arc_header) { 3254 if (nvlist_lookup_uint64(config, 3255 ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 && 3256 l2cache == POOL_STATE_L2CACHE) { 3257 read_l2arc_header = B_TRUE; 3258 } 3259 } 3260 3261 if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0) 3262 size = buflen; 3263 3264 fletcher_4_native(buf, size, NULL, &cksum); 3265 rec = cksum_record_insert(&config_tree, &cksum, l); 3266 3267 label->config = rec; 3268 label->config_nv = config; 3269 config_found = B_TRUE; 3270 } else { 3271 error = B_TRUE; 3272 } 3273 3274 vd.vdev_ashift = ashift; 3275 vd.vdev_top = &vd; 3276 3277 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) { 3278 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i); 3279 uberblock_t *ub = (void *)((char *)label + uoff); 3280 3281 if (uberblock_verify(ub)) 3282 continue; 3283 3284 fletcher_4_native(ub, sizeof (*ub), NULL, &cksum); 3285 rec = cksum_record_insert(&uberblock_tree, &cksum, l); 3286 3287 label->uberblocks[i] = rec; 3288 } 3289 } 3290 3291 /* 3292 * Dump the label and uberblocks. 3293 */ 3294 for (int l = 0; l < VDEV_LABELS; l++) { 3295 zdb_label_t *label = &labels[l]; 3296 size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist); 3297 3298 if (label->read_failed == B_TRUE) 3299 continue; 3300 3301 if (label->config_nv) { 3302 dump_config_from_label(label, buflen, l); 3303 } else { 3304 if (!dump_opt['q']) 3305 (void) printf("failed to unpack label %d\n", l); 3306 } 3307 3308 if (dump_opt['u']) 3309 dump_label_uberblocks(label, ashift, l); 3310 3311 nvlist_free(label->config_nv); 3312 } 3313 3314 /* 3315 * Dump the L2ARC header, if existent. 3316 */ 3317 if (read_l2arc_header) 3318 error |= dump_l2arc_header(fd); 3319 3320 cookie = NULL; 3321 while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL) 3322 umem_free(node, sizeof (cksum_record_t)); 3323 3324 cookie = NULL; 3325 while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL) 3326 umem_free(node, sizeof (cksum_record_t)); 3327 3328 avl_destroy(&config_tree); 3329 avl_destroy(&uberblock_tree); 3330 3331 (void) close(fd); 3332 3333 return (config_found == B_FALSE ? 2 : 3334 (error == B_TRUE ? 
1 : 0)); 3335 } 3336 3337 static uint64_t dataset_feature_count[SPA_FEATURES]; 3338 static uint64_t remap_deadlist_count = 0; 3339 3340 static int 3341 dump_one_dir(const char *dsname, void *arg __unused) 3342 { 3343 int error; 3344 objset_t *os; 3345 3346 error = open_objset(dsname, DMU_OST_ANY, FTAG, &os); 3347 if (error != 0) 3348 return (0); 3349 3350 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { 3351 if (!dmu_objset_ds(os)->ds_feature_inuse[f]) 3352 continue; 3353 ASSERT(spa_feature_table[f].fi_flags & 3354 ZFEATURE_FLAG_PER_DATASET); 3355 dataset_feature_count[f]++; 3356 } 3357 3358 if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) { 3359 remap_deadlist_count++; 3360 } 3361 3362 dump_dir(os); 3363 close_objset(os, FTAG); 3364 fuid_table_destroy(); 3365 return (0); 3366 } 3367 3368 /* 3369 * Block statistics. 3370 */ 3371 #define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2) 3372 typedef struct zdb_blkstats { 3373 uint64_t zb_asize; 3374 uint64_t zb_lsize; 3375 uint64_t zb_psize; 3376 uint64_t zb_count; 3377 uint64_t zb_gangs; 3378 uint64_t zb_ditto_samevdev; 3379 uint64_t zb_ditto_same_ms; 3380 uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE]; 3381 } zdb_blkstats_t; 3382 3383 /* 3384 * Extended object types to report deferred frees and dedup auto-ditto blocks. 3385 */ 3386 #define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0) 3387 #define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1) 3388 #define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2) 3389 #define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3) 3390 3391 static const char *zdb_ot_extname[] = { 3392 "deferred free", 3393 "dedup ditto", 3394 "other", 3395 "Total", 3396 }; 3397 3398 #define ZB_TOTAL DN_MAX_LEVELS 3399 3400 typedef struct zdb_cb { 3401 zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1]; 3402 uint64_t zcb_removing_size; 3403 uint64_t zcb_checkpoint_size; 3404 uint64_t zcb_dedup_asize; 3405 uint64_t zcb_dedup_blocks; 3406 uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES]; 3407 uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES] 3408 [BPE_PAYLOAD_SIZE]; 3409 uint64_t zcb_start; 3410 hrtime_t zcb_lastprint; 3411 uint64_t zcb_totalasize; 3412 uint64_t zcb_errors[256]; 3413 int zcb_readfails; 3414 int zcb_haderrors; 3415 spa_t *zcb_spa; 3416 uint32_t **zcb_vd_obsolete_counts; 3417 } zdb_cb_t; 3418 3419 /* test if two DVA offsets from same vdev are within the same metaslab */ 3420 static boolean_t 3421 same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2) 3422 { 3423 vdev_t *vd = vdev_lookup_top(spa, vdev); 3424 uint64_t ms_shift = vd->vdev_ms_shift; 3425 3426 return ((off1 >> ms_shift) == (off2 >> ms_shift)); 3427 } 3428 3429 static void 3430 zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp, 3431 dmu_object_type_t type) 3432 { 3433 uint64_t refcnt = 0; 3434 3435 ASSERT(type < ZDB_OT_TOTAL); 3436 3437 if (zilog && zil_bp_tree_add(zilog, bp) != 0) 3438 return; 3439 3440 spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER); 3441 3442 for (int i = 0; i < 4; i++) { 3443 int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL; 3444 int t = (i & 1) ? type : ZDB_OT_TOTAL; 3445 int equal; 3446 zdb_blkstats_t *zb = &zcb->zcb_type[l][t]; 3447 3448 zb->zb_asize += BP_GET_ASIZE(bp); 3449 zb->zb_lsize += BP_GET_LSIZE(bp); 3450 zb->zb_psize += BP_GET_PSIZE(bp); 3451 zb->zb_count++; 3452 3453 /* 3454 * The histogram is only big enough to record blocks up to 3455 * SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last, 3456 * "other", bucket. 
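 *
 * E.g. with SPA_MINBLOCKSIZE of 512 and SPA_OLD_MAXBLOCKSIZE of 128K,
 * psize >> SPA_MINBLOCKSHIFT gives bucket indices 0 through 256, and the
 * MIN() below clamps anything larger into index 257, the last slot of
 * zb_psize_histogram[PSIZE_HISTO_SIZE].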
3457 */ 3458 unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT; 3459 idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1); 3460 zb->zb_psize_histogram[idx]++; 3461 3462 zb->zb_gangs += BP_COUNT_GANG(bp); 3463 3464 switch (BP_GET_NDVAS(bp)) { 3465 case 2: 3466 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 3467 DVA_GET_VDEV(&bp->blk_dva[1])) { 3468 zb->zb_ditto_samevdev++; 3469 3470 if (same_metaslab(zcb->zcb_spa, 3471 DVA_GET_VDEV(&bp->blk_dva[0]), 3472 DVA_GET_OFFSET(&bp->blk_dva[0]), 3473 DVA_GET_OFFSET(&bp->blk_dva[1]))) 3474 zb->zb_ditto_same_ms++; 3475 } 3476 break; 3477 case 3: 3478 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 3479 DVA_GET_VDEV(&bp->blk_dva[1])) + 3480 (DVA_GET_VDEV(&bp->blk_dva[0]) == 3481 DVA_GET_VDEV(&bp->blk_dva[2])) + 3482 (DVA_GET_VDEV(&bp->blk_dva[1]) == 3483 DVA_GET_VDEV(&bp->blk_dva[2])); 3484 if (equal != 0) { 3485 zb->zb_ditto_samevdev++; 3486 3487 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 3488 DVA_GET_VDEV(&bp->blk_dva[1]) && 3489 same_metaslab(zcb->zcb_spa, 3490 DVA_GET_VDEV(&bp->blk_dva[0]), 3491 DVA_GET_OFFSET(&bp->blk_dva[0]), 3492 DVA_GET_OFFSET(&bp->blk_dva[1]))) 3493 zb->zb_ditto_same_ms++; 3494 else if (DVA_GET_VDEV(&bp->blk_dva[0]) == 3495 DVA_GET_VDEV(&bp->blk_dva[2]) && 3496 same_metaslab(zcb->zcb_spa, 3497 DVA_GET_VDEV(&bp->blk_dva[0]), 3498 DVA_GET_OFFSET(&bp->blk_dva[0]), 3499 DVA_GET_OFFSET(&bp->blk_dva[2]))) 3500 zb->zb_ditto_same_ms++; 3501 else if (DVA_GET_VDEV(&bp->blk_dva[1]) == 3502 DVA_GET_VDEV(&bp->blk_dva[2]) && 3503 same_metaslab(zcb->zcb_spa, 3504 DVA_GET_VDEV(&bp->blk_dva[1]), 3505 DVA_GET_OFFSET(&bp->blk_dva[1]), 3506 DVA_GET_OFFSET(&bp->blk_dva[2]))) 3507 zb->zb_ditto_same_ms++; 3508 } 3509 break; 3510 } 3511 } 3512 3513 spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG); 3514 3515 if (BP_IS_EMBEDDED(bp)) { 3516 zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++; 3517 zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)] 3518 [BPE_GET_PSIZE(bp)]++; 3519 return; 3520 } 3521 3522 if (dump_opt['L']) 3523 return; 3524 3525 if (BP_GET_DEDUP(bp)) { 3526 ddt_t *ddt; 3527 ddt_entry_t *dde; 3528 3529 ddt = ddt_select(zcb->zcb_spa, bp); 3530 ddt_enter(ddt); 3531 dde = ddt_lookup(ddt, bp, B_FALSE); 3532 3533 if (dde == NULL) { 3534 refcnt = 0; 3535 } else { 3536 ddt_phys_t *ddp = ddt_phys_select(dde, bp); 3537 ddt_phys_decref(ddp); 3538 refcnt = ddp->ddp_refcnt; 3539 if (ddt_phys_total_refcnt(dde) == 0) 3540 ddt_remove(ddt, dde); 3541 } 3542 ddt_exit(ddt); 3543 } 3544 3545 VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa, 3546 refcnt ? 
0 : spa_min_claim_txg(zcb->zcb_spa), 3547 bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0); 3548 } 3549 3550 static void 3551 zdb_blkptr_done(zio_t *zio) 3552 { 3553 spa_t *spa = zio->io_spa; 3554 blkptr_t *bp = zio->io_bp; 3555 int ioerr = zio->io_error; 3556 zdb_cb_t *zcb = zio->io_private; 3557 zbookmark_phys_t *zb = &zio->io_bookmark; 3558 3559 abd_free(zio->io_abd); 3560 3561 mutex_enter(&spa->spa_scrub_lock); 3562 spa->spa_load_verify_ios--; 3563 cv_broadcast(&spa->spa_scrub_io_cv); 3564 3565 if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 3566 char blkbuf[BP_SPRINTF_LEN]; 3567 3568 zcb->zcb_haderrors = 1; 3569 zcb->zcb_errors[ioerr]++; 3570 3571 if (dump_opt['b'] >= 2) 3572 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 3573 else 3574 blkbuf[0] = '\0'; 3575 3576 (void) printf("zdb_blkptr_cb: " 3577 "Got error %d reading " 3578 "<%llu, %llu, %lld, %llx> %s -- skipping\n", 3579 ioerr, 3580 (u_longlong_t)zb->zb_objset, 3581 (u_longlong_t)zb->zb_object, 3582 (u_longlong_t)zb->zb_level, 3583 (u_longlong_t)zb->zb_blkid, 3584 blkbuf); 3585 } 3586 mutex_exit(&spa->spa_scrub_lock); 3587 } 3588 3589 static int 3590 zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 3591 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 3592 { 3593 zdb_cb_t *zcb = arg; 3594 dmu_object_type_t type; 3595 boolean_t is_metadata; 3596 3597 if (bp == NULL) 3598 return (0); 3599 3600 if (dump_opt['b'] >= 5 && bp->blk_birth > 0) { 3601 char blkbuf[BP_SPRINTF_LEN]; 3602 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 3603 (void) printf("objset %llu object %llu " 3604 "level %lld offset 0x%llx %s\n", 3605 (u_longlong_t)zb->zb_objset, 3606 (u_longlong_t)zb->zb_object, 3607 (longlong_t)zb->zb_level, 3608 (u_longlong_t)blkid2offset(dnp, bp, zb), 3609 blkbuf); 3610 } 3611 3612 if (BP_IS_HOLE(bp)) 3613 return (0); 3614 3615 type = BP_GET_TYPE(bp); 3616 3617 zdb_count_block(zcb, zilog, bp, 3618 (type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type); 3619 3620 is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)); 3621 3622 if (!BP_IS_EMBEDDED(bp) && 3623 (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) { 3624 size_t size = BP_GET_PSIZE(bp); 3625 abd_t *abd = abd_alloc(size, B_FALSE); 3626 int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW; 3627 3628 /* If it's an intent log block, failure is expected. 
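 * ZIL blocks are allocated ahead of use, so the log chain can point at a
 * block that was never written; ZIO_FLAG_SPECULATIVE keeps
 * zdb_blkptr_done() from counting such a read failure as an error.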
*/ 3629 if (zb->zb_level == ZB_ZIL_LEVEL) 3630 flags |= ZIO_FLAG_SPECULATIVE; 3631 3632 mutex_enter(&spa->spa_scrub_lock); 3633 while (spa->spa_load_verify_ios > max_inflight) 3634 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 3635 spa->spa_load_verify_ios++; 3636 mutex_exit(&spa->spa_scrub_lock); 3637 3638 zio_nowait(zio_read(NULL, spa, bp, abd, size, 3639 zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb)); 3640 } 3641 3642 zcb->zcb_readfails = 0; 3643 3644 /* only call gethrtime() every 100 blocks */ 3645 static int iters; 3646 if (++iters > 100) 3647 iters = 0; 3648 else 3649 return (0); 3650 3651 if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) { 3652 uint64_t now = gethrtime(); 3653 char buf[10]; 3654 uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize; 3655 int kb_per_sec = 3656 1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000)); 3657 int sec_remaining = 3658 (zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec; 3659 3660 /* make sure nicenum has enough space */ 3661 CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ); 3662 3663 zfs_nicenum(bytes, buf, sizeof (buf)); 3664 (void) fprintf(stderr, 3665 "\r%5s completed (%4dMB/s) " 3666 "estimated time remaining: %uhr %02umin %02usec ", 3667 buf, kb_per_sec / 1024, 3668 sec_remaining / 60 / 60, 3669 sec_remaining / 60 % 60, 3670 sec_remaining % 60); 3671 3672 zcb->zcb_lastprint = now; 3673 } 3674 3675 return (0); 3676 } 3677 3678 static void 3679 zdb_leak(void *arg, uint64_t start, uint64_t size) 3680 { 3681 vdev_t *vd = arg; 3682 3683 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n", 3684 (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size); 3685 } 3686 3687 static metaslab_ops_t zdb_metaslab_ops = { 3688 NULL /* alloc */ 3689 }; 3690 3691 typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme, 3692 uint64_t txg, void *arg); 3693 3694 typedef struct unflushed_iter_cb_arg { 3695 spa_t *uic_spa; 3696 uint64_t uic_txg; 3697 void *uic_arg; 3698 zdb_log_sm_cb_t uic_cb; 3699 } unflushed_iter_cb_arg_t; 3700 3701 static int 3702 iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg) 3703 { 3704 unflushed_iter_cb_arg_t *uic = arg; 3705 3706 return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg)); 3707 } 3708 3709 static void 3710 iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg) 3711 { 3712 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 3713 return; 3714 3715 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3716 for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg); 3717 sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) { 3718 space_map_t *sm = NULL; 3719 VERIFY0(space_map_open(&sm, spa_meta_objset(spa), 3720 sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT)); 3721 3722 unflushed_iter_cb_arg_t uic = { 3723 .uic_spa = spa, 3724 .uic_txg = sls->sls_txg, 3725 .uic_arg = arg, 3726 .uic_cb = cb 3727 }; 3728 3729 VERIFY0(space_map_iterate(sm, space_map_length(sm), 3730 iterate_through_spacemap_logs_cb, &uic)); 3731 space_map_close(sm); 3732 } 3733 spa_config_exit(spa, SCL_CONFIG, FTAG); 3734 } 3735 3736 /* ARGSUSED */ 3737 static int 3738 load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme, 3739 uint64_t txg, void *arg) 3740 { 3741 spa_vdev_removal_t *svr = arg; 3742 3743 uint64_t offset = sme->sme_offset; 3744 uint64_t size = sme->sme_run; 3745 3746 /* skip vdevs we don't care about */ 3747 if (sme->sme_vdev != svr->svr_vdev_id) 3748 return (0); 3749 3750 vdev_t *vd = 
vdev_lookup_top(spa, sme->sme_vdev); 3751 metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 3752 ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE); 3753 3754 if (txg < metaslab_unflushed_txg(ms)) 3755 return (0); 3756 3757 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 3758 ASSERT(vim != NULL); 3759 if (offset >= vdev_indirect_mapping_max_offset(vim)) 3760 return (0); 3761 3762 if (sme->sme_type == SM_ALLOC) 3763 range_tree_add(svr->svr_allocd_segs, offset, size); 3764 else 3765 range_tree_remove(svr->svr_allocd_segs, offset, size); 3766 3767 return (0); 3768 } 3769 3770 static void 3771 zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb) 3772 { 3773 ddt_bookmark_t ddb; 3774 ddt_entry_t dde; 3775 int error; 3776 3777 ASSERT(!dump_opt['L']); 3778 3779 bzero(&ddb, sizeof (ddb)); 3780 while ((error = ddt_walk(spa, &ddb, &dde)) == 0) { 3781 blkptr_t blk; 3782 ddt_phys_t *ddp = dde.dde_phys; 3783 3784 if (ddb.ddb_class == DDT_CLASS_UNIQUE) 3785 return; 3786 3787 ASSERT(ddt_phys_total_refcnt(&dde) > 1); 3788 3789 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 3790 if (ddp->ddp_phys_birth == 0) 3791 continue; 3792 ddt_bp_create(ddb.ddb_checksum, 3793 &dde.dde_key, ddp, &blk); 3794 if (p == DDT_PHYS_DITTO) { 3795 zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO); 3796 } else { 3797 zcb->zcb_dedup_asize += 3798 BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1); 3799 zcb->zcb_dedup_blocks++; 3800 } 3801 } 3802 ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum]; 3803 ddt_enter(ddt); 3804 VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL); 3805 ddt_exit(ddt); 3806 } 3807 3808 ASSERT(error == ENOENT); 3809 } 3810 3811 /* ARGSUSED */ 3812 static void 3813 claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 3814 uint64_t size, void *arg) 3815 { 3816 /* 3817 * This callback was called through a remap from 3818 * a device being removed. Therefore, the vdev that 3819 * this callback is applied to is a concrete 3820 * vdev. 3821 */ 3822 ASSERT(vdev_is_concrete(vd)); 3823 3824 VERIFY0(metaslab_claim_impl(vd, offset, size, 3825 spa_min_claim_txg(vd->vdev_spa))); 3826 } 3827 3828 static void 3829 claim_segment_cb(void *arg, uint64_t offset, uint64_t size) 3830 { 3831 vdev_t *vd = arg; 3832 3833 vdev_indirect_ops.vdev_op_remap(vd, offset, size, 3834 claim_segment_impl_cb, NULL); 3835 } 3836 3837 /* 3838 * After accounting for all allocated blocks that are directly referenced, 3839 * we might have missed a reference to a block from a partially complete 3840 * (and thus unused) indirect mapping object. We perform a secondary pass 3841 * through the metaslabs we have already mapped and claim the destination 3842 * blocks. 
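 *
 * Concretely, zdb_claim_removing() below gathers the still-allocated
 * segments of the removing vdev from its metaslab space maps and any
 * unflushed spacemap-log entries, discards everything past
 * vdev_indirect_mapping_max_offset(), and claims each remaining segment
 * through the vdev's remap op so the copied destination blocks are
 * accounted for rather than reported as leaked.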
3843 */ 3844 static void 3845 zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb) 3846 { 3847 if (dump_opt['L']) 3848 return; 3849 3850 if (spa->spa_vdev_removal == NULL) 3851 return; 3852 3853 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3854 3855 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 3856 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 3857 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 3858 3859 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 3860 3861 range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); 3862 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) { 3863 metaslab_t *msp = vd->vdev_ms[msi]; 3864 3865 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim)) 3866 break; 3867 3868 ASSERT0(range_tree_space(allocs)); 3869 if (msp->ms_sm != NULL) 3870 VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC)); 3871 range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs); 3872 } 3873 range_tree_destroy(allocs); 3874 3875 iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr); 3876 3877 /* 3878 * Clear everything past what has been synced, 3879 * because we have not allocated mappings for 3880 * it yet. 3881 */ 3882 range_tree_clear(svr->svr_allocd_segs, 3883 vdev_indirect_mapping_max_offset(vim), 3884 vd->vdev_asize - vdev_indirect_mapping_max_offset(vim)); 3885 3886 zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs); 3887 range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd); 3888 3889 spa_config_exit(spa, SCL_CONFIG, FTAG); 3890 } 3891 3892 /* ARGSUSED */ 3893 static int 3894 increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 3895 { 3896 zdb_cb_t *zcb = arg; 3897 spa_t *spa = zcb->zcb_spa; 3898 vdev_t *vd; 3899 const dva_t *dva = &bp->blk_dva[0]; 3900 3901 ASSERT(!dump_opt['L']); 3902 ASSERT3U(BP_GET_NDVAS(bp), ==, 1); 3903 3904 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3905 vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva)); 3906 ASSERT3P(vd, !=, NULL); 3907 spa_config_exit(spa, SCL_VDEV, FTAG); 3908 3909 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 3910 ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL); 3911 3912 vdev_indirect_mapping_increment_obsolete_count( 3913 vd->vdev_indirect_mapping, 3914 DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva), 3915 zcb->zcb_vd_obsolete_counts[vd->vdev_id]); 3916 3917 return (0); 3918 } 3919 3920 static uint32_t * 3921 zdb_load_obsolete_counts(vdev_t *vd) 3922 { 3923 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 3924 spa_t *spa = vd->vdev_spa; 3925 spa_condensing_indirect_phys_t *scip = 3926 &spa->spa_condensing_indirect_phys; 3927 uint32_t *counts; 3928 3929 EQUIV(vdev_obsolete_sm_object(vd) != 0, vd->vdev_obsolete_sm != NULL); 3930 counts = vdev_indirect_mapping_load_obsolete_counts(vim); 3931 if (vd->vdev_obsolete_sm != NULL) { 3932 vdev_indirect_mapping_load_obsolete_spacemap(vim, counts, 3933 vd->vdev_obsolete_sm); 3934 } 3935 if (scip->scip_vdev == vd->vdev_id && 3936 scip->scip_prev_obsolete_sm_object != 0) { 3937 space_map_t *prev_obsolete_sm = NULL; 3938 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset, 3939 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0)); 3940 vdev_indirect_mapping_load_obsolete_spacemap(vim, counts, 3941 prev_obsolete_sm); 3942 space_map_close(prev_obsolete_sm); 3943 } 3944 return (counts); 3945 } 3946 3947 typedef struct checkpoint_sm_exclude_entry_arg { 3948 vdev_t *cseea_vd; 3949 uint64_t cseea_checkpoint_size; 3950 } 
checkpoint_sm_exclude_entry_arg_t; 3951 3952 static int 3953 checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg) 3954 { 3955 checkpoint_sm_exclude_entry_arg_t *cseea = arg; 3956 vdev_t *vd = cseea->cseea_vd; 3957 metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift]; 3958 uint64_t end = sme->sme_offset + sme->sme_run; 3959 3960 ASSERT(sme->sme_type == SM_FREE); 3961 3962 /* 3963 * Since the vdev_checkpoint_sm exists in the vdev level 3964 * and the ms_sm space maps exist in the metaslab level, 3965 * an entry in the checkpoint space map could theoretically 3966 * cross the boundaries of the metaslab that it belongs. 3967 * 3968 * In reality, because of the way that we populate and 3969 * manipulate the checkpoint's space maps currently, 3970 * there shouldn't be any entries that cross metaslabs. 3971 * Hence the assertion below. 3972 * 3973 * That said, there is no fundamental requirement that 3974 * the checkpoint's space map entries should not cross 3975 * metaslab boundaries. So if needed we could add code 3976 * that handles metaslab-crossing segments in the future. 3977 */ 3978 VERIFY3U(sme->sme_offset, >=, ms->ms_start); 3979 VERIFY3U(end, <=, ms->ms_start + ms->ms_size); 3980 3981 /* 3982 * By removing the entry from the allocated segments we 3983 * also verify that the entry is there to begin with. 3984 */ 3985 mutex_enter(&ms->ms_lock); 3986 range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run); 3987 mutex_exit(&ms->ms_lock); 3988 3989 cseea->cseea_checkpoint_size += sme->sme_run; 3990 return (0); 3991 } 3992 3993 static void 3994 zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb) 3995 { 3996 spa_t *spa = vd->vdev_spa; 3997 space_map_t *checkpoint_sm = NULL; 3998 uint64_t checkpoint_sm_obj; 3999 4000 /* 4001 * If there is no vdev_top_zap, we are in a pool whose 4002 * version predates the pool checkpoint feature. 4003 */ 4004 if (vd->vdev_top_zap == 0) 4005 return; 4006 4007 /* 4008 * If there is no reference of the vdev_checkpoint_sm in 4009 * the vdev_top_zap, then one of the following scenarios 4010 * is true: 4011 * 4012 * 1] There is no checkpoint 4013 * 2] There is a checkpoint, but no checkpointed blocks 4014 * have been freed yet 4015 * 3] The current vdev is indirect 4016 * 4017 * In these cases we return immediately. 
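 *
 * Otherwise, the ZAP lookup below yields the object number of this
 * vdev's checkpoint space map, which is then opened and iterated so that
 * its entries can be excluded from the ms_allocatable trees used for
 * leak detection.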
4018 */ 4019 if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap, 4020 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0) 4021 return; 4022 4023 VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap, 4024 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, 4025 &checkpoint_sm_obj)); 4026 4027 checkpoint_sm_exclude_entry_arg_t cseea; 4028 cseea.cseea_vd = vd; 4029 cseea.cseea_checkpoint_size = 0; 4030 4031 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa), 4032 checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift)); 4033 4034 VERIFY0(space_map_iterate(checkpoint_sm, 4035 space_map_length(checkpoint_sm), 4036 checkpoint_sm_exclude_entry_cb, &cseea)); 4037 space_map_close(checkpoint_sm); 4038 4039 zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size; 4040 } 4041 4042 static void 4043 zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb) 4044 { 4045 ASSERT(!dump_opt['L']); 4046 4047 vdev_t *rvd = spa->spa_root_vdev; 4048 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 4049 ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id); 4050 zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb); 4051 } 4052 } 4053 4054 static int 4055 count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme, 4056 uint64_t txg, void *arg) 4057 { 4058 int64_t *ualloc_space = arg; 4059 uint64_t offset = sme->sme_offset; 4060 uint64_t vdev_id = sme->sme_vdev; 4061 4062 vdev_t *vd = vdev_lookup_top(spa, vdev_id); 4063 if (!vdev_is_concrete(vd)) 4064 return (0); 4065 4066 metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 4067 ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE); 4068 4069 if (txg < metaslab_unflushed_txg(ms)) 4070 return (0); 4071 4072 if (sme->sme_type == SM_ALLOC) 4073 *ualloc_space += sme->sme_run; 4074 else 4075 *ualloc_space -= sme->sme_run; 4076 4077 return (0); 4078 } 4079 4080 static int64_t 4081 get_unflushed_alloc_space(spa_t *spa) 4082 { 4083 if (dump_opt['L']) 4084 return (0); 4085 4086 int64_t ualloc_space = 0; 4087 iterate_through_spacemap_logs(spa, count_unflushed_space_cb, 4088 &ualloc_space); 4089 return (ualloc_space); 4090 } 4091 4092 static int 4093 load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg) 4094 { 4095 maptype_t *uic_maptype = arg; 4096 uint64_t offset = sme->sme_offset; 4097 uint64_t size = sme->sme_run; 4098 uint64_t vdev_id = sme->sme_vdev; 4099 vdev_t *vd = vdev_lookup_top(spa, vdev_id); 4100 4101 /* skip indirect vdevs */ 4102 if (!vdev_is_concrete(vd)) 4103 return (0); 4104 4105 metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 4106 4107 ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE); 4108 ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE); 4109 4110 if (txg < metaslab_unflushed_txg(ms)) 4111 return (0); 4112 4113 if (*uic_maptype == sme->sme_type) 4114 range_tree_add(ms->ms_allocatable, offset, size); 4115 else 4116 range_tree_remove(ms->ms_allocatable, offset, size); 4117 4118 return (0); 4119 } 4120 4121 static void 4122 load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype) 4123 { 4124 iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype); 4125 } 4126 4127 static void 4128 load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype) 4129 { 4130 vdev_t *rvd = spa->spa_root_vdev; 4131 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 4132 vdev_t *vd = rvd->vdev_child[i]; 4133 4134 ASSERT3U(i, ==, vd->vdev_id); 4135 4136 if (vd->vdev_ops == &vdev_indirect_ops) 4137 continue; 4138 4139 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 4140 
metaslab_t *msp = vd->vdev_ms[m]; 4141 4142 (void) fprintf(stderr, 4143 "\rloading concrete vdev %llu, " 4144 "metaslab %llu of %llu ...", 4145 (longlong_t)vd->vdev_id, 4146 (longlong_t)msp->ms_id, 4147 (longlong_t)vd->vdev_ms_count); 4148 4149 mutex_enter(&msp->ms_lock); 4150 range_tree_vacate(msp->ms_allocatable, NULL, NULL); 4151 4152 /* 4153 * We don't want to spend the CPU manipulating the 4154 * size-ordered tree, so clear the range_tree ops. 4155 */ 4156 msp->ms_allocatable->rt_ops = NULL; 4157 4158 if (msp->ms_sm != NULL) { 4159 VERIFY0(space_map_load(msp->ms_sm, 4160 msp->ms_allocatable, maptype)); 4161 } 4162 if (!msp->ms_loaded) 4163 msp->ms_loaded = B_TRUE; 4164 mutex_exit(&msp->ms_lock); 4165 } 4166 } 4167 4168 load_unflushed_to_ms_allocatables(spa, maptype); 4169 } 4170 4171 /* 4172 * vm_idxp is an in-out parameter which (for indirect vdevs) is the 4173 * index in vim_entries that has the first entry in this metaslab. 4174 * On return, it will be set to the first entry after this metaslab. 4175 */ 4176 static void 4177 load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp, 4178 uint64_t *vim_idxp) 4179 { 4180 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 4181 4182 mutex_enter(&msp->ms_lock); 4183 range_tree_vacate(msp->ms_allocatable, NULL, NULL); 4184 4185 /* 4186 * We don't want to spend the CPU manipulating the 4187 * size-ordered tree, so clear the range_tree ops. 4188 */ 4189 msp->ms_allocatable->rt_ops = NULL; 4190 4191 for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim); 4192 (*vim_idxp)++) { 4193 vdev_indirect_mapping_entry_phys_t *vimep = 4194 &vim->vim_entries[*vim_idxp]; 4195 uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep); 4196 uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst); 4197 ASSERT3U(ent_offset, >=, msp->ms_start); 4198 if (ent_offset >= msp->ms_start + msp->ms_size) 4199 break; 4200 4201 /* 4202 * Mappings do not cross metaslab boundaries, 4203 * because we create them by walking the metaslabs. 4204 */ 4205 ASSERT3U(ent_offset + ent_len, <=, 4206 msp->ms_start + msp->ms_size); 4207 range_tree_add(msp->ms_allocatable, ent_offset, ent_len); 4208 } 4209 4210 if (!msp->ms_loaded) 4211 msp->ms_loaded = B_TRUE; 4212 mutex_exit(&msp->ms_lock); 4213 } 4214 4215 static void 4216 zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb) 4217 { 4218 ASSERT(!dump_opt['L']); 4219 4220 vdev_t *rvd = spa->spa_root_vdev; 4221 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 4222 vdev_t *vd = rvd->vdev_child[c]; 4223 4224 ASSERT3U(c, ==, vd->vdev_id); 4225 4226 if (vd->vdev_ops != &vdev_indirect_ops) 4227 continue; 4228 4229 /* 4230 * Note: we don't check for mapping leaks on 4231 * removing vdevs because their ms_allocatable's 4232 * are used to look for leaks in allocated space. 4233 */ 4234 zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd); 4235 4236 /* 4237 * Normally, indirect vdevs don't have any 4238 * metaslabs. We want to set them up for 4239 * zio_claim(). 
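 * (vdev_metaslab_init() below creates them; each metaslab's
 * ms_allocatable is then populated from the vdev's indirect mapping by
 * load_indirect_ms_allocatable_tree().)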
4240 */ 4241 VERIFY0(vdev_metaslab_init(vd, 0)); 4242 4243 #if defined(DEBUG) 4244 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 4245 #endif 4246 uint64_t vim_idx = 0; 4247 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 4248 4249 (void) fprintf(stderr, 4250 "\rloading indirect vdev %llu, " 4251 "metaslab %llu of %llu ...", 4252 (longlong_t)vd->vdev_id, 4253 (longlong_t)vd->vdev_ms[m]->ms_id, 4254 (longlong_t)vd->vdev_ms_count); 4255 4256 load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m], 4257 &vim_idx); 4258 } 4259 ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim)); 4260 } 4261 } 4262 4263 static void 4264 zdb_leak_init(spa_t *spa, zdb_cb_t *zcb) 4265 { 4266 zcb->zcb_spa = spa; 4267 4268 if (dump_opt['L']) 4269 return; 4270 4271 dsl_pool_t *dp = spa->spa_dsl_pool; 4272 vdev_t *rvd = spa->spa_root_vdev; 4273 4274 /* 4275 * We are going to be changing the meaning of the metaslab's 4276 * ms_allocatable. Ensure that the allocator doesn't try to 4277 * use the tree. 4278 */ 4279 spa->spa_normal_class->mc_ops = &zdb_metaslab_ops; 4280 spa->spa_log_class->mc_ops = &zdb_metaslab_ops; 4281 4282 zcb->zcb_vd_obsolete_counts = 4283 umem_zalloc(rvd->vdev_children * sizeof (uint32_t *), 4284 UMEM_NOFAIL); 4285 4286 /* 4287 * For leak detection, we overload the ms_allocatable trees 4288 * to contain allocated segments instead of free segments. 4289 * As a result, we can't use the normal metaslab_load/unload 4290 * interfaces. 4291 */ 4292 zdb_leak_init_prepare_indirect_vdevs(spa, zcb); 4293 load_concrete_ms_allocatable_trees(spa, SM_ALLOC); 4294 4295 /* 4296 * On load_concrete_ms_allocatable_trees() we loaded all the 4297 * allocated entries from the ms_sm to the ms_allocatable for 4298 * each metaslab. If the pool has a checkpoint or is in the 4299 * middle of discarding a checkpoint, some of these blocks 4300 * may have been freed but their ms_sm may not have been 4301 * updated because they are referenced by the checkpoint. In 4302 * order to avoid false-positives during leak-detection, we 4303 * go through the vdev's checkpoint space map and exclude all 4304 * its entries from their relevant ms_allocatable. 4305 * 4306 * We also aggregate the space held by the checkpoint and add 4307 * it to zcb_checkpoint_size. 4308 * 4309 * Note that at this point we are also verifying that all the 4310 * entries on the checkpoint_sm are marked as allocated in 4311 * the ms_sm of their relevant metaslab. 
4312 * [see comment in checkpoint_sm_exclude_entry_cb()] 4313 */ 4314 zdb_leak_init_exclude_checkpoint(spa, zcb); 4315 ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa)); 4316 4317 /* for cleaner progress output */ 4318 (void) fprintf(stderr, "\n"); 4319 4320 if (bpobj_is_open(&dp->dp_obsolete_bpobj)) { 4321 ASSERT(spa_feature_is_enabled(spa, 4322 SPA_FEATURE_DEVICE_REMOVAL)); 4323 (void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj, 4324 increment_indirect_mapping_cb, zcb, NULL); 4325 } 4326 4327 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4328 zdb_ddt_leak_init(spa, zcb); 4329 spa_config_exit(spa, SCL_CONFIG, FTAG); 4330 } 4331 4332 static boolean_t 4333 zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb) 4334 { 4335 boolean_t leaks = B_FALSE; 4336 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 4337 uint64_t total_leaked = 0; 4338 4339 ASSERT(vim != NULL); 4340 4341 for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) { 4342 vdev_indirect_mapping_entry_phys_t *vimep = 4343 &vim->vim_entries[i]; 4344 uint64_t obsolete_bytes = 0; 4345 uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep); 4346 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 4347 4348 /* 4349 * This is not very efficient but it's easy to 4350 * verify correctness. 4351 */ 4352 for (uint64_t inner_offset = 0; 4353 inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst); 4354 inner_offset += 1 << vd->vdev_ashift) { 4355 if (range_tree_contains(msp->ms_allocatable, 4356 offset + inner_offset, 1 << vd->vdev_ashift)) { 4357 obsolete_bytes += 1 << vd->vdev_ashift; 4358 } 4359 } 4360 4361 int64_t bytes_leaked = obsolete_bytes - 4362 zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]; 4363 ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=, 4364 zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]); 4365 if (bytes_leaked != 0 && 4366 (vdev_obsolete_counts_are_precise(vd) || 4367 dump_opt['d'] >= 5)) { 4368 (void) printf("obsolete indirect mapping count " 4369 "mismatch on %llu:%llx:%llx : %llx bytes leaked\n", 4370 (u_longlong_t)vd->vdev_id, 4371 (u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep), 4372 (u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst), 4373 (u_longlong_t)bytes_leaked); 4374 } 4375 total_leaked += ABS(bytes_leaked); 4376 } 4377 4378 if (!vdev_obsolete_counts_are_precise(vd) && total_leaked > 0) { 4379 int pct_leaked = total_leaked * 100 / 4380 vdev_indirect_mapping_bytes_mapped(vim); 4381 (void) printf("cannot verify obsolete indirect mapping " 4382 "counts of vdev %llu because precise feature was not " 4383 "enabled when it was removed: %d%% (%llx bytes) of mapping" 4384 "unreferenced\n", 4385 (u_longlong_t)vd->vdev_id, pct_leaked, 4386 (u_longlong_t)total_leaked); 4387 } else if (total_leaked > 0) { 4388 (void) printf("obsolete indirect mapping count mismatch " 4389 "for vdev %llu -- %llx total bytes mismatched\n", 4390 (u_longlong_t)vd->vdev_id, 4391 (u_longlong_t)total_leaked); 4392 leaks |= B_TRUE; 4393 } 4394 4395 vdev_indirect_mapping_free_obsolete_counts(vim, 4396 zcb->zcb_vd_obsolete_counts[vd->vdev_id]); 4397 zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL; 4398 4399 return (leaks); 4400 } 4401 4402 static boolean_t 4403 zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb) 4404 { 4405 if (dump_opt['L']) 4406 return (B_FALSE); 4407 4408 boolean_t leaks = B_FALSE; 4409 4410 vdev_t *rvd = spa->spa_root_vdev; 4411 for (unsigned c = 0; c < rvd->vdev_children; c++) { 4412 vdev_t *vd = rvd->vdev_child[c]; 4413 #if DEBUG 4414 metaslab_group_t *mg = vd->vdev_mg; 4415 #endif 4416 4417 if 
(zcb->zcb_vd_obsolete_counts[c] != NULL) { 4418 leaks |= zdb_check_for_obsolete_leaks(vd, zcb); 4419 } 4420 4421 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 4422 metaslab_t *msp = vd->vdev_ms[m]; 4423 ASSERT3P(mg, ==, msp->ms_group); 4424 4425 /* 4426 * ms_allocatable has been overloaded 4427 * to contain allocated segments. Now that 4428 * we finished traversing all blocks, any 4429 * block that remains in the ms_allocatable 4430 * represents an allocated block that we 4431 * did not claim during the traversal. 4432 * Claimed blocks would have been removed 4433 * from the ms_allocatable. For indirect 4434 * vdevs, space remaining in the tree 4435 * represents parts of the mapping that are 4436 * not referenced, which is not a bug. 4437 */ 4438 if (vd->vdev_ops == &vdev_indirect_ops) { 4439 range_tree_vacate(msp->ms_allocatable, 4440 NULL, NULL); 4441 } else { 4442 range_tree_vacate(msp->ms_allocatable, 4443 zdb_leak, vd); 4444 } 4445 if (msp->ms_loaded) { 4446 msp->ms_loaded = B_FALSE; 4447 } 4448 } 4449 4450 } 4451 4452 umem_free(zcb->zcb_vd_obsolete_counts, 4453 rvd->vdev_children * sizeof (uint32_t *)); 4454 zcb->zcb_vd_obsolete_counts = NULL; 4455 4456 return (leaks); 4457 } 4458 4459 /* ARGSUSED */ 4460 static int 4461 count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 4462 { 4463 zdb_cb_t *zcb = arg; 4464 4465 if (dump_opt['b'] >= 5) { 4466 char blkbuf[BP_SPRINTF_LEN]; 4467 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); 4468 (void) printf("[%s] %s\n", 4469 "deferred free", blkbuf); 4470 } 4471 zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED); 4472 return (0); 4473 } 4474 4475 static int 4476 dump_block_stats(spa_t *spa) 4477 { 4478 zdb_cb_t zcb; 4479 zdb_blkstats_t *zb, *tzb; 4480 uint64_t norm_alloc, norm_space, total_alloc, total_found; 4481 int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | 4482 TRAVERSE_NO_DECRYPT | TRAVERSE_HARD; 4483 boolean_t leaks = B_FALSE; 4484 int err; 4485 4486 bzero(&zcb, sizeof (zcb)); 4487 (void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n", 4488 (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", 4489 (dump_opt['c'] == 1) ? "metadata " : "", 4490 dump_opt['c'] ? "checksums " : "", 4491 (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", 4492 !dump_opt['L'] ? "nothing leaked " : ""); 4493 4494 /* 4495 * When leak detection is enabled we load all space maps as SM_ALLOC 4496 * maps, then traverse the pool claiming each block we discover. If 4497 * the pool is perfectly consistent, the segment trees will be empty 4498 * when we're done. Anything left over is a leak; any block we can't 4499 * claim (because it's not part of any space map) is a double 4500 * allocation, reference to a freed block, or an unclaimed log block. 4501 * 4502 * When leak detection is disabled (-L option) we still traverse the 4503 * pool claiming each block we discover, but we skip opening any space 4504 * maps. 4505 */ 4506 bzero(&zcb, sizeof (zdb_cb_t)); 4507 zdb_leak_init(spa, &zcb); 4508 4509 /* 4510 * If there's a deferred-free bplist, process that first. 
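 * Deferred frees are still charged as allocated in the space maps, so
 * they are counted here (as ZDB_OT_DEFERRED) to keep the block
 * accounting consistent with the space map totals.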
4511 */ 4512 (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj, 4513 count_block_cb, &zcb, NULL); 4514 4515 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { 4516 (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj, 4517 count_block_cb, &zcb, NULL); 4518 } 4519 4520 zdb_claim_removing(spa, &zcb); 4521 4522 if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 4523 VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset, 4524 spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb, 4525 &zcb, NULL)); 4526 } 4527 4528 if (dump_opt['c'] > 1) 4529 flags |= TRAVERSE_PREFETCH_DATA; 4530 4531 zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa)); 4532 zcb.zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa)); 4533 zcb.zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa)); 4534 zcb.zcb_start = zcb.zcb_lastprint = gethrtime(); 4535 err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb); 4536 4537 /* 4538 * If we've traversed the data blocks then we need to wait for those 4539 * I/Os to complete. We leverage "The Godfather" zio to wait on 4540 * all async I/Os to complete. 4541 */ 4542 if (dump_opt['c']) { 4543 for (int i = 0; i < max_ncpus; i++) { 4544 (void) zio_wait(spa->spa_async_zio_root[i]); 4545 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 4546 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 4547 ZIO_FLAG_GODFATHER); 4548 } 4549 } 4550 4551 /* 4552 * Done after zio_wait() since zcb_haderrors is modified in 4553 * zdb_blkptr_done() 4554 */ 4555 zcb.zcb_haderrors |= err; 4556 4557 if (zcb.zcb_haderrors) { 4558 (void) printf("\nError counts:\n\n"); 4559 (void) printf("\t%5s %s\n", "errno", "count"); 4560 for (int e = 0; e < 256; e++) { 4561 if (zcb.zcb_errors[e] != 0) { 4562 (void) printf("\t%5d %llu\n", 4563 e, (u_longlong_t)zcb.zcb_errors[e]); 4564 } 4565 } 4566 } 4567 4568 /* 4569 * Report any leaked segments. 4570 */ 4571 leaks |= zdb_leak_fini(spa, &zcb); 4572 4573 tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL]; 4574 4575 norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 4576 norm_space = metaslab_class_get_space(spa_normal_class(spa)); 4577 4578 total_alloc = norm_alloc + 4579 metaslab_class_get_alloc(spa_log_class(spa)) + 4580 metaslab_class_get_alloc(spa_special_class(spa)) + 4581 metaslab_class_get_alloc(spa_dedup_class(spa)) + 4582 get_unflushed_alloc_space(spa); 4583 total_found = tzb->zb_asize - zcb.zcb_dedup_asize + 4584 zcb.zcb_removing_size + zcb.zcb_checkpoint_size; 4585 4586 if (total_found == total_alloc && !dump_opt['L']) { 4587 (void) printf("\n\tNo leaks (block sum matches space" 4588 " maps exactly)\n"); 4589 } else if (!dump_opt['L']) { 4590 (void) printf("block traversal size %llu != alloc %llu " 4591 "(%s %lld)\n", 4592 (u_longlong_t)total_found, 4593 (u_longlong_t)total_alloc, 4594 (dump_opt['L']) ? 
"unreachable" : "leaked", 4595 (longlong_t)(total_alloc - total_found)); 4596 leaks = B_TRUE; 4597 } 4598 4599 if (tzb->zb_count == 0) 4600 return (2); 4601 4602 (void) printf("\n"); 4603 (void) printf("\t%-16s %14llu\n", "bp count:", 4604 (u_longlong_t)tzb->zb_count); 4605 (void) printf("\t%-16s %14llu\n", "ganged count:", 4606 (longlong_t)tzb->zb_gangs); 4607 (void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:", 4608 (u_longlong_t)tzb->zb_lsize, 4609 (u_longlong_t)(tzb->zb_lsize / tzb->zb_count)); 4610 (void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n", 4611 "bp physical:", (u_longlong_t)tzb->zb_psize, 4612 (u_longlong_t)(tzb->zb_psize / tzb->zb_count), 4613 (double)tzb->zb_lsize / tzb->zb_psize); 4614 (void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n", 4615 "bp allocated:", (u_longlong_t)tzb->zb_asize, 4616 (u_longlong_t)(tzb->zb_asize / tzb->zb_count), 4617 (double)tzb->zb_lsize / tzb->zb_asize); 4618 (void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n", 4619 "bp deduped:", (u_longlong_t)zcb.zcb_dedup_asize, 4620 (u_longlong_t)zcb.zcb_dedup_blocks, 4621 (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0); 4622 (void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:", 4623 (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space); 4624 4625 if (spa_special_class(spa)->mc_rotor != NULL) { 4626 uint64_t alloc = metaslab_class_get_alloc( 4627 spa_special_class(spa)); 4628 uint64_t space = metaslab_class_get_space( 4629 spa_special_class(spa)); 4630 4631 (void) printf("\t%-16s %14llu used: %5.2f%%\n", 4632 "Special class", (u_longlong_t)alloc, 4633 100.0 * alloc / space); 4634 } 4635 4636 if (spa_dedup_class(spa)->mc_rotor != NULL) { 4637 uint64_t alloc = metaslab_class_get_alloc( 4638 spa_dedup_class(spa)); 4639 uint64_t space = metaslab_class_get_space( 4640 spa_dedup_class(spa)); 4641 4642 (void) printf("\t%-16s %14llu used: %5.2f%%\n", 4643 "Dedup class", (u_longlong_t)alloc, 4644 100.0 * alloc / space); 4645 } 4646 4647 for (bp_embedded_type_t i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) { 4648 if (zcb.zcb_embedded_blocks[i] == 0) 4649 continue; 4650 (void) printf("\n"); 4651 (void) printf("\tadditional, non-pointer bps of type %u: " 4652 "%10llu\n", 4653 i, (u_longlong_t)zcb.zcb_embedded_blocks[i]); 4654 4655 if (dump_opt['b'] >= 3) { 4656 (void) printf("\t number of (compressed) bytes: " 4657 "number of bps\n"); 4658 dump_histogram(zcb.zcb_embedded_histogram[i], 4659 sizeof (zcb.zcb_embedded_histogram[i]) / 4660 sizeof (zcb.zcb_embedded_histogram[i][0]), 0); 4661 } 4662 } 4663 4664 if (tzb->zb_ditto_samevdev != 0) { 4665 (void) printf("\tDittoed blocks on same vdev: %llu\n", 4666 (longlong_t)tzb->zb_ditto_samevdev); 4667 } 4668 if (tzb->zb_ditto_same_ms != 0) { 4669 (void) printf("\tDittoed blocks in same metaslab: %llu\n", 4670 (longlong_t)tzb->zb_ditto_same_ms); 4671 } 4672 4673 for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) { 4674 vdev_t *vd = spa->spa_root_vdev->vdev_child[v]; 4675 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 4676 4677 if (vim == NULL) { 4678 continue; 4679 } 4680 4681 char mem[32]; 4682 zdb_nicenum(vdev_indirect_mapping_num_entries(vim), 4683 mem, vdev_indirect_mapping_size(vim)); 4684 4685 (void) printf("\tindirect vdev id %llu has %llu segments " 4686 "(%s in memory)\n", 4687 (longlong_t)vd->vdev_id, 4688 (longlong_t)vdev_indirect_mapping_num_entries(vim), mem); 4689 } 4690 4691 if (dump_opt['b'] >= 2) { 4692 int l, t, level; 4693 (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE" 4694 "\t 
avg\t comp\t%%Total\tType\n"); 4695 4696 for (t = 0; t <= ZDB_OT_TOTAL; t++) { 4697 char csize[32], lsize[32], psize[32], asize[32]; 4698 char avg[32], gang[32]; 4699 const char *typename; 4700 4701 /* make sure nicenum has enough space */ 4702 CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ); 4703 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ); 4704 CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ); 4705 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ); 4706 CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ); 4707 CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ); 4708 4709 if (t < DMU_OT_NUMTYPES) 4710 typename = dmu_ot[t].ot_name; 4711 else 4712 typename = zdb_ot_extname[t - DMU_OT_NUMTYPES]; 4713 4714 if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) { 4715 (void) printf("%6s\t%5s\t%5s\t%5s" 4716 "\t%5s\t%5s\t%6s\t%s\n", 4717 "-", 4718 "-", 4719 "-", 4720 "-", 4721 "-", 4722 "-", 4723 "-", 4724 typename); 4725 continue; 4726 } 4727 4728 for (l = ZB_TOTAL - 1; l >= -1; l--) { 4729 level = (l == -1 ? ZB_TOTAL : l); 4730 zb = &zcb.zcb_type[level][t]; 4731 4732 if (zb->zb_asize == 0) 4733 continue; 4734 4735 if (dump_opt['b'] < 3 && level != ZB_TOTAL) 4736 continue; 4737 4738 if (level == 0 && zb->zb_asize == 4739 zcb.zcb_type[ZB_TOTAL][t].zb_asize) 4740 continue; 4741 4742 zdb_nicenum(zb->zb_count, csize, 4743 sizeof (csize)); 4744 zdb_nicenum(zb->zb_lsize, lsize, 4745 sizeof (lsize)); 4746 zdb_nicenum(zb->zb_psize, psize, 4747 sizeof (psize)); 4748 zdb_nicenum(zb->zb_asize, asize, 4749 sizeof (asize)); 4750 zdb_nicenum(zb->zb_asize / zb->zb_count, avg, 4751 sizeof (avg)); 4752 zdb_nicenum(zb->zb_gangs, gang, sizeof (gang)); 4753 4754 (void) printf("%6s\t%5s\t%5s\t%5s\t%5s" 4755 "\t%5.2f\t%6.2f\t", 4756 csize, lsize, psize, asize, avg, 4757 (double)zb->zb_lsize / zb->zb_psize, 4758 100.0 * zb->zb_asize / tzb->zb_asize); 4759 4760 if (level == ZB_TOTAL) 4761 (void) printf("%s\n", typename); 4762 else 4763 (void) printf(" L%d %s\n", 4764 level, typename); 4765 4766 if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) { 4767 (void) printf("\t number of ganged " 4768 "blocks: %s\n", gang); 4769 } 4770 4771 if (dump_opt['b'] >= 4) { 4772 (void) printf("psize " 4773 "(in 512-byte sectors): " 4774 "number of blocks\n"); 4775 dump_histogram(zb->zb_psize_histogram, 4776 PSIZE_HISTO_SIZE, 0); 4777 } 4778 } 4779 } 4780 } 4781 4782 (void) printf("\n"); 4783 4784 if (leaks) 4785 return (2); 4786 4787 if (zcb.zcb_haderrors) 4788 return (3); 4789 4790 return (0); 4791 } 4792 4793 typedef struct zdb_ddt_entry { 4794 ddt_key_t zdde_key; 4795 uint64_t zdde_ref_blocks; 4796 uint64_t zdde_ref_lsize; 4797 uint64_t zdde_ref_psize; 4798 uint64_t zdde_ref_dsize; 4799 avl_node_t zdde_node; 4800 } zdb_ddt_entry_t; 4801 4802 /* ARGSUSED */ 4803 static int 4804 zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 4805 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 4806 { 4807 avl_tree_t *t = arg; 4808 avl_index_t where; 4809 zdb_ddt_entry_t *zdde, zdde_search; 4810 4811 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 4812 return (0); 4813 4814 if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) { 4815 (void) printf("traversing objset %llu, %llu objects, " 4816 "%lu blocks so far\n", 4817 (u_longlong_t)zb->zb_objset, 4818 (u_longlong_t)BP_GET_FILL(bp), 4819 avl_numnodes(t)); 4820 } 4821 4822 if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF || 4823 BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp))) 4824 return (0); 4825 4826 ddt_key_fill(&zdde_search.zdde_key, bp); 4827 4828 zdde = avl_find(t, &zdde_search, &where); 4829 
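	/*
	 * First sighting of this dedup key: insert a zeroed entry at the
	 * tree position computed by avl_find() above.
	 */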
4830 if (zdde == NULL) { 4831 zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL); 4832 zdde->zdde_key = zdde_search.zdde_key; 4833 avl_insert(t, zdde, where); 4834 } 4835 4836 zdde->zdde_ref_blocks += 1; 4837 zdde->zdde_ref_lsize += BP_GET_LSIZE(bp); 4838 zdde->zdde_ref_psize += BP_GET_PSIZE(bp); 4839 zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp); 4840 4841 return (0); 4842 } 4843 4844 static void 4845 dump_simulated_ddt(spa_t *spa) 4846 { 4847 avl_tree_t t; 4848 void *cookie = NULL; 4849 zdb_ddt_entry_t *zdde; 4850 ddt_histogram_t ddh_total; 4851 ddt_stat_t dds_total; 4852 4853 bzero(&ddh_total, sizeof (ddh_total)); 4854 bzero(&dds_total, sizeof (dds_total)); 4855 avl_create(&t, ddt_entry_compare, 4856 sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node)); 4857 4858 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4859 4860 (void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | 4861 TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t); 4862 4863 spa_config_exit(spa, SCL_CONFIG, FTAG); 4864 4865 while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) { 4866 ddt_stat_t dds; 4867 uint64_t refcnt = zdde->zdde_ref_blocks; 4868 ASSERT(refcnt != 0); 4869 4870 dds.dds_blocks = zdde->zdde_ref_blocks / refcnt; 4871 dds.dds_lsize = zdde->zdde_ref_lsize / refcnt; 4872 dds.dds_psize = zdde->zdde_ref_psize / refcnt; 4873 dds.dds_dsize = zdde->zdde_ref_dsize / refcnt; 4874 4875 dds.dds_ref_blocks = zdde->zdde_ref_blocks; 4876 dds.dds_ref_lsize = zdde->zdde_ref_lsize; 4877 dds.dds_ref_psize = zdde->zdde_ref_psize; 4878 dds.dds_ref_dsize = zdde->zdde_ref_dsize; 4879 4880 ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1], 4881 &dds, 0); 4882 4883 umem_free(zdde, sizeof (*zdde)); 4884 } 4885 4886 avl_destroy(&t); 4887 4888 ddt_histogram_stat(&dds_total, &ddh_total); 4889 4890 (void) printf("Simulated DDT histogram:\n"); 4891 4892 zpool_dump_ddt(&dds_total, &ddh_total); 4893 4894 dump_dedup_ratio(&dds_total); 4895 } 4896 4897 static int 4898 verify_device_removal_feature_counts(spa_t *spa) 4899 { 4900 uint64_t dr_feature_refcount = 0; 4901 uint64_t oc_feature_refcount = 0; 4902 uint64_t indirect_vdev_count = 0; 4903 uint64_t precise_vdev_count = 0; 4904 uint64_t obsolete_counts_object_count = 0; 4905 uint64_t obsolete_sm_count = 0; 4906 uint64_t obsolete_counts_count = 0; 4907 uint64_t scip_count = 0; 4908 uint64_t obsolete_bpobj_count = 0; 4909 int ret = 0; 4910 4911 spa_condensing_indirect_phys_t *scip = 4912 &spa->spa_condensing_indirect_phys; 4913 if (scip->scip_next_mapping_object != 0) { 4914 vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev]; 4915 ASSERT(scip->scip_prev_obsolete_sm_object != 0); 4916 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 4917 4918 (void) printf("Condensing indirect vdev %llu: new mapping " 4919 "object %llu, prev obsolete sm %llu\n", 4920 (u_longlong_t)scip->scip_vdev, 4921 (u_longlong_t)scip->scip_next_mapping_object, 4922 (u_longlong_t)scip->scip_prev_obsolete_sm_object); 4923 if (scip->scip_prev_obsolete_sm_object != 0) { 4924 space_map_t *prev_obsolete_sm = NULL; 4925 VERIFY0(space_map_open(&prev_obsolete_sm, 4926 spa->spa_meta_objset, 4927 scip->scip_prev_obsolete_sm_object, 4928 0, vd->vdev_asize, 0)); 4929 dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm); 4930 (void) printf("\n"); 4931 space_map_close(prev_obsolete_sm); 4932 } 4933 4934 scip_count += 2; 4935 } 4936 4937 for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 4938 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 4939 vdev_indirect_config_t *vic = 
&vd->vdev_indirect_config; 4940 4941 if (vic->vic_mapping_object != 0) { 4942 ASSERT(vd->vdev_ops == &vdev_indirect_ops || 4943 vd->vdev_removing); 4944 indirect_vdev_count++; 4945 4946 if (vd->vdev_indirect_mapping->vim_havecounts) { 4947 obsolete_counts_count++; 4948 } 4949 } 4950 if (vdev_obsolete_counts_are_precise(vd)) { 4951 ASSERT(vic->vic_mapping_object != 0); 4952 precise_vdev_count++; 4953 } 4954 if (vdev_obsolete_sm_object(vd) != 0) { 4955 ASSERT(vic->vic_mapping_object != 0); 4956 obsolete_sm_count++; 4957 } 4958 } 4959 4960 (void) feature_get_refcount(spa, 4961 &spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL], 4962 &dr_feature_refcount); 4963 (void) feature_get_refcount(spa, 4964 &spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS], 4965 &oc_feature_refcount); 4966 4967 if (dr_feature_refcount != indirect_vdev_count) { 4968 ret = 1; 4969 (void) printf("Number of indirect vdevs (%llu) " \ 4970 "does not match feature count (%llu)\n", 4971 (u_longlong_t)indirect_vdev_count, 4972 (u_longlong_t)dr_feature_refcount); 4973 } else { 4974 (void) printf("Verified device_removal feature refcount " \ 4975 "of %llu is correct\n", 4976 (u_longlong_t)dr_feature_refcount); 4977 } 4978 4979 if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT, 4980 DMU_POOL_OBSOLETE_BPOBJ) == 0) { 4981 obsolete_bpobj_count++; 4982 } 4983 4984 4985 obsolete_counts_object_count = precise_vdev_count; 4986 obsolete_counts_object_count += obsolete_sm_count; 4987 obsolete_counts_object_count += obsolete_counts_count; 4988 obsolete_counts_object_count += scip_count; 4989 obsolete_counts_object_count += obsolete_bpobj_count; 4990 obsolete_counts_object_count += remap_deadlist_count; 4991 4992 if (oc_feature_refcount != obsolete_counts_object_count) { 4993 ret = 1; 4994 (void) printf("Number of obsolete counts objects (%llu) " \ 4995 "does not match feature count (%llu)\n", 4996 (u_longlong_t)obsolete_counts_object_count, 4997 (u_longlong_t)oc_feature_refcount); 4998 (void) printf("pv:%llu os:%llu oc:%llu sc:%llu " 4999 "ob:%llu rd:%llu\n", 5000 (u_longlong_t)precise_vdev_count, 5001 (u_longlong_t)obsolete_sm_count, 5002 (u_longlong_t)obsolete_counts_count, 5003 (u_longlong_t)scip_count, 5004 (u_longlong_t)obsolete_bpobj_count, 5005 (u_longlong_t)remap_deadlist_count); 5006 } else { 5007 (void) printf("Verified indirect_refcount feature refcount " \ 5008 "of %llu is correct\n", 5009 (u_longlong_t)oc_feature_refcount); 5010 } 5011 return (ret); 5012 } 5013 5014 static void 5015 zdb_set_skip_mmp(char *target) 5016 { 5017 spa_t *spa; 5018 5019 /* 5020 * Disable the activity check to allow examination of 5021 * active pools. 5022 */ 5023 mutex_enter(&spa_namespace_lock); 5024 if ((spa = spa_lookup(target)) != NULL) { 5025 spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP; 5026 } 5027 mutex_exit(&spa_namespace_lock); 5028 } 5029 5030 #define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE" 5031 /* 5032 * Import the checkpointed state of the pool specified by the target 5033 * parameter as readonly. The function also accepts a pool config 5034 * as an optional parameter, else it attempts to infer the config by 5035 * the name of the target pool. 5036 * 5037 * Note that the checkpointed state's pool name will be the name of 5038 * the original pool with the above suffix appened to it. In addition, 5039 * if the target is not a pool name (e.g. a path to a dataset) then 5040 * the new_path parameter is populated with the updated path to 5041 * reflect the fact that we are looking into the checkpointed state. 
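 *
 * For example (illustrative names only): importing the checkpointed
 * state of a pool named "tank" produces a pool named
 * "tank_CHECKPOINTED_UNIVERSE", and a target of "tank/fs" would set
 * new_path to "tank_CHECKPOINTED_UNIVERSE/fs". This is the path taken
 * when the checkpointed state of a pool is examined (e.g. with the -k
 * option).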
5042  *
5043  * The function returns a newly-allocated copy of the name of the
5044  * pool containing the checkpointed state. When this copy is no
5045  * longer needed it should be freed with free(3C). Same thing
5046  * applies to the new_path parameter if allocated.
5047  */
5048 static char *
5049 import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
5050 {
5051 	int error = 0;
5052 	char *poolname, *bogus_name;
5053 
5054 	/* If the target is not a pool, then extract the pool name */
5055 	char *path_start = strchr(target, '/');
5056 	if (path_start != NULL) {
5057 		size_t poolname_len = path_start - target;
5058 		poolname = strndup(target, poolname_len);
5059 	} else {
5060 		poolname = target;
5061 	}
5062 
5063 	if (cfg == NULL) {
5064 		zdb_set_skip_mmp(poolname);
5065 		error = spa_get_stats(poolname, &cfg, NULL, 0);
5066 		if (error != 0) {
5067 			fatal("Tried to read config of pool \"%s\" but "
5068 			    "spa_get_stats() failed with error %d\n",
5069 			    poolname, error);
5070 		}
5071 	}
5072 
5073 	(void) asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX);
5074 	fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
5075 
5076 	error = spa_import(bogus_name, cfg, NULL,
5077 	    ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
5078 	    ZFS_IMPORT_SKIP_MMP);
5079 	if (error != 0) {
5080 		fatal("Tried to import pool \"%s\" but spa_import() failed "
5081 		    "with error %d\n", bogus_name, error);
5082 	}
5083 
5084 	if (new_path != NULL && path_start != NULL)
5085 		(void) asprintf(new_path, "%s%s", bogus_name, path_start);
5086 
5087 	if (target != poolname)
5088 		free(poolname);
5089 
5090 	return (bogus_name);
5091 }
5092 
5093 typedef struct verify_checkpoint_sm_entry_cb_arg {
5094 	vdev_t *vcsec_vd;
5095 
5096 	/* the following fields are only used for printing progress */
5097 	uint64_t vcsec_entryid;
5098 	uint64_t vcsec_num_entries;
5099 } verify_checkpoint_sm_entry_cb_arg_t;
5100 
5101 #define	ENTRIES_PER_PROGRESS_UPDATE 10000
5102 
5103 static int
5104 verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
5105 {
5106 	verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
5107 	vdev_t *vd = vcsec->vcsec_vd;
5108 	metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
5109 	uint64_t end = sme->sme_offset + sme->sme_run;
5110 
5111 	ASSERT(sme->sme_type == SM_FREE);
5112 
5113 	if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
5114 		(void) fprintf(stderr,
5115 		    "\rverifying vdev %llu, space map entry %llu of %llu ...",
5116 		    (longlong_t)vd->vdev_id,
5117 		    (longlong_t)vcsec->vcsec_entryid,
5118 		    (longlong_t)vcsec->vcsec_num_entries);
5119 	}
5120 	vcsec->vcsec_entryid++;
5121 
5122 	/*
5123 	 * See comment in checkpoint_sm_exclude_entry_cb()
5124 	 */
5125 	VERIFY3U(sme->sme_offset, >=, ms->ms_start);
5126 	VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
5127 
5128 	/*
5129 	 * The entries in the vdev_checkpoint_sm should be marked as
5130 	 * allocated in the checkpointed state of the pool, therefore
5131 	 * their respective ms_allocatable trees should not contain them.
5132 	 */
5133 	mutex_enter(&ms->ms_lock);
5134 	range_tree_verify_not_present(ms->ms_allocatable,
5135 	    sme->sme_offset, sme->sme_run);
5136 	mutex_exit(&ms->ms_lock);
5137 
5138 	return (0);
5139 }
5140 
5141 /*
5142  * Verify that all segments in the vdev_checkpoint_sm are allocated
5143  * according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
5144  * ms_allocatable).
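 *
 * (Put differently: any segment recorded in a vdev_checkpoint_sm was
 * freed in the current state after the checkpoint was taken, so it must
 * still show up as allocated in the checkpointed state; otherwise the
 * checkpoint would be referencing free space.)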
5145  *
5146  * Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
5147  * each vdev in the current state of the pool to the metaslab space maps
5148  * (ms_sm) of the checkpointed state of the pool.
5149  *
5150  * Note that the function changes the state of the ms_allocatable
5151  * trees of the current spa_t. The entries of these ms_allocatable
5152  * trees are cleared out and then repopulated with the free
5153  * entries of their respective ms_sm space maps.
5154  */
5155 static void
5156 verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
5157 {
5158 	vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
5159 	vdev_t *current_rvd = current->spa_root_vdev;
5160 
5161 	load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
5162 
5163 	for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
5164 		vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
5165 		vdev_t *current_vd = current_rvd->vdev_child[c];
5166 
5167 		space_map_t *checkpoint_sm = NULL;
5168 		uint64_t checkpoint_sm_obj;
5169 
5170 		if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
5171 			/*
5172 			 * Since we don't allow device removal in a pool
5173 			 * that has a checkpoint, we expect that all removed
5174 			 * vdevs were removed from the pool before the
5175 			 * checkpoint.
5176 			 */
5177 			ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
5178 			continue;
5179 		}
5180 
5181 		/*
5182 		 * If the checkpoint space map doesn't exist, then nothing
5183 		 * here is checkpointed so there's nothing to verify.
5184 		 */
5185 		if (current_vd->vdev_top_zap == 0 ||
5186 		    zap_contains(spa_meta_objset(current),
5187 		    current_vd->vdev_top_zap,
5188 		    VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
5189 			continue;
5190 
5191 		VERIFY0(zap_lookup(spa_meta_objset(current),
5192 		    current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
5193 		    sizeof (uint64_t), 1, &checkpoint_sm_obj));
5194 
5195 		VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
5196 		    checkpoint_sm_obj, 0, current_vd->vdev_asize,
5197 		    current_vd->vdev_ashift));
5198 
5199 		verify_checkpoint_sm_entry_cb_arg_t vcsec;
5200 		vcsec.vcsec_vd = ckpoint_vd;
5201 		vcsec.vcsec_entryid = 0;
5202 		vcsec.vcsec_num_entries =
5203 		    space_map_length(checkpoint_sm) / sizeof (uint64_t);
5204 		VERIFY0(space_map_iterate(checkpoint_sm,
5205 		    space_map_length(checkpoint_sm),
5206 		    verify_checkpoint_sm_entry_cb, &vcsec));
5207 		dump_spacemap(current->spa_meta_objset, checkpoint_sm);
5208 		space_map_close(checkpoint_sm);
5209 	}
5210 
5211 	/*
5212 	 * If we've added vdevs since we took the checkpoint, ensure
5213 	 * that their checkpoint space maps are empty.
5214 	 */
5215 	if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
5216 		for (uint64_t c = ckpoint_rvd->vdev_children;
5217 		    c < current_rvd->vdev_children; c++) {
5218 			vdev_t *current_vd = current_rvd->vdev_child[c];
5219 			VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
5220 		}
5221 	}
5222 
5223 	/* for cleaner progress output */
5224 	(void) fprintf(stderr, "\n");
5225 }
5226 
5227 /*
5228  * Verifies that all space that's allocated in the checkpoint is
5229  * still allocated in the current version, by checking that everything
5230  * in checkpoint's ms_allocatable (which is actually allocated, not
5231  * allocatable/free) is not present in current's ms_allocatable.
5232  *
5233  * Note that the function changes the state of the ms_allocatable
5234  * trees of both spas when called. The entries of all ms_allocatable
5235  * trees are cleared out and then repopulated from their respective
5236  * ms_sm space maps.
In the checkpointed state we load the allocated 5237 * entries, and in the current state we load the free entries. 5238 */ 5239 static void 5240 verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current) 5241 { 5242 vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev; 5243 vdev_t *current_rvd = current->spa_root_vdev; 5244 5245 load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC); 5246 load_concrete_ms_allocatable_trees(current, SM_FREE); 5247 5248 for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) { 5249 vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i]; 5250 vdev_t *current_vd = current_rvd->vdev_child[i]; 5251 5252 if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) { 5253 /* 5254 * See comment in verify_checkpoint_vdev_spacemaps() 5255 */ 5256 ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops); 5257 continue; 5258 } 5259 5260 for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) { 5261 metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m]; 5262 metaslab_t *current_msp = current_vd->vdev_ms[m]; 5263 5264 (void) fprintf(stderr, 5265 "\rverifying vdev %llu of %llu, " 5266 "metaslab %llu of %llu ...", 5267 (longlong_t)current_vd->vdev_id, 5268 (longlong_t)current_rvd->vdev_children, 5269 (longlong_t)current_vd->vdev_ms[m]->ms_id, 5270 (longlong_t)current_vd->vdev_ms_count); 5271 5272 /* 5273 * We walk through the ms_allocatable trees that 5274 * are loaded with the allocated blocks from the 5275 * ms_sm spacemaps of the checkpoint. For each 5276 * one of these ranges we ensure that none of them 5277 * exists in the ms_allocatable trees of the 5278 * current state which are loaded with the ranges 5279 * that are currently free. 5280 * 5281 * This way we ensure that none of the blocks that 5282 * are part of the checkpoint were freed by mistake. 5283 */ 5284 range_tree_walk(ckpoint_msp->ms_allocatable, 5285 (range_tree_func_t *)range_tree_verify_not_present, 5286 current_msp->ms_allocatable); 5287 } 5288 } 5289 5290 /* for cleaner progress output */ 5291 (void) fprintf(stderr, "\n"); 5292 } 5293 5294 static void 5295 verify_checkpoint_blocks(spa_t *spa) 5296 { 5297 ASSERT(!dump_opt['L']); 5298 5299 spa_t *checkpoint_spa; 5300 char *checkpoint_pool; 5301 nvlist_t *config = NULL; 5302 int error = 0; 5303 5304 /* 5305 * We import the checkpointed state of the pool (under a different 5306 * name) so we can do verification on it against the current state 5307 * of the pool. 5308 */ 5309 checkpoint_pool = import_checkpointed_state(spa->spa_name, config, 5310 NULL); 5311 ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0); 5312 5313 error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG); 5314 if (error != 0) { 5315 fatal("Tried to open pool \"%s\" but spa_open() failed with " 5316 "error %d\n", checkpoint_pool, error); 5317 } 5318 5319 /* 5320 * Ensure that ranges in the checkpoint space maps of each vdev 5321 * are allocated according to the checkpointed state's metaslab 5322 * space maps. 5323 */ 5324 verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa); 5325 5326 /* 5327 * Ensure that allocated ranges in the checkpoint's metaslab 5328 * space maps remain allocated in the metaslab space maps of 5329 * the current state. 5330 */ 5331 verify_checkpoint_ms_spacemaps(checkpoint_spa, spa); 5332 5333 /* 5334 * Once we are done, we get rid of the checkpointed state. 
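 * (It was imported read-only under the "_CHECKPOINTED_UNIVERSE" name
 * solely for this comparison.)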
5335 */ 5336 spa_close(checkpoint_spa, FTAG); 5337 free(checkpoint_pool); 5338 } 5339 5340 static void 5341 dump_leftover_checkpoint_blocks(spa_t *spa) 5342 { 5343 vdev_t *rvd = spa->spa_root_vdev; 5344 5345 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 5346 vdev_t *vd = rvd->vdev_child[i]; 5347 5348 space_map_t *checkpoint_sm = NULL; 5349 uint64_t checkpoint_sm_obj; 5350 5351 if (vd->vdev_top_zap == 0) 5352 continue; 5353 5354 if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap, 5355 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0) 5356 continue; 5357 5358 VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap, 5359 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, 5360 sizeof (uint64_t), 1, &checkpoint_sm_obj)); 5361 5362 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa), 5363 checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift)); 5364 dump_spacemap(spa->spa_meta_objset, checkpoint_sm); 5365 space_map_close(checkpoint_sm); 5366 } 5367 } 5368 5369 static int 5370 verify_checkpoint(spa_t *spa) 5371 { 5372 uberblock_t checkpoint; 5373 int error; 5374 5375 if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) 5376 return (0); 5377 5378 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 5379 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 5380 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 5381 5382 if (error == ENOENT && !dump_opt['L']) { 5383 /* 5384 * If the feature is active but the uberblock is missing 5385 * then we must be in the middle of discarding the 5386 * checkpoint. 5387 */ 5388 (void) printf("\nPartially discarded checkpoint " 5389 "state found:\n"); 5390 dump_leftover_checkpoint_blocks(spa); 5391 return (0); 5392 } else if (error != 0) { 5393 (void) printf("lookup error %d when looking for " 5394 "checkpointed uberblock in MOS\n", error); 5395 return (error); 5396 } 5397 dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n"); 5398 5399 if (checkpoint.ub_checkpoint_txg == 0) { 5400 (void) printf("\nub_checkpoint_txg not set in checkpointed " 5401 "uberblock\n"); 5402 error = 3; 5403 } 5404 5405 if (error == 0 && !dump_opt['L']) 5406 verify_checkpoint_blocks(spa); 5407 5408 return (error); 5409 } 5410 5411 /* ARGSUSED */ 5412 static void 5413 mos_leaks_cb(void *arg, uint64_t start, uint64_t size) 5414 { 5415 for (uint64_t i = start; i < size; i++) { 5416 (void) printf("MOS object %llu referenced but not allocated\n", 5417 (u_longlong_t)i); 5418 } 5419 } 5420 5421 static range_tree_t *mos_refd_objs; 5422 5423 static void 5424 mos_obj_refd(uint64_t obj) 5425 { 5426 if (obj != 0 && mos_refd_objs != NULL) 5427 range_tree_add(mos_refd_objs, obj, 1); 5428 } 5429 5430 static void 5431 mos_leak_vdev_top_zap(vdev_t *vd) 5432 { 5433 uint64_t ms_flush_data_obj; 5434 5435 int error = zap_lookup(spa_meta_objset(vd->vdev_spa), 5436 vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, 5437 sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj); 5438 if (error == ENOENT) 5439 return; 5440 ASSERT0(error); 5441 5442 mos_obj_refd(ms_flush_data_obj); 5443 } 5444 5445 static void 5446 mos_leak_vdev(vdev_t *vd) 5447 { 5448 mos_obj_refd(vd->vdev_dtl_object); 5449 mos_obj_refd(vd->vdev_ms_array); 5450 mos_obj_refd(vd->vdev_indirect_config.vic_births_object); 5451 mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object); 5452 mos_obj_refd(vd->vdev_leaf_zap); 5453 if (vd->vdev_checkpoint_sm != NULL) 5454 mos_obj_refd(vd->vdev_checkpoint_sm->sm_object); 5455 if (vd->vdev_indirect_mapping != NULL) { 5456 mos_obj_refd(vd->vdev_indirect_mapping-> 5457 
vim_phys->vimp_counts_object); 5458 } 5459 if (vd->vdev_obsolete_sm != NULL) 5460 mos_obj_refd(vd->vdev_obsolete_sm->sm_object); 5461 5462 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) { 5463 metaslab_t *ms = vd->vdev_ms[m]; 5464 mos_obj_refd(space_map_object(ms->ms_sm)); 5465 } 5466 5467 if (vd->vdev_top_zap != 0) { 5468 mos_obj_refd(vd->vdev_top_zap); 5469 mos_leak_vdev_top_zap(vd); 5470 } 5471 5472 for (uint64_t c = 0; c < vd->vdev_children; c++) { 5473 mos_leak_vdev(vd->vdev_child[c]); 5474 } 5475 } 5476 5477 static void 5478 mos_leak_log_spacemaps(spa_t *spa) 5479 { 5480 uint64_t spacemap_zap; 5481 5482 int error = zap_lookup(spa_meta_objset(spa), 5483 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP, 5484 sizeof (spacemap_zap), 1, &spacemap_zap); 5485 if (error == ENOENT) 5486 return; 5487 ASSERT0(error); 5488 5489 mos_obj_refd(spacemap_zap); 5490 for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg); 5491 sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) 5492 mos_obj_refd(sls->sls_sm_obj); 5493 } 5494 5495 static int 5496 dump_mos_leaks(spa_t *spa) 5497 { 5498 int rv = 0; 5499 objset_t *mos = spa->spa_meta_objset; 5500 dsl_pool_t *dp = spa->spa_dsl_pool; 5501 5502 /* Visit and mark all referenced objects in the MOS */ 5503 5504 mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT); 5505 mos_obj_refd(spa->spa_pool_props_object); 5506 mos_obj_refd(spa->spa_config_object); 5507 mos_obj_refd(spa->spa_ddt_stat_object); 5508 mos_obj_refd(spa->spa_feat_desc_obj); 5509 mos_obj_refd(spa->spa_feat_enabled_txg_obj); 5510 mos_obj_refd(spa->spa_feat_for_read_obj); 5511 mos_obj_refd(spa->spa_feat_for_write_obj); 5512 mos_obj_refd(spa->spa_history); 5513 mos_obj_refd(spa->spa_errlog_last); 5514 mos_obj_refd(spa->spa_errlog_scrub); 5515 mos_obj_refd(spa->spa_all_vdev_zaps); 5516 mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj); 5517 mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj); 5518 mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj); 5519 bpobj_count_refd(&spa->spa_deferred_bpobj); 5520 mos_obj_refd(dp->dp_empty_bpobj); 5521 bpobj_count_refd(&dp->dp_obsolete_bpobj); 5522 bpobj_count_refd(&dp->dp_free_bpobj); 5523 mos_obj_refd(spa->spa_l2cache.sav_object); 5524 mos_obj_refd(spa->spa_spares.sav_object); 5525 5526 if (spa->spa_syncing_log_sm != NULL) 5527 mos_obj_refd(spa->spa_syncing_log_sm->sm_object); 5528 mos_leak_log_spacemaps(spa); 5529 5530 mos_obj_refd(spa->spa_condensing_indirect_phys. 5531 scip_next_mapping_object); 5532 mos_obj_refd(spa->spa_condensing_indirect_phys. 
5533 scip_prev_obsolete_sm_object); 5534 if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) { 5535 vdev_indirect_mapping_t *vim = 5536 vdev_indirect_mapping_open(mos, 5537 spa->spa_condensing_indirect_phys.scip_next_mapping_object); 5538 mos_obj_refd(vim->vim_phys->vimp_counts_object); 5539 vdev_indirect_mapping_close(vim); 5540 } 5541 5542 if (dp->dp_origin_snap != NULL) { 5543 dsl_dataset_t *ds; 5544 5545 dsl_pool_config_enter(dp, FTAG); 5546 VERIFY0(dsl_dataset_hold_obj(dp, 5547 dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj, 5548 FTAG, &ds)); 5549 count_ds_mos_objects(ds); 5550 dump_deadlist(&ds->ds_deadlist); 5551 dsl_dataset_rele(ds, FTAG); 5552 dsl_pool_config_exit(dp, FTAG); 5553 5554 count_ds_mos_objects(dp->dp_origin_snap); 5555 dump_deadlist(&dp->dp_origin_snap->ds_deadlist); 5556 } 5557 count_dir_mos_objects(dp->dp_mos_dir); 5558 if (dp->dp_free_dir != NULL) 5559 count_dir_mos_objects(dp->dp_free_dir); 5560 if (dp->dp_leak_dir != NULL) 5561 count_dir_mos_objects(dp->dp_leak_dir); 5562 5563 mos_leak_vdev(spa->spa_root_vdev); 5564 5565 for (uint64_t class = 0; class < DDT_CLASSES; class++) { 5566 for (uint64_t type = 0; type < DDT_TYPES; type++) { 5567 for (uint64_t cksum = 0; 5568 cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) { 5569 ddt_t *ddt = spa->spa_ddt[cksum]; 5570 mos_obj_refd(ddt->ddt_object[type][class]); 5571 } 5572 } 5573 } 5574 5575 /* 5576 * Visit all allocated objects and make sure they are referenced. 5577 */ 5578 uint64_t object = 0; 5579 while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) { 5580 if (range_tree_contains(mos_refd_objs, object, 1)) { 5581 range_tree_remove(mos_refd_objs, object, 1); 5582 } else { 5583 dmu_object_info_t doi; 5584 const char *name; 5585 dmu_object_info(mos, object, &doi); 5586 if (doi.doi_type & DMU_OT_NEWTYPE) { 5587 dmu_object_byteswap_t bswap = 5588 DMU_OT_BYTESWAP(doi.doi_type); 5589 name = dmu_ot_byteswap[bswap].ob_name; 5590 } else { 5591 name = dmu_ot[doi.doi_type].ot_name; 5592 } 5593 5594 (void) printf("MOS object %llu (%s) leaked\n", 5595 (u_longlong_t)object, name); 5596 rv = 2; 5597 } 5598 } 5599 (void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL); 5600 if (!range_tree_is_empty(mos_refd_objs)) 5601 rv = 2; 5602 range_tree_vacate(mos_refd_objs, NULL, NULL); 5603 range_tree_destroy(mos_refd_objs); 5604 return (rv); 5605 } 5606 5607 typedef struct log_sm_obsolete_stats_arg { 5608 uint64_t lsos_current_txg; 5609 5610 uint64_t lsos_total_entries; 5611 uint64_t lsos_valid_entries; 5612 5613 uint64_t lsos_sm_entries; 5614 uint64_t lsos_valid_sm_entries; 5615 } log_sm_obsolete_stats_arg_t; 5616 5617 static int 5618 log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme, 5619 uint64_t txg, void *arg) 5620 { 5621 log_sm_obsolete_stats_arg_t *lsos = arg; 5622 uint64_t offset = sme->sme_offset; 5623 uint64_t vdev_id = sme->sme_vdev; 5624 5625 if (lsos->lsos_current_txg == 0) { 5626 /* this is the first log */ 5627 lsos->lsos_current_txg = txg; 5628 } else if (lsos->lsos_current_txg < txg) { 5629 /* we just changed log - print stats and reset */ 5630 (void) printf("%-8llu valid entries out of %-8llu - txg %llu\n", 5631 (u_longlong_t)lsos->lsos_valid_sm_entries, 5632 (u_longlong_t)lsos->lsos_sm_entries, 5633 (u_longlong_t)lsos->lsos_current_txg); 5634 lsos->lsos_valid_sm_entries = 0; 5635 lsos->lsos_sm_entries = 0; 5636 lsos->lsos_current_txg = txg; 5637 } 5638 ASSERT3U(lsos->lsos_current_txg, ==, txg); 5639 5640 lsos->lsos_sm_entries++; 5641 lsos->lsos_total_entries++; 5642 5643 vdev_t *vd = 
static void
dump_log_spacemap_obsolete_stats(spa_t *spa)
{
	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
		return;

	log_sm_obsolete_stats_arg_t lsos;
	bzero(&lsos, sizeof (lsos));

	(void) printf("Log Space Map Obsolete Entry Statistics:\n");

	iterate_through_spacemap_logs(spa,
	    log_spacemap_obsolete_stats_cb, &lsos);

	/* print stats for latest log */
	(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
	    (u_longlong_t)lsos.lsos_valid_sm_entries,
	    (u_longlong_t)lsos.lsos_sm_entries,
	    (u_longlong_t)lsos.lsos_current_txg);

	(void) printf("%-8llu valid entries out of %-8llu - total\n\n",
	    (u_longlong_t)lsos.lsos_valid_entries,
	    (u_longlong_t)lsos.lsos_total_entries);
}

static void
dump_zpool(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	int rc = 0;

	if (dump_opt['S']) {
		dump_simulated_ddt(spa);
		return;
	}

	if (!dump_opt['e'] && dump_opt['C'] > 1) {
		(void) printf("\nCached configuration:\n");
		dump_nvlist(spa->spa_config, 8);
	}

	if (dump_opt['C'])
		dump_config(spa);

	if (dump_opt['u'])
		dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");

	if (dump_opt['D'])
		dump_all_ddts(spa);

	if (dump_opt['d'] > 2 || dump_opt['m'])
		dump_metaslabs(spa);
	if (dump_opt['M'])
		dump_metaslab_groups(spa);
	if (dump_opt['d'] > 2 || dump_opt['m']) {
		dump_log_spacemaps(spa);
		dump_log_spacemap_obsolete_stats(spa);
	}

	if (dump_opt['d'] || dump_opt['i']) {
		mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
		    0);
		dump_dir(dp->dp_meta_objset);

		if (dump_opt['d'] >= 3) {
			dsl_pool_t *dp = spa->spa_dsl_pool;
			dump_full_bpobj(&spa->spa_deferred_bpobj,
			    "Deferred frees", 0);
			if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
				dump_full_bpobj(&dp->dp_free_bpobj,
				    "Pool snapshot frees", 0);
			}
			if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
				ASSERT(spa_feature_is_enabled(spa,
				    SPA_FEATURE_DEVICE_REMOVAL));
				dump_full_bpobj(&dp->dp_obsolete_bpobj,
				    "Pool obsolete blocks", 0);
			}

			if (spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY)) {
				dump_bptree(spa->spa_meta_objset,
				    dp->dp_bptree_obj,
				    "Pool dataset frees");
			}
			dump_dtl(spa->spa_root_vdev, 0);
		}
		(void) dmu_objset_find(spa_name(spa), dump_one_dir,
		    NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);

		if (rc == 0 && !dump_opt['L'])
			rc = dump_mos_leaks(spa);

		for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
			uint64_t refcount;

			if (!(spa_feature_table[f].fi_flags &
			    ZFEATURE_FLAG_PER_DATASET) ||
			    !spa_feature_is_enabled(spa, f)) {
				ASSERT0(dataset_feature_count[f]);
				continue;
			}
			(void) feature_get_refcount(spa,
			    &spa_feature_table[f], &refcount);
			if (dataset_feature_count[f] != refcount) {
				(void) printf("%s feature refcount mismatch: "
				    "%lld datasets != %lld refcount\n",
				    spa_feature_table[f].fi_uname,
				    (longlong_t)dataset_feature_count[f],
				    (longlong_t)refcount);
				rc = 2;
			} else {
				(void) printf("Verified %s feature refcount "
				    "of %llu is correct\n",
				    spa_feature_table[f].fi_uname,
				    (longlong_t)refcount);
			}
		}

		if (rc == 0)
			rc = verify_device_removal_feature_counts(spa);
	}

	if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
		rc = dump_block_stats(spa);

	if (rc == 0)
		rc = verify_spacemap_refcounts(spa);

	if (dump_opt['s'])
		show_pool_stats(spa);

	if (dump_opt['h'])
		dump_history(spa);

	if (rc == 0)
		rc = verify_checkpoint(spa);

	if (rc != 0) {
		dump_debug_buffer();
		exit(rc);
	}
}

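/*
 * Flag bits for zdb_read_block() (-R).  main() maps the single-character
 * flags given on the command line to these bits through the flagbits[]
 * table below, e.g. 'd' -> ZDB_FLAG_DECOMPRESS and 'r' -> ZDB_FLAG_RAW.
 */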
#define	ZDB_FLAG_CHECKSUM	0x0001
#define	ZDB_FLAG_DECOMPRESS	0x0002
#define	ZDB_FLAG_BSWAP		0x0004
#define	ZDB_FLAG_GBH		0x0008
#define	ZDB_FLAG_INDIRECT	0x0010
#define	ZDB_FLAG_PHYS		0x0020
#define	ZDB_FLAG_RAW		0x0040
#define	ZDB_FLAG_PRINT_BLKPTR	0x0080

static int flagbits[256];

static void
zdb_print_blkptr(blkptr_t *bp, int flags)
{
	char blkbuf[BP_SPRINTF_LEN];

	if (flags & ZDB_FLAG_BSWAP)
		byteswap_uint64_array((void *)bp, sizeof (blkptr_t));

	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
	(void) printf("%s\n", blkbuf);
}

static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
	int i;

	for (i = 0; i < nbps; i++)
		zdb_print_blkptr(&bp[i], flags);
}

static void
zdb_dump_gbh(void *buf, int flags)
{
	zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}

static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
	if (flags & ZDB_FLAG_BSWAP)
		byteswap_uint64_array(buf, size);
	(void) write(1, buf, size);
}

static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
	uint64_t *d = (uint64_t *)buf;
	unsigned nwords = size / sizeof (uint64_t);
	int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
	unsigned i, j;
	const char *hdr;
	char *c;

	if (do_bswap)
		hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
	else
		hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";

	(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);

	for (i = 0; i < nwords; i += 2) {
		(void) printf("%06llx: %016llx %016llx ",
		    (u_longlong_t)(i * sizeof (uint64_t)),
		    (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
		    (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));

		c = (char *)&d[i];
		for (j = 0; j < 2 * sizeof (uint64_t); j++)
			(void) printf("%c", isprint(c[j]) ? c[j] : '.');
		(void) printf("\n");
	}
}

/*
 * There are two acceptable formats:
 *	leaf_name	- For example: c1t0d0 or /tmp/ztest.0a
 *	child[.child]*	- For example: 0.1.1
 *
 * The second form can be used to specify arbitrary vdevs anywhere
 * in the hierarchy.  For example, in a pool with a mirror of
 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
 */
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
	char *s, *p, *q;
	unsigned i;

	if (vdev == NULL)
		return (NULL);

	/* First, assume the x.x.x.x format */
	i = strtoul(path, &s, 10);
	if (s == path || (s && *s != '.' && *s != '\0'))
		goto name;
	if (i >= vdev->vdev_children)
		return (NULL);

	vdev = vdev->vdev_child[i];
	if (*s == '\0')
		return (vdev);
	return (zdb_vdev_lookup(vdev, s+1));

name:
	for (i = 0; i < vdev->vdev_children; i++) {
		vdev_t *vc = vdev->vdev_child[i];

		if (vc->vdev_path == NULL) {
			vc = zdb_vdev_lookup(vc, path);
			if (vc == NULL)
				continue;
			else
				return (vc);
		}

		p = strrchr(vc->vdev_path, '/');
		p = p ? p + 1 : vc->vdev_path;
		q = &vc->vdev_path[strlen(vc->vdev_path) - 2];

		if (strcmp(vc->vdev_path, path) == 0)
			return (vc);
		if (strcmp(p, path) == 0)
			return (vc);
		if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
			return (vc);
	}

	return (NULL);
}

/* ARGSUSED */
static int
random_get_pseudo_bytes_cb(void *buf, size_t len, void *unused)
{
	return (random_get_pseudo_bytes(buf, len));
}

/*
 * Read a block from a pool and print it out.  The syntax of the
 * block descriptor is:
 *
 *	pool:vdev_specifier:offset:size[:flags]
 *
 *	pool           - The name of the pool you wish to read from
 *	vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
 *	offset         - offset, in hex, in bytes
 *	size           - Amount of data to read, in hex, in bytes
 *	flags          - A string of characters specifying options
 *		 b: Decode a blkptr at given offset within block
 *		*c: Calculate and display checksums
 *		 d: Decompress data before dumping
 *		 e: Byteswap data before dumping
 *		 g: Display data as a gang block header
 *		 i: Display as an indirect block
 *		 p: Do I/O to physical offset
 *		 r: Dump raw data to stdout
 *
 *	* = not yet implemented
 */

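/*
 * For example, a (hypothetical) invocation such as
 *
 *	zdb -R tank 0.0:400000:20000:d
 *
 * reads 0x20000 bytes at offset 0x400000 from child 0 of top-level vdev 0
 * of pool "tank" and attempts to decompress them before dumping.
 */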
static void
zdb_read_block(char *thing, spa_t *spa)
{
	blkptr_t blk, *bp = &blk;
	dva_t *dva = bp->blk_dva;
	int flags = 0;
	uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0;
	zio_t *zio;
	vdev_t *vd;
	abd_t *pabd;
	void *lbuf, *buf;
	const char *s, *vdev;
	char *p, *dup, *flagstr;
	int i, error;

	dup = strdup(thing);
	s = strtok(dup, ":");
	vdev = s ? s : "";
	s = strtok(NULL, ":");
	offset = strtoull(s ? s : "", NULL, 16);
	s = strtok(NULL, ":");
	size = strtoull(s ? s : "", NULL, 16);
	s = strtok(NULL, ":");
	if (s)
		flagstr = strdup(s);
	else
		flagstr = strdup("");

	s = NULL;
	if (size == 0)
		s = "size must not be zero";
	if (!IS_P2ALIGNED(size, DEV_BSIZE))
		s = "size must be a multiple of sector size";
	if (!IS_P2ALIGNED(offset, DEV_BSIZE))
		s = "offset must be a multiple of sector size";
	if (s) {
		(void) printf("Invalid block specifier: %s - %s\n", thing, s);
		free(dup);
		return;
	}

	for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) {
		for (i = 0; flagstr[i]; i++) {
			int bit = flagbits[(uchar_t)flagstr[i]];

			if (bit == 0) {
				(void) printf("***Invalid flag: %c\n",
				    flagstr[i]);
				continue;
			}
			flags |= bit;

			/* If it's not something with an argument, keep going */
			if ((bit & (ZDB_FLAG_CHECKSUM |
			    ZDB_FLAG_PRINT_BLKPTR)) == 0)
				continue;

			p = &flagstr[i + 1];
			if (bit == ZDB_FLAG_PRINT_BLKPTR)
				blkptr_offset = strtoull(p, &p, 16);
			if (*p != ':' && *p != '\0') {
				(void) printf("***Invalid flag arg: '%s'\n", s);
				free(dup);
				return;
			}
		}
	}
	free(flagstr);

	vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
	if (vd == NULL) {
		(void) printf("***Invalid vdev: %s\n", vdev);
		free(dup);
		return;
	} else {
		if (vd->vdev_path)
			(void) fprintf(stderr, "Found vdev: %s\n",
			    vd->vdev_path);
		else
			(void) fprintf(stderr, "Found vdev type: %s\n",
			    vd->vdev_ops->vdev_op_type);
	}

	psize = size;
	lsize = size;

	pabd = abd_alloc_linear(SPA_MAXBLOCKSIZE, B_FALSE);
	lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);

	BP_ZERO(bp);

	DVA_SET_VDEV(&dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&dva[0], offset);
	DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
	DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));

	BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);

	BP_SET_LSIZE(bp, lsize);
	BP_SET_PSIZE(bp, psize);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(bp, DMU_OT_NONE);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	zio = zio_root(spa, NULL, NULL, 0);

	if (vd == vd->vdev_top) {
		/*
		 * Treat this as a normal block read.
		 */
		zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
		    ZIO_PRIORITY_SYNC_READ,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
	} else {
		/*
		 * Treat this as a vdev child I/O.
		 */
		zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
		    psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE |
		    ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
		    NULL, NULL));
	}

	error = zio_wait(zio);
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (error) {
		(void) printf("Read of %s failed, error: %d\n", thing, error);
		goto out;
	}

	if (flags & ZDB_FLAG_DECOMPRESS) {
		/*
		 * We don't know how the data was compressed, so just try
		 * every decompress function at every inflated blocksize.
		 */
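		/*
		 * The block is decompressed twice per guess: once from the
		 * ABD and once from a flat copy, with the bytes past psize
		 * in each filled with independent random data.  A guess is
		 * accepted only if both attempts succeed and produce
		 * identical output, which weeds out decompressors that
		 * merely appear to succeed by reading past the real data.
		 */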
		enum zio_compress c;
		void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
		void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);

		abd_copy_to_buf(pbuf2, pabd, psize);

		VERIFY0(abd_iterate_func(pabd, psize, SPA_MAXBLOCKSIZE - psize,
		    random_get_pseudo_bytes_cb, NULL));

		VERIFY0(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize,
		    SPA_MAXBLOCKSIZE - psize));

		for (lsize = SPA_MAXBLOCKSIZE; lsize > psize;
		    lsize -= SPA_MINBLOCKSIZE) {
			for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
				if (zio_decompress_data(c, pabd,
				    lbuf, psize, lsize) == 0 &&
				    zio_decompress_data_buf(c, pbuf2,
				    lbuf2, psize, lsize) == 0 &&
				    bcmp(lbuf, lbuf2, lsize) == 0)
					break;
			}
			if (c != ZIO_COMPRESS_FUNCTIONS)
				break;
		}

		umem_free(pbuf2, SPA_MAXBLOCKSIZE);
		umem_free(lbuf2, SPA_MAXBLOCKSIZE);

		if (lsize <= psize) {
			(void) printf("Decompress of %s failed\n", thing);
			goto out;
		}
		buf = lbuf;
		size = lsize;
	} else {
		buf = abd_to_buf(pabd);
		size = psize;
	}

	if (flags & ZDB_FLAG_PRINT_BLKPTR)
		zdb_print_blkptr((blkptr_t *)(void *)
		    ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
	else if (flags & ZDB_FLAG_RAW)
		zdb_dump_block_raw(buf, size, flags);
	else if (flags & ZDB_FLAG_INDIRECT)
		zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t),
		    flags);
	else if (flags & ZDB_FLAG_GBH)
		zdb_dump_gbh(buf, flags);
	else
		zdb_dump_block(thing, buf, size, flags);

out:
	abd_free(pabd);
	umem_free(lbuf, SPA_MAXBLOCKSIZE);
	free(dup);
}

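/*
 * The -E argument handled below is the embedded block pointer itself,
 * given as sixteen colon-separated hex words (see the sscanf format
 * below); the embedded payload is decoded and written raw to stdout.
 */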
static void
zdb_embedded_block(char *thing)
{
	blkptr_t bp;
	unsigned long long *words = (void *)&bp;
	char *buf;
	int err;

	bzero(&bp, sizeof (bp));
	err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
	    "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
	    words + 0, words + 1, words + 2, words + 3,
	    words + 4, words + 5, words + 6, words + 7,
	    words + 8, words + 9, words + 10, words + 11,
	    words + 12, words + 13, words + 14, words + 15);
	if (err != 16) {
		(void) fprintf(stderr, "invalid input format\n");
		exit(1);
	}
	ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
	buf = malloc(SPA_MAXBLOCKSIZE);
	if (buf == NULL) {
		(void) fprintf(stderr, "out of memory\n");
		exit(1);
	}
	err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
	if (err != 0) {
		(void) fprintf(stderr, "decode failed: %u\n", err);
		exit(1);
	}
	zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
	free(buf);
}

int
main(int argc, char **argv)
{
	int c;
	struct rlimit rl = { 1024, 1024 };
	spa_t *spa = NULL;
	objset_t *os = NULL;
	int dump_all = 1;
	int verbose = 0;
	int error = 0;
	char **searchdirs = NULL;
	int nsearch = 0;
	char *target, *target_pool;
	nvlist_t *policy = NULL;
	uint64_t max_txg = UINT64_MAX;
	int flags = ZFS_IMPORT_MISSING_LOG;
	int rewind = ZPOOL_NEVER_REWIND;
	char *spa_config_path_env;
	boolean_t target_is_spa = B_TRUE;
	nvlist_t *cfg = NULL;

	(void) setrlimit(RLIMIT_NOFILE, &rl);
	(void) enable_extended_FILE_stdio(-1, -1);

	dprintf_setup(&argc, argv);

	/*
	 * If the SPA_CONFIG_PATH environment variable is set, it overrides
	 * the default spa_config_path setting.  If -U is specified, it in
	 * turn overrides the environment variable.
	 */
	spa_config_path_env = getenv("SPA_CONFIG_PATH");
	if (spa_config_path_env != NULL)
		spa_config_path = spa_config_path_env;

	/*
	 * For performance reasons, we set this tunable down. We do so before
	 * the arg parsing section so that the user can override this value if
	 * they choose.
	 */
	zfs_btree_verify_intensity = 3;

	while ((c = getopt(argc, argv,
	    "AbcCdDeEFGhiI:klLmMo:Op:PqRsSt:uU:vVx:X")) != -1) {
		switch (c) {
		case 'b':
		case 'c':
		case 'C':
		case 'd':
		case 'D':
		case 'E':
		case 'G':
		case 'h':
		case 'i':
		case 'l':
		case 'm':
		case 'M':
		case 'O':
		case 'R':
		case 's':
		case 'S':
		case 'u':
			dump_opt[c]++;
			dump_all = 0;
			break;
		case 'A':
		case 'e':
		case 'F':
		case 'k':
		case 'L':
		case 'P':
		case 'q':
		case 'X':
			dump_opt[c]++;
			break;
		/* NB: Sort single match options below. */
		case 'I':
			max_inflight = strtoull(optarg, NULL, 0);
			if (max_inflight == 0) {
				(void) fprintf(stderr, "maximum number "
				    "of inflight I/Os must be greater "
				    "than 0\n");
				usage();
			}
			break;
		case 'o':
			error = set_global_var(optarg);
			if (error != 0)
				usage();
			break;
		case 'p':
			if (searchdirs == NULL) {
				searchdirs = umem_alloc(sizeof (char *),
				    UMEM_NOFAIL);
			} else {
				char **tmp = umem_alloc((nsearch + 1) *
				    sizeof (char *), UMEM_NOFAIL);
				bcopy(searchdirs, tmp, nsearch *
				    sizeof (char *));
				umem_free(searchdirs,
				    nsearch * sizeof (char *));
				searchdirs = tmp;
			}
			searchdirs[nsearch++] = optarg;
			break;
		case 't':
			max_txg = strtoull(optarg, NULL, 0);
			if (max_txg < TXG_INITIAL) {
				(void) fprintf(stderr, "incorrect txg "
				    "specified: %s\n", optarg);
				usage();
			}
			break;
		case 'U':
			spa_config_path = optarg;
			if (spa_config_path[0] != '/') {
				(void) fprintf(stderr,
				    "cachefile must be an absolute path "
				    "(i.e. start with a slash)\n");
				usage();
			}
			break;
		case 'v':
			verbose++;
			break;
		case 'V':
			flags = ZFS_IMPORT_VERBATIM;
			break;
		case 'x':
			vn_dumpdir = optarg;
			break;
		default:
			usage();
			break;
		}
	}

	if (!dump_opt['e'] && searchdirs != NULL) {
		(void) fprintf(stderr, "-p option requires use of -e\n");
		usage();
	}

	/*
	 * ZDB does not typically re-read blocks; therefore limit the ARC
	 * to 256 MB, which can be used entirely for metadata.
	 */
	zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024;

	/*
	 * "zdb -c" uses checksum-verifying scrub i/os which are async reads.
	 * "zdb -b" uses traversal prefetch which uses async reads.
	 * For good performance, let several of them be active at once.
	 */
	zfs_vdev_async_read_max_active = 10;

	/*
	 * Disable reference tracking for better performance.
	 */
	reference_tracking_enable = B_FALSE;

	/*
	 * Do not fail spa_load when spa_load_verify fails. This is needed
	 * to load non-idle pools.
	 */
	spa_load_verify_dryrun = B_TRUE;

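	/*
	 * Initialize the userland ZFS stack (libzpool).  zdb only ever reads
	 * from the pool, so the stack is brought up read-only (FREAD).
	 */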
	kernel_init(FREAD);

	if (dump_all)
		verbose = MAX(verbose, 1);

	for (c = 0; c < 256; c++) {
		if (dump_all && strchr("AeEFklLOPRSX", c) == NULL)
			dump_opt[c] = 1;
		if (dump_opt[c])
			dump_opt[c] += verbose;
	}

	aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2);
	zfs_recover = (dump_opt['A'] > 1);

	argc -= optind;
	argv += optind;

	if (argc < 2 && dump_opt['R'])
		usage();

	if (dump_opt['E']) {
		if (argc != 1)
			usage();
		zdb_embedded_block(argv[0]);
		return (0);
	}

	if (argc < 1) {
		if (!dump_opt['e'] && dump_opt['C']) {
			dump_cachefile(spa_config_path);
			return (0);
		}
		usage();
	}

	if (dump_opt['l'])
		return (dump_label(argv[0]));

	if (dump_opt['O']) {
		if (argc != 2)
			usage();
		dump_opt['v'] = verbose + 3;
		return (dump_path(argv[0], argv[1]));
	}

	if (dump_opt['X'] || dump_opt['F'])
		rewind = ZPOOL_DO_REWIND |
		    (dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);

	if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
	    nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
		fatal("internal error: %s", strerror(ENOMEM));

	error = 0;
	target = argv[0];

	if (strpbrk(target, "/@") != NULL) {
		size_t targetlen;

		target_pool = strdup(target);
		*strpbrk(target_pool, "/@") = '\0';

		target_is_spa = B_FALSE;
		targetlen = strlen(target);
		if (targetlen && target[targetlen - 1] == '/')
			target[targetlen - 1] = '\0';
	} else {
		target_pool = target;
	}

	if (dump_opt['e']) {
		importargs_t args = { 0 };

		args.paths = nsearch;
		args.path = searchdirs;
		args.can_be_active = B_TRUE;

		error = zpool_find_config(NULL, target_pool, &cfg, &args,
		    &libzpool_config_ops);

		if (error == 0) {

			if (nvlist_add_nvlist(cfg,
			    ZPOOL_LOAD_POLICY, policy) != 0) {
				fatal("can't open '%s': %s",
				    target, strerror(ENOMEM));
			}

			if (dump_opt['C'] > 1) {
				(void) printf("\nConfiguration for import:\n");
				dump_nvlist(cfg, 8);
			}

			/*
			 * Disable the activity check to allow examination of
			 * active pools.
			 */
			error = spa_import(target_pool, cfg, NULL,
			    flags | ZFS_IMPORT_SKIP_MMP);
		}
	}

	char *checkpoint_pool = NULL;
	char *checkpoint_target = NULL;
	if (dump_opt['k']) {
		checkpoint_pool = import_checkpointed_state(target, cfg,
		    &checkpoint_target);

		if (checkpoint_target != NULL)
			target = checkpoint_target;

	}

	if (error == 0) {
		if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
			ASSERT(checkpoint_pool != NULL);
			ASSERT(checkpoint_target == NULL);

			error = spa_open(checkpoint_pool, &spa, FTAG);
			if (error != 0) {
				fatal("Tried to open pool \"%s\" but "
				    "spa_open() failed with error %d\n",
				    checkpoint_pool, error);
			}

		} else if (target_is_spa || dump_opt['R']) {
			zdb_set_skip_mmp(target);
			error = spa_open_rewind(target, &spa, FTAG, policy,
			    NULL);
			if (error) {
				/*
				 * If we're missing the log device then
				 * try opening the pool after clearing the
				 * log state.
				 */
				mutex_enter(&spa_namespace_lock);
				if ((spa = spa_lookup(target)) != NULL &&
				    spa->spa_log_state == SPA_LOG_MISSING) {
					spa->spa_log_state = SPA_LOG_CLEAR;
					error = 0;
				}
				mutex_exit(&spa_namespace_lock);

				if (!error) {
					error = spa_open_rewind(target, &spa,
					    FTAG, policy, NULL);
				}
			}
		} else {
			zdb_set_skip_mmp(target);
			error = open_objset(target, DMU_OST_ANY, FTAG, &os);
		}
	}
	nvlist_free(policy);

	if (error)
		fatal("can't open '%s': %s", target, strerror(error));

	argv++;
	argc--;
	if (!dump_opt['R']) {
		if (argc > 0) {
			zopt_objects = argc;
			zopt_object = calloc(zopt_objects, sizeof (uint64_t));
			for (unsigned i = 0; i < zopt_objects; i++) {
				errno = 0;
				zopt_object[i] = strtoull(argv[i], NULL, 0);
				if (zopt_object[i] == 0 && errno != 0)
					fatal("bad number %s: %s",
					    argv[i], strerror(errno));
			}
		}
		if (os != NULL) {
			dump_dir(os);
		} else if (zopt_objects > 0 && !dump_opt['m']) {
			dump_dir(spa->spa_meta_objset);
		} else {
			dump_zpool(spa);
		}
	} else {
		flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
		flagbits['c'] = ZDB_FLAG_CHECKSUM;
		flagbits['d'] = ZDB_FLAG_DECOMPRESS;
		flagbits['e'] = ZDB_FLAG_BSWAP;
		flagbits['g'] = ZDB_FLAG_GBH;
		flagbits['i'] = ZDB_FLAG_INDIRECT;
		flagbits['p'] = ZDB_FLAG_PHYS;
		flagbits['r'] = ZDB_FLAG_RAW;

		for (int i = 0; i < argc; i++)
			zdb_read_block(argv[i], spa);
	}

	if (dump_opt['k']) {
		free(checkpoint_pool);
		if (!target_is_spa)
			free(checkpoint_target);
	}

	if (os != NULL)
		close_objset(os, FTAG);
	else
		spa_close(spa, FTAG);

	fuid_table_destroy();

	dump_debug_buffer();

	kernel_fini();

	return (error);
}