/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 */

/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.  If backwards compatibility
 *     testing is enabled ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * (7) Threads are created with a reduced stack size, for sanity checking.
 *     Therefore, it's important not to allocate huge buffers on the stack.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process.  This allows shared
 * memory to survive the exec syscall.  The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file.  The information stored in this
 * file must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_trim.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <sys/abd.h>
#include <sys/blake3.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <signal.h>
#include <umem.h>
#include <ctype.h>
#include <math.h>
#include <sys/fs/zfs.h>
#include <zfs_fletcher.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <sys/crypto/icp.h>
#include <sys/zfs_impl.h>
#if (__GLIBC__ && !__UCLIBC__)
#include <execinfo.h> /* for backtrace() */
#endif

static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;

typedef struct ztest_shared_hdr {
	uint64_t	zh_hdr_size;
	uint64_t	zh_opts_size;
	uint64_t	zh_size;
	uint64_t	zh_stats_size;
	uint64_t	zh_stats_count;
	uint64_t	zh_ds_size;
	uint64_t	zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

enum ztest_class_state {
	ZTEST_VDEV_CLASS_OFF,
	ZTEST_VDEV_CLASS_ON,
	ZTEST_VDEV_CLASS_RND
};

#define	ZO_GVARS_MAX_ARGLEN	((size_t)64)
#define	ZO_GVARS_MAX_COUNT	((size_t)10)

typedef struct ztest_shared_opts {
	char zo_pool[ZFS_MAX_DATASET_NAME_LEN];
	char zo_dir[ZFS_MAX_DATASET_NAME_LEN];
	char zo_alt_ztest[MAXNAMELEN];
	char zo_alt_libpath[MAXNAMELEN];
	uint64_t zo_vdevs;
	uint64_t zo_vdevtime;
	size_t zo_vdev_size;
	int zo_ashift;
	int zo_mirrors;
	int zo_raid_children;
	int zo_raid_parity;
	char zo_raid_type[8];
	int zo_draid_data;
	int zo_draid_spares;
	int zo_datasets;
	int zo_threads;
	uint64_t zo_passtime;
	uint64_t zo_killrate;
	int zo_verbose;
	int zo_init;
	uint64_t zo_time;
	uint64_t zo_maxloops;
	uint64_t zo_metaslab_force_ganging;
	int zo_mmp_test;
	int zo_special_vdevs;
	int zo_dump_dbgmsg;
	int zo_gvars_count;
	char zo_gvars[ZO_GVARS_MAX_COUNT][ZO_GVARS_MAX_ARGLEN];
} ztest_shared_opts_t;
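/*
 * Layout of the shared mmap-ed file, as implied by ztest_shared_hdr_t
 * (a reader's sketch, not normative): the header sits at offset 0,
 * followed by the options, the global shared state, the per-function
 * call-state array, and the per-dataset array.  A consumer would locate
 * each region by summing the preceding sizes, e.g.:
 *
 *	offset(opts)      = zh_hdr_size
 *	offset(shared)    = zh_hdr_size + zh_opts_size
 *	offset(callstate) = zh_hdr_size + zh_opts_size + zh_size
 *	offset(ds)        = offset(callstate) + zh_stats_size * zh_stats_count
 */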
/* Default values for command line options. */
#define	DEFAULT_POOL "ztest"
#define	DEFAULT_VDEV_DIR "/tmp"
#define	DEFAULT_VDEV_COUNT 5
#define	DEFAULT_VDEV_SIZE (SPA_MINDEVSIZE * 4)	/* 256m default size */
#define	DEFAULT_VDEV_SIZE_STR "256M"
#define	DEFAULT_ASHIFT SPA_MINBLOCKSHIFT
#define	DEFAULT_MIRRORS 2
#define	DEFAULT_RAID_CHILDREN 4
#define	DEFAULT_RAID_PARITY 1
#define	DEFAULT_DRAID_DATA 4
#define	DEFAULT_DRAID_SPARES 1
#define	DEFAULT_DATASETS_COUNT 7
#define	DEFAULT_THREADS 23
#define	DEFAULT_RUN_TIME 300 /* 300 seconds */
#define	DEFAULT_RUN_TIME_STR "300 sec"
#define	DEFAULT_PASS_TIME 60 /* 60 seconds */
#define	DEFAULT_PASS_TIME_STR "60 sec"
#define	DEFAULT_KILL_RATE 70 /* 70% kill rate */
#define	DEFAULT_KILLRATE_STR "70%"
#define	DEFAULT_INITS 1
#define	DEFAULT_MAX_LOOPS 50 /* 5 minutes */
#define	DEFAULT_FORCE_GANGING (64 << 10)
#define	DEFAULT_FORCE_GANGING_STR "64K"

/* Simplifying assumption: -1 is not a valid default. */
#define	NO_DEFAULT -1

static const ztest_shared_opts_t ztest_opts_defaults = {
	.zo_pool = DEFAULT_POOL,
	.zo_dir = DEFAULT_VDEV_DIR,
	.zo_alt_ztest = { '\0' },
	.zo_alt_libpath = { '\0' },
	.zo_vdevs = DEFAULT_VDEV_COUNT,
	.zo_ashift = DEFAULT_ASHIFT,
	.zo_mirrors = DEFAULT_MIRRORS,
	.zo_raid_children = DEFAULT_RAID_CHILDREN,
	.zo_raid_parity = DEFAULT_RAID_PARITY,
	.zo_raid_type = VDEV_TYPE_RAIDZ,
	.zo_vdev_size = DEFAULT_VDEV_SIZE,
	.zo_draid_data = DEFAULT_DRAID_DATA,	/* data drives */
	.zo_draid_spares = DEFAULT_DRAID_SPARES,	/* distributed spares */
	.zo_datasets = DEFAULT_DATASETS_COUNT,
	.zo_threads = DEFAULT_THREADS,
	.zo_passtime = DEFAULT_PASS_TIME,
	.zo_killrate = DEFAULT_KILL_RATE,
	.zo_verbose = 0,
	.zo_mmp_test = 0,
	.zo_init = DEFAULT_INITS,
	.zo_time = DEFAULT_RUN_TIME,
	.zo_maxloops = DEFAULT_MAX_LOOPS, /* max loops during spa_freeze() */
	.zo_metaslab_force_ganging = DEFAULT_FORCE_GANGING,
	.zo_special_vdevs = ZTEST_VDEV_CLASS_RND,
	.zo_gvars_count = 0,
};

extern uint64_t metaslab_force_ganging;
extern uint64_t metaslab_df_alloc_threshold;
extern uint64_t zfs_deadman_synctime_ms;
extern uint_t metaslab_preload_limit;
extern int zfs_compressed_arc_enabled;
extern int zfs_abd_scatter_enabled;
extern uint_t dmu_object_alloc_chunk_shift;
extern boolean_t zfs_force_some_double_word_sm_entries;
extern unsigned long zio_decompress_fail_fraction;
extern unsigned long zfs_reconstruct_indirect_damage_fraction;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;
static const char *const ztest_wkeydata = "abcdefghijklmnopqrstuvwxyz012345";

typedef struct ztest_shared_ds {
	uint64_t	zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define	ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])

#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS(zs) \
	(MAX((zs)->zs_mirrors, 1) * (ztest_opts.zo_raid_parity + 1) - 1)

enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
	ZTEST_IO_SETATTR,
	ZTEST_IO_REWRITE,
	ZTEST_IO_TYPES
};
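/*
 * Block tag embedded in object data and bonus buffers.  Per design note
 * (6) above, bt_txg records the txg in which the tag was written so that
 * readers can verify it is <= the currently open txg (see
 * ztest_bt_verify() below); bt_gen and bt_crtxg similarly let verifiers
 * catch stale or time-traveling data.
 */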
typedef struct ztest_block_tag {
	uint64_t	bt_magic;
	uint64_t	bt_objset;
	uint64_t	bt_object;
	uint64_t	bt_dnodesize;
	uint64_t	bt_offset;
	uint64_t	bt_gen;
	uint64_t	bt_txg;
	uint64_t	bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/*
 * It would be better to use a rangelock_t per object.  Unfortunately
 * the rangelock_t is not a drop-in replacement for rl_t, because we
 * still need to map from object ID to rangelock_t.
 */
typedef enum {
	RL_READER,
	RL_WRITER,
	RL_APPEND
} rl_type_t;

typedef struct rll {
	void		*rll_writer;
	int		rll_readers;
	kmutex_t	rll_lock;
	kcondvar_t	rll_cv;
} rll_t;

typedef struct rl {
	uint64_t	rl_object;
	uint64_t	rl_offset;
	uint64_t	rl_size;
	rll_t		*rl_lock;
} rl_t;

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
	uint64_t	od_dir;
	uint64_t	od_object;
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	uint64_t	od_crdnodesize;
	uint64_t	od_gen;
	uint64_t	od_crgen;
	char		od_name[ZFS_MAX_DATASET_NAME_LEN];
} ztest_od_t;

/*
 * Per-dataset state.
 */
typedef struct ztest_ds {
	ztest_shared_ds_t *zd_shared;
	objset_t	*zd_os;
	pthread_rwlock_t zd_zilog_lock;
	zilog_t		*zd_zilog;
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[ZFS_MAX_DATASET_NAME_LEN];
	kmutex_t	zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
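/*
 * Note on the two lock arrays above: objects hash into zd_object_lock[]
 * by object number, and ranges hash into zd_range_lock[] by a mix of
 * object and offset (see ztest_range_lock() below).  Distinct objects or
 * ranges may therefore share a lock; that only reduces concurrency,
 * never correctness.
 */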
/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
	const char	*zi_funcname;	/* name of test function */
} ztest_info_t;

typedef struct ztest_shared_callstate {
	uint64_t	zc_count;	/* per-pass count */
	uint64_t	zc_time;	/* per-pass time */
	uint64_t	zc_next;	/* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;
#define	ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])

ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_object_next_chunk;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_mmp_enable_disable;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_class_add;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;
ztest_func_t ztest_device_removal;
ztest_func_t ztest_spa_checkpoint_create_discard;
ztest_func_t ztest_initialize;
ztest_func_t ztest_trim;
ztest_func_t ztest_blake3;
ztest_func_t ztest_fletcher;
ztest_func_t ztest_fletcher_incr;
ztest_func_t ztest_verify_dnode_bt;

static uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
static uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
static uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
static uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
static uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */

#define	ZTI_INIT(func, iters, interval) \
	{   .zi_func = (func), \
	    .zi_iters = (iters), \
	    .zi_interval = (interval), \
	    .zi_funcname = # func }
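/*
 * zi_interval is a pointer rather than a value so that entries in the
 * table below can reference an interval computed at runtime (e.g.
 * &ztest_opts.zo_vdevtime) as well as the fixed zopt_* periods above.
 */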
static ztest_info_t ztest_info[] = {
	ZTI_INIT(ztest_dmu_read_write, 1, &zopt_always),
	ZTI_INIT(ztest_dmu_write_parallel, 10, &zopt_always),
	ZTI_INIT(ztest_dmu_object_alloc_free, 1, &zopt_always),
	ZTI_INIT(ztest_dmu_object_next_chunk, 1, &zopt_sometimes),
	ZTI_INIT(ztest_dmu_commit_callbacks, 1, &zopt_always),
	ZTI_INIT(ztest_zap, 30, &zopt_always),
	ZTI_INIT(ztest_zap_parallel, 100, &zopt_always),
	ZTI_INIT(ztest_split_pool, 1, &zopt_sometimes),
	ZTI_INIT(ztest_zil_commit, 1, &zopt_incessant),
	ZTI_INIT(ztest_zil_remount, 1, &zopt_sometimes),
	ZTI_INIT(ztest_dmu_read_write_zcopy, 1, &zopt_often),
	ZTI_INIT(ztest_dmu_objset_create_destroy, 1, &zopt_often),
	ZTI_INIT(ztest_dsl_prop_get_set, 1, &zopt_often),
	ZTI_INIT(ztest_spa_prop_get_set, 1, &zopt_sometimes),
#if 0
	ZTI_INIT(ztest_dmu_prealloc, 1, &zopt_sometimes),
#endif
	ZTI_INIT(ztest_fzap, 1, &zopt_sometimes),
	ZTI_INIT(ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes),
	ZTI_INIT(ztest_spa_create_destroy, 1, &zopt_sometimes),
	ZTI_INIT(ztest_fault_inject, 1, &zopt_sometimes),
	ZTI_INIT(ztest_dmu_snapshot_hold, 1, &zopt_sometimes),
	ZTI_INIT(ztest_mmp_enable_disable, 1, &zopt_sometimes),
	ZTI_INIT(ztest_reguid, 1, &zopt_rarely),
	ZTI_INIT(ztest_scrub, 1, &zopt_rarely),
	ZTI_INIT(ztest_spa_upgrade, 1, &zopt_rarely),
	ZTI_INIT(ztest_dsl_dataset_promote_busy, 1, &zopt_rarely),
	ZTI_INIT(ztest_vdev_attach_detach, 1, &zopt_sometimes),
	ZTI_INIT(ztest_vdev_LUN_growth, 1, &zopt_rarely),
	ZTI_INIT(ztest_vdev_add_remove, 1, &ztest_opts.zo_vdevtime),
	ZTI_INIT(ztest_vdev_class_add, 1, &ztest_opts.zo_vdevtime),
	ZTI_INIT(ztest_vdev_aux_add_remove, 1, &ztest_opts.zo_vdevtime),
	ZTI_INIT(ztest_device_removal, 1, &zopt_sometimes),
	ZTI_INIT(ztest_spa_checkpoint_create_discard, 1, &zopt_rarely),
	ZTI_INIT(ztest_initialize, 1, &zopt_sometimes),
	ZTI_INIT(ztest_trim, 1, &zopt_sometimes),
	ZTI_INIT(ztest_blake3, 1, &zopt_rarely),
	ZTI_INIT(ztest_fletcher, 1, &zopt_rarely),
	ZTI_INIT(ztest_fletcher_incr, 1, &zopt_rarely),
	ZTI_INIT(ztest_verify_dnode_bt, 1, &zopt_sometimes),
};

#define	ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))

/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
	kmutex_t	zcl_callbacks_lock;
	list_t		zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
 */
typedef struct ztest_shared {
	boolean_t	zs_do_init;
	hrtime_t	zs_proc_start;
	hrtime_t	zs_proc_stop;
	hrtime_t	zs_thread_start;
	hrtime_t	zs_thread_stop;
	hrtime_t	zs_thread_kill;
	uint64_t	zs_enospc_count;
	uint64_t	zs_vdev_next_leaf;
	uint64_t	zs_vdev_aux;
	uint64_t	zs_alloc;
	uint64_t	zs_space;
	uint64_t	zs_splits;
	uint64_t	zs_mirrors;
	uint64_t	zs_metaslab_sz;
	uint64_t	zs_metaslab_df_alloc_threshold;
	uint64_t	zs_guid;
} ztest_shared_t;

#define	ID_PARALLEL	-1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
static ztest_shared_t *ztest_shared;

static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;

static kmutex_t ztest_vdev_lock;
static boolean_t ztest_device_removal_active = B_FALSE;
static boolean_t ztest_pool_scrubbed = B_FALSE;
static kmutex_t ztest_checkpoint_lock;
/*
 * The ztest_name_lock protects the pool and dataset namespace used by
 * the individual tests.  To modify the namespace, consumers must grab
 * this lock as writer.  Grabbing the lock as reader will ensure that the
 * namespace does not change while the lock is held.
 */
static pthread_rwlock_t ztest_name_lock;

static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;
/* Commit cb delay */
static uint64_t zc_min_txg_delay = UINT64_MAX;
static int zc_cb_counter = 0;

/*
 * Minimum number of commit callbacks that need to be registered for us to
 * check whether the minimum txg delay is acceptable.
 */
#define	ZTEST_COMMIT_CB_MIN_REG	100

/*
 * If a number of txgs equal to this threshold have been created after a
 * commit callback has been registered but not called, then we assume
 * there is an implementation bug.
 */
#define	ZTEST_COMMIT_CB_THRESH	(TXG_CONCURRENT_STATES + 1000)

enum ztest_object {
	ZTEST_META_DNODE = 0,
	ZTEST_DIROBJ,
	ZTEST_OBJECTS
};

static __attribute__((noreturn)) void usage(boolean_t requested);
static int ztest_scrub_impl(spa_t *spa);

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}

static void
dump_debug_buffer(void)
{
	ssize_t ret __attribute__((unused));

	if (!ztest_opts.zo_dump_dbgmsg)
		return;

	/*
	 * We use write() instead of printf() so that this function
	 * is safe to call from a signal handler.
	 */
	ret = write(STDOUT_FILENO, "\n", 1);
	zfs_dbgmsg_print("ztest");
}

#define	BACKTRACE_SZ	100

static void
sig_handler(int signo)
{
	struct sigaction action;
#if (__GLIBC__ && !__UCLIBC__) /* backtrace() is a GNU extension */
	int nptrs;
	void *buffer[BACKTRACE_SZ];

	nptrs = backtrace(buffer, BACKTRACE_SZ);
	backtrace_symbols_fd(buffer, nptrs, STDERR_FILENO);
#endif
	dump_debug_buffer();

	/*
	 * Restore default action and re-raise signal so SIGSEGV and
	 * SIGABRT can trigger a core dump.
	 */
	action.sa_handler = SIG_DFL;
	sigemptyset(&action.sa_mask);
	action.sa_flags = 0;
	(void) sigaction(signo, &action, NULL);
	raise(signo);
}

#define	FATAL_MSG_SZ	1024

static const char *fatal_msg;
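/*
 * Print a fatal error message and terminate.  When do_perror is set the
 * message is suffixed with strerror() of the errno captured on entry.
 * If ztest_dump_core is still true we abort() to leave a core dump;
 * otherwise we dump the debug buffer and exit(3).
 */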
static __attribute__((format(printf, 2, 3))) __attribute__((noreturn)) void
fatal(int do_perror, const char *message, ...)
{
	va_list args;
	int save_errno = errno;
	char *buf;

	(void) fflush(stdout);
	buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);
	if (buf == NULL)
		goto out;

	va_start(args, message);
	(void) sprintf(buf, "ztest: ");
	/* LINTED */
	(void) vsprintf(buf + strlen(buf), message, args);
	va_end(args);
	if (do_perror) {
		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
		    ": %s", strerror(save_errno));
	}
	(void) fprintf(stderr, "%s\n", buf);
	fatal_msg = buf;			/* to ease debugging */

out:
	if (ztest_dump_core)
		abort();
	else
		dump_debug_buffer();

	exit(3);
}

static int
str2shift(const char *buf)
{
	const char *ends = "BKMGTPEZ";
	int i;

	if (buf[0] == '\0')
		return (0);
	for (i = 0; i < strlen(ends); i++) {
		if (toupper(buf[0]) == ends[i])
			break;
	}
	if (i == strlen(ends)) {
		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
		    buf);
		usage(B_FALSE);
	}
	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
		return (10*i);
	}
	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
	usage(B_FALSE);
}

static uint64_t
nicenumtoull(const char *buf)
{
	char *end;
	uint64_t val;

	val = strtoull(buf, &end, 0);
	if (end == buf) {
		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
		usage(B_FALSE);
	} else if (end[0] == '.') {
		double fval = strtod(buf, &end);
		fval *= pow(2, str2shift(end));
		/*
		 * UINT64_MAX is not exactly representable as a double.
		 * The closest representation is UINT64_MAX + 1, so we
		 * use a >= comparison instead of > for the bounds check.
		 */
		if (fval >= (double)UINT64_MAX) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val = (uint64_t)fval;
	} else {
		int shift = str2shift(end);
		if (shift >= 64 || (val << shift) >> shift != val) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
			    buf);
			usage(B_FALSE);
		}
		val <<= shift;
	}
	return (val);
}
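/*
 * Worked examples for nicenumtoull() above, using the binary suffixes
 * accepted by str2shift():
 *
 *	"300"  -> 300
 *	"128K" -> 128 << 10 == 131072
 *	"1.5G" -> (uint64_t)(1.5 * 2^30) == 1610612736
 */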
typedef struct ztest_option {
	const char	short_opt;
	const char	*long_opt;
	const char	*long_opt_param;
	const char	*comment;
	unsigned int	default_int;
	const char	*default_str;
} ztest_option_t;

/*
 * The following option_table is used for generating the usage info as well as
 * the long and short option information for calling getopt_long().
 */
static ztest_option_t option_table[] = {
	{ 'v',	"vdevs", "INTEGER", "Number of vdevs", DEFAULT_VDEV_COUNT,
	    NULL},
	{ 's',	"vdev-size", "INTEGER", "Size of each vdev",
	    NO_DEFAULT, DEFAULT_VDEV_SIZE_STR},
	{ 'a',	"alignment-shift", "INTEGER",
	    "Alignment shift; use 0 for random", DEFAULT_ASHIFT, NULL},
	{ 'm',	"mirror-copies", "INTEGER", "Number of mirror copies",
	    DEFAULT_MIRRORS, NULL},
	{ 'r',	"raid-disks", "INTEGER", "Number of raidz/draid disks",
	    DEFAULT_RAID_CHILDREN, NULL},
	{ 'R',	"raid-parity", "INTEGER", "Raid parity",
	    DEFAULT_RAID_PARITY, NULL},
	{ 'K',	"raid-kind", "raidz|draid|random", "Raid kind",
	    NO_DEFAULT, "random"},
	{ 'D',	"draid-data", "INTEGER", "Number of draid data drives",
	    DEFAULT_DRAID_DATA, NULL},
	{ 'S',	"draid-spares", "INTEGER", "Number of draid spares",
	    DEFAULT_DRAID_SPARES, NULL},
	{ 'd',	"datasets", "INTEGER", "Number of datasets",
	    DEFAULT_DATASETS_COUNT, NULL},
	{ 't',	"threads", "INTEGER", "Number of ztest threads",
	    DEFAULT_THREADS, NULL},
	{ 'g',	"gang-block-threshold", "INTEGER",
	    "Metaslab gang block threshold",
	    NO_DEFAULT, DEFAULT_FORCE_GANGING_STR},
	{ 'i',	"init-count", "INTEGER", "Number of times to initialize pool",
	    DEFAULT_INITS, NULL},
	{ 'k',	"kill-percentage", "INTEGER", "Kill percentage",
	    NO_DEFAULT, DEFAULT_KILLRATE_STR},
	{ 'p',	"pool-name", "STRING", "Pool name",
	    NO_DEFAULT, DEFAULT_POOL},
	{ 'f',	"vdev-file-directory", "PATH", "File directory for vdev files",
	    NO_DEFAULT, DEFAULT_VDEV_DIR},
	{ 'M',	"multi-host", NULL,
	    "Multi-host; simulate pool imported on remote host",
	    NO_DEFAULT, NULL},
	{ 'E',	"use-existing-pool", NULL,
	    "Use existing pool instead of creating new one", NO_DEFAULT, NULL},
	{ 'T',	"run-time", "INTEGER", "Total run time",
	    NO_DEFAULT, DEFAULT_RUN_TIME_STR},
	{ 'P',	"pass-time", "INTEGER", "Time per pass",
	    NO_DEFAULT, DEFAULT_PASS_TIME_STR},
	{ 'F',	"freeze-loops", "INTEGER", "Max loops in spa_freeze()",
	    DEFAULT_MAX_LOOPS, NULL},
	{ 'B',	"alt-ztest", "PATH", "Alternate ztest path",
	    NO_DEFAULT, NULL},
	{ 'C',	"vdev-class-state", "on|off|random", "vdev class state",
	    NO_DEFAULT, "random"},
	{ 'o',	"option", "\"OPTION=INTEGER\"",
	    "Set global variable to an unsigned 32-bit integer value",
	    NO_DEFAULT, NULL},
	{ 'G',	"dump-debug-msg", NULL,
	    "Dump zfs_dbgmsg buffer before exiting due to an error",
	    NO_DEFAULT, NULL},
	{ 'V',	"verbose", NULL,
	    "Verbose (use multiple times for ever more verbosity)",
	    NO_DEFAULT, NULL},
	{ 'h',	"help", NULL, "Show this help",
	    NO_DEFAULT, NULL},
	{0, 0, 0, 0, 0, 0}
};

static struct option *long_opts = NULL;
static char *short_opts = NULL;

static void
init_options(void)
{
	ASSERT3P(long_opts, ==, NULL);
	ASSERT3P(short_opts, ==, NULL);

	int count = sizeof (option_table) / sizeof (option_table[0]);
	long_opts = umem_alloc(sizeof (struct option) * count, UMEM_NOFAIL);

	short_opts = umem_alloc(sizeof (char) * 2 * count, UMEM_NOFAIL);
	int short_opt_index = 0;

	for (int i = 0; i < count; i++) {
		long_opts[i].val = option_table[i].short_opt;
		long_opts[i].name = option_table[i].long_opt;
		long_opts[i].has_arg = option_table[i].long_opt_param != NULL
		    ? required_argument : no_argument;
		long_opts[i].flag = NULL;
		short_opts[short_opt_index++] = option_table[i].short_opt;
		if (option_table[i].long_opt_param != NULL) {
			short_opts[short_opt_index++] = ':';
		}
	}
}

static void
fini_options(void)
{
	int count = sizeof (option_table) / sizeof (option_table[0]);

	umem_free(long_opts, sizeof (struct option) * count);
	umem_free(short_opts, sizeof (char) * 2 * count);

	long_opts = NULL;
	short_opts = NULL;
}

static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	char option[80];
	FILE *fp = requested ? stdout : stderr;

	(void) fprintf(fp, "Usage: %s [OPTIONS...]\n", DEFAULT_POOL);
	for (int i = 0; option_table[i].short_opt != 0; i++) {
		if (option_table[i].long_opt_param != NULL) {
			(void) sprintf(option, " -%c --%s=%s",
			    option_table[i].short_opt,
			    option_table[i].long_opt,
			    option_table[i].long_opt_param);
		} else {
			(void) sprintf(option, " -%c --%s",
			    option_table[i].short_opt,
			    option_table[i].long_opt);
		}
		(void) fprintf(fp, " %-40s%s", option,
		    option_table[i].comment);

		if (option_table[i].long_opt_param != NULL) {
			if (option_table[i].default_str != NULL) {
				(void) fprintf(fp, " (default: %s)",
				    option_table[i].default_str);
			} else if (option_table[i].default_int != NO_DEFAULT) {
				(void) fprintf(fp, " (default: %u)",
				    option_table[i].default_int);
			}
		}
		(void) fprintf(fp, "\n");
	}
	exit(requested ? 0 : 1);
}

static uint64_t
ztest_random(uint64_t range)
{
	uint64_t r;

	ASSERT3S(ztest_fd_rand, >=, 0);

	if (range == 0)
		return (0);

	if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
		fatal(B_TRUE, "short read from /dev/urandom");

	return (r % range);
}

static void
ztest_parse_name_value(const char *input, ztest_shared_opts_t *zo)
{
	char name[32];
	char *value;
	int state = ZTEST_VDEV_CLASS_RND;

	(void) strlcpy(name, input, sizeof (name));

	value = strchr(name, '=');
	if (value == NULL) {
		(void) fprintf(stderr, "missing value in property=value "
		    "'-C' argument (%s)\n", input);
		usage(B_FALSE);
	}
	*(value) = '\0';
	value++;

	if (strcmp(value, "on") == 0) {
		state = ZTEST_VDEV_CLASS_ON;
	} else if (strcmp(value, "off") == 0) {
		state = ZTEST_VDEV_CLASS_OFF;
	} else if (strcmp(value, "random") == 0) {
		state = ZTEST_VDEV_CLASS_RND;
	} else {
		(void) fprintf(stderr, "invalid property value '%s'\n", value);
		usage(B_FALSE);
	}

	if (strcmp(name, "special") == 0) {
		zo->zo_special_vdevs = state;
	} else {
		(void) fprintf(stderr, "invalid property name '%s'\n", name);
		usage(B_FALSE);
	}
	if (zo->zo_verbose >= 3)
		(void) printf("%s vdev state is '%s'\n", name, value);
}
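/*
 * Example: "-C special=on" always includes a special allocation class
 * vdev in the pool configuration, "-C special=off" suppresses it, and
 * "-C special=random" (the default) leaves the choice to chance.
 */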
static void
process_options(int argc, char **argv)
{
	char *path;
	ztest_shared_opts_t *zo = &ztest_opts;

	int opt;
	uint64_t value;
	const char *raid_kind = "random";

	memcpy(zo, &ztest_opts_defaults, sizeof (*zo));

	init_options();

	while ((opt = getopt_long(argc, argv, short_opts, long_opts,
	    NULL)) != EOF) {
		value = 0;
		switch (opt) {
		case 'v':
		case 's':
		case 'a':
		case 'm':
		case 'r':
		case 'R':
		case 'D':
		case 'S':
		case 'd':
		case 't':
		case 'g':
		case 'i':
		case 'k':
		case 'T':
		case 'P':
		case 'F':
			value = nicenumtoull(optarg);
		}
		switch (opt) {
		case 'v':
			zo->zo_vdevs = value;
			break;
		case 's':
			zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
			break;
		case 'a':
			zo->zo_ashift = value;
			break;
		case 'm':
			zo->zo_mirrors = value;
			break;
		case 'r':
			zo->zo_raid_children = MAX(1, value);
			break;
		case 'R':
			zo->zo_raid_parity = MIN(MAX(value, 1), 3);
			break;
		case 'K':
			raid_kind = optarg;
			break;
		case 'D':
			zo->zo_draid_data = MAX(1, value);
			break;
		case 'S':
			zo->zo_draid_spares = MAX(1, value);
			break;
		case 'd':
			zo->zo_datasets = MAX(1, value);
			break;
		case 't':
			zo->zo_threads = MAX(1, value);
			break;
		case 'g':
			zo->zo_metaslab_force_ganging =
			    MAX(SPA_MINBLOCKSIZE << 1, value);
			break;
		case 'i':
			zo->zo_init = value;
			break;
		case 'k':
			zo->zo_killrate = value;
			break;
		case 'p':
			(void) strlcpy(zo->zo_pool, optarg,
			    sizeof (zo->zo_pool));
			break;
		case 'f':
			path = realpath(optarg, NULL);
			if (path == NULL) {
				(void) fprintf(stderr, "error: %s: %s\n",
				    optarg, strerror(errno));
				usage(B_FALSE);
			} else {
				(void) strlcpy(zo->zo_dir, path,
				    sizeof (zo->zo_dir));
				free(path);
			}
			break;
		case 'M':
			zo->zo_mmp_test = 1;
			break;
		case 'V':
			zo->zo_verbose++;
			break;
		case 'E':
			zo->zo_init = 0;
			break;
		case 'T':
			zo->zo_time = value;
			break;
		case 'P':
			zo->zo_passtime = MAX(1, value);
			break;
		case 'F':
			zo->zo_maxloops = MAX(1, value);
			break;
		case 'B':
			(void) strlcpy(zo->zo_alt_ztest, optarg,
			    sizeof (zo->zo_alt_ztest));
			break;
		case 'C':
			ztest_parse_name_value(optarg, zo);
			break;
		case 'o':
			if (zo->zo_gvars_count >= ZO_GVARS_MAX_COUNT) {
				(void) fprintf(stderr,
				    "max global var count (%zu) exceeded\n",
				    ZO_GVARS_MAX_COUNT);
				usage(B_FALSE);
			}
			char *v = zo->zo_gvars[zo->zo_gvars_count];
			if (strlcpy(v, optarg, ZO_GVARS_MAX_ARGLEN) >=
			    ZO_GVARS_MAX_ARGLEN) {
				(void) fprintf(stderr,
				    "global var option '%s' is too long\n",
				    optarg);
				usage(B_FALSE);
			}
			zo->zo_gvars_count++;
			break;
		case 'G':
			zo->zo_dump_dbgmsg = 1;
			break;
		case 'h':
			usage(B_TRUE);
			break;
		case '?':
		default:
			usage(B_FALSE);
			break;
		}
	}

	fini_options();

	/* When raid choice is 'random' add a draid pool 50% of the time */
	if (strcmp(raid_kind, "random") == 0) {
		raid_kind = (ztest_random(2) == 0) ? "draid" : "raidz";

		if (ztest_opts.zo_verbose >= 3)
			(void) printf("choosing RAID type '%s'\n", raid_kind);
	}

	if (strcmp(raid_kind, "draid") == 0) {
		uint64_t min_devsize;

		/* With fewer disks use 256M, otherwise 128M is OK */
		min_devsize = (ztest_opts.zo_raid_children < 16) ?
		    (256ULL << 20) : (128ULL << 20);

		/* No top-level mirrors with dRAID for now */
		zo->zo_mirrors = 0;

		/* Use more appropriate defaults for dRAID */
		if (zo->zo_vdevs == ztest_opts_defaults.zo_vdevs)
			zo->zo_vdevs = 1;
		if (zo->zo_raid_children ==
		    ztest_opts_defaults.zo_raid_children)
			zo->zo_raid_children = 16;
		if (zo->zo_ashift < 12)
			zo->zo_ashift = 12;
		if (zo->zo_vdev_size < min_devsize)
			zo->zo_vdev_size = min_devsize;

		if (zo->zo_draid_data + zo->zo_raid_parity >
		    zo->zo_raid_children - zo->zo_draid_spares) {
			(void) fprintf(stderr, "error: too few draid "
			    "children (%d) for stripe width (%d)\n",
			    zo->zo_raid_children,
			    zo->zo_draid_data + zo->zo_raid_parity);
			usage(B_FALSE);
		}

		(void) strlcpy(zo->zo_raid_type, VDEV_TYPE_DRAID,
		    sizeof (zo->zo_raid_type));

	} else /* using raidz */ {
		ASSERT0(strcmp(raid_kind, "raidz"));

		zo->zo_raid_parity = MIN(zo->zo_raid_parity,
		    zo->zo_raid_children - 1);
	}

	zo->zo_vdevtime =
	    (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
	    UINT64_MAX >> 2);

	if (*zo->zo_alt_ztest) {
		const char *invalid_what = "ztest";
		char *val = zo->zo_alt_ztest;
		if (0 != access(val, X_OK) ||
		    (strrchr(val, '/') == NULL && (errno == EINVAL)))
			goto invalid;

		int dirlen = strrchr(val, '/') - val;
		strlcpy(zo->zo_alt_libpath, val,
		    MIN(sizeof (zo->zo_alt_libpath), dirlen + 1));
		invalid_what = "library path", val = zo->zo_alt_libpath;
		if (strrchr(val, '/') == NULL && (errno == EINVAL))
			goto invalid;
		*strrchr(val, '/') = '\0';
		strlcat(val, "/lib", sizeof (zo->zo_alt_libpath));

		if (0 != access(zo->zo_alt_libpath, X_OK))
			goto invalid;
		return;

invalid:
		ztest_dump_core = B_FALSE;
		fatal(B_TRUE, "invalid alternate %s %s", invalid_what, val);
	}
}
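/*
 * Simulate a power failure (design note (5) above): record the current
 * allocation statistics, flush the cache file so the on-disk config is
 * current, then SIGKILL ourselves without any orderly shutdown.
 */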
static void
ztest_kill(ztest_shared_t *zs)
{
	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));

	/*
	 * Before we kill ourselves, make sure that the config is updated.
	 * See comment above spa_write_cachefile().
	 */
	mutex_enter(&spa_namespace_lock);
	spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE, B_FALSE);
	mutex_exit(&spa_namespace_lock);

	(void) raise(SIGKILL);
}

static void
ztest_record_enospc(const char *s)
{
	(void) s;
	ztest_shared->zs_enospc_count++;
}

static uint64_t
ztest_get_ashift(void)
{
	if (ztest_opts.zo_ashift == 0)
		return (SPA_MINBLOCKSHIFT + ztest_random(5));
	return (ztest_opts.zo_ashift);
}

static boolean_t
ztest_is_draid_spare(const char *name)
{
	uint64_t spare_id = 0, parity = 0, vdev_id = 0;

	if (sscanf(name, VDEV_TYPE_DRAID "%"PRIu64"-%"PRIu64"-%"PRIu64"",
	    &parity, &vdev_id, &spare_id) == 3) {
		return (B_TRUE);
	}

	return (B_FALSE);
}

static nvlist_t *
make_vdev_file(const char *path, const char *aux, const char *pool,
    size_t size, uint64_t ashift)
{
	char *pathbuf = NULL;
	uint64_t vdev;
	nvlist_t *file;
	boolean_t draid_spare = B_FALSE;

	if (ashift == 0)
		ashift = ztest_get_ashift();

	if (path == NULL) {
		pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
		path = pathbuf;

		if (aux != NULL) {
			vdev = ztest_shared->zs_vdev_aux;
			(void) snprintf(pathbuf, MAXPATHLEN,
			    ztest_aux_template, ztest_opts.zo_dir,
			    pool == NULL ? ztest_opts.zo_pool : pool,
			    aux, vdev);
		} else {
			vdev = ztest_shared->zs_vdev_next_leaf++;
			(void) snprintf(pathbuf, MAXPATHLEN,
			    ztest_dev_template, ztest_opts.zo_dir,
			    pool == NULL ? ztest_opts.zo_pool : pool, vdev);
		}
	} else {
		draid_spare = ztest_is_draid_spare(path);
	}

	if (size != 0 && !draid_spare) {
		int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
		if (fd == -1)
			fatal(B_TRUE, "can't open %s", path);
		if (ftruncate(fd, size) != 0)
			fatal(B_TRUE, "can't ftruncate %s", path);
		(void) close(fd);
	}

	file = fnvlist_alloc();
	fnvlist_add_string(file, ZPOOL_CONFIG_TYPE,
	    draid_spare ? VDEV_TYPE_DRAID_SPARE : VDEV_TYPE_FILE);
	fnvlist_add_string(file, ZPOOL_CONFIG_PATH, path);
	fnvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift);
	umem_free(pathbuf, MAXPATHLEN);

	return (file);
}
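/*
 * The vdev construction helpers compose nvlist configs bottom-up:
 * make_vdev_file() above builds a single file (or dRAID spare) leaf;
 * make_vdev_raid() below groups r leaves into a raidz/draid vdev (or
 * passes a single leaf through); make_vdev_mirror() mirrors m of those;
 * and make_vdev_root() gathers t children under a root vdev, optionally
 * tagging them with an allocation class.
 */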
static nvlist_t *
make_vdev_raid(const char *path, const char *aux, const char *pool,
    size_t size, uint64_t ashift, int r)
{
	nvlist_t *raid, **child;
	int c;

	if (r < 2)
		return (make_vdev_file(path, aux, pool, size, ashift));
	child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < r; c++)
		child[c] = make_vdev_file(path, aux, pool, size, ashift);

	raid = fnvlist_alloc();
	fnvlist_add_string(raid, ZPOOL_CONFIG_TYPE,
	    ztest_opts.zo_raid_type);
	fnvlist_add_uint64(raid, ZPOOL_CONFIG_NPARITY,
	    ztest_opts.zo_raid_parity);
	fnvlist_add_nvlist_array(raid, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)child, r);

	if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0) {
		uint64_t ndata = ztest_opts.zo_draid_data;
		uint64_t nparity = ztest_opts.zo_raid_parity;
		uint64_t nspares = ztest_opts.zo_draid_spares;
		uint64_t children = ztest_opts.zo_raid_children;
		uint64_t ngroups = 1;

		/*
		 * Calculate the minimum number of groups required to fill a
		 * slice.  This is the LCM of the stripe width (data + parity)
		 * and the number of data drives (children - spares).
		 */
		while (ngroups * (ndata + nparity) % (children - nspares) != 0)
			ngroups++;

		/* Store the basic dRAID configuration. */
		fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NDATA, ndata);
		fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NSPARES, nspares);
		fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NGROUPS, ngroups);
	}

	for (c = 0; c < r; c++)
		fnvlist_free(child[c]);

	umem_free(child, r * sizeof (nvlist_t *));

	return (raid);
}

static nvlist_t *
make_vdev_mirror(const char *path, const char *aux, const char *pool,
    size_t size, uint64_t ashift, int r, int m)
{
	nvlist_t *mirror, **child;
	int c;

	if (m < 1)
		return (make_vdev_raid(path, aux, pool, size, ashift, r));

	child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < m; c++)
		child[c] = make_vdev_raid(path, aux, pool, size, ashift, r);

	mirror = fnvlist_alloc();
	fnvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, VDEV_TYPE_MIRROR);
	fnvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)child, m);

	for (c = 0; c < m; c++)
		fnvlist_free(child[c]);

	umem_free(child, m * sizeof (nvlist_t *));

	return (mirror);
}

static nvlist_t *
make_vdev_root(const char *path, const char *aux, const char *pool,
    size_t size, uint64_t ashift, const char *class, int r, int m, int t)
{
	nvlist_t *root, **child;
	int c;
	boolean_t log;

	ASSERT3S(t, >, 0);

	log = (class != NULL && strcmp(class, "log") == 0);

	child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < t; c++) {
		child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
		    r, m);
		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, log);

		if (class != NULL && class[0] != '\0') {
			ASSERT(m > 1 || log);	/* expecting a mirror */
			fnvlist_add_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, class);
		}
	}

	root = fnvlist_alloc();
	fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	fnvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)child, t);

	for (c = 0; c < t; c++)
		fnvlist_free(child[c]);

	umem_free(child, t * sizeof (nvlist_t *));

	return (root);
}

/*
 * Find a random spa version.  Returns a random spa version in the
 * range [initial_version, SPA_VERSION_FEATURES].
 */
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
	uint64_t version = initial_version;

	if (version <= SPA_VERSION_BEFORE_FEATURES) {
		version = version +
		    ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
	}

	if (version > SPA_VERSION_BEFORE_FEATURES)
		version = SPA_VERSION_FEATURES;

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	return (version);
}
static int
ztest_random_blocksize(void)
{
	ASSERT3U(ztest_spa->spa_max_ashift, !=, 0);

	/*
	 * Choose a block size >= the ashift.
	 * If the SPA supports new MAXBLOCKSIZE, test up to 1MB blocks.
	 */
	int maxbs = SPA_OLD_MAXBLOCKSHIFT;
	if (spa_maxblocksize(ztest_spa) == SPA_MAXBLOCKSIZE)
		maxbs = 20;
	uint64_t block_shift =
	    ztest_random(maxbs - ztest_spa->spa_max_ashift + 1);
	return (1 << (SPA_MINBLOCKSHIFT + block_shift));
}

static int
ztest_random_dnodesize(void)
{
	int slots;
	int max_slots = spa_maxdnodesize(ztest_spa) >> DNODE_SHIFT;

	if (max_slots == DNODE_MIN_SLOTS)
		return (DNODE_MIN_SIZE);

	/*
	 * Weight the random distribution more heavily toward smaller
	 * dnode sizes since that is more likely to reflect real-world
	 * usage.
	 */
	ASSERT3U(max_slots, >, 4);
	switch (ztest_random(10)) {
	case 0:
		slots = 5 + ztest_random(max_slots - 4);
		break;
	case 1 ... 4:
		slots = 2 + ztest_random(3);
		break;
	default:
		slots = 1;
		break;
	}

	return (slots << DNODE_SHIFT);
}

static int
ztest_random_ibshift(void)
{
	return (DN_MIN_INDBLKSHIFT +
	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}

static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
	uint64_t top;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *tvd;

	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	do {
		top = ztest_random(rvd->vdev_children);
		tvd = rvd->vdev_child[top];
	} while (!vdev_is_concrete(tvd) || (tvd->vdev_islog && !log_ok) ||
	    tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);

	return (top);
}

static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
	uint64_t value;

	do {
		value = zfs_prop_random_value(prop, ztest_random(-1ULL));
	} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

	return (value);
}
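/*
 * Set a dataset property to the given value (or inherit it when
 * requested).  ENOSPC is tolerated and merely recorded via
 * ztest_record_enospc(); any other error is fatal (ASSERT0 below).
 */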
static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
    boolean_t inherit)
{
	const char *propname = zfs_prop_to_name(prop);
	const char *valname;
	char *setpoint;
	uint64_t curval;
	int error;

	error = dsl_prop_set_int(osname, propname,
	    (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT0(error);

	setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
	VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));

	if (ztest_opts.zo_verbose >= 6) {
		int err;

		err = zfs_prop_index_to_string(prop, curval, &valname);
		if (err)
			(void) printf("%s %s = %llu at '%s'\n", osname,
			    propname, (unsigned long long)curval, setpoint);
		else
			(void) printf("%s %s = %s at '%s'\n",
			    osname, propname, valname, setpoint);
	}
	umem_free(setpoint, MAXPATHLEN);

	return (error);
}

static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
	spa_t *spa = ztest_spa;
	nvlist_t *props = NULL;
	int error;

	props = fnvlist_alloc();
	fnvlist_add_uint64(props, zpool_prop_to_name(prop), value);

	error = spa_prop_set(spa, props);

	fnvlist_free(props);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT0(error);

	return (error);
}
static int
ztest_dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
	int err;
	char *cp = NULL;
	char ddname[ZFS_MAX_DATASET_NAME_LEN];

	strlcpy(ddname, name, sizeof (ddname));
	cp = strchr(ddname, '@');
	if (cp != NULL)
		*cp = '\0';

	err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
	while (decrypt && err == EACCES) {
		dsl_crypto_params_t *dcp;
		nvlist_t *crypto_args = fnvlist_alloc();

		fnvlist_add_uint8_array(crypto_args, "wkeydata",
		    (uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
		VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
		    crypto_args, &dcp));
		err = spa_keystore_load_wkey(ddname, dcp, B_FALSE);
		/*
		 * Note: if there was an error loading, the wkey was not
		 * consumed, and needs to be freed.
		 */
		dsl_crypto_params_free(dcp, (err != 0));
		fnvlist_free(crypto_args);

		if (err == EINVAL) {
			/*
			 * We couldn't load a key for this dataset so try
			 * the parent.  This loop will eventually hit the
			 * encryption root since ztest only makes clones
			 * as children of their origin datasets.
			 */
			cp = strrchr(ddname, '/');
			if (cp == NULL)
				return (err);

			*cp = '\0';
			err = EACCES;
			continue;
		} else if (err != 0) {
			break;
		}

		err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
		break;
	}

	return (err);
}

static void
ztest_rll_init(rll_t *rll)
{
	rll->rll_writer = NULL;
	rll->rll_readers = 0;
	mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);
}

static void
ztest_rll_destroy(rll_t *rll)
{
	ASSERT3P(rll->rll_writer, ==, NULL);
	ASSERT0(rll->rll_readers);
	mutex_destroy(&rll->rll_lock);
	cv_destroy(&rll->rll_cv);
}

static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
	mutex_enter(&rll->rll_lock);

	if (type == RL_READER) {
		while (rll->rll_writer != NULL)
			(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_readers++;
	} else {
		while (rll->rll_writer != NULL || rll->rll_readers)
			(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_writer = curthread;
	}

	mutex_exit(&rll->rll_lock);
}

static void
ztest_rll_unlock(rll_t *rll)
{
	mutex_enter(&rll->rll_lock);

	if (rll->rll_writer) {
		ASSERT0(rll->rll_readers);
		rll->rll_writer = NULL;
	} else {
		ASSERT3S(rll->rll_readers, >, 0);
		ASSERT3P(rll->rll_writer, ==, NULL);
		rll->rll_readers--;
	}

	if (rll->rll_writer == NULL && rll->rll_readers == 0)
		cv_broadcast(&rll->rll_cv);

	mutex_exit(&rll->rll_lock);
}

static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_lock(rll, type);
}

static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_unlock(rll);
}

static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
    uint64_t size, rl_type_t type)
{
	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
	rl_t *rl;

	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
	rl->rl_object = object;
	rl->rl_offset = offset;
	rl->rl_size = size;
	rl->rl_lock = rll;

	ztest_rll_lock(rll, type);

	return (rl);
}

static void
ztest_range_unlock(rl_t *rl)
{
	rll_t *rll = rl->rl_lock;

	ztest_rll_unlock(rll);

	umem_free(rl, sizeof (*rl));
}
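/*
 * Lock-ordering note for the helpers above: callers that need both take
 * the object lock before the range lock (see, e.g., ztest_replay_write()
 * below) and drop them in the reverse order.
 */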
static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
	zd->zd_os = os;
	zd->zd_zilog = dmu_objset_zil(os);
	zd->zd_shared = szd;
	dmu_objset_name(os, zd->zd_name);
	int l;

	if (zd->zd_shared != NULL)
		zd->zd_shared->zd_seq = 0;

	VERIFY0(pthread_rwlock_init(&zd->zd_zilog_lock, NULL));
	mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);

	for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_init(&zd->zd_object_lock[l]);

	for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_init(&zd->zd_range_lock[l]);
}

static void
ztest_zd_fini(ztest_ds_t *zd)
{
	int l;

	mutex_destroy(&zd->zd_dirobj_lock);
	(void) pthread_rwlock_destroy(&zd->zd_zilog_lock);

	for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_object_lock[l]);

	for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_range_lock[l]);
}

#define	TXG_MIGHTWAIT	(ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)

static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
	uint64_t txg;
	int error;

	/*
	 * Attempt to assign tx to some transaction group.
	 */
	error = dmu_tx_assign(tx, txg_how);
	if (error) {
		if (error == ERESTART) {
			ASSERT3U(txg_how, ==, TXG_NOWAIT);
			dmu_tx_wait(tx);
		} else {
			ASSERT3U(error, ==, ENOSPC);
			ztest_record_enospc(tag);
		}
		dmu_tx_abort(tx);
		return (0);
	}
	txg = dmu_tx_get_txg(tx);
	ASSERT3U(txg, !=, 0);
	return (txg);
}

static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
    uint64_t crtxg)
{
	bt->bt_magic = BT_MAGIC;
	bt->bt_objset = dmu_objset_id(os);
	bt->bt_object = object;
	bt->bt_dnodesize = dnodesize;
	bt->bt_offset = offset;
	bt->bt_gen = gen;
	bt->bt_txg = txg;
	bt->bt_crtxg = crtxg;
}

static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
    uint64_t crtxg)
{
	ASSERT3U(bt->bt_magic, ==, BT_MAGIC);
	ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os));
	ASSERT3U(bt->bt_object, ==, object);
	ASSERT3U(bt->bt_dnodesize, ==, dnodesize);
	ASSERT3U(bt->bt_offset, ==, offset);
	ASSERT3U(bt->bt_gen, <=, gen);
	ASSERT3U(bt->bt_txg, <=, txg);
	ASSERT3U(bt->bt_crtxg, ==, crtxg);
}

static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
	dmu_object_info_t doi;
	ztest_block_tag_t *bt;

	dmu_object_info_from_db(db, &doi);
	ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
	ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
	bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));

	return (bt);
}

/*
 * Generate a token to fill up unused bonus buffer space.  Try to make
 * it unique to the object, generation, and offset to verify that data
 * is not getting overwritten by data from other dnodes.
 */
#define	ZTEST_BONUS_FILL_TOKEN(obj, ds, gen, offset)	\
	(((ds) << 48) | ((gen) << 32) | ((obj) << 8) | (offset))

/*
 * Fill up the unused bonus buffer region before the block tag with a
 * verifiable pattern.  Filling the whole bonus area with non-zero data
 * helps ensure that all dnode traversal code properly skips the
 * interior regions of large dnodes.
 */
static void
ztest_fill_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
    objset_t *os, uint64_t gen)
{
	uint64_t *bonusp;

	ASSERT(IS_P2ALIGNED((char *)end - (char *)db->db_data, 8));

	for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
		uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
		    gen, bonusp - (uint64_t *)db->db_data);
		*bonusp = token;
	}
}
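/*
 * Token layout, reading ZTEST_BONUS_FILL_TOKEN() left to right: bits 48
 * and up hold the objset id, bits 32-47 the generation, bits 8-31 the
 * object number, and the low 8 bits the word offset within the bonus
 * buffer.  Wide values simply overlap; the token is a probabilistic
 * fingerprint, not a lossless encoding.
 */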
/*
 * Verify that the unused area of a bonus buffer is filled with the
 * expected tokens.
 */
static void
ztest_verify_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
    objset_t *os, uint64_t gen)
{
	uint64_t *bonusp;

	for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
		uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
		    gen, bonusp - (uint64_t *)db->db_data);
		VERIFY3U(*bonusp, ==, token);
	}
}

/*
 * ZIL logging ops
 */

#define	lrz_type	lr_mode
#define	lrz_blocksize	lr_uid
#define	lrz_ibshift	lr_gid
#define	lrz_bonustype	lr_rdev
#define	lrz_dnodesize	lr_crtime[1]

static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
	memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
	memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	itx->itx_oid = object;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}
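/*
 * For TX_WRITE records ztest picks a random itx write state: WR_COPIED
 * embeds the data in the itx itself, WR_INDIRECT logs a block pointer
 * and lets the data be written in place, and WR_NEED_COPY defers the
 * copy until the itx is committed.  Writes too large for an embedded
 * record are forced to WR_INDIRECT (see the zil_max_log_data() check
 * below).
 */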
static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
	itx_t *itx;
	itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	if (lr->lr_length > zil_max_log_data(zd->zd_zilog, sizeof (lr_write_t)))
		write_state = WR_INDIRECT;

	itx = zil_itx_create(TX_WRITE,
	    sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));

	if (write_state == WR_COPIED &&
	    dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
		zil_itx_destroy(itx);
		itx = zil_itx_create(TX_WRITE, sizeof (*lr));
		write_state = WR_NEED_COPY;
	}
	itx->itx_private = zd;
	itx->itx_wr_state = write_state;
	itx->itx_sync = (ztest_random(8) == 0);

	memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
	    sizeof (*lr) - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
	itx_t *itx;

	if (zil_replaying(zd->zd_zilog, tx))
		return;

	itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
	memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
}

/*
 * ZIL replay ops
 */
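/*
 * These callbacks serve double duty: zil_replay() invokes them (with
 * byteswap set as needed) when a dataset's log is replayed, and the
 * ztest_create()/ztest_remove()/ztest_write() wrappers below call them
 * directly in open context.  A zero lr_foid asks ztest_replay_create()
 * to allocate a new object; a nonzero lr_foid claims that specific
 * object id, which only happens during replay.
 */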
static int
ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
	ztest_ds_t *zd = arg1;
	lr_create_t *lr = arg2;
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	ztest_block_tag_t *bbt;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	uint64_t txg;
	int error = 0;
	int bonuslen;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT3U(lr->lr_doid, ==, ZTEST_DIROBJ);
	ASSERT3S(name[0], !=, '\0');

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
	}

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0)
		return (ENOSPC);

	ASSERT3U(dmu_objset_zil(os)->zl_replay, ==, !!lr->lr_foid);
	bonuslen = DN_BONUS_SIZE(lr->lrz_dnodesize);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		if (lr->lr_foid == 0) {
			lr->lr_foid = zap_create_dnsize(os,
			    lr->lrz_type, lr->lrz_bonustype,
			    bonuslen, lr->lrz_dnodesize, tx);
		} else {
			error = zap_create_claim_dnsize(os, lr->lr_foid,
			    lr->lrz_type, lr->lrz_bonustype,
			    bonuslen, lr->lrz_dnodesize, tx);
		}
	} else {
		if (lr->lr_foid == 0) {
			lr->lr_foid = dmu_object_alloc_dnsize(os,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    bonuslen, lr->lrz_dnodesize, tx);
		} else {
			error = dmu_object_claim_dnsize(os, lr->lr_foid,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    bonuslen, lr->lrz_dnodesize, tx);
		}
	}

	if (error) {
		ASSERT3U(error, ==, EEXIST);
		ASSERT(zd->zd_zilog->zl_replay);
		dmu_tx_commit(tx);
		return (error);
	}

	ASSERT3U(lr->lr_foid, !=, 0);

	if (lr->lrz_type != DMU_OT_ZAP_OTHER)
		VERIFY0(dmu_object_set_blocksize(os, lr->lr_foid,
		    lr->lrz_blocksize, lr->lrz_ibshift, tx));

	VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
	bbt = ztest_bt_bonus(db);
	dmu_buf_will_dirty(db, tx);
	ztest_bt_generate(bbt, os, lr->lr_foid, lr->lrz_dnodesize, -1ULL,
	    lr->lr_gen, txg, txg);
	ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, lr->lr_gen);
	dmu_buf_rele(db, FTAG);

	VERIFY0(zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
	    &lr->lr_foid, tx));

	(void) ztest_log_create(zd, tx, lr);

	dmu_tx_commit(tx);

	return (0);
}

static int
ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
{
	ztest_ds_t *zd = arg1;
	lr_remove_t *lr = arg2;
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object, txg;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT3U(lr->lr_doid, ==, ZTEST_DIROBJ);
	ASSERT3S(name[0], !=, '\0');

	VERIFY0(
	    zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
	ASSERT3U(object, !=, 0);

	ztest_object_lock(zd, object, RL_WRITER);

	VERIFY0(dmu_object_info(os, object, &doi));

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_object_unlock(zd, object);
		return (ENOSPC);
	}

	if (doi.doi_type == DMU_OT_ZAP_OTHER) {
		VERIFY0(zap_destroy(os, object, tx));
	} else {
		VERIFY0(dmu_object_free(os, object, tx));
	}

	VERIFY0(zap_remove(os, lr->lr_doid, name, tx));

	(void) ztest_log_remove(zd, tx, lr, object);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, object);

	return (0);
}
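/*
 * Data written through ztest_write() usually begins with a
 * ztest_block_tag_t.  On replay/rewrite, ztest_replay_write() uses that
 * embedded tag, when present, to check that generation and txg numbers
 * never move backwards; a write whose first machine word is not
 * BT_MAGIC (in either byte order) is treated as untagged data and the
 * tag checks are skipped.
 */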
static int
ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
	ztest_ds_t *zd = arg1;
	lr_write_t *lr = arg2;
	objset_t *os = zd->zd_os;
	void *data = lr + 1;			/* data follows lr */
	uint64_t offset, length;
	ztest_block_tag_t *bt = data;
	ztest_block_tag_t *bbt;
	uint64_t gen, txg, lrtxg, crtxg;
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	arc_buf_t *abuf = NULL;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	if (bt->bt_magic == BSWAP_64(BT_MAGIC))
		byteswap_uint64_array(bt, sizeof (*bt));

	if (bt->bt_magic != BT_MAGIC)
		bt = NULL;

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);

	VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	dmu_object_info_from_db(db, &doi);

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	gen = bbt->bt_gen;
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, lr->lr_foid, offset, length);

	if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
	    P2PHASE(offset, length) == 0)
		abuf = dmu_request_arcbuf(db, length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		if (abuf != NULL)
			dmu_return_arcbuf(abuf);
		dmu_buf_rele(db, FTAG);
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	if (bt != NULL) {
		/*
		 * Usually, verify the old data before writing new data --
		 * but not always, because we also want to verify correct
		 * behavior when the data was not recently read into cache.
		 */
		ASSERT(doi.doi_data_block_size);
		ASSERT0(offset % doi.doi_data_block_size);
		if (ztest_random(4) != 0) {
			int prefetch = ztest_random(2) ?
			    DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
			ztest_block_tag_t rbt;

			VERIFY(dmu_read(os, lr->lr_foid, offset,
			    sizeof (rbt), &rbt, prefetch) == 0);
			if (rbt.bt_magic == BT_MAGIC) {
				ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
				    offset, gen, txg, crtxg);
			}
		}

		/*
		 * Writes can appear to be newer than the bonus buffer because
		 * the ztest_get_data() callback does a dmu_read() of the
		 * open-context data, which may be different from the data
		 * as it was when the write was generated.
		 */
		if (zd->zd_zilog->zl_replay) {
			ztest_bt_verify(bt, os, lr->lr_foid, 0, offset,
			    MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
			    bt->bt_crtxg);
		}

		/*
		 * Set the bt's gen/txg to the bonus buffer's gen/txg
		 * so that all of the usual ASSERTs will work.
		 */
		ztest_bt_generate(bt, os, lr->lr_foid, 0, offset, gen, txg,
		    crtxg);
	}

	if (abuf == NULL) {
		dmu_write(os, lr->lr_foid, offset, length, data, tx);
	} else {
		memcpy(abuf->b_data, data, length);
		VERIFY0(dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx));
	}

	(void) ztest_log_write(zd, tx, lr);

	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static int
ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
	ztest_ds_t *zd = arg1;
	lr_truncate_t *lr = arg2;
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	VERIFY0(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
	    lr->lr_length, tx));

	(void) ztest_log_truncate(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}
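/*
 * ztest repurposes the setattr record: lr_size carries the new bonus
 * length (a random multiple of sizeof (ztest_block_tag_t)) and lr_mode
 * carries the incremented generation number.  In open context both
 * arrive as zero and are chosen below; during replay the logged values
 * must already be filled in, which the ASSERTs under zl_replay check.
 */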
static int
ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
{
	ztest_ds_t *zd = arg1;
	lr_setattr_t *lr = arg2;
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	dmu_buf_t *db;
	ztest_block_tag_t *bbt;
	uint64_t txg, lrtxg, crtxg, dnodesize;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_WRITER);

	VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, lr->lr_foid);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, lr->lr_foid);
		return (ENOSPC);
	}

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;
	dnodesize = bbt->bt_dnodesize;

	if (zd->zd_zilog->zl_replay) {
		ASSERT3U(lr->lr_size, !=, 0);
		ASSERT3U(lr->lr_mode, !=, 0);
		ASSERT3U(lrtxg, !=, 0);
	} else {
		/*
		 * Randomly change the size and increment the generation.
		 */
		lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
		    sizeof (*bbt);
		lr->lr_mode = bbt->bt_gen + 1;
		ASSERT0(lrtxg);
	}

	/*
	 * Verify that the current bonus buffer is not newer than our txg.
	 */
	ztest_bt_verify(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
	    MAX(txg, lrtxg), crtxg);

	dmu_buf_will_dirty(db, tx);

	ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
	ASSERT3U(lr->lr_size, <=, db->db_size);
	VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
	bbt = ztest_bt_bonus(db);

	ztest_bt_generate(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
	    txg, crtxg);
	ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, bbt->bt_gen);
	dmu_buf_rele(db, FTAG);

	(void) ztest_log_setattr(zd, tx, lr);

	dmu_tx_commit(tx);

	ztest_object_unlock(zd, lr->lr_foid);

	return (0);
}

static zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
	NULL,				/* 0 no such transaction type */
	ztest_replay_create,		/* TX_CREATE */
	NULL,				/* TX_MKDIR */
	NULL,				/* TX_MKXATTR */
	NULL,				/* TX_SYMLINK */
	ztest_replay_remove,		/* TX_REMOVE */
	NULL,				/* TX_RMDIR */
	NULL,				/* TX_LINK */
	NULL,				/* TX_RENAME */
	ztest_replay_write,		/* TX_WRITE */
	ztest_replay_truncate,		/* TX_TRUNCATE */
	ztest_replay_setattr,		/* TX_SETATTR */
	NULL,				/* TX_ACL */
	NULL,				/* TX_CREATE_ACL */
	NULL,				/* TX_CREATE_ATTR */
	NULL,				/* TX_CREATE_ACL_ATTR */
	NULL,				/* TX_MKDIR_ACL */
	NULL,				/* TX_MKDIR_ATTR */
	NULL,				/* TX_MKDIR_ACL_ATTR */
	NULL,				/* TX_WRITE2 */
	NULL,				/* TX_SETSAXATTR */
	NULL,				/* TX_RENAME_EXCHANGE */
	NULL,				/* TX_RENAME_WHITEOUT */
};

/*
 * ZIL get_data callbacks
 */
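/*
 * ztest_get_data() implements the zil_get_data_t contract: for
 * WR_NEED_COPY records the caller supplies buf and we copy the data
 * into it directly; for WR_INDIRECT records buf is NULL and we issue a
 * dmu_sync() against the provided zio, with ztest_get_done() releasing
 * the locks and dbuf hold once the sync write completes.
 */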
static void
ztest_get_done(zgd_t *zgd, int error)
{
	(void) error;
	ztest_ds_t *zd = zgd->zgd_private;
	uint64_t object = ((rl_t *)zgd->zgd_lr)->rl_object;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	ztest_range_unlock((rl_t *)zgd->zgd_lr);
	ztest_object_unlock(zd, object);

	umem_free(zgd, sizeof (*zgd));
}

static int
ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio)
{
	(void) arg2;
	ztest_ds_t *zd = arg;
	objset_t *os = zd->zd_os;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	uint64_t txg = lr->lr_common.lrc_txg;
	uint64_t crtxg;
	dmu_object_info_t doi;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3U(size, !=, 0);

	ztest_object_lock(zd, object, RL_READER);
	error = dmu_bonus_hold(os, object, FTAG, &db);
	if (error) {
		ztest_object_unlock(zd, object);
		return (error);
	}

	crtxg = ztest_bt_bonus(db)->bt_crtxg;

	if (crtxg == 0 || crtxg > txg) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, object);
		return (ENOENT);
	}

	dmu_object_info_from_db(db, &doi);
	dmu_buf_rele(db, FTAG);
	db = NULL;

	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zd;

	if (buf != NULL) {	/* immediate write */
		zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
		    object, offset, size, RL_READER);

		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
		ASSERT0(error);
	} else {
		ASSERT3P(zio, !=, NULL);
		size = doi.doi_data_block_size;
		if (ISP2(size)) {
			offset = P2ALIGN(offset, size);
		} else {
			ASSERT3U(offset, <, size);
			offset = 0;
		}

		zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
		    object, offset, size, RL_READER);

		error = dmu_buf_hold_noread(os, object, offset, zgd, &db);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT3U(db->db_offset, ==, offset);
			ASSERT3U(db->db_size, ==, size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    ztest_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	ztest_get_done(zgd, error);

	return (error);
}

static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
	char *lr;
	size_t namesize = name ? strlen(name) + 1 : 0;

	lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);

	if (name)
		memcpy(lr + lrsize, name, namesize);

	return (lr);
}

static void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
	size_t namesize = name ? strlen(name) + 1 : 0;

	umem_free(lr, lrsize + namesize);
}
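/*
 * The object-descriptor helpers below operate on arrays of ztest_od_t
 * under zd_dirobj_lock.  Each returns the number of objects it could
 * not process, and each stops doing real work at the first failure so
 * that a batch is always a contiguous prefix of live objects --
 * ztest_lookup() asserts exactly that no-gaps invariant.
 */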
/*
 * Lookup a bunch of objects.  Returns the number of objects not found.
 */
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;
	int i;

	ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));

	for (i = 0; i < count; i++, od++) {
		od->od_object = 0;
		error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
		    sizeof (uint64_t), 1, &od->od_object);
		if (error) {
			ASSERT3S(error, ==, ENOENT);
			ASSERT0(od->od_object);
			missing++;
		} else {
			dmu_buf_t *db;
			ztest_block_tag_t *bbt;
			dmu_object_info_t doi;

			ASSERT3U(od->od_object, !=, 0);
			ASSERT0(missing);	/* there should be no gaps */

			ztest_object_lock(zd, od->od_object, RL_READER);
			VERIFY0(dmu_bonus_hold(zd->zd_os, od->od_object,
			    FTAG, &db));
			dmu_object_info_from_db(db, &doi);
			bbt = ztest_bt_bonus(db);
			ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
			od->od_type = doi.doi_type;
			od->od_blocksize = doi.doi_data_block_size;
			od->od_gen = bbt->bt_gen;
			dmu_buf_rele(db, FTAG);
			ztest_object_unlock(zd, od->od_object);
		}
	}

	return (missing);
}

static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int i;

	ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));

	for (i = 0; i < count; i++, od++) {
		if (missing) {
			od->od_object = 0;
			missing++;
			continue;
		}

		lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;
		lr->lr_foid = 0;	/* 0 to allocate, > 0 to claim */
		lr->lrz_type = od->od_crtype;
		lr->lrz_blocksize = od->od_crblocksize;
		lr->lrz_ibshift = ztest_random_ibshift();
		lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
		lr->lrz_dnodesize = od->od_crdnodesize;
		lr->lr_gen = od->od_crgen;
		lr->lr_crtime[0] = time(NULL);

		if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
			ASSERT0(missing);
			od->od_object = 0;
			missing++;
		} else {
			od->od_object = lr->lr_foid;
			od->od_type = od->od_crtype;
			od->od_blocksize = od->od_crblocksize;
			od->od_gen = od->od_crgen;
			ASSERT3U(od->od_object, !=, 0);
		}

		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}
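/*
 * Removal walks the descriptor array in reverse so that a partial
 * failure leaves the surviving objects as a contiguous prefix,
 * preserving the no-gaps invariant that ztest_lookup() checks.
 */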
static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
	int missing = 0;
	int error;
	int i;

	ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));

	od += count - 1;

	for (i = count - 1; i >= 0; i--, od--) {
		if (missing) {
			missing++;
			continue;
		}

		/*
		 * No object was found.
		 */
		if (od->od_object == 0)
			continue;

		lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;

		if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
			ASSERT3U(error, ==, ENOSPC);
			missing++;
		} else {
			od->od_object = 0;
		}
		ztest_lr_free(lr, sizeof (*lr), od->od_name);
	}

	return (missing);
}

static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
    void *data)
{
	lr_write_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;
	lr->lr_blkoff = 0;
	BP_ZERO(&lr->lr_blkptr);

	memcpy(lr + 1, data, size);

	error = ztest_replay_write(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr) + size, NULL);

	return (error);
}

static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	lr_truncate_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;

	error = ztest_replay_truncate(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
	lr_setattr_t *lr;
	int error;

	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_size = 0;
	lr->lr_mode = 0;

	error = ztest_replay_setattr(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

	return (error);
}

static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
	objset_t *os = zd->zd_os;
	dmu_tx_t *tx;
	uint64_t txg;
	rl_t *rl;

	txg_wait_synced(dmu_objset_pool(os), 0);

	ztest_object_lock(zd, object, RL_READER);
	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, object, offset, size);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

	if (txg != 0) {
		dmu_prealloc(os, object, offset, size, tx);
		dmu_tx_commit(tx);
		txg_wait_synced(dmu_objset_pool(os), txg);
	} else {
		(void) dmu_free_long_range(os, object, offset, size);
	}

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, object);
}
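/*
 * ztest_io() issues a single random out-of-band I/O against an object.
 * It holds zd_zilog_lock as reader for the duration so that
 * ztest_zil_remount() can take it as writer to quiesce all I/O before
 * closing and reopening the ZIL.
 */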
static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
	int err;
	ztest_block_tag_t wbt;
	dmu_object_info_t doi;
	enum ztest_io_type io_type;
	uint64_t blocksize;
	void *data;

	VERIFY0(dmu_object_info(zd->zd_os, object, &doi));
	blocksize = doi.doi_data_block_size;
	data = umem_alloc(blocksize, UMEM_NOFAIL);

	/*
	 * Pick an i/o type at random, biased toward writing block tags.
	 */
	io_type = ztest_random(ZTEST_IO_TYPES);
	if (ztest_random(2) == 0)
		io_type = ZTEST_IO_WRITE_TAG;

	(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);

	switch (io_type) {

	case ZTEST_IO_WRITE_TAG:
		ztest_bt_generate(&wbt, zd->zd_os, object, doi.doi_dnodesize,
		    offset, 0, 0, 0);
		(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
		break;

	case ZTEST_IO_WRITE_PATTERN:
		(void) memset(data, 'a' + (object + offset) % 5, blocksize);
		if (ztest_random(2) == 0) {
			/*
			 * Induce fletcher2 collisions to ensure that
			 * zio_ddt_collision() detects and resolves them
			 * when using fletcher2-verify for deduplication.
			 */
			((uint64_t *)data)[0] ^= 1ULL << 63;
			((uint64_t *)data)[4] ^= 1ULL << 63;
		}
		(void) ztest_write(zd, object, offset, blocksize, data);
		break;

	case ZTEST_IO_WRITE_ZEROES:
		memset(data, 0, blocksize);
		(void) ztest_write(zd, object, offset, blocksize, data);
		break;

	case ZTEST_IO_TRUNCATE:
		(void) ztest_truncate(zd, object, offset, blocksize);
		break;

	case ZTEST_IO_SETATTR:
		(void) ztest_setattr(zd, object);
		break;

	case ZTEST_IO_REWRITE:
		(void) pthread_rwlock_rdlock(&ztest_name_lock);
		err = ztest_dsl_prop_set_uint64(zd->zd_name,
		    ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
		    B_FALSE);
		ASSERT(err == 0 || err == ENOSPC);
		err = ztest_dsl_prop_set_uint64(zd->zd_name,
		    ZFS_PROP_COMPRESSION,
		    ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
		    B_FALSE);
		ASSERT(err == 0 || err == ENOSPC);
		(void) pthread_rwlock_unlock(&ztest_name_lock);

		VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
		    DMU_READ_NO_PREFETCH));

		(void) ztest_write(zd, object, offset, blocksize, data);
		break;

	default:
		break;
	}

	(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);

	umem_free(data, blocksize);
}

/*
 * Initialize an object description template.
 */
static void
ztest_od_init(ztest_od_t *od, uint64_t id, const char *tag, uint64_t index,
    dmu_object_type_t type, uint64_t blocksize, uint64_t dnodesize,
    uint64_t gen)
{
	od->od_dir = ZTEST_DIROBJ;
	od->od_object = 0;

	od->od_crtype = type;
	od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
	od->od_crdnodesize = dnodesize ? dnodesize : ztest_random_dnodesize();
	od->od_crgen = gen;

	od->od_type = DMU_OT_NONE;
	od->od_blocksize = 0;
	od->od_gen = 0;

	(void) snprintf(od->od_name, sizeof (od->od_name),
	    "%s(%"PRId64")[%"PRIu64"]",
	    tag, id, index);
}
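/*
 * Typical usage (an illustrative sketch, not a quote of any one
 * caller):
 *
 *	ztest_od_t *od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
 *	ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
 *	if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
 *		goto out;	// out of space; bail
 *
 * Passing zero for blocksize or dnodesize asks ztest_od_init() to pick
 * a random legal value.
 */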
/*
 * Lookup or create the objects for a test using the od template.
 * If the objects do not all exist, or if 'remove' is specified,
 * remove any existing objects and create new ones.  Otherwise,
 * use the existing objects.
 */
static int
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
	int count = size / sizeof (*od);
	int rv = 0;

	mutex_enter(&zd->zd_dirobj_lock);
	if ((ztest_lookup(zd, od, count) != 0 || remove) &&
	    (ztest_remove(zd, od, count) != 0 ||
	    ztest_create(zd, od, count) != 0))
		rv = -1;
	zd->zd_od = od;
	mutex_exit(&zd->zd_dirobj_lock);

	return (rv);
}

void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
	(void) id;
	zilog_t *zilog = zd->zd_zilog;

	(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);

	zil_commit(zilog, ztest_random(ZTEST_OBJECTS));

	/*
	 * Remember the committed values in zd, which is in parent/child
	 * shared memory.  If we die, the next iteration of ztest_run()
	 * will verify that the log really does contain this record.
	 */
	mutex_enter(&zilog->zl_lock);
	ASSERT3P(zd->zd_shared, !=, NULL);
	ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
	zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
	mutex_exit(&zilog->zl_lock);

	(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
}

/*
 * This function is designed to simulate the operations that occur during a
 * mount/unmount operation.  We hold the dataset across these operations in an
 * attempt to expose any implicit assumptions about ZIL management.
 */
void
ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
{
	(void) id;
	objset_t *os = zd->zd_os;

	/*
	 * We hold the ztest_vdev_lock so we don't cause problems with
	 * other threads that wish to remove a log device, such as
	 * ztest_device_removal().
	 */
	mutex_enter(&ztest_vdev_lock);

	/*
	 * We grab the zd_dirobj_lock to ensure that no other thread is
	 * updating the zil (i.e. adding in-memory log records) and the
	 * zd_zilog_lock to block any I/O.
	 */
	mutex_enter(&zd->zd_dirobj_lock);
	(void) pthread_rwlock_wrlock(&zd->zd_zilog_lock);

	/* zfsvfs_teardown() */
	zil_close(zd->zd_zilog);

	/* zfsvfs_setup() */
	VERIFY3P(zil_open(os, ztest_get_data, NULL), ==, zd->zd_zilog);
	zil_replay(os, zd, ztest_replay_vector);

	(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
	mutex_exit(&zd->zd_dirobj_lock);
	mutex_exit(&ztest_vdev_lock);
}

/*
 * Verify that we can't destroy an active pool, create an existing pool,
 * or create a pool with a bad vdev spec.
 */
void
ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	ztest_shared_opts_t *zo = &ztest_opts;
	spa_t *spa;
	nvlist_t *nvroot;

	if (zo->zo_mmp_test)
		return;

	/*
	 * Attempt to create using a bad file.
	 */
	nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 0, 1);
	VERIFY3U(ENOENT, ==,
	    spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
	fnvlist_free(nvroot);

	/*
	 * Attempt to create using a bad mirror.
	 */
	nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 2, 1);
	VERIFY3U(ENOENT, ==,
	    spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
	fnvlist_free(nvroot);

	/*
	 * Attempt to create an existing pool.  It shouldn't matter
	 * what's in the nvroot; we should fail with EEXIST.
	 */
	(void) pthread_rwlock_rdlock(&ztest_name_lock);
	nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 0, 1);
	VERIFY3U(EEXIST, ==,
	    spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
	fnvlist_free(nvroot);

	/*
	 * We open a reference to the spa and then we try to export it
	 * expecting one of the following errors:
	 *
	 * EBUSY
	 *	Because of the reference we just opened.
	 *
	 * ZFS_ERR_EXPORT_IN_PROGRESS
	 *	For the case that there is another ztest thread doing
	 *	an export concurrently.
	 */
	VERIFY0(spa_open(zo->zo_pool, &spa, FTAG));
	int error = spa_destroy(zo->zo_pool);
	if (error != EBUSY && error != ZFS_ERR_EXPORT_IN_PROGRESS) {
		fatal(B_FALSE, "spa_destroy(%s) returned unexpected value %d",
		    spa->spa_name, error);
	}
	spa_close(spa, FTAG);

	(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
 * Start and then stop the MMP threads to ensure the startup and shutdown code
 * works properly.  Actual protection and property-related code is tested via
 * ZTS.
 */
void
ztest_mmp_enable_disable(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	ztest_shared_opts_t *zo = &ztest_opts;
	spa_t *spa = ztest_spa;

	if (zo->zo_mmp_test)
		return;

	/*
	 * Since enabling MMP involves setting a property, it cannot be done
	 * while the pool is suspended.
	 */
	if (spa_suspended(spa))
		return;

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	mutex_enter(&spa->spa_props_lock);

	zfs_multihost_fail_intervals = 0;

	if (!spa_multihost(spa)) {
		spa->spa_multihost = B_TRUE;
		mmp_thread_start(spa);
	}

	mutex_exit(&spa->spa_props_lock);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
	mmp_signal_all_threads();
	txg_wait_synced(spa_get_dsl(spa), 0);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	mutex_enter(&spa->spa_props_lock);

	if (spa_multihost(spa)) {
		mmp_thread_stop(spa);
		spa->spa_multihost = B_FALSE;
	}

	mutex_exit(&spa->spa_props_lock);
	spa_config_exit(spa, SCL_CONFIG, FTAG);
}
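/*
 * ztest_spa_upgrade() below creates a throwaway pool at a legacy
 * (pre-feature-flags) SPA version and upgrades it.  The version is
 * picked by rejection sampling: retry ztest_random_spa_version() until
 * the result does not exceed SPA_VERSION_BEFORE_FEATURES, then upgrade
 * to a strictly newer randomly chosen version.
 */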
void
ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	spa_t *spa;
	uint64_t initial_version = SPA_VERSION_INITIAL;
	uint64_t version, newversion;
	nvlist_t *nvroot, *props;
	char *name;

	if (ztest_opts.zo_mmp_test)
		return;

	/* dRAID added after feature flags, skip upgrade test. */
	if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0)
		return;

	mutex_enter(&ztest_vdev_lock);
	name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);

	/*
	 * Clean up from previous runs.
	 */
	(void) spa_destroy(name);

	nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
	    NULL, ztest_opts.zo_raid_children, ztest_opts.zo_mirrors, 1);

	/*
	 * If we're configuring a RAIDZ device then make sure that the
	 * initial version is capable of supporting that feature.
	 */
	switch (ztest_opts.zo_raid_parity) {
	case 0:
	case 1:
		initial_version = SPA_VERSION_INITIAL;
		break;
	case 2:
		initial_version = SPA_VERSION_RAIDZ2;
		break;
	case 3:
		initial_version = SPA_VERSION_RAIDZ3;
		break;
	}

	/*
	 * Create a pool with a spa version that can be upgraded.  Pick
	 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
	 */
	do {
		version = ztest_random_spa_version(initial_version);
	} while (version > SPA_VERSION_BEFORE_FEATURES);

	props = fnvlist_alloc();
	fnvlist_add_uint64(props,
	    zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
	VERIFY0(spa_create(name, nvroot, props, NULL, NULL));
	fnvlist_free(nvroot);
	fnvlist_free(props);

	VERIFY0(spa_open(name, &spa, FTAG));
	VERIFY3U(spa_version(spa), ==, version);
	newversion = ztest_random_spa_version(version + 1);

	if (ztest_opts.zo_verbose >= 4) {
		(void) printf("upgrading spa version from "
		    "%"PRIu64" to %"PRIu64"\n",
		    version, newversion);
	}

	spa_upgrade(spa, newversion);
	VERIFY3U(spa_version(spa), >, version);
	VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
	    zpool_prop_to_name(ZPOOL_PROP_VERSION)));
	spa_close(spa, FTAG);

	kmem_strfree(name);
	mutex_exit(&ztest_vdev_lock);
}

static void
ztest_spa_checkpoint(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&ztest_checkpoint_lock));

	int error = spa_checkpoint(spa->spa_name);

	switch (error) {
	case 0:
	case ZFS_ERR_DEVRM_IN_PROGRESS:
	case ZFS_ERR_DISCARDING_CHECKPOINT:
	case ZFS_ERR_CHECKPOINT_EXISTS:
		break;
	case ENOSPC:
		ztest_record_enospc(FTAG);
		break;
	default:
		fatal(B_FALSE, "spa_checkpoint(%s) = %d", spa->spa_name, error);
	}
}

static void
ztest_spa_discard_checkpoint(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&ztest_checkpoint_lock));

	int error = spa_checkpoint_discard(spa->spa_name);

	switch (error) {
	case 0:
	case ZFS_ERR_DISCARDING_CHECKPOINT:
	case ZFS_ERR_NO_CHECKPOINT:
		break;
	default:
		fatal(B_FALSE, "spa_discard_checkpoint(%s) = %d",
		    spa->spa_name, error);
	}
}

void
ztest_spa_checkpoint_create_discard(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	spa_t *spa = ztest_spa;

	mutex_enter(&ztest_checkpoint_lock);
	if (ztest_random(2) == 0) {
		ztest_spa_checkpoint(spa);
	} else {
		ztest_spa_discard_checkpoint(spa);
	}
	mutex_exit(&ztest_checkpoint_lock);
}

static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
	vdev_t *mvd;
	int c;

	if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
		return (vd);

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

static int
spa_num_top_vdevs(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	ASSERT3U(spa_config_held(spa, SCL_VDEV, RW_READER), ==, SCL_VDEV);
	return (rvd->vdev_children);
}
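/*
 * ztest names leaf device files by a dense index: top-level-vdev *
 * leaves-per-top-level + leaf, where leaves = MAX(mirrors + splits, 1)
 * * raid_children.  zs_vdev_next_leaf tracks the next free index so
 * that newly added vdevs get fresh file names; see the
 * ztest_dev_template use in ztest_vdev_attach_detach().
 */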
/*
 * Verify that vdev_add() works as expected.
 */
void
ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	uint64_t leaves;
	uint64_t guid;
	nvlist_t *nvroot;
	int error;

	if (ztest_opts.zo_mmp_test)
		return;

	mutex_enter(&ztest_vdev_lock);
	leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
	    ztest_opts.zo_raid_children;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;

	/*
	 * If we have slogs then remove them 1/4 of the time.
	 */
	if (spa_has_slogs(spa) && ztest_random(4) == 0) {
		metaslab_group_t *mg;

		/*
		 * Find the first real slog in the log allocation class.
		 */
		mg = spa_log_class(spa)->mc_allocator[0].mca_rotor;
		while (!mg->mg_vd->vdev_islog)
			mg = mg->mg_next;

		guid = mg->mg_vd->vdev_guid;

		spa_config_exit(spa, SCL_VDEV, FTAG);

		/*
		 * We have to grab the zs_name_lock as writer to
		 * prevent a race between removing a slog (dmu_objset_find)
		 * and destroying a dataset.  Removing the slog will
		 * grab a reference on the dataset which may cause
		 * dsl_destroy_head() to fail with EBUSY thus
		 * leaving the dataset in an inconsistent state.
		 */
		pthread_rwlock_wrlock(&ztest_name_lock);
		error = spa_vdev_remove(spa, guid, B_FALSE);
		pthread_rwlock_unlock(&ztest_name_lock);

		switch (error) {
		case 0:
		case EEXIST:	/* Generic zil_reset() error */
		case EBUSY:	/* Replay required */
		case EACCES:	/* Crypto key not loaded */
		case ZFS_ERR_CHECKPOINT_EXISTS:
		case ZFS_ERR_DISCARDING_CHECKPOINT:
			break;
		default:
			fatal(B_FALSE, "spa_vdev_remove() = %d", error);
		}
	} else {
		spa_config_exit(spa, SCL_VDEV, FTAG);

		/*
		 * Make 1/4 of the devices be log devices.
		 */
		nvroot = make_vdev_root(NULL, NULL, NULL,
		    ztest_opts.zo_vdev_size, 0, (ztest_random(4) == 0) ?
		    "log" : NULL, ztest_opts.zo_raid_children, zs->zs_mirrors,
		    1);

		error = spa_vdev_add(spa, nvroot);
		fnvlist_free(nvroot);

		switch (error) {
		case 0:
			break;
		case ENOSPC:
			ztest_record_enospc("spa_vdev_add");
			break;
		default:
			fatal(B_FALSE, "spa_vdev_add() = %d", error);
		}
	}

	mutex_exit(&ztest_vdev_lock);
}
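/*
 * Verify that adding a special or dedup allocation class vdev works as
 * expected.
 */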
void
ztest_vdev_class_add(ztest_ds_t *zd, uint64_t id)
{
	(void) id;
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	uint64_t leaves;
	nvlist_t *nvroot;
	const char *class = (ztest_random(2) == 0) ?
	    VDEV_ALLOC_BIAS_SPECIAL : VDEV_ALLOC_BIAS_DEDUP;
	int error;

	/*
	 * By default add a special vdev 50% of the time.
	 */
	if ((ztest_opts.zo_special_vdevs == ZTEST_VDEV_CLASS_OFF) ||
	    (ztest_opts.zo_special_vdevs == ZTEST_VDEV_CLASS_RND &&
	    ztest_random(2) == 0)) {
		return;
	}

	mutex_enter(&ztest_vdev_lock);

	/* Only test with mirrors */
	if (zs->zs_mirrors < 2) {
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	/* requires feature@allocation_classes */
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)) {
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
	    ztest_opts.zo_raid_children;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
	spa_config_exit(spa, SCL_VDEV, FTAG);

	nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
	    class, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);

	error = spa_vdev_add(spa, nvroot);
	fnvlist_free(nvroot);

	if (error == ENOSPC)
		ztest_record_enospc("spa_vdev_add");
	else if (error != 0)
		fatal(B_FALSE, "spa_vdev_add() = %d", error);

	/*
	 * 50% of the time allow small blocks in the special class.
	 */
	if (error == 0 &&
	    spa_special_class(spa)->mc_groups == 1 && ztest_random(2) == 0) {
		if (ztest_opts.zo_verbose >= 3)
			(void) printf("Enabling special VDEV small blocks\n");
		error = ztest_dsl_prop_set_uint64(zd->zd_name,
		    ZFS_PROP_SPECIAL_SMALL_BLOCKS, 32768, B_FALSE);
		ASSERT(error == 0 || error == ENOSPC);
	}

	mutex_exit(&ztest_vdev_lock);

	if (ztest_opts.zo_verbose >= 3) {
		metaslab_class_t *mc;

		if (strcmp(class, VDEV_ALLOC_BIAS_SPECIAL) == 0)
			mc = spa_special_class(spa);
		else
			mc = spa_dedup_class(spa);
		(void) printf("Added a %s mirrored vdev (of %d)\n",
		    class, (int)mc->mc_groups);
	}
}
/*
 * Verify that adding/removing aux devices (l2arc, hot spare) works as
 * expected.
 */
void
ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	spa_aux_vdev_t *sav;
	const char *aux;
	char *path;
	uint64_t guid = 0;
	int error, ignore_err = 0;

	if (ztest_opts.zo_mmp_test)
		return;

	path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

	if (ztest_random(2) == 0) {
		sav = &spa->spa_spares;
		aux = ZPOOL_CONFIG_SPARES;
	} else {
		sav = &spa->spa_l2cache;
		aux = ZPOOL_CONFIG_L2CACHE;
	}

	mutex_enter(&ztest_vdev_lock);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	if (sav->sav_count != 0 && ztest_random(4) == 0) {
		/*
		 * Pick a random device to remove.
		 */
		vdev_t *svd = sav->sav_vdevs[ztest_random(sav->sav_count)];

		/* dRAID spares cannot be removed; try anyway to see ENOTSUP */
		if (strstr(svd->vdev_path, VDEV_TYPE_DRAID) != NULL)
			ignore_err = ENOTSUP;

		guid = svd->vdev_guid;
	} else {
		/*
		 * Find an unused device we can add.
		 */
		zs->zs_vdev_aux = 0;
		for (;;) {
			int c;
			(void) snprintf(path, MAXPATHLEN, ztest_aux_template,
			    ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
			    zs->zs_vdev_aux);
			for (c = 0; c < sav->sav_count; c++)
				if (strcmp(sav->sav_vdevs[c]->vdev_path,
				    path) == 0)
					break;
			if (c == sav->sav_count &&
			    vdev_lookup_by_path(rvd, path) == NULL)
				break;
			zs->zs_vdev_aux++;
		}
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);

	if (guid == 0) {
		/*
		 * Add a new device.
		 */
		nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
		    (ztest_opts.zo_vdev_size * 5) / 4, 0, NULL, 0, 0, 1);
		error = spa_vdev_add(spa, nvroot);

		switch (error) {
		case 0:
			break;
		default:
			fatal(B_FALSE, "spa_vdev_add(%p) = %d", nvroot, error);
		}
		fnvlist_free(nvroot);
	} else {
		/*
		 * Remove an existing device.  Sometimes, dirty its
		 * vdev state first to make sure we handle removal
		 * of devices that have pending state changes.
		 */
		if (ztest_random(2) == 0)
			(void) vdev_online(spa, guid, 0, NULL);

		error = spa_vdev_remove(spa, guid, B_FALSE);

		switch (error) {
		case 0:
		case EBUSY:
		case ZFS_ERR_CHECKPOINT_EXISTS:
		case ZFS_ERR_DISCARDING_CHECKPOINT:
			break;
		default:
			if (error != ignore_err)
				fatal(B_FALSE,
				    "spa_vdev_remove(%"PRIu64") = %d",
				    guid, error);
		}
	}

	mutex_exit(&ztest_vdev_lock);

	umem_free(path, MAXPATHLEN);
}
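/*
 * When building the split config below, log and hole top-level vdevs
 * are replaced by explicit hole entries so that vdev ids keep their
 * positions; only the trailing run of holes (tracked by lastlogid) is
 * trimmed from the config handed to spa_vdev_split_mirror().
 */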
/*
 * Split a pool if it has mirror top-level vdevs.
 */
void
ztest_split_pool(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	nvlist_t *tree, **child, *config, *split, **schild;
	uint_t c, children, schildren = 0, lastlogid = 0;
	int error = 0;

	if (ztest_opts.zo_mmp_test)
		return;

	mutex_enter(&ztest_vdev_lock);

	/* ensure we have a usable config; mirrors of raidz aren't supported */
	if (zs->zs_mirrors < 3 || ztest_opts.zo_raid_children > 1) {
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	/* clean up the old pool, if any */
	(void) spa_destroy("splitp");

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	/* generate a config from the existing config */
	mutex_enter(&spa->spa_props_lock);
	tree = fnvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE);
	mutex_exit(&spa->spa_props_lock);

	VERIFY0(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
	    &child, &children));

	schild = umem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
	    UMEM_NOFAIL);
	for (c = 0; c < children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		nvlist_t **mchild;
		uint_t mchildren;

		if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
			schild[schildren] = fnvlist_alloc();
			fnvlist_add_string(schild[schildren],
			    ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE);
			fnvlist_add_uint64(schild[schildren],
			    ZPOOL_CONFIG_IS_HOLE, 1);
			if (lastlogid == 0)
				lastlogid = schildren;
			++schildren;
			continue;
		}
		lastlogid = 0;
		VERIFY0(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren));
		schild[schildren++] = fnvlist_dup(mchild[0]);
	}

	/* OK, create a config that can be used to split */
	split = fnvlist_alloc();
	fnvlist_add_string(split, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	fnvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)schild, lastlogid != 0 ? lastlogid : schildren);

	config = fnvlist_alloc();
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split);

	for (c = 0; c < schildren; c++)
		fnvlist_free(schild[c]);
	umem_free(schild, rvd->vdev_children * sizeof (nvlist_t *));
	fnvlist_free(split);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	(void) pthread_rwlock_wrlock(&ztest_name_lock);
	error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
	(void) pthread_rwlock_unlock(&ztest_name_lock);

	fnvlist_free(config);

	if (error == 0) {
		(void) printf("successful split - results:\n");
		mutex_enter(&spa_namespace_lock);
		show_pool_stats(spa);
		show_pool_stats(spa_lookup("splitp"));
		mutex_exit(&spa_namespace_lock);
		++zs->zs_splits;
		--zs->zs_mirrors;
	}
	mutex_exit(&ztest_vdev_lock);
}
/*
 * Verify that we can attach and detach devices.
 */
void
ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	spa_aux_vdev_t *sav = &spa->spa_spares;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *pvd;
	nvlist_t *root;
	uint64_t leaves;
	uint64_t leaf, top;
	uint64_t ashift = ztest_get_ashift();
	uint64_t oldguid, pguid;
	uint64_t oldsize, newsize;
	char *oldpath, *newpath;
	int replacing;
	int oldvd_has_siblings = B_FALSE;
	int newvd_is_spare = B_FALSE;
	int newvd_is_dspare = B_FALSE;
	int oldvd_is_log;
	int oldvd_is_special;
	int error, expected_error;

	if (ztest_opts.zo_mmp_test)
		return;

	oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
	newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

	mutex_enter(&ztest_vdev_lock);
	leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * If a vdev is in the process of being removed, its removal may
	 * finish while we are in progress, leading to an unexpected error
	 * value.  Don't bother trying to attach while we are in the middle
	 * of removal.
	 */
	if (ztest_device_removal_active) {
		spa_config_exit(spa, SCL_ALL, FTAG);
		goto out;
	}

	/*
	 * Decide whether to do an attach or a replace.
	 */
	replacing = ztest_random(2);

	/*
	 * Pick a random top-level vdev.
	 */
	top = ztest_random_vdev_top(spa, B_TRUE);

	/*
	 * Pick a random leaf within it.
	 */
	leaf = ztest_random(leaves);

	/*
	 * Locate this vdev.
	 */
	oldvd = rvd->vdev_child[top];

	/* pick a child from the mirror */
	if (zs->zs_mirrors >= 1) {
		ASSERT3P(oldvd->vdev_ops, ==, &vdev_mirror_ops);
		ASSERT3U(oldvd->vdev_children, >=, zs->zs_mirrors);
		oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raid_children];
	}

	/* pick a child out of the raidz group */
	if (ztest_opts.zo_raid_children > 1) {
		if (strcmp(oldvd->vdev_ops->vdev_op_type, "raidz") == 0)
			ASSERT3P(oldvd->vdev_ops, ==, &vdev_raidz_ops);
		else
			ASSERT3P(oldvd->vdev_ops, ==, &vdev_draid_ops);
		ASSERT3U(oldvd->vdev_children, ==, ztest_opts.zo_raid_children);
		oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raid_children];
	}

	/*
	 * If we're already doing an attach or replace, oldvd may be a
	 * mirror vdev -- in which case, pick a random child.
	 */
	while (oldvd->vdev_children != 0) {
		oldvd_has_siblings = B_TRUE;
		ASSERT3U(oldvd->vdev_children, >=, 2);
		oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
	}

	oldguid = oldvd->vdev_guid;
	oldsize = vdev_get_min_asize(oldvd);
	oldvd_is_log = oldvd->vdev_top->vdev_islog;
	oldvd_is_special =
	    oldvd->vdev_top->vdev_alloc_bias == VDEV_BIAS_SPECIAL ||
	    oldvd->vdev_top->vdev_alloc_bias == VDEV_BIAS_DEDUP;
	(void) strlcpy(oldpath, oldvd->vdev_path, MAXPATHLEN);
	pvd = oldvd->vdev_parent;
	pguid = pvd->vdev_guid;

	/*
	 * If oldvd has siblings, then half of the time, detach it.  Prior
	 * to the detach the pool is scrubbed in order to prevent creating
	 * unrepairable blocks as a result of the data corruption injection.
	 */
	if (oldvd_has_siblings && ztest_random(2) == 0) {
		spa_config_exit(spa, SCL_ALL, FTAG);

		error = ztest_scrub_impl(spa);
		if (error)
			goto out;

		error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
		if (error != 0 && error != ENODEV && error != EBUSY &&
		    error != ENOTSUP && error != ZFS_ERR_CHECKPOINT_EXISTS &&
		    error != ZFS_ERR_DISCARDING_CHECKPOINT)
			fatal(B_FALSE, "detach (%s) returned %d",
			    oldpath, error);
		goto out;
	}
	/*
	 * For the new vdev, choose either of the two standard paths
	 * (ending in 'a' or 'b') with equal probability, or occasionally
	 * a random hot spare.
	 */
	if (sav->sav_count != 0 && ztest_random(3) == 0) {
		newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
		newvd_is_spare = B_TRUE;

		if (newvd->vdev_ops == &vdev_draid_spare_ops)
			newvd_is_dspare = B_TRUE;

		(void) strlcpy(newpath, newvd->vdev_path, MAXPATHLEN);
	} else {
		(void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
		    ztest_opts.zo_dir, ztest_opts.zo_pool,
		    top * leaves + leaf);
		if (ztest_random(2) == 0)
			newpath[strlen(newpath) - 1] = 'b';
		newvd = vdev_lookup_by_path(rvd, newpath);
	}

	if (newvd) {
		/*
		 * Reopen to ensure the vdev's asize field isn't stale.
		 */
		vdev_reopen(newvd);
		newsize = vdev_get_min_asize(newvd);
	} else {
		/*
		 * Make newsize a little bigger or smaller than oldsize.
		 * If it's smaller, the attach should fail.
		 * If it's larger, and we're doing a replace,
		 * we should get dynamic LUN growth when we're done.
		 */
		newsize = 10 * oldsize / (9 + ztest_random(3));
	}

	/*
	 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
	 * unless it's a replace; in that case any non-replacing parent is OK.
	 *
	 * If newvd is already part of the pool, it should fail with EBUSY.
	 *
	 * If newvd is too small, it should fail with EOVERFLOW.
	 *
	 * If newvd is a distributed spare and it's being attached to a
	 * dRAID which is not its parent it should fail with EINVAL.
	 */
	if (pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_root_ops && (!replacing ||
	    pvd->vdev_ops == &vdev_replacing_ops ||
	    pvd->vdev_ops == &vdev_spare_ops))
		expected_error = ENOTSUP;
	else if (newvd_is_spare &&
	    (!replacing || oldvd_is_log || oldvd_is_special))
		expected_error = ENOTSUP;
	else if (newvd == oldvd)
		expected_error = replacing ? 0 : EBUSY;
	else if (vdev_lookup_by_path(rvd, newpath) != NULL)
		expected_error = EBUSY;
	else if (!newvd_is_dspare && newsize < oldsize)
		expected_error = EOVERFLOW;
	else if (ashift > oldvd->vdev_top->vdev_ashift)
		expected_error = EDOM;
	else if (newvd_is_dspare && pvd != vdev_draid_spare_get_parent(newvd))
		expected_error = EINVAL;
	else
		expected_error = 0;

	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Build the nvlist describing newpath.
	 */
	root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
	    ashift, NULL, 0, 0, 1);

	/*
	 * When supported select either a healing or sequential resilver.
	 */
	boolean_t rebuilding = B_FALSE;
	if (pvd->vdev_ops == &vdev_mirror_ops ||
	    pvd->vdev_ops == &vdev_root_ops) {
		rebuilding = !!ztest_random(2);
	}

	error = spa_vdev_attach(spa, oldguid, root, replacing, rebuilding);

	fnvlist_free(root);

	/*
	 * If our parent was the replacing vdev, but the replace completed,
	 * then instead of failing with ENOTSUP we may either succeed,
	 * fail with ENODEV, or fail with EOVERFLOW.
	 */
	if (expected_error == ENOTSUP &&
	    (error == 0 || error == ENODEV || error == EOVERFLOW))
		expected_error = error;

	/*
	 * If someone grew the LUN, the replacement may be too small.
	 */
	if (error == EOVERFLOW || error == EBUSY)
		expected_error = error;

	if (error == ZFS_ERR_CHECKPOINT_EXISTS ||
	    error == ZFS_ERR_DISCARDING_CHECKPOINT ||
	    error == ZFS_ERR_RESILVER_IN_PROGRESS ||
	    error == ZFS_ERR_REBUILD_IN_PROGRESS)
		expected_error = error;

	if (error != expected_error && expected_error != EBUSY) {
		fatal(B_FALSE, "attach (%s %"PRIu64", %s %"PRIu64", %d) "
		    "returned %d, expected %d",
		    oldpath, oldsize, newpath,
		    newsize, replacing, error, expected_error);
	}
out:
	mutex_exit(&ztest_vdev_lock);

	umem_free(oldpath, MAXPATHLEN);
	umem_free(newpath, MAXPATHLEN);
}
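/*
 * Verify that removing a random top-level vdev (and migrating its data)
 * works as expected.
 */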
void
ztest_device_removal(ztest_ds_t *zd, uint64_t id)
{
	(void) zd, (void) id;
	spa_t *spa = ztest_spa;
	vdev_t *vd;
	uint64_t guid;
	int error;

	mutex_enter(&ztest_vdev_lock);

	if (ztest_device_removal_active) {
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	/*
	 * Remove a random top-level vdev and wait for removal to finish.
	 */
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	vd = vdev_lookup_top(spa, ztest_random_vdev_top(spa, B_FALSE));
	guid = vd->vdev_guid;
	spa_config_exit(spa, SCL_VDEV, FTAG);

	error = spa_vdev_remove(spa, guid, B_FALSE);
	if (error == 0) {
		ztest_device_removal_active = B_TRUE;
		mutex_exit(&ztest_vdev_lock);

		/*
		 * spa->spa_vdev_removal is created in a sync task that
		 * is initiated via dsl_sync_task_nowait().  Since the
		 * task may not run before spa_vdev_remove() returns, we
		 * must wait at least 1 txg to ensure that the removal
		 * struct has been created.
		 */
		txg_wait_synced(spa_get_dsl(spa), 0);

		while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
			txg_wait_synced(spa_get_dsl(spa), 0);
	} else {
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	/*
	 * The pool needs to be scrubbed after completing device removal.
	 * Failure to do so may result in checksum errors due to the
	 * strategy employed by ztest_fault_inject() when selecting which
	 * offsets are redundant and can be damaged.
	 */
	error = spa_scan(spa, POOL_SCAN_SCRUB);
	if (error == 0) {
		while (dsl_scan_scrubbing(spa_get_dsl(spa)))
			txg_wait_synced(spa_get_dsl(spa), 0);
	}

	mutex_enter(&ztest_vdev_lock);
	ztest_device_removal_active = B_FALSE;
	mutex_exit(&ztest_vdev_lock);
}

/*
 * Callback function which expands the physical size of the vdev.
 */
static vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
	spa_t *spa __maybe_unused = vd->vdev_spa;
	size_t *newsize = arg;
	size_t fsize;
	int fd;

	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), ==, SCL_STATE);
	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
		return (vd);

	fsize = lseek(fd, 0, SEEK_END);
	VERIFY0(ftruncate(fd, *newsize));

	if (ztest_opts.zo_verbose >= 6) {
		(void) printf("%s grew from %lu to %lu bytes\n",
		    vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
	}
	(void) close(fd);
	return (NULL);
}
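/*
 * Note that grow_vdev() only handles file-backed vdevs: it simply
 * ftruncate()s the backing file to the new size (a real disk would
 * have to be relabeled instead).  If open() fails, the vdev is
 * returned unchanged, which makes vdev_walk_tree() stop and the caller
 * treat the expansion attempt as unsuccessful.
 */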
It's possible this 3957 * vdev may have been detached/replaced while we were 3958 * trying to online it. 3959 */ 3960 if (generation != spa->spa_config_generation) { 3961 if (ztest_opts.zo_verbose >= 5) { 3962 (void) printf("vdev configuration has changed, " 3963 "guid %"PRIu64", state %"PRIu64", " 3964 "expected gen %"PRIu64", got gen %"PRIu64"\n", 3965 guid, 3966 tvd->vdev_state, 3967 generation, 3968 spa->spa_config_generation); 3969 } 3970 return (vd); 3971 } 3972 return (NULL); 3973 } 3974 3975 /* 3976 * Traverse the vdev tree calling the supplied function. 3977 * We continue to walk the tree until we either have walked all 3978 * children or we receive a non-NULL return from the callback. 3979 * If a NULL callback is passed, then we just return back the first 3980 * leaf vdev we encounter. 3981 */ 3982 static vdev_t * 3983 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg) 3984 { 3985 uint_t c; 3986 3987 if (vd->vdev_ops->vdev_op_leaf) { 3988 if (func == NULL) 3989 return (vd); 3990 else 3991 return (func(vd, arg)); 3992 } 3993 3994 for (c = 0; c < vd->vdev_children; c++) { 3995 vdev_t *cvd = vd->vdev_child[c]; 3996 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL) 3997 return (cvd); 3998 } 3999 return (NULL); 4000 } 4001 4002 /* 4003 * Verify that dynamic LUN growth works as expected. 4004 */ 4005 void 4006 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id) 4007 { 4008 (void) zd, (void) id; 4009 spa_t *spa = ztest_spa; 4010 vdev_t *vd, *tvd; 4011 metaslab_class_t *mc; 4012 metaslab_group_t *mg; 4013 size_t psize, newsize; 4014 uint64_t top; 4015 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count; 4016 4017 mutex_enter(&ztest_checkpoint_lock); 4018 mutex_enter(&ztest_vdev_lock); 4019 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 4020 4021 /* 4022 * If there is a vdev removal in progress, it could complete while 4023 * we are running, in which case we would not be able to verify 4024 * that the metaslab_class space increased (because it decreases 4025 * when the device removal completes). 4026 */ 4027 if (ztest_device_removal_active) { 4028 spa_config_exit(spa, SCL_STATE, spa); 4029 mutex_exit(&ztest_vdev_lock); 4030 mutex_exit(&ztest_checkpoint_lock); 4031 return; 4032 } 4033 4034 top = ztest_random_vdev_top(spa, B_TRUE); 4035 4036 tvd = spa->spa_root_vdev->vdev_child[top]; 4037 mg = tvd->vdev_mg; 4038 mc = mg->mg_class; 4039 old_ms_count = tvd->vdev_ms_count; 4040 old_class_space = metaslab_class_get_space(mc); 4041 4042 /* 4043 * Determine the size of the first leaf vdev associated with 4044 * our top-level device. 4045 */ 4046 vd = vdev_walk_tree(tvd, NULL, NULL); 4047 ASSERT3P(vd, !=, NULL); 4048 ASSERT(vd->vdev_ops->vdev_op_leaf); 4049 4050 psize = vd->vdev_psize; 4051 4052 /* 4053 * We only try to expand the vdev if it's healthy, less than 4x its 4054 * original size, and it has a valid psize. 4055 */ 4056 if (tvd->vdev_state != VDEV_STATE_HEALTHY || 4057 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) { 4058 spa_config_exit(spa, SCL_STATE, spa); 4059 mutex_exit(&ztest_vdev_lock); 4060 mutex_exit(&ztest_checkpoint_lock); 4061 return; 4062 } 4063 ASSERT3U(psize, >, 0); 4064 newsize = psize + MAX(psize / 8, SPA_MAXBLOCKSIZE); 4065 ASSERT3U(newsize, >, psize); 4066 4067 if (ztest_opts.zo_verbose >= 6) { 4068 (void) printf("Expanding LUN %s from %lu to %lu\n", 4069 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize); 4070 } 4071 4072 /* 4073 * Growing the vdev is a two step process: 4074 * 1). expand the physical size (i.e. 
relabel) 4075 * 2). online the vdev to create the new metaslabs 4076 */ 4077 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL || 4078 vdev_walk_tree(tvd, online_vdev, NULL) != NULL || 4079 tvd->vdev_state != VDEV_STATE_HEALTHY) { 4080 if (ztest_opts.zo_verbose >= 5) { 4081 (void) printf("Could not expand LUN because " 4082 "the vdev configuration changed.\n"); 4083 } 4084 spa_config_exit(spa, SCL_STATE, spa); 4085 mutex_exit(&ztest_vdev_lock); 4086 mutex_exit(&ztest_checkpoint_lock); 4087 return; 4088 } 4089 4090 spa_config_exit(spa, SCL_STATE, spa); 4091 4092 /* 4093 * Expanding the LUN will update the config asynchronously, 4094 * thus we must wait for the async thread to complete any 4095 * pending tasks before proceeding. 4096 */ 4097 for (;;) { 4098 boolean_t done; 4099 mutex_enter(&spa->spa_async_lock); 4100 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks); 4101 mutex_exit(&spa->spa_async_lock); 4102 if (done) 4103 break; 4104 txg_wait_synced(spa_get_dsl(spa), 0); 4105 (void) poll(NULL, 0, 100); 4106 } 4107 4108 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 4109 4110 tvd = spa->spa_root_vdev->vdev_child[top]; 4111 new_ms_count = tvd->vdev_ms_count; 4112 new_class_space = metaslab_class_get_space(mc); 4113 4114 if (tvd->vdev_mg != mg || mg->mg_class != mc) { 4115 if (ztest_opts.zo_verbose >= 5) { 4116 (void) printf("Could not verify LUN expansion due to " 4117 "intervening vdev offline or remove.\n"); 4118 } 4119 spa_config_exit(spa, SCL_STATE, spa); 4120 mutex_exit(&ztest_vdev_lock); 4121 mutex_exit(&ztest_checkpoint_lock); 4122 return; 4123 } 4124 4125 /* 4126 * Make sure we were able to grow the vdev. 4127 */ 4128 if (new_ms_count <= old_ms_count) { 4129 fatal(B_FALSE, 4130 "LUN expansion failed: ms_count %"PRIu64" < %"PRIu64"\n", 4131 old_ms_count, new_ms_count); 4132 } 4133 4134 /* 4135 * Make sure we were able to grow the pool. 4136 */ 4137 if (new_class_space <= old_class_space) { 4138 fatal(B_FALSE, 4139 "LUN expansion failed: class_space %"PRIu64" < %"PRIu64"\n", 4140 old_class_space, new_class_space); 4141 } 4142 4143 if (ztest_opts.zo_verbose >= 5) { 4144 char oldnumbuf[NN_NUMBUF_SZ], newnumbuf[NN_NUMBUF_SZ]; 4145 4146 nicenum(old_class_space, oldnumbuf, sizeof (oldnumbuf)); 4147 nicenum(new_class_space, newnumbuf, sizeof (newnumbuf)); 4148 (void) printf("%s grew from %s to %s\n", 4149 spa->spa_name, oldnumbuf, newnumbuf); 4150 } 4151 4152 spa_config_exit(spa, SCL_STATE, spa); 4153 mutex_exit(&ztest_vdev_lock); 4154 mutex_exit(&ztest_checkpoint_lock); 4155 } 4156 4157 /* 4158 * Verify that dmu_objset_{create,destroy,open,close} work as expected. 4159 */ 4160 static void 4161 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 4162 { 4163 (void) arg, (void) cr; 4164 4165 /* 4166 * Create the objects common to all ztest datasets. 4167 */ 4168 VERIFY0(zap_create_claim(os, ZTEST_DIROBJ, 4169 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx)); 4170 } 4171 4172 static int 4173 ztest_dataset_create(char *dsname) 4174 { 4175 int err; 4176 uint64_t rand; 4177 dsl_crypto_params_t *dcp = NULL; 4178 4179 /* 4180 * 50% of the time, we create encrypted datasets 4181 * using a random cipher suite and a hard-coded 4182 * wrapping key. 
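* At the command line this is roughly equivalent to: * * zfs create -o encryption=<suite> -o keyformat=raw \ * -o keylocation=prompt <pool>/<dataset> * * except that the raw wrapping key is passed in directly via the "wkeydata" nvlist entry rather than being read when the key is loaded.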
4183 */ 4184 rand = ztest_random(2); 4185 if (rand != 0) { 4186 nvlist_t *crypto_args = fnvlist_alloc(); 4187 nvlist_t *props = fnvlist_alloc(); 4188 4189 /* slight bias towards the default cipher suite */ 4190 rand = ztest_random(ZIO_CRYPT_FUNCTIONS); 4191 if (rand < ZIO_CRYPT_AES_128_CCM) 4192 rand = ZIO_CRYPT_ON; 4193 4194 fnvlist_add_uint64(props, 4195 zfs_prop_to_name(ZFS_PROP_ENCRYPTION), rand); 4196 fnvlist_add_uint8_array(crypto_args, "wkeydata", 4197 (uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN); 4198 4199 /* 4200 * These parameters aren't really used by the kernel. They 4201 * are simply stored so that userspace knows how to load 4202 * the wrapping key. 4203 */ 4204 fnvlist_add_uint64(props, 4205 zfs_prop_to_name(ZFS_PROP_KEYFORMAT), ZFS_KEYFORMAT_RAW); 4206 fnvlist_add_string(props, 4207 zfs_prop_to_name(ZFS_PROP_KEYLOCATION), "prompt"); 4208 fnvlist_add_uint64(props, 4209 zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 0ULL); 4210 fnvlist_add_uint64(props, 4211 zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 0ULL); 4212 4213 VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, props, 4214 crypto_args, &dcp)); 4215 4216 /* 4217 * Cycle through all available encryption implementations 4218 * to verify interoperability. 4219 */ 4220 VERIFY0(gcm_impl_set("cycle")); 4221 VERIFY0(aes_impl_set("cycle")); 4222 4223 fnvlist_free(crypto_args); 4224 fnvlist_free(props); 4225 } 4226 4227 err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, dcp, 4228 ztest_objset_create_cb, NULL); 4229 dsl_crypto_params_free(dcp, !!err); 4230 4231 rand = ztest_random(100); 4232 if (err || rand < 80) 4233 return (err); 4234 4235 if (ztest_opts.zo_verbose >= 5) 4236 (void) printf("Setting dataset %s to sync always\n", dsname); 4237 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, 4238 ZFS_SYNC_ALWAYS, B_FALSE)); 4239 } 4240 4241 static int 4242 ztest_objset_destroy_cb(const char *name, void *arg) 4243 { 4244 (void) arg; 4245 objset_t *os; 4246 dmu_object_info_t doi; 4247 int error; 4248 4249 /* 4250 * Verify that the dataset contains a directory object. 4251 */ 4252 VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, 4253 B_TRUE, FTAG, &os)); 4254 error = dmu_object_info(os, ZTEST_DIROBJ, &doi); 4255 if (error != ENOENT) { 4256 /* We could have crashed in the middle of destroying it */ 4257 ASSERT0(error); 4258 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER); 4259 ASSERT3S(doi.doi_physical_blocks_512, >=, 0); 4260 } 4261 dmu_objset_disown(os, B_TRUE, FTAG); 4262 4263 /* 4264 * Destroy the dataset. 4265 */ 4266 if (strchr(name, '@') != NULL) { 4267 error = dsl_destroy_snapshot(name, B_TRUE); 4268 if (error != ECHRNG) { 4269 /* 4270 * The program was executed, but encountered a runtime 4271 * error, such as insufficient slop, or a hold on the 4272 * dataset. 
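* (ECHRNG is how the destroy channel program reports that it ran but hit a runtime error, as opposed to failing to run at all.)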
4273 */ 4274 ASSERT0(error); 4275 } 4276 } else { 4277 error = dsl_destroy_head(name); 4278 if (error == ENOSPC) { 4279 /* There could be checkpoint or insufficient slop */ 4280 ztest_record_enospc(FTAG); 4281 } else if (error != EBUSY) { 4282 /* There could be a hold on this dataset */ 4283 ASSERT0(error); 4284 } 4285 } 4286 return (0); 4287 } 4288 4289 static boolean_t 4290 ztest_snapshot_create(char *osname, uint64_t id) 4291 { 4292 char snapname[ZFS_MAX_DATASET_NAME_LEN]; 4293 int error; 4294 4295 (void) snprintf(snapname, sizeof (snapname), "%"PRIu64"", id); 4296 4297 error = dmu_objset_snapshot_one(osname, snapname); 4298 if (error == ENOSPC) { 4299 ztest_record_enospc(FTAG); 4300 return (B_FALSE); 4301 } 4302 if (error != 0 && error != EEXIST && error != ECHRNG) { 4303 fatal(B_FALSE, "ztest_snapshot_create(%s@%s) = %d", osname, 4304 snapname, error); 4305 } 4306 return (B_TRUE); 4307 } 4308 4309 static boolean_t 4310 ztest_snapshot_destroy(char *osname, uint64_t id) 4311 { 4312 char snapname[ZFS_MAX_DATASET_NAME_LEN]; 4313 int error; 4314 4315 (void) snprintf(snapname, sizeof (snapname), "%s@%"PRIu64"", 4316 osname, id); 4317 4318 error = dsl_destroy_snapshot(snapname, B_FALSE); 4319 if (error != 0 && error != ENOENT && error != ECHRNG) 4320 fatal(B_FALSE, "ztest_snapshot_destroy(%s) = %d", 4321 snapname, error); 4322 return (B_TRUE); 4323 } 4324 4325 void 4326 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) 4327 { 4328 (void) zd; 4329 ztest_ds_t *zdtmp; 4330 int iters; 4331 int error; 4332 objset_t *os, *os2; 4333 char name[ZFS_MAX_DATASET_NAME_LEN]; 4334 zilog_t *zilog; 4335 int i; 4336 4337 zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL); 4338 4339 (void) pthread_rwlock_rdlock(&ztest_name_lock); 4340 4341 (void) snprintf(name, sizeof (name), "%s/temp_%"PRIu64"", 4342 ztest_opts.zo_pool, id); 4343 4344 /* 4345 * If this dataset exists from a previous run, process its replay log 4346 * half of the time. If we don't replay it, then dsl_destroy_head() 4347 * (invoked from ztest_objset_destroy_cb()) should just throw it away. 4348 */ 4349 if (ztest_random(2) == 0 && 4350 ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, 4351 B_TRUE, FTAG, &os) == 0) { 4352 ztest_zd_init(zdtmp, NULL, os); 4353 zil_replay(os, zdtmp, ztest_replay_vector); 4354 ztest_zd_fini(zdtmp); 4355 dmu_objset_disown(os, B_TRUE, FTAG); 4356 } 4357 4358 /* 4359 * There may be an old instance of the dataset we're about to 4360 * create lying around from a previous run. If so, destroy it 4361 * and all of its snapshots. 4362 */ 4363 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 4364 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 4365 4366 /* 4367 * Verify that the destroyed dataset is no longer in the namespace. 4368 * It may still be present if the destroy above fails with ENOSPC. 4369 */ 4370 error = ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, B_TRUE, 4371 FTAG, &os); 4372 if (error == 0) { 4373 dmu_objset_disown(os, B_TRUE, FTAG); 4374 ztest_record_enospc(FTAG); 4375 goto out; 4376 } 4377 VERIFY3U(ENOENT, ==, error); 4378 4379 /* 4380 * Verify that we can create a new dataset. 4381 */ 4382 error = ztest_dataset_create(name); 4383 if (error) { 4384 if (error == ENOSPC) { 4385 ztest_record_enospc(FTAG); 4386 goto out; 4387 } 4388 fatal(B_FALSE, "dmu_objset_create(%s) = %d", name, error); 4389 } 4390 4391 VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, B_TRUE, 4392 FTAG, &os)); 4393 4394 ztest_zd_init(zdtmp, NULL, os); 4395 4396 /* 4397 * Open the intent log for it. 
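* The zil_open() below must be balanced by the zil_close() at the bottom of this function, before the objset is disowned.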
4398 */ 4399 zilog = zil_open(os, ztest_get_data, NULL); 4400 4401 /* 4402 * Put some objects in there, do a little I/O to them, 4403 * and randomly take a couple of snapshots along the way. 4404 */ 4405 iters = ztest_random(5); 4406 for (i = 0; i < iters; i++) { 4407 ztest_dmu_object_alloc_free(zdtmp, id); 4408 if (ztest_random(iters) == 0) 4409 (void) ztest_snapshot_create(name, i); 4410 } 4411 4412 /* 4413 * Verify that we cannot create an existing dataset. 4414 */ 4415 VERIFY3U(EEXIST, ==, 4416 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL, NULL)); 4417 4418 /* 4419 * Verify that we can hold an objset that is also owned. 4420 */ 4421 VERIFY0(dmu_objset_hold(name, FTAG, &os2)); 4422 dmu_objset_rele(os2, FTAG); 4423 4424 /* 4425 * Verify that we cannot own an objset that is already owned. 4426 */ 4427 VERIFY3U(EBUSY, ==, ztest_dmu_objset_own(name, DMU_OST_OTHER, 4428 B_FALSE, B_TRUE, FTAG, &os2)); 4429 4430 zil_close(zilog); 4431 dmu_objset_disown(os, B_TRUE, FTAG); 4432 ztest_zd_fini(zdtmp); 4433 out: 4434 (void) pthread_rwlock_unlock(&ztest_name_lock); 4435 4436 umem_free(zdtmp, sizeof (ztest_ds_t)); 4437 } 4438 4439 /* 4440 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. 4441 */ 4442 void 4443 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id) 4444 { 4445 (void) pthread_rwlock_rdlock(&ztest_name_lock); 4446 (void) ztest_snapshot_destroy(zd->zd_name, id); 4447 (void) ztest_snapshot_create(zd->zd_name, id); 4448 (void) pthread_rwlock_unlock(&ztest_name_lock); 4449 } 4450 4451 /* 4452 * Cleanup non-standard snapshots and clones. 4453 */ 4454 static void 4455 ztest_dsl_dataset_cleanup(char *osname, uint64_t id) 4456 { 4457 char *snap1name; 4458 char *clone1name; 4459 char *snap2name; 4460 char *clone2name; 4461 char *snap3name; 4462 int error; 4463 4464 snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4465 clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4466 snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4467 clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4468 snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4469 4470 (void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN, "%s@s1_%"PRIu64"", 4471 osname, id); 4472 (void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN, "%s/c1_%"PRIu64"", 4473 osname, id); 4474 (void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN, "%s@s2_%"PRIu64"", 4475 clone1name, id); 4476 (void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN, "%s/c2_%"PRIu64"", 4477 osname, id); 4478 (void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN, "%s@s3_%"PRIu64"", 4479 clone1name, id); 4480 4481 error = dsl_destroy_head(clone2name); 4482 if (error && error != ENOENT) 4483 fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clone2name, error); 4484 error = dsl_destroy_snapshot(snap3name, B_FALSE); 4485 if (error && error != ENOENT) 4486 fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d", 4487 snap3name, error); 4488 error = dsl_destroy_snapshot(snap2name, B_FALSE); 4489 if (error && error != ENOENT) 4490 fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d", 4491 snap2name, error); 4492 error = dsl_destroy_head(clone1name); 4493 if (error && error != ENOENT) 4494 fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clone1name, error); 4495 error = dsl_destroy_snapshot(snap1name, B_FALSE); 4496 if (error && error != ENOENT) 4497 fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d", 4498 snap1name, error); 4499 4500 umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN); 4501 umem_free(clone1name, 
ZFS_MAX_DATASET_NAME_LEN); 4502 umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN); 4503 umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN); 4504 umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN); 4505 } 4506 4507 /* 4508 * Verify dsl_dataset_promote handles EBUSY 4509 */ 4510 void 4511 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) 4512 { 4513 objset_t *os; 4514 char *snap1name; 4515 char *clone1name; 4516 char *snap2name; 4517 char *clone2name; 4518 char *snap3name; 4519 char *osname = zd->zd_name; 4520 int error; 4521 4522 snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4523 clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4524 snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4525 clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4526 snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL); 4527 4528 (void) pthread_rwlock_rdlock(&ztest_name_lock); 4529 4530 ztest_dsl_dataset_cleanup(osname, id); 4531 4532 (void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN, "%s@s1_%"PRIu64"", 4533 osname, id); 4534 (void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN, "%s/c1_%"PRIu64"", 4535 osname, id); 4536 (void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN, "%s@s2_%"PRIu64"", 4537 clone1name, id); 4538 (void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN, "%s/c2_%"PRIu64"", 4539 osname, id); 4540 (void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN, "%s@s3_%"PRIu64"", 4541 clone1name, id); 4542 4543 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1); 4544 if (error && error != EEXIST) { 4545 if (error == ENOSPC) { 4546 ztest_record_enospc(FTAG); 4547 goto out; 4548 } 4549 fatal(B_FALSE, "dmu_take_snapshot(%s) = %d", snap1name, error); 4550 } 4551 4552 error = dmu_objset_clone(clone1name, snap1name); 4553 if (error) { 4554 if (error == ENOSPC) { 4555 ztest_record_enospc(FTAG); 4556 goto out; 4557 } 4558 fatal(B_FALSE, "dmu_objset_create(%s) = %d", clone1name, error); 4559 } 4560 4561 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1); 4562 if (error && error != EEXIST) { 4563 if (error == ENOSPC) { 4564 ztest_record_enospc(FTAG); 4565 goto out; 4566 } 4567 fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap2name, error); 4568 } 4569 4570 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1); 4571 if (error && error != EEXIST) { 4572 if (error == ENOSPC) { 4573 ztest_record_enospc(FTAG); 4574 goto out; 4575 } 4576 fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap3name, error); 4577 } 4578 4579 error = dmu_objset_clone(clone2name, snap3name); 4580 if (error) { 4581 if (error == ENOSPC) { 4582 ztest_record_enospc(FTAG); 4583 goto out; 4584 } 4585 fatal(B_FALSE, "dmu_objset_create(%s) = %d", clone2name, error); 4586 } 4587 4588 error = ztest_dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, B_TRUE, 4589 FTAG, &os); 4590 if (error) 4591 fatal(B_FALSE, "dmu_objset_own(%s) = %d", snap2name, error); 4592 error = dsl_dataset_promote(clone2name, NULL); 4593 if (error == ENOSPC) { 4594 dmu_objset_disown(os, B_TRUE, FTAG); 4595 ztest_record_enospc(FTAG); 4596 goto out; 4597 } 4598 if (error != EBUSY) 4599 fatal(B_FALSE, "dsl_dataset_promote(%s), %d, not EBUSY", 4600 clone2name, error); 4601 dmu_objset_disown(os, B_TRUE, FTAG); 4602 4603 out: 4604 ztest_dsl_dataset_cleanup(osname, id); 4605 4606 (void) pthread_rwlock_unlock(&ztest_name_lock); 4607 4608 umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN); 4609 umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN); 4610 umem_free(snap2name, 
ZFS_MAX_DATASET_NAME_LEN); 4611 umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN); 4612 umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN); 4613 } 4614 4615 #undef OD_ARRAY_SIZE 4616 #define OD_ARRAY_SIZE 4 4617 4618 /* 4619 * Verify that dmu_object_{alloc,free} work as expected. 4620 */ 4621 void 4622 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) 4623 { 4624 ztest_od_t *od; 4625 int batchsize; 4626 int size; 4627 int b; 4628 4629 size = sizeof (ztest_od_t) * OD_ARRAY_SIZE; 4630 od = umem_alloc(size, UMEM_NOFAIL); 4631 batchsize = OD_ARRAY_SIZE; 4632 4633 for (b = 0; b < batchsize; b++) 4634 ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER, 4635 0, 0, 0); 4636 4637 /* 4638 * Destroy the previous batch of objects, create a new batch, 4639 * and do some I/O on the new objects. 4640 */ 4641 if (ztest_object_init(zd, od, size, B_TRUE) != 0) { 4642 zd->zd_od = NULL; 4643 umem_free(od, size); 4644 return; 4645 } 4646 4647 while (ztest_random(4 * batchsize) != 0) 4648 ztest_io(zd, od[ztest_random(batchsize)].od_object, 4649 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 4650 4651 umem_free(od, size); 4652 } 4653 4654 /* 4655 * Rewind the global allocator to verify object allocation backfilling. 4656 */ 4657 void 4658 ztest_dmu_object_next_chunk(ztest_ds_t *zd, uint64_t id) 4659 { 4660 (void) id; 4661 objset_t *os = zd->zd_os; 4662 uint_t dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift; 4663 uint64_t object; 4664 4665 /* 4666 * Rewind the global allocator randomly back to a lower object number 4667 * to force backfilling and reclamation of recently freed dnodes. 4668 */ 4669 mutex_enter(&os->os_obj_lock); 4670 object = ztest_random(os->os_obj_next_chunk); 4671 os->os_obj_next_chunk = P2ALIGN(object, dnodes_per_chunk); 4672 mutex_exit(&os->os_obj_lock); 4673 } 4674 4675 #undef OD_ARRAY_SIZE 4676 #define OD_ARRAY_SIZE 2 4677 4678 /* 4679 * Verify that dmu_{read,write} work as expected. 4680 */ 4681 void 4682 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id) 4683 { 4684 int size; 4685 ztest_od_t *od; 4686 4687 objset_t *os = zd->zd_os; 4688 size = sizeof (ztest_od_t) * OD_ARRAY_SIZE; 4689 od = umem_alloc(size, UMEM_NOFAIL); 4690 dmu_tx_t *tx; 4691 int freeit, error; 4692 uint64_t i, n, s, txg; 4693 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; 4694 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 4695 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t); 4696 uint64_t regions = 997; 4697 uint64_t stride = 123456789ULL; 4698 uint64_t width = 40; 4699 int free_percent = 5; 4700 4701 /* 4702 * This test uses two objects, packobj and bigobj, that are always 4703 * updated together (i.e. in the same tx) so that their contents are 4704 * in sync and can be compared. Their contents relate to each other 4705 * in a simple way: packobj is a dense array of 'bufwad' structures, 4706 * while bigobj is a sparse array of the same bufwads. Specifically, 4707 * for any index n, there are three bufwads that should be identical: 4708 * 4709 * packobj, at offset n * sizeof (bufwad_t) 4710 * bigobj, at the head of the nth chunk 4711 * bigobj, at the tail of the nth chunk 4712 * 4713 * The chunk size is arbitrary. It doesn't have to be a power of two, 4714 * and it doesn't have any relation to the object blocksize. 4715 * The only requirement is that it can hold at least two bufwads. 4716 * 4717 * Normally, we write the bufwad to each of these locations. 
4718 * However, free_percent of the time we instead write zeroes to 4719 * packobj and perform a dmu_free_range() on bigobj. By comparing 4720 * bigobj to packobj, we can verify that the DMU is correctly 4721 * tracking which parts of an object are allocated and free, 4722 * and that the contents of the allocated blocks are correct. 4723 */ 4724 4725 /* 4726 * Read the directory info. If it's the first time, set things up. 4727 */ 4728 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, chunksize); 4729 ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0, 4730 chunksize); 4731 4732 if (ztest_object_init(zd, od, size, B_FALSE) != 0) { 4733 umem_free(od, size); 4734 return; 4735 } 4736 4737 bigobj = od[0].od_object; 4738 packobj = od[1].od_object; 4739 chunksize = od[0].od_gen; 4740 ASSERT3U(chunksize, ==, od[1].od_gen); 4741 4742 /* 4743 * Prefetch a random chunk of the big object. 4744 * Our aim here is to get some async reads in flight 4745 * for blocks that we may free below; the DMU should 4746 * handle this race correctly. 4747 */ 4748 n = ztest_random(regions) * stride + ztest_random(width); 4749 s = 1 + ztest_random(2 * width - 1); 4750 dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize, 4751 ZIO_PRIORITY_SYNC_READ); 4752 4753 /* 4754 * Pick a random index and compute the offsets into packobj and bigobj. 4755 */ 4756 n = ztest_random(regions) * stride + ztest_random(width); 4757 s = 1 + ztest_random(width - 1); 4758 4759 packoff = n * sizeof (bufwad_t); 4760 packsize = s * sizeof (bufwad_t); 4761 4762 bigoff = n * chunksize; 4763 bigsize = s * chunksize; 4764 4765 packbuf = umem_alloc(packsize, UMEM_NOFAIL); 4766 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); 4767 4768 /* 4769 * free_percent of the time, free a range of bigobj rather than 4770 * overwriting it. 4771 */ 4772 freeit = (ztest_random(100) < free_percent); 4773 4774 /* 4775 * Read the current contents of our objects. 4776 */ 4777 error = dmu_read(os, packobj, packoff, packsize, packbuf, 4778 DMU_READ_PREFETCH); 4779 ASSERT0(error); 4780 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf, 4781 DMU_READ_PREFETCH); 4782 ASSERT0(error); 4783 4784 /* 4785 * Get a tx for the mods to both packobj and bigobj. 4786 */ 4787 tx = dmu_tx_create(os); 4788 4789 dmu_tx_hold_write(tx, packobj, packoff, packsize); 4790 4791 if (freeit) 4792 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize); 4793 else 4794 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 4795 4796 /* This accounts for setting the checksum/compression. */ 4797 dmu_tx_hold_bonus(tx, bigobj); 4798 4799 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 4800 if (txg == 0) { 4801 umem_free(packbuf, packsize); 4802 umem_free(bigbuf, bigsize); 4803 umem_free(od, size); 4804 return; 4805 } 4806 4807 enum zio_checksum cksum; 4808 do { 4809 cksum = (enum zio_checksum) 4810 ztest_random_dsl_prop(ZFS_PROP_CHECKSUM); 4811 } while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS); 4812 dmu_object_set_checksum(os, bigobj, cksum, tx); 4813 4814 enum zio_compress comp; 4815 do { 4816 comp = (enum zio_compress) 4817 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION); 4818 } while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS); 4819 dmu_object_set_compress(os, bigobj, comp, tx); 4820 4821 /* 4822 * For each index from n to n + s, verify that the existing bufwad 4823 * in packobj matches the bufwads at the head and tail of the 4824 * corresponding chunk in bigobj. Then update all three bufwads 4825 * with the new values we want to write out. 
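* Concretely, with chunk size C the three copies of bufwad n live at byte n * sizeof (bufwad_t) of packobj, at byte n * C of bigobj (the head), and at byte (n + 1) * C - sizeof (bufwad_t) of bigobj (the tail).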
4826 */ 4827 for (i = 0; i < s; i++) { 4828 /* LINTED */ 4829 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 4830 /* LINTED */ 4831 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 4832 /* LINTED */ 4833 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 4834 4835 ASSERT3U((uintptr_t)bigH - (uintptr_t)bigbuf, <, bigsize); 4836 ASSERT3U((uintptr_t)bigT - (uintptr_t)bigbuf, <, bigsize); 4837 4838 if (pack->bw_txg > txg) 4839 fatal(B_FALSE, 4840 "future leak: got %"PRIx64", open txg is %"PRIx64"", 4841 pack->bw_txg, txg); 4842 4843 if (pack->bw_data != 0 && pack->bw_index != n + i) 4844 fatal(B_FALSE, "wrong index: " 4845 "got %"PRIx64", wanted %"PRIx64"+%"PRIx64"", 4846 pack->bw_index, n, i); 4847 4848 if (memcmp(pack, bigH, sizeof (bufwad_t)) != 0) 4849 fatal(B_FALSE, "pack/bigH mismatch in %p/%p", 4850 pack, bigH); 4851 4852 if (memcmp(pack, bigT, sizeof (bufwad_t)) != 0) 4853 fatal(B_FALSE, "pack/bigT mismatch in %p/%p", 4854 pack, bigT); 4855 4856 if (freeit) { 4857 memset(pack, 0, sizeof (bufwad_t)); 4858 } else { 4859 pack->bw_index = n + i; 4860 pack->bw_txg = txg; 4861 pack->bw_data = 1 + ztest_random(-2ULL); 4862 } 4863 *bigH = *pack; 4864 *bigT = *pack; 4865 } 4866 4867 /* 4868 * We've verified all the old bufwads, and made new ones. 4869 * Now write them out. 4870 */ 4871 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 4872 4873 if (freeit) { 4874 if (ztest_opts.zo_verbose >= 7) { 4875 (void) printf("freeing offset %"PRIx64" size %"PRIx64"" 4876 " txg %"PRIx64"\n", 4877 bigoff, bigsize, txg); 4878 } 4879 VERIFY0(dmu_free_range(os, bigobj, bigoff, bigsize, tx)); 4880 } else { 4881 if (ztest_opts.zo_verbose >= 7) { 4882 (void) printf("writing offset %"PRIx64" size %"PRIx64"" 4883 " txg %"PRIx64"\n", 4884 bigoff, bigsize, txg); 4885 } 4886 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx); 4887 } 4888 4889 dmu_tx_commit(tx); 4890 4891 /* 4892 * Sanity check the stuff we just wrote. 4893 */ 4894 { 4895 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 4896 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 4897 4898 VERIFY0(dmu_read(os, packobj, packoff, 4899 packsize, packcheck, DMU_READ_PREFETCH)); 4900 VERIFY0(dmu_read(os, bigobj, bigoff, 4901 bigsize, bigcheck, DMU_READ_PREFETCH)); 4902 4903 ASSERT0(memcmp(packbuf, packcheck, packsize)); 4904 ASSERT0(memcmp(bigbuf, bigcheck, bigsize)); 4905 4906 umem_free(packcheck, packsize); 4907 umem_free(bigcheck, bigsize); 4908 } 4909 4910 umem_free(packbuf, packsize); 4911 umem_free(bigbuf, bigsize); 4912 umem_free(od, size); 4913 } 4914 4915 static void 4916 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf, 4917 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg) 4918 { 4919 uint64_t i; 4920 bufwad_t *pack; 4921 bufwad_t *bigH; 4922 bufwad_t *bigT; 4923 4924 /* 4925 * For each index from n to n + s, verify that the existing bufwad 4926 * in packobj matches the bufwads at the head and tail of the 4927 * corresponding chunk in bigobj. Then update all three bufwads 4928 * with the new values we want to write out. 
4929 */ 4930 for (i = 0; i < s; i++) { 4931 /* LINTED */ 4932 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); 4933 /* LINTED */ 4934 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize); 4935 /* LINTED */ 4936 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1; 4937 4938 ASSERT3U((uintptr_t)bigH - (uintptr_t)bigbuf, <, bigsize); 4939 ASSERT3U((uintptr_t)bigT - (uintptr_t)bigbuf, <, bigsize); 4940 4941 if (pack->bw_txg > txg) 4942 fatal(B_FALSE, 4943 "future leak: got %"PRIx64", open txg is %"PRIx64"", 4944 pack->bw_txg, txg); 4945 4946 if (pack->bw_data != 0 && pack->bw_index != n + i) 4947 fatal(B_FALSE, "wrong index: " 4948 "got %"PRIx64", wanted %"PRIx64"+%"PRIx64"", 4949 pack->bw_index, n, i); 4950 4951 if (memcmp(pack, bigH, sizeof (bufwad_t)) != 0) 4952 fatal(B_FALSE, "pack/bigH mismatch in %p/%p", 4953 pack, bigH); 4954 4955 if (memcmp(pack, bigT, sizeof (bufwad_t)) != 0) 4956 fatal(B_FALSE, "pack/bigT mismatch in %p/%p", 4957 pack, bigT); 4958 4959 pack->bw_index = n + i; 4960 pack->bw_txg = txg; 4961 pack->bw_data = 1 + ztest_random(-2ULL); 4962 4963 *bigH = *pack; 4964 *bigT = *pack; 4965 } 4966 } 4967 4968 #undef OD_ARRAY_SIZE 4969 #define OD_ARRAY_SIZE 2 4970 4971 void 4972 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id) 4973 { 4974 objset_t *os = zd->zd_os; 4975 ztest_od_t *od; 4976 dmu_tx_t *tx; 4977 uint64_t i; 4978 int error; 4979 int size; 4980 uint64_t n, s, txg; 4981 bufwad_t *packbuf, *bigbuf; 4982 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize; 4983 uint64_t blocksize = ztest_random_blocksize(); 4984 uint64_t chunksize = blocksize; 4985 uint64_t regions = 997; 4986 uint64_t stride = 123456789ULL; 4987 uint64_t width = 9; 4988 dmu_buf_t *bonus_db; 4989 arc_buf_t **bigbuf_arcbufs; 4990 dmu_object_info_t doi; 4991 4992 size = sizeof (ztest_od_t) * OD_ARRAY_SIZE; 4993 od = umem_alloc(size, UMEM_NOFAIL); 4994 4995 /* 4996 * This test uses two objects, packobj and bigobj, that are always 4997 * updated together (i.e. in the same tx) so that their contents are 4998 * in sync and can be compared. Their contents relate to each other 4999 * in a simple way: packobj is a dense array of 'bufwad' structures, 5000 * while bigobj is a sparse array of the same bufwads. Specifically, 5001 * for any index n, there are three bufwads that should be identical: 5002 * 5003 * packobj, at offset n * sizeof (bufwad_t) 5004 * bigobj, at the head of the nth chunk 5005 * bigobj, at the tail of the nth chunk 5006 * 5007 * The chunk size is set equal to bigobj block size so that 5008 * dmu_assign_arcbuf_by_dbuf() can be tested for object updates. 5009 */ 5010 5011 /* 5012 * Read the directory info. If it's the first time, set things up. 5013 */ 5014 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0); 5015 ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0, 5016 chunksize); 5017 5018 5019 if (ztest_object_init(zd, od, size, B_FALSE) != 0) { 5020 umem_free(od, size); 5021 return; 5022 } 5023 5024 bigobj = od[0].od_object; 5025 packobj = od[1].od_object; 5026 blocksize = od[0].od_blocksize; 5027 chunksize = blocksize; 5028 ASSERT3U(chunksize, ==, od[1].od_gen); 5029 5030 VERIFY0(dmu_object_info(os, bigobj, &doi)); 5031 VERIFY(ISP2(doi.doi_data_block_size)); 5032 VERIFY3U(chunksize, ==, doi.doi_data_block_size); 5033 VERIFY3U(chunksize, >=, 2 * sizeof (bufwad_t)); 5034 5035 /* 5036 * Pick a random index and compute the offsets into packobj and bigobj. 
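* With 997 regions and a stride of 123456789, the chosen indices are scattered across roughly 1.2e11 bufwads, so successive iterations rarely touch overlapping ranges.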
5037 */ 5038 n = ztest_random(regions) * stride + ztest_random(width); 5039 s = 1 + ztest_random(width - 1); 5040 5041 packoff = n * sizeof (bufwad_t); 5042 packsize = s * sizeof (bufwad_t); 5043 5044 bigoff = n * chunksize; 5045 bigsize = s * chunksize; 5046 5047 packbuf = umem_zalloc(packsize, UMEM_NOFAIL); 5048 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL); 5049 5050 VERIFY0(dmu_bonus_hold(os, bigobj, FTAG, &bonus_db)); 5051 5052 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL); 5053 5054 /* 5055 * Iteration 0 test zcopy for DB_UNCACHED dbufs. 5056 * Iteration 1 test zcopy to already referenced dbufs. 5057 * Iteration 2 test zcopy to dirty dbuf in the same txg. 5058 * Iteration 3 test zcopy to dbuf dirty in previous txg. 5059 * Iteration 4 test zcopy when dbuf is no longer dirty. 5060 * Iteration 5 test zcopy when it can't be done. 5061 * Iteration 6 one more zcopy write. 5062 */ 5063 for (i = 0; i < 7; i++) { 5064 uint64_t j; 5065 uint64_t off; 5066 5067 /* 5068 * In iteration 5 (i == 5) use arcbufs 5069 * that don't match bigobj blksz to test 5070 * dmu_assign_arcbuf_by_dbuf() when it can't directly 5071 * assign an arcbuf to a dbuf. 5072 */ 5073 for (j = 0; j < s; j++) { 5074 if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) { 5075 bigbuf_arcbufs[j] = 5076 dmu_request_arcbuf(bonus_db, chunksize); 5077 } else { 5078 bigbuf_arcbufs[2 * j] = 5079 dmu_request_arcbuf(bonus_db, chunksize / 2); 5080 bigbuf_arcbufs[2 * j + 1] = 5081 dmu_request_arcbuf(bonus_db, chunksize / 2); 5082 } 5083 } 5084 5085 /* 5086 * Get a tx for the mods to both packobj and bigobj. 5087 */ 5088 tx = dmu_tx_create(os); 5089 5090 dmu_tx_hold_write(tx, packobj, packoff, packsize); 5091 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); 5092 5093 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 5094 if (txg == 0) { 5095 umem_free(packbuf, packsize); 5096 umem_free(bigbuf, bigsize); 5097 for (j = 0; j < s; j++) { 5098 if (i != 5 || 5099 chunksize < (SPA_MINBLOCKSIZE * 2)) { 5100 dmu_return_arcbuf(bigbuf_arcbufs[j]); 5101 } else { 5102 dmu_return_arcbuf( 5103 bigbuf_arcbufs[2 * j]); 5104 dmu_return_arcbuf( 5105 bigbuf_arcbufs[2 * j + 1]); 5106 } 5107 } 5108 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 5109 umem_free(od, size); 5110 dmu_buf_rele(bonus_db, FTAG); 5111 return; 5112 } 5113 5114 /* 5115 * 50% of the time don't read objects in the 1st iteration to 5116 * test dmu_assign_arcbuf_by_dbuf() for the case when there are 5117 * no existing dbufs for the specified offsets. 5118 */ 5119 if (i != 0 || ztest_random(2) != 0) { 5120 error = dmu_read(os, packobj, packoff, 5121 packsize, packbuf, DMU_READ_PREFETCH); 5122 ASSERT0(error); 5123 error = dmu_read(os, bigobj, bigoff, bigsize, 5124 bigbuf, DMU_READ_PREFETCH); 5125 ASSERT0(error); 5126 } 5127 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize, 5128 n, chunksize, txg); 5129 5130 /* 5131 * We've verified all the old bufwads, and made new ones. 5132 * Now write them out. 
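* packobj is written with an ordinary dmu_write(), while bigobj is written by loaning arc bufs and assigning them directly to its dbufs with dmu_assign_arcbuf_by_dbuf(), exercising the zero-copy write path.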
5133 */ 5134 dmu_write(os, packobj, packoff, packsize, packbuf, tx); 5135 if (ztest_opts.zo_verbose >= 7) { 5136 (void) printf("writing offset %"PRIx64" size %"PRIx64"" 5137 " txg %"PRIx64"\n", 5138 bigoff, bigsize, txg); 5139 } 5140 for (off = bigoff, j = 0; j < s; j++, off += chunksize) { 5141 dmu_buf_t *dbt; 5142 if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) { 5143 memcpy(bigbuf_arcbufs[j]->b_data, 5144 (caddr_t)bigbuf + (off - bigoff), 5145 chunksize); 5146 } else { 5147 memcpy(bigbuf_arcbufs[2 * j]->b_data, 5148 (caddr_t)bigbuf + (off - bigoff), 5149 chunksize / 2); 5150 memcpy(bigbuf_arcbufs[2 * j + 1]->b_data, 5151 (caddr_t)bigbuf + (off - bigoff) + 5152 chunksize / 2, 5153 chunksize / 2); 5154 } 5155 5156 if (i == 1) { 5157 VERIFY(dmu_buf_hold(os, bigobj, off, 5158 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); 5159 } 5160 if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) { 5161 VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db, 5162 off, bigbuf_arcbufs[j], tx)); 5163 } else { 5164 VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db, 5165 off, bigbuf_arcbufs[2 * j], tx)); 5166 VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db, 5167 off + chunksize / 2, 5168 bigbuf_arcbufs[2 * j + 1], tx)); 5169 } 5170 if (i == 1) { 5171 dmu_buf_rele(dbt, FTAG); 5172 } 5173 } 5174 dmu_tx_commit(tx); 5175 5176 /* 5177 * Sanity check the stuff we just wrote. 5178 */ 5179 { 5180 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); 5181 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); 5182 5183 VERIFY0(dmu_read(os, packobj, packoff, 5184 packsize, packcheck, DMU_READ_PREFETCH)); 5185 VERIFY0(dmu_read(os, bigobj, bigoff, 5186 bigsize, bigcheck, DMU_READ_PREFETCH)); 5187 5188 ASSERT0(memcmp(packbuf, packcheck, packsize)); 5189 ASSERT0(memcmp(bigbuf, bigcheck, bigsize)); 5190 5191 umem_free(packcheck, packsize); 5192 umem_free(bigcheck, bigsize); 5193 } 5194 if (i == 2) { 5195 txg_wait_open(dmu_objset_pool(os), 0, B_TRUE); 5196 } else if (i == 3) { 5197 txg_wait_synced(dmu_objset_pool(os), 0); 5198 } 5199 } 5200 5201 dmu_buf_rele(bonus_db, FTAG); 5202 umem_free(packbuf, packsize); 5203 umem_free(bigbuf, bigsize); 5204 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *)); 5205 umem_free(od, size); 5206 } 5207 5208 void 5209 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id) 5210 { 5211 (void) id; 5212 ztest_od_t *od; 5213 5214 od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL); 5215 uint64_t offset = (1ULL << (ztest_random(20) + 43)) + 5216 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 5217 5218 /* 5219 * Have multiple threads write to large offsets in an object 5220 * to verify that parallel writes to an object -- even to the 5221 * same blocks within the object -- don't cause any trouble.
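* Each thread picks a base offset that is a random power of two between 2^43 and 2^62, plus a random multiple of the maximum block size, so threads sometimes collide on the same blocks and sometimes don't.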
5222 */ 5223 ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0); 5224 5225 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) 5226 return; 5227 5228 while (ztest_random(10) != 0) 5229 ztest_io(zd, od->od_object, offset); 5230 5231 umem_free(od, sizeof (ztest_od_t)); 5232 } 5233 5234 void 5235 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id) 5236 { 5237 ztest_od_t *od; 5238 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) + 5239 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 5240 uint64_t count = ztest_random(20) + 1; 5241 uint64_t blocksize = ztest_random_blocksize(); 5242 void *data; 5243 5244 od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL); 5245 5246 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0); 5247 5248 if (ztest_object_init(zd, od, sizeof (ztest_od_t), 5249 !ztest_random(2)) != 0) { 5250 umem_free(od, sizeof (ztest_od_t)); 5251 return; 5252 } 5253 5254 if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) { 5255 umem_free(od, sizeof (ztest_od_t)); 5256 return; 5257 } 5258 5259 ztest_prealloc(zd, od->od_object, offset, count * blocksize); 5260 5261 data = umem_zalloc(blocksize, UMEM_NOFAIL); 5262 5263 while (ztest_random(count) != 0) { 5264 uint64_t randoff = offset + (ztest_random(count) * blocksize); 5265 if (ztest_write(zd, od->od_object, randoff, blocksize, 5266 data) != 0) 5267 break; 5268 while (ztest_random(4) != 0) 5269 ztest_io(zd, od->od_object, randoff); 5270 } 5271 5272 umem_free(data, blocksize); 5273 umem_free(od, sizeof (ztest_od_t)); 5274 } 5275 5276 /* 5277 * Verify that zap_{create,destroy,add,remove,update} work as expected. 5278 */ 5279 #define ZTEST_ZAP_MIN_INTS 1 5280 #define ZTEST_ZAP_MAX_INTS 4 5281 #define ZTEST_ZAP_MAX_PROPS 1000 5282 5283 void 5284 ztest_zap(ztest_ds_t *zd, uint64_t id) 5285 { 5286 objset_t *os = zd->zd_os; 5287 ztest_od_t *od; 5288 uint64_t object; 5289 uint64_t txg, last_txg; 5290 uint64_t value[ZTEST_ZAP_MAX_INTS]; 5291 uint64_t zl_ints, zl_intsize, prop; 5292 int i, ints; 5293 dmu_tx_t *tx; 5294 char propname[100], txgname[100]; 5295 int error; 5296 const char *const hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; 5297 5298 od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL); 5299 ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0); 5300 5301 if (ztest_object_init(zd, od, sizeof (ztest_od_t), 5302 !ztest_random(2)) != 0) 5303 goto out; 5304 5305 object = od->od_object; 5306 5307 /* 5308 * Generate a known hash collision, and verify that 5309 * we can lookup and remove both entries. 5310 */ 5311 tx = dmu_tx_create(os); 5312 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 5313 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 5314 if (txg == 0) 5315 goto out; 5316 for (i = 0; i < 2; i++) { 5317 value[i] = i; 5318 VERIFY0(zap_add(os, object, hc[i], sizeof (uint64_t), 5319 1, &value[i], tx)); 5320 } 5321 for (i = 0; i < 2; i++) { 5322 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i], 5323 sizeof (uint64_t), 1, &value[i], tx)); 5324 VERIFY0( 5325 zap_length(os, object, hc[i], &zl_intsize, &zl_ints)); 5326 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 5327 ASSERT3U(zl_ints, ==, 1); 5328 } 5329 for (i = 0; i < 2; i++) { 5330 VERIFY0(zap_remove(os, object, hc[i], tx)); 5331 } 5332 dmu_tx_commit(tx); 5333 5334 /* 5335 * Generate a bunch of random entries. 
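* The number of ints per value is derived deterministically from the object number, so repeated visits to the same object always use the same width.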
5336 */ 5337 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); 5338 5339 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 5340 (void) sprintf(propname, "prop_%"PRIu64"", prop); 5341 (void) sprintf(txgname, "txg_%"PRIu64"", prop); 5342 memset(value, 0, sizeof (value)); 5343 last_txg = 0; 5344 5345 /* 5346 * If these zap entries already exist, validate their contents. 5347 */ 5348 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 5349 if (error == 0) { 5350 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 5351 ASSERT3U(zl_ints, ==, 1); 5352 5353 VERIFY0(zap_lookup(os, object, txgname, zl_intsize, 5354 zl_ints, &last_txg)); 5355 5356 VERIFY0(zap_length(os, object, propname, &zl_intsize, 5357 &zl_ints)); 5358 5359 ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); 5360 ASSERT3U(zl_ints, ==, ints); 5361 5362 VERIFY0(zap_lookup(os, object, propname, zl_intsize, 5363 zl_ints, value)); 5364 5365 for (i = 0; i < ints; i++) { 5366 ASSERT3U(value[i], ==, last_txg + object + i); 5367 } 5368 } else { 5369 ASSERT3U(error, ==, ENOENT); 5370 } 5371 5372 /* 5373 * Atomically update two entries in our zap object. 5374 * The first is named txg_%llu, and contains the txg 5375 * in which the property was last updated. The second 5376 * is named prop_%llu, and the nth element of its value 5377 * should be txg + object + n. 5378 */ 5379 tx = dmu_tx_create(os); 5380 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 5381 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 5382 if (txg == 0) 5383 goto out; 5384 5385 if (last_txg > txg) 5386 fatal(B_FALSE, "zap future leak: old %"PRIu64" new %"PRIu64"", 5387 last_txg, txg); 5388 5389 for (i = 0; i < ints; i++) 5390 value[i] = txg + object + i; 5391 5392 VERIFY0(zap_update(os, object, txgname, sizeof (uint64_t), 5393 1, &txg, tx)); 5394 VERIFY0(zap_update(os, object, propname, sizeof (uint64_t), 5395 ints, value, tx)); 5396 5397 dmu_tx_commit(tx); 5398 5399 /* 5400 * Remove a random pair of entries. 5401 */ 5402 prop = ztest_random(ZTEST_ZAP_MAX_PROPS); 5403 (void) sprintf(propname, "prop_%"PRIu64"", prop); 5404 (void) sprintf(txgname, "txg_%"PRIu64"", prop); 5405 5406 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); 5407 5408 if (error == ENOENT) 5409 goto out; 5410 5411 ASSERT0(error); 5412 5413 tx = dmu_tx_create(os); 5414 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 5415 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 5416 if (txg == 0) 5417 goto out; 5418 VERIFY0(zap_remove(os, object, txgname, tx)); 5419 VERIFY0(zap_remove(os, object, propname, tx)); 5420 dmu_tx_commit(tx); 5421 out: 5422 umem_free(od, sizeof (ztest_od_t)); 5423 } 5424 5425 /* 5426 * Test case to test the upgrading of a microzap to fatzap. 5427 */ 5428 void 5429 ztest_fzap(ztest_ds_t *zd, uint64_t id) 5430 { 5431 objset_t *os = zd->zd_os; 5432 ztest_od_t *od; 5433 uint64_t object, txg, value; 5434 5435 od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL); 5436 ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0); 5437 5438 if (ztest_object_init(zd, od, sizeof (ztest_od_t), 5439 !ztest_random(2)) != 0) 5440 goto out; 5441 object = od->od_object; 5442 5443 /* 5444 * Add entries to this ZAP and make sure it spills over 5445 * and gets upgraded to a fatzap. Also, since we are adding 5446 * 2050 entries we should see ptrtbl growth and leaf-block split. 
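* (A microzap must fit in a single block; at 64 bytes per chunk even the largest 128K microzap block holds just under 2048 entries, one chunk being the header, so 2050 insertions guarantee the upgrade.)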
5447 */ 5448 for (value = 0; value < 2050; value++) { 5449 char name[ZFS_MAX_DATASET_NAME_LEN]; 5450 dmu_tx_t *tx; 5451 int error; 5452 5453 (void) snprintf(name, sizeof (name), "fzap-%"PRIu64"-%"PRIu64"", 5454 id, value); 5455 5456 tx = dmu_tx_create(os); 5457 dmu_tx_hold_zap(tx, object, B_TRUE, name); 5458 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 5459 if (txg == 0) 5460 goto out; 5461 error = zap_add(os, object, name, sizeof (uint64_t), 1, 5462 &value, tx); 5463 ASSERT(error == 0 || error == EEXIST); 5464 dmu_tx_commit(tx); 5465 } 5466 out: 5467 umem_free(od, sizeof (ztest_od_t)); 5468 } 5469 5470 void 5471 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id) 5472 { 5473 (void) id; 5474 objset_t *os = zd->zd_os; 5475 ztest_od_t *od; 5476 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; 5477 dmu_tx_t *tx; 5478 int i, namelen, error; 5479 int micro = ztest_random(2); 5480 char name[20], string_value[20]; 5481 void *data; 5482 5483 od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL); 5484 ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0, 0); 5485 5486 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) { 5487 umem_free(od, sizeof (ztest_od_t)); 5488 return; 5489 } 5490 5491 object = od->od_object; 5492 5493 /* 5494 * Generate a random name of the form 'xxx.....' where each 5495 * x is a random printable character and the dots are dots. 5496 * There are 94 such characters, and the name length goes from 5497 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. 5498 */ 5499 namelen = ztest_random(sizeof (name) - 5) + 5 + 1; 5500 5501 for (i = 0; i < 3; i++) 5502 name[i] = '!' + ztest_random('~' - '!' + 1); 5503 for (; i < namelen - 1; i++) 5504 name[i] = '.'; 5505 name[i] = '\0'; 5506 5507 if ((namelen & 1) || micro) { 5508 wsize = sizeof (txg); 5509 wc = 1; 5510 data = &txg; 5511 } else { 5512 wsize = 1; 5513 wc = namelen; 5514 data = string_value; 5515 } 5516 5517 count = -1ULL; 5518 VERIFY0(zap_count(os, object, &count)); 5519 ASSERT3S(count, !=, -1ULL); 5520 5521 /* 5522 * Select an operation: length, lookup, add, update, remove. 
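* Cases 0 and 1 are read-only and run without a transaction; cases 2-4 mutate the ZAP and are first assigned to an open txg.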
5523 */ 5524 i = ztest_random(5); 5525 5526 if (i >= 2) { 5527 tx = dmu_tx_create(os); 5528 dmu_tx_hold_zap(tx, object, B_TRUE, NULL); 5529 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); 5530 if (txg == 0) { 5531 umem_free(od, sizeof (ztest_od_t)); 5532 return; 5533 } 5534 memcpy(string_value, name, namelen); 5535 } else { 5536 tx = NULL; 5537 txg = 0; 5538 memset(string_value, 0, namelen); 5539 } 5540 5541 switch (i) { 5542 5543 case 0: 5544 error = zap_length(os, object, name, &zl_wsize, &zl_wc); 5545 if (error == 0) { 5546 ASSERT3U(wsize, ==, zl_wsize); 5547 ASSERT3U(wc, ==, zl_wc); 5548 } else { 5549 ASSERT3U(error, ==, ENOENT); 5550 } 5551 break; 5552 5553 case 1: 5554 error = zap_lookup(os, object, name, wsize, wc, data); 5555 if (error == 0) { 5556 if (data == string_value && 5557 memcmp(name, data, namelen) != 0) 5558 fatal(B_FALSE, "name '%s' != val '%s' len %d", 5559 name, (char *)data, namelen); 5560 } else { 5561 ASSERT3U(error, ==, ENOENT); 5562 } 5563 break; 5564 5565 case 2: 5566 error = zap_add(os, object, name, wsize, wc, data, tx); 5567 ASSERT(error == 0 || error == EEXIST); 5568 break; 5569 5570 case 3: 5571 VERIFY0(zap_update(os, object, name, wsize, wc, data, tx)); 5572 break; 5573 5574 case 4: 5575 error = zap_remove(os, object, name, tx); 5576 ASSERT(error == 0 || error == ENOENT); 5577 break; 5578 } 5579 5580 if (tx != NULL) 5581 dmu_tx_commit(tx); 5582 5583 umem_free(od, sizeof (ztest_od_t)); 5584 } 5585 5586 /* 5587 * Commit callback data. 5588 */ 5589 typedef struct ztest_cb_data { 5590 list_node_t zcd_node; 5591 uint64_t zcd_txg; 5592 int zcd_expected_err; 5593 boolean_t zcd_added; 5594 boolean_t zcd_called; 5595 spa_t *zcd_spa; 5596 } ztest_cb_data_t; 5597 5598 /* This is the actual commit callback function */ 5599 static void 5600 ztest_commit_callback(void *arg, int error) 5601 { 5602 ztest_cb_data_t *data = arg; 5603 uint64_t synced_txg; 5604 5605 VERIFY3P(data, !=, NULL); 5606 VERIFY3S(data->zcd_expected_err, ==, error); 5607 VERIFY(!data->zcd_called); 5608 5609 synced_txg = spa_last_synced_txg(data->zcd_spa); 5610 if (data->zcd_txg > synced_txg) 5611 fatal(B_FALSE, 5612 "commit callback of txg %"PRIu64" called prematurely, " 5613 "last synced txg = %"PRIu64"\n", 5614 data->zcd_txg, synced_txg); 5615 5616 data->zcd_called = B_TRUE; 5617 5618 if (error == ECANCELED) { 5619 ASSERT0(data->zcd_txg); 5620 ASSERT(!data->zcd_added); 5621 5622 /* 5623 * The private callback data should be destroyed here, but 5624 * since we are going to check the zcd_called field after 5625 * dmu_tx_abort(), we will destroy it there. 
5626 */ 5627 return; 5628 } 5629 5630 ASSERT(data->zcd_added); 5631 ASSERT3U(data->zcd_txg, !=, 0); 5632 5633 (void) mutex_enter(&zcl.zcl_callbacks_lock); 5634 5635 /* See if this cb was called more quickly than any before it */ 5636 if ((synced_txg - data->zcd_txg) < zc_min_txg_delay) 5637 zc_min_txg_delay = synced_txg - data->zcd_txg; 5638 5639 /* Remove our callback from the list */ 5640 list_remove(&zcl.zcl_callbacks, data); 5641 5642 (void) mutex_exit(&zcl.zcl_callbacks_lock); 5643 5644 umem_free(data, sizeof (ztest_cb_data_t)); 5645 } 5646 5647 /* Allocate and initialize callback data structure */ 5648 static ztest_cb_data_t * 5649 ztest_create_cb_data(objset_t *os, uint64_t txg) 5650 { 5651 ztest_cb_data_t *cb_data; 5652 5653 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL); 5654 5655 cb_data->zcd_txg = txg; 5656 cb_data->zcd_spa = dmu_objset_spa(os); 5657 list_link_init(&cb_data->zcd_node); 5658 5659 return (cb_data); 5660 } 5661 5662 /* 5663 * Commit callback test. 5664 */ 5665 void 5666 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id) 5667 { 5668 objset_t *os = zd->zd_os; 5669 ztest_od_t *od; 5670 dmu_tx_t *tx; 5671 ztest_cb_data_t *cb_data[3], *tmp_cb; 5672 uint64_t old_txg, txg; 5673 int i, error = 0; 5674 5675 od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL); 5676 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0); 5677 5678 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) { 5679 umem_free(od, sizeof (ztest_od_t)); 5680 return; 5681 } 5682 5683 tx = dmu_tx_create(os); 5684 5685 cb_data[0] = ztest_create_cb_data(os, 0); 5686 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]); 5687 5688 dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t)); 5689 5690 /* Every once in a while, abort the transaction on purpose */ 5691 if (ztest_random(100) == 0) 5692 error = -1; 5693 5694 if (!error) 5695 error = dmu_tx_assign(tx, TXG_NOWAIT); 5696 5697 txg = error ? 0 : dmu_tx_get_txg(tx); 5698 5699 cb_data[0]->zcd_txg = txg; 5700 cb_data[1] = ztest_create_cb_data(os, txg); 5701 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]); 5702 5703 if (error) { 5704 /* 5705 * It's not a strict requirement to call the registered 5706 * callbacks from inside dmu_tx_abort(), but that's how 5707 * it's supposed to happen in the current implementation, 5708 * so we check for that. 5709 */ 5710 for (i = 0; i < 2; i++) { 5711 cb_data[i]->zcd_expected_err = ECANCELED; 5712 VERIFY(!cb_data[i]->zcd_called); 5713 } 5714 5715 dmu_tx_abort(tx); 5716 5717 for (i = 0; i < 2; i++) { 5718 VERIFY(cb_data[i]->zcd_called); 5719 umem_free(cb_data[i], sizeof (ztest_cb_data_t)); 5720 } 5721 5722 umem_free(od, sizeof (ztest_od_t)); 5723 return; 5724 } 5725 5726 cb_data[2] = ztest_create_cb_data(os, txg); 5727 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]); 5728 5729 /* 5730 * Read existing data to make sure there isn't a future leak.
5731 */ 5732 VERIFY0(dmu_read(os, od->od_object, 0, sizeof (uint64_t), 5733 &old_txg, DMU_READ_PREFETCH)); 5734 5735 if (old_txg > txg) 5736 fatal(B_FALSE, 5737 "future leak: got %"PRIu64", open txg is %"PRIu64"", 5738 old_txg, txg); 5739 5740 dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx); 5741 5742 (void) mutex_enter(&zcl.zcl_callbacks_lock); 5743 5744 /* 5745 * Since commit callbacks don't have any ordering requirement and since 5746 * it is theoretically possible for a commit callback to be called 5747 * after an arbitrary amount of time has elapsed since its txg has been 5748 * synced, it is difficult to reliably determine whether a commit 5749 * callback hasn't been called due to high load or due to a flawed 5750 * implementation. 5751 * 5752 * In practice, we will assume that if after a certain number of txgs a 5753 * commit callback hasn't been called, then most likely there's an 5754 * implementation bug. 5755 */ 5756 tmp_cb = list_head(&zcl.zcl_callbacks); 5757 if (tmp_cb != NULL && 5758 tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) { 5759 fatal(B_FALSE, 5760 "Commit callback threshold exceeded, " 5761 "oldest txg: %"PRIu64", open txg: %"PRIu64"\n", 5762 tmp_cb->zcd_txg, txg); 5763 } 5764 5765 /* 5766 * Let's find the place to insert our callbacks. 5767 * 5768 * Even though the list is ordered by txg, it is possible for the 5769 * insertion point not to be the end because our txg may already be 5770 * quiescing at this point and other callbacks in the open txg 5771 * (from other objsets) may have sneaked in. 5772 */ 5773 tmp_cb = list_tail(&zcl.zcl_callbacks); 5774 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg) 5775 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb); 5776 5777 /* Add the 3 callbacks to the list */ 5778 for (i = 0; i < 3; i++) { 5779 if (tmp_cb == NULL) 5780 list_insert_head(&zcl.zcl_callbacks, cb_data[i]); 5781 else 5782 list_insert_after(&zcl.zcl_callbacks, tmp_cb, 5783 cb_data[i]); 5784 5785 cb_data[i]->zcd_added = B_TRUE; 5786 VERIFY(!cb_data[i]->zcd_called); 5787 5788 tmp_cb = cb_data[i]; 5789 } 5790 5791 zc_cb_counter += 3; 5792 5793 (void) mutex_exit(&zcl.zcl_callbacks_lock); 5794 5795 dmu_tx_commit(tx); 5796 5797 umem_free(od, sizeof (ztest_od_t)); 5798 } 5799 5800 /* 5801 * Visit each object in the dataset. Verify that its properties 5802 * are consistent with what was stored in the block tag when it was created, 5803 * and that its unused bonus buffer space has not been overwritten.
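* Objects whose bonus buffer is too small to hold a block tag, or whose tag doesn't carry BT_MAGIC, are simply skipped.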
5804 */ 5805 void 5806 ztest_verify_dnode_bt(ztest_ds_t *zd, uint64_t id) 5807 { 5808 (void) id; 5809 objset_t *os = zd->zd_os; 5810 uint64_t obj; 5811 int err = 0; 5812 5813 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) { 5814 ztest_block_tag_t *bt = NULL; 5815 dmu_object_info_t doi; 5816 dmu_buf_t *db; 5817 5818 ztest_object_lock(zd, obj, RL_READER); 5819 if (dmu_bonus_hold(os, obj, FTAG, &db) != 0) { 5820 ztest_object_unlock(zd, obj); 5821 continue; 5822 } 5823 5824 dmu_object_info_from_db(db, &doi); 5825 if (doi.doi_bonus_size >= sizeof (*bt)) 5826 bt = ztest_bt_bonus(db); 5827 5828 if (bt && bt->bt_magic == BT_MAGIC) { 5829 ztest_bt_verify(bt, os, obj, doi.doi_dnodesize, 5830 bt->bt_offset, bt->bt_gen, bt->bt_txg, 5831 bt->bt_crtxg); 5832 ztest_verify_unused_bonus(db, bt, obj, os, bt->bt_gen); 5833 } 5834 5835 dmu_buf_rele(db, FTAG); 5836 ztest_object_unlock(zd, obj); 5837 } 5838 } 5839 5840 void 5841 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id) 5842 { 5843 (void) id; 5844 zfs_prop_t proplist[] = { 5845 ZFS_PROP_CHECKSUM, 5846 ZFS_PROP_COMPRESSION, 5847 ZFS_PROP_COPIES, 5848 ZFS_PROP_DEDUP 5849 }; 5850 5851 (void) pthread_rwlock_rdlock(&ztest_name_lock); 5852 5853 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++) { 5854 int error = ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p], 5855 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2)); 5856 ASSERT(error == 0 || error == ENOSPC); 5857 } 5858 5859 int error = ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_RECORDSIZE, 5860 ztest_random_blocksize(), (int)ztest_random(2)); 5861 ASSERT(error == 0 || error == ENOSPC); 5862 5863 (void) pthread_rwlock_unlock(&ztest_name_lock); 5864 } 5865 5866 void 5867 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id) 5868 { 5869 (void) zd, (void) id; 5870 nvlist_t *props = NULL; 5871 5872 (void) pthread_rwlock_rdlock(&ztest_name_lock); 5873 5874 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_AUTOTRIM, ztest_random(2)); 5875 5876 VERIFY0(spa_prop_get(ztest_spa, &props)); 5877 5878 if (ztest_opts.zo_verbose >= 6) 5879 dump_nvlist(props, 4); 5880 5881 fnvlist_free(props); 5882 5883 (void) pthread_rwlock_unlock(&ztest_name_lock); 5884 } 5885 5886 static int 5887 user_release_one(const char *snapname, const char *holdname) 5888 { 5889 nvlist_t *snaps, *holds; 5890 int error; 5891 5892 snaps = fnvlist_alloc(); 5893 holds = fnvlist_alloc(); 5894 fnvlist_add_boolean(holds, holdname); 5895 fnvlist_add_nvlist(snaps, snapname, holds); 5896 fnvlist_free(holds); 5897 error = dsl_dataset_user_release(snaps, NULL); 5898 fnvlist_free(snaps); 5899 return (error); 5900 } 5901 5902 /* 5903 * Test snapshot hold/release and deferred destroy. 5904 */ 5905 void 5906 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id) 5907 { 5908 int error; 5909 objset_t *os = zd->zd_os; 5910 objset_t *origin; 5911 char snapname[100]; 5912 char fullname[100]; 5913 char clonename[100]; 5914 char tag[100]; 5915 char osname[ZFS_MAX_DATASET_NAME_LEN]; 5916 nvlist_t *holds; 5917 5918 (void) pthread_rwlock_rdlock(&ztest_name_lock); 5919 5920 dmu_objset_name(os, osname); 5921 5922 (void) snprintf(snapname, sizeof (snapname), "sh1_%"PRIu64"", id); 5923 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname); 5924 (void) snprintf(clonename, sizeof (clonename), "%s/ch1_%"PRIu64"", 5925 osname, id); 5926 (void) snprintf(tag, sizeof (tag), "tag_%"PRIu64"", id); 5927 5928 /* 5929 * Clean up from any previous run. 
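* Each step tolerates ENOENT (and ESRCH for the hold release), since a previous run may have been killed at any point in this sequence.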
5930 */ 5931 error = dsl_destroy_head(clonename); 5932 if (error != ENOENT) 5933 ASSERT0(error); 5934 error = user_release_one(fullname, tag); 5935 if (error != ESRCH && error != ENOENT) 5936 ASSERT0(error); 5937 error = dsl_destroy_snapshot(fullname, B_FALSE); 5938 if (error != ENOENT) 5939 ASSERT0(error); 5940 5941 /* 5942 * Create snapshot, clone it, mark snap for deferred destroy, 5943 * destroy clone, verify snap was also destroyed. 5944 */ 5945 error = dmu_objset_snapshot_one(osname, snapname); 5946 if (error) { 5947 if (error == ENOSPC) { 5948 ztest_record_enospc("dmu_objset_snapshot"); 5949 goto out; 5950 } 5951 fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error); 5952 } 5953 5954 error = dmu_objset_clone(clonename, fullname); 5955 if (error) { 5956 if (error == ENOSPC) { 5957 ztest_record_enospc("dmu_objset_clone"); 5958 goto out; 5959 } 5960 fatal(B_FALSE, "dmu_objset_clone(%s) = %d", clonename, error); 5961 } 5962 5963 error = dsl_destroy_snapshot(fullname, B_TRUE); 5964 if (error) { 5965 fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 5966 fullname, error); 5967 } 5968 5969 error = dsl_destroy_head(clonename); 5970 if (error) 5971 fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clonename, error); 5972 5973 error = dmu_objset_hold(fullname, FTAG, &origin); 5974 if (error != ENOENT) 5975 fatal(B_FALSE, "dmu_objset_hold(%s) = %d", fullname, error); 5976 5977 /* 5978 * Create snapshot, add temporary hold, verify that we can't 5979 * destroy a held snapshot, mark for deferred destroy, 5980 * release hold, verify snapshot was destroyed. 5981 */ 5982 error = dmu_objset_snapshot_one(osname, snapname); 5983 if (error) { 5984 if (error == ENOSPC) { 5985 ztest_record_enospc("dmu_objset_snapshot"); 5986 goto out; 5987 } 5988 fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error); 5989 } 5990 5991 holds = fnvlist_alloc(); 5992 fnvlist_add_string(holds, fullname, tag); 5993 error = dsl_dataset_user_hold(holds, 0, NULL); 5994 fnvlist_free(holds); 5995 5996 if (error == ENOSPC) { 5997 ztest_record_enospc("dsl_dataset_user_hold"); 5998 goto out; 5999 } else if (error) { 6000 fatal(B_FALSE, "dsl_dataset_user_hold(%s, %s) = %u", 6001 fullname, tag, error); 6002 } 6003 6004 error = dsl_destroy_snapshot(fullname, B_FALSE); 6005 if (error != EBUSY) { 6006 fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_FALSE) = %d", 6007 fullname, error); 6008 } 6009 6010 error = dsl_destroy_snapshot(fullname, B_TRUE); 6011 if (error) { 6012 fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_TRUE) = %d", 6013 fullname, error); 6014 } 6015 6016 error = user_release_one(fullname, tag); 6017 if (error) 6018 fatal(B_FALSE, "user_release_one(%s, %s) = %d", 6019 fullname, tag, error); 6020 6021 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT); 6022 6023 out: 6024 (void) pthread_rwlock_unlock(&ztest_name_lock); 6025 } 6026 6027 /* 6028 * Inject random faults into the on-disk data. 
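*
* Roughly, each pass below does one of two things: it picks a leaf of a
* normal or slog top-level vdev (leaf 0 receives transient unreadable/
* unwritable faults and online/offline cycling, while a randomly chosen
* leaf has garbage written over it), or it targets an l2cache device,
* where any amount of damage is tolerable.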
6029 */
6030 void
6031 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
6032 {
6033 (void) zd, (void) id;
6034 ztest_shared_t *zs = ztest_shared;
6035 spa_t *spa = ztest_spa;
6036 int fd;
6037 uint64_t offset;
6038 uint64_t leaves;
6039 uint64_t bad = 0x1990c0ffeedecadeull;
6040 uint64_t top, leaf;
6041 char *path0;
6042 char *pathrand;
6043 size_t fsize;
6044 int bshift = SPA_MAXBLOCKSHIFT + 2;
6045 int iters = 1000;
6046 int maxfaults;
6047 int mirror_save;
6048 vdev_t *vd0 = NULL;
6049 uint64_t guid0 = 0;
6050 boolean_t islog = B_FALSE;
6051 
6052 path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
6053 pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
6054 
6055 mutex_enter(&ztest_vdev_lock);
6056 
6057 /*
6058 * While device removal is in progress, fault injection must be
6059 * disabled until it completes and the pool is scrubbed. The fault
6060 * injection strategy for damaging blocks does not take into account
6061 * evacuated blocks which may have already been damaged.
6062 */
6063 if (ztest_device_removal_active) {
6064 mutex_exit(&ztest_vdev_lock);
6065 goto out;
6066 }
6067 
6068 maxfaults = MAXFAULTS(zs);
6069 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
6070 mirror_save = zs->zs_mirrors;
6071 mutex_exit(&ztest_vdev_lock);
6072 
6073 ASSERT3U(leaves, >=, 1);
6074 
6075 /*
6076 * While ztest is running, the number of leaves will not change. This
6077 * is critical for the fault injection logic as it determines where
6078 * errors can be safely injected such that they are always repairable.
6079 *
6080 * When restarting ztest a different number of leaves may be requested,
6081 * which will shift the regions to be damaged. This is fine as long
6082 * as the pool has been scrubbed prior to using the new mapping.
6083 * Failure to do so can result in non-repairable damage being injected.
6084 */
6085 if (ztest_pool_scrubbed == B_FALSE)
6086 goto out;
6087 
6088 /*
6089 * Grab the name lock as reader. There are some operations
6090 * which don't like to have their vdevs changed while
6091 * they are in progress (e.g. spa_change_guid). Those
6092 * operations will have grabbed the name lock as writer.
6093 */
6094 (void) pthread_rwlock_rdlock(&ztest_name_lock);
6095 
6096 /*
6097 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
6098 */
6099 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6100 
6101 if (ztest_random(2) == 0) {
6102 /*
6103 * Inject errors on a normal data device or slog device.
6104 */
6105 top = ztest_random_vdev_top(spa, B_TRUE);
6106 leaf = ztest_random(leaves) + zs->zs_splits;
6107 
6108 /*
6109 * Generate paths to the first leaf in this top-level vdev,
6110 * and to the random leaf we selected. We'll induce transient
6111 * write failures and random online/offline activity on leaf 0,
6112 * and we'll write random garbage to the randomly chosen leaf.
6113 */
6114 (void) snprintf(path0, MAXPATHLEN, ztest_dev_template,
6115 ztest_opts.zo_dir, ztest_opts.zo_pool,
6116 top * leaves + zs->zs_splits);
6117 (void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template,
6118 ztest_opts.zo_dir, ztest_opts.zo_pool,
6119 top * leaves + leaf);
6120 
6121 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
6122 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
6123 islog = B_TRUE;
6124 
6125 /*
6126 * If the top-level vdev needs to be resilvered
6127 * then we only allow faults on the device that is
6128 * resilvering.
6129 */
6130 if (vd0 != NULL && maxfaults != 1 &&
6131 (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
6132 vd0->vdev_resilver_txg != 0)) {
6133 /*
6134 * Make vd0 explicitly claim to be unreadable,
6135 * or unwritable, or reach behind its back
6136 * and close the underlying fd. We can do this if
6137 * maxfaults == 0 because we'll fail and reexecute,
6138 * and we can do it if maxfaults >= 2 because we'll
6139 * have enough redundancy. If maxfaults == 1, the
6140 * combination of this with injection of random data
6141 * corruption below exceeds the pool's fault tolerance.
6142 */
6143 vdev_file_t *vf = vd0->vdev_tsd;
6144 
6145 zfs_dbgmsg("injecting fault to vdev %llu; maxfaults=%d",
6146 (long long)vd0->vdev_id, (int)maxfaults);
6147 
6148 if (vf != NULL && ztest_random(3) == 0) {
6149 (void) close(vf->vf_file->f_fd);
6150 vf->vf_file->f_fd = -1;
6151 } else if (ztest_random(2) == 0) {
6152 vd0->vdev_cant_read = B_TRUE;
6153 } else {
6154 vd0->vdev_cant_write = B_TRUE;
6155 }
6156 guid0 = vd0->vdev_guid;
6157 }
6158 } else {
6159 /*
6160 * Inject errors on an l2cache device.
6161 */
6162 spa_aux_vdev_t *sav = &spa->spa_l2cache;
6163 
6164 if (sav->sav_count == 0) {
6165 spa_config_exit(spa, SCL_STATE, FTAG);
6166 (void) pthread_rwlock_unlock(&ztest_name_lock);
6167 goto out;
6168 }
6169 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
6170 guid0 = vd0->vdev_guid;
6171 (void) strlcpy(path0, vd0->vdev_path, MAXPATHLEN);
6172 (void) strlcpy(pathrand, vd0->vdev_path, MAXPATHLEN);
6173 
6174 leaf = 0;
6175 leaves = 1;
6176 maxfaults = INT_MAX; /* no limit on cache devices */
6177 }
6178 
6179 spa_config_exit(spa, SCL_STATE, FTAG);
6180 (void) pthread_rwlock_unlock(&ztest_name_lock);
6181 
6182 /*
6183 * If we can tolerate two or more faults, or we're dealing
6184 * with a slog, randomly online/offline vd0.
6185 */
6186 if ((maxfaults >= 2 || islog) && guid0 != 0) {
6187 if (ztest_random(10) < 6) {
6188 int flags = (ztest_random(2) == 0 ?
6189 ZFS_OFFLINE_TEMPORARY : 0);
6190 
6191 /*
6192 * We have to grab the ztest_name_lock as writer to
6193 * prevent a race between offlining a slog and
6194 * destroying a dataset. Offlining the slog will
6195 * grab a reference on the dataset which may cause
6196 * dsl_destroy_head() to fail with EBUSY, thus
6197 * leaving the dataset in an inconsistent state.
6198 */
6199 if (islog)
6200 (void) pthread_rwlock_wrlock(&ztest_name_lock);
6201 
6202 VERIFY3U(vdev_offline(spa, guid0, flags), !=, EBUSY);
6203 
6204 if (islog)
6205 (void) pthread_rwlock_unlock(&ztest_name_lock);
6206 } else {
6207 /*
6208 * Ideally we would like to be able to randomly
6209 * call vdev_[on|off]line without holding locks
6210 * to force unpredictable failures, but the side
6211 * effects of vdev_[on|off]line prevent us from
6212 * doing so. We grab the ztest_vdev_lock here to
6213 * prevent a race between injection testing and
6214 * aux_vdev removal.
6215 */
6216 mutex_enter(&ztest_vdev_lock);
6217 (void) vdev_online(spa, guid0, 0, NULL);
6218 mutex_exit(&ztest_vdev_lock);
6219 }
6220 }
6221 
6222 if (maxfaults == 0)
6223 goto out;
6224 
6225 /*
6226 * We have at least single-fault tolerance, so inject data corruption.
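*
* As a worked instance of the chunk arithmetic explained at the offset
* computation below (assuming SPA_MAXBLOCKSHIFT of 24, so bshift = 26):
* each leaf owns one 64M slot per (leaves << 26)-byte chunk, this leaf's
* slot starts at (leaf << 26) within the chunk, and the random tail
* (ztest_random(1ULL << 25) & -8ULL) keeps the bad word 8-byte aligned
* inside the first 32M of the slot, i.e. inside the leaf's injection
* range.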
6227 */
6228 fd = open(pathrand, O_RDWR);
6229 
6230 if (fd == -1) /* we hit a gap in the device namespace */
6231 goto out;
6232 
6233 fsize = lseek(fd, 0, SEEK_END);
6234 
6235 while (--iters != 0) {
6236 /*
6237 * The offset must be chosen carefully to ensure that
6238 * we do not inject a given logical block with errors
6239 * on two different leaf devices, because ZFS cannot
6240 * tolerate that (if maxfaults==1).
6241 *
6242 * To achieve this we divide each leaf device into
6243 * chunks of size (# leaves * SPA_MAXBLOCKSIZE * 4).
6244 * Each chunk is further divided into error-injection
6245 * ranges (can accept errors) and clear ranges (we do
6246 * not inject errors in those). Each error-injection
6247 * range can accept errors only for a single leaf vdev.
6248 * Error-injection ranges are separated by clear ranges.
6249 *
6250 * For example, with 3 leaves, each chunk looks like:
6251 * 0 to 32M: injection range for leaf 0
6252 * 32M to 64M: clear range - no injection allowed
6253 * 64M to 96M: injection range for leaf 1
6254 * 96M to 128M: clear range - no injection allowed
6255 * 128M to 160M: injection range for leaf 2
6256 * 160M to 192M: clear range - no injection allowed
6257 *
6258 * Each clear range must be large enough such that a
6259 * single block cannot straddle it. This way a block
6260 * can't be a target in two different injection ranges
6261 * (on different leaf vdevs).
6262 */
6263 offset = ztest_random(fsize / (leaves << bshift)) *
6264 (leaves << bshift) + (leaf << bshift) +
6265 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
6266 
6267 /*
6268 * Only allow damage to the labels at one end of the vdev.
6269 *
6270 * If all labels are damaged, the device will be totally
6271 * inaccessible, which will result in loss of data,
6272 * because we also damage (parts of) the other side of
6273 * the mirror/raidz.
6274 *
6275 * Additionally, we will always have both an even and an
6276 * odd label, so that we can handle crashes in the
6277 * middle of vdev_config_sync().
6278 */
6279 if ((leaf & 1) == 0 && offset < VDEV_LABEL_START_SIZE)
6280 continue;
6281 
6282 /*
6283 * The two end labels are stored at the "end" of the disk, but
6284 * the end of the disk (vdev_psize) is aligned to
6285 * sizeof (vdev_label_t).
6286 */
6287 uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
6288 if ((leaf & 1) == 1 &&
6289 offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
6290 continue;
6291 
6292 mutex_enter(&ztest_vdev_lock);
6293 if (mirror_save != zs->zs_mirrors) {
6294 mutex_exit(&ztest_vdev_lock);
6295 (void) close(fd);
6296 goto out;
6297 }
6298 
6299 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
6300 fatal(B_TRUE,
6301 "can't inject bad word at 0x%"PRIx64" in %s",
6302 offset, pathrand);
6303 
6304 mutex_exit(&ztest_vdev_lock);
6305 
6306 if (ztest_opts.zo_verbose >= 7)
6307 (void) printf("injected bad word into %s,"
6308 " offset 0x%"PRIx64"\n", pathrand, offset);
6309 }
6310 
6311 (void) close(fd);
6312 out:
6313 umem_free(path0, MAXPATHLEN);
6314 umem_free(pathrand, MAXPATHLEN);
6315 }
6316 
6317 /*
6318 * By design ztest will never inject uncorrectable damage into the pool.
6319 * Issue a scrub, wait for it to complete, and verify there is never any
6320 * persistent damage.
6321 *
6322 * Only after a full scrub has been completed is it safe to start injecting
6323 * data corruption. See the comment in ztest_fault_inject().
6324 */ 6325 static int 6326 ztest_scrub_impl(spa_t *spa) 6327 { 6328 int error = spa_scan(spa, POOL_SCAN_SCRUB); 6329 if (error) 6330 return (error); 6331 6332 while (dsl_scan_scrubbing(spa_get_dsl(spa))) 6333 txg_wait_synced(spa_get_dsl(spa), 0); 6334 6335 if (spa_approx_errlog_size(spa) > 0) 6336 return (ECKSUM); 6337 6338 ztest_pool_scrubbed = B_TRUE; 6339 6340 return (0); 6341 } 6342 6343 /* 6344 * Scrub the pool. 6345 */ 6346 void 6347 ztest_scrub(ztest_ds_t *zd, uint64_t id) 6348 { 6349 (void) zd, (void) id; 6350 spa_t *spa = ztest_spa; 6351 int error; 6352 6353 /* 6354 * Scrub in progress by device removal. 6355 */ 6356 if (ztest_device_removal_active) 6357 return; 6358 6359 /* 6360 * Start a scrub, wait a moment, then force a restart. 6361 */ 6362 (void) spa_scan(spa, POOL_SCAN_SCRUB); 6363 (void) poll(NULL, 0, 100); 6364 6365 error = ztest_scrub_impl(spa); 6366 if (error == EBUSY) 6367 error = 0; 6368 ASSERT0(error); 6369 } 6370 6371 /* 6372 * Change the guid for the pool. 6373 */ 6374 void 6375 ztest_reguid(ztest_ds_t *zd, uint64_t id) 6376 { 6377 (void) zd, (void) id; 6378 spa_t *spa = ztest_spa; 6379 uint64_t orig, load; 6380 int error; 6381 ztest_shared_t *zs = ztest_shared; 6382 6383 if (ztest_opts.zo_mmp_test) 6384 return; 6385 6386 orig = spa_guid(spa); 6387 load = spa_load_guid(spa); 6388 6389 (void) pthread_rwlock_wrlock(&ztest_name_lock); 6390 error = spa_change_guid(spa); 6391 zs->zs_guid = spa_guid(spa); 6392 (void) pthread_rwlock_unlock(&ztest_name_lock); 6393 6394 if (error != 0) 6395 return; 6396 6397 if (ztest_opts.zo_verbose >= 4) { 6398 (void) printf("Changed guid old %"PRIu64" -> %"PRIu64"\n", 6399 orig, spa_guid(spa)); 6400 } 6401 6402 VERIFY3U(orig, !=, spa_guid(spa)); 6403 VERIFY3U(load, ==, spa_load_guid(spa)); 6404 } 6405 6406 void 6407 ztest_blake3(ztest_ds_t *zd, uint64_t id) 6408 { 6409 (void) zd, (void) id; 6410 hrtime_t end = gethrtime() + NANOSEC; 6411 zio_cksum_salt_t salt; 6412 void *salt_ptr = &salt.zcs_bytes; 6413 struct abd *abd_data, *abd_meta; 6414 void *buf, *templ; 6415 int i, *ptr; 6416 uint32_t size; 6417 BLAKE3_CTX ctx; 6418 const zfs_impl_t *blake3 = zfs_impl_get_ops("blake3"); 6419 6420 size = ztest_random_blocksize(); 6421 buf = umem_alloc(size, UMEM_NOFAIL); 6422 abd_data = abd_alloc(size, B_FALSE); 6423 abd_meta = abd_alloc(size, B_TRUE); 6424 6425 for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++) 6426 *ptr = ztest_random(UINT_MAX); 6427 memset(salt_ptr, 'A', 32); 6428 6429 abd_copy_from_buf_off(abd_data, buf, 0, size); 6430 abd_copy_from_buf_off(abd_meta, buf, 0, size); 6431 6432 while (gethrtime() <= end) { 6433 int run_count = 100; 6434 zio_cksum_t zc_ref1, zc_ref2; 6435 zio_cksum_t zc_res1, zc_res2; 6436 6437 void *ref1 = &zc_ref1; 6438 void *ref2 = &zc_ref2; 6439 void *res1 = &zc_res1; 6440 void *res2 = &zc_res2; 6441 6442 /* BLAKE3_KEY_LEN = 32 */ 6443 VERIFY0(blake3->setname("generic")); 6444 templ = abd_checksum_blake3_tmpl_init(&salt); 6445 Blake3_InitKeyed(&ctx, salt_ptr); 6446 Blake3_Update(&ctx, buf, size); 6447 Blake3_Final(&ctx, ref1); 6448 zc_ref2 = zc_ref1; 6449 ZIO_CHECKSUM_BSWAP(&zc_ref2); 6450 abd_checksum_blake3_tmpl_free(templ); 6451 6452 VERIFY0(blake3->setname("cycle")); 6453 while (run_count-- > 0) { 6454 6455 /* Test current implementation */ 6456 Blake3_InitKeyed(&ctx, salt_ptr); 6457 Blake3_Update(&ctx, buf, size); 6458 Blake3_Final(&ctx, res1); 6459 zc_res2 = zc_res1; 6460 ZIO_CHECKSUM_BSWAP(&zc_res2); 6461 6462 VERIFY0(memcmp(ref1, res1, 32)); 6463 VERIFY0(memcmp(ref2, res2, 32)); 6464 6465 /* 
Test ABD - data */ 6466 templ = abd_checksum_blake3_tmpl_init(&salt); 6467 abd_checksum_blake3_native(abd_data, size, 6468 templ, &zc_res1); 6469 abd_checksum_blake3_byteswap(abd_data, size, 6470 templ, &zc_res2); 6471 6472 VERIFY0(memcmp(ref1, res1, 32)); 6473 VERIFY0(memcmp(ref2, res2, 32)); 6474 6475 /* Test ABD - metadata */ 6476 abd_checksum_blake3_native(abd_meta, size, 6477 templ, &zc_res1); 6478 abd_checksum_blake3_byteswap(abd_meta, size, 6479 templ, &zc_res2); 6480 abd_checksum_blake3_tmpl_free(templ); 6481 6482 VERIFY0(memcmp(ref1, res1, 32)); 6483 VERIFY0(memcmp(ref2, res2, 32)); 6484 6485 } 6486 } 6487 6488 abd_free(abd_data); 6489 abd_free(abd_meta); 6490 umem_free(buf, size); 6491 } 6492 6493 void 6494 ztest_fletcher(ztest_ds_t *zd, uint64_t id) 6495 { 6496 (void) zd, (void) id; 6497 hrtime_t end = gethrtime() + NANOSEC; 6498 6499 while (gethrtime() <= end) { 6500 int run_count = 100; 6501 void *buf; 6502 struct abd *abd_data, *abd_meta; 6503 uint32_t size; 6504 int *ptr; 6505 int i; 6506 zio_cksum_t zc_ref; 6507 zio_cksum_t zc_ref_byteswap; 6508 6509 size = ztest_random_blocksize(); 6510 6511 buf = umem_alloc(size, UMEM_NOFAIL); 6512 abd_data = abd_alloc(size, B_FALSE); 6513 abd_meta = abd_alloc(size, B_TRUE); 6514 6515 for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++) 6516 *ptr = ztest_random(UINT_MAX); 6517 6518 abd_copy_from_buf_off(abd_data, buf, 0, size); 6519 abd_copy_from_buf_off(abd_meta, buf, 0, size); 6520 6521 VERIFY0(fletcher_4_impl_set("scalar")); 6522 fletcher_4_native(buf, size, NULL, &zc_ref); 6523 fletcher_4_byteswap(buf, size, NULL, &zc_ref_byteswap); 6524 6525 VERIFY0(fletcher_4_impl_set("cycle")); 6526 while (run_count-- > 0) { 6527 zio_cksum_t zc; 6528 zio_cksum_t zc_byteswap; 6529 6530 fletcher_4_byteswap(buf, size, NULL, &zc_byteswap); 6531 fletcher_4_native(buf, size, NULL, &zc); 6532 6533 VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc))); 6534 VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap, 6535 sizeof (zc_byteswap))); 6536 6537 /* Test ABD - data */ 6538 abd_fletcher_4_byteswap(abd_data, size, NULL, 6539 &zc_byteswap); 6540 abd_fletcher_4_native(abd_data, size, NULL, &zc); 6541 6542 VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc))); 6543 VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap, 6544 sizeof (zc_byteswap))); 6545 6546 /* Test ABD - metadata */ 6547 abd_fletcher_4_byteswap(abd_meta, size, NULL, 6548 &zc_byteswap); 6549 abd_fletcher_4_native(abd_meta, size, NULL, &zc); 6550 6551 VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc))); 6552 VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap, 6553 sizeof (zc_byteswap))); 6554 6555 } 6556 6557 umem_free(buf, size); 6558 abd_free(abd_data); 6559 abd_free(abd_meta); 6560 } 6561 } 6562 6563 void 6564 ztest_fletcher_incr(ztest_ds_t *zd, uint64_t id) 6565 { 6566 (void) zd, (void) id; 6567 void *buf; 6568 size_t size; 6569 int *ptr; 6570 int i; 6571 zio_cksum_t zc_ref; 6572 zio_cksum_t zc_ref_bswap; 6573 6574 hrtime_t end = gethrtime() + NANOSEC; 6575 6576 while (gethrtime() <= end) { 6577 int run_count = 100; 6578 6579 size = ztest_random_blocksize(); 6580 buf = umem_alloc(size, UMEM_NOFAIL); 6581 6582 for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++) 6583 *ptr = ztest_random(UINT_MAX); 6584 6585 VERIFY0(fletcher_4_impl_set("scalar")); 6586 fletcher_4_native(buf, size, NULL, &zc_ref); 6587 fletcher_4_byteswap(buf, size, NULL, &zc_ref_bswap); 6588 6589 VERIFY0(fletcher_4_impl_set("cycle")); 6590 6591 while (run_count-- > 0) { 6592 zio_cksum_t zc; 6593 zio_cksum_t zc_bswap; 6594 size_t pos = 0; 6595 6596 
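/*
* Fletcher-4 is a running sum: its accumulators depend only on the
* bytes consumed so far, so feeding the buffer through the incremental
* API in arbitrary slices must reproduce the one-shot digests computed
* above. Each trial therefore starts from a zeroed checksum.
*/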
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
6597 ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
6598 
6599 while (pos < size) {
6600 size_t inc = 64 * ztest_random(size / 67);
6601 /* sometimes add a few bytes to test the non-SIMD path */
6602 if (ztest_random(100) < 10)
6603 inc += P2ALIGN(ztest_random(64),
6604 sizeof (uint32_t));
6605 
6606 if (inc > (size - pos))
6607 inc = size - pos;
6608 
6609 fletcher_4_incremental_native(buf + pos, inc,
6610 &zc);
6611 fletcher_4_incremental_byteswap(buf + pos, inc,
6612 &zc_bswap);
6613 
6614 pos += inc;
6615 }
6616 
6617 VERIFY3U(pos, ==, size);
6618 
6619 VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
6620 VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
6621 
6622 /*
6623 * Verify that incremental checksumming of the whole
6624 * buffer is equivalent to the non-incremental version.
6625 */
6626 ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
6627 ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
6628 
6629 fletcher_4_incremental_native(buf, size, &zc);
6630 fletcher_4_incremental_byteswap(buf, size, &zc_bswap);
6631 
6632 VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
6633 VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
6634 }
6635 
6636 umem_free(buf, size);
6637 }
6638 }
6639 
6640 static int
6641 ztest_set_global_vars(void)
6642 {
6643 for (size_t i = 0; i < ztest_opts.zo_gvars_count; i++) {
6644 char *kv = ztest_opts.zo_gvars[i];
6645 VERIFY3U(strlen(kv), <=, ZO_GVARS_MAX_ARGLEN);
6646 VERIFY3U(strlen(kv), >, 0);
6647 int err = set_global_var(kv);
6648 if (ztest_opts.zo_verbose > 0) {
6649 (void) printf("setting global var %s ... %s\n", kv,
6650 err ? "failed" : "ok");
6651 }
6652 if (err != 0) {
6653 (void) fprintf(stderr,
6654 "failed to set global var '%s'\n", kv);
6655 return (err);
6656 }
6657 }
6658 return (0);
6659 }
6660 
6661 static char **
6662 ztest_global_vars_to_zdb_args(void)
6663 {
6664 char **args = calloc(2*ztest_opts.zo_gvars_count + 1, sizeof (char *));
6665 char **cur = args;
6666 if (args == NULL)
6667 return (NULL);
6668 for (size_t i = 0; i < ztest_opts.zo_gvars_count; i++) {
6669 *cur++ = (char *)"-o";
6670 *cur++ = ztest_opts.zo_gvars[i];
6671 }
6672 ASSERT3P(cur, ==, &args[2*ztest_opts.zo_gvars_count]);
6673 *cur = NULL;
6674 return (args);
6675 }
6676 
6677 /* The end of the strings array is indicated by a NULL element */
6678 static char *
6679 join_strings(char **strings, const char *sep)
6680 {
6681 size_t totallen = 0;
6682 for (char **sp = strings; *sp != NULL; sp++) {
6683 totallen += strlen(*sp);
6684 totallen += strlen(sep);
6685 }
6686 if (totallen > 0) {
6687 ASSERT(totallen >= strlen(sep));
6688 totallen -= strlen(sep);
6689 }
6690 
6691 size_t buflen = totallen + 1;
6692 char *o = umem_alloc(buflen, UMEM_NOFAIL); /* trailing 0 byte */
6693 o[0] = '\0';
6694 for (char **sp = strings; *sp != NULL; sp++) {
6695 size_t would;
6696 would = strlcat(o, *sp, buflen);
6697 VERIFY3U(would, <, buflen);
6698 if (*(sp+1) == NULL) {
6699 break;
6700 }
6701 would = strlcat(o, sep, buflen);
6702 VERIFY3U(would, <, buflen);
6703 }
6704 ASSERT3S(strlen(o), ==, totallen);
6705 return (o);
6706 }
6707 
6708 static int
6709 ztest_check_path(char *path)
6710 {
6711 struct stat s;
6712 /* return true on success */
6713 return (!stat(path, &s));
6714 }
6715 
6716 static void
6717 ztest_get_zdb_bin(char *bin, int len)
6718 {
6719 char *zdb_path;
6720 /*
6721 * Try $ZDB first, then the in-tree zdb path. If neither applies,
6722 * just let popen() search through PATH.
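*
* Illustrative only (the install path below is hypothetical): invoking
* ztest as
*
*	ZDB=/usr/local/sbin/zdb ztest -V
*
* makes every pool-verification pass run that exact binary; an unusable
* $ZDB is treated as fatal rather than silently falling back.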
6723 */ 6724 if ((zdb_path = getenv("ZDB"))) { 6725 strlcpy(bin, zdb_path, len); /* In env */ 6726 if (!ztest_check_path(bin)) { 6727 ztest_dump_core = 0; 6728 fatal(B_TRUE, "invalid ZDB '%s'", bin); 6729 } 6730 return; 6731 } 6732 6733 VERIFY3P(realpath(getexecname(), bin), !=, NULL); 6734 if (strstr(bin, ".libs/ztest")) { 6735 strstr(bin, ".libs/ztest")[0] = '\0'; /* In-tree */ 6736 strcat(bin, "zdb"); 6737 if (ztest_check_path(bin)) 6738 return; 6739 } 6740 strcpy(bin, "zdb"); 6741 } 6742 6743 static vdev_t * 6744 ztest_random_concrete_vdev_leaf(vdev_t *vd) 6745 { 6746 if (vd == NULL) 6747 return (NULL); 6748 6749 if (vd->vdev_children == 0) 6750 return (vd); 6751 6752 vdev_t *eligible[vd->vdev_children]; 6753 int eligible_idx = 0, i; 6754 for (i = 0; i < vd->vdev_children; i++) { 6755 vdev_t *cvd = vd->vdev_child[i]; 6756 if (cvd->vdev_top->vdev_removing) 6757 continue; 6758 if (cvd->vdev_children > 0 || 6759 (vdev_is_concrete(cvd) && !cvd->vdev_detached)) { 6760 eligible[eligible_idx++] = cvd; 6761 } 6762 } 6763 VERIFY3S(eligible_idx, >, 0); 6764 6765 uint64_t child_no = ztest_random(eligible_idx); 6766 return (ztest_random_concrete_vdev_leaf(eligible[child_no])); 6767 } 6768 6769 void 6770 ztest_initialize(ztest_ds_t *zd, uint64_t id) 6771 { 6772 (void) zd, (void) id; 6773 spa_t *spa = ztest_spa; 6774 int error = 0; 6775 6776 mutex_enter(&ztest_vdev_lock); 6777 6778 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 6779 6780 /* Random leaf vdev */ 6781 vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev); 6782 if (rand_vd == NULL) { 6783 spa_config_exit(spa, SCL_VDEV, FTAG); 6784 mutex_exit(&ztest_vdev_lock); 6785 return; 6786 } 6787 6788 /* 6789 * The random vdev we've selected may change as soon as we 6790 * drop the spa_config_lock. We create local copies of things 6791 * we're interested in. 
6792 */ 6793 uint64_t guid = rand_vd->vdev_guid; 6794 char *path = strdup(rand_vd->vdev_path); 6795 boolean_t active = rand_vd->vdev_initialize_thread != NULL; 6796 6797 zfs_dbgmsg("vd %px, guid %llu", rand_vd, (u_longlong_t)guid); 6798 spa_config_exit(spa, SCL_VDEV, FTAG); 6799 6800 uint64_t cmd = ztest_random(POOL_INITIALIZE_FUNCS); 6801 6802 nvlist_t *vdev_guids = fnvlist_alloc(); 6803 nvlist_t *vdev_errlist = fnvlist_alloc(); 6804 fnvlist_add_uint64(vdev_guids, path, guid); 6805 error = spa_vdev_initialize(spa, vdev_guids, cmd, vdev_errlist); 6806 fnvlist_free(vdev_guids); 6807 fnvlist_free(vdev_errlist); 6808 6809 switch (cmd) { 6810 case POOL_INITIALIZE_CANCEL: 6811 if (ztest_opts.zo_verbose >= 4) { 6812 (void) printf("Cancel initialize %s", path); 6813 if (!active) 6814 (void) printf(" failed (no initialize active)"); 6815 (void) printf("\n"); 6816 } 6817 break; 6818 case POOL_INITIALIZE_START: 6819 if (ztest_opts.zo_verbose >= 4) { 6820 (void) printf("Start initialize %s", path); 6821 if (active && error == 0) 6822 (void) printf(" failed (already active)"); 6823 else if (error != 0) 6824 (void) printf(" failed (error %d)", error); 6825 (void) printf("\n"); 6826 } 6827 break; 6828 case POOL_INITIALIZE_SUSPEND: 6829 if (ztest_opts.zo_verbose >= 4) { 6830 (void) printf("Suspend initialize %s", path); 6831 if (!active) 6832 (void) printf(" failed (no initialize active)"); 6833 (void) printf("\n"); 6834 } 6835 break; 6836 } 6837 free(path); 6838 mutex_exit(&ztest_vdev_lock); 6839 } 6840 6841 void 6842 ztest_trim(ztest_ds_t *zd, uint64_t id) 6843 { 6844 (void) zd, (void) id; 6845 spa_t *spa = ztest_spa; 6846 int error = 0; 6847 6848 mutex_enter(&ztest_vdev_lock); 6849 6850 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 6851 6852 /* Random leaf vdev */ 6853 vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev); 6854 if (rand_vd == NULL) { 6855 spa_config_exit(spa, SCL_VDEV, FTAG); 6856 mutex_exit(&ztest_vdev_lock); 6857 return; 6858 } 6859 6860 /* 6861 * The random vdev we've selected may change as soon as we 6862 * drop the spa_config_lock. We create local copies of things 6863 * we're interested in. 
6864 */ 6865 uint64_t guid = rand_vd->vdev_guid; 6866 char *path = strdup(rand_vd->vdev_path); 6867 boolean_t active = rand_vd->vdev_trim_thread != NULL; 6868 6869 zfs_dbgmsg("vd %p, guid %llu", rand_vd, (u_longlong_t)guid); 6870 spa_config_exit(spa, SCL_VDEV, FTAG); 6871 6872 uint64_t cmd = ztest_random(POOL_TRIM_FUNCS); 6873 uint64_t rate = 1 << ztest_random(30); 6874 boolean_t partial = (ztest_random(5) > 0); 6875 boolean_t secure = (ztest_random(5) > 0); 6876 6877 nvlist_t *vdev_guids = fnvlist_alloc(); 6878 nvlist_t *vdev_errlist = fnvlist_alloc(); 6879 fnvlist_add_uint64(vdev_guids, path, guid); 6880 error = spa_vdev_trim(spa, vdev_guids, cmd, rate, partial, 6881 secure, vdev_errlist); 6882 fnvlist_free(vdev_guids); 6883 fnvlist_free(vdev_errlist); 6884 6885 switch (cmd) { 6886 case POOL_TRIM_CANCEL: 6887 if (ztest_opts.zo_verbose >= 4) { 6888 (void) printf("Cancel TRIM %s", path); 6889 if (!active) 6890 (void) printf(" failed (no TRIM active)"); 6891 (void) printf("\n"); 6892 } 6893 break; 6894 case POOL_TRIM_START: 6895 if (ztest_opts.zo_verbose >= 4) { 6896 (void) printf("Start TRIM %s", path); 6897 if (active && error == 0) 6898 (void) printf(" failed (already active)"); 6899 else if (error != 0) 6900 (void) printf(" failed (error %d)", error); 6901 (void) printf("\n"); 6902 } 6903 break; 6904 case POOL_TRIM_SUSPEND: 6905 if (ztest_opts.zo_verbose >= 4) { 6906 (void) printf("Suspend TRIM %s", path); 6907 if (!active) 6908 (void) printf(" failed (no TRIM active)"); 6909 (void) printf("\n"); 6910 } 6911 break; 6912 } 6913 free(path); 6914 mutex_exit(&ztest_vdev_lock); 6915 } 6916 6917 /* 6918 * Verify pool integrity by running zdb. 6919 */ 6920 static void 6921 ztest_run_zdb(uint64_t guid) 6922 { 6923 int status; 6924 char *bin; 6925 char *zdb; 6926 char *zbuf; 6927 const int len = MAXPATHLEN + MAXNAMELEN + 20; 6928 FILE *fp; 6929 6930 bin = umem_alloc(len, UMEM_NOFAIL); 6931 zdb = umem_alloc(len, UMEM_NOFAIL); 6932 zbuf = umem_alloc(1024, UMEM_NOFAIL); 6933 6934 ztest_get_zdb_bin(bin, len); 6935 6936 char **set_gvars_args = ztest_global_vars_to_zdb_args(); 6937 if (set_gvars_args == NULL) { 6938 fatal(B_FALSE, "Failed to allocate memory in " 6939 "ztest_global_vars_to_zdb_args(). Cannot run zdb.\n"); 6940 } 6941 char *set_gvars_args_joined = join_strings(set_gvars_args, " "); 6942 free(set_gvars_args); 6943 6944 size_t would = snprintf(zdb, len, 6945 "%s -bcc%s%s -G -d -Y -e -y %s -p %s %"PRIu64, 6946 bin, 6947 ztest_opts.zo_verbose >= 3 ? "s" : "", 6948 ztest_opts.zo_verbose >= 4 ? 
"v" : "", 6949 set_gvars_args_joined, 6950 ztest_opts.zo_dir, 6951 guid); 6952 ASSERT3U(would, <, len); 6953 6954 umem_free(set_gvars_args_joined, strlen(set_gvars_args_joined) + 1); 6955 6956 if (ztest_opts.zo_verbose >= 5) 6957 (void) printf("Executing %s\n", zdb); 6958 6959 fp = popen(zdb, "r"); 6960 6961 while (fgets(zbuf, 1024, fp) != NULL) 6962 if (ztest_opts.zo_verbose >= 3) 6963 (void) printf("%s", zbuf); 6964 6965 status = pclose(fp); 6966 6967 if (status == 0) 6968 goto out; 6969 6970 ztest_dump_core = 0; 6971 if (WIFEXITED(status)) 6972 fatal(B_FALSE, "'%s' exit code %d", zdb, WEXITSTATUS(status)); 6973 else 6974 fatal(B_FALSE, "'%s' died with signal %d", 6975 zdb, WTERMSIG(status)); 6976 out: 6977 umem_free(bin, len); 6978 umem_free(zdb, len); 6979 umem_free(zbuf, 1024); 6980 } 6981 6982 static void 6983 ztest_walk_pool_directory(const char *header) 6984 { 6985 spa_t *spa = NULL; 6986 6987 if (ztest_opts.zo_verbose >= 6) 6988 (void) puts(header); 6989 6990 mutex_enter(&spa_namespace_lock); 6991 while ((spa = spa_next(spa)) != NULL) 6992 if (ztest_opts.zo_verbose >= 6) 6993 (void) printf("\t%s\n", spa_name(spa)); 6994 mutex_exit(&spa_namespace_lock); 6995 } 6996 6997 static void 6998 ztest_spa_import_export(char *oldname, char *newname) 6999 { 7000 nvlist_t *config, *newconfig; 7001 uint64_t pool_guid; 7002 spa_t *spa; 7003 int error; 7004 7005 if (ztest_opts.zo_verbose >= 4) { 7006 (void) printf("import/export: old = %s, new = %s\n", 7007 oldname, newname); 7008 } 7009 7010 /* 7011 * Clean up from previous runs. 7012 */ 7013 (void) spa_destroy(newname); 7014 7015 /* 7016 * Get the pool's configuration and guid. 7017 */ 7018 VERIFY0(spa_open(oldname, &spa, FTAG)); 7019 7020 /* 7021 * Kick off a scrub to tickle scrub/export races. 7022 */ 7023 if (ztest_random(2) == 0) 7024 (void) spa_scan(spa, POOL_SCAN_SCRUB); 7025 7026 pool_guid = spa_guid(spa); 7027 spa_close(spa, FTAG); 7028 7029 ztest_walk_pool_directory("pools before export"); 7030 7031 /* 7032 * Export it. 7033 */ 7034 VERIFY0(spa_export(oldname, &config, B_FALSE, B_FALSE)); 7035 7036 ztest_walk_pool_directory("pools after export"); 7037 7038 /* 7039 * Try to import it. 7040 */ 7041 newconfig = spa_tryimport(config); 7042 ASSERT3P(newconfig, !=, NULL); 7043 fnvlist_free(newconfig); 7044 7045 /* 7046 * Import it under the new name. 7047 */ 7048 error = spa_import(newname, config, NULL, 0); 7049 if (error != 0) { 7050 dump_nvlist(config, 0); 7051 fatal(B_FALSE, "couldn't import pool %s as %s: error %u", 7052 oldname, newname, error); 7053 } 7054 7055 ztest_walk_pool_directory("pools after import"); 7056 7057 /* 7058 * Try to import it again -- should fail with EEXIST. 7059 */ 7060 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0)); 7061 7062 /* 7063 * Try to import it under a different name -- should fail with EEXIST. 7064 */ 7065 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0)); 7066 7067 /* 7068 * Verify that the pool is no longer visible under the old name. 7069 */ 7070 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG)); 7071 7072 /* 7073 * Verify that we can open and close the pool using the new name. 
7074 */ 7075 VERIFY0(spa_open(newname, &spa, FTAG)); 7076 ASSERT3U(pool_guid, ==, spa_guid(spa)); 7077 spa_close(spa, FTAG); 7078 7079 fnvlist_free(config); 7080 } 7081 7082 static void 7083 ztest_resume(spa_t *spa) 7084 { 7085 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6) 7086 (void) printf("resuming from suspended state\n"); 7087 spa_vdev_state_enter(spa, SCL_NONE); 7088 vdev_clear(spa, NULL); 7089 (void) spa_vdev_state_exit(spa, NULL, 0); 7090 (void) zio_resume(spa); 7091 } 7092 7093 static __attribute__((noreturn)) void 7094 ztest_resume_thread(void *arg) 7095 { 7096 spa_t *spa = arg; 7097 7098 while (!ztest_exiting) { 7099 if (spa_suspended(spa)) 7100 ztest_resume(spa); 7101 (void) poll(NULL, 0, 100); 7102 7103 /* 7104 * Periodically change the zfs_compressed_arc_enabled setting. 7105 */ 7106 if (ztest_random(10) == 0) 7107 zfs_compressed_arc_enabled = ztest_random(2); 7108 7109 /* 7110 * Periodically change the zfs_abd_scatter_enabled setting. 7111 */ 7112 if (ztest_random(10) == 0) 7113 zfs_abd_scatter_enabled = ztest_random(2); 7114 } 7115 7116 thread_exit(); 7117 } 7118 7119 static __attribute__((noreturn)) void 7120 ztest_deadman_thread(void *arg) 7121 { 7122 ztest_shared_t *zs = arg; 7123 spa_t *spa = ztest_spa; 7124 hrtime_t delay, overdue, last_run = gethrtime(); 7125 7126 delay = (zs->zs_thread_stop - zs->zs_thread_start) + 7127 MSEC2NSEC(zfs_deadman_synctime_ms); 7128 7129 while (!ztest_exiting) { 7130 /* 7131 * Wait for the delay timer while checking occasionally 7132 * if we should stop. 7133 */ 7134 if (gethrtime() < last_run + delay) { 7135 (void) poll(NULL, 0, 1000); 7136 continue; 7137 } 7138 7139 /* 7140 * If the pool is suspended then fail immediately. Otherwise, 7141 * check to see if the pool is making any progress. If 7142 * vdev_deadman() discovers that there hasn't been any recent 7143 * I/Os then it will end up aborting the tests. 7144 */ 7145 if (spa_suspended(spa) || spa->spa_root_vdev == NULL) { 7146 fatal(B_FALSE, 7147 "aborting test after %llu seconds because " 7148 "pool has transitioned to a suspended state.", 7149 (u_longlong_t)zfs_deadman_synctime_ms / 1000); 7150 } 7151 vdev_deadman(spa->spa_root_vdev, FTAG); 7152 7153 /* 7154 * If the process doesn't complete within a grace period of 7155 * zfs_deadman_synctime_ms over the expected finish time, 7156 * then it may be hung and is terminated. 
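*
* For example, with the zfs_deadman_synctime_ms value of 300000 set in
* main(), a pass that should have finished at zs_proc_stop is given
* five more minutes before it is declared hung and killed.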
7157 */ 7158 overdue = zs->zs_proc_stop + MSEC2NSEC(zfs_deadman_synctime_ms); 7159 if (gethrtime() > overdue) { 7160 fatal(B_FALSE, 7161 "aborting test after %llu seconds because " 7162 "the process is overdue for termination.", 7163 (gethrtime() - zs->zs_proc_start) / NANOSEC); 7164 } 7165 7166 (void) printf("ztest has been running for %lld seconds\n", 7167 (gethrtime() - zs->zs_proc_start) / NANOSEC); 7168 7169 last_run = gethrtime(); 7170 delay = MSEC2NSEC(zfs_deadman_checktime_ms); 7171 } 7172 7173 thread_exit(); 7174 } 7175 7176 static void 7177 ztest_execute(int test, ztest_info_t *zi, uint64_t id) 7178 { 7179 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets]; 7180 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test); 7181 hrtime_t functime = gethrtime(); 7182 int i; 7183 7184 for (i = 0; i < zi->zi_iters; i++) 7185 zi->zi_func(zd, id); 7186 7187 functime = gethrtime() - functime; 7188 7189 atomic_add_64(&zc->zc_count, 1); 7190 atomic_add_64(&zc->zc_time, functime); 7191 7192 if (ztest_opts.zo_verbose >= 4) 7193 (void) printf("%6.2f sec in %s\n", 7194 (double)functime / NANOSEC, zi->zi_funcname); 7195 } 7196 7197 static __attribute__((noreturn)) void 7198 ztest_thread(void *arg) 7199 { 7200 int rand; 7201 uint64_t id = (uintptr_t)arg; 7202 ztest_shared_t *zs = ztest_shared; 7203 uint64_t call_next; 7204 hrtime_t now; 7205 ztest_info_t *zi; 7206 ztest_shared_callstate_t *zc; 7207 7208 while ((now = gethrtime()) < zs->zs_thread_stop) { 7209 /* 7210 * See if it's time to force a crash. 7211 */ 7212 if (now > zs->zs_thread_kill) 7213 ztest_kill(zs); 7214 7215 /* 7216 * If we're getting ENOSPC with some regularity, stop. 7217 */ 7218 if (zs->zs_enospc_count > 10) 7219 break; 7220 7221 /* 7222 * Pick a random function to execute. 7223 */ 7224 rand = ztest_random(ZTEST_FUNCS); 7225 zi = &ztest_info[rand]; 7226 zc = ZTEST_GET_SHARED_CALLSTATE(rand); 7227 call_next = zc->zc_next; 7228 7229 if (now >= call_next && 7230 atomic_cas_64(&zc->zc_next, call_next, call_next + 7231 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) { 7232 ztest_execute(rand, zi, id); 7233 } 7234 } 7235 7236 thread_exit(); 7237 } 7238 7239 static void 7240 ztest_dataset_name(char *dsname, const char *pool, int d) 7241 { 7242 (void) snprintf(dsname, ZFS_MAX_DATASET_NAME_LEN, "%s/ds_%d", pool, d); 7243 } 7244 7245 static void 7246 ztest_dataset_destroy(int d) 7247 { 7248 char name[ZFS_MAX_DATASET_NAME_LEN]; 7249 int t; 7250 7251 ztest_dataset_name(name, ztest_opts.zo_pool, d); 7252 7253 if (ztest_opts.zo_verbose >= 3) 7254 (void) printf("Destroying %s to free up space\n", name); 7255 7256 /* 7257 * Cleanup any non-standard clones and snapshots. In general, 7258 * ztest thread t operates on dataset (t % zopt_datasets), 7259 * so there may be more than one thing to clean up. 7260 */ 7261 for (t = d; t < ztest_opts.zo_threads; 7262 t += ztest_opts.zo_datasets) 7263 ztest_dsl_dataset_cleanup(name, t); 7264 7265 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL, 7266 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); 7267 } 7268 7269 static void 7270 ztest_dataset_dirobj_verify(ztest_ds_t *zd) 7271 { 7272 uint64_t usedobjs, dirobjs, scratch; 7273 7274 /* 7275 * ZTEST_DIROBJ is the object directory for the entire dataset. 7276 * Therefore, the number of objects in use should equal the 7277 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself. 7278 * If not, we have an object leak. 
7279 * 7280 * Note that we can only check this in ztest_dataset_open(), 7281 * when the open-context and syncing-context values agree. 7282 * That's because zap_count() returns the open-context value, 7283 * while dmu_objset_space() returns the rootbp fill count. 7284 */ 7285 VERIFY0(zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs)); 7286 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch); 7287 ASSERT3U(dirobjs + 1, ==, usedobjs); 7288 } 7289 7290 static int 7291 ztest_dataset_open(int d) 7292 { 7293 ztest_ds_t *zd = &ztest_ds[d]; 7294 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq; 7295 objset_t *os; 7296 zilog_t *zilog; 7297 char name[ZFS_MAX_DATASET_NAME_LEN]; 7298 int error; 7299 7300 ztest_dataset_name(name, ztest_opts.zo_pool, d); 7301 7302 (void) pthread_rwlock_rdlock(&ztest_name_lock); 7303 7304 error = ztest_dataset_create(name); 7305 if (error == ENOSPC) { 7306 (void) pthread_rwlock_unlock(&ztest_name_lock); 7307 ztest_record_enospc(FTAG); 7308 return (error); 7309 } 7310 ASSERT(error == 0 || error == EEXIST); 7311 7312 VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, 7313 B_TRUE, zd, &os)); 7314 (void) pthread_rwlock_unlock(&ztest_name_lock); 7315 7316 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os); 7317 7318 zilog = zd->zd_zilog; 7319 7320 if (zilog->zl_header->zh_claim_lr_seq != 0 && 7321 zilog->zl_header->zh_claim_lr_seq < committed_seq) 7322 fatal(B_FALSE, "missing log records: " 7323 "claimed %"PRIu64" < committed %"PRIu64"", 7324 zilog->zl_header->zh_claim_lr_seq, committed_seq); 7325 7326 ztest_dataset_dirobj_verify(zd); 7327 7328 zil_replay(os, zd, ztest_replay_vector); 7329 7330 ztest_dataset_dirobj_verify(zd); 7331 7332 if (ztest_opts.zo_verbose >= 6) 7333 (void) printf("%s replay %"PRIu64" blocks, " 7334 "%"PRIu64" records, seq %"PRIu64"\n", 7335 zd->zd_name, 7336 zilog->zl_parse_blk_count, 7337 zilog->zl_parse_lr_count, 7338 zilog->zl_replaying_seq); 7339 7340 zilog = zil_open(os, ztest_get_data, NULL); 7341 7342 if (zilog->zl_replaying_seq != 0 && 7343 zilog->zl_replaying_seq < committed_seq) 7344 fatal(B_FALSE, "missing log records: " 7345 "replayed %"PRIu64" < committed %"PRIu64"", 7346 zilog->zl_replaying_seq, committed_seq); 7347 7348 return (0); 7349 } 7350 7351 static void 7352 ztest_dataset_close(int d) 7353 { 7354 ztest_ds_t *zd = &ztest_ds[d]; 7355 7356 zil_close(zd->zd_zilog); 7357 dmu_objset_disown(zd->zd_os, B_TRUE, zd); 7358 7359 ztest_zd_fini(zd); 7360 } 7361 7362 static int 7363 ztest_replay_zil_cb(const char *name, void *arg) 7364 { 7365 (void) arg; 7366 objset_t *os; 7367 ztest_ds_t *zdtmp; 7368 7369 VERIFY0(ztest_dmu_objset_own(name, DMU_OST_ANY, B_TRUE, 7370 B_TRUE, FTAG, &os)); 7371 7372 zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL); 7373 7374 ztest_zd_init(zdtmp, NULL, os); 7375 zil_replay(os, zdtmp, ztest_replay_vector); 7376 ztest_zd_fini(zdtmp); 7377 7378 if (dmu_objset_zil(os)->zl_parse_lr_count != 0 && 7379 ztest_opts.zo_verbose >= 6) { 7380 zilog_t *zilog = dmu_objset_zil(os); 7381 7382 (void) printf("%s replay %"PRIu64" blocks, " 7383 "%"PRIu64" records, seq %"PRIu64"\n", 7384 name, 7385 zilog->zl_parse_blk_count, 7386 zilog->zl_parse_lr_count, 7387 zilog->zl_replaying_seq); 7388 } 7389 7390 umem_free(zdtmp, sizeof (ztest_ds_t)); 7391 7392 dmu_objset_disown(os, B_TRUE, FTAG); 7393 return (0); 7394 } 7395 7396 static void 7397 ztest_freeze(void) 7398 { 7399 ztest_ds_t *zd = &ztest_ds[0]; 7400 spa_t *spa; 7401 int numloops = 0; 7402 7403 if (ztest_opts.zo_verbose >= 3) 7404 (void) printf("testing 
spa_freeze()...\n"); 7405 7406 kernel_init(SPA_MODE_READ | SPA_MODE_WRITE); 7407 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 7408 VERIFY0(ztest_dataset_open(0)); 7409 ztest_spa = spa; 7410 7411 /* 7412 * Force the first log block to be transactionally allocated. 7413 * We have to do this before we freeze the pool -- otherwise 7414 * the log chain won't be anchored. 7415 */ 7416 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) { 7417 ztest_dmu_object_alloc_free(zd, 0); 7418 zil_commit(zd->zd_zilog, 0); 7419 } 7420 7421 txg_wait_synced(spa_get_dsl(spa), 0); 7422 7423 /* 7424 * Freeze the pool. This stops spa_sync() from doing anything, 7425 * so that the only way to record changes from now on is the ZIL. 7426 */ 7427 spa_freeze(spa); 7428 7429 /* 7430 * Because it is hard to predict how much space a write will actually 7431 * require beforehand, we leave ourselves some fudge space to write over 7432 * capacity. 7433 */ 7434 uint64_t capacity = metaslab_class_get_space(spa_normal_class(spa)) / 2; 7435 7436 /* 7437 * Run tests that generate log records but don't alter the pool config 7438 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc). 7439 * We do a txg_wait_synced() after each iteration to force the txg 7440 * to increase well beyond the last synced value in the uberblock. 7441 * The ZIL should be OK with that. 7442 * 7443 * Run a random number of times less than zo_maxloops and ensure we do 7444 * not run out of space on the pool. 7445 */ 7446 while (ztest_random(10) != 0 && 7447 numloops++ < ztest_opts.zo_maxloops && 7448 metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) { 7449 ztest_od_t od; 7450 ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0); 7451 VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE)); 7452 ztest_io(zd, od.od_object, 7453 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT); 7454 txg_wait_synced(spa_get_dsl(spa), 0); 7455 } 7456 7457 /* 7458 * Commit all of the changes we just generated. 7459 */ 7460 zil_commit(zd->zd_zilog, 0); 7461 txg_wait_synced(spa_get_dsl(spa), 0); 7462 7463 /* 7464 * Close our dataset and close the pool. 7465 */ 7466 ztest_dataset_close(0); 7467 spa_close(spa, FTAG); 7468 kernel_fini(); 7469 7470 /* 7471 * Open and close the pool and dataset to induce log replay. 7472 */ 7473 kernel_init(SPA_MODE_READ | SPA_MODE_WRITE); 7474 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 7475 ASSERT3U(spa_freeze_txg(spa), ==, UINT64_MAX); 7476 VERIFY0(ztest_dataset_open(0)); 7477 ztest_spa = spa; 7478 txg_wait_synced(spa_get_dsl(spa), 0); 7479 ztest_dataset_close(0); 7480 ztest_reguid(NULL, 0); 7481 7482 spa_close(spa, FTAG); 7483 kernel_fini(); 7484 } 7485 7486 static void 7487 ztest_import_impl(void) 7488 { 7489 importargs_t args = { 0 }; 7490 nvlist_t *cfg = NULL; 7491 int nsearch = 1; 7492 char *searchdirs[nsearch]; 7493 int flags = ZFS_IMPORT_MISSING_LOG; 7494 7495 searchdirs[0] = ztest_opts.zo_dir; 7496 args.paths = nsearch; 7497 args.path = searchdirs; 7498 args.can_be_active = B_FALSE; 7499 7500 libpc_handle_t lpch = { 7501 .lpc_lib_handle = NULL, 7502 .lpc_ops = &libzpool_config_ops, 7503 .lpc_printerr = B_TRUE 7504 }; 7505 VERIFY0(zpool_find_config(&lpch, ztest_opts.zo_pool, &cfg, &args)); 7506 VERIFY0(spa_import(ztest_opts.zo_pool, cfg, NULL, flags)); 7507 fnvlist_free(cfg); 7508 } 7509 7510 /* 7511 * Import a storage pool with the given name. 
7512 */ 7513 static void 7514 ztest_import(ztest_shared_t *zs) 7515 { 7516 spa_t *spa; 7517 7518 mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 7519 mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL); 7520 VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL)); 7521 7522 kernel_init(SPA_MODE_READ | SPA_MODE_WRITE); 7523 7524 ztest_import_impl(); 7525 7526 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 7527 zs->zs_metaslab_sz = 7528 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 7529 zs->zs_guid = spa_guid(spa); 7530 spa_close(spa, FTAG); 7531 7532 kernel_fini(); 7533 7534 if (!ztest_opts.zo_mmp_test) { 7535 ztest_run_zdb(zs->zs_guid); 7536 ztest_freeze(); 7537 ztest_run_zdb(zs->zs_guid); 7538 } 7539 7540 (void) pthread_rwlock_destroy(&ztest_name_lock); 7541 mutex_destroy(&ztest_vdev_lock); 7542 mutex_destroy(&ztest_checkpoint_lock); 7543 } 7544 7545 /* 7546 * Kick off threads to run tests on all datasets in parallel. 7547 */ 7548 static void 7549 ztest_run(ztest_shared_t *zs) 7550 { 7551 spa_t *spa; 7552 objset_t *os; 7553 kthread_t *resume_thread, *deadman_thread; 7554 kthread_t **run_threads; 7555 uint64_t object; 7556 int error; 7557 int t, d; 7558 7559 ztest_exiting = B_FALSE; 7560 7561 /* 7562 * Initialize parent/child shared state. 7563 */ 7564 mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 7565 mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL); 7566 VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL)); 7567 7568 zs->zs_thread_start = gethrtime(); 7569 zs->zs_thread_stop = 7570 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC; 7571 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop); 7572 zs->zs_thread_kill = zs->zs_thread_stop; 7573 if (ztest_random(100) < ztest_opts.zo_killrate) { 7574 zs->zs_thread_kill -= 7575 ztest_random(ztest_opts.zo_passtime * NANOSEC); 7576 } 7577 7578 mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL); 7579 7580 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t), 7581 offsetof(ztest_cb_data_t, zcd_node)); 7582 7583 /* 7584 * Open our pool. It may need to be imported first depending on 7585 * what tests were running when the previous pass was terminated. 7586 */ 7587 kernel_init(SPA_MODE_READ | SPA_MODE_WRITE); 7588 error = spa_open(ztest_opts.zo_pool, &spa, FTAG); 7589 if (error) { 7590 VERIFY3S(error, ==, ENOENT); 7591 ztest_import_impl(); 7592 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 7593 zs->zs_metaslab_sz = 7594 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 7595 } 7596 7597 metaslab_preload_limit = ztest_random(20) + 1; 7598 ztest_spa = spa; 7599 7600 VERIFY0(vdev_raidz_impl_set("cycle")); 7601 7602 dmu_objset_stats_t dds; 7603 VERIFY0(ztest_dmu_objset_own(ztest_opts.zo_pool, 7604 DMU_OST_ANY, B_TRUE, B_TRUE, FTAG, &os)); 7605 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 7606 dmu_objset_fast_stat(os, &dds); 7607 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 7608 dmu_objset_disown(os, B_TRUE, FTAG); 7609 7610 /* 7611 * Create a thread to periodically resume suspended I/O. 7612 */ 7613 resume_thread = thread_create(NULL, 0, ztest_resume_thread, 7614 spa, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri); 7615 7616 /* 7617 * Create a deadman thread and set to panic if we hang. 
7618 */ 7619 deadman_thread = thread_create(NULL, 0, ztest_deadman_thread, 7620 zs, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri); 7621 7622 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; 7623 7624 /* 7625 * Verify that we can safely inquire about any object, 7626 * whether it's allocated or not. To make it interesting, 7627 * we probe a 5-wide window around each power of two. 7628 * This hits all edge cases, including zero and the max. 7629 */ 7630 for (t = 0; t < 64; t++) { 7631 for (d = -5; d <= 5; d++) { 7632 error = dmu_object_info(spa->spa_meta_objset, 7633 (1ULL << t) + d, NULL); 7634 ASSERT(error == 0 || error == ENOENT || 7635 error == EINVAL); 7636 } 7637 } 7638 7639 /* 7640 * If we got any ENOSPC errors on the previous run, destroy something. 7641 */ 7642 if (zs->zs_enospc_count != 0) { 7643 int d = ztest_random(ztest_opts.zo_datasets); 7644 ztest_dataset_destroy(d); 7645 } 7646 zs->zs_enospc_count = 0; 7647 7648 /* 7649 * If we were in the middle of ztest_device_removal() and were killed 7650 * we need to ensure the removal and scrub complete before running 7651 * any tests that check ztest_device_removal_active. The removal will 7652 * be restarted automatically when the spa is opened, but we need to 7653 * initiate the scrub manually if it is not already in progress. Note 7654 * that we always run the scrub whenever an indirect vdev exists 7655 * because we have no way of knowing for sure if ztest_device_removal() 7656 * fully completed its scrub before the pool was reimported. 7657 */ 7658 if (spa->spa_removing_phys.sr_state == DSS_SCANNING || 7659 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 7660 while (spa->spa_removing_phys.sr_state == DSS_SCANNING) 7661 txg_wait_synced(spa_get_dsl(spa), 0); 7662 7663 error = ztest_scrub_impl(spa); 7664 if (error == EBUSY) 7665 error = 0; 7666 ASSERT0(error); 7667 } 7668 7669 run_threads = umem_zalloc(ztest_opts.zo_threads * sizeof (kthread_t *), 7670 UMEM_NOFAIL); 7671 7672 if (ztest_opts.zo_verbose >= 4) 7673 (void) printf("starting main threads...\n"); 7674 7675 /* 7676 * Replay all logs of all datasets in the pool. This is primarily for 7677 * temporary datasets which wouldn't otherwise get replayed, which 7678 * can trigger failures when attempting to offline a SLOG in 7679 * ztest_fault_inject(). 7680 */ 7681 (void) dmu_objset_find(ztest_opts.zo_pool, ztest_replay_zil_cb, 7682 NULL, DS_FIND_CHILDREN); 7683 7684 /* 7685 * Kick off all the tests that run in parallel. 7686 */ 7687 for (t = 0; t < ztest_opts.zo_threads; t++) { 7688 if (t < ztest_opts.zo_datasets && ztest_dataset_open(t) != 0) { 7689 umem_free(run_threads, ztest_opts.zo_threads * 7690 sizeof (kthread_t *)); 7691 return; 7692 } 7693 7694 run_threads[t] = thread_create(NULL, 0, ztest_thread, 7695 (void *)(uintptr_t)t, 0, NULL, TS_RUN | TS_JOINABLE, 7696 defclsyspri); 7697 } 7698 7699 /* 7700 * Wait for all of the tests to complete. 7701 */ 7702 for (t = 0; t < ztest_opts.zo_threads; t++) 7703 VERIFY0(thread_join(run_threads[t])); 7704 7705 /* 7706 * Close all datasets. This must be done after all the threads 7707 * are joined so we can be sure none of the datasets are in-use 7708 * by any of the threads. 
7709 */ 7710 for (t = 0; t < ztest_opts.zo_threads; t++) { 7711 if (t < ztest_opts.zo_datasets) 7712 ztest_dataset_close(t); 7713 } 7714 7715 txg_wait_synced(spa_get_dsl(spa), 0); 7716 7717 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 7718 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa)); 7719 7720 umem_free(run_threads, ztest_opts.zo_threads * sizeof (kthread_t *)); 7721 7722 /* Kill the resume and deadman threads */ 7723 ztest_exiting = B_TRUE; 7724 VERIFY0(thread_join(resume_thread)); 7725 VERIFY0(thread_join(deadman_thread)); 7726 ztest_resume(spa); 7727 7728 /* 7729 * Right before closing the pool, kick off a bunch of async I/O; 7730 * spa_close() should wait for it to complete. 7731 */ 7732 for (object = 1; object < 50; object++) { 7733 dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20, 7734 ZIO_PRIORITY_SYNC_READ); 7735 } 7736 7737 /* Verify that at least one commit cb was called in a timely fashion */ 7738 if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG) 7739 VERIFY0(zc_min_txg_delay); 7740 7741 spa_close(spa, FTAG); 7742 7743 /* 7744 * Verify that we can loop over all pools. 7745 */ 7746 mutex_enter(&spa_namespace_lock); 7747 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) 7748 if (ztest_opts.zo_verbose > 3) 7749 (void) printf("spa_next: found %s\n", spa_name(spa)); 7750 mutex_exit(&spa_namespace_lock); 7751 7752 /* 7753 * Verify that we can export the pool and reimport it under a 7754 * different name. 7755 */ 7756 if ((ztest_random(2) == 0) && !ztest_opts.zo_mmp_test) { 7757 char name[ZFS_MAX_DATASET_NAME_LEN]; 7758 (void) snprintf(name, sizeof (name), "%s_import", 7759 ztest_opts.zo_pool); 7760 ztest_spa_import_export(ztest_opts.zo_pool, name); 7761 ztest_spa_import_export(name, ztest_opts.zo_pool); 7762 } 7763 7764 kernel_fini(); 7765 7766 list_destroy(&zcl.zcl_callbacks); 7767 mutex_destroy(&zcl.zcl_callbacks_lock); 7768 (void) pthread_rwlock_destroy(&ztest_name_lock); 7769 mutex_destroy(&ztest_vdev_lock); 7770 mutex_destroy(&ztest_checkpoint_lock); 7771 } 7772 7773 static void 7774 print_time(hrtime_t t, char *timebuf) 7775 { 7776 hrtime_t s = t / NANOSEC; 7777 hrtime_t m = s / 60; 7778 hrtime_t h = m / 60; 7779 hrtime_t d = h / 24; 7780 7781 s -= m * 60; 7782 m -= h * 60; 7783 h -= d * 24; 7784 7785 timebuf[0] = '\0'; 7786 7787 if (d) 7788 (void) sprintf(timebuf, 7789 "%llud%02lluh%02llum%02llus", d, h, m, s); 7790 else if (h) 7791 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); 7792 else if (m) 7793 (void) sprintf(timebuf, "%llum%02llus", m, s); 7794 else 7795 (void) sprintf(timebuf, "%llus", s); 7796 } 7797 7798 static nvlist_t * 7799 make_random_props(void) 7800 { 7801 nvlist_t *props; 7802 7803 props = fnvlist_alloc(); 7804 7805 if (ztest_random(2) == 0) 7806 return (props); 7807 7808 fnvlist_add_uint64(props, 7809 zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1); 7810 7811 return (props); 7812 } 7813 7814 /* 7815 * Create a storage pool with the given name and initial vdev size. 7816 * Then test spa_freeze() functionality. 7817 */ 7818 static void 7819 ztest_init(ztest_shared_t *zs) 7820 { 7821 spa_t *spa; 7822 nvlist_t *nvroot, *props; 7823 int i; 7824 7825 mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL); 7826 mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL); 7827 VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL)); 7828 7829 kernel_init(SPA_MODE_READ | SPA_MODE_WRITE); 7830 7831 /* 7832 * Create the storage pool. 
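*
* The nvroot built below describes a single top-level vdev of
* zo_vdev_size-sized file vdevs: zo_raid_children children in the
* configured raidz/draid layout, mirrored zs_mirrors ways. (This is a
* rough reading of the make_vdev_root() arguments; that helper is the
* authoritative description of the layout.)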
7833 */ 7834 (void) spa_destroy(ztest_opts.zo_pool); 7835 ztest_shared->zs_vdev_next_leaf = 0; 7836 zs->zs_splits = 0; 7837 zs->zs_mirrors = ztest_opts.zo_mirrors; 7838 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0, 7839 NULL, ztest_opts.zo_raid_children, zs->zs_mirrors, 1); 7840 props = make_random_props(); 7841 7842 /* 7843 * We don't expect the pool to suspend unless maxfaults == 0, 7844 * in which case ztest_fault_inject() temporarily takes away 7845 * the only valid replica. 7846 */ 7847 fnvlist_add_uint64(props, 7848 zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE), 7849 MAXFAULTS(zs) ? ZIO_FAILURE_MODE_PANIC : ZIO_FAILURE_MODE_WAIT); 7850 7851 for (i = 0; i < SPA_FEATURES; i++) { 7852 char *buf; 7853 7854 if (!spa_feature_table[i].fi_zfs_mod_supported) 7855 continue; 7856 7857 /* 7858 * 75% chance of using the log space map feature. We want ztest 7859 * to exercise both the code paths that use the log space map 7860 * feature and the ones that don't. 7861 */ 7862 if (i == SPA_FEATURE_LOG_SPACEMAP && ztest_random(4) == 0) 7863 continue; 7864 7865 VERIFY3S(-1, !=, asprintf(&buf, "feature@%s", 7866 spa_feature_table[i].fi_uname)); 7867 fnvlist_add_uint64(props, buf, 0); 7868 free(buf); 7869 } 7870 7871 VERIFY0(spa_create(ztest_opts.zo_pool, nvroot, props, NULL, NULL)); 7872 fnvlist_free(nvroot); 7873 fnvlist_free(props); 7874 7875 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG)); 7876 zs->zs_metaslab_sz = 7877 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift; 7878 zs->zs_guid = spa_guid(spa); 7879 spa_close(spa, FTAG); 7880 7881 kernel_fini(); 7882 7883 if (!ztest_opts.zo_mmp_test) { 7884 ztest_run_zdb(zs->zs_guid); 7885 ztest_freeze(); 7886 ztest_run_zdb(zs->zs_guid); 7887 } 7888 7889 (void) pthread_rwlock_destroy(&ztest_name_lock); 7890 mutex_destroy(&ztest_vdev_lock); 7891 mutex_destroy(&ztest_checkpoint_lock); 7892 } 7893 7894 static void 7895 setup_data_fd(void) 7896 { 7897 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX"; 7898 7899 ztest_fd_data = mkstemp(ztest_name_data); 7900 ASSERT3S(ztest_fd_data, >=, 0); 7901 (void) unlink(ztest_name_data); 7902 } 7903 7904 static int 7905 shared_data_size(ztest_shared_hdr_t *hdr) 7906 { 7907 int size; 7908 7909 size = hdr->zh_hdr_size; 7910 size += hdr->zh_opts_size; 7911 size += hdr->zh_size; 7912 size += hdr->zh_stats_size * hdr->zh_stats_count; 7913 size += hdr->zh_ds_size * hdr->zh_ds_count; 7914 7915 return (size); 7916 } 7917 7918 static void 7919 setup_hdr(void) 7920 { 7921 int size; 7922 ztest_shared_hdr_t *hdr; 7923 7924 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 7925 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0); 7926 ASSERT3P(hdr, !=, MAP_FAILED); 7927 7928 VERIFY0(ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t))); 7929 7930 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t); 7931 hdr->zh_opts_size = sizeof (ztest_shared_opts_t); 7932 hdr->zh_size = sizeof (ztest_shared_t); 7933 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t); 7934 hdr->zh_stats_count = ZTEST_FUNCS; 7935 hdr->zh_ds_size = sizeof (ztest_shared_ds_t); 7936 hdr->zh_ds_count = ztest_opts.zo_datasets; 7937 7938 size = shared_data_size(hdr); 7939 VERIFY0(ftruncate(ztest_fd_data, size)); 7940 7941 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize())); 7942 } 7943 7944 static void 7945 setup_data(void) 7946 { 7947 int size, offset; 7948 ztest_shared_hdr_t *hdr; 7949 uint8_t *buf; 7950 7951 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()), 7952 PROT_READ, MAP_SHARED, 

static void
setup_data(void)
{
	int size, offset;
	ztest_shared_hdr_t *hdr;
	uint8_t *buf;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ, MAP_SHARED, ztest_fd_data, 0);
	ASSERT3P(hdr, !=, MAP_FAILED);

	size = shared_data_size(hdr);

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
	hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
	ASSERT3P(hdr, !=, MAP_FAILED);
	buf = (uint8_t *)hdr;

	offset = hdr->zh_hdr_size;
	ztest_shared_opts = (void *)&buf[offset];
	offset += hdr->zh_opts_size;
	ztest_shared = (void *)&buf[offset];
	offset += hdr->zh_size;
	ztest_shared_callstate = (void *)&buf[offset];
	offset += hdr->zh_stats_size * hdr->zh_stats_count;
	ztest_shared_ds = (void *)&buf[offset];
}

static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
	pid_t pid;
	int status;
	char *cmdbuf = NULL;

	pid = fork();

	if (cmd == NULL) {
		cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
		(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
		cmd = cmdbuf;
	}

	if (pid == -1)
		fatal(B_TRUE, "fork failed");

	if (pid == 0) {	/* child */
		char fd_data_str[12];

		VERIFY3S(11, >=,
		    snprintf(fd_data_str, 12, "%d", ztest_fd_data));
		VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));

		if (libpath != NULL) {
			const char *curlp = getenv("LD_LIBRARY_PATH");
			if (curlp == NULL)
				VERIFY0(setenv("LD_LIBRARY_PATH", libpath, 1));
			else {
				char *newlp = NULL;
				VERIFY3S(-1, !=,
				    asprintf(&newlp, "%s:%s", libpath, curlp));
				VERIFY0(setenv("LD_LIBRARY_PATH", newlp, 1));
				free(newlp);
			}
		}
		(void) execl(cmd, cmd, (char *)NULL);
		ztest_dump_core = B_FALSE;
		fatal(B_TRUE, "exec failed: %s", cmd);
	}

	if (cmdbuf != NULL) {
		umem_free(cmdbuf, MAXPATHLEN);
		cmd = NULL;
	}

	while (waitpid(pid, &status, 0) != pid)
		continue;
	if (statusp != NULL)
		*statusp = status;

	if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) != 0) {
			(void) fprintf(stderr, "child exited with code %d\n",
			    WEXITSTATUS(status));
			exit(2);
		}
		return (B_FALSE);
	} else if (WIFSIGNALED(status)) {
		if (!ignorekill || WTERMSIG(status) != SIGKILL) {
			(void) fprintf(stderr, "child died with signal %d\n",
			    WTERMSIG(status));
			exit(3);
		}
		return (B_TRUE);
	} else {
		(void) fprintf(stderr, "something strange happened to child\n");
		exit(4);
	}
}

static void
ztest_run_init(void)
{
	int i;

	ztest_shared_t *zs = ztest_shared;

	/*
	 * Blow away any existing copy of zpool.cache
	 */
	(void) remove(spa_config_path);

	if (ztest_opts.zo_init == 0) {
		if (ztest_opts.zo_verbose >= 1)
			(void) printf("Importing pool %s\n",
			    ztest_opts.zo_pool);
		ztest_import(zs);
		return;
	}

	/*
	 * Create and initialize our storage pool.
	 */
	for (i = 1; i <= ztest_opts.zo_init; i++) {
		memset(zs, 0, sizeof (*zs));
		if (ztest_opts.zo_verbose >= 3 &&
		    ztest_opts.zo_init != 1) {
			(void) printf("ztest_init(), pass %d\n", i);
		}
		ztest_init(zs);
	}
}
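
/*
 * A rough sketch of how main() below drives exec_child(), given its
 * contract (B_FALSE: child exited cleanly; B_TRUE: child was SIGKILLed
 * and ignorekill was set; any other outcome terminates the parent):
 *
 *	zs->zs_do_init = B_TRUE;
 *	(void) exec_child(...);			child runs ztest_run_init()
 *	zs->zs_do_init = B_FALSE;
 *	while (gethrtime() < zs->zs_proc_stop)
 *		killed = exec_child(...);	child runs ztest_run()
 *
 * Each child locates the shared mmap-ed state through the ZTEST_FD_DATA
 * environment variable and takes the fd_data_str branch in main().
 */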

int
main(int argc, char **argv)
{
	int kills = 0;
	int iters = 0;
	int older = 0;
	int newer = 0;
	ztest_shared_t *zs;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;
	char timebuf[100];
	char numbuf[NN_NUMBUF_SZ];
	char *cmd;
	boolean_t hasalt;
	int f, err;
	char *fd_data_str = getenv("ZTEST_FD_DATA");
	struct sigaction action;

	(void) setvbuf(stdout, NULL, _IOLBF, 0);

	dprintf_setup(&argc, argv);
	zfs_deadman_synctime_ms = 300000;
	zfs_deadman_checktime_ms = 30000;
	/*
	 * As two-word space map entries may not come up often (especially
	 * if pool and vdev sizes are small) we want to force at least some
	 * of them so the feature gets tested.
	 */
	zfs_force_some_double_word_sm_entries = B_TRUE;

	/*
	 * Verify that even extensively damaged split blocks with many
	 * segments can be reconstructed in a reasonable amount of time
	 * when reconstruction is known to be possible.
	 *
	 * Note: the lower this value is, the more damage we inflict, and
	 * the more time ztest spends in recovering that damage. We chose
	 * to induce damage 1/100th of the time so recovery is tested but
	 * not so frequently that ztest doesn't get to test other code paths.
	 */
	zfs_reconstruct_indirect_damage_fraction = 100;

	action.sa_handler = sig_handler;
	sigemptyset(&action.sa_mask);
	action.sa_flags = 0;

	if (sigaction(SIGSEGV, &action, NULL) < 0) {
		(void) fprintf(stderr, "ztest: cannot catch SIGSEGV: %s.\n",
		    strerror(errno));
		exit(EXIT_FAILURE);
	}

	if (sigaction(SIGABRT, &action, NULL) < 0) {
		(void) fprintf(stderr, "ztest: cannot catch SIGABRT: %s.\n",
		    strerror(errno));
		exit(EXIT_FAILURE);
	}

	/*
	 * Force random_get_bytes() to use /dev/urandom in order to prevent
	 * ztest from needlessly depleting the system entropy pool.
	 */
	random_path = "/dev/urandom";
	ztest_fd_rand = open(random_path, O_RDONLY | O_CLOEXEC);
	ASSERT3S(ztest_fd_rand, >=, 0);

	if (!fd_data_str) {
		process_options(argc, argv);

		setup_data_fd();
		setup_hdr();
		setup_data();
		memcpy(ztest_shared_opts, &ztest_opts,
		    sizeof (*ztest_shared_opts));
	} else {
		ztest_fd_data = atoi(fd_data_str);
		setup_data();
		memcpy(&ztest_opts, ztest_shared_opts, sizeof (ztest_opts));
	}
	ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);

	err = ztest_set_global_vars();
	if (err != 0 && !fd_data_str) {
		/* error message done by ztest_set_global_vars */
		exit(EXIT_FAILURE);
	} else {
		/* children should not be spawned if setting gvars fails */
		VERIFY3S(err, ==, 0);
	}

	/* Override location of zpool.cache */
	VERIFY3S(asprintf((char **)&spa_config_path, "%s/zpool.cache",
	    ztest_opts.zo_dir), !=, -1);

	ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
	    UMEM_NOFAIL);
	zs = ztest_shared;

	if (fd_data_str) {
		metaslab_force_ganging = ztest_opts.zo_metaslab_force_ganging;
		metaslab_df_alloc_threshold =
		    zs->zs_metaslab_df_alloc_threshold;

		if (zs->zs_do_init)
			ztest_run_init();
		else
			ztest_run(zs);
		exit(0);
	}

	hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);

	if (ztest_opts.zo_verbose >= 1) {
		(void) printf("%"PRIu64" vdevs, %d datasets, %d threads, "
		    "%d %s disks, %"PRIu64" seconds...\n\n",
		    ztest_opts.zo_vdevs,
		    ztest_opts.zo_datasets,
		    ztest_opts.zo_threads,
		    ztest_opts.zo_raid_children,
		    ztest_opts.zo_raid_type,
		    ztest_opts.zo_time);
	}

	cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
	(void) strlcpy(cmd, getexecname(), MAXNAMELEN);

	zs->zs_do_init = B_TRUE;
	if (strlen(ztest_opts.zo_alt_ztest) != 0) {
		if (ztest_opts.zo_verbose >= 1) {
			(void) printf("Executing older ztest for "
			    "initialization: %s\n", ztest_opts.zo_alt_ztest);
		}
		VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
		    ztest_opts.zo_alt_libpath, B_FALSE, NULL));
	} else {
		VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
	}
	zs->zs_do_init = B_FALSE;

	zs->zs_proc_start = gethrtime();
	zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;

	for (f = 0; f < ZTEST_FUNCS; f++) {
		zi = &ztest_info[f];
		zc = ZTEST_GET_SHARED_CALLSTATE(f);
		if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
			zc->zc_next = UINT64_MAX;
		else
			zc->zc_next = zs->zs_proc_start +
			    ztest_random(2 * zi->zi_interval[0] + 1);
	}
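
	/*
	 * Note: each first-fire time above is zs_proc_start plus a value
	 * drawn uniformly from [0, 2 * zi_interval[0]], which averages to
	 * one full interval and staggers the periodic functions rather
	 * than firing them all at startup. Functions whose interval
	 * exceeds the total runtime are disabled outright via UINT64_MAX.
	 */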

	/*
	 * Run the tests in a loop. These tests include fault injection
	 * to verify that self-healing data works, and forced crashes
	 * to verify that we never lose on-disk consistency.
	 */
	while (gethrtime() < zs->zs_proc_stop) {
		int status;
		boolean_t killed;

		/*
		 * Initialize the workload counters for each function.
		 */
		for (f = 0; f < ZTEST_FUNCS; f++) {
			zc = ZTEST_GET_SHARED_CALLSTATE(f);
			zc->zc_count = 0;
			zc->zc_time = 0;
		}

		/* Set the allocation switch size */
		zs->zs_metaslab_df_alloc_threshold =
		    ztest_random(zs->zs_metaslab_sz / 4) + 1;

		if (!hasalt || ztest_random(2) == 0) {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing newer ztest: %s\n",
				    cmd);
			}
			newer++;
			killed = exec_child(cmd, NULL, B_TRUE, &status);
		} else {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing older ztest: %s\n",
				    ztest_opts.zo_alt_ztest);
			}
			older++;
			killed = exec_child(ztest_opts.zo_alt_ztest,
			    ztest_opts.zo_alt_libpath, B_TRUE, &status);
		}

		if (killed)
			kills++;
		iters++;

		if (ztest_opts.zo_verbose >= 1) {
			hrtime_t now = gethrtime();

			now = MIN(now, zs->zs_proc_stop);
			print_time(zs->zs_proc_stop - now, timebuf);
			nicenum(zs->zs_space, numbuf, sizeof (numbuf));

			(void) printf("Pass %3d, %8s, %3"PRIu64" ENOSPC, "
			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
			    iters,
			    WIFEXITED(status) ? "Complete" : "SIGKILL",
			    zs->zs_enospc_count,
			    100.0 * zs->zs_alloc / zs->zs_space,
			    numbuf,
			    100.0 * (now - zs->zs_proc_start) /
			    (ztest_opts.zo_time * NANOSEC), timebuf);
		}

		if (ztest_opts.zo_verbose >= 2) {
			(void) printf("\nWorkload summary:\n\n");
			(void) printf("%7s %9s %s\n",
			    "Calls", "Time", "Function");
			(void) printf("%7s %9s %s\n",
			    "-----", "----", "--------");
			for (f = 0; f < ZTEST_FUNCS; f++) {
				zi = &ztest_info[f];
				zc = ZTEST_GET_SHARED_CALLSTATE(f);
				print_time(zc->zc_time, timebuf);
				(void) printf("%7"PRIu64" %9s %s\n",
				    zc->zc_count, timebuf,
				    zi->zi_funcname);
			}
			(void) printf("\n");
		}

		if (!ztest_opts.zo_mmp_test)
			ztest_run_zdb(zs->zs_guid);
	}

	if (ztest_opts.zo_verbose >= 1) {
		if (hasalt) {
			(void) printf("%d runs of older ztest: %s\n", older,
			    ztest_opts.zo_alt_ztest);
			(void) printf("%d runs of newer ztest: %s\n", newer,
			    cmd);
		}
		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));
	}

	umem_free(cmd, MAXNAMELEN);

	return (0);
}