/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault. These are kept in a global list. Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field. If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */

#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled = 0;

/*
 * Data describing each zinject handler registered on the system; it also
 * contains the list node linking the handler into the global zinject
 * handler list.
 */
typedef struct inject_handler {
	int zi_id;
	spa_t *zi_spa;
	zinject_record_t zi_record;
	uint64_t *zi_lanes;
	int zi_next_lane;
	list_node_t zi_link;
} inject_handler_t;

/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;

/*
 * This protects insertion into, and traversal of, the inject handler
 * list defined above, as well as inject_delay_count. Any time a
 * handler is inserted or removed from the list, this lock should be
 * taken as RW_WRITER; and any time traversal is done over the list
 * (without modification to it) this lock should be taken as RW_READER.
 */
static krwlock_t inject_lock;

/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above. Thus, modifications to this count must be made while holding
 * the inject_lock as RW_WRITER, and reads of this count must hold it
 * as (at least) RW_READER.
 */
static int inject_delay_count = 0;

/*
 * This lock is used only in zio_handle_io_delay(); refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;

/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;

/*
 * Test if the requested frequency was triggered.
 */
static boolean_t
freq_triggered(uint32_t frequency)
{
	/*
	 * zero implies always (100%)
	 */
	if (frequency == 0)
		return (B_TRUE);

	/*
	 * Note: we still handle legacy (unscaled) frequency values
	 */
	uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

	return (random_in_range(maximum) < frequency);
}

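/*
 * Worked example (illustrative): a handler registered with the legacy
 * value zi_freq = 25 is checked against a maximum of 100, so roughly
 * 25% of matching I/Os trigger. A scaled value of ZI_PERCENTAGE_MAX / 4
 * expresses the same 25%, but with finer granularity, since
 * random_in_range() then draws from [0, ZI_PERCENTAGE_MAX).
 */
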
/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(const zbookmark_phys_t *zb, uint64_t type, int dva,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (freq_triggered(record->zi_freq));
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    (record->zi_dvas == 0 || (record->zi_dvas & (1ULL << dva))) &&
	    error == record->zi_error) {
		return (freq_triggered(record->zi_freq));
	}

	return (B_FALSE);
}

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0)
			panic("Panic requested in function %s\n", tag);
	}

	rw_exit(&inject_lock);
}

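/*
 * Illustrative call pattern (actual call sites vary): code that wants
 * to be panic-testable across a config change might call
 *
 *	zio_handle_panic_injection(spa, FTAG, 1);
 *
 * where FTAG expands to the enclosing function's name (matched against
 * zi_func) and the numeric type distinguishes multiple injection
 * points within the same function.
 */
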
/*
 * Inject a decryption failure. Decryption failures can occur in
 * both the ARC and the ZIO layers.
 */
int
zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
    uint64_t type, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
			continue;

		if (zio_match_handler(zb, type, ZI_NO_DVA,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}

/*
 * If this is a physical I/O for a vdev child, determine which DVA it
 * is for. We iterate backwards through the DVAs matching on the offset
 * so that we end up with ZI_NO_DVA (-1) if we don't find a match.
 */
static int
zio_match_dva(zio_t *zio)
{
	int i = ZI_NO_DVA;

	if (zio->io_bp != NULL && zio->io_vd != NULL &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		for (i = BP_GET_NDVAS(zio->io_bp) - 1; i >= 0; i--) {
			dva_t *dva = &zio->io_bp->blk_dva[i];
			uint64_t off = DVA_GET_OFFSET(dva);
			vdev_t *vd = vdev_lookup_top(zio->io_spa,
			    DVA_GET_VDEV(dva));

			/* Compensate for vdev label added to leaves */
			if (zio->io_vd->vdev_ops->vdev_op_leaf)
				off += VDEV_LABEL_START_SIZE;

			if (zio->io_vd == vd && zio->io_offset == off)
				break;
		}
	}

	return (i);
}

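/*
 * Worked example (illustrative): a record created with zi_dvas = 0x3
 * matches only I/O issued for DVA copies 0 and 1. zio_match_dva()
 * maps a physical child read back to the DVA index it services, and
 * zio_match_handler() tests (1ULL << dva) against the mask, so a read
 * satisfied from copy 2 of a triply-redundant block would not trigger
 * the fault.
 */
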
/*
 * Determine if the I/O in question should return failure. Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	/*
	 * A rebuild I/O has no checksum to verify.
	 */
	if (zio->io_priority == ZIO_PRIORITY_REBUILD && error == ECKSUM)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return the specified error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    zio_match_dva(zio), &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

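/*
 * Illustrative call pattern (hedged; see the ZIO pipeline and ARC for
 * the real call sites): a read completion path asks whether an error
 * should be simulated for this I/O, e.g.
 *
 *	if (zio->io_error == 0)
 *		zio->io_error = zio_handle_fault_injection(zio, EIO);
 *
 * A zero return value leaves the I/O untouched.
 */
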
/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region
 * of the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
			continue;

		/*
		 * The injection region is expressed as relative offsets
		 * within a vdev label. We must determine the label which
		 * is being updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}
	rw_exit(&inject_lock);
	return (ret);
}

static int
zio_inject_bitflip_cb(void *data, size_t len, void *private)
{
	zio_t *zio = private;
	uint8_t *buffer = data;
	uint_t byte = random_in_range(len);

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	/* flip a single random bit in an abd data buffer */
	buffer[byte] ^= 1 << random_in_range(8);

	return (1);	/* stop after first flip */
}

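/*
 * Note (illustrative): abd_iterate_func() may invoke the callback
 * above once per scatter/gather segment, so 'len' is the length of
 * the current segment rather than of the whole buffer. Returning
 * nonzero stops the iteration, which is how at most one bit is
 * flipped per injected read.
 */
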
static int
zio_handle_device_injection_impl(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during
	 * device open (i.e. zio == NULL).
	 */
	if (zio != NULL) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL &&
			    handler->zi_record.zi_iotype != ZIO_TYPES &&
			    handler->zi_record.zi_iotype != zio->io_type)
				continue;

			if (handler->zi_record.zi_error == err1 ||
			    handler->zi_record.zi_error == err2) {
				/*
				 * limit error injection if requested
				 */
				if (!freq_triggered(handler->zi_record.zi_freq))
					continue;

				/*
				 * For a failed open, pretend that the
				 * device has gone away.
				 */
				if (err1 == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;

				/*
				 * Treat these errors as if they had been
				 * retried so that all the appropriate stats
				 * and FMA events are generated.
				 */
				if (!handler->zi_record.zi_failfast &&
				    zio != NULL)
					zio->io_flags |= ZIO_FLAG_IO_RETRY;

				/*
				 * EILSEQ means flip a bit after a read
				 */
				if (handler->zi_record.zi_error == EILSEQ) {
					if (zio == NULL)
						break;

					/* locate buffer data and flip a bit */
					(void) abd_iterate_func(zio->io_abd, 0,
					    zio->io_size, zio_inject_bitflip_cb,
					    zio);
					break;
				}

				ret = handler->zi_record.zi_error;
				break;
			}
			if (handler->zi_record.zi_error == ENXIO) {
				ret = SET_ERROR(EIO);
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	return (zio_handle_device_injection_impl(vd, zio, error, INT_MAX));
}

int
zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	return (zio_handle_device_injection_impl(vd, zio, err1, err2));
}

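/*
 * Illustrative usage (see zinject(8) for authoritative syntax):
 * "zinject -d <vdev> -e io <pool>" registers a ZINJECT_DEVICE_FAULT
 * record with zi_error = EIO for that vdev's guid, while an error
 * type of "corrupt" maps to EILSEQ and exercises the bit-flip path
 * above.
 */
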
/*
 * Simulate hardware that ignores cache flushes. For the requested
 * number of seconds, nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (random_in_range(100) < 60)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}

void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    ddi_time_after64(
			    (int64_t)handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz,
			    ddi_get_lbolt64()));
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}

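/*
 * Worked example (illustrative): with zi_duration = 30, the first
 * matching write latches the current lbolt into zi_timer, and the
 * VERIFY above checks that no more than 30 * hz ticks have elapsed.
 * With zi_duration = -2, the first write latches its txg instead, and
 * the syncing txg may advance at most two txgs past it.
 */
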
hrtime_t
zio_handle_io_delay(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	inject_handler_t *min_handler = NULL;
	hrtime_t min_target = 0;

	rw_enter(&inject_lock, RW_READER);

	/*
	 * inject_delay_count is a subset of zio_injection_enabled that
	 * is only incremented for delay handlers. These checks are
	 * mainly added to remind the reader why we're not explicitly
	 * checking zio_injection_enabled like the other functions.
	 */
	IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
	IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

	/*
	 * If there aren't any inject delay handlers registered, then we
	 * can short circuit and simply return 0 here. A value of zero
	 * informs zio_delay_interrupt() that this request should not be
	 * delayed. This short circuit keeps us from acquiring the
	 * inject_delay_mtx unnecessarily.
	 */
	if (inject_delay_count == 0) {
		rw_exit(&inject_lock);
		return (0);
	}

	/*
	 * Each inject handler has a number of "lanes" associated with
	 * it. Each lane is able to handle requests independently of one
	 * another, and at a latency defined by the inject handler
	 * record's zi_timer field. Thus, if a handler is configured with
	 * a single lane and a 10ms latency, it will delay requests
	 * such that only a single request is completed every 10ms. So,
	 * if more than one request is attempted in each 10ms interval,
	 * the average latency of the requests will be greater than
	 * 10ms; but if only a single request is submitted each 10ms
	 * interval the average latency will be 10ms.
	 *
	 * We need to acquire this mutex to prevent multiple concurrent
	 * threads being assigned to the same lane of a given inject
	 * handler. The mutex allows us to perform the following two
	 * operations atomically:
	 *
	 *	1. determine the minimum handler and minimum target
	 *	   value of all the possible handlers
	 *	2. update that minimum handler's lane array
	 *
	 * Without atomicity, two (or more) threads could pick the same
	 * lane in step (1), and then conflict with each other in step
	 * (2). This could allow a single lane handler to process
	 * multiple requests simultaneously, which shouldn't be possible.
	 */

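	/*
	 * Worked example (illustrative): a handler with zi_nlanes = 2
	 * and a 10ms zi_timer behaves like a device with two independent
	 * 10ms queues. Three I/Os arriving together are assigned targets
	 * of 10ms (lane 0), 10ms (lane 1), and 20ms (lane 0 again), so
	 * per-I/O latency grows only once every lane is busy.
	 */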
	mutex_enter(&inject_delay_mtx);

	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
			continue;

		if (!freq_triggered(handler->zi_record.zi_freq))
			continue;

		if (vd->vdev_guid != handler->zi_record.zi_guid)
			continue;

		/*
		 * Defensive; should never happen as the array allocation
		 * occurs prior to inserting this handler on the list.
		 */
		ASSERT3P(handler->zi_lanes, !=, NULL);

		/*
		 * This should never happen; the zinject command should
		 * prevent a user from setting an IO delay with zero lanes.
		 */
		ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

		ASSERT3U(handler->zi_record.zi_nlanes, >,
		    handler->zi_next_lane);

		/*
		 * We want to issue this IO to the lane that will become
		 * idle the soonest, so we compare the soonest this
		 * specific handler can complete the IO with all other
		 * handlers, to find the lowest value of all possible
		 * lanes. We then use this lane to submit the request.
		 *
		 * Since each handler has a constant value for its
		 * delay, we can just use the "next" lane for that
		 * handler, as it will always be the lane with the
		 * lowest value for that particular handler (i.e. the
		 * lane that will become idle the soonest). This saves a
		 * scan of each handler's lanes array.
		 *
		 * There are two cases to consider when determining when
		 * this specific IO request should complete. If this
		 * lane is idle, we want to "submit" the request now so
		 * it will complete after one zi_timer interval. Thus,
		 * we set the target to now + zi_timer.
		 *
		 * If the lane is busy, we want this request to complete
		 * one zi_timer interval after the lane becomes idle.
		 * Since the 'zi_lanes' array holds the time at which
		 * each lane will become idle, we use that value to
		 * determine when this request should complete.
		 */
		hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
		hrtime_t busy = handler->zi_record.zi_timer +
		    handler->zi_lanes[handler->zi_next_lane];
		hrtime_t target = MAX(idle, busy);

		if (min_handler == NULL) {
			min_handler = handler;
			min_target = target;
			continue;
		}

		ASSERT3P(min_handler, !=, NULL);
		ASSERT3U(min_target, !=, 0);

		/*
		 * We don't yet increment the "next lane" variable since
		 * we still might find a lower value lane in another
		 * handler during any remaining iterations. Once we're
		 * sure we've selected the absolute minimum, we'll claim
		 * the lane and increment the handler's "next lane"
		 * field below.
		 */

		if (target < min_target) {
			min_handler = handler;
			min_target = target;
		}
	}

	/*
	 * 'min_handler' will be NULL if no IO delays are registered for
	 * this vdev, otherwise it will point to the handler containing
	 * the lane that will become idle the soonest.
	 */
	if (min_handler != NULL) {
		ASSERT3U(min_target, !=, 0);
		min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

		/*
		 * If we've used all possible lanes for this handler,
		 * loop back and start using the first lane again;
		 * otherwise, just increment the lane index.
		 */
		min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
		    min_handler->zi_record.zi_nlanes;
	}

	mutex_exit(&inject_delay_mtx);
	rw_exit(&inject_lock);

	return (min_target);
}

static int
zio_calculate_range(const char *pool, zinject_record_t *record)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	objset_t *os = NULL;
	dnode_t *dn = NULL;
	int error;

	/*
	 * Obtain the dnode for object using pool, objset, and object
	 */
	error = dsl_pool_hold(pool, FTAG, &dp);
	if (error)
		return (error);

	error = dsl_dataset_hold_obj(dp, record->zi_objset, FTAG, &ds);
	dsl_pool_rele(dp, FTAG);
	if (error)
		return (error);

	error = dmu_objset_from_ds(ds, &os);
	dsl_dataset_rele(ds, FTAG);
	if (error)
		return (error);

	error = dnode_hold(os, record->zi_object, FTAG, &dn);
	if (error)
		return (error);

	/*
	 * Translate the range into block IDs
	 */
	if (record->zi_start != 0 || record->zi_end != -1ULL) {
		record->zi_start >>= dn->dn_datablkshift;
		record->zi_end >>= dn->dn_datablkshift;
	}
	if (record->zi_level > 0) {
		if (record->zi_level >= dn->dn_nlevels) {
			dnode_rele(dn, FTAG);
			return (SET_ERROR(EDOM));
		}

		if (record->zi_start != 0 || record->zi_end != 0) {
			int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			for (int level = record->zi_level; level > 0; level--) {
				record->zi_start >>= shift;
				record->zi_end >>= shift;
			}
		}
	}

	dnode_rele(dn, FTAG);
	return (0);
}

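/*
 * Worked example (illustrative): for a dataset using 128K data blocks
 * (dn_datablkshift = 17), a byte range of [0, 1M) becomes block IDs 0
 * through 7. For zi_level = 1 with 128K indirect blocks, each L1 block
 * covers 2^(17 - SPA_BLKPTRSHIFT) = 1024 of those IDs, so the range is
 * shifted down by a further 10 bits per level.
 */
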
/*
 * Create a new handler for the given record. We add it to the list, adding
 * a reference to the spa_t in the process. We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (record->zi_cmd == ZINJECT_DELAY_IO) {
		/*
		 * A value of zero for the number of lanes or for the
		 * delay time doesn't make sense.
		 */
		if (record->zi_timer == 0 || record->zi_nlanes == 0)
			return (SET_ERROR(EINVAL));

		/*
		 * The number of lanes is directly mapped to the size of
		 * an array used by the handler. Thus, to ensure the
		 * user doesn't trigger an allocation that's "too large",
		 * we cap the number of lanes here.
		 */
		if (record->zi_nlanes >= UINT16_MAX)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * If the supplied range was in bytes, calculate the actual blkid.
	 */
	if (flags & ZINJECT_CALC_RANGE) {
		error = zio_calculate_range(name, record);
		if (error != 0)
			return (error);
	}

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_addref() will add an injection reference,
		 * which will prevent the pool from being removed from the
		 * namespace while still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (SET_ERROR(ENOENT));

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		handler->zi_spa = spa;
		handler->zi_record = *record;

		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			handler->zi_lanes = kmem_zalloc(
			    sizeof (*handler->zi_lanes) *
			    handler->zi_record.zi_nlanes, KM_SLEEP);
			handler->zi_next_lane = 0;
		} else {
			handler->zi_lanes = NULL;
			handler->zi_next_lane = 0;
		}

		rw_enter(&inject_lock, RW_WRITER);

		/*
		 * We can't move this increment into the conditional
		 * above because we need to hold the RW_WRITER lock of
		 * inject_lock, and we don't want to hold that while
		 * allocating the handler's zi_lanes array.
		 */
		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			ASSERT3S(inject_delay_count, >=, 0);
			inject_delay_count++;
			ASSERT3S(inject_delay_count, >, 0);
		}

		*id = handler->zi_id = inject_next_id++;
		list_insert_tail(&inject_handlers, handler);
		atomic_inc_32(&zio_injection_enabled);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer. Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		/*
		 * We must use FALSE to ensure arc_flush returns, since
		 * we're not preventing concurrent ARC insertions.
		 */
		arc_flush(NULL, FALSE);

	return (0);
}

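/*
 * Illustrative flow (hedged): the zinject(8) front end fills in a
 * zinject_record_t and issues a ZFS_IOC_INJECT_FAULT ioctl, which
 * lands here. The id stored above is what a later "zinject -c <id>"
 * hands to zio_clear_fault(), and listing the active handlers walks
 * them via zio_inject_list_next() below.
 */
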
/*
 * Returns the next record with an ID greater than that supplied to the
 * function. Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = SET_ERROR(ENOENT);
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3S(inject_delay_count, >, 0);
		inject_delay_count--;
		ASSERT3S(inject_delay_count, >=, 0);
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3P(handler->zi_lanes, !=, NULL);
		kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
		    handler->zi_record.zi_nlanes);
	} else {
		ASSERT3P(handler->zi_lanes, ==, NULL);
	}

	spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_dec_32(&zio_injection_enabled);

	return (0);
}

void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	mutex_destroy(&inject_delay_mtx);
	rw_destroy(&inject_lock);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(zio_injection_enabled);
EXPORT_SYMBOL(zio_inject_fault);
EXPORT_SYMBOL(zio_inject_list_next);
EXPORT_SYMBOL(zio_clear_fault);
EXPORT_SYMBOL(zio_handle_fault_injection);
EXPORT_SYMBOL(zio_handle_device_injection);
EXPORT_SYMBOL(zio_handle_label_injection);
#endif