/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault. These are kept in a global list. Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field. If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */
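
/*
 * For example (illustrative values only), a record that injects EIO into
 * reads of blocks 0-100 of object 5 in objset 21 would look roughly like:
 *
 *	zinject_record_t record = { 0 };
 *	record.zi_objset = 21;
 *	record.zi_object = 5;
 *	record.zi_start = 0;
 *	record.zi_end = 100;
 *	record.zi_error = EIO;
 *
 * Leaving 'zi_guid' at zero marks this as a data error rather than a
 * device error.
 */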

#include <sys/arc.h>
#include <sys/zio_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled;

typedef struct inject_handler {
	int			zi_id;
	spa_t			*zi_spa;
	zinject_record_t	zi_record;
	list_node_t		zi_link;
} inject_handler_t;

static list_t inject_handlers;
static krwlock_t inject_lock;
static int inject_next_id = 1;

/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(zbookmark_t *zb, uint64_t type,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (record->zi_freq == 0 ||
			    spa_get_random(100) < record->zi_freq);
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    error == record->zi_error)
		return (record->zi_freq == 0 ||
		    spa_get_random(100) < record->zi_freq);

	return (B_FALSE);
}

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0)
			panic("Panic requested in function %s\n", tag);
	}

	rw_exit(&inject_lock);
}

/*
 * Determine if the I/O in question should return failure. Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa)
			continue;

		/* Ignore device errors and panic injection */
		if (handler->zi_record.zi_guid != 0 ||
		    handler->zi_record.zi_func[0] != '\0' ||
		    handler->zi_record.zi_duration != 0)
			continue;

		/* If this handler matches, return the specified error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region of
 * the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		/* Ignore device only faults or panic injection */
		if (handler->zi_record.zi_start == 0 ||
		    handler->zi_record.zi_func[0] != '\0' ||
		    handler->zi_record.zi_duration != 0)
			continue;

		/*
		 * The injection region is the relative offsets within a
		 * vdev label. We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}
	rw_exit(&inject_lock);
	return (ret);
}
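
/*
 * Determine if the given vdev (and, when supplied, the I/O) matches a
 * device-level injection handler, keyed by vdev guid and optionally
 * restricted to a particular I/O type. Returns the errno to be returned
 * to the caller, or 0 if no handler applies.
 */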
int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during
	 * device open (i.e. zio == NULL).
	 */
	if (zio != NULL) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/*
		 * Ignore label specific faults, panic injection
		 * or fake writes
		 */
		if (handler->zi_record.zi_start != 0 ||
		    handler->zi_record.zi_func[0] != '\0' ||
		    handler->zi_record.zi_duration != 0)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL &&
			    handler->zi_record.zi_iotype != ZIO_TYPES &&
			    handler->zi_record.zi_iotype != zio->io_type)
				continue;

			if (handler->zi_record.zi_error == error) {
				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (error == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;
				ret = error;
				break;
			}
			if (handler->zi_record.zi_error == ENXIO) {
				ret = EIO;
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Simulate hardware that ignores cache flushes. For the requested number
 * of seconds, nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_duration == 0)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (spa_get_random(100) < 60)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}
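
/*
 * Verify that any ignored-writes handler for this pool has not exceeded
 * its requested duration (a positive duration is in seconds, a negative
 * one in txgs); if it has, the VERIFY trips and panics the system.
 */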
void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_duration == 0)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz >
			    ddi_get_lbolt64());
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}

/*
 * Create a new handler for the given record. We add it to the list, adding
 * a reference to the spa_t in the process. We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_addref() will add an injection reference, which
		 * will prevent the pool from being removed from the namespace
		 * while still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (ENOENT);

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		rw_enter(&inject_lock, RW_WRITER);

		*id = handler->zi_id = inject_next_id++;
		handler->zi_spa = spa;
		handler->zi_record = *record;
		list_insert_tail(&inject_handlers, handler);
		atomic_add_32(&zio_injection_enabled, 1);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer. Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		arc_flush(NULL);

	return (0);
}
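
/*
 * For example (illustrative only), a consumer registers a handler and
 * later removes it by id roughly as follows:
 *
 *	int id;
 *
 *	error = zio_inject_fault(poolname, flags, &id, &record);
 *	...
 *	error = zio_clear_fault(id);
 */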

/*
 * Returns the next record with an ID greater than that supplied to the
 * function. Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = ENOENT;
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;
	int ret;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		ret = ENOENT;
	} else {
		list_remove(&inject_handlers, handler);
		spa_inject_delref(handler->zi_spa);
		kmem_free(handler, sizeof (inject_handler_t));
		atomic_add_32(&zio_injection_enabled, -1);
		ret = 0;
	}

	rw_exit(&inject_lock);

	return (ret);
}

void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	rw_destroy(&inject_lock);
}