/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault. These are kept in a global list. Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field. If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */

#include <sys/arc.h>
#include <sys/zio_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled;

typedef struct inject_handler {
        int                     zi_id;
        spa_t                   *zi_spa;
        zinject_record_t        zi_record;
        list_node_t             zi_link;
} inject_handler_t;

static list_t inject_handlers;
static krwlock_t inject_lock;
static int inject_next_id = 1;

/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(zbookmark_t *zb, uint64_t type,
    zinject_record_t *record, int error)
{
        /*
         * Check for a match against the MOS, which is based on type
         */
        if (zb->zb_objset == 0 && record->zi_objset == 0 &&
            record->zi_object == 0) {
                if (record->zi_type == DMU_OT_NONE ||
                    type == record->zi_type)
                        return (record->zi_freq == 0 ||
                            spa_get_random(100) < record->zi_freq);
                else
                        return (B_FALSE);
        }

        /*
         * Check for an exact match.
         */
        if (zb->zb_objset == record->zi_objset &&
            zb->zb_object == record->zi_object &&
            zb->zb_level == record->zi_level &&
            zb->zb_blkid >= record->zi_start &&
            zb->zb_blkid <= record->zi_end &&
            error == record->zi_error)
                return (record->zi_freq == 0 ||
                    spa_get_random(100) < record->zi_freq);

        return (B_FALSE);
}

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag)
{
        inject_handler_t *handler;

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {

                if (spa != handler->zi_spa)
                        continue;

                if (strcmp(tag, handler->zi_record.zi_func) == 0)
                        panic("Panic requested in function %s\n", tag);
        }

        rw_exit(&inject_lock);
}

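/*
 * Example: callers are expected to invoke zio_handle_panic_injection() at
 * interesting configuration-change points, passing their own function name
 * as the tag. A minimal sketch of such a caller follows; the function
 * spa_example_config_change() is hypothetical and used only for
 * illustration:
 *
 *      void
 *      spa_example_config_change(spa_t *spa)
 *      {
 *              zio_handle_panic_injection(spa, "spa_example_config_change");
 *              ... perform the configuration change ...
 *      }
 *
 * A handler whose zi_record.zi_func matches the tag panics the system at
 * exactly that point.
 */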

/*
 * Determine if the I/O in question should return failure. Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
        int ret = 0;
        inject_handler_t *handler;

        /*
         * Ignore I/O not associated with any logical data.
         */
        if (zio->io_logical == NULL)
                return (0);

        /*
         * Currently, we only support fault injection on reads.
         */
        if (zio->io_type != ZIO_TYPE_READ)
                return (0);

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {

                /* Ignore errors not destined for this pool */
                if (zio->io_spa != handler->zi_spa)
                        continue;

                /* Ignore device errors and panic injection */
                if (handler->zi_record.zi_guid != 0 ||
                    handler->zi_record.zi_func[0] != '\0')
                        continue;

                /* If this handler matches, return the specified error */
                if (zio_match_handler(&zio->io_logical->io_bookmark,
                    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
                    &handler->zi_record, error)) {
                        ret = error;
                        break;
                }
        }

        rw_exit(&inject_lock);

        return (ret);
}

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region of
 * the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
        inject_handler_t *handler;
        vdev_t *vd = zio->io_vd;
        uint64_t offset = zio->io_offset;
        int label;
        int ret = 0;

        if (offset >= VDEV_LABEL_START_SIZE &&
            offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
                return (0);

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {
                uint64_t start = handler->zi_record.zi_start;
                uint64_t end = handler->zi_record.zi_end;

                /* Ignore device-only faults or panic injection */
                if (handler->zi_record.zi_start == 0 ||
                    handler->zi_record.zi_func[0] != '\0')
                        continue;

                /*
                 * The injection region is the relative offsets within a
                 * vdev label. We must determine the label which is being
                 * updated and adjust our region accordingly.
                 */
                label = vdev_label_number(vd->vdev_psize, offset);
                start = vdev_label_offset(vd->vdev_psize, label, start);
                end = vdev_label_offset(vd->vdev_psize, label, end);

                if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
                    (offset >= start && offset <= end)) {
                        ret = error;
                        break;
                }
        }
        rw_exit(&inject_lock);
        return (ret);
}

/*
 * Determine if a device-level fault should be injected for I/O to the given
 * vdev, or for a device open when zio == NULL. Returns the errno to be
 * injected, or 0 if no handler applies.
 */
int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
        inject_handler_t *handler;
        int ret = 0;

        /*
         * We skip over faults in the labels unless it's during
         * device open (i.e. zio == NULL).
         */
        if (zio != NULL) {
                uint64_t offset = zio->io_offset;

                if (offset < VDEV_LABEL_START_SIZE ||
                    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
                        return (0);
        }

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {

                /* Ignore label-specific faults or panic injection */
                if (handler->zi_record.zi_start != 0 ||
                    handler->zi_record.zi_func[0] != '\0')
                        continue;

                if (vd->vdev_guid == handler->zi_record.zi_guid) {
                        if (handler->zi_record.zi_failfast &&
                            (zio == NULL || (zio->io_flags &
                            (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
                                continue;
                        }

                        /* Handle type-specific I/O failures */
                        if (zio != NULL &&
                            handler->zi_record.zi_iotype != ZIO_TYPES &&
                            handler->zi_record.zi_iotype != zio->io_type)
                                continue;

                        if (handler->zi_record.zi_error == error) {
                                /*
                                 * For a failed open, pretend like the device
                                 * has gone away.
                                 */
                                if (error == ENXIO)
                                        vd->vdev_stat.vs_aux =
                                            VDEV_AUX_OPEN_FAILED;
                                ret = error;
                                break;
                        }
                        if (handler->zi_record.zi_error == ENXIO) {
                                ret = EIO;
                                break;
                        }
                }
        }

        rw_exit(&inject_lock);

        return (ret);
}

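/*
 * Summary of how the handler walkers above tell record types apart; this
 * follows directly from the filter checks in each function:
 *
 *      zi_func set                     -> panic injection
 *      zi_guid == 0, zi_func empty     -> logical data errors
 *      zi_guid != 0, zi_start != 0     -> label-region errors
 *      zi_guid != 0, zi_start == 0     -> device-wide errors
 */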

/*
 * Create a new handler for the given record. We add it to the list, adding
 * a reference to the spa_t in the process. We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
        inject_handler_t *handler;
        int error;
        spa_t *spa;

        /*
         * If this is pool-wide metadata, make sure we unload the corresponding
         * spa_t, so that the next attempt to load it will trigger the fault.
         * We call spa_reset() to unload the pool appropriately.
         */
        if (flags & ZINJECT_UNLOAD_SPA)
                if ((error = spa_reset(name)) != 0)
                        return (error);

        if (!(flags & ZINJECT_NULL)) {
                /*
                 * spa_inject_addref() will add an injection reference, which
                 * will prevent the pool from being removed from the namespace
                 * while still allowing it to be unloaded.
                 */
                if ((spa = spa_inject_addref(name)) == NULL)
                        return (ENOENT);

                handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

                rw_enter(&inject_lock, RW_WRITER);

                *id = handler->zi_id = inject_next_id++;
                handler->zi_spa = spa;
                handler->zi_record = *record;
                list_insert_tail(&inject_handlers, handler);
                atomic_add_32(&zio_injection_enabled, 1);

                rw_exit(&inject_lock);
        }

        /*
         * Flush the ARC, so that any attempts to read this data will end up
         * going to the ZIO layer. Note that this is a little overkill, but
         * we don't have the necessary ARC interfaces to do anything else, and
         * fault injection isn't a performance critical path.
         */
        if (flags & ZINJECT_FLUSH_ARC)
                arc_flush(NULL);

        return (0);
}

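/*
 * Example of registering a handler with zio_inject_fault(). This is only a
 * sketch: in practice the record is filled in from userland by the zinject
 * utility and reaches this function through the ZFS ioctl path, and the pool
 * name and objset/object numbers below are placeholders.
 *
 *      zinject_record_t record = { 0 };
 *      int id, error;
 *
 *      record.zi_objset = 21;          target <objset, object>
 *      record.zi_object = 8;
 *      record.zi_start = 0;            match every block ...
 *      record.zi_end = -1ULL;          ... of the object
 *      record.zi_error = EIO;          errno to inject on read
 *      record.zi_freq = 0;             0 means "match every time"
 *      error = zio_inject_fault("tank", ZINJECT_FLUSH_ARC, &id, &record);
 *
 * On success *id holds the new handler's ID, which can later be passed to
 * zio_clear_fault().
 */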

/*
 * Returns the next record with an ID greater than that supplied to the
 * function. Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
        inject_handler_t *handler;
        int ret;

        mutex_enter(&spa_namespace_lock);
        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler))
                if (handler->zi_id > *id)
                        break;

        if (handler) {
                *record = handler->zi_record;
                *id = handler->zi_id;
                (void) strncpy(name, spa_name(handler->zi_spa), buflen);
                ret = 0;
        } else {
                ret = ENOENT;
        }

        rw_exit(&inject_lock);
        mutex_exit(&spa_namespace_lock);

        return (ret);
}

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
        inject_handler_t *handler;
        int ret;

        rw_enter(&inject_lock, RW_WRITER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler))
                if (handler->zi_id == id)
                        break;

        if (handler == NULL) {
                ret = ENOENT;
        } else {
                list_remove(&inject_handlers, handler);
                spa_inject_delref(handler->zi_spa);
                kmem_free(handler, sizeof (inject_handler_t));
                atomic_add_32(&zio_injection_enabled, -1);
                ret = 0;
        }

        rw_exit(&inject_lock);

        return (ret);
}

void
zio_inject_init(void)
{
        rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
        list_create(&inject_handlers, sizeof (inject_handler_t),
            offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
        list_destroy(&inject_handlers);
        rw_destroy(&inject_lock);
}
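
/*
 * Note on zio_injection_enabled: it is a count of active handlers and acts
 * as the global switch for the framework. Code in the I/O path is expected
 * to test it before calling into the handlers, so the common case of no
 * injection configured costs a single word read. A sketch of the expected
 * call pattern (illustrative, not quoted from zio.c):
 *
 *      if (zio_injection_enabled && zio->io_error == 0)
 *              zio->io_error = zio_handle_fault_injection(zio, EIO);
 */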