/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault. These are kept in a global list. Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field. If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */
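
/*
 * As a rough sketch of the flow (field values here are hypothetical):
 * a consumer such as zinject(8) fills in a zinject_record_t and
 * registers it via zio_inject_fault() below, e.g.
 *
 *	zinject_record_t rec = { 0 };
 *	rec.zi_cmd = ZINJECT_DATA_FAULT;
 *	rec.zi_objset = 21;
 *	rec.zi_object = 8;
 *	rec.zi_start = 0;
 *	rec.zi_end = -1ULL;
 *	rec.zi_error = EIO;
 *	int id;
 *	error = zio_inject_fault("tank", 0, &id, &rec);
 *
 * Matching reads then fail with EIO via zio_handle_fault_injection().
 */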

#include <sys/arc.h>
#include <sys/zio_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled = 0;
/*
 * Data describing each zinject handler registered on the system; this
 * includes the list node linking the handler into the global zinject
 * handler list.
 */
typedef struct inject_handler {
	int			zi_id;
	spa_t			*zi_spa;
	zinject_record_t	zi_record;
	uint64_t		*zi_lanes;
	int			zi_next_lane;
	list_node_t		zi_link;
} inject_handler_t;

/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;

/*
 * This protects insertion into, and traversal of, the inject handler
 * list defined above; as well as the inject_delay_count. Any time a
 * handler is inserted or removed from the list, this lock should be
 * taken as a RW_WRITER; and any time traversal is done over the list
 * (without modification to it) this lock should be taken as a RW_READER.
 */
static krwlock_t inject_lock;

/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above. Thus, modifications to this count must be made while holding
 * the inject_lock as RW_WRITER, and reads of this count must hold it
 * (at least) as RW_READER.
 */
static int inject_delay_count = 0;

/*
 * This lock is used only in zio_handle_io_delay(); refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;

/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;

/*
 * Test if the requested frequency was triggered
 */
static boolean_t
freq_triggered(uint32_t frequency)
{
	/*
	 * zero implies always (100%)
	 */
	if (frequency == 0)
		return (B_TRUE);

	/*
	 * Note: we still handle legacy (unscaled) frequency values
	 */
	uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

	return (spa_get_random(maximum) < frequency);
}
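
/*
 * For illustration (worked numbers): a legacy value of frequency == 25
 * is compared against maximum == 100 and so fires on roughly 25% of
 * calls, while a scaled value of ZI_PERCENTAGE_MAX / 4 is compared
 * against ZI_PERCENTAGE_MAX and fires at the same rate.
 */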

/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(zbookmark_phys_t *zb, uint64_t type, int dva,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (freq_triggered(record->zi_freq));
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    (record->zi_dvas == 0 || (record->zi_dvas & (1ULL << dva))) &&
	    error == record->zi_error) {
		return (freq_triggered(record->zi_freq));
	}

	return (B_FALSE);
}
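
/*
 * For illustration: a record with zi_dvas == 0x5 (a hypothetical value)
 * matches only I/Os for the copies stored in DVAs 0 and 2, since the
 * check above tests bit (1ULL << dva); zi_dvas == 0 matches any DVA.
 */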

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0)
			panic("Panic requested in function %s\n", tag);
	}

	rw_exit(&inject_lock);
}


/*
 * If this is a physical I/O for a vdev child, determine which DVA it is
 * for. We iterate backwards through the DVAs matching on the offset so
 * that we end up with ZI_NO_DVA (-1) if we don't find a match.
 */
static int
zio_match_dva(zio_t *zio)
{
	int i = ZI_NO_DVA;

	if (zio->io_bp != NULL && zio->io_vd != NULL &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		for (i = BP_GET_NDVAS(zio->io_bp) - 1; i >= 0; i--) {
			dva_t *dva = &zio->io_bp->blk_dva[i];
			uint64_t off = DVA_GET_OFFSET(dva);
			vdev_t *vd = vdev_lookup_top(zio->io_spa,
			    DVA_GET_VDEV(dva));

			/* Compensate for vdev label added to leaves */
			if (zio->io_vd->vdev_ops->vdev_op_leaf)
				off += VDEV_LABEL_START_SIZE;

			if (zio->io_vd == vd && zio->io_offset == off)
				break;
		}
	}

	return (i);
}


/*
 * Inject a decryption failure. Decryption failures can occur in
 * both the ARC and the ZIO layers.
 */
int
zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
    uint64_t type, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
			continue;

		if (zio_match_handler((zbookmark_phys_t *)zb, type, ZI_NO_DVA,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}

/*
 * Determine if the I/O in question should return failure. Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return the specified error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    zio_match_dva(zio), &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region
 * of the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
			continue;

		/*
		 * The injection region is the relative offsets within a
		 * vdev label. We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}
	rw_exit(&inject_lock);
	return (ret);
}


int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during
	 * device open (i.e. zio == NULL).
	 */
	if (zio != NULL) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL &&
			    handler->zi_record.zi_iotype != ZIO_TYPES &&
			    handler->zi_record.zi_iotype != zio->io_type)
				continue;

			if (handler->zi_record.zi_error == error) {
				/*
				 * limit error injection if requested
				 */
				if (!freq_triggered(handler->zi_record.zi_freq))
					continue;

				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (error == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;

				/*
				 * Treat these errors as if they had been
				 * retried so that all the appropriate stats
				 * and FMA events are generated.
				 */
				if (!handler->zi_record.zi_failfast &&
				    zio != NULL)
					zio->io_flags |= ZIO_FLAG_IO_RETRY;

				ret = error;
				break;
			}
			if (handler->zi_record.zi_error == ENXIO) {
				ret = SET_ERROR(EIO);
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Simulate hardware that ignores cache flushes. For the requested
 * number of seconds, nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (spa_get_random(100) < 60)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}

void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz >
			    ddi_get_lbolt64());
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}

hrtime_t
zio_handle_io_delay(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	inject_handler_t *min_handler = NULL;
	hrtime_t min_target = 0;

	rw_enter(&inject_lock, RW_READER);

	/*
	 * inject_delay_count is a subset of zio_injection_enabled that
	 * is only incremented for delay handlers. These checks are
	 * mainly added to remind the reader why we're not explicitly
	 * checking zio_injection_enabled like the other functions.
	 */
	IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
	IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

	/*
	 * If there aren't any inject delay handlers registered, then we
	 * can short circuit and simply return 0 here. A value of zero
	 * informs zio_delay_interrupt() that this request should not be
	 * delayed. This short circuit keeps us from acquiring the
	 * inject_delay_mtx unnecessarily.
	 */
	if (inject_delay_count == 0) {
		rw_exit(&inject_lock);
		return (0);
	}

	/*
	 * Each inject handler has a number of "lanes" associated with
	 * it. Each lane is able to handle requests independently of one
	 * another, and at a latency defined by the inject handler
	 * record's zi_timer field. Thus, if a handler is configured with
	 * a single lane and a 10ms latency, it will delay requests
	 * such that only a single request is completed every 10ms. So,
	 * if more than one request is attempted per each 10ms interval,
	 * the average latency of the requests will be greater than
	 * 10ms; but if only a single request is submitted each 10ms
	 * interval the average latency will be 10ms.
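	 *
	 * As a worked example (hypothetical numbers): with a single
	 * lane and zi_timer == 10ms, three requests submitted at the
	 * same time are targeted to complete at now+10ms, now+20ms,
	 * and now+30ms, since each request queues behind the time its
	 * lane next becomes idle.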
	 *
	 * We need to acquire this mutex to prevent multiple concurrent
	 * threads being assigned to the same lane of a given inject
	 * handler. The mutex allows us to perform the following two
	 * operations atomically:
	 *
	 *	1. determine the minimum handler and minimum target
	 *	   value of all the possible handlers
	 *	2. update that minimum handler's lane array
	 *
	 * Without atomicity, two (or more) threads could pick the same
	 * lane in step (1), and then conflict with each other in step
	 * (2). This could allow a single lane handler to process
	 * multiple requests simultaneously, which shouldn't be possible.
	 */
	mutex_enter(&inject_delay_mtx);

	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
			continue;

		if (!freq_triggered(handler->zi_record.zi_freq))
			continue;

		if (vd->vdev_guid != handler->zi_record.zi_guid)
			continue;

		/*
		 * Defensive; should never happen as the array allocation
		 * occurs prior to inserting this handler on the list.
		 */
		ASSERT3P(handler->zi_lanes, !=, NULL);

		/*
		 * This should never happen; the zinject command should
		 * prevent a user from setting an IO delay with zero lanes.
		 */
		ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

		ASSERT3U(handler->zi_record.zi_nlanes, >,
		    handler->zi_next_lane);

		/*
		 * We want to issue this IO to the lane that will become
		 * idle the soonest, so we compare the soonest this
		 * specific handler can complete the IO with all other
		 * handlers, to find the lowest value of all possible
		 * lanes. We then use this lane to submit the request.
		 *
		 * Since each handler has a constant value for its
		 * delay, we can just use the "next" lane for that
		 * handler; as it will always be the lane with the
		 * lowest value for that particular handler (i.e. the
		 * lane that will become idle the soonest). This saves a
		 * scan of each handler's lanes array.
		 *
		 * There are two cases to consider when determining when
		 * this specific IO request should complete. If this
		 * lane is idle, we want to "submit" the request now so
		 * it will complete after zi_timer milliseconds. Thus,
		 * we set the target to now + zi_timer.
		 *
		 * If the lane is busy, we want this request to complete
		 * zi_timer milliseconds after the lane becomes idle.
		 * Since the 'zi_lanes' array holds the time at which
		 * each lane will become idle, we use that value to
		 * determine when this request should complete.
		 */
		hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
		hrtime_t busy = handler->zi_record.zi_timer +
		    handler->zi_lanes[handler->zi_next_lane];
		hrtime_t target = MAX(idle, busy);

		if (min_handler == NULL) {
			min_handler = handler;
			min_target = target;
			continue;
		}

		ASSERT3P(min_handler, !=, NULL);
		ASSERT3U(min_target, !=, 0);

		/*
		 * We don't yet increment the "next lane" variable since
		 * we still might find a lower value lane in another
		 * handler during any remaining iterations. Once we're
		 * sure we've selected the absolute minimum, we'll claim
		 * the lane and increment the handler's "next lane"
		 * field below.
		 */

		if (target < min_target) {
			min_handler = handler;
			min_target = target;
		}
	}

	/*
	 * 'min_handler' will be NULL if no IO delays are registered for
	 * this vdev, otherwise it will point to the handler containing
	 * the lane that will become idle the soonest.
	 */
	if (min_handler != NULL) {
		ASSERT3U(min_target, !=, 0);
		min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

		/*
		 * If we've used all possible lanes for this handler,
		 * loop back and start using the first lane again;
		 * otherwise, just increment the lane index.
		 */
		min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
		    min_handler->zi_record.zi_nlanes;
	}

	mutex_exit(&inject_delay_mtx);
	rw_exit(&inject_lock);

	return (min_target);
}

static int
zio_calculate_range(const char *pool, zinject_record_t *record)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	objset_t *os = NULL;
	dnode_t *dn = NULL;
	int error;

	/*
	 * Obtain the dnode for object using pool, objset, and object
	 */
	error = dsl_pool_hold(pool, FTAG, &dp);
	if (error)
		return (error);

	error = dsl_dataset_hold_obj(dp, record->zi_objset, FTAG, &ds);
	dsl_pool_rele(dp, FTAG);
	if (error)
		return (error);

	error = dmu_objset_from_ds(ds, &os);
	dsl_dataset_rele(ds, FTAG);
	if (error)
		return (error);

	error = dnode_hold(os, record->zi_object, FTAG, &dn);
	if (error)
		return (error);

	/*
	 * Translate the range into block IDs
	 */
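	/*
	 * For illustration (hypothetical sizes): with 128K data blocks,
	 * dn_datablkshift is 17, so a byte range of [0, 262143] becomes
	 * level-0 block IDs [0, 1] after the shifts below.
	 */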
	if (record->zi_start != 0 || record->zi_end != -1ULL) {
		record->zi_start >>= dn->dn_datablkshift;
		record->zi_end >>= dn->dn_datablkshift;
	}
	if (record->zi_level > 0) {
		if (record->zi_level >= dn->dn_nlevels) {
			dnode_rele(dn, FTAG);
			return (SET_ERROR(EDOM));
		}

		if (record->zi_start != 0 || record->zi_end != 0) {
			int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			for (int level = record->zi_level; level > 0; level--) {
				record->zi_start >>= shift;
				record->zi_end >>= shift;
			}
		}
	}

	dnode_rele(dn, FTAG);
	return (0);
}

/*
 * Create a new handler for the given record. We add it to the list, adding
 * a reference to the spa_t in the process. We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (record->zi_cmd == ZINJECT_DELAY_IO) {
		/*
		 * A value of zero for the number of lanes or for the
		 * delay time doesn't make sense.
		 */
		if (record->zi_timer == 0 || record->zi_nlanes == 0)
			return (SET_ERROR(EINVAL));

		/*
		 * The number of lanes is directly mapped to the size of
		 * an array used by the handler. Thus, to ensure the
		 * user doesn't trigger an allocation that's "too large"
		 * we cap the number of lanes here.
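		 * (For scale: zi_lanes entries are 8 bytes each, so the
		 * UINT16_MAX cap bounds the allocation at roughly 512K.)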
		 */
		if (record->zi_nlanes >= UINT16_MAX)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * If the supplied range was in bytes -- calculate the actual blkid
	 */
	if (flags & ZINJECT_CALC_RANGE) {
		error = zio_calculate_range(name, record);
		if (error != 0)
			return (error);
	}

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_addref() will add an injection reference,
		 * which will prevent the pool from being removed from the
		 * namespace while still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (SET_ERROR(ENOENT));

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		handler->zi_spa = spa;
		handler->zi_record = *record;

		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			handler->zi_lanes = kmem_zalloc(
			    sizeof (*handler->zi_lanes) *
			    handler->zi_record.zi_nlanes, KM_SLEEP);
			handler->zi_next_lane = 0;
		} else {
			handler->zi_lanes = NULL;
			handler->zi_next_lane = 0;
		}

		rw_enter(&inject_lock, RW_WRITER);

		/*
		 * We can't move this increment into the conditional
		 * above because we need to hold the RW_WRITER lock of
		 * inject_lock, and we don't want to hold that while
		 * allocating the handler's zi_lanes array.
		 */
		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			ASSERT3S(inject_delay_count, >=, 0);
			inject_delay_count++;
			ASSERT3S(inject_delay_count, >, 0);
		}

		*id = handler->zi_id = inject_next_id++;
		list_insert_tail(&inject_handlers, handler);
		atomic_inc_32(&zio_injection_enabled);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer. Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		/*
		 * We must use FALSE to ensure arc_flush returns, since
		 * we're not preventing concurrent ARC insertions.
		 */
		arc_flush(NULL, FALSE);

	return (0);
}

/*
 * Returns the next record with an ID greater than that supplied to the
 * function. Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = SET_ERROR(ENOENT);
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3S(inject_delay_count, >, 0);
		inject_delay_count--;
		ASSERT3S(inject_delay_count, >=, 0);
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3P(handler->zi_lanes, !=, NULL);
		kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
		    handler->zi_record.zi_nlanes);
	} else {
		ASSERT3P(handler->zi_lanes, ==, NULL);
	}

	spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_dec_32(&zio_injection_enabled);

	return (0);
}

void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	mutex_destroy(&inject_delay_mtx);
	rw_destroy(&inject_lock);
}