Lines Matching +full:fault +full:- +full:inject

29 * ZFS fault injection
31 * To handle fault injection, we keep track of a series of zinject_record_t
33 * fault. These are kept in a global list. Each record corresponds to a given
77 * This protects insertion into, and traversal of, the inject handler
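A minimal userland sketch of the pattern described above (not the kernel code): a global list of injection records guarded by a read/write lock, with registration taking the lock exclusively and the I/O path only ever traversing under the reader lock. All sketch_* names, and the use of pthreads in place of the kernel lock, are assumptions for illustration.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct sketch_record {
        uint64_t objset;        /* dataset to match */
        uint64_t object;        /* object to match */
        int error;              /* errno to inject */
} sketch_record_t;

typedef struct sketch_handler {
        int id;
        sketch_record_t record;
        struct sketch_handler *next;
} sketch_handler_t;

static sketch_handler_t *sketch_handlers;       /* global handler list */
static pthread_rwlock_t sketch_lock = PTHREAD_RWLOCK_INITIALIZER;
static int sketch_next_id = 1;

/* Register a record; insertion takes the lock exclusively. */
static int
sketch_add_handler(const sketch_record_t *rec)
{
        sketch_handler_t *h = calloc(1, sizeof (*h));

        h->record = *rec;
        pthread_rwlock_wrlock(&sketch_lock);
        h->id = sketch_next_id++;
        h->next = sketch_handlers;
        sketch_handlers = h;
        pthread_rwlock_unlock(&sketch_lock);
        return (h->id);
}

/* The I/O path only reads the list, so a reader lock is enough. */
static int
sketch_lookup_error(uint64_t objset, uint64_t object)
{
        int err = 0;

        pthread_rwlock_rdlock(&sketch_lock);
        for (sketch_handler_t *h = sketch_handlers; h != NULL; h = h->next) {
                if (h->record.objset == objset && h->record.object == object) {
                        err = h->record.error;
                        break;
                }
        }
        pthread_rwlock_unlock(&sketch_lock);
        return (err);
}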
135 if (zb->zb_objset == DMU_META_OBJSET &&
136 record->zi_objset == DMU_META_OBJSET &&
137 record->zi_object == DMU_META_DNODE_OBJECT) {
138 if (record->zi_type == DMU_OT_NONE ||
139 type == record->zi_type)
140 return (freq_triggered(record->zi_freq));
148 if (zb->zb_objset == record->zi_objset &&
149 zb->zb_object == record->zi_object &&
150 zb->zb_level == record->zi_level &&
151 zb->zb_blkid >= record->zi_start &&
152 zb->zb_blkid <= record->zi_end &&
153 (record->zi_dvas == 0 ||
154 (dva != ZI_NO_DVA && (record->zi_dvas & (1ULL << dva)))) &&
155 error == record->zi_error) {
156 return (freq_triggered(record->zi_freq));
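The block above is the heart of the matcher: a record fires only when every populated field agrees with the I/O's bookmark, DVA, and error, and then only at the configured frequency. A reduced, self-contained version of that predicate might look like the following; the struct and the percentage-based frequency roll are simplifications, not the real zinject_record_t.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define SKETCH_NO_DVA   (-1)

typedef struct sketch_match {
        uint64_t objset, object, level, start, end;
        uint64_t dvas;          /* bitmask of DVAs to match; 0 means "any" */
        int error;              /* errno this record injects */
        uint32_t freq;          /* 0 = always, else a 1..100 percent chance */
} sketch_match_t;

static bool
sketch_freq_triggered(uint32_t freq)
{
        if (freq == 0)
                return (true);
        return ((uint32_t)(rand() % 100) < freq);
}

static bool
sketch_record_matches(const sketch_match_t *r, uint64_t objset,
    uint64_t object, uint64_t level, uint64_t blkid, int dva, int error)
{
        if (r->objset != objset || r->object != object ||
            r->level != level || blkid < r->start || blkid > r->end)
                return (false);
        /* If the record names specific DVAs, the I/O's DVA must be one. */
        if (r->dvas != 0 &&
            (dva == SKETCH_NO_DVA || (r->dvas & (1ULL << dva)) == 0))
                return (false);
        if (error != r->error)
                return (false);
        return (sketch_freq_triggered(r->freq));
}

Note how unset fields act as wildcards, e.g. a zero DVA mask matches any copy, which mirrors the zi_dvas == 0 short-circuit in the original condition.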
176 if (spa != handler->zi_spa)
179 if (handler->zi_record.zi_type == type &&
180 strcmp(tag, handler->zi_record.zi_func) == 0)
188 * Inject a decryption failure. Decryption failures can occur in
203 if (spa != handler->zi_spa ||
204 handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
208 &handler->zi_record, error)) {
221 * that we end up with ZI_NO_DVA (-1) if we don't find a match.
228 if (zio->io_bp != NULL && zio->io_vd != NULL &&
229 zio->io_child_type == ZIO_CHILD_VDEV) {
230 for (i = BP_GET_NDVAS(zio->io_bp) - 1; i >= 0; i--) {
231 dva_t *dva = &zio->io_bp->blk_dva[i];
233 vdev_t *vd = vdev_lookup_top(zio->io_spa,
237 if (zio->io_vd->vdev_ops->vdev_op_leaf)
240 if (zio->io_vd == vd && zio->io_offset == off)
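The loop above walks a block's copies from the last DVA down to the first, so the result naturally falls back to ZI_NO_DVA (-1) when nothing matches the child I/O. A stripped-down version of that search, with invented types standing in for dva_t and zio_t:

#include <stdint.h>

#define SKETCH_NO_DVA   (-1)

typedef struct sketch_dva {
        uint64_t vdev;          /* top-level vdev id this copy lives on */
        uint64_t offset;        /* offset of the copy within that vdev */
} sketch_dva_t;

/*
 * Walk the copies backwards, as the code above does, so that the
 * "no match" result is simply the initial -1.
 */
static int
sketch_match_dva(const sketch_dva_t *dvas, int ndvas,
    uint64_t io_vdev, uint64_t io_offset)
{
        for (int i = ndvas - 1; i >= 0; i--) {
                if (dvas[i].vdev == io_vdev && dvas[i].offset == io_offset)
                        return (i);
        }
        return (SKETCH_NO_DVA);
}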
262 if (zio->io_logical == NULL)
266 * Currently, we only support fault injection on reads.
268 if (zio->io_type != ZIO_TYPE_READ)
274 if (zio->io_priority == ZIO_PRIORITY_REBUILD && error == ECKSUM)
281 if (zio->io_spa != handler->zi_spa ||
282 handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
286 if (zio_match_handler(&zio->io_logical->io_bookmark,
287 zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
288 zio_match_dva(zio), &handler->zi_record, error)) {
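Putting the pieces together, the read-path lookup is a filter-then-match walk over the registered handlers: skip handlers bound to another pool or command, delegate the bookmark/DVA comparison, and return the recorded errno on the first hit. A schematic version, where the types and the match callback are stand-ins rather than the kernel interfaces:

#include <stdbool.h>
#include <stdint.h>

typedef struct sk_handler {
        uint64_t spa_guid;      /* pool this handler is bound to */
        int cmd;                /* e.g. a DATA_FAULT-style command */
        int error;              /* errno to inject */
        struct sk_handler *next;
} sk_handler_t;

/* Caller-supplied predicate deciding whether this I/O matches the record. */
typedef bool (*sk_match_fn)(const sk_handler_t *, const void *io);

static int
sk_handle_fault(const sk_handler_t *list, uint64_t spa_guid, int cmd,
    const void *io, sk_match_fn match)
{
        for (const sk_handler_t *h = list; h != NULL; h = h->next) {
                if (h->spa_guid != spa_guid || h->cmd != cmd)
                        continue;
                if (match(h, io))
                        return (h->error);      /* inject this errno */
        }
        return (0);                             /* no handler fired */
}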
309 vdev_t *vd = zio->io_vd;
310 uint64_t offset = zio->io_offset;
315 offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
322 uint64_t start = handler->zi_record.zi_start;
323 uint64_t end = handler->zi_record.zi_end;
325 if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
333 label = vdev_label_number(vd->vdev_psize, offset);
334 start = vdev_label_offset(vd->vdev_psize, label, start);
335 end = vdev_label_offset(vd->vdev_psize, label, end);
337 if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
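Label injection works in label-relative coordinates: the handler's start/end are offsets within a single label, so they are mapped to absolute device offsets for whichever label the I/O touches. A sketch of that mapping, assuming the usual layout of four 256 KiB labels, two at the front of the device and two at the end; treat the constants as illustrative rather than authoritative:

#include <stdint.h>

#define SK_LABELS       4
#define SK_LABEL_SIZE   (256ULL * 1024)

/* Absolute offset of byte 'offset' within label 'l' of a device of size psize. */
static uint64_t
sketch_label_offset(uint64_t psize, int l, uint64_t offset)
{
        return (offset + l * SK_LABEL_SIZE +
            (l < SK_LABELS / 2 ? 0 : psize - SK_LABELS * SK_LABEL_SIZE));
}

/* Which label (0..3) an absolute offset falls in, or -1 if it is not in one. */
static int
sketch_label_number(uint64_t psize, uint64_t offset)
{
        for (int l = 0; l < SK_LABELS; l++) {
                uint64_t start = sketch_label_offset(psize, l, 0);
                if (offset >= start && offset < start + SK_LABEL_SIZE)
                        return (l);
        }
        return (-1);
}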
354 ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
372 if (zio != NULL && zio->io_type != ZIO_TYPE_FLUSH) {
373 uint64_t offset = zio->io_offset;
376 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
385 if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
388 if (vd->vdev_guid == handler->zi_record.zi_guid) {
389 if (handler->zi_record.zi_failfast &&
390 (zio == NULL || (zio->io_flags &
397 handler->zi_record.zi_iotype != ZIO_TYPES &&
398 handler->zi_record.zi_iotype != zio->io_type)
401 if (handler->zi_record.zi_error == err1 ||
402 handler->zi_record.zi_error == err2) {
406 if (!freq_triggered(handler->zi_record.zi_freq))
414 vd->vdev_stat.vs_aux =
422 if (!handler->zi_record.zi_failfast &&
424 zio->io_flags |= ZIO_FLAG_IO_RETRY;
429 if (handler->zi_record.zi_error == EILSEQ) {
434 (void) abd_iterate_func(zio->io_abd, 0,
435 zio->io_size, zio_inject_bitflip_cb,
440 ret = handler->zi_record.zi_error;
443 if (handler->zi_record.zi_error == ENXIO) {
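For EILSEQ the injector does not return an error at all; it corrupts a single bit of the buffer in flight so that checksum verification catches the damage, which is what the abd_iterate_func() call with the bit-flip callback accomplishes. A stand-alone equivalent over a plain byte buffer might look like this, with hypothetical names and rand() in place of the kernel's random source:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Flip one randomly chosen bit somewhere in buf[0..size). */
static void
sketch_flip_one_bit(uint8_t *buf, size_t size)
{
        if (size == 0)
                return;
        size_t byte = (size_t)rand() % size;    /* pick a byte ... */
        int bit = rand() % 8;                   /* ... and a bit within it */
        buf[byte] ^= (uint8_t)(1u << bit);
}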
482 if (zio->io_spa != handler->zi_spa ||
483 handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
490 if (handler->zi_record.zi_timer == 0) {
491 if (handler->zi_record.zi_duration > 0)
492 handler->zi_record.zi_timer = ddi_get_lbolt64();
494 handler->zi_record.zi_timer = zio->io_txg;
499 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
519 if (spa != handler->zi_spa ||
520 handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
523 if (handler->zi_record.zi_duration > 0) {
524 VERIFY(handler->zi_record.zi_timer == 0 ||
526 (int64_t)handler->zi_record.zi_timer +
527 handler->zi_record.zi_duration * hz,
531 VERIFY(handler->zi_record.zi_timer == 0 ||
532 handler->zi_record.zi_timer -
533 handler->zi_record.zi_duration >=
544 vdev_t *vd = zio->io_vd;
560 * If there aren't any inject delay handlers registered, then we
572 * Each inject handler has a number of "lanes" associated with
574 * another, and at a latency defined by the inject handler
584 * threads being assigned to the same lane of a given inject
601 if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
604 if (!freq_triggered(handler->zi_record.zi_freq))
607 if (vd->vdev_guid != handler->zi_record.zi_guid)
610 /* also match on I/O type (e.g., -T read) */
611 if (handler->zi_record.zi_iotype != ZIO_TYPES &&
612 handler->zi_record.zi_iotype != zio->io_type) {
620 ASSERT3P(handler->zi_lanes, !=, NULL);
626 ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);
628 ASSERT3U(handler->zi_record.zi_nlanes, >,
629 handler->zi_next_lane);
657 hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
658 hrtime_t busy = handler->zi_record.zi_timer +
659 handler->zi_lanes[handler->zi_next_lane];
693 min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;
700 min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
701 min_handler->zi_record.zi_nlanes;
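The lane arithmetic above reduces to: hold the I/O until max(now, lane-busy-time) plus the injected latency, record that target as the lane's new busy time, and advance the round-robin cursor. A self-contained rendering of that per-handler step; the real code additionally takes the minimum target across all matching handlers, and the names and clock source here are illustrative:

#include <stdint.h>
#include <time.h>

typedef struct sketch_delay {
        uint64_t latency_ns;    /* injected per-I/O latency */
        uint32_t nlanes;        /* number of concurrent lanes */
        uint32_t next_lane;     /* round-robin cursor */
        uint64_t *lanes;        /* nlanes "busy until" timestamps */
} sketch_delay_t;

static uint64_t
sketch_now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec);
}

/* Returns the absolute time this I/O should be held until. */
static uint64_t
sketch_delay_target(sketch_delay_t *d)
{
        uint64_t now = sketch_now_ns();
        uint64_t idle = now + d->latency_ns;
        uint64_t busy = d->lanes[d->next_lane] + d->latency_ns;
        uint64_t target = (idle > busy) ? idle : busy;

        d->lanes[d->next_lane] = target;
        d->next_lane = (d->next_lane + 1) % d->nlanes;
        return (target);
}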
720 handler != NULL && handler->zi_record.zi_cmd == command;
722 ASSERT3P(handler->zi_spa_name, !=, NULL);
723 if (strcmp(spa_name(spa), handler->zi_spa_name) == 0) {
725 SEC2NSEC(handler->zi_record.zi_duration);
727 delay = pause - elapsed;
729 id = handler->zi_id;
744 /* all done with this one-shot handler */
750 * For testing, inject a delay during an import
759 * For testing, inject a delay during an export
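The import/export pause is a one-shot: the handler carries a duration in seconds, the hook sleeps for whatever part of that duration has not already elapsed, and the handler is then removed so it cannot fire twice. A userland sketch of the remaining-time calculation, with nanosleep() standing in for the kernel delay primitive:

#include <stdint.h>
#include <time.h>

#define SK_SEC2NSEC(s)  ((uint64_t)(s) * 1000000000ULL)

static void
sketch_pause_remaining(uint64_t duration_sec, uint64_t elapsed_ns)
{
        uint64_t pause_ns = SK_SEC2NSEC(duration_sec);

        if (elapsed_ns >= pause_ns)
                return;                 /* already waited long enough */

        uint64_t delay_ns = pause_ns - elapsed_ns;
        struct timespec ts = {
                .tv_sec = (time_t)(delay_ns / 1000000000ULL),
                .tv_nsec = (long)(delay_ns % 1000000000ULL),
        };
        (void) nanosleep(&ts, NULL);
}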
783 error = dsl_dataset_hold_obj(dp, record->zi_objset, FTAG, &ds);
793 error = dnode_hold(os, record->zi_object, FTAG, &dn);
800 if (record->zi_start != 0 || record->zi_end != -1ULL) {
801 record->zi_start >>= dn->dn_datablkshift;
802 record->zi_end >>= dn->dn_datablkshift;
804 if (record->zi_level > 0) {
805 if (record->zi_level >= dn->dn_nlevels) {
810 if (record->zi_start != 0 || record->zi_end != 0) {
811 int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
813 for (int level = record->zi_level; level > 0; level--) {
814 record->zi_start >>= shift;
815 record->zi_end >>= shift;
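The conversion above turns a byte range into block ids in two steps: shift by the dnode's data block shift to get level-0 block ids, then, for an injection aimed at indirect level N, shift once more per level by log2(block pointers per indirect block). A simplified version, with the shift values passed in as placeholders for dn_datablkshift, dn_indblkshift and SPA_BLKPTRSHIFT and without the "whole object" special cases:

#include <stdint.h>

static void
sketch_range_to_blkids(uint64_t *start, uint64_t *end, int level,
    int datablkshift, int indblkshift, int blkptrshift)
{
        /* Bytes -> level-0 block ids. */
        *start >>= datablkshift;
        *end >>= datablkshift;

        /* Level-0 ids -> ids at the requested indirect level. */
        int epbshift = indblkshift - blkptrshift;  /* log2(ptrs per indirect) */
        for (int l = level; l > 0; l--) {
                *start >>= epbshift;
                *end >>= epbshift;
        }
}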
832 if (command != handler->zi_record.zi_cmd)
835 const char *pool = (handler->zi_spa_name != NULL) ?
836 handler->zi_spa_name : spa_name(handler->zi_spa);
849 * which is the switch to trigger all fault injection.
859 * If this is pool-wide metadata, make sure we unload the corresponding
860 * spa_t, so that the next attempt to load it will trigger the fault.
867 if (record->zi_cmd == ZINJECT_DELAY_IO) {
872 if (record->zi_timer == 0 || record->zi_nlanes == 0)
881 if (record->zi_nlanes >= UINT16_MAX)
886 * If the supplied range was in bytes -- calculate the actual blkid
900 if (record->zi_cmd == ZINJECT_DELAY_IMPORT ||
901 record->zi_cmd == ZINJECT_DELAY_EXPORT) {
902 if (record->zi_duration <= 0)
907 if (zio_pool_handler_exists(name, record->zi_cmd))
914 if (record->zi_cmd == ZINJECT_DELAY_IMPORT && has_spa)
916 if (record->zi_cmd == ZINJECT_DELAY_EXPORT && !has_spa)
931 handler->zi_spa = spa; /* note: can be NULL */
932 handler->zi_record = *record;
934 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
935 handler->zi_lanes = kmem_zalloc(
936 sizeof (*handler->zi_lanes) *
937 handler->zi_record.zi_nlanes, KM_SLEEP);
938 handler->zi_next_lane = 0;
940 handler->zi_lanes = NULL;
941 handler->zi_next_lane = 0;
944 if (handler->zi_spa == NULL)
945 handler->zi_spa_name = spa_strdup(name);
947 handler->zi_spa_name = NULL;
957 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
963 *id = handler->zi_id = inject_next_id++;
974 * fault injection isn't a performance critical path.
1002 if (handler->zi_id > *id)
1006 *record = handler->zi_record;
1007 *id = handler->zi_id;
1008 ASSERT(handler->zi_spa || handler->zi_spa_name);
1009 if (handler->zi_spa != NULL)
1010 (void) strlcpy(name, spa_name(handler->zi_spa), buflen);
1012 (void) strlcpy(name, handler->zi_spa_name, buflen);
1025 * Clear the fault handler with the given identifier, or return ENOENT if none
1037 if (handler->zi_id == id)
1045 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
1047 inject_delay_count--;
1054 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
1055 ASSERT3P(handler->zi_lanes, !=, NULL);
1056 kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
1057 handler->zi_record.zi_nlanes);
1059 ASSERT3P(handler->zi_lanes, ==, NULL);
1062 if (handler->zi_spa_name != NULL)
1063 spa_strfree(handler->zi_spa_name);
1065 if (handler->zi_spa != NULL)
1066 spa_inject_delref(handler->zi_spa);
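Clearing a handler is the mirror image of registration: locate the entry by id under the exclusive lock, unlink it, and release its per-lane state (and, in the kernel, the spa hold) only after dropping the lock. A schematic version that returns ENOENT when the id is unknown; the structures here are illustrative, not the kernel ones:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct sk_inj {
        int id;
        uint64_t *lanes;        /* non-NULL only for delay handlers */
        uint32_t nlanes;
        struct sk_inj *next;
} sk_inj_t;

static int
sk_clear_handler(sk_inj_t **listp, int id)
{
        sk_inj_t *h, **prevp = listp;

        /* In the kernel this walk happens with the handler lock held. */
        for (h = *listp; h != NULL; prevp = &h->next, h = h->next) {
                if (h->id == id)
                        break;
        }
        if (h == NULL)
                return (ENOENT);

        *prevp = h->next;       /* unlink while "locked" */

        /* Free per-lane bookkeeping (delay handlers only), then the handler. */
        if (h->lanes != NULL)
                free(h->lanes);
        free(h);
        return (0);
}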