Lines matching "two" and "lane" (full-text search of OpenZFS module/zfs/zio_inject.c)
9 * or https://opensource.org/licenses/CDDL-1.0.
25 * Copyright (c) 2024-2025, Klara, Inc.
138 if (zb->zb_objset == DMU_META_OBJSET &&
139 record->zi_objset == DMU_META_OBJSET &&
140 record->zi_object == DMU_META_DNODE_OBJECT) {
141 if (record->zi_type == DMU_OT_NONE ||
142 type == record->zi_type)
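
This arm matches pool-wide metadata: DMU_META_OBJSET and DMU_META_DNODE_OBJECT are both 0, and a zi_type of DMU_OT_NONE acts as a wildcard over object types. A hypothetical record that matches any meta-dnode block:

        record->zi_objset = DMU_META_OBJSET;            /* 0 */
        record->zi_object = DMU_META_DNODE_OBJECT;      /* 0 */
        record->zi_type = DMU_OT_NONE;                  /* any object type */
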
150 if (zb->zb_objset == record->zi_objset &&
151 zb->zb_object == record->zi_object &&
152 zb->zb_level == record->zi_level &&
153 zb->zb_blkid >= record->zi_start &&
154 zb->zb_blkid <= record->zi_end &&
155 (record->zi_dvas == 0 ||
156 (dva != ZI_NO_DVA && (record->zi_dvas & (1ULL << dva)))) &&
157 error == record->zi_error) {
164 record->zi_match_count++;
165 injected = freq_triggered(record->zi_freq);
169 record->zi_inject_count++;
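
zi_dvas is a bitmask over DVA indices, so a handler can target specific copies of a block; zi_dvas == 0 matches any DVA. A worked example of the test above, against a hypothetical record:

        /* inject only when the I/O targets the first or third copy */
        record->zi_dvas = (1ULL << 0) | (1ULL << 2);    /* mask 0x5 */

        /* dva == 1: (0x5 & (1ULL << 1)) == 0, no match */
        /* dva == 2: (0x5 & (1ULL << 2)) != 0, match */
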
188 if (spa != handler->zi_spa)
191 if (handler->zi_record.zi_type == type &&
192 strcmp(tag, handler->zi_record.zi_func) == 0) {
193 handler->zi_record.zi_match_count++;
194 handler->zi_record.zi_inject_count++;
218 if (spa != handler->zi_spa ||
219 handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
223 &handler->zi_record, error)) {
236 * that we end up with ZI_NO_DVA (-1) if we don't find a match.
243 if (zio->io_bp != NULL && zio->io_vd != NULL &&
244 zio->io_child_type == ZIO_CHILD_VDEV) {
245 for (i = BP_GET_NDVAS(zio->io_bp) - 1; i >= 0; i--) {
246 dva_t *dva = &zio->io_bp->blk_dva[i];
248 vdev_t *vd = vdev_lookup_top(zio->io_spa,
252 if (zio->io_vd->vdev_ops->vdev_op_leaf)
255 if (zio->io_vd == vd && zio->io_offset == off)
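
The leaf-vdev adjustment above is needed because DVA offsets are relative to the start of allocatable space, while zio->io_offset on a leaf is a raw device offset. Illustrative layout math (constants per the ZFS on-disk format; treat exact values as an assumption):

        /*
         * Two 256 KB front labels (L0, L1) plus a 3.5 MB boot region
         * precede allocatable space, so DVA offset 0 corresponds to
         * device offset 4 MB on a leaf.
         */
        uint64_t front = 2 * (256ULL << 10) + (7ULL << 19);     /* 4 MB */
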
277 if (zio->io_logical == NULL)
283 if (zio->io_type != ZIO_TYPE_READ)
289 if (zio->io_priority == ZIO_PRIORITY_REBUILD && error == ECKSUM)
296 if (zio->io_spa != handler->zi_spa ||
297 handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
301 if (zio_match_handler(&zio->io_logical->io_bookmark,
302 zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
303 zio_match_dva(zio), &handler->zi_record, error)) {
324 vdev_t *vd = zio->io_vd;
325 uint64_t offset = zio->io_offset;
330 offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
337 uint64_t start = handler->zi_record.zi_start;
338 uint64_t end = handler->zi_record.zi_end;
340 if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
348 label = vdev_label_number(vd->vdev_psize, offset);
349 start = vdev_label_offset(vd->vdev_psize, label, start);
350 end = vdev_label_offset(vd->vdev_psize, label, end);
352 if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
354 handler->zi_record.zi_match_count++;
355 handler->zi_record.zi_inject_count++;
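
vdev_label_offset() turns a label-relative offset into a device offset: labels 0 and 1 sit at the front of the device, labels 2 and 3 at the tail. A sketch of that mapping (assumed to mirror the formula in vdev_label.c; verify against the source):

        /* illustrative: 256 KB labels, 4 labels total */
        uint64_t label_sz = 256ULL << 10;
        uint64_t dev_off = offset + label * label_sz +
            (label < 2 ? 0 : psize - 4 * label_sz);

        /* label 2, offset 0, psize 1 GB: dev_off = psize - 512 KB */
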
371 ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
390 if (zio->io_flags & ZIO_FLAG_PROBE)
395 return (iotype == zio->io_type);
415 if (zio != NULL && zio->io_type != ZIO_TYPE_FLUSH &&
416 !(zio->io_flags & ZIO_FLAG_PROBE)) {
417 uint64_t offset = zio->io_offset;
420 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
429 if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
432 if (vd->vdev_guid == handler->zi_record.zi_guid) {
433 if (handler->zi_record.zi_failfast &&
434 (zio == NULL || (zio->io_flags &
441 handler->zi_record.zi_iotype))
444 if (handler->zi_record.zi_error == err1 ||
445 handler->zi_record.zi_error == err2) {
446 handler->zi_record.zi_match_count++;
451 if (!freq_triggered(handler->zi_record.zi_freq))
454 handler->zi_record.zi_inject_count++;
461 vd->vdev_stat.vs_aux =
469 if (!handler->zi_record.zi_failfast &&
471 zio->io_flags |= ZIO_FLAG_IO_RETRY;
476 if (handler->zi_record.zi_error == EILSEQ) {
481 (void) abd_iterate_func(zio->io_abd, 0,
482 zio->io_size, zio_inject_bitflip_cb,
487 ret = handler->zi_record.zi_error;
490 if (handler->zi_record.zi_error == ENXIO) {
491 handler->zi_record.zi_match_count++;
492 handler->zi_record.zi_inject_count++;
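
For EILSEQ the injector corrupts the returned data in place instead of failing the read outright. A minimal sketch of an abd_iterate_func() callback in that style (hypothetical name; the in-tree callback is zio_inject_bitflip_cb):

        static int
        bitflip_cb(void *data, size_t len, void *private)
        {
                zio_t *zio = private;
                uint8_t *buf = data;

                ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

                /* flip one random bit, then stop iterating */
                buf[random_in_range(len)] ^= 1U << random_in_range(8);
                return (1);
        }
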
531 if (zio->io_spa != handler->zi_spa ||
532 handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
535 handler->zi_record.zi_match_count++;
541 if (handler->zi_record.zi_timer == 0) {
542 if (handler->zi_record.zi_duration > 0)
543 handler->zi_record.zi_timer = ddi_get_lbolt64();
545 handler->zi_record.zi_timer = zio->io_txg;
550 handler->zi_record.zi_inject_count++;
551 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
572 if (spa != handler->zi_spa ||
573 handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
576 handler->zi_record.zi_match_count++;
577 handler->zi_record.zi_inject_count++;
579 if (handler->zi_record.zi_duration > 0) {
580 VERIFY(handler->zi_record.zi_timer == 0 ||
582 (int64_t)handler->zi_record.zi_timer +
583 handler->zi_record.zi_duration * hz,
587 VERIFY(handler->zi_record.zi_timer == 0 ||
588 handler->zi_record.zi_timer -
589 handler->zi_record.zi_duration >=
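
Both VERIFYs compare against the clock chosen when the handler was armed: a lbolt deadline (zi_timer + zi_duration * hz) for a positive zi_duration in seconds, or a txg deadline for a negative one. Storing a txg count negated lets the same field carry both units (an assumption based on zinject encoding its txg option that way), so the subtraction above effectively adds:

        int64_t timer = 100;                    /* txg when the handler armed */
        int64_t duration = -5;                  /* five txgs, stored negated */
        int64_t deadline = timer - duration;    /* 100 - (-5) = 105 */
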
600 vdev_t *vd = zio->io_vd;
629 * it. Each lane is able to handle requests independently of one
632 * a single lane with a 10ms latency, it will delay requests
640 * threads being assigned to the same lane of a given inject
641 * handler. The mutex allows us to perform the following two
646 * 2. update that minimum handler's lane array
648 * Without atomicity, two (or more) threads could pick the same
649 * lane in step (1), and then conflict with each other in step
650 * (2). This could allow a single lane handler to process
657 if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
660 if (vd->vdev_guid != handler->zi_record.zi_guid)
663 /* also match on I/O type (e.g., -T read) */
664 if (!zio_match_iotype(zio, handler->zi_record.zi_iotype))
671 ASSERT3P(handler->zi_lanes, !=, NULL);
677 ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);
679 ASSERT3U(handler->zi_record.zi_nlanes, >,
680 handler->zi_next_lane);
682 handler->zi_record.zi_match_count++;
685 if (!freq_triggered(handler->zi_record.zi_freq))
689 * We want to issue this IO to the lane that will become
693 * lanes. We then use this lane to submit the request.
696 * delay, we can just use the "next" lane for that
697 * handler; as it will always be the lane with the
699 * lane that will become idle the soonest). This saves a
702 * There are two cases to consider when determining when
704 * lane is idle, we want to "submit" the request now so
708 * If the lane is busy, we want this request to complete
709 * zi_timer milliseconds after the lane becomes idle.
711 * each lane will become idle, we use that value to
714 hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
715 hrtime_t busy = handler->zi_record.zi_timer +
716 handler->zi_lanes[handler->zi_next_lane];
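
The completion target is then the later of the two values, i.e. MAX(idle, busy). Worked numbers, assuming a 10 ms zi_timer:

        /* now = 100 ms; the lane is busy until 125 ms */
        hrtime_t idle = MSEC2NSEC(10) + MSEC2NSEC(100); /* 110 ms */
        hrtime_t busy = MSEC2NSEC(10) + MSEC2NSEC(125); /* 135 ms */
        hrtime_t target = MAX(idle, busy);              /* 135 ms */
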
729 * We don't yet increment the "next lane" variable since
730 * we still might find a lower value lane in another
733 * the lane and increment the handler's "next lane"
746 * the lane that will become idle the soonest.
750 min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;
754 * loop back and start using the first lane again;
755 * otherwise, just increment the lane index.
757 min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
758 min_handler->zi_record.zi_nlanes;
760 min_handler->zi_record.zi_inject_count++;
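
Taken together, a self-contained userland model of the lane bookkeeping (illustrative only: fixed lane count, no locking, hypothetical names):

        #include <stdint.h>
        #include <stdio.h>

        #define NLANES  2

        static uint64_t lanes[NLANES];  /* per-lane busy-until time, ns */
        static unsigned next_lane;

        /* return when a request submitted at 'now' would complete */
        static uint64_t
        submit(uint64_t now, uint64_t latency)
        {
                uint64_t idle = now + latency;
                uint64_t busy = lanes[next_lane] + latency;
                uint64_t target = busy > idle ? busy : idle;

                lanes[next_lane] = target;              /* claim the lane */
                next_lane = (next_lane + 1) % NLANES;   /* round-robin */
                return (target);
        }

        int
        main(void)
        {
                /* four requests at t=0 under a 10 ms injected latency */
                for (int i = 0; i < 4; i++)
                        printf("req %d: %llu ns\n", i,
                            (unsigned long long)submit(0, 10000000ULL));
                return (0);
        }

With two lanes, requests 0 and 1 complete at 10 ms and requests 2 and 3 at 20 ms, matching the one-completion-per-lane-per-interval behavior the comment describes. Such a handler is created with zinject's -D latency:lanes option (e.g. -D 10:2), per zinject(8).
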
780 handler != NULL && handler->zi_record.zi_cmd == command;
782 ASSERT3P(handler->zi_spa_name, !=, NULL);
783 if (strcmp(spa_name(spa), handler->zi_spa_name) == 0) {
784 handler->zi_record.zi_match_count++;
786 SEC2NSEC(handler->zi_record.zi_duration);
788 handler->zi_record.zi_inject_count++;
789 delay = pause - elapsed;
791 id = handler->zi_id;
806 /* all done with this one-shot handler */
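
The import/export pause is one-shot: only the remainder of the configured duration beyond the time the operation has already spent is slept, and the handler is then removed by id. A sketch of the tail of that path (assuming zfs_sleep_until() is the sleep primitive used here):

        if (delay != 0)
                zfs_sleep_until(gethrtime() + delay);   /* sleep the remainder */
        if (id != 0)
                zio_clear_fault(id);    /* one-shot: drop the handler */
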
845 error = dsl_dataset_hold_obj(dp, record->zi_objset, FTAG, &ds);
855 error = dnode_hold(os, record->zi_object, FTAG, &dn);
862 if (record->zi_start != 0 || record->zi_end != -1ULL) {
863 record->zi_start >>= dn->dn_datablkshift;
864 record->zi_end >>= dn->dn_datablkshift;
866 if (record->zi_level > 0) {
867 if (record->zi_level >= dn->dn_nlevels) {
872 if (record->zi_start != 0 || record->zi_end != 0) {
873 int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
875 for (int level = record->zi_level; level > 0; level--) {
876 record->zi_start >>= shift;
877 record->zi_end >>= shift;
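
Worked example of the range conversion (illustrative geometry): 128 KB data blocks give dn_datablkshift = 17, and 128 KB indirect blocks of 128-byte block pointers give shift = 17 - 7 = 10, i.e. 1024 pointers per indirect block:

        uint64_t start = 1ULL << 30;    /* byte offset 1 GiB */
        start >>= 17;                   /* level-0 blkid: 8192 */
        start >>= 10;                   /* level-1 blkid: 8 */
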
894 if (command != handler->zi_record.zi_cmd)
897 const char *pool = (handler->zi_spa_name != NULL) ?
898 handler->zi_spa_name : spa_name(handler->zi_spa);
921 * If this is pool-wide metadata, make sure we unload the corresponding
929 if (record->zi_cmd == ZINJECT_DELAY_IO) {
934 if (record->zi_timer == 0 || record->zi_nlanes == 0)
943 if (record->zi_nlanes >= UINT16_MAX)
948 * If the supplied range was in bytes -- calculate the actual blkid
962 if (record->zi_cmd == ZINJECT_DELAY_IMPORT ||
963 record->zi_cmd == ZINJECT_DELAY_EXPORT) {
964 if (record->zi_duration <= 0)
969 if (zio_pool_handler_exists(name, record->zi_cmd))
976 if (record->zi_cmd == ZINJECT_DELAY_IMPORT && has_spa)
978 if (record->zi_cmd == ZINJECT_DELAY_EXPORT && !has_spa)
993 handler->zi_spa = spa; /* note: can be NULL */
994 handler->zi_record = *record;
996 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
997 handler->zi_lanes = kmem_zalloc(
998 sizeof (*handler->zi_lanes) *
999 handler->zi_record.zi_nlanes, KM_SLEEP);
1000 handler->zi_next_lane = 0;
1002 handler->zi_lanes = NULL;
1003 handler->zi_next_lane = 0;
1006 if (handler->zi_spa == NULL)
1007 handler->zi_spa_name = spa_strdup(name);
1009 handler->zi_spa_name = NULL;
1019 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
1025 *id = handler->zi_id = inject_next_id++;
1064 if (handler->zi_id > *id)
1068 *record = handler->zi_record;
1069 *id = handler->zi_id;
1070 ASSERT(handler->zi_spa || handler->zi_spa_name);
1071 if (handler->zi_spa != NULL)
1072 (void) strlcpy(name, spa_name(handler->zi_spa), buflen);
1074 (void) strlcpy(name, handler->zi_spa_name, buflen);
1099 if (handler->zi_id == id)
1107 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
1109 inject_delay_count--;
1116 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
1117 ASSERT3P(handler->zi_lanes, !=, NULL);
1118 kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
1119 handler->zi_record.zi_nlanes);
1121 ASSERT3P(handler->zi_lanes, ==, NULL);
1124 if (handler->zi_spa_name != NULL)
1125 spa_strfree(handler->zi_spa_name);
1127 if (handler->zi_spa != NULL)
1128 spa_inject_delref(handler->zi_spa);