Lines Matching +full:default +full:- +full:trim

9  * or https://opensource.org/licenses/CDDL-1.0.
41 * TRIM is a feature which is used to notify a SSD that some previously
47 * There are two supported TRIM methods: manual and automatic.
49 * Manual TRIM:
51 * A manual TRIM is initiated by running the 'zpool trim' command. A single
53 * managing that vdev TRIM process. This involves iterating over all the
55 * required TRIM I/Os.
60 * the TRIM are regularly written to the pool. This allows the TRIM to be
63 * Automatic TRIM:
65 * An automatic TRIM is enabled by setting the 'autotrim' pool property
67 * top-level (not leaf) vdev in the pool. These threads perform the same
68 * core TRIM process as a manual TRIM, but with a few key differences.
70 * 1) Automatic TRIM happens continuously in the background and operates
73 * 2) Each thread is associated with a top-level (not leaf) vdev. This has
76 * metaslab is disabled at a time. Unlike manual TRIM, this means each
77 * 'vdev_autotrim' thread is responsible for issuing TRIM I/Os for its
80 * 3) There is no automatic TRIM progress information stored on disk, nor
83 * While the automatic TRIM process is highly effective it is more likely
84 * than a manual TRIM to encounter tiny ranges. Ranges less than or equal to
86 * TRIM and are skipped. This means small amounts of freed space may not
93 * For this reason it may be beneficial to occasionally manually TRIM a pool
94 * even when automatic TRIM is enabled.
98  * Maximum size of TRIM I/O; ranges will be chunked into 128MiB lengths.
103 * Minimum size of TRIM I/O; extents smaller than 32KiB will be skipped.
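Taken together, these two tunables define the per-extent policy: extents smaller than the minimum are skipped, and extents larger than the maximum are split. A minimal standalone model of that rule (hypothetical names, not the kernel code; it mirrors the writes_required arithmetic in vdev_trim_ranges() below):

#include <stdint.h>

/*
 * Model: returns 0 when the extent is skipped, otherwise the number of
 * TRIM I/Os it is split into.
 */
static uint64_t
trim_ios_for_extent(uint64_t size, uint64_t min_bytes, uint64_t max_bytes)
{
	if (size < min_bytes)
		return (0);			/* too small, skipped */
	return (((size - 1) / max_bytes) + 1);	/* chunked to max_bytes */
}

With the defaults above, a 20KiB extent yields 0 (skipped) while a 300MiB extent yields 3 (two 128MiB I/Os plus a 44MiB remainder).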
108 * Skip uninitialized metaslabs during the TRIM process. This option is
109 * useful for pools constructed from large thinly-provisioned devices where
110 * TRIM operations are slow. As a pool ages an increasing fraction of
113 * manual TRIM and will persist for the duration of the requested TRIM.
118 * Maximum number of queued TRIM I/Os per leaf vdev. The number of
119 * concurrent TRIM I/Os issued to the device is controlled by the
126 * metaslab. This setting represents a trade-off between issuing more
127 * efficient TRIM operations, by allowing them to be aggregated longer,
133 * time. This can result in larger TRIM operations and increased memory
135 * has the opposite effect. The default value of 32 was determined through
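As a sketch of how this batching selects metaslabs (assuming the stride loop in vdev_autotrim_thread() below, where shift is a counter advanced on each pass):

#include <stdint.h>

/*
 * Model: metaslab i is visited on a given pass only when its index
 * falls on the pass's stride offset, so with the default batch of 32
 * each pass touches roughly 1/32nd of the metaslabs and any one
 * metaslab accumulates ~32 txgs of frees between visits.
 */
static int
autotrim_visits(uint64_t i, uint64_t shift, uint64_t txgs_per_trim)
{
	return ((i % txgs_per_trim) == (shift % txgs_per_trim));
}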
143 * trimmed and a range tree containing the extents to TRIM. All provided
150 vdev_t *trim_vdev; /* Leaf vdev to TRIM */
152 zfs_range_tree_t *trim_tree; /* TRIM ranges (in metaslab) */
153 trim_type_t trim_type; /* Manual or auto TRIM */
154 uint64_t trim_extent_bytes_max; /* Maximum TRIM I/O size */
155 uint64_t trim_extent_bytes_min; /* Minimum TRIM I/O size */
156 enum trim_flag trim_flags; /* TRIM flags (secure) */
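A hedged sketch of how a manual TRIM might fill in these fields before calling vdev_trim_ranges(); abbreviated, with the range-tree setup elided (the secure-TRIM override follows the comment above vdev_trim_thread() later in this listing):

trim_args_t ta = { 0 };

ta.trim_vdev = vd;				/* leaf vdev to TRIM */
ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
ta.trim_flags = 0;

/* Secure TRIM means everything must be trimmed, so drop the minimum
 * extent size and set the secure flag (flag name per enum trim_flag). */
if (vd->vdev_trim_secure) {
	ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
	ta.trim_flags |= ZIO_TRIM_SECURE;
}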
171 return (vd->vdev_trim_exit_wanted || !vdev_writeable(vd) || in vdev_trim_should_stop()
172 vd->vdev_detached || vd->vdev_top->vdev_removing || in vdev_trim_should_stop()
173 vd->vdev_top->vdev_rz_expanding); in vdev_trim_should_stop()
182 return (tvd->vdev_autotrim_exit_wanted || in vdev_autotrim_should_stop()
183 !vdev_writeable(tvd) || tvd->vdev_removing || in vdev_autotrim_should_stop()
184 tvd->vdev_rz_expanding || in vdev_autotrim_should_stop()
185 spa_get_autotrim(tvd->vdev_spa) == SPA_AUTOTRIM_OFF); in vdev_autotrim_should_stop()
195 mutex_enter(&vd->vdev_autotrim_lock); in vdev_autotrim_wait_kick()
197 if (vd->vdev_autotrim_exit_wanted) in vdev_autotrim_wait_kick()
199 cv_wait_idle(&vd->vdev_autotrim_kick_cv, in vdev_autotrim_wait_kick()
200 &vd->vdev_autotrim_lock); in vdev_autotrim_wait_kick()
202 boolean_t exit_wanted = vd->vdev_autotrim_exit_wanted; in vdev_autotrim_wait_kick()
203 mutex_exit(&vd->vdev_autotrim_lock); in vdev_autotrim_wait_kick()
209 * The sync task for updating the on-disk state of a manual TRIM. This
227 vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE); in vdev_trim_zap_update_sync()
228 if (vd == NULL || vd->vdev_top->vdev_removing || in vdev_trim_zap_update_sync()
229 !vdev_is_concrete(vd) || vd->vdev_top->vdev_rz_expanding) in vdev_trim_zap_update_sync()
232 uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK]; in vdev_trim_zap_update_sync()
233 vd->vdev_trim_offset[txg & TXG_MASK] = 0; in vdev_trim_zap_update_sync()
235 VERIFY3U(vd->vdev_leaf_zap, !=, 0); in vdev_trim_zap_update_sync()
237 objset_t *mos = vd->vdev_spa->spa_meta_objset; in vdev_trim_zap_update_sync()
239 if (last_offset > 0 || vd->vdev_trim_last_offset == UINT64_MAX) { in vdev_trim_zap_update_sync()
241 if (vd->vdev_trim_last_offset == UINT64_MAX) in vdev_trim_zap_update_sync()
244 vd->vdev_trim_last_offset = last_offset; in vdev_trim_zap_update_sync()
245 VERIFY0(zap_update(mos, vd->vdev_leaf_zap, in vdev_trim_zap_update_sync()
250 if (vd->vdev_trim_action_time > 0) { in vdev_trim_zap_update_sync()
251 uint64_t val = (uint64_t)vd->vdev_trim_action_time; in vdev_trim_zap_update_sync()
252 VERIFY0(zap_update(mos, vd->vdev_leaf_zap, in vdev_trim_zap_update_sync()
257 if (vd->vdev_trim_rate > 0) { in vdev_trim_zap_update_sync()
258 uint64_t rate = (uint64_t)vd->vdev_trim_rate; in vdev_trim_zap_update_sync()
263 VERIFY0(zap_update(mos, vd->vdev_leaf_zap, in vdev_trim_zap_update_sync()
267 uint64_t partial = vd->vdev_trim_partial; in vdev_trim_zap_update_sync()
271 VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL, in vdev_trim_zap_update_sync()
274 uint64_t secure = vd->vdev_trim_secure; in vdev_trim_zap_update_sync()
278 VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE, in vdev_trim_zap_update_sync()
282 uint64_t trim_state = vd->vdev_trim_state; in vdev_trim_zap_update_sync()
283 VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE, in vdev_trim_zap_update_sync()
288 * Update the on-disk state of a manual TRIM. This is called to request
289 * that a TRIM be started/suspended/canceled, or to change one of the
290 * TRIM options (partial, secure, rate).
296 ASSERT(MUTEX_HELD(&vd->vdev_trim_lock)); in vdev_trim_change_state()
297 spa_t *spa = vd->vdev_spa; in vdev_trim_change_state()
299 if (new_state == vd->vdev_trim_state) in vdev_trim_change_state()
306 *guid = vd->vdev_guid; in vdev_trim_change_state()
311 if (vd->vdev_trim_state != VDEV_TRIM_SUSPENDED) { in vdev_trim_change_state()
312 vd->vdev_trim_action_time = gethrestime_sec(); in vdev_trim_change_state()
316 * If we're activating, then preserve the requested rate and trim in vdev_trim_change_state()
318 * as a sentinel to indicate they should be reset to default values. in vdev_trim_change_state()
321 if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE || in vdev_trim_change_state()
322 vd->vdev_trim_state == VDEV_TRIM_CANCELED) { in vdev_trim_change_state()
323 vd->vdev_trim_last_offset = UINT64_MAX; in vdev_trim_change_state()
324 vd->vdev_trim_rate = UINT64_MAX; in vdev_trim_change_state()
325 vd->vdev_trim_partial = UINT64_MAX; in vdev_trim_change_state()
326 vd->vdev_trim_secure = UINT64_MAX; in vdev_trim_change_state()
330 vd->vdev_trim_rate = rate; in vdev_trim_change_state()
333 vd->vdev_trim_partial = partial; in vdev_trim_change_state()
336 vd->vdev_trim_secure = secure; in vdev_trim_change_state()
339 vdev_trim_state_t old_state = vd->vdev_trim_state; in vdev_trim_change_state()
341 vd->vdev_trim_state = new_state; in vdev_trim_change_state()
343 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); in vdev_trim_change_state()
352 spa_history_log_internal(spa, "trim", tx, in vdev_trim_change_state()
353 "vdev=%s activated", vd->vdev_path); in vdev_trim_change_state()
357 spa_history_log_internal(spa, "trim", tx, in vdev_trim_change_state()
358 "vdev=%s suspended", vd->vdev_path); in vdev_trim_change_state()
364 spa_history_log_internal(spa, "trim", tx, in vdev_trim_change_state()
365 "vdev=%s canceled", vd->vdev_path); in vdev_trim_change_state()
370 spa_history_log_internal(spa, "trim", tx, in vdev_trim_change_state()
371 "vdev=%s complete", vd->vdev_path); in vdev_trim_change_state()
373 default: in vdev_trim_change_state()
384 * The zio_done_func_t done callback for each manual TRIM issued. It is
385 * responsible for updating the TRIM stats, reissuing failed TRIM I/Os,
386 * and limiting the number of in flight TRIM I/Os.
391 vdev_t *vd = zio->io_vd; in vdev_trim_cb()
393 mutex_enter(&vd->vdev_trim_io_lock); in vdev_trim_cb()
394 if (zio->io_error == ENXIO && !vdev_writeable(vd)) { in vdev_trim_cb()
401 &vd->vdev_trim_offset[zio->io_txg & TXG_MASK]; in vdev_trim_cb()
402 *offset = MIN(*offset, zio->io_offset); in vdev_trim_cb()
404 if (zio->io_error != 0) { in vdev_trim_cb()
405 vd->vdev_stat.vs_trim_errors++; in vdev_trim_cb()
406 spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL, in vdev_trim_cb()
407 0, 0, 0, 0, 1, zio->io_orig_size); in vdev_trim_cb()
409 spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL, in vdev_trim_cb()
410 1, zio->io_orig_size, 0, 0, 0, 0); in vdev_trim_cb()
413 vd->vdev_trim_bytes_done += zio->io_orig_size; in vdev_trim_cb()
416 ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_MANUAL], >, 0); in vdev_trim_cb()
417 vd->vdev_trim_inflight[TRIM_TYPE_MANUAL]--; in vdev_trim_cb()
418 cv_broadcast(&vd->vdev_trim_io_cv); in vdev_trim_cb()
419 mutex_exit(&vd->vdev_trim_io_lock); in vdev_trim_cb()
421 spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd); in vdev_trim_cb()
425 * The zio_done_func_t done callback for each automatic TRIM issued. It
426 * is responsible for updating the TRIM stats and limiting the number of
427 * in flight TRIM I/Os. Automatic TRIM I/Os are best effort and are
433 vdev_t *vd = zio->io_vd; in vdev_autotrim_cb()
435 mutex_enter(&vd->vdev_trim_io_lock); in vdev_autotrim_cb()
437 if (zio->io_error != 0) { in vdev_autotrim_cb()
438 vd->vdev_stat.vs_trim_errors++; in vdev_autotrim_cb()
439 spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO, in vdev_autotrim_cb()
440 0, 0, 0, 0, 1, zio->io_orig_size); in vdev_autotrim_cb()
442 spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO, in vdev_autotrim_cb()
443 1, zio->io_orig_size, 0, 0, 0, 0); in vdev_autotrim_cb()
446 ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_AUTO], >, 0); in vdev_autotrim_cb()
447 vd->vdev_trim_inflight[TRIM_TYPE_AUTO]--; in vdev_autotrim_cb()
448 cv_broadcast(&vd->vdev_trim_io_cv); in vdev_autotrim_cb()
449 mutex_exit(&vd->vdev_trim_io_lock); in vdev_autotrim_cb()
451 spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd); in vdev_autotrim_cb()
455 * The zio_done_func_t done callback for each TRIM issued via
456 * vdev_trim_simple(). It is responsible for updating the TRIM stats and
457 * limiting the number of in flight TRIM I/Os. Simple TRIM I/Os are best
463 vdev_t *vd = zio->io_vd; in vdev_trim_simple_cb()
465 mutex_enter(&vd->vdev_trim_io_lock); in vdev_trim_simple_cb()
467 if (zio->io_error != 0) { in vdev_trim_simple_cb()
468 vd->vdev_stat.vs_trim_errors++; in vdev_trim_simple_cb()
469 spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE, in vdev_trim_simple_cb()
470 0, 0, 0, 0, 1, zio->io_orig_size); in vdev_trim_simple_cb()
472 spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE, in vdev_trim_simple_cb()
473 1, zio->io_orig_size, 0, 0, 0, 0); in vdev_trim_simple_cb()
476 ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE], >, 0); in vdev_trim_simple_cb()
477 vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE]--; in vdev_trim_simple_cb()
478 cv_broadcast(&vd->vdev_trim_io_cv); in vdev_trim_simple_cb()
479 mutex_exit(&vd->vdev_trim_io_lock); in vdev_trim_simple_cb()
481 spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd); in vdev_trim_simple_cb()
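All three callbacks account through the same counter bundle. The argument names below are assumed from usage, not taken from the header:

/*
 * Assumed shape of the accounting call used by the callbacks above:
 *
 *   spa_iostats_trim_add(spa, type,
 *       extents_written, bytes_written,
 *       extents_skipped, bytes_skipped,
 *       extents_failed,  bytes_failed);
 *
 * so success adds (1, io_orig_size, 0, 0, 0, 0), failure adds
 * (0, 0, 0, 0, 1, io_orig_size), and the small-extent path in
 * vdev_trim_ranges() uses the skipped pair.
 */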
484 * Returns the average trim rate in bytes/sec for the ta->trim_vdev.
489 return (ta->trim_bytes_done * 1000 / in vdev_trim_calculate_rate()
490 (NSEC2MSEC(gethrtime() - ta->trim_start_time) + 1)); in vdev_trim_calculate_rate()
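Spelled out: bytes completed divided by elapsed milliseconds plus one (so a just-started TRIM never divides by zero), scaled to bytes per second. A standalone model:

#include <stdint.h>

/* Model of vdev_trim_calculate_rate(): average bytes/sec. */
static uint64_t
trim_rate_bps(uint64_t bytes_done, uint64_t elapsed_ns)
{
	uint64_t elapsed_ms = elapsed_ns / 1000000;	/* NSEC2MSEC */
	return (bytes_done * 1000 / (elapsed_ms + 1));
}

For example, 64MiB completed in two seconds gives 67108864 * 1000 / 2001, roughly 33.5 MB/s.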
494 * Issues a physical TRIM and takes care of rate limiting (bytes/sec)
495 * and number of concurrent TRIM I/Os.
500 vdev_t *vd = ta->trim_vdev; in vdev_trim_range()
501 spa_t *spa = vd->vdev_spa; in vdev_trim_range()
504 mutex_enter(&vd->vdev_trim_io_lock); in vdev_trim_range()
507 * Limit manual TRIM I/Os to the requested rate. This does not in vdev_trim_range()
508 * apply to automatic TRIM since no per vdev rate can be specified. in vdev_trim_range()
510 if (ta->trim_type == TRIM_TYPE_MANUAL) { in vdev_trim_range()
511 while (vd->vdev_trim_rate != 0 && !vdev_trim_should_stop(vd) && in vdev_trim_range()
512 vdev_trim_calculate_rate(ta) > vd->vdev_trim_rate) { in vdev_trim_range()
513 cv_timedwait_idle(&vd->vdev_trim_io_cv, in vdev_trim_range()
514 &vd->vdev_trim_io_lock, ddi_get_lbolt() + in vdev_trim_range()
518 ta->trim_bytes_done += size; in vdev_trim_range()
521 while (vd->vdev_trim_inflight[0] + vd->vdev_trim_inflight[1] + in vdev_trim_range()
522 vd->vdev_trim_inflight[2] >= zfs_trim_queue_limit) { in vdev_trim_range()
523 cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock); in vdev_trim_range()
525 vd->vdev_trim_inflight[ta->trim_type]++; in vdev_trim_range()
526 mutex_exit(&vd->vdev_trim_io_lock); in vdev_trim_range()
528 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); in vdev_trim_range()
533 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_range()
535 if (ta->trim_type == TRIM_TYPE_MANUAL && in vdev_trim_range()
536 vd->vdev_trim_offset[txg & TXG_MASK] == 0) { in vdev_trim_range()
538 *guid = vd->vdev_guid; in vdev_trim_range()
549 if ((ta->trim_type == TRIM_TYPE_MANUAL && in vdev_trim_range()
551 (ta->trim_type == TRIM_TYPE_AUTO && in vdev_trim_range()
552 vdev_autotrim_should_stop(vd->vdev_top))) { in vdev_trim_range()
553 mutex_enter(&vd->vdev_trim_io_lock); in vdev_trim_range()
554 vd->vdev_trim_inflight[ta->trim_type]--; in vdev_trim_range()
555 mutex_exit(&vd->vdev_trim_io_lock); in vdev_trim_range()
556 spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd); in vdev_trim_range()
557 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_range()
561 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_range()
563 if (ta->trim_type == TRIM_TYPE_MANUAL) in vdev_trim_range()
564 vd->vdev_trim_offset[txg & TXG_MASK] = start + size; in vdev_trim_range()
566 if (ta->trim_type == TRIM_TYPE_MANUAL) { in vdev_trim_range()
568 } else if (ta->trim_type == TRIM_TYPE_AUTO) { in vdev_trim_range()
574 zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd, in vdev_trim_range()
576 ta->trim_flags)); in vdev_trim_range()
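The TRIM_TYPE_* branches above select the completion callback handed to the zio; a sketch of that dispatch (assumed from the three callbacks defined earlier in this file):

zio_done_func_t *cb =
    (ta->trim_type == TRIM_TYPE_MANUAL) ? vdev_trim_cb :
    (ta->trim_type == TRIM_TYPE_AUTO) ? vdev_autotrim_cb :
    vdev_trim_simple_cb;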
585 * Issues TRIM I/Os for all ranges in the provided ta->trim_tree range tree.
586 * Additional parameters describing how the TRIM should be performed must
593 vdev_t *vd = ta->trim_vdev; in vdev_trim_ranges()
594 zfs_btree_t *t = &ta->trim_tree->rt_root; in vdev_trim_ranges()
596 uint64_t extent_bytes_max = ta->trim_extent_bytes_max; in vdev_trim_ranges()
597 uint64_t extent_bytes_min = ta->trim_extent_bytes_min; in vdev_trim_ranges()
598 spa_t *spa = vd->vdev_spa; in vdev_trim_ranges()
601 ta->trim_start_time = gethrtime(); in vdev_trim_ranges()
602 ta->trim_bytes_done = 0; in vdev_trim_ranges()
606 uint64_t size = zfs_rs_get_end(rs, ta->trim_tree) - in vdev_trim_ranges()
607 zfs_rs_get_start(rs, ta->trim_tree); in vdev_trim_ranges()
610 spa_iostats_trim_add(spa, ta->trim_type, in vdev_trim_ranges()
615 /* Split range into legally-sized physical chunks */ in vdev_trim_ranges()
616 uint64_t writes_required = ((size - 1) / extent_bytes_max) + 1; in vdev_trim_ranges()
620 zfs_rs_get_start(rs, ta->trim_tree) + in vdev_trim_ranges()
621 (w * extent_bytes_max), MIN(size - in vdev_trim_ranges()
632 * returning. TRIM zios have lower priority than regular or syncing in vdev_trim_ranges()
633 * zios, so all TRIM zios for this metaslab must complete before the in vdev_trim_ranges()
634 * metaslab is re-enabled. Otherwise it's possible write zios to in vdev_trim_ranges()
635 * this metaslab could cut ahead of still queued TRIM zios for this in vdev_trim_ranges()
638 mutex_enter(&vd->vdev_trim_io_lock); in vdev_trim_ranges()
639 while (vd->vdev_trim_inflight[0] > 0) { in vdev_trim_ranges()
640 cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock); in vdev_trim_ranges()
642 mutex_exit(&vd->vdev_trim_io_lock); in vdev_trim_ranges()
652 if (physical_rs->rs_end > *last_rs_end) in vdev_trim_xlate_last_rs_end()
653 *last_rs_end = physical_rs->rs_end; in vdev_trim_xlate_last_rs_end()
661 uint64_t size = physical_rs->rs_end - physical_rs->rs_start; in vdev_trim_xlate_progress()
662 vd->vdev_trim_bytes_est += size; in vdev_trim_xlate_progress()
664 if (vd->vdev_trim_last_offset >= physical_rs->rs_end) { in vdev_trim_xlate_progress()
665 vd->vdev_trim_bytes_done += size; in vdev_trim_xlate_progress()
666 } else if (vd->vdev_trim_last_offset > physical_rs->rs_start && in vdev_trim_xlate_progress()
667 vd->vdev_trim_last_offset <= physical_rs->rs_end) { in vdev_trim_xlate_progress()
668 vd->vdev_trim_bytes_done += in vdev_trim_xlate_progress()
669 vd->vdev_trim_last_offset - physical_rs->rs_start; in vdev_trim_xlate_progress()
674 * Calculates the completion percentage of a manual TRIM.
679 ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) || in vdev_trim_calculate_progress()
680 spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER)); in vdev_trim_calculate_progress()
681 ASSERT(vd->vdev_leaf_zap != 0); in vdev_trim_calculate_progress()
683 vd->vdev_trim_bytes_est = 0; in vdev_trim_calculate_progress()
684 vd->vdev_trim_bytes_done = 0; in vdev_trim_calculate_progress()
686 for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) { in vdev_trim_calculate_progress()
687 metaslab_t *msp = vd->vdev_top->vdev_ms[i]; in vdev_trim_calculate_progress()
688 mutex_enter(&msp->ms_lock); in vdev_trim_calculate_progress()
690 uint64_t ms_free = (msp->ms_size - in vdev_trim_calculate_progress()
692 vdev_get_ndisks(vd->vdev_top); in vdev_trim_calculate_progress()
700 logical_rs.rs_start = msp->ms_start; in vdev_trim_calculate_progress()
701 logical_rs.rs_end = msp->ms_start + msp->ms_size; in vdev_trim_calculate_progress()
705 if (vd->vdev_trim_last_offset <= physical_rs.rs_start) { in vdev_trim_calculate_progress()
706 vd->vdev_trim_bytes_est += ms_free; in vdev_trim_calculate_progress()
707 mutex_exit(&msp->ms_lock); in vdev_trim_calculate_progress()
718 if (vd->vdev_trim_last_offset > last_rs_end) { in vdev_trim_calculate_progress()
719 vd->vdev_trim_bytes_done += ms_free; in vdev_trim_calculate_progress()
720 vd->vdev_trim_bytes_est += ms_free; in vdev_trim_calculate_progress()
721 mutex_exit(&msp->ms_lock); in vdev_trim_calculate_progress()
732 zfs_range_tree_t *rt = msp->ms_allocatable; in vdev_trim_calculate_progress()
733 zfs_btree_t *bt = &rt->rt_root; in vdev_trim_calculate_progress()
743 mutex_exit(&msp->ms_lock); in vdev_trim_calculate_progress()
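These per-metaslab passes accumulate vdev_trim_bytes_done and vdev_trim_bytes_est; the completion percentage shown by 'zpool status -t' is then just their ratio. A minimal model (the zero-estimate convention is an assumption):

#include <stdint.h>

static uint64_t
trim_percent_done(uint64_t bytes_done, uint64_t bytes_est)
{
	if (bytes_est == 0)
		return (100);	/* nothing to trim counts as done */
	return (bytes_done * 100 / bytes_est);
}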
748 * Load from disk the vdev's manual TRIM information. This includes the
749 * state, progress, and options provided when initiating the manual TRIM.
755 ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) || in vdev_trim_load()
756 spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER)); in vdev_trim_load()
757 ASSERT(vd->vdev_leaf_zap != 0); in vdev_trim_load()
759 if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE || in vdev_trim_load()
760 vd->vdev_trim_state == VDEV_TRIM_SUSPENDED) { in vdev_trim_load()
761 err = zap_lookup(vd->vdev_spa->spa_meta_objset, in vdev_trim_load()
762 vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_LAST_OFFSET, in vdev_trim_load()
763 sizeof (vd->vdev_trim_last_offset), 1, in vdev_trim_load()
764 &vd->vdev_trim_last_offset); in vdev_trim_load()
766 vd->vdev_trim_last_offset = 0; in vdev_trim_load()
771 err = zap_lookup(vd->vdev_spa->spa_meta_objset, in vdev_trim_load()
772 vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_RATE, in vdev_trim_load()
773 sizeof (vd->vdev_trim_rate), 1, in vdev_trim_load()
774 &vd->vdev_trim_rate); in vdev_trim_load()
776 vd->vdev_trim_rate = 0; in vdev_trim_load()
782 err = zap_lookup(vd->vdev_spa->spa_meta_objset, in vdev_trim_load()
783 vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL, in vdev_trim_load()
784 sizeof (vd->vdev_trim_partial), 1, in vdev_trim_load()
785 &vd->vdev_trim_partial); in vdev_trim_load()
787 vd->vdev_trim_partial = 0; in vdev_trim_load()
793 err = zap_lookup(vd->vdev_spa->spa_meta_objset, in vdev_trim_load()
794 vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE, in vdev_trim_load()
795 sizeof (vd->vdev_trim_secure), 1, in vdev_trim_load()
796 &vd->vdev_trim_secure); in vdev_trim_load()
798 vd->vdev_trim_secure = 0; in vdev_trim_load()
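Each field above uses the same lookup idiom; the error handling has been elided by the line matching, but as a sketch (assuming an absent ZAP entry simply means the value was never set) it looks like:

uint64_t value = 0;
int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_leaf_zap,
    VDEV_LEAF_ZAP_TRIM_RATE, sizeof (value), 1, &value);
if (err == ENOENT) {
	value = 0;	/* default: TRIM rate never configured */
	err = 0;
}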
813 vdev_t *vd = ta->trim_vdev; in vdev_trim_xlate_range_add()
816 * Only a manual trim will be traversing the vdev sequentially. in vdev_trim_xlate_range_add()
817 * For an auto trim all valid ranges should be added. in vdev_trim_xlate_range_add()
819 if (ta->trim_type == TRIM_TYPE_MANUAL) { in vdev_trim_xlate_range_add()
822 if (physical_rs->rs_end <= vd->vdev_trim_last_offset) in vdev_trim_xlate_range_add()
825 /* Pick up where we left off mid-range. */ in vdev_trim_xlate_range_add()
826 if (vd->vdev_trim_last_offset > physical_rs->rs_start) { in vdev_trim_xlate_range_add()
827 ASSERT3U(physical_rs->rs_end, >, in vdev_trim_xlate_range_add()
828 vd->vdev_trim_last_offset); in vdev_trim_xlate_range_add()
829 physical_rs->rs_start = vd->vdev_trim_last_offset; in vdev_trim_xlate_range_add()
833 ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start); in vdev_trim_xlate_range_add()
835 zfs_range_tree_add(ta->trim_tree, physical_rs->rs_start, in vdev_trim_xlate_range_add()
836 physical_rs->rs_end - physical_rs->rs_start); in vdev_trim_xlate_range_add()
847 vdev_t *vd = ta->trim_vdev; in vdev_trim_range_add()
858 metaslab_t *msp = ta->trim_msp; in vdev_trim_range_add()
860 VERIFY3B(msp->ms_loaded, ==, B_TRUE); in vdev_trim_range_add()
861 VERIFY(zfs_range_tree_contains(msp->ms_allocatable, start, in vdev_trim_range_add()
865 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_trim_range_add()
870 * Each manual TRIM thread is responsible for trimming the unallocated
872 * over its top-level metaslabs and issuing TRIM I/O for the space described
880 spa_t *spa = vd->vdev_spa; in vdev_trim_thread()
889 txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0); in vdev_trim_thread()
894 vd->vdev_trim_last_offset = 0; in vdev_trim_thread()
895 vd->vdev_trim_rate = 0; in vdev_trim_thread()
896 vd->vdev_trim_partial = 0; in vdev_trim_thread()
897 vd->vdev_trim_secure = 0; in vdev_trim_thread()
909 * When a secure TRIM has been requested, infer that the intent in vdev_trim_thread()
910 * is that everything must be trimmed. Override the default in vdev_trim_thread()
911 * minimum TRIM size to prevent ranges from being skipped. in vdev_trim_thread()
913 if (vd->vdev_trim_secure) { in vdev_trim_thread()
919 for (uint64_t i = 0; !vd->vdev_detached && in vdev_trim_thread()
920 i < vd->vdev_top->vdev_ms_count; i++) { in vdev_trim_thread()
921 metaslab_t *msp = vd->vdev_top->vdev_ms[i]; in vdev_trim_thread()
924 * If we've expanded the top-level vdev or it's our in vdev_trim_thread()
927 if (vd->vdev_top->vdev_ms_count != ms_count) { in vdev_trim_thread()
929 ms_count = vd->vdev_top->vdev_ms_count; in vdev_trim_thread()
934 mutex_enter(&msp->ms_lock); in vdev_trim_thread()
938 * If a partial TRIM was requested, skip metaslabs which have in vdev_trim_thread()
941 if (msp->ms_sm == NULL && vd->vdev_trim_partial) { in vdev_trim_thread()
942 mutex_exit(&msp->ms_lock); in vdev_trim_thread()
950 zfs_range_tree_walk(msp->ms_allocatable, vdev_trim_range_add, in vdev_trim_thread()
952 zfs_range_tree_vacate(msp->ms_trim, NULL, NULL); in vdev_trim_thread()
953 mutex_exit(&msp->ms_lock); in vdev_trim_thread()
968 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_thread()
969 if (!vd->vdev_trim_exit_wanted) { in vdev_trim_thread()
972 vd->vdev_trim_rate, vd->vdev_trim_partial, in vdev_trim_thread()
973 vd->vdev_trim_secure); in vdev_trim_thread()
974 } else if (vd->vdev_faulted) { in vdev_trim_thread()
976 vd->vdev_trim_rate, vd->vdev_trim_partial, in vdev_trim_thread()
977 vd->vdev_trim_secure); in vdev_trim_thread()
980 ASSERT(vd->vdev_trim_thread != NULL || vd->vdev_trim_inflight[0] == 0); in vdev_trim_thread()
985 * check to see if it needs to restart a trim. That thread will be in vdev_trim_thread()
989 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_thread()
991 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_thread()
993 vd->vdev_trim_thread = NULL; in vdev_trim_thread()
994 cv_broadcast(&vd->vdev_trim_cv); in vdev_trim_thread()
995 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_thread()
1001 * Initiates a manual TRIM for the vdev_t. Callers must hold vdev_trim_lock,
1007 ASSERT(MUTEX_HELD(&vd->vdev_trim_lock)); in vdev_trim()
1008 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_trim()
1010 ASSERT3P(vd->vdev_trim_thread, ==, NULL); in vdev_trim()
1011 ASSERT(!vd->vdev_detached); in vdev_trim()
1012 ASSERT(!vd->vdev_trim_exit_wanted); in vdev_trim()
1013 ASSERT(!vd->vdev_top->vdev_removing); in vdev_trim()
1014 ASSERT(!vd->vdev_rz_expanding); in vdev_trim()
1017 vd->vdev_trim_thread = thread_create(NULL, 0, in vdev_trim()
1027 ASSERT(MUTEX_HELD(&vd->vdev_trim_lock)); in vdev_trim_stop_wait_impl()
1029 while (vd->vdev_trim_thread != NULL) in vdev_trim_stop_wait_impl()
1030 cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock); in vdev_trim_stop_wait_impl()
1032 ASSERT3P(vd->vdev_trim_thread, ==, NULL); in vdev_trim_stop_wait_impl()
1033 vd->vdev_trim_exit_wanted = B_FALSE; in vdev_trim_stop_wait_impl()
1037 * Wait for vdev trim threads which were listed to cleanly exit.
1046 spa->spa_export_thread == curthread); in vdev_trim_stop_wait()
1049 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_stop_wait()
1051 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_stop_wait()
1059 * required to call vdev_trim_stop_wait() to block for all the trim threads
1067 ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER)); in vdev_trim_stop()
1068 ASSERT(MUTEX_HELD(&vd->vdev_trim_lock)); in vdev_trim_stop()
1069 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_trim_stop()
1073 * Allow cancel requests to proceed even if the trim thread has in vdev_trim_stop()
1076 if (vd->vdev_trim_thread == NULL && tgt_state != VDEV_TRIM_CANCELED) in vdev_trim_stop()
1080 vd->vdev_trim_exit_wanted = B_TRUE; in vdev_trim_stop()
1086 vd->vdev_spa->spa_export_thread == curthread); in vdev_trim_stop()
1098 if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) { in vdev_trim_stop_all_impl()
1099 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_stop_all_impl()
1101 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_stop_all_impl()
1105 for (uint64_t i = 0; i < vd->vdev_children; i++) { in vdev_trim_stop_all_impl()
1106 vdev_trim_stop_all_impl(vd->vdev_child[i], tgt_state, in vdev_trim_stop_all_impl()
1112 * Convenience function to stop trimming of a vdev tree and set all trim
1118 spa_t *spa = vd->vdev_spa; in vdev_trim_stop_all()
1123 spa->spa_export_thread == curthread); in vdev_trim_stop_all()
1135 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in vdev_trim_stop_all()
1136 vd_l2cache = spa->spa_l2cache.sav_vdevs[i]; in vdev_trim_stop_all()
1142 if (vd->vdev_spa->spa_sync_on) { in vdev_trim_stop_all()
1144 txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0); in vdev_trim_stop_all()
1151 * Conditionally restarts a manual TRIM given its on-disk state.
1157 vd->vdev_spa->spa_load_thread == curthread); in vdev_trim_restart()
1158 ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER)); in vdev_trim_restart()
1160 if (vd->vdev_leaf_zap != 0) { in vdev_trim_restart()
1161 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_restart()
1163 int err = zap_lookup(vd->vdev_spa->spa_meta_objset, in vdev_trim_restart()
1164 vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE, in vdev_trim_restart()
1167 vd->vdev_trim_state = trim_state; in vdev_trim_restart()
1170 err = zap_lookup(vd->vdev_spa->spa_meta_objset, in vdev_trim_restart()
1171 vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_ACTION_TIME, in vdev_trim_restart()
1174 vd->vdev_trim_action_time = timestamp; in vdev_trim_restart()
1176 if ((vd->vdev_trim_state == VDEV_TRIM_SUSPENDED || in vdev_trim_restart()
1177 vd->vdev_offline) && !vd->vdev_top->vdev_rz_expanding) { in vdev_trim_restart()
1180 } else if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE && in vdev_trim_restart()
1181 vdev_writeable(vd) && !vd->vdev_top->vdev_removing && in vdev_trim_restart()
1182 !vd->vdev_top->vdev_rz_expanding && in vdev_trim_restart()
1183 vd->vdev_trim_thread == NULL) { in vdev_trim_restart()
1185 vdev_trim(vd, vd->vdev_trim_rate, in vdev_trim_restart()
1186 vd->vdev_trim_partial, vd->vdev_trim_secure); in vdev_trim_restart()
1189 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_restart()
1192 for (uint64_t i = 0; i < vd->vdev_children; i++) { in vdev_trim_restart()
1193 vdev_trim_restart(vd->vdev_child[i]); in vdev_trim_restart()
1198 * Used by the automatic TRIM when ZFS_DEBUG_TRIM is set to verify that
1199 * every TRIM range is contained within ms_allocatable.
1205 metaslab_t *msp = ta->trim_msp; in vdev_trim_range_verify()
1207 VERIFY3B(msp->ms_loaded, ==, B_TRUE); in vdev_trim_range_verify()
1208 VERIFY3U(msp->ms_disabled, >, 0); in vdev_trim_range_verify()
1209 VERIFY(zfs_range_tree_contains(msp->ms_allocatable, start, size)); in vdev_trim_range_verify()
1213 * Each automatic TRIM thread is responsible for managing the trimming of a
1214 * top-level vdev in the pool. No automatic TRIM state is maintained on-disk.
1216 * N.B. This behavior is different from a manual TRIM where a thread
1217 * is created for each leaf vdev, instead of each top-level vdev.
1223 spa_t *spa = vd->vdev_spa; in vdev_autotrim_thread()
1226 mutex_enter(&vd->vdev_autotrim_lock); in vdev_autotrim_thread()
1227 ASSERT3P(vd->vdev_top, ==, vd); in vdev_autotrim_thread()
1228 ASSERT3P(vd->vdev_autotrim_thread, !=, NULL); in vdev_autotrim_thread()
1229 mutex_exit(&vd->vdev_autotrim_lock); in vdev_autotrim_thread()
1243 * For example, when zfs_trim_txg_batch = 32 (default) then in vdev_autotrim_thread()
1259 * 2) Selecting non-consecutive metaslabs distributes the in vdev_autotrim_thread()
1260 * TRIM commands for a group evenly over the entire device. in vdev_autotrim_thread()
1263 for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count; in vdev_autotrim_thread()
1265 metaslab_t *msp = vd->vdev_ms[i]; in vdev_autotrim_thread()
1274 mutex_enter(&msp->ms_lock); in vdev_autotrim_thread()
1278 * or when there are no recent frees to trim. in vdev_autotrim_thread()
1280 if (msp->ms_sm == NULL || in vdev_autotrim_thread()
1281 zfs_range_tree_is_empty(msp->ms_trim)) { in vdev_autotrim_thread()
1282 mutex_exit(&msp->ms_lock); in vdev_autotrim_thread()
1289 * This may happen when a manual TRIM or initialize in vdev_autotrim_thread()
1291 * of a manual TRIM, the ms_trim tree will have been in vdev_autotrim_thread()
1292 * vacated. Only ranges added after the manual TRIM in vdev_autotrim_thread()
1294 * These will be processed when the automatic TRIM in vdev_autotrim_thread()
1297 if (msp->ms_disabled > 1) { in vdev_autotrim_thread()
1298 mutex_exit(&msp->ms_lock); in vdev_autotrim_thread()
1309 zfs_range_tree_swap(&msp->ms_trim, &trim_tree); in vdev_autotrim_thread()
1310 ASSERT(zfs_range_tree_is_empty(msp->ms_trim)); in vdev_autotrim_thread()
1313 * There are two cases when constructing the per-vdev in vdev_autotrim_thread()
1314 * trim trees for a metaslab. If the top-level vdev in vdev_autotrim_thread()
1317 * and a trim tree should be constructed for each. in vdev_autotrim_thread()
1320 uint64_t children = vd->vdev_children; in vdev_autotrim_thread()
1331 tap[c].trim_vdev = vd->vdev_child[c]; in vdev_autotrim_thread()
1337 vdev_t *cvd = ta->trim_vdev; in vdev_autotrim_thread()
1339 ta->trim_msp = msp; in vdev_autotrim_thread()
1340 ta->trim_extent_bytes_max = extent_bytes_max; in vdev_autotrim_thread()
1341 ta->trim_extent_bytes_min = extent_bytes_min; in vdev_autotrim_thread()
1342 ta->trim_type = TRIM_TYPE_AUTO; in vdev_autotrim_thread()
1343 ta->trim_flags = 0; in vdev_autotrim_thread()
1345 if (cvd->vdev_detached || in vdev_autotrim_thread()
1347 !cvd->vdev_has_trim || in vdev_autotrim_thread()
1348 cvd->vdev_trim_thread != NULL) { in vdev_autotrim_thread()
1359 if (!cvd->vdev_ops->vdev_op_leaf) in vdev_autotrim_thread()
1362 ta->trim_tree = zfs_range_tree_create(NULL, in vdev_autotrim_thread()
1368 mutex_exit(&msp->ms_lock); in vdev_autotrim_thread()
1372 * Issue the TRIM I/Os for all ranges covered by the in vdev_autotrim_thread()
1373 * TRIM trees. These ranges are safe to TRIM because in vdev_autotrim_thread()
1381 * Always yield to a manual TRIM if one has in vdev_autotrim_thread()
1384 if (ta->trim_tree == NULL || in vdev_autotrim_thread()
1385 ta->trim_vdev->vdev_trim_thread != NULL) { in vdev_autotrim_thread()
1394 * of the required TRIM I/Os. in vdev_autotrim_thread()
1408 mutex_enter(&msp->ms_lock); in vdev_autotrim_thread()
1413 mutex_exit(&msp->ms_lock); in vdev_autotrim_thread()
1420 * Wait for a couple of kicks, to ensure the trim I/O is in vdev_autotrim_thread()
1436 if (ta->trim_tree == NULL) in vdev_autotrim_thread()
1439 zfs_range_tree_vacate(ta->trim_tree, NULL, in vdev_autotrim_thread()
1441 zfs_range_tree_destroy(ta->trim_tree); in vdev_autotrim_thread()
1458 for (uint64_t c = 0; c < vd->vdev_children; c++) { in vdev_autotrim_thread()
1459 vdev_t *cvd = vd->vdev_child[c]; in vdev_autotrim_thread()
1460 mutex_enter(&cvd->vdev_trim_io_lock); in vdev_autotrim_thread()
1462 while (cvd->vdev_trim_inflight[1] > 0) { in vdev_autotrim_thread()
1463 cv_wait(&cvd->vdev_trim_io_cv, in vdev_autotrim_thread()
1464 &cvd->vdev_trim_io_lock); in vdev_autotrim_thread()
1466 mutex_exit(&cvd->vdev_trim_io_lock); in vdev_autotrim_thread()
1476 for (uint64_t i = 0; i < vd->vdev_ms_count; i++) { in vdev_autotrim_thread()
1477 metaslab_t *msp = vd->vdev_ms[i]; in vdev_autotrim_thread()
1479 mutex_enter(&msp->ms_lock); in vdev_autotrim_thread()
1480 zfs_range_tree_vacate(msp->ms_trim, NULL, NULL); in vdev_autotrim_thread()
1481 mutex_exit(&msp->ms_lock); in vdev_autotrim_thread()
1485 mutex_enter(&vd->vdev_autotrim_lock); in vdev_autotrim_thread()
1486 ASSERT(vd->vdev_autotrim_thread != NULL); in vdev_autotrim_thread()
1487 vd->vdev_autotrim_thread = NULL; in vdev_autotrim_thread()
1488 cv_broadcast(&vd->vdev_autotrim_cv); in vdev_autotrim_thread()
1489 mutex_exit(&vd->vdev_autotrim_lock); in vdev_autotrim_thread()
1495 * Starts an autotrim thread, if needed, for each top-level vdev which can be
1496 * trimmed. A top-level vdev which has been evacuated will never be trimmed.
1501 vdev_t *root_vd = spa->spa_root_vdev; in vdev_autotrim()
1503 for (uint64_t i = 0; i < root_vd->vdev_children; i++) { in vdev_autotrim()
1504 vdev_t *tvd = root_vd->vdev_child[i]; in vdev_autotrim()
1506 mutex_enter(&tvd->vdev_autotrim_lock); in vdev_autotrim()
1507 if (vdev_writeable(tvd) && !tvd->vdev_removing && in vdev_autotrim()
1508 tvd->vdev_autotrim_thread == NULL && in vdev_autotrim()
1509 !tvd->vdev_rz_expanding) { in vdev_autotrim()
1510 ASSERT3P(tvd->vdev_top, ==, tvd); in vdev_autotrim()
1512 tvd->vdev_autotrim_thread = thread_create(NULL, 0, in vdev_autotrim()
1515 ASSERT(tvd->vdev_autotrim_thread != NULL); in vdev_autotrim()
1517 mutex_exit(&tvd->vdev_autotrim_lock); in vdev_autotrim()
1522 * Wait for the vdev_autotrim_thread associated with the passed top-level
1528 mutex_enter(&tvd->vdev_autotrim_lock); in vdev_autotrim_stop_wait()
1529 if (tvd->vdev_autotrim_thread != NULL) { in vdev_autotrim_stop_wait()
1530 tvd->vdev_autotrim_exit_wanted = B_TRUE; in vdev_autotrim_stop_wait()
1531 cv_broadcast(&tvd->vdev_autotrim_kick_cv); in vdev_autotrim_stop_wait()
1532 cv_wait(&tvd->vdev_autotrim_cv, in vdev_autotrim_stop_wait()
1533 &tvd->vdev_autotrim_lock); in vdev_autotrim_stop_wait()
1535 ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL); in vdev_autotrim_stop_wait()
1536 tvd->vdev_autotrim_exit_wanted = B_FALSE; in vdev_autotrim_stop_wait()
1538 mutex_exit(&tvd->vdev_autotrim_lock); in vdev_autotrim_stop_wait()
1546 vdev_t *root_vd = spa->spa_root_vdev; in vdev_autotrim_kick()
1549 for (uint64_t i = 0; i < root_vd->vdev_children; i++) { in vdev_autotrim_kick()
1550 tvd = root_vd->vdev_child[i]; in vdev_autotrim_kick()
1552 mutex_enter(&tvd->vdev_autotrim_lock); in vdev_autotrim_kick()
1553 if (tvd->vdev_autotrim_thread != NULL) in vdev_autotrim_kick()
1554 cv_broadcast(&tvd->vdev_autotrim_kick_cv); in vdev_autotrim_kick()
1555 mutex_exit(&tvd->vdev_autotrim_lock); in vdev_autotrim_kick()
1566 vdev_t *root_vd = spa->spa_root_vdev; in vdev_autotrim_stop_all()
1568 for (uint64_t i = 0; i < root_vd->vdev_children; i++) in vdev_autotrim_stop_all()
1569 vdev_autotrim_stop_wait(root_vd->vdev_child[i]); in vdev_autotrim_stop_all()
1579 spa->spa_load_thread == curthread); in vdev_autotrim_restart()
1580 if (spa->spa_autotrim) in vdev_autotrim_restart()
1588 spa_t *spa = vd->vdev_spa; in vdev_trim_l2arc_thread()
1596 vd->vdev_trim_last_offset = 0; in vdev_trim_l2arc_thread()
1597 vd->vdev_trim_rate = 0; in vdev_trim_l2arc_thread()
1598 vd->vdev_trim_partial = 0; in vdev_trim_l2arc_thread()
1599 vd->vdev_trim_secure = 0; in vdev_trim_l2arc_thread()
1608 physical_rs.rs_start = vd->vdev_trim_bytes_done = 0; in vdev_trim_l2arc_thread()
1609 physical_rs.rs_end = vd->vdev_trim_bytes_est = in vdev_trim_l2arc_thread()
1613 physical_rs.rs_end - physical_rs.rs_start); in vdev_trim_l2arc_thread()
1615 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_l2arc_thread()
1617 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_l2arc_thread()
1622 mutex_enter(&vd->vdev_trim_io_lock); in vdev_trim_l2arc_thread()
1623 while (vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] > 0) { in vdev_trim_l2arc_thread()
1624 cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock); in vdev_trim_l2arc_thread()
1626 mutex_exit(&vd->vdev_trim_io_lock); in vdev_trim_l2arc_thread()
1631 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_l2arc_thread()
1632 if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) { in vdev_trim_l2arc_thread()
1634 vd->vdev_trim_rate, vd->vdev_trim_partial, in vdev_trim_l2arc_thread()
1635 vd->vdev_trim_secure); in vdev_trim_l2arc_thread()
1637 ASSERT(vd->vdev_trim_thread != NULL || in vdev_trim_l2arc_thread()
1638 vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] == 0); in vdev_trim_l2arc_thread()
1643 * must check to see if it needs to restart a trim. That thread in vdev_trim_l2arc_thread()
1648 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_l2arc_thread()
1649 txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0); in vdev_trim_l2arc_thread()
1650 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_l2arc_thread()
1658 spa_config_enter(vd->vdev_spa, SCL_L2ARC, vd, in vdev_trim_l2arc_thread()
1660 memset(dev->l2ad_dev_hdr, 0, dev->l2ad_dev_hdr_asize); in vdev_trim_l2arc_thread()
1662 spa_config_exit(vd->vdev_spa, SCL_L2ARC, vd); in vdev_trim_l2arc_thread()
1664 vd->vdev_trim_thread = NULL; in vdev_trim_l2arc_thread()
1665 if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE) in vdev_trim_l2arc_thread()
1666 dev->l2ad_trim_all = B_FALSE; in vdev_trim_l2arc_thread()
1668 cv_broadcast(&vd->vdev_trim_cv); in vdev_trim_l2arc_thread()
1669 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_l2arc_thread()
1675 * Punches out TRIM threads for the L2ARC devices in a spa and assigns them
1676 * to the vd->vdev_trim_thread variable. This facilitates the management of in vdev_trim_l2arc()
1686 * Locate the spa's l2arc devices and kick off TRIM threads. in vdev_trim_l2arc()
1688 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in vdev_trim_l2arc()
1689 vdev_t *vd = spa->spa_l2cache.sav_vdevs[i]; in vdev_trim_l2arc()
1692 if (dev == NULL || !dev->l2ad_trim_all) { in vdev_trim_l2arc()
1694 * Don't attempt TRIM if the vdev is UNAVAIL or if the in vdev_trim_l2arc()
1695 * cache device was not marked for whole device TRIM in vdev_trim_l2arc()
1703 mutex_enter(&vd->vdev_trim_lock); in vdev_trim_l2arc()
1704 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_trim_l2arc()
1706 ASSERT3P(vd->vdev_trim_thread, ==, NULL); in vdev_trim_l2arc()
1707 ASSERT(!vd->vdev_detached); in vdev_trim_l2arc()
1708 ASSERT(!vd->vdev_trim_exit_wanted); in vdev_trim_l2arc()
1709 ASSERT(!vd->vdev_top->vdev_removing); in vdev_trim_l2arc()
1711 vd->vdev_trim_thread = thread_create(NULL, 0, in vdev_trim_l2arc()
1713 mutex_exit(&vd->vdev_trim_lock); in vdev_trim_l2arc()
1731 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_trim_simple()
1732 ASSERT(!vd->vdev_detached); in vdev_trim_simple()
1733 ASSERT(!vd->vdev_top->vdev_removing); in vdev_trim_simple()
1734 ASSERT(!vd->vdev_top->vdev_rz_expanding); in vdev_trim_simple()
1747 physical_rs.rs_end - physical_rs.rs_start); in vdev_trim_simple()
1754 mutex_enter(&vd->vdev_trim_io_lock); in vdev_trim_simple()
1755 while (vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE] > 0) { in vdev_trim_simple()
1756 cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock); in vdev_trim_simple()
1758 mutex_exit(&vd->vdev_trim_io_lock); in vdev_trim_simple()
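A hedged usage sketch of vdev_trim_simple() (offsets illustrative only): synchronously TRIM one region of a leaf vdev and check the result:

/* TRIM 16MiB starting 1MiB into the vdev's usable space. */
int err = vdev_trim_simple(vd, 1ULL << 20, 16ULL << 20);
if (err != 0)
	zfs_dbgmsg("simple TRIM of %s failed: %d", vd->vdev_path, err);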
1779 "Max size of TRIM commands, larger will be split");
1782 "Min size of TRIM commands, smaller will be skipped");
1788 "Min number of txgs to aggregate frees before issuing TRIM");