
 * or https://opensource.org/licenses/CDDL-1.0.
 * DVA-based Adjustable Replacement Cache
 * based on the self-tuning, low overhead replacement cache
 * subset of the blocks in the cache are un-evictable because we
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * - L2ARC buflist creation
 * - L2ARC buflist eviction
 * - L2ARC write completion, which walks L2ARC buflists
 * - ARC header destruction, as it removes from L2ARC buflists
 * - ARC header release, as it removes from L2ARC buflists
 *   (a locking sketch for these buflist operations follows below)
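/*
 * A minimal userspace sketch of the locking rule above: every walk or
 * mutation of an L2ARC buflist happens under the device's list lock
 * (l2ad_mtx in the real code).  The types and names below are
 * illustrative assumptions, not the kernel implementation.
 */
#include <pthread.h>
#include <stddef.h>

struct buf_node {
	struct buf_node *next;
	int evicted;
};

struct l2dev {
	pthread_mutex_t lock;		/* stands in for l2ad_mtx */
	struct buf_node *buflist;	/* stands in for l2ad_buflist */
};

/* Eviction walk: unlink evicted headers while holding the lock. */
static void
l2dev_evict_walk(struct l2dev *dev)
{
	pthread_mutex_lock(&dev->lock);
	for (struct buf_node **np = &dev->buflist; *np != NULL; ) {
		if ((*np)->evicted)
			*np = (*np)->next;	/* unlink */
		else
			np = &(*np)->next;
	}
	pthread_mutex_unlock(&dev->lock);
}

int
main(void)
{
	struct buf_node b = { NULL, 1 }, a = { &b, 0 };
	struct l2dev dev = { PTHREAD_MUTEX_INITIALIZER, &a };

	l2dev_evict_walk(&dev);		/* unlinks b, keeps a */
	return (dev.buflist != &a);
}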
 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
 * it will match its on-disk compression characteristics. This behavior can be
 * uncompressed version of the on-disk data.
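/*
 * A compilable sketch of the dual representation described above: one
 * header owns the (possibly compressed) physical copy, while each
 * arc_buf_t may hold its own uncompressed copy.  Field names follow the
 * comment, but the struct layout here is an illustrative assumption,
 * not the real arc_buf_hdr_t.
 */
#include <stdint.h>
#include <stddef.h>

typedef struct sketch_buf {
	struct sketch_buf *b_next;	/* next buf sharing this header */
	void *b_data;			/* uncompressed (or shared) bytes */
	int b_compressed;		/* does b_data match on-disk bytes? */
} sketch_buf_t;

typedef struct sketch_hdr {
	void *b_pabd;			/* physical copy, on-disk compression */
	uint64_t psize, lsize;		/* physical and logical sizes */
	sketch_buf_t *b_buf;		/* list of user-visible buffers */
} sketch_hdr_t;

int
main(void)
{
	sketch_buf_t buf = { NULL, NULL, 0 };
	sketch_hdr_t hdr = { NULL, 4096, 16384, &buf };

	return (hdr.b_buf != &buf);
}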
 * [ASCII diagram, partially lost in extraction: an arc_buf_hdr_t (with
 * its L1/L2 state, b_buf, and b_pabd pointing at the compressed on-disk
 * copy) heads a b_next-linked list of arc_buf_t's -- one compressed buf
 * (b_comp = T) whose b_data shares the hdr's compressed data, and one
 * uncompressed buf (b_comp = F) holding its own uncompressed copy.]
 * [ASCII diagram, partially lost in extraction: the uncompressed case --
 * the hdr's b_pabd holds uncompressed data, and the last arc_buf_t in
 * the b_buf list shares that data (arrow back to b_pabd) rather than
 * owning a separate copy.]
 * with the transformed data and will memcpy the transformed on-disk block into
 * to the on-disk block in the main data pool. This provides a significant
 * possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
 * data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
 * when reading a new block into the ARC, we will evict an equal-sized block
 * data hits on target data/metadata rate.
 * These tunables are Linux-specific
	x = x - x / ARCSTAT_F_AVG_FACTOR + \
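/*
 * The line above is the heart of the ARC's filtered-average kstats: an
 * exponential moving average with weight 1/ARCSTAT_F_AVG_FACTOR.  A
 * runnable demonstration (the factor value of 3 matches arc.c; the
 * driver loop below is illustrative):
 */
#include <stdint.h>
#include <stdio.h>

#define	ARCSTAT_F_AVG_FACTOR	3

static uint64_t
f_avg(uint64_t x, uint64_t value)
{
	/* new = old - old/F + value/F, i.e. old * (1 - 1/F) + value/F */
	return (x - x / ARCSTAT_F_AVG_FACTOR +
	    value / ARCSTAT_F_AVG_FACTOR);
}

int
main(void)
{
	uint64_t avg = 0;

	for (int i = 0; i < 20; i++)
		avg = f_avg(avg, 3000);	/* converges toward ~3000 */
	printf("filtered average: %llu\n", (unsigned long long)avg);
	return (0);
}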
 * There are several ARC variables that are critical to export as kstats --
#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
	((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
	((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define	HDR_UNCACHED(hdr)	((hdr)->b_flags & ARC_FLAG_UNCACHED)
	(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define	HDR_PROTECTED(hdr)	((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define	HDR_NOAUTH(hdr)		((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define	HDR_SHARED_DATA(hdr)	((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
	((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define	HDR_HAS_L1HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define	HDR_HAS_L2HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
	(hdr)->b_crypt_hdr.b_rabd != NULL)
	(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
	(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define	HDR_COMPRESS_OFFSET	(highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define	HDR_GET_COMPRESS(hdr)	((enum zio_compress)BF32_GET((hdr)->b_flags, \
#define	HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
#define	ARC_BUF_LAST(buf)	((buf)->b_next == NULL)
#define	ARC_BUF_SHARED(buf)	((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define	ARC_BUF_COMPRESSED(buf)	((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define	ARC_BUF_ENCRYPTED(buf)	((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
#define	BUF_HASH_LOCK(idx)	(&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
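/*
 * BUF_HASH_LOCK above picks one of BUF_LOCKS stripe locks by masking the
 * hash index; this only works because the lock count is a power of two.
 * A runnable sketch of the idiom (the stripe count here is an
 * illustrative assumption):
 */
#include <stdint.h>
#include <stdio.h>

#define	NUM_LOCKS	256	/* must be a power of two */

static unsigned
lock_index(uint64_t hash)
{
	/* idx & (n - 1) == idx % n when n is a power of two */
	return ((unsigned)(hash & (NUM_LOCKS - 1)));
}

int
main(void)
{
	printf("stripe %u\n", lock_index(0xdeadbeefcafeULL));
	return (0);
}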
/* in buf_hash(): */
	return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
/* HDR_EMPTY / HDR_EQUAL macro bodies: */
	((hdr)->b_dva.dva_word[0] == 0 && \
	(hdr)->b_dva.dva_word[1] == 0)
	((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
	((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
	((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
/* in buf_discard_identity(): */
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
/* in buf_hash_find(): */
	    hdr = hdr->b_hash_next) {
/* in buf_hash_insert(): */
	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
	ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
	ASSERT(hdr->b_birth != 0);
	    fhdr = fhdr->b_hash_next, i++) {
		if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
	hdr->b_hash_next = buf_hash_table.ht_table[idx];
/* in buf_hash_remove(): */
	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
		hdrp = &fhdr->b_hash_next;
	*hdrp = hdr->b_hash_next;
	hdr->b_hash_next = NULL;
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
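/*
 * buf_hash_remove() above uses the classic pointer-to-pointer unlink:
 * walk with a pointer to the "next" slot so removing the head needs no
 * special case.  A self-contained illustration of the idiom:
 */
#include <assert.h>
#include <stddef.h>

struct node { struct node *next; int key; };

static void
list_remove(struct node **headp, int key)
{
	for (struct node **np = headp; *np != NULL; np = &(*np)->next) {
		if ((*np)->key == key) {
			*np = (*np)->next;	/* unlink, head or middle */
			return;
		}
	}
}

int
main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct node *head = &a;

	list_remove(&head, 1);		/* removing the head just works */
	assert(head == &b);
	return (0);
}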
 * Constructor callback - called when the cache is empty
/* in hdr_full_cons(): */
	hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
	zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
	mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	multilist_link_init(&hdr->b_l1hdr.b_arc_node);
	list_link_init(&hdr->b_l2hdr.b_l2node);
 * Destructor callback - called when a cached buf is
/* in hdr_full_dest(): */
	zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
	mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/* in buf_init(): */
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	buf_hash_table.ht_mask = hsize - 1;
	for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
		*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
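/*
 * The two lines above build the reflected CRC-64 lookup table used by
 * the buf hash machinery.  A standalone rendering of the same loop; the
 * polynomial value shown is the reflected ECMA-182 form that ZFS uses
 * for ZFS_CRC64_POLY (stated here as an assumption of this sketch):
 */
#include <stdint.h>
#include <stdio.h>

#define	CRC64_POLY	0xC96C5795D7870F42ULL	/* reflected ECMA-182 */

static uint64_t crc64_table[256];

static void
crc64_init(void)
{
	for (int i = 0; i < 256; i++) {
		uint64_t ct = (uint64_t)i;

		/* -(ct & 1) is an all-ones mask when the low bit is set */
		for (int j = 8; j > 0; j--)
			ct = (ct >> 1) ^ (-(ct & 1) & CRC64_POLY);
		crc64_table[i] = ct;
	}
}

int
main(void)
{
	crc64_init();
	printf("table[1] = %016llx\n", (unsigned long long)crc64_table[1]);
	return (0);
}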
/* in arc_buf_size(): */
	    HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
/* in arc_buf_lsize(): */
	return (HDR_GET_LSIZE(buf->b_hdr));
/* in arc_is_unauthenticated(): */
	return (HDR_NOAUTH(buf->b_hdr) != 0);
/* in arc_get_raw_params(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	memcpy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
	memcpy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
	memcpy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
	*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
/* in arc_get_compression(): */
	    HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
 * as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
/* in arc_get_complevel(): */
	return (buf->b_hdr->b_complevel);
/* in arc_buf_is_shared(): */
	boolean_t shared = (buf->b_data != NULL &&
	    buf->b_hdr->b_l1hdr.b_pabd != NULL &&
	    abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
	    buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
	IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
 * is a no-op.
/* in arc_cksum_free(): */
	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
	if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
		kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_l1hdr.b_freeze_cksum = NULL;
	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
/* in arc_hdr_has_uncompressed_buf(): */
	ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));
	for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
 * or if the buf is compressed, this is a no-op.
/* in arc_cksum_verify(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
	if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
	fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
/* in arc_cksum_is_equal(): */
	ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
	VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
	return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
	    BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
	    zio->io_offset, NULL) == 0);
 * on the hdr, this is a no-op (we only checksum uncompressed bufs).
/* in arc_cksum_compute(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
	if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
	hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
	fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
	    hdr->b_l1hdr.b_freeze_cksum);
	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
/* in arc_buf_sigsegv(): */
	panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
/* in arc_buf_unwatch() and arc_buf_watch(): */
	ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
	ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
/* in arc_buf_type(): */
	VERIFY3U(hdr->b_type, ==, type);
/* in arc_is_metadata(): */
	return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
/* in arc_bufc_to_flags(): */
	return ((uint32_t)-1);
/* in arc_buf_thaw(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
/* in arc_buf_freeze(): */
	ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
 * updated in a thread-safe way. When manipulating the flags either
/* in arc_hdr_set_flags(): */
	hdr->b_flags |= flags;
/* in arc_hdr_clear_flags(): */
	hdr->b_flags &= ~flags;
 * thread-safe manner.
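/*
 * A userspace sketch of the rule above: b_flags is a plain bitfield, so
 * every read-modify-write happens while a lock covering the header is
 * held (the hash lock in the real code).  Types and names below are
 * illustrative assumptions.
 */
#include <pthread.h>
#include <stdint.h>

struct hdr {
	pthread_mutex_t hash_lock;	/* stands in for the hash lock */
	uint32_t flags;
};

static void
hdr_set_flags(struct hdr *h, uint32_t f)
{
	pthread_mutex_lock(&h->hash_lock);
	h->flags |= f;			/* non-atomic, hence the lock */
	pthread_mutex_unlock(&h->hash_lock);
}

static void
hdr_clear_flags(struct hdr *h, uint32_t f)
{
	pthread_mutex_lock(&h->hash_lock);
	h->flags &= ~f;
	pthread_mutex_unlock(&h->hash_lock);
}

int
main(void)
{
	struct hdr h = { PTHREAD_MUTEX_INITIALIZER, 0 };

	hdr_set_flags(&h, 0x4);
	hdr_clear_flags(&h, 0x4);
	return ((int)h.flags);
}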
/* in arc_buf_try_copy_decompressed_data(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	ASSERT3P(buf->b_data, !=, NULL);
	for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
	    from = from->b_next) {
		memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
	EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
 * Allocates an ARC buf header that's in an evicted & L2-cached state.
 * which circumvent the regular disk->arc->l2arc path and instead come
 * into being in the reverse order, i.e. l2arc->arc.
/* in arc_buf_alloc_l2only(): */
	ASSERT(dev->l2ad_vdev != NULL);
	hdr->b_birth = birth;
	hdr->b_type = type;
	hdr->b_flags = 0;
	hdr->b_complevel = complevel;
	hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
	hdr->b_dva = dva;
	hdr->b_l2hdr.b_dev = dev;
	hdr->b_l2hdr.b_daddr = daddr;
	hdr->b_l2hdr.b_arcs_state = arcs_state;
/* in arc_hdr_authenticate(): */
	abd_t *abd = hdr->b_l1hdr.b_pabd;
	    hdr->b_l1hdr.b_pabd, &abd, lsize, MIN(lsize, psize),
	    hdr->b_complevel);
		abd_zero_off(abd, csize, psize - csize);
	if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
		    psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
		    hdr->b_crypt_hdr.b_mac);
/* in arc_hdr_decrypt(): */
	boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
	ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
	    B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
	    hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
	    hdr->b_crypt_hdr.b_rabd, &no_crypt);
		abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
		    hdr->b_l1hdr.b_pabd, cabd, HDR_GET_PSIZE(hdr),
		    HDR_GET_LSIZE(hdr), &hdr->b_complevel);
		arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
		hdr->b_l1hdr.b_pabd = cabd;
/* in arc_fill_hdr_crypt(): */
		ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
	} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/* in arc_buf_untransform_in_place(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
	ASSERT3PF(hdr->b_l1hdr.b_pabd, !=, NULL, "hdr %px buf %px", hdr, buf);
	zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
	buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
	buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
 * the correct-sized data buffer.
/* in arc_buf_fill(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
	ASSERT3P(buf->b_data, !=, NULL);
		abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
		 * be decrypted in-place. This is necessary because there may
		 * arises for other types to be decrypted in-place, they must
		ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
		abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
			buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
			buf->b_data =
			arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
			buf->b_data =
			    HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
		buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
		abd_get_from_buf_struct(&dabd, buf->b_data,
		    hdr->b_l1hdr.b_pabd, &dabd,
		    &hdr->b_complevel);
		dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
/* in arc_untransform(): */
		spa_log_error(spa, zb, buf->b_hdr->b_birth);
/* in arc_evictable_space_increment(): */
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
/* in arc_evictable_space_decrement(): */
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
/* in add_reference(): */
	arc_state_t *state = hdr->b_l1hdr.b_state;
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
		/* We don't use the L2-only state list. */
		multilist_remove(&state->arcs_list[arc_buf_type(hdr)], hdr);
/* in remove_reference(): */
	arc_state_t *state = hdr->b_l1hdr.b_state;
	if ((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) != 0)
		multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
/* in arc_buf_info(): */
	arc_buf_hdr_t *hdr = ab->b_hdr;
	abi->abi_flags = hdr->b_flags;
		l1hdr = &hdr->b_l1hdr;
		state = l1hdr->b_state;
		l2hdr = &hdr->b_l2hdr;
		abi->abi_bufcnt = 0;
		for (arc_buf_t *buf = l1hdr->b_buf; buf; buf = buf->b_next)
			abi->abi_bufcnt++;
		abi->abi_access = l1hdr->b_arc_access;
		abi->abi_mru_hits = l1hdr->b_mru_hits;
		abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
		abi->abi_mfu_hits = l1hdr->b_mfu_hits;
		abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
		abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
		abi->abi_l2arc_dattr = l2hdr->b_daddr;
		abi->abi_l2arc_hits = l2hdr->b_hits;
	abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
	abi->abi_state_contents = arc_buf_type(hdr);
	abi->abi_size = arc_hdr_size(hdr);
/* in arc_change_state(): */
	old_state = hdr->b_l1hdr.b_state;
	refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
	update_old = (hdr->b_l1hdr.b_buf != NULL ||
	    hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
	IMPLY(GHOST_STATE(old_state), hdr->b_l1hdr.b_buf == NULL);
	IMPLY(GHOST_STATE(new_state), hdr->b_l1hdr.b_buf == NULL);
	IMPLY(old_state == arc_anon, hdr->b_l1hdr.b_buf == NULL ||
	    ARC_BUF_LAST(hdr->b_l1hdr.b_buf));
		if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
			multilist_remove(&old_state->arcs_list[type],
		 * moving to some L1-cached state (i.e. not l2c_only or
			multilist_insert(&new_state->arcs_list[type], hdr);
			    &new_state->arcs_size[type],
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
			for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
			    buf = buf->b_next) {
				    &new_state->arcs_size[type],
			if (hdr->b_l1hdr.b_pabd != NULL) {
				    &new_state->arcs_size[type],
				    &new_state->arcs_size[type],
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
			    &old_state->arcs_size[type],
			for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
			    buf = buf->b_next) {
				    &old_state->arcs_size[type],
			ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
			if (hdr->b_l1hdr.b_pabd != NULL) {
				    &old_state->arcs_size[type],
				    &old_state->arcs_size[type],
	hdr->b_l1hdr.b_state = new_state;
		hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
/* in arc_space_consume(): */
	 * very short-lived.
/* in arc_space_return(): */
	ARCSTAT_INCR(arcstat_data_size, -space);
	ARCSTAT_INCR(arcstat_metadata_size, -space);
	ARCSTAT_INCR(arcstat_bonus_size, -space);
	ARCSTAT_INCR(arcstat_dnode_size, -space);
	ARCSTAT_INCR(arcstat_dbuf_size, -space);
	ARCSTAT_INCR(arcstat_hdr_size, -space);
	aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
	ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
	ARCSTAT_INCR(arcstat_meta_used, -space);
	aggsum_add(&arc_sums.arcstat_size, -space);
/* in arc_can_share(): */
	ASSERT3P(buf->b_hdr, ==, hdr);
	    hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
/* in arc_buf_alloc_impl(): */
	VERIFY(hdr->b_type == ARC_BUFC_DATA ||
	    hdr->b_type == ARC_BUFC_METADATA);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_next = hdr->b_l1hdr.b_buf;
	buf->b_flags = 0;
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
		buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
	 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
	 * need to be ABD-aware. It must be allocated via
	    hdr->b_l1hdr.b_pabd != NULL &&
	    abd_is_linear(hdr->b_l1hdr.b_pabd) &&
	    !abd_is_linear_page(hdr->b_l1hdr.b_pabd);
		buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
		buf->b_flags |= ARC_BUF_FLAG_SHARED;
		buf->b_data =
	VERIFY3P(buf->b_data, !=, NULL);
	hdr->b_l1hdr.b_buf = buf;
/* in arc_return_buf(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	ASSERT3P(buf->b_data, !=, NULL);
	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
	arc_loaned_bytes_update(-arc_buf_size(buf));
/* in arc_loan_inuse_buf(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	ASSERT3P(buf->b_data, !=, NULL);
	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
/* in l2arc_free_abd_on_write(): */
	df->l2df_abd = abd;
	df->l2df_size = size;
	df->l2df_type = type;
/* in arc_hdr_free_on_write(): */
	arc_state_t *state = hdr->b_l1hdr.b_state;
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
	(void) zfs_refcount_remove_many(&state->arcs_size[type], size, hdr);
		l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
		l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
/* in arc_share_buf(): */
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	    &hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
	hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
	abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
	buf->b_flags |= ARC_BUF_FLAG_SHARED;
	ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
/* in arc_unshare_buf(): */
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	    &hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
	abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
	abd_free(hdr->b_l1hdr.b_pabd);
	hdr->b_l1hdr.b_pabd = NULL;
	buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
/* in arc_buf_remove(): */
	arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
			*bufp = buf->b_next;
			bufp = &(*bufp)->b_next;
	buf->b_next = NULL;
 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
/* in arc_buf_destroy_impl(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	if (buf->b_data != NULL) {
			arc_free_data_buf(hdr, buf->b_data, size, buf);
			ARCSTAT_INCR(arcstat_overhead_size, -size);
		buf->b_data = NULL;
	    hdr->b_l1hdr.b_pabd != NULL && !HDR_IO_IN_PROGRESS(hdr)) {
		for (b = hdr->b_l1hdr.b_buf; b; b = b->b_next) {
		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	buf->b_hdr = NULL;
/* in arc_hdr_alloc_abd(): */
		ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
		hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
		ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/* in arc_hdr_free_abd(): */
	ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
		arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
		arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
		hdr->b_crypt_hdr.b_rabd = NULL;
		ARCSTAT_INCR(arcstat_raw_size, -size);
		hdr->b_l1hdr.b_pabd = NULL;
	if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
		hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
	ARCSTAT_INCR(arcstat_compressed_size, -size);
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
/* in arc_hdr_alloc(): */
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
	hdr->b_spa = spa;
	hdr->b_type = type;
	hdr->b_flags = 0;
	hdr->b_complevel = complevel;
	hdr->b_l1hdr.b_state = arc_anon;
	hdr->b_l1hdr.b_arc_access = 0;
	hdr->b_l1hdr.b_mru_hits = 0;
	hdr->b_l1hdr.b_mru_ghost_hits = 0;
	hdr->b_l1hdr.b_mfu_hits = 0;
	hdr->b_l1hdr.b_mfu_ghost_hits = 0;
	hdr->b_l1hdr.b_buf = NULL;
	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
/* in arc_hdr_realloc(): */
	l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
		nhdr->b_l1hdr.b_state = arc_l2c_only;
		ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	 * The header has been reallocated so we need to re-insert it into any
	ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
	mutex_enter(&dev->l2ad_mtx);
	list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
	list_remove(&dev->l2ad_buflist, hdr);
	mutex_exit(&dev->l2ad_mtx);
	(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
	(void) zfs_refcount_add_many(&dev->l2ad_alloc,
/* in arc_convert_to_raw(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
	hdr->b_crypt_hdr.b_dsobj = dsobj;
	hdr->b_crypt_hdr.b_ot = ot;
	hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
		memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
		memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
		memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/* in arc_alloc_raw_buf(): */
	hdr->b_crypt_hdr.b_dsobj = dsobj;
	hdr->b_crypt_hdr.b_ot = ot;
	hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
	memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
	memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
	memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/* in l2arc_hdr_arcstats_update(): */
	arc_buf_contents_t type = hdr->b_type;
		lsize_s = -lsize;
		psize_s = -psize;
		asize_s = -asize;
	switch (hdr->b_l2hdr.b_arcs_state) {
/* in arc_hdr_l2hdr_destroy(): */
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	l2arc_dev_t *dev = l2hdr->b_dev;
	ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
	list_remove(&dev->l2ad_buflist, hdr);
	if (dev->l2ad_vdev != NULL) {
		vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
	(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
/* in arc_hdr_destroy(): */
	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
		l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
		boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
			mutex_enter(&dev->l2ad_mtx);
		 * want to re-destroy the header's L2 portion.
			mutex_exit(&dev->l2ad_mtx);
	while (hdr->b_l1hdr.b_buf != NULL)
		arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
	if (hdr->b_l1hdr.b_pabd != NULL)
	ASSERT3P(hdr->b_hash_next, ==, NULL);
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
		ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/* in arc_buf_destroy(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
	ASSERT3P(hdr, ==, buf->b_hdr);
	ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
	ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
	ASSERT3P(buf->b_data, !=, NULL);
 * - arc_mru -> arc_mru_ghost
 * - arc_mfu -> arc_mfu_ghost
 * - arc_mru_ghost -> arc_l2c_only
 * - arc_mru_ghost -> deleted
 * - arc_mfu_ghost -> arc_l2c_only
 * - arc_mfu_ghost -> deleted
 * - arc_uncached -> deleted
 *   (a transition-table sketch follows below)
 * progress at the same (or at least comparable) rate as from non-ghost states.
 * waiting for it. For non-ghost states it includes size of evicted data
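/*
 * A compact sketch of the legal eviction transitions listed above,
 * written as a transition table.  The state names mirror the comment;
 * the enum encoding and table form are illustrative, not arc.c code.
 */
#include <stdio.h>

enum st { MRU, MFU, MRU_GHOST, MFU_GHOST, UNCACHED, L2C_ONLY, DELETED };

static const struct { enum st from, to; } evict_transitions[] = {
	{ MRU,		MRU_GHOST },
	{ MFU,		MFU_GHOST },
	{ MRU_GHOST,	L2C_ONLY },	/* header still cached in L2ARC */
	{ MRU_GHOST,	DELETED },
	{ MFU_GHOST,	L2C_ONLY },
	{ MFU_GHOST,	DELETED },
	{ UNCACHED,	DELETED },
};

int
main(void)
{
	printf("%zu legal eviction transitions\n",
	    sizeof (evict_transitions) / sizeof (evict_transitions[0]));
	return (0);
}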
/* in arc_evict_hdr(): */
	ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
	state = hdr->b_l1hdr.b_state;
		ASSERT(hdr->b_l1hdr.b_pabd == NULL);
		 * dropping from L1+L2 cached to L2-only,
		*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
	if ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
	    ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
	if (l2arc_write_eligible(hdr->b_spa, hdr)) {
		switch (state->arcs_state) {
	if (hdr->b_l1hdr.b_pabd != NULL)
/* in arc_set_need_free(): */
	int64_t remaining = arc_free_memory() - arc_sys_free / 2;
		arc_need_free = MAX(-remaining, 0);
		    MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
/* in arc_evict_state_impl(): */
	 * (only 'marker' will be removed and re-inserted).
		if (hdr->b_spa == 0)
		if (spa != 0 && hdr->b_spa != spa) {
			evict_count--;
		    aw->aew_count <= arc_evict_count) {
			cv_broadcast(&aw->aew_cv);
	 * if the average cached block is small), eviction can be on-CPU for
/* in arc_state_alloc_marker(): */
	marker->b_spa = 0;
/* in arc_evict_state(): */
	multilist_t *ml = &state->arcs_list[type];
		bytes_remaining = bytes - total_evicted;
/* in arc_flush_state(): */
	while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
/* in arc_evict_impl(): */
	if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
		delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
 * decreasing it, plus a balance factor, controlling the decrease rate, used
/* in arc_evict_adj(): */
	s = MIN(64 - s, 32);
	uint64_t ofrac = (1ULL << 32) - frac;
	up = (up << s) / (total >> (32 - s));
	down = (down << s) / (total >> (32 - s));
	return (frac + up - down);
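/*
 * arc_evict_adj() above nudges a 32.32 fixed-point fraction up or down
 * in proportion to ghost-hit counts.  A simplified, runnable rendering
 * of the scaling trick: the shift 's' keeps both divisions in range
 * regardless of 'total'.  Sample inputs are made up; highbit64() is
 * re-derived here with a GCC/Clang builtin.
 */
#include <stdint.h>
#include <stdio.h>

static int
highbit64(uint64_t x)
{
	return (x == 0 ? 0 : 64 - __builtin_clzll(x));
}

static uint64_t
evict_adj(uint64_t frac, uint64_t total, uint64_t up, uint64_t down)
{
	int s = highbit64(total);

	s = (64 - s < 32) ? 64 - s : 32;
	/* Scale hit counts into the 32.32 fraction domain w/o overflow. */
	up = (up << s) / (total >> (32 - s));
	down = (down << s) / (total >> (32 - s));
	return (frac + up - down);
}

int
main(void)
{
	uint64_t half = 1ULL << 31;	/* frac = 0.5 in 32.32 fixed point */

	printf("new frac = %llu\n", (unsigned long long)
	    evict_adj(half, 1ULL << 30, 1000, 200));
	return (0);
}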
/* in arc_evict(): */
	mrud = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]);
	mrum = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) +
	    zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
	mfud = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
	mfum = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
	ngrd = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]);
	uint64_t grd = ngrd - ogrd;
	ngrm = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]);
	uint64_t grm = ngrm - ogrm;
	ngfd = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]);
	uint64_t gfd = ngfd - ogfd;
	ngfm = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]);
	uint64_t gfm = ngfm - ogfm;
	int64_t wt = t - (asize - ac);
	int64_t nem = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA])
	    + zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA])
	    - zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA])
	    - zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
		prune = arc_mf(prune, nem - w * 3 / 4, w / 4);
		prune = MAX(prune, (dn - arc_dnode_limit) / sizeof (dnode_t) *
	e = MIN((int64_t)(asize - ac), (int64_t)(mrum - w));
	mrum -= bytes;
	asize -= bytes;
	e = MIN((int64_t)(asize - ac), (int64_t)(m - bytes - w));
	mfum -= bytes;
	asize -= bytes;
	wt -= m - total_evicted;
	e = MIN((int64_t)(asize - ac), (int64_t)(mrud - w));
	mrud -= bytes;
	asize -= bytes;
	e = asize - ac;
	mfud -= bytes;
	e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]) -
	e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]) -
	e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]) -
	e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]) -
/* in arc_async_flush_add(): */
	af->af_spa_guid = spa_guid;
	af->af_cache_level = level;
	taskq_init_ent(&af->af_tqent);
	list_link_init(&af->af_node);
/* in arc_async_flush_remove(): */
		if (af->af_spa_guid == spa_guid &&
		    af->af_cache_level == level) {
/* in arc_flush_task(): */
	uint64_t spa_guid = af->af_spa_guid;
	arc_async_flush_remove(spa_guid, af->af_cache_level);
	uint64_t elapsed = NSEC2MSEC(gethrtime() - start_time);
 * It's OK if the spa is re-imported while this asynchronous flush is
/* in arc_flush_async(): */
	    af, TQ_SLEEP, &af->af_tqent);
 * Check if a guid is still in-use as part of an async teardown task
/* in arc_async_flush_guid_inuse(): */
		if (af->af_spa_guid == spa_guid) {
/* in arc_reduce_target_size(): */
	to_free = MIN(to_free, c - arc_c_min);
	arc_c = c - to_free;
/* in arc_kmem_reap_soon(): */
	/* reach upper limit of cache size on 32-bit */
/* in arc_evict_cb_check(): */
		arc_ksp->ks_update(arc_ksp, KSTAT_READ);
	return ((zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]) &&
	    ddi_get_lbolt() - arc_last_uncached_flush >
/* in arc_evict_cb(): */
		cv_broadcast(&aw->aew_cv);
/* in arc_reap_cb_check(): */
	 * becoming implicitly blocked by a system-wide kmem reap -- which,
/* in arc_reap_cb(): */
	can_free = arc_c - arc_c_min;
	to_free = (MAX(can_free, 0) >> arc_shrink_shift) - free_memory;
/* in arc_is_overflowing(): */
	int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) - arc_c -
/* in arc_wait_for_eviction(): */
			last_count = last->aew_count;
/* in arc_get_data_impl(): */
	arc_state_t *state = hdr->b_l1hdr.b_state;
	(void) zfs_refcount_add_many(&state->arcs_size[type], size,
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
/* in arc_free_data_impl(): */
	arc_state_t *state = hdr->b_l1hdr.b_state;
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
	(void) zfs_refcount_remove_many(&state->arcs_size[type], size, tag);
	VERIFY3U(hdr->b_type, ==, type);
/* in arc_access(): */
	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT0(hdr->b_l1hdr.b_arc_access);
		hdr->b_l1hdr.b_arc_access = now;
	} else if (hdr->b_l1hdr.b_state == arc_mru) {
			hdr->b_l1hdr.b_arc_access = now;
		hdr->b_l1hdr.b_mru_hits++;
			hdr->b_l1hdr.b_arc_access = now;
		if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
			hdr->b_l1hdr.b_arc_access = now;
	} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
		hdr->b_l1hdr.b_mru_ghost_hits++;
		hdr->b_l1hdr.b_arc_access = now;
		wmsum_add(&arc_mru_ghost->arcs_hits[arc_buf_type(hdr)],
	} else if (hdr->b_l1hdr.b_state == arc_mfu) {
		hdr->b_l1hdr.b_mfu_hits++;
		hdr->b_l1hdr.b_arc_access = now;
	} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
		hdr->b_l1hdr.b_mfu_ghost_hits++;
		hdr->b_l1hdr.b_arc_access = now;
		wmsum_add(&arc_mfu_ghost->arcs_hits[arc_buf_type(hdr)],
	} else if (hdr->b_l1hdr.b_state == arc_uncached) {
		hdr->b_l1hdr.b_arc_access = now;
	} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
		hdr->b_l1hdr.b_arc_access = now;
		    hdr->b_l1hdr.b_state);
/* in arc_buf_access(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr))
	if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
	ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
	    hdr->b_l1hdr.b_state == arc_mfu ||
	    hdr->b_l1hdr.b_state == arc_uncached);
/* in arc_bcopy_func(): */
	memcpy(arg, buf->b_data, arc_buf_size(buf));
/* in arc_getbuf_func(): */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(zio == NULL || zio->io_error == 0);
		ASSERT(buf->b_data != NULL);
/* in arc_read_done(): */
	blkptr_t *bp = zio->io_bp;
	arc_buf_hdr_t *hdr = zio->io_private;
	 * The hdr was inserted into hash-table and removed from lists
		ASSERT3U(hdr->b_birth, ==, BP_GET_BIRTH(zio->io_bp));
		ASSERT3U(hdr->b_dva.dva_word[0], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[0]);
		ASSERT3U(hdr->b_dva.dva_word[1], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[1]);
		found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
		    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
		hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
		hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
		zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
		    hdr->b_crypt_hdr.b_iv);
		if (zio->io_error == 0) {
				tmpbuf = abd_borrow_buf_copy(zio->io_abd,
				    hdr->b_crypt_hdr.b_mac);
				abd_return_buf(zio->io_abd, tmpbuf,
				    hdr->b_crypt_hdr.b_mac);
	if (zio->io_error == 0) {
		if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
			if (BP_GET_LEVEL(zio->io_bp) > 0) {
				hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
				hdr->b_l1hdr.b_byteswap =
				    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
			hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
		hdr->b_complevel = zio->io_prop.zp_complevel;
	callback_list = hdr->b_l1hdr.b_acb;
	hdr->b_l1hdr.b_acb = NULL;
	for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
		if (!acb->acb_done || acb->acb_nobuf)
		if (zio->io_error != 0)
		int error = arc_buf_alloc_impl(hdr, zio->io_spa,
		    &acb->acb_zb, acb->acb_private, acb->acb_encrypted,
		    acb->acb_compressed, acb->acb_noauth, B_TRUE,
		    &acb->acb_buf);
		 * Assert non-speculative zios didn't fail because an
		ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
			if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
				spa_log_error(zio->io_spa, &acb->acb_zb,
				    BP_GET_LOGICAL_BIRTH(zio->io_bp));
				    zio->io_spa, NULL, &acb->acb_zb, zio, 0);
			zio->io_error = error;
	if (zio->io_error == 0) {
		arc_hdr_verify(hdr, zio->io_bp);
		if (hdr->b_l1hdr.b_state != arc_anon)
		if (acb->acb_done != NULL) {
			if (zio->io_error != 0 && acb->acb_buf != NULL) {
				arc_buf_destroy(acb->acb_buf,
				    acb->acb_private);
				acb->acb_buf = NULL;
			acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
			    acb->acb_buf, acb->acb_private);
		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		callback_list = acb->acb_prev;
		if (acb->acb_wait) {
			mutex_enter(&acb->acb_wait_lock);
			acb->acb_wait_error = zio->io_error;
			acb->acb_wait = B_FALSE;
			cv_signal(&acb->acb_wait_cv);
			mutex_exit(&acb->acb_wait_lock);
/* in arc_cached(): */
	arc_state_t *state = hdr->b_l1hdr.b_state;
	 * more compile-time checking.
	switch (state->arcs_state) {
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
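/*
 * A userspace sketch of "wait for the in-progress read": the waiter
 * registers a record with a wait flag set, then sleeps on a condition
 * variable until the I/O completion path clears it (compare the
 * acb_wait_cv handling in arc_read() and arc_read_done()).  Names and
 * types below are illustrative assumptions.
 */
#include <pthread.h>

struct read_waiter {
	pthread_mutex_t lock;
	pthread_cond_t cv;
	int in_progress;
	int error;
};

/* Called by the thread that wants the block. */
static int
wait_for_read(struct read_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	while (w->in_progress)
		pthread_cond_wait(&w->cv, &w->lock);
	int err = w->error;
	pthread_mutex_unlock(&w->lock);
	return (err);
}

/* Called by the I/O completion path. */
static void
read_done(struct read_waiter *w, int error)
{
	pthread_mutex_lock(&w->lock);
	w->error = error;
	w->in_progress = 0;
	pthread_cond_signal(&w->cv);
	pthread_mutex_unlock(&w->lock);
}

int
main(void)
{
	struct read_waiter w = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, 0
	};

	read_done(&w, 0);	/* normally runs from the zio done path */
	return (wait_for_read(&w));
}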
/* in arc_read(): */
	    (hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
		zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
		if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
			 * an in-flight async read. Request that the
			acb->acb_done = done;
			acb->acb_private = private;
			acb->acb_compressed = compressed_read;
			acb->acb_encrypted = encrypted_read;
			acb->acb_noauth = noauth_read;
			acb->acb_nobuf = no_buf;
				acb->acb_wait = B_TRUE;
				mutex_init(&acb->acb_wait_lock, NULL,
				cv_init(&acb->acb_wait_cv, NULL,
			acb->acb_zb = *zb;
				acb->acb_zio_dummy = zio_null(pio,
			acb->acb_zio_head = head_zio;
			acb->acb_next = hdr->b_l1hdr.b_acb;
			hdr->b_l1hdr.b_acb->acb_prev = acb;
			hdr->b_l1hdr.b_acb = acb;
				mutex_enter(&acb->acb_wait_lock);
				while (acb->acb_wait) {
					cv_wait(&acb->acb_wait_cv,
					    &acb->acb_wait_lock);
				rc = acb->acb_wait_error;
				mutex_exit(&acb->acb_wait_lock);
				mutex_destroy(&acb->acb_wait_lock);
				cv_destroy(&acb->acb_wait_cv);
		ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
		    hdr->b_l1hdr.b_state == arc_mfu ||
		    hdr->b_l1hdr.b_state == arc_uncached);
				spa_log_error(spa, zb, hdr->b_birth);
			hdr->b_dva = *BP_IDENTITY(bp);
			hdr->b_birth = BP_GET_BIRTH(bp);
			 * L2-only (and thus didn't have an L1 hdr),
			if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
				ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
				    &hdr->b_l1hdr.b_refcnt));
				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
				ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
				acb->acb_wait = B_TRUE;
				mutex_init(&acb->acb_wait_lock, NULL,
				cv_init(&acb->acb_wait_cv, NULL, CV_DEFAULT,
				acb->acb_zio_head =
				    hdr->b_l1hdr.b_acb->acb_zio_head;
				acb->acb_next = hdr->b_l1hdr.b_acb;
				hdr->b_l1hdr.b_acb->acb_prev = acb;
				hdr->b_l1hdr.b_acb = acb;
				mutex_enter(&acb->acb_wait_lock);
				while (acb->acb_wait) {
					cv_wait(&acb->acb_wait_cv,
					    &acb->acb_wait_lock);
				mutex_exit(&acb->acb_wait_lock);
				mutex_destroy(&acb->acb_wait_lock);
				cv_destroy(&acb->acb_wait_cv);
			hdr_abd = hdr->b_crypt_hdr.b_rabd;
			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			hdr_abd = hdr->b_l1hdr.b_pabd;
		ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
		acb->acb_done = done;
		acb->acb_private = private;
		acb->acb_compressed = compressed_read;
		acb->acb_encrypted = encrypted_read;
		acb->acb_noauth = noauth_read;
		acb->acb_zb = *zb;
		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
		hdr->b_l1hdr.b_acb = acb;
		    (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
			devw = hdr->b_l2hdr.b_dev->l2ad_writing;
			addr = hdr->b_l2hdr.b_daddr;
			 * the read IO is still in-flight.
		    spa->spa_l2cache.sav_count > 0;
				hdr->b_l2hdr.b_hits++;
				cb->l2rcb_hdr = hdr;
				cb->l2rcb_bp = *bp;
				cb->l2rcb_zb = *zb;
				cb->l2rcb_flags = zio_flags;
					cb->l2rcb_abd = abd;
				    addr + asize <= vd->vdev_psize -
					acb->acb_zio_head = rzio;
		 * faulted cache device - that's also a miss.)
			acb->acb_zio_head = rzio;
		zio->io_error = rc;
/* in arc_add_prune_callback(): */
	p->p_pfunc = func;
	p->p_private = private;
	list_link_init(&p->p_node);
	zfs_refcount_create(&p->p_refcnt);
	zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
/* in arc_remove_prune_callback(): */
	if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
	ASSERT0(zfs_refcount_count(&p->p_refcnt));
	zfs_refcount_destroy(&p->p_refcnt);
/* in arc_prune_task(): */
	arc_prune_func_t *func = ap->p_pfunc;
		func(ap->p_adjust, ap->p_private);
	(void) zfs_refcount_remove(&ap->p_refcnt, func);
/* in arc_prune_async(): */
		if (zfs_refcount_count(&ap->p_refcnt) >= 2)
		zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
		ap->p_adjust = adjust;
			(void) zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
/* in arc_freed(): */
	 * (i.e. prefetch) or has some other reference (i.e. a dedup-ed,
	 * dmu_sync-ed block). A block may also have a reference if it is
	 * part of a dedup-ed, dmu_synced write. The dmu_sync() function would
	    zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
/* in arc_release(): */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
		ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		hdr->b_l1hdr.b_arc_access = 0;
	arc_state_t *state = hdr->b_l1hdr.b_state;
	ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
		mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
		mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
	if (hdr->b_l1hdr.b_buf != buf || !ARC_BUF_LAST(buf)) {
		uint64_t spa = hdr->b_spa;
		VERIFY3U(hdr->b_type, ==, type);
		ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
			ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
			ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
				abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
				    buf->b_data, psize);
			VERIFY3P(lastbuf->b_data, !=, NULL);
		ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
		(void) zfs_refcount_remove_many(&state->arcs_size[type],
		if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
			    &state->arcs_esize[type],
		    compress, hdr->b_complevel, type);
		ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
		VERIFY3U(nhdr->b_type, ==, type);
		nhdr->b_l1hdr.b_buf = buf;
		(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
		buf->b_hdr = nhdr;
		(void) zfs_refcount_add_many(&arc_anon->arcs_size[type],
		ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		hdr->b_l1hdr.b_mru_hits = 0;
		hdr->b_l1hdr.b_mru_ghost_hits = 0;
		hdr->b_l1hdr.b_mfu_hits = 0;
		hdr->b_l1hdr.b_mfu_ghost_hits = 0;
		hdr->b_l1hdr.b_arc_access = 0;
/* in arc_released(): */
	return (buf->b_data != NULL &&
	    buf->b_hdr->b_l1hdr.b_state == arc_anon);
/* in arc_referenced(): */
	return (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
6595 arc_write_callback_t *callback = zio->io_private; in arc_write_ready()
6596 arc_buf_t *buf = callback->awcb_buf; in arc_write_ready()
6597 arc_buf_hdr_t *hdr = buf->b_hdr; in arc_write_ready()
6598 blkptr_t *bp = zio->io_bp; in arc_write_ready()
6603 ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt)); in arc_write_ready()
6604 ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL); in arc_write_ready()
6611 if (zio->io_flags & ZIO_FLAG_REEXECUTED) { in arc_write_ready()
6614 if (hdr->b_l1hdr.b_pabd != NULL) { in arc_write_ready()
6626 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); in arc_write_ready()
6631 callback->awcb_ready(zio, buf, callback->awcb_private); in arc_write_ready()
6634 ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED); in arc_write_ready()
6646 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64; in arc_write_ready()
6648 hdr->b_l1hdr.b_byteswap = in arc_write_ready()
6652 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; in arc_write_ready()
6656 hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp); in arc_write_ready()
6657 hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset; in arc_write_ready()
6658 zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt, in arc_write_ready()
6659 hdr->b_crypt_hdr.b_iv); in arc_write_ready()
6660 zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac); in arc_write_ready()
6671 buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED; in arc_write_ready()
6673 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; in arc_write_ready()
6675 buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED; in arc_write_ready()
6676 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; in arc_write_ready()
6691 hdr->b_complevel = zio->io_prop.zp_complevel; in arc_write_ready()
6693 if (zio->io_error != 0 || psize == 0) in arc_write_ready()
6715 abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize); in arc_write_ready()
6728 abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize); in arc_write_ready()
6733 abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize); in arc_write_ready()
6735 ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr)); in arc_write_ready()
6737 abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data, in arc_write_ready()
6741 ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd)); in arc_write_ready()
6742 ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf)); in arc_write_ready()
6743 ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf); in arc_write_ready()
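/*
 * Note on the abd_copy()/abd_copy_from_buf() calls above (a reading of
 * the visible code, not new behavior): the header keeps its own image
 * of the written bytes - raw ciphertext in b_crypt_hdr.b_rabd for
 * encrypted writes, the physical (possibly compressed) image in
 * b_l1hdr.b_pabd otherwise - so the ARC can keep serving this block
 * after the write zio and its buffers are retired.
 */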
6757 arc_write_callback_t *callback = zio->io_private; in arc_write_children_ready()
6758 arc_buf_t *buf = callback->awcb_buf; in arc_write_children_ready()
6760 callback->awcb_children_ready(zio, buf, callback->awcb_private); in arc_write_children_ready()
6766 arc_write_callback_t *callback = zio->io_private; in arc_write_done()
6767 arc_buf_t *buf = callback->awcb_buf; in arc_write_done()
6768 arc_buf_hdr_t *hdr = buf->b_hdr; in arc_write_done()
6770 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); in arc_write_done()
6772 if (zio->io_error == 0) { in arc_write_done()
6773 arc_hdr_verify(hdr, zio->io_bp); in arc_write_done()
6775 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { in arc_write_done()
6778 hdr->b_dva = *BP_IDENTITY(zio->io_bp); in arc_write_done()
6779 hdr->b_birth = BP_GET_BIRTH(zio->io_bp); in arc_write_done()
6786 * If the block to be written was all-zero or compressed enough to be in arc_write_done()
6795 ASSERT3U(zio->io_error, ==, 0); in arc_write_done()
6803 * sync-to-convergence, because we remove in arc_write_done()
6806 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { in arc_write_done()
6807 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) in arc_write_done()
6811 &exists->b_l1hdr.b_refcnt)); in arc_write_done()
6817 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { in arc_write_done()
6819 ASSERT(zio->io_prop.zp_nopwrite); in arc_write_done()
6820 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) in arc_write_done()
6825 ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL); in arc_write_done()
6826 ASSERT(ARC_BUF_LAST(hdr->b_l1hdr.b_buf)); in arc_write_done()
6827 ASSERT(hdr->b_l1hdr.b_state == arc_anon); in arc_write_done()
6828 ASSERT(BP_GET_DEDUP(zio->io_bp)); in arc_write_done()
6829 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); in arc_write_done()
6835 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) in arc_write_done()
6843 callback->awcb_done(zio, buf, callback->awcb_private); in arc_write_done()
6845 abd_free(zio->io_abd); in arc_write_done()
6857 arc_buf_hdr_t *hdr = buf->b_hdr; in arc_write()
6866 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); in arc_write()
6867 ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL); in arc_write()
6877 localprop.zp_complevel = hdr->b_complevel; in arc_write()
6879 (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ? in arc_write()
6881 memcpy(localprop.zp_salt, hdr->b_crypt_hdr.b_salt, in arc_write()
6883 memcpy(localprop.zp_iv, hdr->b_crypt_hdr.b_iv, in arc_write()
6885 memcpy(localprop.zp_mac, hdr->b_crypt_hdr.b_mac, in arc_write()
6890 MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1); in arc_write()
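/*
 * Background for the MIN() above: encrypted block pointers repurpose
 * the third DVA slot to carry the salt and IV, so an encrypted block
 * can hold at most SPA_DVAS_PER_BP - 1 (i.e. 2) copies.
 */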
6896 localprop.zp_complevel = hdr->b_complevel; in arc_write()
6900 callback->awcb_ready = ready; in arc_write()
6901 callback->awcb_children_ready = children_ready; in arc_write()
6902 callback->awcb_done = done; in arc_write()
6903 callback->awcb_private = private; in arc_write()
6904 callback->awcb_buf = buf; in arc_write()
6910 if (hdr->b_l1hdr.b_pabd != NULL) { in arc_write()
6923 VERIFY3P(buf->b_data, !=, NULL); in arc_write()
6933 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); in arc_write()
6936 abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)), in arc_write()
6947 atomic_add_64(&arc_tempreserve, -reserve); in arc_tempreserve_clear()
6981 (zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]) + in arc_tempreserve_space()
6982 zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]) - in arc_tempreserve_space()
7018 &arc_anon->arcs_esize[ARC_BUFC_METADATA]); in arc_tempreserve_space()
7020 zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]); in arc_tempreserve_space()
7041 data->value.ui64 = in arc_kstat_update_state()
7042 zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]); in arc_kstat_update_state()
7043 metadata->value.ui64 = in arc_kstat_update_state()
7044 zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]); in arc_kstat_update_state()
7045 size->value.ui64 = data->value.ui64 + metadata->value.ui64; in arc_kstat_update_state()
7046 evict_data->value.ui64 = in arc_kstat_update_state()
7047 zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]); in arc_kstat_update_state()
7048 evict_metadata->value.ui64 = in arc_kstat_update_state()
7049 zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]); in arc_kstat_update_state()
7055 arc_stats_t *as = ksp->ks_data; in arc_kstat_update()
7060 as->arcstat_hits.value.ui64 = in arc_kstat_update()
7062 as->arcstat_iohits.value.ui64 = in arc_kstat_update()
7064 as->arcstat_misses.value.ui64 = in arc_kstat_update()
7066 as->arcstat_demand_data_hits.value.ui64 = in arc_kstat_update()
7068 as->arcstat_demand_data_iohits.value.ui64 = in arc_kstat_update()
7070 as->arcstat_demand_data_misses.value.ui64 = in arc_kstat_update()
7072 as->arcstat_demand_metadata_hits.value.ui64 = in arc_kstat_update()
7074 as->arcstat_demand_metadata_iohits.value.ui64 = in arc_kstat_update()
7076 as->arcstat_demand_metadata_misses.value.ui64 = in arc_kstat_update()
7078 as->arcstat_prefetch_data_hits.value.ui64 = in arc_kstat_update()
7080 as->arcstat_prefetch_data_iohits.value.ui64 = in arc_kstat_update()
7082 as->arcstat_prefetch_data_misses.value.ui64 = in arc_kstat_update()
7084 as->arcstat_prefetch_metadata_hits.value.ui64 = in arc_kstat_update()
7086 as->arcstat_prefetch_metadata_iohits.value.ui64 = in arc_kstat_update()
7088 as->arcstat_prefetch_metadata_misses.value.ui64 = in arc_kstat_update()
7090 as->arcstat_mru_hits.value.ui64 = in arc_kstat_update()
7092 as->arcstat_mru_ghost_hits.value.ui64 = in arc_kstat_update()
7094 as->arcstat_mfu_hits.value.ui64 = in arc_kstat_update()
7096 as->arcstat_mfu_ghost_hits.value.ui64 = in arc_kstat_update()
7098 as->arcstat_uncached_hits.value.ui64 = in arc_kstat_update()
7100 as->arcstat_deleted.value.ui64 = in arc_kstat_update()
7102 as->arcstat_mutex_miss.value.ui64 = in arc_kstat_update()
7104 as->arcstat_access_skip.value.ui64 = in arc_kstat_update()
7106 as->arcstat_evict_skip.value.ui64 = in arc_kstat_update()
7108 as->arcstat_evict_not_enough.value.ui64 = in arc_kstat_update()
7110 as->arcstat_evict_l2_cached.value.ui64 = in arc_kstat_update()
7112 as->arcstat_evict_l2_eligible.value.ui64 = in arc_kstat_update()
7114 as->arcstat_evict_l2_eligible_mfu.value.ui64 = in arc_kstat_update()
7116 as->arcstat_evict_l2_eligible_mru.value.ui64 = in arc_kstat_update()
7118 as->arcstat_evict_l2_ineligible.value.ui64 = in arc_kstat_update()
7120 as->arcstat_evict_l2_skip.value.ui64 = in arc_kstat_update()
7122 as->arcstat_hash_elements.value.ui64 = in arc_kstat_update()
7123 as->arcstat_hash_elements_max.value.ui64 = in arc_kstat_update()
7125 as->arcstat_hash_collisions.value.ui64 = in arc_kstat_update()
7127 as->arcstat_hash_chains.value.ui64 = in arc_kstat_update()
7129 as->arcstat_size.value.ui64 = in arc_kstat_update()
7131 as->arcstat_compressed_size.value.ui64 = in arc_kstat_update()
7133 as->arcstat_uncompressed_size.value.ui64 = in arc_kstat_update()
7135 as->arcstat_overhead_size.value.ui64 = in arc_kstat_update()
7137 as->arcstat_hdr_size.value.ui64 = in arc_kstat_update()
7139 as->arcstat_data_size.value.ui64 = in arc_kstat_update()
7141 as->arcstat_metadata_size.value.ui64 = in arc_kstat_update()
7143 as->arcstat_dbuf_size.value.ui64 = in arc_kstat_update()
7146 as->arcstat_other_size.value.ui64 = in arc_kstat_update()
7153 &as->arcstat_anon_size, in arc_kstat_update()
7154 &as->arcstat_anon_data, in arc_kstat_update()
7155 &as->arcstat_anon_metadata, in arc_kstat_update()
7156 &as->arcstat_anon_evictable_data, in arc_kstat_update()
7157 &as->arcstat_anon_evictable_metadata); in arc_kstat_update()
7159 &as->arcstat_mru_size, in arc_kstat_update()
7160 &as->arcstat_mru_data, in arc_kstat_update()
7161 &as->arcstat_mru_metadata, in arc_kstat_update()
7162 &as->arcstat_mru_evictable_data, in arc_kstat_update()
7163 &as->arcstat_mru_evictable_metadata); in arc_kstat_update()
7165 &as->arcstat_mru_ghost_size, in arc_kstat_update()
7166 &as->arcstat_mru_ghost_data, in arc_kstat_update()
7167 &as->arcstat_mru_ghost_metadata, in arc_kstat_update()
7168 &as->arcstat_mru_ghost_evictable_data, in arc_kstat_update()
7169 &as->arcstat_mru_ghost_evictable_metadata); in arc_kstat_update()
7171 &as->arcstat_mfu_size, in arc_kstat_update()
7172 &as->arcstat_mfu_data, in arc_kstat_update()
7173 &as->arcstat_mfu_metadata, in arc_kstat_update()
7174 &as->arcstat_mfu_evictable_data, in arc_kstat_update()
7175 &as->arcstat_mfu_evictable_metadata); in arc_kstat_update()
7177 &as->arcstat_mfu_ghost_size, in arc_kstat_update()
7178 &as->arcstat_mfu_ghost_data, in arc_kstat_update()
7179 &as->arcstat_mfu_ghost_metadata, in arc_kstat_update()
7180 &as->arcstat_mfu_ghost_evictable_data, in arc_kstat_update()
7181 &as->arcstat_mfu_ghost_evictable_metadata); in arc_kstat_update()
7183 &as->arcstat_uncached_size, in arc_kstat_update()
7184 &as->arcstat_uncached_data, in arc_kstat_update()
7185 &as->arcstat_uncached_metadata, in arc_kstat_update()
7186 &as->arcstat_uncached_evictable_data, in arc_kstat_update()
7187 &as->arcstat_uncached_evictable_metadata); in arc_kstat_update()
7189 as->arcstat_dnode_size.value.ui64 = in arc_kstat_update()
7191 as->arcstat_bonus_size.value.ui64 = in arc_kstat_update()
7193 as->arcstat_l2_hits.value.ui64 = in arc_kstat_update()
7195 as->arcstat_l2_misses.value.ui64 = in arc_kstat_update()
7197 as->arcstat_l2_prefetch_asize.value.ui64 = in arc_kstat_update()
7199 as->arcstat_l2_mru_asize.value.ui64 = in arc_kstat_update()
7201 as->arcstat_l2_mfu_asize.value.ui64 = in arc_kstat_update()
7203 as->arcstat_l2_bufc_data_asize.value.ui64 = in arc_kstat_update()
7205 as->arcstat_l2_bufc_metadata_asize.value.ui64 = in arc_kstat_update()
7207 as->arcstat_l2_feeds.value.ui64 = in arc_kstat_update()
7209 as->arcstat_l2_rw_clash.value.ui64 = in arc_kstat_update()
7211 as->arcstat_l2_read_bytes.value.ui64 = in arc_kstat_update()
7213 as->arcstat_l2_write_bytes.value.ui64 = in arc_kstat_update()
7215 as->arcstat_l2_writes_sent.value.ui64 = in arc_kstat_update()
7217 as->arcstat_l2_writes_done.value.ui64 = in arc_kstat_update()
7219 as->arcstat_l2_writes_error.value.ui64 = in arc_kstat_update()
7221 as->arcstat_l2_writes_lock_retry.value.ui64 = in arc_kstat_update()
7223 as->arcstat_l2_evict_lock_retry.value.ui64 = in arc_kstat_update()
7225 as->arcstat_l2_evict_reading.value.ui64 = in arc_kstat_update()
7227 as->arcstat_l2_evict_l1cached.value.ui64 = in arc_kstat_update()
7229 as->arcstat_l2_free_on_write.value.ui64 = in arc_kstat_update()
7231 as->arcstat_l2_abort_lowmem.value.ui64 = in arc_kstat_update()
7233 as->arcstat_l2_cksum_bad.value.ui64 = in arc_kstat_update()
7235 as->arcstat_l2_io_error.value.ui64 = in arc_kstat_update()
7237 as->arcstat_l2_lsize.value.ui64 = in arc_kstat_update()
7239 as->arcstat_l2_psize.value.ui64 = in arc_kstat_update()
7241 as->arcstat_l2_hdr_size.value.ui64 = in arc_kstat_update()
7243 as->arcstat_l2_log_blk_writes.value.ui64 = in arc_kstat_update()
7245 as->arcstat_l2_log_blk_asize.value.ui64 = in arc_kstat_update()
7247 as->arcstat_l2_log_blk_count.value.ui64 = in arc_kstat_update()
7249 as->arcstat_l2_rebuild_success.value.ui64 = in arc_kstat_update()
7251 as->arcstat_l2_rebuild_abort_unsupported.value.ui64 = in arc_kstat_update()
7253 as->arcstat_l2_rebuild_abort_io_errors.value.ui64 = in arc_kstat_update()
7255 as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 = in arc_kstat_update()
7257 as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 = in arc_kstat_update()
7259 as->arcstat_l2_rebuild_abort_lowmem.value.ui64 = in arc_kstat_update()
7261 as->arcstat_l2_rebuild_size.value.ui64 = in arc_kstat_update()
7263 as->arcstat_l2_rebuild_asize.value.ui64 = in arc_kstat_update()
7265 as->arcstat_l2_rebuild_bufs.value.ui64 = in arc_kstat_update()
7267 as->arcstat_l2_rebuild_bufs_precached.value.ui64 = in arc_kstat_update()
7269 as->arcstat_l2_rebuild_log_blks.value.ui64 = in arc_kstat_update()
7271 as->arcstat_memory_throttle_count.value.ui64 = in arc_kstat_update()
7273 as->arcstat_memory_direct_count.value.ui64 = in arc_kstat_update()
7275 as->arcstat_memory_indirect_count.value.ui64 = in arc_kstat_update()
7278 as->arcstat_memory_all_bytes.value.ui64 = in arc_kstat_update()
7280 as->arcstat_memory_free_bytes.value.ui64 = in arc_kstat_update()
7282 as->arcstat_memory_available_bytes.value.i64 = in arc_kstat_update()
7285 as->arcstat_prune.value.ui64 = in arc_kstat_update()
7287 as->arcstat_meta_used.value.ui64 = in arc_kstat_update()
7289 as->arcstat_async_upgrade_sync.value.ui64 = in arc_kstat_update()
7291 as->arcstat_predictive_prefetch.value.ui64 = in arc_kstat_update()
7293 as->arcstat_demand_hit_predictive_prefetch.value.ui64 = in arc_kstat_update()
7295 as->arcstat_demand_iohit_predictive_prefetch.value.ui64 = in arc_kstat_update()
7297 as->arcstat_prescient_prefetch.value.ui64 = in arc_kstat_update()
7299 as->arcstat_demand_hit_prescient_prefetch.value.ui64 = in arc_kstat_update()
7301 as->arcstat_demand_iohit_prescient_prefetch.value.ui64 = in arc_kstat_update()
7303 as->arcstat_raw_size.value.ui64 = in arc_kstat_update()
7305 as->arcstat_cached_only_in_progress.value.ui64 = in arc_kstat_update()
7307 as->arcstat_abd_chunk_waste_size.value.ui64 = in arc_kstat_update()
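/*
 * Usage note: arc_kstat_update() is installed as the kstat ks_update
 * hook in arc_init() below; on Linux the resulting kstat is what
 * userspace reads as /proc/spl/kstat/zfs/arcstats (e.g. through the
 * arcstat utility).
 */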
7345 return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % in arc_state_multilist_index_func()
7367 * updated manually. Non-zero zfs_* values which differ from the currently set
7375 /* Valid range: 32M - <arc_c_max> */ in arc_tuning_update()
7384 /* Valid range: 64M - <all physical memory> */ in arc_tuning_update()
7395 /* Valid range: 0 - <all physical memory> */ in arc_tuning_update()
7400 /* Valid range: 1 - N */ in arc_tuning_update()
7404 /* Valid range: 1 - N */ in arc_tuning_update()
7407 arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1); in arc_tuning_update()
7410 /* Valid range: 1 - N ms */ in arc_tuning_update()
7414 /* Valid range: 1 - N ms */ in arc_tuning_update()
7420 /* Valid range: 0 - 100 */ in arc_tuning_update()
7426 /* Valid range: 0 - <all physical memory> */ in arc_tuning_update()
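/*
 * Example (Linux; assumes the usual module-parameter plumbing): these
 * zfs_arc_* tunables may be changed on a live system, e.g.
 *
 *	echo $((8 * 1024 * 1024 * 1024)) > \
 *	    /sys/module/zfs/parameters/zfs_arc_max
 *
 * after which arc_tuning_update() folds the new value into arc_c_max,
 * subject to the valid ranges noted above.
 */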
7446 arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7448 arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7450 arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7452 arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7454 arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7456 arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7458 arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7460 arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7462 arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7464 arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7471 arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7473 arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7478 * any ARC state. The markers will be pre-allocated so as to minimize in arc_state_init()
7483 zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7484 zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7485 zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7486 zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7487 zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7488 zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7489 zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7490 zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7491 zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7492 zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7493 zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7494 zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7495 zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7496 zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7498 zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7499 zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7500 zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7501 zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7502 zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7503 zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7504 zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7505 zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7506 zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7507 zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7508 zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7509 zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7510 zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7511 zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7513 wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA], 0); in arc_state_init()
7514 wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA], 0); in arc_state_init()
7515 wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA], 0); in arc_state_init()
7516 wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA], 0); in arc_state_init()
7617 arc_anon->arcs_state = ARC_STATE_ANON; in arc_state_init()
7618 arc_mru->arcs_state = ARC_STATE_MRU; in arc_state_init()
7619 arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST; in arc_state_init()
7620 arc_mfu->arcs_state = ARC_STATE_MFU; in arc_state_init()
7621 arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST; in arc_state_init()
7622 arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY; in arc_state_init()
7623 arc_uncached->arcs_state = ARC_STATE_UNCACHED; in arc_state_init()
7629 zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7630 zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7631 zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7632 zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7633 zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7634 zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7635 zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7636 zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7637 zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7638 zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7639 zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7640 zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7641 zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7642 zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7644 zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7645 zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7646 zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7647 zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7648 zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7649 zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7650 zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7651 zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7652 zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7653 zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7654 zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7655 zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7656 zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7657 zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7659 multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7660 multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7661 multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7662 multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7663 multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7664 multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7665 multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7666 multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7667 multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7668 multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7669 multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7670 multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7672 wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]); in arc_state_fini()
7673 wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]); in arc_state_fini()
7674 wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]); in arc_state_fini()
7675 wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]); in arc_state_fini()
7811 * If zfs_arc_max is non-zero at init, meaning it was set in the kernel in arc_init()
7837 * 32-bit fixed point fractions of metadata from total ARC size, in arc_init()
7879 arc_ksp->ks_data = &arc_stats; in arc_init()
7880 arc_ksp->ks_update = arc_kstat_update; in arc_init()
7960 (void) zfs_refcount_remove(&p->p_refcnt, &arc_prune_list); in arc_fini()
7961 zfs_refcount_destroy(&p->p_refcnt); in arc_fini()
8008 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
8012 * include short-stroked disks, solid state disks, and other media with
8015 * +-----------------------+
8017 * +-----------------------+
8024 * +---------------+ |
8026 * +---------------+ |
8031 * +-------+ +-------+
8034 * +-------+ +-------+
8035 * +=========+ .-----.
8036 * : L2ARC : |-_____-|
8038 * +=========+ `-_____-'
8058 * It does this by periodically scanning buffers from the eviction-end of
8069 * head --> tail
8070 * +---------------------+----------+
8071 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
8072 * +---------------------+----------+ | o L2ARC eligible
8073 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
8074 * +---------------------+----------+ |
8079 * l2arc write hand <--[oooo]--'
8112 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
8162 * main ARC buffers. There are 2 linked-lists of log blocks headed by
8164 * time-wise and offset-wise interleaved, but that is an optimization rather
8170 * which contains our top-level reference structures. We update it each
8183 * ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
8185 * |+------+ ...--\-------/ \-----/--\------/ / |
8186 * | \--------------/ \--------------/ |
8195 * incurring a large amount of I/O round-trip latency. Having two lists
8199 * On-device data structures:
8215 * <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
8218 * <<nextwrite>> may overwrite this blk and/or its bufs --'
8235 * birth TXG uniquely identify a block in space and time - once created,
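/*
 * A condensed sketch of how these on-device structures are consumed
 * at import time (control flow simplified; see l2arc_rebuild()
 * further below):
 *
 *	l2arc_dev_hdr_read(dev);		// validate device header
 *	lbps[0..1] = l2dhdr->dh_start_lbps[0..1]; // two newest blocks
 *	while (l2arc_log_blkptr_valid(dev, &lbps[0])) {
 *		l2arc_log_blk_read(dev, &lbps[0], &lbps[1], ...);
 *		l2arc_log_blk_restore(dev, this_lb, ...); // re-create hdrs
 *		lbps[0] = lbps[1];
 *		lbps[1] = this_lb->lb_prev_lbp;	// step back in time
 *	}
 */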
8257 if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) || in l2arc_write_eligible()
8285 if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0) { in l2arc_write_size()
8299 size = MIN(size, (dev->l2ad_end - dev->l2ad_start) / 4); in l2arc_write_size()
8301 size = P2ROUNDUP(size, 1ULL << dev->l2ad_vdev->vdev_ashift); in l2arc_write_size()
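/*
 * Worked example for the two clamps above (numbers hypothetical): on
 * a device where l2ad_end - l2ad_start is 1 GiB with ashift = 12, a
 * candidate write size of 300 MiB is first capped to 1 GiB / 4 =
 * 256 MiB, and P2ROUNDUP() then keeps the result 4 KiB-aligned (a
 * no-op here, since 256 MiB already is).
 */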
8313 * If the ARC lists are busy, increase our write rate; if the in l2arc_write_interval()
8315 * how much we previously wrote - if it was more than half of in l2arc_write_interval()
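/*
 * A minimal sketch of that policy (tunable names as elsewhere in this
 * file; clock arithmetic elided):
 *
 *	if (l2arc_feed_again && wrote > (wanted / 2))
 *		interval = (hz * l2arc_feed_min_ms) / 1000; // busy: sooner
 *	else
 *		interval = hz * l2arc_feed_secs;	// idle: back off
 */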
8336 return (dev->l2ad_vdev == NULL || vdev_is_dead(dev->l2ad_vdev) || in l2arc_dev_invalid()
8337 dev->l2ad_rebuild || dev->l2ad_trim_all || in l2arc_dev_invalid()
8338 dev->l2ad_spa == NULL || dev->l2ad_spa->spa_is_exporting); in l2arc_dev_invalid()
8365 /* loop around the list looking for a non-faulted vdev */ in l2arc_dev_get_next()
8397 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); in l2arc_dev_get_next()
8413 ASSERT3P(df->l2df_abd, !=, NULL); in l2arc_do_free_on_write()
8414 abd_free(df->l2df_abd); in l2arc_do_free_on_write()
8437 cb = zio->io_private; in l2arc_write_done()
8439 dev = cb->l2wcb_dev; in l2arc_write_done()
8440 l2dhdr = dev->l2ad_dev_hdr; in l2arc_write_done()
8442 head = cb->l2wcb_head; in l2arc_write_done()
8444 buflist = &dev->l2ad_buflist; in l2arc_write_done()
8453 mutex_enter(&dev->l2ad_mtx); in l2arc_write_done()
8480 mutex_exit(&dev->l2ad_mtx); in l2arc_write_done()
8495 * state while in-flight due to our ARC_FLAG_L2_WRITING in l2arc_write_done()
8501 * Skipped - drop L2ARC entry and mark the header as no in l2arc_write_done()
8504 if (zio->io_error != 0) { in l2arc_write_done()
8506 * Error - drop L2ARC entry. in l2arc_write_done()
8514 ASSERT(dev->l2ad_vdev != NULL); in l2arc_write_done()
8517 vdev_psize_to_asize(dev->l2ad_vdev, psize); in l2arc_write_done()
8518 (void) zfs_refcount_remove_many(&dev->l2ad_alloc, in l2arc_write_done()
8537 while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) { in l2arc_write_done()
8538 abd_free(abd_buf->abd); in l2arc_write_done()
8540 if (zio->io_error != 0) { in l2arc_write_done()
8541 lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list); in l2arc_write_done()
8547 L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop); in l2arc_write_done()
8549 ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize); in l2arc_write_done()
8551 zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize, in l2arc_write_done()
8553 (void) zfs_refcount_remove(&dev->l2ad_lb_count, in l2arc_write_done()
8555 kmem_free(lb_ptr_buf->lb_ptr, in l2arc_write_done()
8560 list_destroy(&cb->l2wcb_abd_list); in l2arc_write_done()
8562 if (zio->io_error != 0) { in l2arc_write_done()
8570 lb_ptr_buf = list_head(&dev->l2ad_lbptr_list); in l2arc_write_done()
8580 dev->l2ad_dev_hdr_asize); in l2arc_write_done()
8582 memset(&l2dhdr->dh_start_lbps[i], 0, in l2arc_write_done()
8587 memcpy(&l2dhdr->dh_start_lbps[i], lb_ptr_buf->lb_ptr, in l2arc_write_done()
8589 lb_ptr_buf = list_next(&dev->l2ad_lbptr_list, in l2arc_write_done()
8598 mutex_exit(&dev->l2ad_mtx); in l2arc_write_done()
8600 ASSERT(dev->l2ad_vdev != NULL); in l2arc_write_done()
8601 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); in l2arc_write_done()
8612 spa_t *spa = zio->io_spa; in l2arc_untransform()
8613 arc_buf_hdr_t *hdr = cb->l2rcb_hdr; in l2arc_untransform()
8614 blkptr_t *bp = zio->io_bp; in l2arc_untransform()
8626 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); in l2arc_untransform()
8641 ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb, in l2arc_untransform()
8644 hdr->b_l1hdr.b_pabd, &no_crypt); in l2arc_untransform()
8656 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, in l2arc_untransform()
8658 hdr->b_l1hdr.b_pabd = eabd; in l2arc_untransform()
8659 zio->io_abd = eabd; in l2arc_untransform()
8676 hdr->b_l1hdr.b_pabd, cabd, HDR_GET_PSIZE(hdr), in l2arc_untransform()
8677 HDR_GET_LSIZE(hdr), &hdr->b_complevel); in l2arc_untransform()
8683 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, in l2arc_untransform()
8685 hdr->b_l1hdr.b_pabd = cabd; in l2arc_untransform()
8686 zio->io_abd = cabd; in l2arc_untransform()
8687 zio->io_size = HDR_GET_LSIZE(hdr); in l2arc_untransform()
8705 l2arc_read_callback_t *cb = zio->io_private; in l2arc_read_done()
8709 boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) && in l2arc_read_done()
8710 (cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT)); in l2arc_read_done()
8712 ASSERT3P(zio->io_vd, !=, NULL); in l2arc_read_done()
8713 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); in l2arc_read_done()
8715 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); in l2arc_read_done()
8718 hdr = cb->l2rcb_hdr; in l2arc_read_done()
8729 if (cb->l2rcb_abd != NULL) { in l2arc_read_done()
8730 ASSERT3U(arc_hdr_size(hdr), <, zio->io_size); in l2arc_read_done()
8731 if (zio->io_error == 0) { in l2arc_read_done()
8733 abd_copy(hdr->b_crypt_hdr.b_rabd, in l2arc_read_done()
8734 cb->l2rcb_abd, arc_hdr_size(hdr)); in l2arc_read_done()
8736 abd_copy(hdr->b_l1hdr.b_pabd, in l2arc_read_done()
8737 cb->l2rcb_abd, arc_hdr_size(hdr)); in l2arc_read_done()
8744 * - free the temporary buffer in l2arc_read_done()
8745 * - point zio to the real ARC buffer in l2arc_read_done()
8746 * - set zio size accordingly in l2arc_read_done()
8747 * These are required because zio is either re-used for in l2arc_read_done()
8752 abd_free(cb->l2rcb_abd); in l2arc_read_done()
8753 zio->io_size = zio->io_orig_size = arc_hdr_size(hdr); in l2arc_read_done()
8757 zio->io_abd = zio->io_orig_abd = in l2arc_read_done()
8758 hdr->b_crypt_hdr.b_rabd; in l2arc_read_done()
8760 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); in l2arc_read_done()
8761 zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd; in l2arc_read_done()
8765 ASSERT3P(zio->io_abd, !=, NULL); in l2arc_read_done()
8770 ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd || in l2arc_read_done()
8771 (HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd)); in l2arc_read_done()
8772 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ in l2arc_read_done()
8773 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ in l2arc_read_done()
8774 zio->io_prop.zp_complevel = hdr->b_complevel; in l2arc_read_done()
8786 if (valid_cksum && tfm_error == 0 && zio->io_error == 0 && in l2arc_read_done()
8789 zio->io_private = hdr; in l2arc_read_done()
8796 if (zio->io_error != 0) { in l2arc_read_done()
8799 zio->io_error = SET_ERROR(EIO); in l2arc_read_done()
8809 if (zio->io_waiter == NULL) { in l2arc_read_done()
8812 hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd; in l2arc_read_done()
8814 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); in l2arc_read_done()
8816 zio = zio_read(pio, zio->io_spa, zio->io_bp, in l2arc_read_done()
8817 abd, zio->io_size, arc_read_done, in l2arc_read_done()
8818 hdr, zio->io_priority, cb->l2rcb_flags, in l2arc_read_done()
8819 &cb->l2rcb_zb); in l2arc_read_done()
8826 for (struct arc_callback *acb = hdr->b_l1hdr.b_acb; in l2arc_read_done()
8827 acb != NULL; acb = acb->acb_next) in l2arc_read_done()
8828 acb->acb_zio_head = zio; in l2arc_read_done()
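/*
 * Usage note: the zio_read() issued above is the failure fallback -
 * when the L2ARC copy fails its checksum or the device I/O errors,
 * the same logical read is re-driven against the original block
 * pointer in the main pool, so callers see a slower read rather than
 * an error (accounted via arcstat_l2_cksum_bad / arcstat_l2_io_error).
 */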
8860 ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; in l2arc_sublist_lock()
8863 ml = &arc_mru->arcs_list[ARC_BUFC_METADATA]; in l2arc_sublist_lock()
8866 ml = &arc_mfu->arcs_list[ARC_BUFC_DATA]; in l2arc_sublist_lock()
8869 ml = &arc_mru->arcs_list[ARC_BUFC_DATA]; in l2arc_sublist_lock()
8876 * Return a randomly-selected sublist. This is acceptable in l2arc_sublist_lock()
8894 if (dev->l2ad_log_entries == 0) { in l2arc_log_blk_overhead()
8897 ASSERT(dev->l2ad_vdev != NULL); in l2arc_log_blk_overhead()
8902 dev->l2ad_log_entries - 1) / in l2arc_log_blk_overhead()
8903 dev->l2ad_log_entries; in l2arc_log_blk_overhead()
8905 return (vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_log_blk_overhead()
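/*
 * Worked example (hypothetical numbers): with l2ad_log_entries = 1022
 * and 4000 entries' worth of payload to write, the ceiling division
 * above yields (4000 + 1022 - 1) / 1022 = 4 log blocks, so the
 * overhead charged is four asize-rounded log block writes.
 */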
8924 vdev_t *vd = dev->l2ad_vdev; in l2arc_evict()
8928 ASSERT(dev->l2ad_spa != NULL || all); in l2arc_evict()
8930 buflist = &dev->l2ad_buflist; in l2arc_evict()
8934 if (dev->l2ad_hand + distance > dev->l2ad_end) { in l2arc_evict()
8944 taddr = dev->l2ad_end; in l2arc_evict()
8946 taddr = dev->l2ad_hand + distance; in l2arc_evict()
8956 if (dev->l2ad_first) { in l2arc_evict()
8967 if (vd->vdev_has_trim && dev->l2ad_evict < taddr && in l2arc_evict()
8977 spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev); in l2arc_evict()
8979 dev->l2ad_evict - VDEV_LABEL_START_SIZE, in l2arc_evict()
8980 taddr - dev->l2ad_evict); in l2arc_evict()
8981 spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev, in l2arc_evict()
8993 dev->l2ad_evict = MAX(dev->l2ad_evict, taddr); in l2arc_evict()
8998 mutex_enter(&dev->l2ad_mtx); in l2arc_evict()
9005 for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf; in l2arc_evict()
9008 lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf); in l2arc_evict()
9012 (lb_ptr_buf->lb_ptr)->lbp_prop); in l2arc_evict()
9019 if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) { in l2arc_evict()
9023 vdev_space_update(vd, -asize, 0, 0); in l2arc_evict()
9024 ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize); in l2arc_evict()
9026 zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize, in l2arc_evict()
9028 (void) zfs_refcount_remove(&dev->l2ad_lb_count, in l2arc_evict()
9030 list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf); in l2arc_evict()
9031 kmem_free(lb_ptr_buf->lb_ptr, in l2arc_evict()
9053 mutex_exit(&dev->l2ad_mtx); in l2arc_evict()
9068 if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict || in l2arc_evict()
9069 hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) { in l2arc_evict()
9088 ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only); in l2arc_evict()
9104 mutex_exit(&dev->l2ad_mtx); in l2arc_evict()
9116 dev->l2ad_hand = dev->l2ad_start; in l2arc_evict()
9117 dev->l2ad_evict = dev->l2ad_start; in l2arc_evict()
9118 dev->l2ad_first = B_FALSE; in l2arc_evict()
9128 ASSERT3U(dev->l2ad_hand + distance, <=, dev->l2ad_end); in l2arc_evict()
9129 if (!dev->l2ad_first) in l2arc_evict()
9130 ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict); in l2arc_evict()
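/*
 * Sketch of the ring-buffer invariant re-established above: in steady
 * state (l2ad_first == B_FALSE)
 *
 *	l2ad_start <= l2ad_hand <= l2ad_evict <= l2ad_end
 *
 * and when the hand would run past l2ad_end, eviction finishes out the
 * tail, both pointers rewind to l2ad_start, and l2ad_first is cleared
 * so validity checks know older on-device data may now be overwritten.
 */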
9144 abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd; in l2arc_apply_transforms()
9149 boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); in l2arc_apply_transforms()
9167 abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize); in l2arc_apply_transforms()
9168 abd_zero_off(to_write, psize, asize - psize); in l2arc_apply_transforms()
9176 abd_copy(to_write, hdr->b_l1hdr.b_pabd, size); in l2arc_apply_transforms()
9178 abd_zero_off(to_write, size, asize - size); in l2arc_apply_transforms()
9185 size, MIN(size, psize), hdr->b_complevel); in l2arc_apply_transforms()
9188 * We can't re-compress the block into the original in l2arc_apply_transforms()
9196 abd_zero_off(cabd, csize, asize - csize); in l2arc_apply_transforms()
9205 * made it to this point, the key to re-encrypt in l2arc_apply_transforms()
9209 ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj, in l2arc_apply_transforms()
9214 ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key, in l2arc_apply_transforms()
9215 hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt, in l2arc_apply_transforms()
9216 hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd, in l2arc_apply_transforms()
9225 abd_zero_off(eabd, psize, asize - psize); in l2arc_apply_transforms()
9228 ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN)); in l2arc_apply_transforms()
9238 ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd); in l2arc_apply_transforms()
9259 cb = zio->io_private; in l2arc_blk_fetch_done()
9260 if (cb->l2rcb_abd != NULL) in l2arc_blk_fetch_done()
9261 abd_free(cb->l2rcb_abd); in l2arc_blk_fetch_done()
9270 * The headroom_boost is an in-out parameter used to maintain headroom boost
9286 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_write_buffers()
9288 ASSERT3P(dev->l2ad_vdev, !=, NULL); in l2arc_write_buffers()
9363 ASSERT(hdr->b_l1hdr.b_pabd != NULL || in l2arc_write_buffers()
9366 uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_write_buffers()
9410 to_write = hdr->b_crypt_hdr.b_rabd; in l2arc_write_buffers()
9415 to_write = hdr->b_l1hdr.b_pabd; in l2arc_write_buffers()
9432 hdr->b_l2hdr.b_dev = dev; in l2arc_write_buffers()
9433 hdr->b_l2hdr.b_daddr = dev->l2ad_hand; in l2arc_write_buffers()
9434 hdr->b_l2hdr.b_hits = 0; in l2arc_write_buffers()
9435 hdr->b_l2hdr.b_arcs_state = in l2arc_write_buffers()
9436 hdr->b_l1hdr.b_state->arcs_state; in l2arc_write_buffers()
9442 (void) zfs_refcount_add_many(&dev->l2ad_alloc, in l2arc_write_buffers()
9445 vdev_space_update(dev->l2ad_vdev, asize, 0, 0); in l2arc_write_buffers()
9447 mutex_enter(&dev->l2ad_mtx); in l2arc_write_buffers()
9454 list_insert_head(&dev->l2ad_buflist, head); in l2arc_write_buffers()
9456 list_insert_head(&dev->l2ad_buflist, hdr); in l2arc_write_buffers()
9457 mutex_exit(&dev->l2ad_mtx); in l2arc_write_buffers()
9465 cb->l2wcb_dev = dev; in l2arc_write_buffers()
9466 cb->l2wcb_head = head; in l2arc_write_buffers()
9467 list_create(&cb->l2wcb_abd_list, in l2arc_write_buffers()
9474 wzio = zio_write_phys(pio, dev->l2ad_vdev, in l2arc_write_buffers()
9475 dev->l2ad_hand, asize, to_write, in l2arc_write_buffers()
9480 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, in l2arc_write_buffers()
9486 dev->l2ad_hand += asize; in l2arc_write_buffers()
9521 if (dev->l2ad_evict != l2dhdr->dh_evict) in l2arc_write_buffers()
9527 if (!dev->l2ad_first) in l2arc_write_buffers()
9528 ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict); in l2arc_write_buffers()
9534 dev->l2ad_writing = B_TRUE; in l2arc_write_buffers()
9536 dev->l2ad_writing = B_FALSE; in l2arc_write_buffers()
9559 * heart of the L2ARC.
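/*
 * A condensed sketch of one feed cycle (locking, shutdown and error
 * paths elided):
 *
 *	for (;;) {
 *		(void) cv_timedwait(...);	// sleep until next feed
 *		dev = l2arc_dev_get_next();	// round-robin a device
 *		size = l2arc_write_size(dev);
 *		l2arc_evict(dev, size, B_FALSE); // make room at the hand
 *		wrote = l2arc_write_buffers(spa, dev, size);
 *		next = l2arc_write_interval(begin, size, wrote);
 *	}
 */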
9597 * doing so the next spa to feed from: dev->l2ad_spa. This in l2arc_feed_thread()
9608 spa = dev->l2ad_spa; in l2arc_feed_thread()
9612 * If the pool is read-only then force the feed thread to in l2arc_feed_thread()
9676 if (dev->l2ad_vdev == vd) in l2arc_vdev_get()
9687 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_rebuild_dev()
9688 uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; in l2arc_rebuild_dev()
9689 spa_t *spa = dev->l2ad_spa; in l2arc_rebuild_dev()
9707 if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) { in l2arc_rebuild_dev()
9708 dev->l2ad_log_entries = 0; in l2arc_rebuild_dev()
9710 dev->l2ad_log_entries = MIN((dev->l2ad_end - in l2arc_rebuild_dev()
9711 dev->l2ad_start) >> SPA_MAXBLOCKSHIFT, in l2arc_rebuild_dev()
9718 if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) { in l2arc_rebuild_dev()
9732 dev->l2ad_log_ent_idx = 0; in l2arc_rebuild_dev()
9733 dev->l2ad_log_blk_payload_asize = 0; in l2arc_rebuild_dev()
9734 dev->l2ad_log_blk_payload_start = 0; in l2arc_rebuild_dev()
9743 dev->l2ad_rebuild = B_TRUE; in l2arc_rebuild_dev()
9756 dev->l2ad_trim_all = B_TRUE; in l2arc_rebuild_dev()
9780 adddev->l2ad_spa = spa; in l2arc_add_vdev()
9781 adddev->l2ad_vdev = vd; in l2arc_add_vdev()
9783 l2dhdr_asize = adddev->l2ad_dev_hdr_asize = in l2arc_add_vdev()
9784 MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift); in l2arc_add_vdev()
9785 adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize; in l2arc_add_vdev()
9786 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); in l2arc_add_vdev()
9787 ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end); in l2arc_add_vdev()
9788 adddev->l2ad_hand = adddev->l2ad_start; in l2arc_add_vdev()
9789 adddev->l2ad_evict = adddev->l2ad_start; in l2arc_add_vdev()
9790 adddev->l2ad_first = B_TRUE; in l2arc_add_vdev()
9791 adddev->l2ad_writing = B_FALSE; in l2arc_add_vdev()
9792 adddev->l2ad_trim_all = B_FALSE; in l2arc_add_vdev()
9793 list_link_init(&adddev->l2ad_node); in l2arc_add_vdev()
9794 adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP); in l2arc_add_vdev()
9796 mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL); in l2arc_add_vdev()
9801 list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), in l2arc_add_vdev()
9808 list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t), in l2arc_add_vdev()
9811 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); in l2arc_add_vdev()
9812 zfs_refcount_create(&adddev->l2ad_alloc); in l2arc_add_vdev()
9813 zfs_refcount_create(&adddev->l2ad_lb_asize); in l2arc_add_vdev()
9814 zfs_refcount_create(&adddev->l2ad_lb_count); in l2arc_add_vdev()
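/*
 * Layout established above: the usable ring spans
 * [l2ad_start, l2ad_end), starting just past the vdev labels and the
 * on-device header; the hand and evict pointers both park at
 * l2ad_start, and l2ad_first is set so the first pass may fill the
 * entire ring.
 */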
9853 * When onlining the cache device (i.e. offline->online without exporting in l2arc_rebuild_vdev()
9855 * vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev() in l2arc_rebuild_vdev()
9876 l2arc_dev_t *remdev = rva->rva_l2arc_dev; in l2arc_device_teardown()
9883 list_destroy(&remdev->l2ad_buflist); in l2arc_device_teardown()
9884 ASSERT(list_is_empty(&remdev->l2ad_lbptr_list)); in l2arc_device_teardown()
9885 list_destroy(&remdev->l2ad_lbptr_list); in l2arc_device_teardown()
9886 mutex_destroy(&remdev->l2ad_mtx); in l2arc_device_teardown()
9887 zfs_refcount_destroy(&remdev->l2ad_alloc); in l2arc_device_teardown()
9888 zfs_refcount_destroy(&remdev->l2ad_lb_asize); in l2arc_device_teardown()
9889 zfs_refcount_destroy(&remdev->l2ad_lb_count); in l2arc_device_teardown()
9890 kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize); in l2arc_device_teardown()
9893 uint64_t elapsed = NSEC2MSEC(gethrtime() - start_time); in l2arc_device_teardown()
9896 (u_longlong_t)rva->rva_spa_gid, in l2arc_device_teardown()
9897 (u_longlong_t)rva->rva_vdev_gid, in l2arc_device_teardown()
9901 if (rva->rva_async) in l2arc_device_teardown()
9902 arc_async_flush_remove(rva->rva_spa_gid, 2); in l2arc_device_teardown()
9912 spa_t *spa = vd->vdev_spa; in l2arc_remove_vdev()
9913 boolean_t asynchronous = spa->spa_state == POOL_STATE_EXPORTED || in l2arc_remove_vdev()
9914 spa->spa_state == POOL_STATE_DESTROYED; in l2arc_remove_vdev()
9927 rva->rva_l2arc_dev = remdev; in l2arc_remove_vdev()
9928 rva->rva_spa_gid = spa_load_guid(spa); in l2arc_remove_vdev()
9929 rva->rva_vdev_gid = remdev->l2ad_vdev->vdev_guid; in l2arc_remove_vdev()
9935 remdev->l2ad_rebuild_cancel = B_TRUE; in l2arc_remove_vdev()
9936 if (remdev->l2ad_rebuild_began == B_TRUE) { in l2arc_remove_vdev()
9937 while (remdev->l2ad_rebuild == B_TRUE) in l2arc_remove_vdev()
9941 rva->rva_async = asynchronous; in l2arc_remove_vdev()
9954 remdev->l2ad_spa = NULL; in l2arc_remove_vdev()
9955 remdev->l2ad_vdev = NULL; in l2arc_remove_vdev()
9964 arc_async_flush_t *af = arc_async_flush_add(rva->rva_spa_gid, 2); in l2arc_remove_vdev()
9967 TQ_SLEEP, &af->af_tqent); in l2arc_remove_vdev()
10043 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in l2arc_spa_rebuild_start()
10045 l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]); in l2arc_spa_rebuild_start()
10051 if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) { in l2arc_spa_rebuild_start()
10052 dev->l2ad_rebuild_began = B_TRUE; in l2arc_spa_rebuild_start()
10064 spa->spa_export_thread == curthread); in l2arc_spa_rebuild_stop()
10066 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in l2arc_spa_rebuild_stop()
10068 l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]); in l2arc_spa_rebuild_stop()
10072 dev->l2ad_rebuild_cancel = B_TRUE; in l2arc_spa_rebuild_stop()
10075 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in l2arc_spa_rebuild_stop()
10077 l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]); in l2arc_spa_rebuild_stop()
10081 if (dev->l2ad_rebuild_began == B_TRUE) { in l2arc_spa_rebuild_stop()
10082 while (dev->l2ad_rebuild == B_TRUE) { in l2arc_spa_rebuild_stop()
10099 VERIFY(dev->l2ad_rebuild); in l2arc_dev_rebuild_thread()
10102 dev->l2ad_rebuild_began = B_FALSE; in l2arc_dev_rebuild_thread()
10103 dev->l2ad_rebuild = B_FALSE; in l2arc_dev_rebuild_thread()
10123 vdev_t *vd = dev->l2ad_vdev; in l2arc_rebuild()
10124 spa_t *spa = vd->vdev_spa; in l2arc_rebuild()
10126 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_rebuild()
10139 * that a spa_unload or device remove can be initiated - this is in l2arc_rebuild()
10150 dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start); in l2arc_rebuild()
10151 dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr + in l2arc_rebuild()
10152 L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop), in l2arc_rebuild()
10153 dev->l2ad_start); in l2arc_rebuild()
10154 dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST); in l2arc_rebuild()
10156 vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time; in l2arc_rebuild()
10157 vd->vdev_trim_state = l2dhdr->dh_trim_state; in l2arc_rebuild()
10167 memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps)); in l2arc_rebuild()
10184 * online the L2ARC dev at a later time (or re-import the pool) in l2arc_rebuild()
10203 uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop); in l2arc_rebuild()
10211 lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), in l2arc_rebuild()
10213 memcpy(lb_ptr_buf->lb_ptr, &lbps[0], in l2arc_rebuild()
10215 mutex_enter(&dev->l2ad_mtx); in l2arc_rebuild()
10216 list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf); in l2arc_rebuild()
10219 zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf); in l2arc_rebuild()
10220 zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf); in l2arc_rebuild()
10221 mutex_exit(&dev->l2ad_mtx); in l2arc_rebuild()
10230 * -----|||----|||---|||----||| in l2arc_rebuild()
10232 * ---|||---|||----|||---||| in l2arc_rebuild()
10238 * (0). Only log blocks (0)-(3) should be restored. We check in l2arc_rebuild()
10247 lbps[0].lbp_payload_start, dev->l2ad_evict) && in l2arc_rebuild()
10248 !dev->l2ad_first) in l2arc_rebuild()
10254 if (dev->l2ad_rebuild_cancel) { in l2arc_rebuild()
10279 lbps[1] = this_lb->lb_prev_lbp; in l2arc_rebuild()
10299 (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); in l2arc_rebuild()
10304 } else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) { in l2arc_rebuild()
10308 (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); in l2arc_rebuild()
10309 } else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) { in l2arc_rebuild()
10312 * in the device header points to invalid/non-present log in l2arc_rebuild()
10317 memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize); in l2arc_rebuild()
10322 (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); in l2arc_rebuild()
10341 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_dev_hdr_read()
10342 const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; in l2arc_dev_hdr_read()
10345 guid = spa_guid(dev->l2ad_vdev->vdev_spa); in l2arc_dev_hdr_read()
10349 err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev, in l2arc_dev_hdr_read()
10361 (u_longlong_t)dev->l2ad_vdev->vdev_guid); in l2arc_dev_hdr_read()
10365 if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC)) in l2arc_dev_hdr_read()
10368 if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC || in l2arc_dev_hdr_read()
10369 l2dhdr->dh_spa_guid != guid || in l2arc_dev_hdr_read()
10370 l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid || in l2arc_dev_hdr_read()
10371 l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION || in l2arc_dev_hdr_read()
10372 l2dhdr->dh_log_entries != dev->l2ad_log_entries || in l2arc_dev_hdr_read()
10373 l2dhdr->dh_end != dev->l2ad_end || in l2arc_dev_hdr_read()
10374 !l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end, in l2arc_dev_hdr_read()
10375 l2dhdr->dh_evict) || in l2arc_dev_hdr_read()
10376 (l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE && in l2arc_dev_hdr_read()
10436 this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp, in l2arc_log_blk_read()
10445 * Start issuing IO for the next log block early - this in l2arc_log_blk_read()
10449 *next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp, in l2arc_log_blk_read()
10458 (u_longlong_t)this_lbp->lbp_daddr, in l2arc_log_blk_read()
10459 (u_longlong_t)dev->l2ad_vdev->vdev_guid); in l2arc_log_blk_read()
10467 asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop); in l2arc_log_blk_read()
10469 if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) { in l2arc_log_blk_read()
10473 (u_longlong_t)this_lbp->lbp_daddr, in l2arc_log_blk_read()
10474 (u_longlong_t)dev->l2ad_vdev->vdev_guid, in l2arc_log_blk_read()
10475 (u_longlong_t)dev->l2ad_hand, in l2arc_log_blk_read()
10476 (u_longlong_t)dev->l2ad_evict); in l2arc_log_blk_read()
10482 switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) { in l2arc_log_blk_read()
10491 L2BLK_GET_COMPRESS((this_lbp)->lbp_prop), in l2arc_log_blk_read()
10505 if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC)) in l2arc_log_blk_read()
10507 if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) { in l2arc_log_blk_read()
10512 /* Abort an in-flight fetch I/O in case of error */ in l2arc_log_blk_read()
10531 uint64_t log_entries = dev->l2ad_log_entries; in l2arc_log_blk_restore()
10540 for (int i = log_entries - 1; i >= 0; i--) { in l2arc_log_blk_restore()
10548 * HEAD <------ (time) ------ TAIL in l2arc_log_blk_restore()
10549 * direction +-----+-----+-----+-----+-----+ direction in l2arc_log_blk_restore()
10551 * fill +-----+-----+-----+-----+-----+ in l2arc_log_blk_restore()
10559 * l2arc_feed_thread() as dev->l2ad_rebuild is set to true. in l2arc_log_blk_restore()
10561 size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop); in l2arc_log_blk_restore()
10562 asize += vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_log_blk_restore()
10563 L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop)); in l2arc_log_blk_restore()
10564 l2arc_hdr_restore(&lb->lb_entries[i], dev); in l2arc_log_blk_restore()
10589 arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop); in l2arc_hdr_restore()
10590 uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_hdr_restore()
10591 L2BLK_GET_PSIZE((le)->le_prop)); in l2arc_hdr_restore()
10598 hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type, in l2arc_hdr_restore()
10599 dev, le->le_dva, le->le_daddr, in l2arc_hdr_restore()
10600 L2BLK_GET_PSIZE((le)->le_prop), asize, le->le_birth, in l2arc_hdr_restore()
10601 L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel, in l2arc_hdr_restore()
10602 L2BLK_GET_PROTECTED((le)->le_prop), in l2arc_hdr_restore()
10603 L2BLK_GET_PREFETCH((le)->le_prop), in l2arc_hdr_restore()
10604 L2BLK_GET_STATE((le)->le_prop)); in l2arc_hdr_restore()
10611 vdev_space_update(dev->l2ad_vdev, asize, 0, 0); in l2arc_hdr_restore()
10613 mutex_enter(&dev->l2ad_mtx); in l2arc_hdr_restore()
10614 list_insert_tail(&dev->l2ad_buflist, hdr); in l2arc_hdr_restore()
10615 (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr); in l2arc_hdr_restore()
10616 mutex_exit(&dev->l2ad_mtx); in l2arc_hdr_restore()
10630 exists->b_l2hdr.b_dev = dev; in l2arc_hdr_restore()
10631 exists->b_l2hdr.b_daddr = le->le_daddr; in l2arc_hdr_restore()
10632 exists->b_l2hdr.b_arcs_state = in l2arc_hdr_restore()
10633 L2BLK_GET_STATE((le)->le_prop); in l2arc_hdr_restore()
10636 mutex_enter(&dev->l2ad_mtx); in l2arc_hdr_restore()
10637 list_insert_tail(&dev->l2ad_buflist, exists); in l2arc_hdr_restore()
10638 (void) zfs_refcount_add_many(&dev->l2ad_alloc, in l2arc_hdr_restore()
10640 mutex_exit(&dev->l2ad_mtx); in l2arc_hdr_restore()
10642 vdev_space_update(dev->l2ad_vdev, asize, 0, 0); in l2arc_hdr_restore()
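/*
 * Note: headers re-created via arc_buf_alloc_l2only() above carry no
 * L1 payload; they sit in the arc_l2c_only state until a read faults
 * the data back into the ARC proper, after which they behave like any
 * other cached block.
 */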
10670 asize = L2BLK_GET_PSIZE((lbp)->lbp_prop); in l2arc_log_blk_fetch()
10674 cb->l2rcb_abd = abd_get_from_buf(lb, asize); in l2arc_log_blk_fetch()
10675 pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb, in l2arc_log_blk_fetch()
10677 (void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize, in l2arc_log_blk_fetch()
10678 cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL, in l2arc_log_blk_fetch()
10701 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_dev_hdr_update()
10702 const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; in l2arc_dev_hdr_update()
10706 VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER)); in l2arc_dev_hdr_update()
10708 l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC; in l2arc_dev_hdr_update()
10709 l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION; in l2arc_dev_hdr_update()
10710 l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa); in l2arc_dev_hdr_update()
10711 l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid; in l2arc_dev_hdr_update()
10712 l2dhdr->dh_log_entries = dev->l2ad_log_entries; in l2arc_dev_hdr_update()
10713 l2dhdr->dh_evict = dev->l2ad_evict; in l2arc_dev_hdr_update()
10714 l2dhdr->dh_start = dev->l2ad_start; in l2arc_dev_hdr_update()
10715 l2dhdr->dh_end = dev->l2ad_end; in l2arc_dev_hdr_update()
10716 l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize); in l2arc_dev_hdr_update()
10717 l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count); in l2arc_dev_hdr_update()
10718 l2dhdr->dh_flags = 0; in l2arc_dev_hdr_update()
10719 l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time; in l2arc_dev_hdr_update()
10720 l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state; in l2arc_dev_hdr_update()
10721 if (dev->l2ad_first) in l2arc_dev_hdr_update()
10722 l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST; in l2arc_dev_hdr_update()
10726 err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev, in l2arc_dev_hdr_update()
10735 (u_longlong_t)dev->l2ad_vdev->vdev_guid); in l2arc_dev_hdr_update()
10748 l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk; in l2arc_log_blk_commit()
10749 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_log_blk_commit()
10756 VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries); in l2arc_log_blk_commit()
10759 abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb)); in l2arc_log_blk_commit()
10761 lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP); in l2arc_log_blk_commit()
10764 lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1]; in l2arc_log_blk_commit()
10765 lb->lb_magic = L2ARC_LOG_BLK_MAGIC; in l2arc_log_blk_commit()
10772 list_insert_tail(&cb->l2wcb_abd_list, abd_buf); in l2arc_log_blk_commit()
10776 abd_buf->abd, &abd, sizeof (*lb), in l2arc_log_blk_commit()
10778 dev->l2ad_vdev->vdev_ashift, in l2arc_log_blk_commit()
10779 dev->l2ad_vdev->vdev_ashift, sizeof (*lb)), 0); in l2arc_log_blk_commit()

	/* A log block is never entirely zero. */
	ASSERT(psize != 0);
	asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
	ASSERT(asize <= sizeof (*lb));

	/*
	 * Update the start log block pointer in the device header to point
	 * to the log block we're about to write.
	 */
	l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
	l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
	l2dhdr->dh_start_lbps[0].lbp_payload_asize =
	    dev->l2ad_log_blk_payload_asize;
	l2dhdr->dh_start_lbps[0].lbp_payload_start =
	    dev->l2ad_log_blk_payload_start;
	L2BLK_SET_LSIZE(
	    (&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
	L2BLK_SET_PSIZE(
	    (&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
	L2BLK_SET_CHECKSUM(
	    (&l2dhdr->dh_start_lbps[0])->lbp_prop,
	    ZIO_CHECKSUM_FLETCHER_4);
	if (asize < sizeof (*lb)) {
		/* Compression succeeded; zero the tail up to asize. */
		abd_zero_off(abd, psize, asize - psize);
		L2BLK_SET_COMPRESS(
		    (&l2dhdr->dh_start_lbps[0])->lbp_prop,
		    ZIO_COMPRESS_LZ4);
	} else {
		/* Compression did not save space; write uncompressed. */
		abd_copy_from_buf_off(abd, lb, 0, sizeof (*lb));
		L2BLK_SET_COMPRESS(
		    (&l2dhdr->dh_start_lbps[0])->lbp_prop,
		    ZIO_COMPRESS_OFF);
	}

	/* Checksum what we're about to write. */
	abd_fletcher_4_native(abd, asize, NULL,
	    &l2dhdr->dh_start_lbps[0].lbp_cksum);

	abd_free(abd_buf->abd);

	/* Perform the write itself. */
	abd_buf->abd = abd;
	wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
	    asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
	DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
	(void) zio_nowait(wzio);

	dev->l2ad_hand += asize;
	vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

	/*
	 * Include the committed log block's pointer in the list of pointers
	 * to log blocks present in the L2ARC device.
	 */
	memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0],
	    sizeof (l2arc_log_blkptr_t));
	mutex_enter(&dev->l2ad_mtx);
	list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
	ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
	ARCSTAT_BUMP(arcstat_l2_log_blk_count);
	zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
	zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
	mutex_exit(&dev->l2ad_mtx);

	/* Bump the kstats. */
	ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
	ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
	ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
	ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
	    dev->l2ad_log_blk_payload_asize / asize);

	/* Start a new log block. */
	dev->l2ad_log_ent_idx = 0;
	dev->l2ad_log_blk_payload_asize = 0;
	dev->l2ad_log_blk_payload_start = 0;

	return (asize);
}
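
/*
 * Worked example (illustrative numbers, assuming vdev_psize_to_asize()
 * rounds up to the vdev's ashift): with vdev_ashift = 12, a log block
 * that compresses to psize = 5000 bytes is allocated and written as
 * asize = 8192 bytes, and abd_zero_off() in this function zeroes the
 * 3192-byte tail so the on-disk padding is deterministic.
 */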
/*
 * Validates an L2ARC log block address to make sure that it can be read
 * from the provided L2ARC device.
 */
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
	/* L2BLK_GET_PSIZE returns aligned size for log blocks. */
	uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
	uint64_t end = lbp->lbp_daddr + asize - 1;
	uint64_t start = lbp->lbp_payload_start;
	boolean_t evicted = B_FALSE;

	/*
	 * A log block is valid if all of the following conditions are true:
	 * - it fits entirely (including its payload) between l2ad_start and
	 *   l2ad_end
	 * - it has a valid size
	 * - neither the log block itself nor part of its payload was evicted
	 *   by l2arc_evict():
	 *
	 *		l2ad_hand          l2ad_evict
	 *		|                  |       lbp_daddr
	 *		|                  |       |  tail
	 *		V                  V       V  V
	 *	l2ad_start ================================== l2ad_end
	 *	           --------------------------||||
	 *	               ^                     ^
	 *	               |                     log block
	 *	               payload
	 */
	evicted =
	    l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
	    l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
	    l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
	    l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);

	return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
	    asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
	    (!evicted || dev->l2ad_first));
}
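
/*
 * Usage sketch (simplified; variable names are illustrative, not verbatim
 * upstream code): during persistent L2ARC rebuild, the chain of log blocks
 * is walked backwards via lb_prev_lbp until a pointer fails this
 * validation:
 *
 *	while (l2arc_log_blkptr_valid(dev, &lbps[0])) {
 *		... read the block and restore its entries ...
 *		lbps[0] = lbps[1];
 *		lbps[1] = this_lb->lb_prev_lbp;
 *	}
 */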
/*
 * Inserts ARC buffer header `hdr` into the current L2ARC log block on
 * the device. The buffer being inserted must be present in L2ARC.
 * Returns B_TRUE if the L2ARC log block is full and needs to be committed
 * to L2ARC, or B_FALSE if it still has room for more ARC buffers.
 */
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
	l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
	l2arc_log_ent_phys_t *le;

	if (dev->l2ad_log_entries == 0)
		return (B_FALSE);

	int index = dev->l2ad_log_ent_idx++;

	ASSERT3S(index, <, dev->l2ad_log_entries);
	ASSERT(HDR_HAS_L2HDR(hdr));

	le = &lb->lb_entries[index];
	memset(le, 0, sizeof (*le));
	le->le_dva = hdr->b_dva;
	le->le_birth = hdr->b_birth;
	le->le_daddr = hdr->b_l2hdr.b_daddr;
	if (index == 0)
		dev->l2ad_log_blk_payload_start = le->le_daddr;
	L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
	L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
	L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
	le->le_complevel = hdr->b_complevel;
	L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
	L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
	L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
	L2BLK_SET_STATE((le)->le_prop, hdr->b_l2hdr.b_arcs_state);

	dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
	    HDR_GET_PSIZE(hdr));

	return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
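
/*
 * Usage sketch (simplified from the L2ARC write path): each header written
 * to the cache device is appended to the open log block, and the block is
 * committed as soon as this function reports it full:
 *
 *	if (l2arc_log_blk_insert(dev, hdr))
 *		write_asize += l2arc_log_blk_commit(dev, pio, cb);
 */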
/*
 * Checks whether a given L2ARC device address sits in a time-sequentially
 * ordered range of addresses on a device (bottom -- top).
 * Arguments:
 *	bottom -- Lower end of the range to check (written to earlier).
 *	top    -- Upper end of the range to check (written to later).
 *	check  -- The address for which we want to determine if it sits in
 *		  between the top and bottom.
 *
 * The 3-way conditional below represents the following cases:
 *
 *	bottom < top : Sequentially ordered case:
 *	  <check>--------+-------------------+
 *	                 |  (overlap here?)  |
 *	 L2ARC dev       V                   V
 *	 |---------------<bottom>============<top>--------------|
 *
 *	bottom > top : Looped-around case:
 *	  <check>--------+------------------+
 *	                 |  (overlap here?) |
 *	 L2ARC dev       V                  V
 *	 |===============<top>---------------<bottom>===========|
 *	 ^               ^
 *	 |  (or here?)   |
 *	 +---------------+---------<check>
 *
 *	bottom == top : Just a single address comparison.
 */
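/*
 * A minimal sketch of the 3-way conditional described above (a
 * reconstruction consistent with the comment, not verbatim upstream code):
 */
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
	if (bottom < top) {
		/* Sequentially ordered: check must fall in [bottom, top]. */
		return (bottom <= check && check <= top);
	} else if (bottom > top) {
		/* Looped around: the range wraps past the device's end. */
		return (check <= top || bottom <= check);
	} else {
		/* bottom == top: a single address comparison. */
		return (check == top);
	}
}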
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, UINT, ZMOD_RW,
	"Percent of ARC size allowed for L2ARC-only headers");
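
/*
 * Example (illustrative value; assumes the Linux module-parameter name
 * l2arc_meta_percent matches the declaration above):
 *
 *	echo 33 > /sys/module/zfs/parameters/l2arc_meta_percent
 */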