 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are significant
 * differences. Unlike their model, which assumes any page is evictable, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out references to them.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST conference, 2003
 * The L2ARC uses the l2ad_mtx on each vdev for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
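/*
 * Illustration (not part of arc.c): a minimal userland sketch of the
 * l2ad_mtx discipline listed above -- every walk or update of a device's
 * L2ARC buflist happens with that device's l2ad_mtx held. The types here
 * are simplified stand-ins, not the real OpenZFS definitions.
 */
#include <pthread.h>
#include <stddef.h>

struct hdr {				/* stand-in for arc_buf_hdr_t */
	struct hdr *next, *prev;
};

struct l2arc_dev {			/* stand-in for l2arc_dev_t */
	pthread_mutex_t l2ad_mtx;	/* protects l2ad_buflist */
	struct hdr l2ad_buflist;	/* circular list head */
};

static void
buflist_remove(struct l2arc_dev *dev, struct hdr *hdr)
{
	pthread_mutex_lock(&dev->l2ad_mtx);
	hdr->prev->next = hdr->next;	/* unlink only under the lock */
	hdr->next->prev = hdr->prev;
	pthread_mutex_unlock(&dev->l2ad_mtx);
}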
 * Headers for blocks cached on an L2ARC device keep that per-device state
 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
 * may live in the L1ARC, the L2ARC, or both.
 *
 * The L1ARC caches data in two ways -- in a list of ARC buffers (arc_buf_t)
 * and in a physical data pointer (b_pabd) attached to the
 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
 * it will match its on-disk compression characteristics. This behavior can be
 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE; with
 * compressed ARC disabled, the b_pabd will point to an
 * uncompressed version of the on-disk data.
 * [diagram: an arc_buf_hdr_t (fields common to the L1- and L2ARC, plus its
 *  l2arc_buf_hdr_t and l1arc_buf_hdr_t sub-structures) whose b_buf points to
 *  a chain of arc_buf_t structures. A compressed arc_buf_t (b_comp = T)
 *  shares its b_data with the header's b_pabd (compressed, shared data),
 *  while an uncompressed arc_buf_t (b_comp = F, b_next -> NULL) holds its
 *  own copy of the uncompressed data.]
 * [diagram: the same structures with compressed ARC disabled. The header's
 *  b_pabd holds uncompressed data, and the last arc_buf_t in the b_buf chain
 *  (b_next -> NULL) shares that single uncompressed copy rather than
 *  allocating its own.]
 * When a block is written, the write zio leaves the header
 * with the transformed data and will memcpy the transformed on-disk block into
 * a freshly allocated b_pabd. The L2ARC can then write that copy verbatim,
 * since it is identical
 * to the on-disk block in the main data pool. This provides a significant
 * advantage: no further transformation is needed on the L2ARC write path.
 *
 * Encrypted blocks add one more wrinkle, because it is not
 * possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
 * To cope, raw (encrypted, and possibly unauthenticated)
 * data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
 * can hold both the raw ciphertext (b_rabd) and its decrypted, still
 * on-disk-compressed counterpart (b_pabd) at the same time.
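/*
 * Illustration (not part of arc.c): how a reader chooses between the two
 * copies described above. In arc_read() the encrypted-read path takes
 * hdr->b_crypt_hdr.b_rabd while normal reads take hdr->b_l1hdr.b_pabd;
 * this sketch uses simplified stand-in types.
 */
#include <stdbool.h>
#include <stddef.h>

struct hdr_sketch {
	void *pabd;	/* decrypted (still on-disk-compressed) data */
	void *rabd;	/* raw ciphertext; non-NULL only when protected */
};

static void *
pick_read_source(const struct hdr_sketch *h, bool encrypted_read)
{
	if (encrypted_read && h->rabd != NULL)
		return (h->rabd);	/* hand back the raw bytes */
	return (h->pabd);		/* may be NULL if not yet decrypted */
}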
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */

/*
 * When reading a new block into the ARC, we will evict an equal-sized block
 * from the cache.
 */

/* Enable or disable compressed arc buffers. */

/* These tunables are Linux-specific. */
		/* moving average with weight 1/ARCSTAT_F_AVG_FACTOR */ \
		x = x - x / ARCSTAT_F_AVG_FACTOR + \
		    (value) / ARCSTAT_F_AVG_FACTOR; \
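/*
 * Illustration (not part of arc.c): the update above is an exponential
 * moving average that keeps (N-1)/N of the old value and folds in 1/N of
 * each new sample. A runnable demonstration with N = 3:
 */
#include <stdint.h>
#include <stdio.h>

#define	AVG_FACTOR	3	/* plays the role of ARCSTAT_F_AVG_FACTOR */

int
main(void)
{
	uint64_t x = 0;
	uint64_t samples[] = { 300, 300, 300, 30, 30, 30 };

	for (size_t i = 0; i < sizeof (samples) / sizeof (samples[0]); i++) {
		x = x - x / AVG_FACTOR + samples[i] / AVG_FACTOR;
		printf("sample %3llu -> avg %llu\n",
		    (unsigned long long)samples[i], (unsigned long long)x);
	}
	return (0);
}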
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish
 * to manipulate them, so they are also kept as ordinary variables.
#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define	HDR_PRESCIENT_PREFETCH(hdr)	\
	((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define	HDR_COMPRESSION_ENABLED(hdr)	\
	((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)

#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define	HDR_UNCACHED(hdr)	((hdr)->b_flags & ARC_FLAG_UNCACHED)
#define	HDR_L2_READING(hdr)	\
	(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&	\
	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define	HDR_PROTECTED(hdr)	((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define	HDR_NOAUTH(hdr)		((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define	HDR_SHARED_DATA(hdr)	((hdr)->b_flags & ARC_FLAG_SHARED_DATA)

#define	HDR_ISTYPE_METADATA(hdr)	\
	((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)

#define	HDR_HAS_L1HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define	HDR_HAS_L2HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define	HDR_HAS_RABD(hdr)	\
	(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) &&	\
	(hdr)->b_crypt_hdr.b_rabd != NULL)
#define	HDR_ENCRYPTED(hdr)	\
	(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define	HDR_AUTHENTICATED(hdr)	\
	(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))

#define	HDR_COMPRESS_OFFSET	(highbit64(ARC_FLAG_COMPRESS_0) - 1)

#define	HDR_GET_COMPRESS(hdr)	((enum zio_compress)BF32_GET((hdr)->b_flags, \
	HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define	HDR_SET_COMPRESS(hdr, cmp)	BF32_SET((hdr)->b_flags, \
	HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp))

#define	ARC_BUF_LAST(buf)	((buf)->b_next == NULL)
#define	ARC_BUF_SHARED(buf)	((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define	ARC_BUF_COMPRESSED(buf)	((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define	ARC_BUF_ENCRYPTED(buf)	((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
#define	BUF_HASH_LOCK(idx)	(&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	HDR_LOCK(hdr)	\
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
static boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
    const l2arc_log_blkptr_t *lbp);
/* from buf_hash() */
	return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
#define	HDR_EMPTY(hdr)	\
	((hdr)->b_dva.dva_word[0] == 0 &&	\
	(hdr)->b_dva.dva_word[1] == 0)

#define	HDR_EQUAL(spa, dva, birth, hdr)	\
	((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
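/*
 * Illustration (not part of arc.c): the lookup pattern buf_hash_find()
 * builds from the macros above -- hash the (spa, dva, birth) identity,
 * take the lock covering that bucket, then walk the b_hash_next chain.
 * Toy hash and types; the real code uses cityhash4() and returns with the
 * bucket lock still held for the caller.
 */
#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

#define	NBUCKETS	1024	/* must be a power of two */
#define	NLOCKS		256	/* each lock covers NBUCKETS/NLOCKS buckets */

struct hdr {
	uint64_t spa, dva0, dva1, birth;
	struct hdr *hash_next;
};

static struct hdr *table[NBUCKETS];
static pthread_mutex_t locks[NLOCKS];	/* zero-init: default mutexes */

static struct hdr *
hash_find(uint64_t spa, uint64_t d0, uint64_t d1, uint64_t birth)
{
	uint64_t idx = (spa ^ d0 ^ d1 ^ birth) & (NBUCKETS - 1);
	pthread_mutex_t *lk = &locks[idx & (NLOCKS - 1)];
	struct hdr *h;

	pthread_mutex_lock(lk);
	for (h = table[idx]; h != NULL; h = h->hash_next) {
		if (h->spa == spa && h->dva0 == d0 &&
		    h->dva1 == d1 && h->birth == birth)
			break;
	}
	pthread_mutex_unlock(lk);	/* the real code keeps this held */
	return (h);
}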
/* from buf_discard_identity() */
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;

/* from buf_hash_find(): walking a bucket's collision chain */
	    hdr = hdr->b_hash_next) {

/* from buf_hash_insert() */
	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);

	ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
	ASSERT(hdr->b_birth != 0);

	/* scan for an already-inserted header with the same identity ... */
	    fhdr = fhdr->b_hash_next, i++) {
		if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))

	/* ... otherwise link the new header at the bucket head */
	hdr->b_hash_next = buf_hash_table.ht_table[idx];

/* from buf_hash_remove() */
	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);

		hdrp = &fhdr->b_hash_next;
	*hdrp = hdr->b_hash_next;
	hdr->b_hash_next = NULL;

	/* was this the last header in its bucket? */
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* from hdr_full_cons() */
	hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
	zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
	mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	multilist_link_init(&hdr->b_l1hdr.b_arc_node);
	list_link_init(&hdr->b_l2hdr.b_l2node);

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* from hdr_full_dest() */
	zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
	mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/* from buf_init() */
	/*
	 * By default, the hash table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	buf_hash_table.ht_mask = hsize - 1;

	/* generate the reflected CRC64 table (ECMA-182, ZFS_CRC64_POLY) */
	for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
		*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
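/*
 * Illustration (not part of arc.c): the loop above builds the reflected
 * CRC-64 table for ZFS_CRC64_POLY (ECMA-182). A runnable version with a
 * byte-at-a-time update using the same recurrence:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected */

static uint64_t crc64_table[256];

static void
crc64_init(void)
{
	for (int i = 0; i < 256; i++) {
		uint64_t ct = (uint64_t)i;
		for (int j = 8; j > 0; j--)
			ct = (ct >> 1) ^ (-(ct & 1) & ZFS_CRC64_POLY);
		crc64_table[i] = ct;
	}
}

static uint64_t
crc64(const void *buf, size_t len, uint64_t crc)
{
	const uint8_t *p = buf;

	while (len-- > 0)
		crc = (crc >> 8) ^ crc64_table[(crc ^ *p++) & 0xFF];
	return (crc);
}

int
main(void)
{
	const char *s = "openzfs";

	crc64_init();
	printf("crc64(\"%s\") = %016llx\n", s,
	    (unsigned long long)crc64(s, strlen(s), -1ULL));
	return (0);
}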
/* from arc_buf_size() */
	return (ARC_BUF_COMPRESSED(buf) ?
	    HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));

/* from arc_buf_lsize() */
	return (HDR_GET_LSIZE(buf->b_hdr));

/* from arc_is_unauthenticated() */
	return (HDR_NOAUTH(buf->b_hdr) != 0);

/* from arc_get_raw_params() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	memcpy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
	memcpy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
	memcpy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
	*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
	    ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;

/* from arc_get_compression() */
	return (HDR_COMPRESSION_ENABLED(buf->b_hdr) ?
	    HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);

/*
 * ... the compression used to store this data in the ARC is the same
 * as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
 */

/* from arc_get_complevel() */
	return (buf->b_hdr->b_complevel);
/* from arc_buf_is_shared() */
	boolean_t shared = (buf->b_data != NULL &&
	    buf->b_hdr->b_l1hdr.b_pabd != NULL &&
	    abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
	    buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
	IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
/*
 * Free the hdr's freeze checksum; if none was computed, this
 * is a no-op.
 */
/* from arc_cksum_free() */
	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
	if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
		kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_l1hdr.b_freeze_cksum = NULL;
	}
	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
/* from arc_hdr_has_uncompressed_buf() */
	ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));

	for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
/*
 * Verify the buf against its freeze checksum. If no checksum exists yet,
 * or if the buf is compressed, this is a no-op.
 */
/* from arc_cksum_verify() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);

	if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
		return;
	}

	fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
/* from arc_cksum_is_equal() */
	ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
	VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));

	return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
	    BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
	    zio->io_offset, NULL) == 0);
/*
 * Compute the buf's freeze checksum. If one already exists
 * on the hdr, this is a no-op (we only checksum uncompressed bufs).
 */
/* from arc_cksum_compute() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
	if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
		return;
	}

	hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
	    KM_SLEEP);
	fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
	    hdr->b_l1hdr.b_freeze_cksum);
	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
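/*
 * Illustration (not part of arc.c): my recollection of the shape of the
 * fletcher_2_native() computation used above -- two interleaved Fletcher
 * sums over pairs of 64-bit words (see the canonical version in the ZFS
 * fletcher code; this sketch assumes size is a multiple of 16 bytes).
 */
#include <stdint.h>
#include <stddef.h>

typedef struct {
	uint64_t zc_word[4];
} cksum_sketch_t;

static void
fletcher2_sketch(const void *buf, uint64_t size, cksum_sketch_t *zcp)
{
	const uint64_t *ip = buf;
	const uint64_t *ipend = ip + (size / sizeof (uint64_t));
	uint64_t a0 = 0, a1 = 0, b0 = 0, b1 = 0;

	for (; ip < ipend; ip += 2) {
		a0 += ip[0];	/* running sums ... */
		a1 += ip[1];
		b0 += a0;	/* ... and sums of sums */
		b1 += a1;
	}
	zcp->zc_word[0] = a0;
	zcp->zc_word[1] = a1;
	zcp->zc_word[2] = b0;
	zcp->zc_word[3] = b1;
}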
/* from arc_buf_sigsegv() */
	panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);

/* from arc_buf_unwatch() */
	ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
	    PROT_READ | PROT_WRITE));

/* from arc_buf_watch() */
	ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
	    PROT_READ));

/* from arc_buf_type() */
	VERIFY3U(hdr->b_type, ==, type);

/* from arc_is_metadata() */
	return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);

/* from arc_bufc_to_flags(): unreachable default case */
	return ((uint32_t)-1);

/* from arc_buf_thaw() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);

/* from arc_buf_freeze() */
	ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
/*
 * The hdr's b_flags must be
 * updated in a thread-safe way. When manipulating the flags either
 * the hash lock must be held or the hdr must not yet be discoverable.
 */
/* from arc_hdr_set_flags() */
	hdr->b_flags |= flags;

/* from arc_hdr_clear_flags(): clears the given flags in the same
 * thread-safe manner. */
	hdr->b_flags &= ~flags;
/* from arc_buf_try_copy_decompressed_data() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);

	for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
	    from = from->b_next) {
		/* copy from the first uncompressed buf we find */
		memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
	}

	/*
	 * There were no decompressed bufs, so there should not be a
	 * checksum on the hdr either.
	 */
	EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
/*
 * Allocates an ARC buf header that's in an evicted & L2-cached state.
 * This is used during l2arc_rebuild for restored blocks,
 * which circumvent the regular disk->arc->l2arc path and instead come
 * into being in the reverse order, i.e. l2arc->arc.
 */
/* from arc_buf_alloc_l2only() */
	hdr->b_birth = birth;
	hdr->b_type = type;
	hdr->b_flags = 0;
	hdr->b_complevel = complevel;
	hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
	hdr->b_dva = dva;

	hdr->b_l2hdr.b_dev = dev;
	hdr->b_l2hdr.b_daddr = daddr;
	hdr->b_l2hdr.b_arcs_state = arcs_state;
/* from arc_hdr_authenticate() */
	abd_t *abd = hdr->b_l1hdr.b_pabd;

	/* recompress so the authenticated bytes match the on-disk psize */
	uint64_t csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
	    hdr->b_l1hdr.b_pabd, &abd, lsize, MIN(lsize, psize),
	    hdr->b_complevel);
	if (csize < psize)
		abd_zero_off(abd, csize, psize - csize);

	/* objset blocks embed their MAC; other types keep it in the bp */
	if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
		ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, abd,
		    psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
	} else {
		/* ... HMAC check against the stored MAC */
		    hdr->b_crypt_hdr.b_mac);
	}
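/*
 * Illustration (not part of arc.c): the recompress-and-pad step above in
 * plain buffers. The MAC must cover exactly the psize bytes that were
 * written to disk, so a shorter recompressed result is zero-filled up to
 * psize, as abd_zero_off() does for ABDs.
 */
#include <stdint.h>
#include <string.h>

static void
pad_to_psize(uint8_t *buf, uint64_t csize, uint64_t psize)
{
	if (csize < psize)
		memset(buf + csize, 0, psize - csize);
}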
/* from arc_hdr_decrypt() */
	boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);

	ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
	    B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
	    hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
	    hdr->b_crypt_hdr.b_rabd, &no_crypt);

	if (no_crypt)	/* nothing was actually encrypted */
		abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
		    HDR_GET_PSIZE(hdr));

	/* decompress into a fresh cabd and swap it in for b_pabd */
	    hdr->b_l1hdr.b_pabd, cabd, HDR_GET_PSIZE(hdr),
	    HDR_GET_LSIZE(hdr), &hdr->b_complevel);

	arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
	    arc_hdr_size(hdr), hdr);
	hdr->b_l1hdr.b_pabd = cabd;
/* from arc_fill_hdr_crypt() */
		ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
	} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
		/* decrypt on demand */

	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

/* from arc_buf_untransform_in_place() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
	ASSERT3PF(hdr->b_l1hdr.b_pabd, !=, NULL, "hdr %px buf %px", hdr, buf);

	zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
	    arc_buf_size(buf));
	buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
	buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
 * ... decompressing the hdr's data into
 * the correct-sized data buffer.
 */
/* from arc_buf_fill() */
	arc_buf_hdr_t *hdr = buf->b_hdr;
	dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;

	ASSERT3P(buf->b_data, !=, NULL);

	/* encrypted bufs are filled straight from the raw data */
		abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
		    HDR_GET_PSIZE(hdr));

	/*
	 * Dnode blocks must
	 * be decrypted in-place. This is necessary because there may
	 * be live bonus buffers referencing the data. If the need
	 * arises for other types to be decrypted in-place, they must
	 * be handled here as well.
	 */
	ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);

	/* a compressed buf can copy the hdr's compressed bytes directly */
		abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
		    arc_buf_size(buf));

	/* stop sharing and give the buf its own allocation ... */
		buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
		buf->b_data =
		    arc_get_data_buf(hdr, arc_buf_size(buf), buf);

	/* ... or reallocate when the target size differs */
		arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
		    buf);
		buf->b_data =
		    arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);

	/* zero any tail between the compressed payload and lsize */
	    HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));

	buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;

	/* decompress the hdr's data directly into the buf */
		abd_get_from_buf_struct(&dabd, buf->b_data,
		    HDR_GET_LSIZE(hdr));
		error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
		    hdr->b_l1hdr.b_pabd, &dabd,
		    HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr),
		    &hdr->b_complevel);

	/* finally, byteswap if the block was written in foreign order */
	dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
/* from arc_untransform(): log decryption/authentication failures */
		spa_log_error(spa, zb, buf->b_hdr->b_birth);
/* from arc_evictable_space_increment() */
	/* in a ghost state only the lsize is accounted: */
	ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	(void) zfs_refcount_add_many(&state->arcs_esize[type],
	    HDR_GET_LSIZE(hdr), hdr);

	/* otherwise the b_pabd, any b_rabd, and each unshared buf count: */
	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
		    arc_hdr_size(hdr), hdr);
	}
	if (HDR_HAS_RABD(hdr)) {
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
		    HDR_GET_PSIZE(hdr), hdr);
	}
	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
		    arc_buf_size(buf), buf);
	}

/* arc_evictable_space_decrement() mirrors the above: */
	ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	(void) zfs_refcount_remove_many(&state->arcs_esize[type],
	    HDR_GET_LSIZE(hdr), hdr);

	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    arc_hdr_size(hdr), hdr);
	}
	if (HDR_HAS_RABD(hdr)) {
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    HDR_GET_PSIZE(hdr), hdr);
	}
	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    arc_buf_size(buf), buf);
	}
/* from add_reference() */
	arc_state_t *state = hdr->b_l1hdr.b_state;

	if (HDR_EMPTY(hdr)) {
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	}

	/* taking the first hold removes the hdr from its eviction list */
	if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
	    (state != arc_anon)) {
		/* We don't use the L2-only state list. */
		multilist_remove(&state->arcs_list[arc_buf_type(hdr)], hdr);
	}

/* from remove_reference() */
	arc_state_t *state = hdr->b_l1hdr.b_state;

	/* dropping a hold, but not the last one: nothing more to do */
	if ((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) != 0)
		return (cnt);

	/* the last hold is gone: the hdr becomes evictable again */
	multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
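/*
 * Illustration (not part of arc.c): the invariant add_reference() and
 * remove_reference() maintain above -- a header sits on its state's
 * eviction list exactly while it has no holds (the real code also skips
 * the anon and L2-only states). Simplified counters stand in for
 * zfs_refcount_t and the multilists.
 */
#include <assert.h>
#include <stdint.h>

struct hdr_sketch {
	int64_t refcnt;
	int on_evict_list;
};

static void
add_ref(struct hdr_sketch *h)
{
	if (++h->refcnt == 1)
		h->on_evict_list = 0;	/* first hold: hide from eviction */
}

static void
remove_ref(struct hdr_sketch *h)
{
	assert(h->refcnt > 0);
	if (--h->refcnt == 0)
		h->on_evict_list = 1;	/* last hold gone: evictable again */
}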
/* from arc_buf_info() */
	arc_buf_hdr_t *hdr = ab->b_hdr;

	abi->abi_flags = hdr->b_flags;

	if (HDR_HAS_L1HDR(hdr)) {
		l1hdr = &hdr->b_l1hdr;
		state = l1hdr->b_state;
	}
	if (HDR_HAS_L2HDR(hdr))
		l2hdr = &hdr->b_l2hdr;

	if (l1hdr) {
		abi->abi_bufcnt = 0;
		for (arc_buf_t *buf = l1hdr->b_buf; buf; buf = buf->b_next)
			abi->abi_bufcnt++;
		abi->abi_access = l1hdr->b_arc_access;
		abi->abi_mru_hits = l1hdr->b_mru_hits;
		abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
		abi->abi_mfu_hits = l1hdr->b_mfu_hits;
		abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
		abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
	}

	if (l2hdr) {
		abi->abi_l2arc_dattr = l2hdr->b_daddr;
		abi->abi_l2arc_hits = l2hdr->b_hits;
	}

	abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
	abi->abi_state_contents = arc_buf_type(hdr);
	abi->abi_size = arc_hdr_size(hdr);
/* from arc_change_state() */
	old_state = hdr->b_l1hdr.b_state;
	refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
	update_old = (hdr->b_l1hdr.b_buf != NULL ||
	    hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));

	IMPLY(GHOST_STATE(old_state), hdr->b_l1hdr.b_buf == NULL);
	IMPLY(GHOST_STATE(new_state), hdr->b_l1hdr.b_buf == NULL);
	IMPLY(old_state == arc_anon, hdr->b_l1hdr.b_buf == NULL ||
	    ARC_BUF_LAST(hdr->b_l1hdr.b_buf));

	/* an unreferenced hdr moves between the states' eviction lists */
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		multilist_remove(&old_state->arcs_list[type], hdr);
	}
	/*
	 * When moving to some L1-cached state (i.e. not l2c_only or
	 * ghost), the hdr goes onto the new state's list:
	 */
	multilist_insert(&new_state->arcs_list[type], hdr);

	/* moving into a ghost state: only the lsize is accounted */
	(void) zfs_refcount_add_many(
	    &new_state->arcs_size[type],
	    HDR_GET_LSIZE(hdr), hdr);
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);

	/* otherwise each buf, the b_pabd, and any b_rabd are charged */
	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		(void) zfs_refcount_add_many(
		    &new_state->arcs_size[type],
		    arc_buf_size(buf), buf);
	}
	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) zfs_refcount_add_many(
		    &new_state->arcs_size[type],
		    arc_hdr_size(hdr), hdr);
	}
	if (HDR_HAS_RABD(hdr)) {
		(void) zfs_refcount_add_many(
		    &new_state->arcs_size[type],
		    HDR_GET_PSIZE(hdr), hdr);
	}

	/* the mirror-image amounts come off the old state */
	(void) zfs_refcount_remove_many(
	    &old_state->arcs_size[type],
	    HDR_GET_LSIZE(hdr), hdr);	/* (for an old ghost state) */
	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		(void) zfs_refcount_remove_many(
		    &old_state->arcs_size[type],
		    arc_buf_size(buf), buf);
	}
	ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
	    HDR_HAS_RABD(hdr));
	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) zfs_refcount_remove_many(
		    &old_state->arcs_size[type],
		    arc_hdr_size(hdr), hdr);
	}
	if (HDR_HAS_RABD(hdr)) {
		(void) zfs_refcount_remove_many(
		    &old_state->arcs_size[type],
		    HDR_GET_PSIZE(hdr), hdr);
	}

	hdr->b_l1hdr.b_state = new_state;

	/* remember the new state on the L2 header as well */
	hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
/* from arc_space_consume(): buffers here are expected to be
 * very short-lived. */

/* from arc_space_return(): each accounting bucket gives back the space
 * being returned (the switch on the space type is elided here) */
	ARCSTAT_INCR(arcstat_data_size, -space);
	ARCSTAT_INCR(arcstat_metadata_size, -space);
	ARCSTAT_INCR(arcstat_bonus_size, -space);
	ARCSTAT_INCR(arcstat_dnode_size, -space);
	ARCSTAT_INCR(arcstat_dbuf_size, -space);
	ARCSTAT_INCR(arcstat_hdr_size, -space);
	aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
	ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);

	ARCSTAT_INCR(arcstat_meta_used, -space);

	aggsum_add(&arc_sums.arcstat_size, -space);
/* from arc_can_share(): among other conditions, no byteswap may be pending */
	ASSERT3P(buf->b_hdr, ==, hdr);
	    hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
/* from arc_buf_alloc_impl() */
	VERIFY(hdr->b_type == ARC_BUFC_DATA ||
	    hdr->b_type == ARC_BUFC_METADATA);

	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_next = hdr->b_l1hdr.b_buf;
	buf->b_flags = 0;

	/* an encrypted buf is implicitly compressed ... */
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
		buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
	/* ... while a compressed-only request just sets the one flag */
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;

	/*
	 * There are additional restrictions here because we're sharing
	 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
	 * actively involved in an L2ARC write; second, the hdr's ABD must be
	 * linear so that the buf's user doesn't
	 * need to be ABD-aware. It must be allocated via
	 * the plain linear ABD path for sharing to be possible.
	 */
	boolean_t can_share = arc_can_share(hdr, buf) &&
	    hdr->b_l1hdr.b_pabd != NULL &&
	    abd_is_linear(hdr->b_l1hdr.b_pabd) &&
	    !abd_is_linear_page(hdr->b_l1hdr.b_pabd);

	/* set up b_data and sharing */
	if (can_share) {
		buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
		buf->b_flags |= ARC_BUF_FLAG_SHARED;
	} else {
		buf->b_data =
		    arc_get_data_buf(hdr, arc_buf_size(buf), buf);
	}
	VERIFY3P(buf->b_data, !=, NULL);

	hdr->b_l1hdr.b_buf = buf;
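/*
 * Illustration (not part of arc.c): the shape of the sharing decision
 * above condensed into a single predicate. The parameter names here are
 * descriptive stand-ins, not the real arc_can_share() interface.
 */
#include <stdbool.h>

static bool
can_share_sketch(bool hdr_has_pabd, bool abd_linear, bool abd_is_page,
    bool compression_matches, bool no_byteswap_pending)
{
	return (hdr_has_pabd && abd_linear && !abd_is_page &&
	    compression_matches && no_byteswap_pending);
}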
/* from arc_return_buf(): a loaned buf comes back to the ARC */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);

	arc_loaned_bytes_update(-arc_buf_size(buf));

/* from arc_loan_inuse_buf(): the reverse swap of hold tags */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);

/* from l2arc_free_abd_on_write() */
	df->l2df_abd = abd;
	df->l2df_size = size;
	df->l2df_type = type;
/* from arc_hdr_free_on_write() */
	arc_state_t *state = hdr->b_l1hdr.b_state;

	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    size, hdr);
	}
	(void) zfs_refcount_remove_many(&state->arcs_size[type], size, hdr);

	if (free_rdata)
		l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
	else
		l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
/* from arc_share_buf(): ownership of the buffer transfers to the hdr */
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);

	zfs_refcount_transfer_ownership_many(
	    &hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
	    arc_buf_size(buf), buf, hdr);
	hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
	abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
	    arc_buf_type(hdr) == ARC_BUFC_METADATA);
	buf->b_flags |= ARC_BUF_FLAG_SHARED;

	ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));

/* from arc_unshare_buf(): the reverse transfer */
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

	zfs_refcount_transfer_ownership_many(
	    &hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
	    arc_buf_size(buf), hdr, buf);
	abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
	abd_free(hdr->b_l1hdr.b_pabd);
	hdr->b_l1hdr.b_pabd = NULL;
	buf->b_flags &= ~ARC_BUF_FLAG_SHARED;

	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
/* from arc_buf_remove(): unlink buf from the hdr's b_buf list */
	arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;

	while (*bufp != NULL) {
		if (*bufp == buf) {
			*bufp = buf->b_next;
			break;
		}
		bufp = &(*bufp)->b_next;
	}
	buf->b_next = NULL;

/*
 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
 * b_buf list and free it.
 */
/* from arc_buf_destroy_impl() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	if (buf->b_data != NULL) {
		/* (shared bufs are unshared rather than freed) */
		arc_free_data_buf(hdr, buf->b_data, size, buf);
		ARCSTAT_INCR(arcstat_overhead_size, -size);
		buf->b_data = NULL;
	}

	/* if this was the last uncompressed buf, drop the freeze checksum */
	    hdr->b_l1hdr.b_pabd != NULL && !HDR_IO_IN_PROGRESS(hdr)) {
	for (b = hdr->b_l1hdr.b_buf; b; b = b->b_next) {

	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

	buf->b_hdr = NULL;
/* from arc_hdr_alloc_abd(): allocate either b_rabd or b_pabd */
	ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
	hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
	    alloc_flags);
	ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);

	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
	    alloc_flags);
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

/* from arc_hdr_free_abd(): free whichever of the two is requested */
	ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));

	if (free_rdata) {
		arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
		hdr->b_crypt_hdr.b_rabd = NULL;
		ARCSTAT_INCR(arcstat_raw_size, -size);
	} else {
		arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
		hdr->b_l1hdr.b_pabd = NULL;
	}

	/* with no data left, any pending byteswap becomes moot */
	if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
		hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;

	ARCSTAT_INCR(arcstat_compressed_size, -size);
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
/* from arc_hdr_alloc() */
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

	hdr->b_spa = spa;
	hdr->b_type = type;
	hdr->b_flags = 0;
	hdr->b_complevel = complevel;

	hdr->b_l1hdr.b_state = arc_anon;
	hdr->b_l1hdr.b_arc_access = 0;
	hdr->b_l1hdr.b_mru_hits = 0;
	hdr->b_l1hdr.b_mru_ghost_hits = 0;
	hdr->b_l1hdr.b_mfu_hits = 0;
	hdr->b_l1hdr.b_mfu_ghost_hits = 0;
	hdr->b_l1hdr.b_buf = NULL;

	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
/*
 * The full header cache (hdr_full_cache) carries, and the L2-only cache
 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
 * header suffices once a block lives only on an L2ARC device.
 */
/* from arc_hdr_realloc() */
	l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;

	/* moving from full to l2only: */
	nhdr->b_l1hdr.b_state = arc_l2c_only;
	ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);

	ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
	VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);

	/*
	 * The header has been reallocated so we need to re-insert it into any
	 * lists it was on, at the same position it occupied before:
	 */
	ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));

	mutex_enter(&dev->l2ad_mtx);
	list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
	list_remove(&dev->l2ad_buflist, hdr);
	mutex_exit(&dev->l2ad_mtx);

	/* transfer the device's allocation accounting to the new header */
	(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
	    arc_hdr_size(hdr), hdr);
	(void) zfs_refcount_add_many(&dev->l2ad_alloc,
	    arc_hdr_size(nhdr), nhdr);
/* from arc_convert_to_raw() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);

	buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);

	hdr->b_crypt_hdr.b_dsobj = dsobj;
	hdr->b_crypt_hdr.b_ot = ot;
	hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
	    DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);

	if (salt != NULL)
		memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
	if (iv != NULL)
		memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
	if (mac != NULL)
		memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);

/* from arc_alloc_raw_buf() */
	hdr->b_crypt_hdr.b_dsobj = dsobj;
	hdr->b_crypt_hdr.b_ot = ot;
	hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
	    DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
	memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
	memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
	memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/* from l2arc_hdr_arcstats_update() */
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	l2arc_dev_t *dev = l2hdr->b_dev;
	uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
	arc_buf_contents_t type = hdr->b_type;

	/* the adjustments are negated when removing */
		lsize_s = -lsize;
		psize_s = -psize;
		asize_s = -asize;

	/* hit statistics are also tracked per ARC state */
	switch (hdr->b_l2hdr.b_arcs_state) {

/* from arc_hdr_l2hdr_destroy() */
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	l2arc_dev_t *dev = l2hdr->b_dev;
	uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);

	ASSERT(MUTEX_HELD(&dev->l2ad_mtx));

	list_remove(&dev->l2ad_buflist, hdr);

	vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);

	(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
	    hdr);
/* from arc_hdr_destroy() */
	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);

	l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
	boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);

	if (!buflist_held)
		mutex_enter(&dev->l2ad_mtx);

	/*
	 * Re-check under the lock: if the header lost its L2 portion
	 * while we were blocked on the mutex, we do not
	 * want to re-destroy the header's L2 portion.
	 */
	if (HDR_HAS_L2HDR(hdr))
		arc_hdr_l2hdr_destroy(hdr);

	if (!buflist_held)
		mutex_exit(&dev->l2ad_mtx);

	while (hdr->b_l1hdr.b_buf != NULL)
		arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);

	if (hdr->b_l1hdr.b_pabd != NULL)
		arc_hdr_free_abd(hdr, B_FALSE);

	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
	ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/* from arc_buf_destroy() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/* an anonymous hdr with a single buf is destroyed wholesale */
	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
	}

	ASSERT3P(hdr, ==, buf->b_hdr);
	ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
	ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
	ASSERT3P(buf->b_data, !=, NULL);
/*
 * Evict the given buffer header. Possible state transitions:
 *	- arc_mru -> arc_mru_ghost
 *	- arc_mfu -> arc_mfu_ghost
 *	- arc_mru_ghost -> arc_l2c_only
 *	- arc_mru_ghost -> deleted
 *	- arc_mfu_ghost -> arc_l2c_only
 *	- arc_mfu_ghost -> deleted
 *	- arc_uncached -> deleted
 *
 * When evicting from ghost states, the logical size is returned so eviction
 * can progress at the same (or at least comparable) rate as from non-ghost
 * states. *real_evicted reports the actual ARC size reduction a thread may be
 * waiting for. For non-ghost states it includes size of evicted data
 * buffers, even if they are not owned by a waiter directly.
 */
/* from arc_evict_hdr() */
	ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));

	state = hdr->b_l1hdr.b_state;
	if (GHOST_STATE(state)) {
		ASSERT(hdr->b_l1hdr.b_pabd == NULL);

		/*
		 * When dropping from L1+L2 cached to L2-only,
		 * realloc to discard the L1 portion of the header.
		 */
		*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
	}

	/* recently prefetched headers get a grace period */
	if ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
	    ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
	    MSEC_TO_TICK(arc_min_prefetch_ms))
		return (0);

	if (l2arc_write_eligible(hdr->b_spa, hdr)) {
		/* account what the L2ARC could have cached, per state: */
		switch (state->arcs_state) {

	if (hdr->b_l1hdr.b_pabd != NULL)
		arc_hdr_free_abd(hdr, B_FALSE);
/* from arc_set_need_free() */
	int64_t remaining = arc_free_memory() - arc_sys_free / 2;

	arc_need_free = MAX(-remaining, 0);
	/* or, when eviction waiters are present: */
	arc_need_free =
	    MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
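/*
 * Illustration (not part of arc.c): worked numbers for the arithmetic
 * above. With arc_sys_free = 1 GiB and 256 MiB actually free,
 * remaining = 256M - 512M = -256M, so arc_need_free becomes 256M.
 * (The values are made up for the example.)
 */
#include <stdint.h>
#include <stdio.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	int64_t arc_sys_free = 1024LL << 20;	/* desired free floor */
	int64_t free_memory = 256LL << 20;	/* currently free */
	int64_t remaining = free_memory - arc_sys_free / 2;
	int64_t need_free = MAX(-remaining, 0);

	printf("arc_need_free = %lld MiB\n", (long long)(need_free >> 20));
	return (0);
}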
/* from arc_evict_state_impl() */
	/*
	 * Sublist iteration is stable against concurrent eviction
	 * (only 'marker' will be removed and re-inserted).
	 */

	/* markers are identified by b_spa == 0 and skipped */
	if (hdr->b_spa == 0)
		continue;

	/* when evicting for a single spa, skip everything else */
	if (spa != 0 && hdr->b_spa != spa) {
		continue;
	}

	evict_count--;

	/* wake waiters whose eviction quota has been met */
	if (aw->aew_count <= arc_evict_count) {
		cv_broadcast(&aw->aew_cv);
	}

	/*
	 * If eviction is cheap per block (e.g.
	 * if the average cached block is small), eviction can be on-CPU for
	 * a long time, so yield periodically.
	 */
/* from arc_state_alloc_marker(): markers are recognized by b_spa == 0 */
	marker->b_spa = 0;

/* from arc_evict_state() */
	multilist_t *ml = &state->arcs_list[type];

	/* spread the remaining eviction target across the sublists */
	bytes_remaining = bytes - total_evicted;

/* from arc_flush_state(): evict until nothing evictable remains */
	while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {

/* from arc_evict_impl() */
	if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
		delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
		    bytes);
/* from arc_evict_adj(): frac is a fraction of 2^32 */
	/* get maximal dynamic range by choosing optimal shifts */
	s = MIN(64 - s, 32);

	uint64_t ofrac = (1ULL << 32) - frac;

	up = (up << s) / (total >> (32 - s));
	down = (down << s) / (total >> (32 - s));

	return (frac + up - down);
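/*
 * Illustration (not part of arc.c): worked numbers for the fixed-point
 * update above. frac is a fraction scaled by 2^32; up and down are byte
 * counts converted to the same scale before adjusting frac. The shift
 * pair (<< s, >> (32 - s)) computes x * 2^32 / total while preserving
 * dynamic range for large totals. Sample values are made up.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t frac = 1ULL << 31;	/* 0.5 in 32-bit fixed point */
	uint64_t total = 1ULL << 30;	/* 1 GiB across the two lists */
	uint64_t up = 64ULL << 20;	/* ghost hits favoring growth */
	uint64_t down = 16ULL << 20;	/* ghost hits favoring shrink */
	int s = 32;			/* small total: the full shift fits */

	up = (up << s) / (total >> (32 - s));
	down = (down << s) / (total >> (32 - s));
	printf("new frac = %.6f\n",
	    (double)(frac + up - down) / 4294967296.0);
	return (0);
}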
/* from arc_evict() */
	mrud = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]);
	mrum = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) +
	    zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
	mfud = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
	mfum = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);

	/* deltas of ghost-list hits since the previous pass */
	ngrd = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]);
	uint64_t grd = ngrd - ogrd;
	ngrm = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]);
	uint64_t grm = ngrm - ogrm;
	ngfd = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]);
	uint64_t gfd = ngfd - ogfd;
	ngfm = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]);
	uint64_t gfm = ngfm - ogfm;

	/* how much must be evicted to get back under the target */
	int64_t wt = t - (asize - ac);

	/* metadata that is not currently evictable (pinned) */
	int64_t nem = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA])
	    + zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA])
	    - zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA])
	    - zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);

	/* ask the prune callbacks to release dnodes/metadata if needed */
	prune = arc_mf(prune, nem - w * 3 / 4, w / 4);
	prune = MAX(prune, (dn - arc_dnode_limit) / sizeof (dnode_t) *

	/* evict from each bucket toward its share of the target */
	e = MIN((int64_t)(asize - ac), (int64_t)(mrum - w));
	bytes = arc_evict_impl(arc_mru, ARC_BUFC_METADATA, e);
	total_evicted += bytes;
	mrum -= bytes;
	asize -= bytes;

	e = MIN((int64_t)(asize - ac), (int64_t)(m - bytes - w));
	bytes = arc_evict_impl(arc_mfu, ARC_BUFC_METADATA, e);
	mfum -= bytes;
	asize -= bytes;

	wt -= m - total_evicted;

	e = MIN((int64_t)(asize - ac), (int64_t)(mrud - w));
	bytes = arc_evict_impl(arc_mru, ARC_BUFC_DATA, e);
	mrud -= bytes;
	asize -= bytes;

	/* whatever remains over the target comes out of MFU data */
	e = asize - ac;
	bytes = arc_evict_impl(arc_mfu, ARC_BUFC_DATA, e);
	mfud -= bytes;

	/* finally trim each ghost list toward its own target size: */
	e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]) -
	e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]) -
	e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]) -
	e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]) -
/* from arc_reduce_target_size(): never shrink below arc_c_min */
	to_free = MIN(to_free, c - arc_c_min);
	arc_c = c - to_free;
/* from arc_kmem_reap_soon() */
	/* reach upper limit of cache size on 32-bit */

/* from arc_evict_cb_check() */
	/* refresh the kstats before deciding whether to run */
	arc_ksp->ks_update(arc_ksp, KSTAT_READ);

	/* also wake up periodically to flush idle uncached bufs */
	return ((zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]) &&
	    ddi_get_lbolt() - arc_last_uncached_flush >
	    /* the uncached-flush interval */);

/* from arc_evict_cb(): wake anyone waiting on eviction progress */
		cv_broadcast(&aw->aew_cv);
/* from arc_reap_cb_check() */
	/*
	 * ... to avoid
	 * becoming implicitly blocked by a system-wide kmem reap -- which,
	 * with many full kmem magazines, can take a long time.
	 */

/* from arc_reap_cb(): how much to ask the reaper to free */
	can_free = arc_c - arc_c_min;
	to_free = (MAX(can_free, 0) >> arc_shrink_shift) - free_memory;

/* from arc_is_overflowing() */
	int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) - arc_c -

/* from arc_wait_for_eviction() */
	last_count = last->aew_count;
/* from arc_get_data_impl(): charge the allocation to the hdr's state */
	arc_state_t *state = hdr->b_l1hdr.b_state;

	(void) zfs_refcount_add_many(&state->arcs_size[type], size,
	    tag);

	/* an unreferenced hdr's allocation is also evictable space */
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		(void) zfs_refcount_add_many(&state->arcs_esize[type],
		    size, tag);
	}

/* from arc_free_data_impl(): the mirror-image accounting */
	arc_state_t *state = hdr->b_l1hdr.b_state;

	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
		    size, tag);
	}
	(void) zfs_refcount_remove_many(&state->arcs_size[type], size, tag);

	VERIFY3U(hdr->b_type, ==, type);
/* from arc_access() */
	if (hdr->b_l1hdr.b_state == arc_anon) {
		/*
		 * This buffer is not in the cache and does not appear in
		 * our "ghost" lists; add it to the MRU state.
		 */
		ASSERT0(hdr->b_l1hdr.b_arc_access);
		hdr->b_l1hdr.b_arc_access = now;
	} else if (hdr->b_l1hdr.b_state == arc_mru) {
		/* a real (demand) hit promotes a prefetched buffer */
		hdr->b_l1hdr.b_arc_access = now;
		hdr->b_l1hdr.b_mru_hits++;

		hdr->b_l1hdr.b_arc_access = now;

		/* a second hit after the minimum lifetime moves it to MFU */
		if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
			hdr->b_l1hdr.b_arc_access = now;
	} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
		/* an MRU-ghost hit: the MRU portion was sized too small */
		hdr->b_l1hdr.b_mru_ghost_hits++;
		hdr->b_l1hdr.b_arc_access = now;
		wmsum_add(&arc_mru_ghost->arcs_hits[arc_buf_type(hdr)],
		    arc_hdr_size(hdr));
	} else if (hdr->b_l1hdr.b_state == arc_mfu) {
		/* already frequently used: just refresh the access time */
		hdr->b_l1hdr.b_mfu_hits++;
		hdr->b_l1hdr.b_arc_access = now;
	} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
		hdr->b_l1hdr.b_mfu_ghost_hits++;
		hdr->b_l1hdr.b_arc_access = now;
		wmsum_add(&arc_mfu_ghost->arcs_hits[arc_buf_type(hdr)],
		    arc_hdr_size(hdr));
	} else if (hdr->b_l1hdr.b_state == arc_uncached) {
		hdr->b_l1hdr.b_arc_access = now;
	} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
		/* the block came back from the L2ARC: give it an L1 state */
		hdr->b_l1hdr.b_arc_access = now;
	} else {
		cmn_err(CE_PANIC, "invalid arc state 0x%p",
		    hdr->b_l1hdr.b_state);
	}
/* from arc_buf_access() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/* quick check without the hash lock ... */
	if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr))
		return;

	/* ... then re-check under it */
	if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
		return;
	}

	ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
	    hdr->b_l1hdr.b_state == arc_mfu ||
	    hdr->b_l1hdr.b_state == arc_uncached);

/* from arc_bcopy_func() */
	memcpy(arg, buf->b_data, arc_buf_size(buf));

/* from arc_getbuf_func(): on failure no buf is handed out ... */
	ASSERT(zio == NULL || zio->io_error != 0);
	/* ... and on success one must be */
	ASSERT(zio == NULL || zio->io_error == 0);
	ASSERT(buf->b_data != NULL);
/* from arc_read_done() */
	blkptr_t *bp = zio->io_bp;
	arc_buf_hdr_t *hdr = zio->io_private;

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O, so it cannot have been evicted meanwhile.
	 */
	if (HDR_IN_HASH_TABLE(hdr)) {
		ASSERT3U(hdr->b_birth, ==, BP_GET_BIRTH(zio->io_bp));
		ASSERT3U(hdr->b_dva.dva_word[0], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[0]);
		ASSERT3U(hdr->b_dva.dva_word[1], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[1]);

		found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);

		ASSERT((found == hdr &&
		    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
		    (found == hdr && HDR_L2_READING(hdr)));
	}

	if (BP_IS_PROTECTED(bp)) {
		hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
		hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
		zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
		    hdr->b_crypt_hdr.b_iv);

		if (zio->io_error == 0) {
			/* ZIL blocks embed their MAC in the chain header */
			if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
				tmpbuf = abd_borrow_buf_copy(zio->io_abd,
				    sizeof (zil_chain_t));
				zio_crypt_decode_mac_zil(tmpbuf,
				    hdr->b_crypt_hdr.b_mac);
				abd_return_buf(zio->io_abd, tmpbuf,
				    sizeof (zil_chain_t));
			} else {
				zio_crypt_decode_mac_bp(bp,
				    hdr->b_crypt_hdr.b_mac);
			}
		}
	}

	if (zio->io_error == 0) {
		/* record any required byteswap */
		if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
			if (BP_GET_LEVEL(zio->io_bp) > 0) {
				hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
			} else {
				hdr->b_l1hdr.b_byteswap =
				    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
			}
		} else {
			hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
		}
		hdr->b_complevel = zio->io_prop.zp_complevel;
	}

	callback_list = hdr->b_l1hdr.b_acb;
	hdr->b_l1hdr.b_acb = NULL;

	for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
		/* skip callbacks that do not want a buf */
		if (!acb->acb_done || acb->acb_nobuf)
			continue;

		if (zio->io_error != 0)
			continue;

		int error = arc_buf_alloc_impl(hdr, zio->io_spa,
		    &acb->acb_zb, acb->acb_private, acb->acb_encrypted,
		    acb->acb_compressed, acb->acb_noauth, B_TRUE,
		    &acb->acb_buf);

		/*
		 * Assert non-speculative zios didn't fail because an
		 * encryption key wasn't loaded.
		 */
		ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
		    error != EACCES);

		if (error != 0) {
			if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
				spa_log_error(zio->io_spa, &acb->acb_zb,
				    BP_GET_LOGICAL_BIRTH(zio->io_bp));
				(void) zfs_ereport_post(
				    FM_EREPORT_ZFS_AUTHENTICATION,
				    zio->io_spa, NULL, &acb->acb_zb, zio, 0);
			}
			if (zio->io_error == 0)
				zio->io_error = error;
		}
	}

	if (zio->io_error == 0) {
		arc_hdr_verify(hdr, zio->io_bp);
	} else {
		/* on error the hdr falls back to the anonymous state */
		if (hdr->b_l1hdr.b_state != arc_anon)
			arc_change_state(arc_anon, hdr);
	}

	/* execute each callback */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done != NULL) {
			if (zio->io_error != 0 && acb->acb_buf != NULL) {
				/*
				 * A buf allocated despite the error (e.g. a
				 * decompression failure) still needs to be
				 * freed here.
				 */
				arc_buf_destroy(acb->acb_buf,
				    acb->acb_private);
				acb->acb_buf = NULL;
			}
			acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
			    acb->acb_buf, acb->acb_private);
		}

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_prev;
		if (acb->acb_wait) {
			mutex_enter(&acb->acb_wait_lock);
			acb->acb_wait_error = zio->io_error;
			acb->acb_wait = B_FALSE;
			cv_signal(&acb->acb_wait_cv);
			mutex_exit(&acb->acb_wait_lock);
		}
	}
/* from arc_cached() */
	arc_state_t *state = hdr->b_l1hdr.b_state;

	/*
	 * We switch on arcs_state rather than testing flags so we get
	 * more compile-time checking.
	 */
	switch (state->arcs_state) {

/*
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results), or add a record to the read so its "done" func is invoked
 * when that read completes.
 */
/* from arc_read() */
	/* cache hit with data present (raw data for encrypted reads)? */
	    (hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {

	/* a read for this block is already in flight */
		zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;

		if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
		    priority == ZIO_PRIORITY_SYNC_READ) {
			/*
			 * This sync read must wait for
			 * an in-flight async read. Request that the
			 * zio have its priority upgraded.
			 */
		}

		acb->acb_done = done;
		acb->acb_private = private;
		acb->acb_compressed = compressed_read;
		acb->acb_encrypted = encrypted_read;
		acb->acb_noauth = noauth_read;
		acb->acb_nobuf = no_buf;

		/* for ARC_FLAG_WAIT callers, prepare to block */
			acb->acb_wait = B_TRUE;
			mutex_init(&acb->acb_wait_lock, NULL,
			    MUTEX_DEFAULT, NULL);
			cv_init(&acb->acb_wait_cv, NULL,
			    CV_DEFAULT, NULL);

		acb->acb_zb = *zb;
		if (pio != NULL)
			acb->acb_zio_dummy = zio_null(pio,
			    spa, NULL, NULL, NULL, zio_flags);

		acb->acb_zio_head = head_zio;
		acb->acb_next = hdr->b_l1hdr.b_acb;
		hdr->b_l1hdr.b_acb->acb_prev = acb;
		hdr->b_l1hdr.b_acb = acb;

		/* block until the in-flight read completes */
			mutex_enter(&acb->acb_wait_lock);
			while (acb->acb_wait) {
				cv_wait(&acb->acb_wait_cv,
				    &acb->acb_wait_lock);
			}
			rc = acb->acb_wait_error;
			mutex_exit(&acb->acb_wait_lock);
			mutex_destroy(&acb->acb_wait_lock);
			cv_destroy(&acb->acb_wait_cv);

	/* cache hit */
		ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
		    hdr->b_l1hdr.b_state == arc_mfu ||
		    hdr->b_l1hdr.b_state == arc_uncached);

		/* on a decrypt/authentication failure: */
			spa_log_error(spa, zb, hdr->b_birth);

	/* cache miss: (re)establish the hdr's identity */
		hdr->b_dva = *BP_IDENTITY(bp);
		hdr->b_birth = BP_GET_BIRTH(bp);

		/*
		 * If this hdr was
		 * L2-only (and thus didn't have an L1 hdr),
		 * it is reallocated with a full header first.
		 */
		if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
			ASSERT(zfs_refcount_is_zero(
			    &hdr->b_l1hdr.b_refcnt));
			ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
			ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
		}

		/*
		 * A waiter can also join a read that raced in ahead of us
		 * on this same hdr:
		 */
			acb->acb_wait = B_TRUE;
			mutex_init(&acb->acb_wait_lock, NULL,
			    MUTEX_DEFAULT, NULL);
			cv_init(&acb->acb_wait_cv, NULL, CV_DEFAULT,
			    NULL);
			acb->acb_zio_head =
			    hdr->b_l1hdr.b_acb->acb_zio_head;
			acb->acb_next = hdr->b_l1hdr.b_acb;
			hdr->b_l1hdr.b_acb->acb_prev = acb;
			hdr->b_l1hdr.b_acb = acb;

			mutex_enter(&acb->acb_wait_lock);
			while (acb->acb_wait) {
				cv_wait(&acb->acb_wait_cv,
				    &acb->acb_wait_lock);
			}
			mutex_exit(&acb->acb_wait_lock);
			mutex_destroy(&acb->acb_wait_lock);
			cv_destroy(&acb->acb_wait_cv);

		/* choose the source abd for the read */
		if (encrypted_read) {
			hdr_abd = hdr->b_crypt_hdr.b_rabd;
		} else {
			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			hdr_abd = hdr->b_l1hdr.b_pabd;
		}

		ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));

		acb->acb_done = done;
		acb->acb_private = private;
		acb->acb_compressed = compressed_read;
		acb->acb_encrypted = encrypted_read;
		acb->acb_noauth = noauth_read;
		acb->acb_zb = *zb;

		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
		hdr->b_l1hdr.b_acb = acb;

		if (HDR_HAS_L2HDR(hdr) &&
		    (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
			devw = hdr->b_l2hdr.b_dev->l2ad_writing;
			addr = hdr->b_l2hdr.b_daddr;
			/*
			 * Lock out L2ARC device removal while
			 * the read IO is still in-flight.
			 */
		}

		/* does the pool have an L2ARC at all? */
		    spa->spa_l2cache.sav_count > 0;

		/* L2ARC hit: read from the cache device */
			hdr->b_l2hdr.b_hits++;

			cb->l2rcb_hdr = hdr;
			cb->l2rcb_bp = *bp;
			cb->l2rcb_zb = *zb;
			cb->l2rcb_flags = zio_flags;
			cb->l2rcb_abd = abd;

			/* the read must land within the device payload area */
			    addr + asize <= vd->vdev_psize -

			acb->acb_zio_head = rzio;
		/*
		 * L2ARC miss (a device that is unavailable, mid-write, or a
		 * faulted cache device - that's also a miss.)
		 */
			acb->acb_zio_head = rzio;

	/* ARC_FLAG_WAIT on the primary read: propagate the result */
		zio->io_error = rc;
/* from arc_add_prune_callback() */
	p->p_pfunc = func;
	p->p_private = private;
	list_link_init(&p->p_node);
	zfs_refcount_create(&p->p_refcnt);

	zfs_refcount_add(&p->p_refcnt, &arc_prune_list);

/* from arc_remove_prune_callback() */
	/* another holder may still be running the callback */
	if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
		return;

	ASSERT0(zfs_refcount_count(&p->p_refcnt));
	zfs_refcount_destroy(&p->p_refcnt);

/* from arc_prune_task() */
	arc_prune_func_t *func = ap->p_pfunc;

	if (func != NULL)
		func(ap->p_adjust, ap->p_private);

	(void) zfs_refcount_remove(&ap->p_refcnt, func);

/* from arc_prune_async(): skip callbacks that are already queued */
	if (zfs_refcount_count(&ap->p_refcnt) >= 2)
		continue;

	zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
	ap->p_adjust = adjust;
	/* if dispatch fails, drop the hold we just took: */
	(void) zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);

/*
 * from arc_freed(): the block may still be cached if it was read
 * speculatively
 * (i.e. prefetch) or has some other reference (i.e. a dedup-ed,
 * dmu_sync-ed block). A block may also have a reference if it is
 * part of a dedup-ed, dmu_synced write. The dmu_sync() function would
 * have written a new block directly to disk, leaving this one behind.
 */
	/* destroy the header only when nothing else holds it */
	    zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
/* from arc_release() */
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/* an anonymous buffer is already released; just sanity-check it */
	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
		ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));

		hdr->b_l1hdr.b_arc_access = 0;
		return;
	}

	arc_state_t *state = hdr->b_l1hdr.b_state;
	ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);

	/* drop any L2ARC linkage; the block is about to be rewritten */
	mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
	/* (the L2 portion of the header is destroyed here) */
	mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);

	/* do we have more than one buf? */
	if (hdr->b_l1hdr.b_buf != buf || !ARC_BUF_LAST(buf)) {
		uint64_t spa = hdr->b_spa;

		VERIFY3U(hdr->b_type, ==, type);
		ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);

		/* pull our buf off of the hdr's buf list */
		ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);

		/*
		 * If this buf was sharing its data with the hdr, first copy
		 * the data into a new b_pabd for the hdr.
		 */
		abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
		    buf->b_data, psize);
		VERIFY3P(lastbuf->b_data, !=, NULL);

		ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));

		/* remove this buf's accounting from the old state */
		(void) zfs_refcount_remove_many(&state->arcs_size[type],
		    arc_buf_size(buf), buf);

		if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
			(void) zfs_refcount_remove_many(
			    &state->arcs_esize[type],
			    arc_buf_size(buf), buf);
		}

		/* give the buf a fresh anonymous hdr of its own */
		nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
		    compress, hdr->b_complevel, type);
		ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
		VERIFY3U(nhdr->b_type, ==, type);

		nhdr->b_l1hdr.b_buf = buf;
		(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
		buf->b_hdr = nhdr;

		(void) zfs_refcount_add_many(&arc_anon->arcs_size[type],
		    arc_buf_size(buf), buf);
	} else {
		/* this hdr has only our buf: move the hdr itself to anon */
		ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));

		hdr->b_l1hdr.b_mru_hits = 0;
		hdr->b_l1hdr.b_mru_ghost_hits = 0;
		hdr->b_l1hdr.b_mfu_hits = 0;
		hdr->b_l1hdr.b_mfu_ghost_hits = 0;

		hdr->b_l1hdr.b_arc_access = 0;
	}
/* from arc_released() */
	return (buf->b_data != NULL &&
	    buf->b_hdr->b_l1hdr.b_state == arc_anon);

/* from arc_referenced() */
	return (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
/* from arc_write_ready() */
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;
	blkptr_t *bp = zio->io_bp;

	ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
	ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);

	/*
	 * If we're reexecuting this zio because the pool suspended, free
	 * any state set up the first time the callback was invoked.
	 */
	if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
		if (hdr->b_l1hdr.b_pabd != NULL) {
			/* (the previous copy is released here) */
		}
	}
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);

	callback->awcb_ready(zio, buf, callback->awcb_private);

	if (HDR_IO_IN_PROGRESS(hdr))
		ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);

	/* record any required byteswap */
	if (BP_SHOULD_BYTESWAP(bp)) {
		if (BP_GET_LEVEL(bp) > 0) {
			hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
		} else {
			hdr->b_l1hdr.b_byteswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
		}
	} else {
		hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
	}

	if (BP_IS_PROTECTED(bp)) {
		hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
		hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
		zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
		    hdr->b_crypt_hdr.b_iv);
		zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
	}

	/*
	 * If the block was written with different transforms than requested,
	 * the buf's flags are adjusted to match the data:
	 */
		buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
		buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;

		buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
		buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;

	hdr->b_complevel = zio->io_prop.zp_complevel;

	if (zio->io_error != 0 || psize == 0)
		/* nothing to copy into the hdr */;

	/*
	 * Fill the hdr with data. If the hdr is compressed or raw, the data
	 * we want is available from the zio; otherwise we take it from the
	 * buf:
	 */
		abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);

		abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);

		abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);

		ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
		abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
		    arc_buf_size(buf));

	ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
	ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
	ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
6642 arc_write_callback_t *callback = zio->io_private; in arc_write_children_ready()
6643 arc_buf_t *buf = callback->awcb_buf; in arc_write_children_ready()
6645 callback->awcb_children_ready(zio, buf, callback->awcb_private); in arc_write_children_ready()
6651 arc_write_callback_t *callback = zio->io_private; in arc_write_done()
6652 arc_buf_t *buf = callback->awcb_buf; in arc_write_done()
6653 arc_buf_hdr_t *hdr = buf->b_hdr; in arc_write_done()
6655 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); in arc_write_done()
6657 if (zio->io_error == 0) { in arc_write_done()
6658 arc_hdr_verify(hdr, zio->io_bp); in arc_write_done()
6660 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { in arc_write_done()
6663 hdr->b_dva = *BP_IDENTITY(zio->io_bp); in arc_write_done()
6664 hdr->b_birth = BP_GET_BIRTH(zio->io_bp); in arc_write_done()
6671 * If the block to be written was all-zero or compressed enough to be in arc_write_done()
6680 ASSERT3U(zio->io_error, ==, 0); in arc_write_done()
6688 * sync-to-convergence, because we remove in arc_write_done()
6691 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { in arc_write_done()
6692 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) in arc_write_done()
6696 &exists->b_l1hdr.b_refcnt)); in arc_write_done()
6702 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { in arc_write_done()
6704 ASSERT(zio->io_prop.zp_nopwrite); in arc_write_done()
6705 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) in arc_write_done()
6710 ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL); in arc_write_done()
6711 ASSERT(ARC_BUF_LAST(hdr->b_l1hdr.b_buf)); in arc_write_done()
6712 ASSERT(hdr->b_l1hdr.b_state == arc_anon); in arc_write_done()
6713 ASSERT(BP_GET_DEDUP(zio->io_bp)); in arc_write_done()
6714 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); in arc_write_done()
6720 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) in arc_write_done()
6728 callback->awcb_done(zio, buf, callback->awcb_private); in arc_write_done()
6730 abd_free(zio->io_abd); in arc_write_done()
6742 arc_buf_hdr_t *hdr = buf->b_hdr; in arc_write()
6751 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); in arc_write()
6752 ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL); in arc_write()
6762 localprop.zp_complevel = hdr->b_complevel; in arc_write()
6764 (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ? in arc_write()
6766 memcpy(localprop.zp_salt, hdr->b_crypt_hdr.b_salt, in arc_write()
6768 memcpy(localprop.zp_iv, hdr->b_crypt_hdr.b_iv, in arc_write()
6770 memcpy(localprop.zp_mac, hdr->b_crypt_hdr.b_mac, in arc_write()
6775 MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1); in arc_write()
6781 localprop.zp_complevel = hdr->b_complevel; in arc_write()
6785 callback->awcb_ready = ready; in arc_write()
6786 callback->awcb_children_ready = children_ready; in arc_write()
6787 callback->awcb_done = done; in arc_write()
6788 callback->awcb_private = private; in arc_write()
6789 callback->awcb_buf = buf; in arc_write()
6795 if (hdr->b_l1hdr.b_pabd != NULL) { in arc_write()
6808 VERIFY3P(buf->b_data, !=, NULL); in arc_write()
6818 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); in arc_write()
6821 abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)), in arc_write()
6832 atomic_add_64(&arc_tempreserve, -reserve); in arc_tempreserve_clear()
6866 (zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]) + in arc_tempreserve_space()
6867 zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]) - in arc_tempreserve_space()
6903 &arc_anon->arcs_esize[ARC_BUFC_METADATA]); in arc_tempreserve_space()
6905 zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]); in arc_tempreserve_space()
6926 data->value.ui64 = in arc_kstat_update_state()
6927 zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]); in arc_kstat_update_state()
6928 metadata->value.ui64 = in arc_kstat_update_state()
6929 zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]); in arc_kstat_update_state()
6930 size->value.ui64 = data->value.ui64 + metadata->value.ui64; in arc_kstat_update_state()
6931 evict_data->value.ui64 = in arc_kstat_update_state()
6932 zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]); in arc_kstat_update_state()
6933 evict_metadata->value.ui64 = in arc_kstat_update_state()
6934 zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]); in arc_kstat_update_state()
6940 arc_stats_t *as = ksp->ks_data; in arc_kstat_update()
6945 as->arcstat_hits.value.ui64 = in arc_kstat_update()
6947 as->arcstat_iohits.value.ui64 = in arc_kstat_update()
6949 as->arcstat_misses.value.ui64 = in arc_kstat_update()
6951 as->arcstat_demand_data_hits.value.ui64 = in arc_kstat_update()
6953 as->arcstat_demand_data_iohits.value.ui64 = in arc_kstat_update()
6955 as->arcstat_demand_data_misses.value.ui64 = in arc_kstat_update()
6957 as->arcstat_demand_metadata_hits.value.ui64 = in arc_kstat_update()
6959 as->arcstat_demand_metadata_iohits.value.ui64 = in arc_kstat_update()
6961 as->arcstat_demand_metadata_misses.value.ui64 = in arc_kstat_update()
6963 as->arcstat_prefetch_data_hits.value.ui64 = in arc_kstat_update()
6965 as->arcstat_prefetch_data_iohits.value.ui64 = in arc_kstat_update()
6967 as->arcstat_prefetch_data_misses.value.ui64 = in arc_kstat_update()
6969 as->arcstat_prefetch_metadata_hits.value.ui64 = in arc_kstat_update()
6971 as->arcstat_prefetch_metadata_iohits.value.ui64 = in arc_kstat_update()
6973 as->arcstat_prefetch_metadata_misses.value.ui64 = in arc_kstat_update()
6975 as->arcstat_mru_hits.value.ui64 = in arc_kstat_update()
6977 as->arcstat_mru_ghost_hits.value.ui64 = in arc_kstat_update()
6979 as->arcstat_mfu_hits.value.ui64 = in arc_kstat_update()
6981 as->arcstat_mfu_ghost_hits.value.ui64 = in arc_kstat_update()
6983 as->arcstat_uncached_hits.value.ui64 = in arc_kstat_update()
6985 as->arcstat_deleted.value.ui64 = in arc_kstat_update()
6987 as->arcstat_mutex_miss.value.ui64 = in arc_kstat_update()
6989 as->arcstat_access_skip.value.ui64 = in arc_kstat_update()
6991 as->arcstat_evict_skip.value.ui64 = in arc_kstat_update()
6993 as->arcstat_evict_not_enough.value.ui64 = in arc_kstat_update()
6995 as->arcstat_evict_l2_cached.value.ui64 = in arc_kstat_update()
6997 as->arcstat_evict_l2_eligible.value.ui64 = in arc_kstat_update()
6999 as->arcstat_evict_l2_eligible_mfu.value.ui64 = in arc_kstat_update()
7001 as->arcstat_evict_l2_eligible_mru.value.ui64 = in arc_kstat_update()
7003 as->arcstat_evict_l2_ineligible.value.ui64 = in arc_kstat_update()
7005 as->arcstat_evict_l2_skip.value.ui64 = in arc_kstat_update()
7007 as->arcstat_hash_elements.value.ui64 = in arc_kstat_update()
7008 as->arcstat_hash_elements_max.value.ui64 = in arc_kstat_update()
7010 as->arcstat_hash_collisions.value.ui64 = in arc_kstat_update()
7012 as->arcstat_hash_chains.value.ui64 = in arc_kstat_update()
7014 as->arcstat_size.value.ui64 = in arc_kstat_update()
7016 as->arcstat_compressed_size.value.ui64 = in arc_kstat_update()
7018 as->arcstat_uncompressed_size.value.ui64 = in arc_kstat_update()
7020 as->arcstat_overhead_size.value.ui64 = in arc_kstat_update()
7022 as->arcstat_hdr_size.value.ui64 = in arc_kstat_update()
7024 as->arcstat_data_size.value.ui64 = in arc_kstat_update()
7026 as->arcstat_metadata_size.value.ui64 = in arc_kstat_update()
7028 as->arcstat_dbuf_size.value.ui64 = in arc_kstat_update()
7031 as->arcstat_other_size.value.ui64 = in arc_kstat_update()
7038 &as->arcstat_anon_size, in arc_kstat_update()
7039 &as->arcstat_anon_data, in arc_kstat_update()
7040 &as->arcstat_anon_metadata, in arc_kstat_update()
7041 &as->arcstat_anon_evictable_data, in arc_kstat_update()
7042 &as->arcstat_anon_evictable_metadata); in arc_kstat_update()
7044 &as->arcstat_mru_size, in arc_kstat_update()
7045 &as->arcstat_mru_data, in arc_kstat_update()
7046 &as->arcstat_mru_metadata, in arc_kstat_update()
7047 &as->arcstat_mru_evictable_data, in arc_kstat_update()
7048 &as->arcstat_mru_evictable_metadata); in arc_kstat_update()
7050 &as->arcstat_mru_ghost_size, in arc_kstat_update()
7051 &as->arcstat_mru_ghost_data, in arc_kstat_update()
7052 &as->arcstat_mru_ghost_metadata, in arc_kstat_update()
7053 &as->arcstat_mru_ghost_evictable_data, in arc_kstat_update()
7054 &as->arcstat_mru_ghost_evictable_metadata); in arc_kstat_update()
7056 &as->arcstat_mfu_size, in arc_kstat_update()
7057 &as->arcstat_mfu_data, in arc_kstat_update()
7058 &as->arcstat_mfu_metadata, in arc_kstat_update()
7059 &as->arcstat_mfu_evictable_data, in arc_kstat_update()
7060 &as->arcstat_mfu_evictable_metadata); in arc_kstat_update()
7062 &as->arcstat_mfu_ghost_size, in arc_kstat_update()
7063 &as->arcstat_mfu_ghost_data, in arc_kstat_update()
7064 &as->arcstat_mfu_ghost_metadata, in arc_kstat_update()
7065 &as->arcstat_mfu_ghost_evictable_data, in arc_kstat_update()
7066 &as->arcstat_mfu_ghost_evictable_metadata); in arc_kstat_update()
7068 &as->arcstat_uncached_size, in arc_kstat_update()
7069 &as->arcstat_uncached_data, in arc_kstat_update()
7070 &as->arcstat_uncached_metadata, in arc_kstat_update()
7071 &as->arcstat_uncached_evictable_data, in arc_kstat_update()
7072 &as->arcstat_uncached_evictable_metadata); in arc_kstat_update()
7074 as->arcstat_dnode_size.value.ui64 = in arc_kstat_update()
7076 as->arcstat_bonus_size.value.ui64 = in arc_kstat_update()
7078 as->arcstat_l2_hits.value.ui64 = in arc_kstat_update()
7080 as->arcstat_l2_misses.value.ui64 = in arc_kstat_update()
7082 as->arcstat_l2_prefetch_asize.value.ui64 = in arc_kstat_update()
7084 as->arcstat_l2_mru_asize.value.ui64 = in arc_kstat_update()
7086 as->arcstat_l2_mfu_asize.value.ui64 = in arc_kstat_update()
7088 as->arcstat_l2_bufc_data_asize.value.ui64 = in arc_kstat_update()
7090 as->arcstat_l2_bufc_metadata_asize.value.ui64 = in arc_kstat_update()
7092 as->arcstat_l2_feeds.value.ui64 = in arc_kstat_update()
7094 as->arcstat_l2_rw_clash.value.ui64 = in arc_kstat_update()
7096 as->arcstat_l2_read_bytes.value.ui64 = in arc_kstat_update()
7098 as->arcstat_l2_write_bytes.value.ui64 = in arc_kstat_update()
7100 as->arcstat_l2_writes_sent.value.ui64 = in arc_kstat_update()
7102 as->arcstat_l2_writes_done.value.ui64 = in arc_kstat_update()
7104 as->arcstat_l2_writes_error.value.ui64 = in arc_kstat_update()
7106 as->arcstat_l2_writes_lock_retry.value.ui64 = in arc_kstat_update()
7108 as->arcstat_l2_evict_lock_retry.value.ui64 = in arc_kstat_update()
7110 as->arcstat_l2_evict_reading.value.ui64 = in arc_kstat_update()
7112 as->arcstat_l2_evict_l1cached.value.ui64 = in arc_kstat_update()
7114 as->arcstat_l2_free_on_write.value.ui64 = in arc_kstat_update()
7116 as->arcstat_l2_abort_lowmem.value.ui64 = in arc_kstat_update()
7118 as->arcstat_l2_cksum_bad.value.ui64 = in arc_kstat_update()
7120 as->arcstat_l2_io_error.value.ui64 = in arc_kstat_update()
7122 as->arcstat_l2_lsize.value.ui64 = in arc_kstat_update()
7124 as->arcstat_l2_psize.value.ui64 = in arc_kstat_update()
7126 as->arcstat_l2_hdr_size.value.ui64 = in arc_kstat_update()
7128 as->arcstat_l2_log_blk_writes.value.ui64 = in arc_kstat_update()
7130 as->arcstat_l2_log_blk_asize.value.ui64 = in arc_kstat_update()
7132 as->arcstat_l2_log_blk_count.value.ui64 = in arc_kstat_update()
7134 as->arcstat_l2_rebuild_success.value.ui64 = in arc_kstat_update()
7136 as->arcstat_l2_rebuild_abort_unsupported.value.ui64 = in arc_kstat_update()
7138 as->arcstat_l2_rebuild_abort_io_errors.value.ui64 = in arc_kstat_update()
7140 as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 = in arc_kstat_update()
7142 as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 = in arc_kstat_update()
7144 as->arcstat_l2_rebuild_abort_lowmem.value.ui64 = in arc_kstat_update()
7146 as->arcstat_l2_rebuild_size.value.ui64 = in arc_kstat_update()
7148 as->arcstat_l2_rebuild_asize.value.ui64 = in arc_kstat_update()
7150 as->arcstat_l2_rebuild_bufs.value.ui64 = in arc_kstat_update()
7152 as->arcstat_l2_rebuild_bufs_precached.value.ui64 = in arc_kstat_update()
7154 as->arcstat_l2_rebuild_log_blks.value.ui64 = in arc_kstat_update()
7156 as->arcstat_memory_throttle_count.value.ui64 = in arc_kstat_update()
7158 as->arcstat_memory_direct_count.value.ui64 = in arc_kstat_update()
7160 as->arcstat_memory_indirect_count.value.ui64 = in arc_kstat_update()
7163 as->arcstat_memory_all_bytes.value.ui64 = in arc_kstat_update()
7165 as->arcstat_memory_free_bytes.value.ui64 = in arc_kstat_update()
7167 as->arcstat_memory_available_bytes.value.i64 = in arc_kstat_update()
7170 as->arcstat_prune.value.ui64 = in arc_kstat_update()
7172 as->arcstat_meta_used.value.ui64 = in arc_kstat_update()
7174 as->arcstat_async_upgrade_sync.value.ui64 = in arc_kstat_update()
7176 as->arcstat_predictive_prefetch.value.ui64 = in arc_kstat_update()
7178 as->arcstat_demand_hit_predictive_prefetch.value.ui64 = in arc_kstat_update()
7180 as->arcstat_demand_iohit_predictive_prefetch.value.ui64 = in arc_kstat_update()
7182 as->arcstat_prescient_prefetch.value.ui64 = in arc_kstat_update()
7184 as->arcstat_demand_hit_prescient_prefetch.value.ui64 = in arc_kstat_update()
7186 as->arcstat_demand_iohit_prescient_prefetch.value.ui64 = in arc_kstat_update()
7188 as->arcstat_raw_size.value.ui64 = in arc_kstat_update()
7190 as->arcstat_cached_only_in_progress.value.ui64 = in arc_kstat_update()
7192 as->arcstat_abd_chunk_waste_size.value.ui64 = in arc_kstat_update()
7230 return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % in arc_state_multilist_index_func()
7252  * updated manually. Non-zero zfs_* values that differ from the currently set	  in arc_tuning_update()
7260 /* Valid range: 32M - <arc_c_max> */ in arc_tuning_update()
7269 /* Valid range: 64M - <all physical memory> */ in arc_tuning_update()
7280 /* Valid range: 0 - <all physical memory> */ in arc_tuning_update()
7285 /* Valid range: 1 - N */ in arc_tuning_update()
7289 /* Valid range: 1 - N */ in arc_tuning_update()
7292 		arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);	  in arc_tuning_update()
7295 /* Valid range: 1 - N ms */ in arc_tuning_update()
7299 /* Valid range: 1 - N ms */ in arc_tuning_update()
7305 /* Valid range: 0 - 100 */ in arc_tuning_update()
7311 /* Valid range: 0 - <all physical memory> */ in arc_tuning_update()
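/*
 * Editor's note (hedged): on Linux these zfs_arc_* knobs arrive as module
 * parameters, so a change such as
 *     echo 4294967296 > /sys/module/zfs/parameters/zfs_arc_max
 * is picked up by arc_tuning_update(), which validates it against the
 * ranges above before folding it into the live arc_* state.
 */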
7331 arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7333 arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7335 arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7337 arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7339 arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7341 arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7343 arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7345 arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7347 arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7349 arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7356 arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], in arc_state_init()
7358 arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], in arc_state_init()
7363 * any ARC state. The markers will be pre-allocated so as to minimize in arc_state_init()
7368 zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7369 zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7370 zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7371 zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7372 zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7373 zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7374 zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7375 zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7376 zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7377 zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7378 zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7379 zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7380 zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]); in arc_state_init()
7381 zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_DATA]); in arc_state_init()
7383 zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7384 zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7385 zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7386 zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7387 zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7388 zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7389 zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7390 zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7391 zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7392 zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7393 zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7394 zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7395 zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_DATA]); in arc_state_init()
7396 zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_METADATA]); in arc_state_init()
7398 wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA], 0); in arc_state_init()
7399 wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA], 0); in arc_state_init()
7400 wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA], 0); in arc_state_init()
7401 wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA], 0); in arc_state_init()
7502 arc_anon->arcs_state = ARC_STATE_ANON; in arc_state_init()
7503 arc_mru->arcs_state = ARC_STATE_MRU; in arc_state_init()
7504 arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST; in arc_state_init()
7505 arc_mfu->arcs_state = ARC_STATE_MFU; in arc_state_init()
7506 arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST; in arc_state_init()
7507 arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY; in arc_state_init()
7508 arc_uncached->arcs_state = ARC_STATE_UNCACHED; in arc_state_init()
7514 zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7515 zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7516 zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7517 zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7518 zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7519 zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7520 zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7521 zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7522 zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7523 zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7524 zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7525 zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7526 zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]); in arc_state_fini()
7527 zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_DATA]); in arc_state_fini()
7529 zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7530 zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7531 zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7532 zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7533 zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7534 zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7535 zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7536 zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7537 zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7538 zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7539 zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7540 zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7541 zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_DATA]); in arc_state_fini()
7542 zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_METADATA]); in arc_state_fini()
7544 multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7545 multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7546 multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7547 multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7548 multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7549 multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7550 multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7551 multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7552 multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7553 multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7554 multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_METADATA]); in arc_state_fini()
7555 multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_DATA]); in arc_state_fini()
7557 wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]); in arc_state_fini()
7558 wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]); in arc_state_fini()
7559 wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]); in arc_state_fini()
7560 wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]); in arc_state_fini()
7696 * If zfs_arc_max is non-zero at init, meaning it was set in the kernel in arc_init()
7722 * 32-bit fixed point fractions of metadata from total ARC size, in arc_init()
7758 arc_ksp->ks_data = &arc_stats; in arc_init()
7759 arc_ksp->ks_update = arc_kstat_update; in arc_init()
7832 (void) zfs_refcount_remove(&p->p_refcnt, &arc_prune_list); in arc_fini()
7833 zfs_refcount_destroy(&p->p_refcnt); in arc_fini()
7880 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
7884 * include short-stroked disks, solid state disks, and other media with
7887 * +-----------------------+
7889 * +-----------------------+
7896 * +---------------+ |
7898 * +---------------+ |
7903 * +-------+ +-------+
7906 * +-------+ +-------+
7907 * +=========+ .-----.
7908 * : L2ARC : |-_____-|
7910 * +=========+ `-_____-'
7930 * It does this by periodically scanning buffers from the eviction-end of
7941 * head --> tail
7942 * +---------------------+----------+
7943 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
7944 * +---------------------+----------+ | o L2ARC eligible
7945 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
7946 * +---------------------+----------+ |
7951 * l2arc write hand <--[oooo]--'
7984 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
8034 * main ARC buffers. There are 2 linked-lists of log blocks headed by
8036 * time-wise and offset-wise interleaved, but that is an optimization rather
8042 * which contains our top-level reference structures. We update it each
8055 * ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
8057 * |+------+ ...--\-------/ \-----/--\------/ / |
8058 * | \--------------/ \--------------/ |
8067 * incurring a large amount of I/O round-trip latency. Having two lists
8071 * On-device data structures:
8087 * <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
8090 * <<nextwrite>> may overwrite this blk and/or its bufs --'
8107 * birth TXG uniquely identify a block in space and time - once created,
8129 if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) || in l2arc_write_eligible()
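/*
 * Hedged completion of the predicate above (a sketch, not the verbatim
 * upstream body): the matched fragment shows only the first two
 * disqualifiers; a buffer is also assumed ineligible while its I/O is
 * still in progress or when the dataset opted out of secondary caching.
 */
static boolean_t
l2arc_write_eligible_sketch(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	if (hdr->b_spa != spa_guid ||		/* belongs to another pool */
	    HDR_HAS_L2HDR(hdr) ||		/* already cached on L2ARC */
	    HDR_IO_IN_PROGRESS(hdr) ||		/* read may be incomplete */
	    !HDR_L2CACHE(hdr))			/* secondarycache excludes it */
		return (B_FALSE);
	return (B_TRUE);
}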
8157 if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0) { in l2arc_write_size()
8171 size = MIN(size, (dev->l2ad_end - dev->l2ad_start) / 4); in l2arc_write_size()
8173 size = P2ROUNDUP(size, 1ULL << dev->l2ad_vdev->vdev_ashift); in l2arc_write_size()
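/*
 * Standalone sketch of the two clamps visible above (illustrative helper;
 * the device fields are passed in rather than read from l2arc_dev_t):
 */
static uint64_t
l2arc_write_size_sketch(uint64_t size, uint64_t start, uint64_t end,
    uint64_t ashift)
{
	/* Never attempt more than a quarter of the usable device range. */
	size = MIN(size, (end - start) / 4);
	/* Round up to the vdev's allocation granularity. */
	return (P2ROUNDUP(size, 1ULL << ashift));
}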
8187 * how much we previously wrote - if it was more than half of in l2arc_write_interval()
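/*
 * Sketch of the cadence the comment above describes (assumed shape, not
 * the verbatim function): writing more than half of what was wanted means
 * the ARC lists are busy, so the feed thread reschedules after the short
 * l2arc_feed_min_ms interval instead of the long l2arc_feed_secs one.
 */
static clock_t
l2arc_write_interval_sketch(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval;

	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;	/* fast path */
	else
		interval = hz * l2arc_feed_secs;		/* idle path */

	return (began + interval);
}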
8225 /* loop around the list looking for a non-faulted vdev */ in l2arc_dev_get_next()
8241 } while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild || in l2arc_dev_get_next()
8242 next->l2ad_trim_all || next->l2ad_spa->spa_is_exporting); in l2arc_dev_get_next()
8245 if (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild || in l2arc_dev_get_next()
8246 next->l2ad_trim_all || next->l2ad_spa->spa_is_exporting) in l2arc_dev_get_next()
8259 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); in l2arc_dev_get_next()
8275 ASSERT3P(df->l2df_abd, !=, NULL); in l2arc_do_free_on_write()
8276 abd_free(df->l2df_abd); in l2arc_do_free_on_write()
8299 cb = zio->io_private; in l2arc_write_done()
8301 dev = cb->l2wcb_dev; in l2arc_write_done()
8302 l2dhdr = dev->l2ad_dev_hdr; in l2arc_write_done()
8304 head = cb->l2wcb_head; in l2arc_write_done()
8306 buflist = &dev->l2ad_buflist; in l2arc_write_done()
8315 mutex_enter(&dev->l2ad_mtx); in l2arc_write_done()
8342 mutex_exit(&dev->l2ad_mtx); in l2arc_write_done()
8357 * state while in-flight due to our ARC_FLAG_L2_WRITING in l2arc_write_done()
8363 * Skipped - drop L2ARC entry and mark the header as no in l2arc_write_done()
8366 if (zio->io_error != 0) { in l2arc_write_done()
8368 * Error - drop L2ARC entry. in l2arc_write_done()
8377 vdev_psize_to_asize(dev->l2ad_vdev, psize); in l2arc_write_done()
8378 (void) zfs_refcount_remove_many(&dev->l2ad_alloc, in l2arc_write_done()
8397 while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) { in l2arc_write_done()
8398 abd_free(abd_buf->abd); in l2arc_write_done()
8400 if (zio->io_error != 0) { in l2arc_write_done()
8401 lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list); in l2arc_write_done()
8407 L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop); in l2arc_write_done()
8409 ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize); in l2arc_write_done()
8411 zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize, in l2arc_write_done()
8413 (void) zfs_refcount_remove(&dev->l2ad_lb_count, in l2arc_write_done()
8415 kmem_free(lb_ptr_buf->lb_ptr, in l2arc_write_done()
8420 list_destroy(&cb->l2wcb_abd_list); in l2arc_write_done()
8422 if (zio->io_error != 0) { in l2arc_write_done()
8430 lb_ptr_buf = list_head(&dev->l2ad_lbptr_list); in l2arc_write_done()
8440 dev->l2ad_dev_hdr_asize); in l2arc_write_done()
8442 memset(&l2dhdr->dh_start_lbps[i], 0, in l2arc_write_done()
8447 memcpy(&l2dhdr->dh_start_lbps[i], lb_ptr_buf->lb_ptr, in l2arc_write_done()
8449 lb_ptr_buf = list_next(&dev->l2ad_lbptr_list, in l2arc_write_done()
8458 mutex_exit(&dev->l2ad_mtx); in l2arc_write_done()
8460 ASSERT(dev->l2ad_vdev != NULL); in l2arc_write_done()
8461 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); in l2arc_write_done()
8472 spa_t *spa = zio->io_spa; in l2arc_untransform()
8473 arc_buf_hdr_t *hdr = cb->l2rcb_hdr; in l2arc_untransform()
8474 blkptr_t *bp = zio->io_bp; in l2arc_untransform()
8486 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); in l2arc_untransform()
8501 ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb, in l2arc_untransform()
8504 hdr->b_l1hdr.b_pabd, &no_crypt); in l2arc_untransform()
8516 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, in l2arc_untransform()
8518 hdr->b_l1hdr.b_pabd = eabd; in l2arc_untransform()
8519 zio->io_abd = eabd; in l2arc_untransform()
8536 hdr->b_l1hdr.b_pabd, cabd, HDR_GET_PSIZE(hdr), in l2arc_untransform()
8537 HDR_GET_LSIZE(hdr), &hdr->b_complevel); in l2arc_untransform()
8543 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, in l2arc_untransform()
8545 hdr->b_l1hdr.b_pabd = cabd; in l2arc_untransform()
8546 zio->io_abd = cabd; in l2arc_untransform()
8547 zio->io_size = HDR_GET_LSIZE(hdr); in l2arc_untransform()
8565 l2arc_read_callback_t *cb = zio->io_private; in l2arc_read_done()
8569 boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) && in l2arc_read_done()
8570 (cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT)); in l2arc_read_done()
8572 ASSERT3P(zio->io_vd, !=, NULL); in l2arc_read_done()
8573 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); in l2arc_read_done()
8575 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); in l2arc_read_done()
8578 hdr = cb->l2rcb_hdr; in l2arc_read_done()
8589 if (cb->l2rcb_abd != NULL) { in l2arc_read_done()
8590 ASSERT3U(arc_hdr_size(hdr), <, zio->io_size); in l2arc_read_done()
8591 if (zio->io_error == 0) { in l2arc_read_done()
8593 abd_copy(hdr->b_crypt_hdr.b_rabd, in l2arc_read_done()
8594 cb->l2rcb_abd, arc_hdr_size(hdr)); in l2arc_read_done()
8596 abd_copy(hdr->b_l1hdr.b_pabd, in l2arc_read_done()
8597 cb->l2rcb_abd, arc_hdr_size(hdr)); in l2arc_read_done()
8604 * - free the temporary buffer in l2arc_read_done()
8605 * - point zio to the real ARC buffer in l2arc_read_done()
8606 * - set zio size accordingly in l2arc_read_done()
8607 * These are required because zio is either re-used for in l2arc_read_done()
8612 abd_free(cb->l2rcb_abd); in l2arc_read_done()
8613 zio->io_size = zio->io_orig_size = arc_hdr_size(hdr); in l2arc_read_done()
8617 zio->io_abd = zio->io_orig_abd = in l2arc_read_done()
8618 hdr->b_crypt_hdr.b_rabd; in l2arc_read_done()
8620 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); in l2arc_read_done()
8621 zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd; in l2arc_read_done()
8625 ASSERT3P(zio->io_abd, !=, NULL); in l2arc_read_done()
8630 ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd || in l2arc_read_done()
8631 (HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd)); in l2arc_read_done()
8632 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ in l2arc_read_done()
8633 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ in l2arc_read_done()
8634 zio->io_prop.zp_complevel = hdr->b_complevel; in l2arc_read_done()
8646 if (valid_cksum && tfm_error == 0 && zio->io_error == 0 && in l2arc_read_done()
8649 zio->io_private = hdr; in l2arc_read_done()
8656 if (zio->io_error != 0) { in l2arc_read_done()
8659 zio->io_error = SET_ERROR(EIO); in l2arc_read_done()
8669 if (zio->io_waiter == NULL) { in l2arc_read_done()
8672 hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd; in l2arc_read_done()
8674 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); in l2arc_read_done()
8676 zio = zio_read(pio, zio->io_spa, zio->io_bp, in l2arc_read_done()
8677 abd, zio->io_size, arc_read_done, in l2arc_read_done()
8678 hdr, zio->io_priority, cb->l2rcb_flags, in l2arc_read_done()
8679 &cb->l2rcb_zb); in l2arc_read_done()
8686 for (struct arc_callback *acb = hdr->b_l1hdr.b_acb; in l2arc_read_done()
8687 acb != NULL; acb = acb->acb_next) in l2arc_read_done()
8688 acb->acb_zio_head = zio; in l2arc_read_done()
8720 ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; in l2arc_sublist_lock()
8723 ml = &arc_mru->arcs_list[ARC_BUFC_METADATA]; in l2arc_sublist_lock()
8726 ml = &arc_mfu->arcs_list[ARC_BUFC_DATA]; in l2arc_sublist_lock()
8729 ml = &arc_mru->arcs_list[ARC_BUFC_DATA]; in l2arc_sublist_lock()
8736 * Return a randomly-selected sublist. This is acceptable in l2arc_sublist_lock()
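/*
 * The rotor above walks list_num 0..3 over the four feed sources; a
 * compact sketch of the selection (fields exactly as used above, with a
 * random sublist then chosen within the returned multilist):
 */
static multilist_t *
l2arc_pick_list_sketch(int list_num)
{
	switch (list_num) {
	case 0:	 return (&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	case 1:	 return (&arc_mru->arcs_list[ARC_BUFC_METADATA]);
	case 2:	 return (&arc_mfu->arcs_list[ARC_BUFC_DATA]);
	default: return (&arc_mru->arcs_list[ARC_BUFC_DATA]);
	}
}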
8754 if (dev->l2ad_log_entries == 0) { in l2arc_log_blk_overhead()
8760 dev->l2ad_log_entries - 1) / in l2arc_log_blk_overhead()
8761 dev->l2ad_log_entries; in l2arc_log_blk_overhead()
8763 return (vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_log_blk_overhead()
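/*
 * The division above is a ceiling divide: any partial remainder is
 * charged a full log block. Worked example with illustrative numbers:
 * at l2ad_log_entries = 1022, 1000 prospective entries cost
 * (1000 + 1021) / 1022 = 1 log block, while 1023 entries cost
 * (1023 + 1021) / 1022 = 2.
 */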
8782 vdev_t *vd = dev->l2ad_vdev; in l2arc_evict()
8785 buflist = &dev->l2ad_buflist; in l2arc_evict()
8789 if (dev->l2ad_hand + distance > dev->l2ad_end) { in l2arc_evict()
8799 taddr = dev->l2ad_end; in l2arc_evict()
8801 taddr = dev->l2ad_hand + distance; in l2arc_evict()
8811 if (dev->l2ad_first) { in l2arc_evict()
8822 if (vd->vdev_has_trim && dev->l2ad_evict < taddr && in l2arc_evict()
8832 spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev); in l2arc_evict()
8834 dev->l2ad_evict - VDEV_LABEL_START_SIZE, in l2arc_evict()
8835 taddr - dev->l2ad_evict); in l2arc_evict()
8836 spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev, in l2arc_evict()
8848 dev->l2ad_evict = MAX(dev->l2ad_evict, taddr); in l2arc_evict()
8853 mutex_enter(&dev->l2ad_mtx); in l2arc_evict()
8860 for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf; in l2arc_evict()
8863 lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf); in l2arc_evict()
8867 (lb_ptr_buf->lb_ptr)->lbp_prop); in l2arc_evict()
8874 if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) { in l2arc_evict()
8877 vdev_space_update(vd, -asize, 0, 0); in l2arc_evict()
8878 ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize); in l2arc_evict()
8880 zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize, in l2arc_evict()
8882 (void) zfs_refcount_remove(&dev->l2ad_lb_count, in l2arc_evict()
8884 list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf); in l2arc_evict()
8885 kmem_free(lb_ptr_buf->lb_ptr, in l2arc_evict()
8907 mutex_exit(&dev->l2ad_mtx); in l2arc_evict()
8922 if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict || in l2arc_evict()
8923 hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) { in l2arc_evict()
8942 ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only); in l2arc_evict()
8958 mutex_exit(&dev->l2ad_mtx); in l2arc_evict()
8970 dev->l2ad_hand = dev->l2ad_start; in l2arc_evict()
8971 dev->l2ad_evict = dev->l2ad_start; in l2arc_evict()
8972 dev->l2ad_first = B_FALSE; in l2arc_evict()
8982 ASSERT3U(dev->l2ad_hand + distance, <=, dev->l2ad_end); in l2arc_evict()
8983 if (!dev->l2ad_first) in l2arc_evict()
8984 ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict); in l2arc_evict()
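/*
 * Sketch of the circular-buffer target computed near the top of
 * l2arc_evict() (fields as shown above): when the write hand plus the
 * needed distance would run past l2ad_end, eviction targets the device
 * end and the hand later wraps back to l2ad_start; otherwise it targets
 * just far enough ahead of the hand.
 */
static uint64_t
l2arc_evict_target_sketch(l2arc_dev_t *dev, uint64_t distance)
{
	if (dev->l2ad_hand + distance > dev->l2ad_end)
		return (dev->l2ad_end);		/* then wrap to l2ad_start */
	return (dev->l2ad_hand + distance);
}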
8998 abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd; in l2arc_apply_transforms()
9003 boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); in l2arc_apply_transforms()
9021 abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize); in l2arc_apply_transforms()
9022 abd_zero_off(to_write, psize, asize - psize); in l2arc_apply_transforms()
9030 abd_copy(to_write, hdr->b_l1hdr.b_pabd, size); in l2arc_apply_transforms()
9032 abd_zero_off(to_write, size, asize - size); in l2arc_apply_transforms()
9039 size, MIN(size, psize), hdr->b_complevel); in l2arc_apply_transforms()
9042 * We can't re-compress the block into the original in l2arc_apply_transforms()
9050 abd_zero_off(cabd, csize, asize - csize); in l2arc_apply_transforms()
9059 * made it to this point, the key to re-encrypt in l2arc_apply_transforms()
9063 ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj, in l2arc_apply_transforms()
9068 ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key, in l2arc_apply_transforms()
9069 hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt, in l2arc_apply_transforms()
9070 hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd, in l2arc_apply_transforms()
9079 abd_zero_off(eabd, psize, asize - psize); in l2arc_apply_transforms()
9082 ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN)); in l2arc_apply_transforms()
9092 ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd); in l2arc_apply_transforms()
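/*
 * The recurring pattern in l2arc_apply_transforms(): the payload fills
 * psize bytes of an asize-sized allocation, and the tail is zeroed so the
 * device never sees stale bytes. Minimal sketch (helper name is
 * illustrative; the abd calls are the ones used above):
 */
static void
l2arc_pad_to_asize_sketch(abd_t *dst, abd_t *src, uint64_t psize,
    uint64_t asize)
{
	abd_copy(dst, src, psize);			/* payload */
	if (asize > psize)
		abd_zero_off(dst, psize, asize - psize);	/* zero pad */
}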
9113 cb = zio->io_private; in l2arc_blk_fetch_done()
9114 if (cb->l2rcb_abd != NULL) in l2arc_blk_fetch_done()
9115 abd_free(cb->l2rcb_abd); in l2arc_blk_fetch_done()
9124 * The headroom_boost is an in-out parameter used to maintain headroom boost
9140 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_write_buffers()
9142 ASSERT3P(dev->l2ad_vdev, !=, NULL); in l2arc_write_buffers()
9217 ASSERT(hdr->b_l1hdr.b_pabd != NULL || in l2arc_write_buffers()
9220 uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_write_buffers()
9264 to_write = hdr->b_crypt_hdr.b_rabd; in l2arc_write_buffers()
9269 to_write = hdr->b_l1hdr.b_pabd; in l2arc_write_buffers()
9286 hdr->b_l2hdr.b_dev = dev; in l2arc_write_buffers()
9287 hdr->b_l2hdr.b_daddr = dev->l2ad_hand; in l2arc_write_buffers()
9288 hdr->b_l2hdr.b_hits = 0; in l2arc_write_buffers()
9289 hdr->b_l2hdr.b_arcs_state = in l2arc_write_buffers()
9290 hdr->b_l1hdr.b_state->arcs_state; in l2arc_write_buffers()
9294 (void) zfs_refcount_add_many(&dev->l2ad_alloc, in l2arc_write_buffers()
9297 vdev_space_update(dev->l2ad_vdev, asize, 0, 0); in l2arc_write_buffers()
9299 mutex_enter(&dev->l2ad_mtx); in l2arc_write_buffers()
9306 list_insert_head(&dev->l2ad_buflist, head); in l2arc_write_buffers()
9308 list_insert_head(&dev->l2ad_buflist, hdr); in l2arc_write_buffers()
9309 mutex_exit(&dev->l2ad_mtx); in l2arc_write_buffers()
9317 cb->l2wcb_dev = dev; in l2arc_write_buffers()
9318 cb->l2wcb_head = head; in l2arc_write_buffers()
9319 list_create(&cb->l2wcb_abd_list, in l2arc_write_buffers()
9326 wzio = zio_write_phys(pio, dev->l2ad_vdev, in l2arc_write_buffers()
9327 dev->l2ad_hand, asize, to_write, in l2arc_write_buffers()
9332 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, in l2arc_write_buffers()
9338 dev->l2ad_hand += asize; in l2arc_write_buffers()
9373 if (dev->l2ad_evict != l2dhdr->dh_evict) in l2arc_write_buffers()
9379 if (!dev->l2ad_first) in l2arc_write_buffers()
9380 ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict); in l2arc_write_buffers()
9386 dev->l2ad_writing = B_TRUE; in l2arc_write_buffers()
9388 dev->l2ad_writing = B_FALSE; in l2arc_write_buffers()
9449 * doing so the next spa to feed from: dev->l2ad_spa. This in l2arc_feed_thread()
9460 spa = dev->l2ad_spa; in l2arc_feed_thread()
9464 * If the pool is read-only then force the feed thread to in l2arc_feed_thread()
9528 if (dev->l2ad_vdev == vd) in l2arc_vdev_get()
9539 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_rebuild_dev()
9540 uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; in l2arc_rebuild_dev()
9541 spa_t *spa = dev->l2ad_spa; in l2arc_rebuild_dev()
9551 * log entries per block so as to enable persistence. in l2arc_rebuild_dev()
9553 if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) { in l2arc_rebuild_dev()
9554 dev->l2ad_log_entries = 0; in l2arc_rebuild_dev()
9556 dev->l2ad_log_entries = MIN((dev->l2ad_end - in l2arc_rebuild_dev()
9557 dev->l2ad_start) >> SPA_MAXBLOCKSHIFT, in l2arc_rebuild_dev()
9564 if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) { in l2arc_rebuild_dev()
9578 dev->l2ad_log_ent_idx = 0; in l2arc_rebuild_dev()
9579 dev->l2ad_log_blk_payload_asize = 0; in l2arc_rebuild_dev()
9580 dev->l2ad_log_blk_payload_start = 0; in l2arc_rebuild_dev()
9589 dev->l2ad_rebuild = B_TRUE; in l2arc_rebuild_dev()
9602 dev->l2ad_trim_all = B_TRUE; in l2arc_rebuild_dev()
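/*
 * Worked example of the sizing above (illustrative numbers): with
 * SPA_MAXBLOCKSHIFT = 24 (16 MiB), a 64 GiB usable range yields
 * 64 GiB >> 24 = 4096 log blocks' worth of entries, which MIN() then
 * caps at the per-block entry limit; devices smaller than
 * l2arc_rebuild_blocks_min_l2size get l2ad_log_entries = 0 and thus no
 * persistence.
 */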
9626 adddev->l2ad_spa = spa; in l2arc_add_vdev()
9627 adddev->l2ad_vdev = vd; in l2arc_add_vdev()
9629 l2dhdr_asize = adddev->l2ad_dev_hdr_asize = in l2arc_add_vdev()
9630 MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift); in l2arc_add_vdev()
9631 adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize; in l2arc_add_vdev()
9632 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); in l2arc_add_vdev()
9633 ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end); in l2arc_add_vdev()
9634 adddev->l2ad_hand = adddev->l2ad_start; in l2arc_add_vdev()
9635 adddev->l2ad_evict = adddev->l2ad_start; in l2arc_add_vdev()
9636 adddev->l2ad_first = B_TRUE; in l2arc_add_vdev()
9637 adddev->l2ad_writing = B_FALSE; in l2arc_add_vdev()
9638 adddev->l2ad_trim_all = B_FALSE; in l2arc_add_vdev()
9639 list_link_init(&adddev->l2ad_node); in l2arc_add_vdev()
9640 adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP); in l2arc_add_vdev()
9642 mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL); in l2arc_add_vdev()
9647 list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), in l2arc_add_vdev()
9654 list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t), in l2arc_add_vdev()
9657 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); in l2arc_add_vdev()
9658 zfs_refcount_create(&adddev->l2ad_alloc); in l2arc_add_vdev()
9659 zfs_refcount_create(&adddev->l2ad_lb_asize); in l2arc_add_vdev()
9660 zfs_refcount_create(&adddev->l2ad_lb_count); in l2arc_add_vdev()
9699 	 * When onlining the cache device (i.e. offline->online without exporting	  in l2arc_rebuild_vdev()
9701 * vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev() in l2arc_rebuild_vdev()
9728 if (remdev->l2ad_rebuild_began == B_TRUE) { in l2arc_remove_vdev()
9729 remdev->l2ad_rebuild_cancel = B_TRUE; in l2arc_remove_vdev()
9730 while (remdev->l2ad_rebuild == B_TRUE) in l2arc_remove_vdev()
9748 list_destroy(&remdev->l2ad_buflist); in l2arc_remove_vdev()
9749 ASSERT(list_is_empty(&remdev->l2ad_lbptr_list)); in l2arc_remove_vdev()
9750 list_destroy(&remdev->l2ad_lbptr_list); in l2arc_remove_vdev()
9751 mutex_destroy(&remdev->l2ad_mtx); in l2arc_remove_vdev()
9752 zfs_refcount_destroy(&remdev->l2ad_alloc); in l2arc_remove_vdev()
9753 zfs_refcount_destroy(&remdev->l2ad_lb_asize); in l2arc_remove_vdev()
9754 zfs_refcount_destroy(&remdev->l2ad_lb_count); in l2arc_remove_vdev()
9755 kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize); in l2arc_remove_vdev()
9832 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in l2arc_spa_rebuild_start()
9834 l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]); in l2arc_spa_rebuild_start()
9840 if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) { in l2arc_spa_rebuild_start()
9841 dev->l2ad_rebuild_began = B_TRUE; in l2arc_spa_rebuild_start()
9853 spa->spa_export_thread == curthread); in l2arc_spa_rebuild_stop()
9855 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in l2arc_spa_rebuild_stop()
9857 l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]); in l2arc_spa_rebuild_stop()
9861 dev->l2ad_rebuild_cancel = B_TRUE; in l2arc_spa_rebuild_stop()
9864 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { in l2arc_spa_rebuild_stop()
9866 l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]); in l2arc_spa_rebuild_stop()
9870 if (dev->l2ad_rebuild_began == B_TRUE) { in l2arc_spa_rebuild_stop()
9871 while (dev->l2ad_rebuild == B_TRUE) { in l2arc_spa_rebuild_stop()
9888 VERIFY(dev->l2ad_rebuild); in l2arc_dev_rebuild_thread()
9891 dev->l2ad_rebuild_began = B_FALSE; in l2arc_dev_rebuild_thread()
9892 dev->l2ad_rebuild = B_FALSE; in l2arc_dev_rebuild_thread()
9912 vdev_t *vd = dev->l2ad_vdev; in l2arc_rebuild()
9913 spa_t *spa = vd->vdev_spa; in l2arc_rebuild()
9915 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_rebuild()
9928 * that a spa_unload or device remove can be initiated - this is in l2arc_rebuild()
9939 dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start); in l2arc_rebuild()
9940 dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr + in l2arc_rebuild()
9941 L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop), in l2arc_rebuild()
9942 dev->l2ad_start); in l2arc_rebuild()
9943 dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST); in l2arc_rebuild()
9945 vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time; in l2arc_rebuild()
9946 vd->vdev_trim_state = l2dhdr->dh_trim_state; in l2arc_rebuild()
9956 memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps)); in l2arc_rebuild()
9973 * online the L2ARC dev at a later time (or re-import the pool) in l2arc_rebuild()
9992 uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop); in l2arc_rebuild()
10000 lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), in l2arc_rebuild()
10002 memcpy(lb_ptr_buf->lb_ptr, &lbps[0], in l2arc_rebuild()
10004 mutex_enter(&dev->l2ad_mtx); in l2arc_rebuild()
10005 list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf); in l2arc_rebuild()
10008 zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf); in l2arc_rebuild()
10009 zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf); in l2arc_rebuild()
10010 mutex_exit(&dev->l2ad_mtx); in l2arc_rebuild()
10019 * -----|||----|||---|||----||| in l2arc_rebuild()
10021 * ---|||---|||----|||---||| in l2arc_rebuild()
10027 * (0). Only log blocks (0)-(3) should be restored. We check in l2arc_rebuild()
10036 lbps[0].lbp_payload_start, dev->l2ad_evict) && in l2arc_rebuild()
10037 !dev->l2ad_first) in l2arc_rebuild()
10043 if (dev->l2ad_rebuild_cancel) { in l2arc_rebuild()
10068 lbps[1] = this_lb->lb_prev_lbp; in l2arc_rebuild()
10085 } else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) { in l2arc_rebuild()
10089 (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); in l2arc_rebuild()
10090 } else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) { in l2arc_rebuild()
10093 * in the device header points to invalid/non-present log in l2arc_rebuild()
10098 memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize); in l2arc_rebuild()
10106 (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); in l2arc_rebuild()
10110 (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count)); in l2arc_rebuild()
10129 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_dev_hdr_read()
10130 const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; in l2arc_dev_hdr_read()
10133 guid = spa_guid(dev->l2ad_vdev->vdev_spa); in l2arc_dev_hdr_read()
10137 err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev, in l2arc_dev_hdr_read()
10149 (u_longlong_t)dev->l2ad_vdev->vdev_guid); in l2arc_dev_hdr_read()
10153 if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC)) in l2arc_dev_hdr_read()
10156 if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC || in l2arc_dev_hdr_read()
10157 l2dhdr->dh_spa_guid != guid || in l2arc_dev_hdr_read()
10158 l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid || in l2arc_dev_hdr_read()
10159 l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION || in l2arc_dev_hdr_read()
10160 l2dhdr->dh_log_entries != dev->l2ad_log_entries || in l2arc_dev_hdr_read()
10161 l2dhdr->dh_end != dev->l2ad_end || in l2arc_dev_hdr_read()
10162 !l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end, in l2arc_dev_hdr_read()
10163 l2dhdr->dh_evict) || in l2arc_dev_hdr_read()
10164 (l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE && in l2arc_dev_hdr_read()
10224 this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp, in l2arc_log_blk_read()
10233 * Start issuing IO for the next log block early - this in l2arc_log_blk_read()
10237 *next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp, in l2arc_log_blk_read()
10246 (u_longlong_t)this_lbp->lbp_daddr, in l2arc_log_blk_read()
10247 (u_longlong_t)dev->l2ad_vdev->vdev_guid); in l2arc_log_blk_read()
10255 asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop); in l2arc_log_blk_read()
10257 if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) { in l2arc_log_blk_read()
10261 (u_longlong_t)this_lbp->lbp_daddr, in l2arc_log_blk_read()
10262 (u_longlong_t)dev->l2ad_vdev->vdev_guid, in l2arc_log_blk_read()
10263 (u_longlong_t)dev->l2ad_hand, in l2arc_log_blk_read()
10264 (u_longlong_t)dev->l2ad_evict); in l2arc_log_blk_read()
10270 switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) { in l2arc_log_blk_read()
10279 L2BLK_GET_COMPRESS((this_lbp)->lbp_prop), in l2arc_log_blk_read()
10293 if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC)) in l2arc_log_blk_read()
10295 if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) { in l2arc_log_blk_read()
10300 /* Abort an in-flight fetch I/O in case of error */ in l2arc_log_blk_read()
10319 uint64_t log_entries = dev->l2ad_log_entries; in l2arc_log_blk_restore()
10328 for (int i = log_entries - 1; i >= 0; i--) { in l2arc_log_blk_restore()
10336 * HEAD <------ (time) ------ TAIL in l2arc_log_blk_restore()
10337 * direction +-----+-----+-----+-----+-----+ direction in l2arc_log_blk_restore()
10339 * fill +-----+-----+-----+-----+-----+ in l2arc_log_blk_restore()
10347 * l2arc_feed_thread() as dev->l2ad_rebuild is set to true. in l2arc_log_blk_restore()
10349 size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop); in l2arc_log_blk_restore()
10350 asize += vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_log_blk_restore()
10351 L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop)); in l2arc_log_blk_restore()
10352 l2arc_hdr_restore(&lb->lb_entries[i], dev); in l2arc_log_blk_restore()
10377 arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop); in l2arc_hdr_restore()
10385 hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type, in l2arc_hdr_restore()
10386 dev, le->le_dva, le->le_daddr, in l2arc_hdr_restore()
10387 L2BLK_GET_PSIZE((le)->le_prop), le->le_birth, in l2arc_hdr_restore()
10388 L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel, in l2arc_hdr_restore()
10389 L2BLK_GET_PROTECTED((le)->le_prop), in l2arc_hdr_restore()
10390 L2BLK_GET_PREFETCH((le)->le_prop), in l2arc_hdr_restore()
10391 L2BLK_GET_STATE((le)->le_prop)); in l2arc_hdr_restore()
10392 asize = vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_hdr_restore()
10393 L2BLK_GET_PSIZE((le)->le_prop)); in l2arc_hdr_restore()
10400 vdev_space_update(dev->l2ad_vdev, asize, 0, 0); in l2arc_hdr_restore()
10402 mutex_enter(&dev->l2ad_mtx); in l2arc_hdr_restore()
10403 list_insert_tail(&dev->l2ad_buflist, hdr); in l2arc_hdr_restore()
10404 (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr); in l2arc_hdr_restore()
10405 mutex_exit(&dev->l2ad_mtx); in l2arc_hdr_restore()
10419 exists->b_l2hdr.b_dev = dev; in l2arc_hdr_restore()
10420 exists->b_l2hdr.b_daddr = le->le_daddr; in l2arc_hdr_restore()
10421 exists->b_l2hdr.b_arcs_state = in l2arc_hdr_restore()
10422 L2BLK_GET_STATE((le)->le_prop); in l2arc_hdr_restore()
10423 mutex_enter(&dev->l2ad_mtx); in l2arc_hdr_restore()
10424 list_insert_tail(&dev->l2ad_buflist, exists); in l2arc_hdr_restore()
10425 (void) zfs_refcount_add_many(&dev->l2ad_alloc, in l2arc_hdr_restore()
10427 mutex_exit(&dev->l2ad_mtx); in l2arc_hdr_restore()
10429 vdev_space_update(dev->l2ad_vdev, asize, 0, 0); in l2arc_hdr_restore()
10449 l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp, in l2arc_log_blk_fetch() argument
10457 asize = L2BLK_GET_PSIZE((lbp)->lbp_prop); in l2arc_log_blk_fetch()
10461 cb->l2rcb_abd = abd_get_from_buf(lb, asize); in l2arc_log_blk_fetch()
10462 pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb, in l2arc_log_blk_fetch()
10464 (void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize, in l2arc_log_blk_fetch()
10465 cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL, in l2arc_log_blk_fetch()
10488 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_dev_hdr_update()
10489 const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize; in l2arc_dev_hdr_update()
10493 VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER)); in l2arc_dev_hdr_update()
10495 l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC; in l2arc_dev_hdr_update()
10496 l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION; in l2arc_dev_hdr_update()
10497 l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa); in l2arc_dev_hdr_update()
10498 l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid; in l2arc_dev_hdr_update()
10499 l2dhdr->dh_log_entries = dev->l2ad_log_entries; in l2arc_dev_hdr_update()
10500 l2dhdr->dh_evict = dev->l2ad_evict; in l2arc_dev_hdr_update()
10501 l2dhdr->dh_start = dev->l2ad_start; in l2arc_dev_hdr_update()
10502 l2dhdr->dh_end = dev->l2ad_end; in l2arc_dev_hdr_update()
10503 l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize); in l2arc_dev_hdr_update()
10504 l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count); in l2arc_dev_hdr_update()
10505 l2dhdr->dh_flags = 0; in l2arc_dev_hdr_update()
10506 l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time; in l2arc_dev_hdr_update()
10507 l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state; in l2arc_dev_hdr_update()
10508 if (dev->l2ad_first) in l2arc_dev_hdr_update()
10509 l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST; in l2arc_dev_hdr_update()
10513 err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev, in l2arc_dev_hdr_update()
10522 (u_longlong_t)dev->l2ad_vdev->vdev_guid); in l2arc_dev_hdr_update()
10535 l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk; in l2arc_log_blk_commit()
10536 l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr; in l2arc_log_blk_commit()
10543 VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries); in l2arc_log_blk_commit()
10546 abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb)); in l2arc_log_blk_commit()
10548 lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP); in l2arc_log_blk_commit()
10551 lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1]; in l2arc_log_blk_commit()
10552 lb->lb_magic = L2ARC_LOG_BLK_MAGIC; in l2arc_log_blk_commit()
10559 list_insert_tail(&cb->l2wcb_abd_list, abd_buf); in l2arc_log_blk_commit()
10563 abd_buf->abd, &abd, sizeof (*lb), in l2arc_log_blk_commit()
10565 dev->l2ad_vdev->vdev_ashift, in l2arc_log_blk_commit()
10566 dev->l2ad_vdev->vdev_ashift, sizeof (*lb)), 0); in l2arc_log_blk_commit()
10570 asize = vdev_psize_to_asize(dev->l2ad_vdev, psize); in l2arc_log_blk_commit()
10577 l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0]; in l2arc_log_blk_commit()
10578 l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand; in l2arc_log_blk_commit()
10579 l2dhdr->dh_start_lbps[0].lbp_payload_asize = in l2arc_log_blk_commit()
10580 dev->l2ad_log_blk_payload_asize; in l2arc_log_blk_commit()
10581 l2dhdr->dh_start_lbps[0].lbp_payload_start = in l2arc_log_blk_commit()
10582 dev->l2ad_log_blk_payload_start; in l2arc_log_blk_commit()
10584 (&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb)); in l2arc_log_blk_commit()
10586 (&l2dhdr->dh_start_lbps[0])->lbp_prop, asize); in l2arc_log_blk_commit()
10588 (&l2dhdr->dh_start_lbps[0])->lbp_prop, in l2arc_log_blk_commit()
10592 abd_zero_off(abd, psize, asize - psize); in l2arc_log_blk_commit()
10594 (&l2dhdr->dh_start_lbps[0])->lbp_prop, in l2arc_log_blk_commit()
10600 (&l2dhdr->dh_start_lbps[0])->lbp_prop, in l2arc_log_blk_commit()
10606 &l2dhdr->dh_start_lbps[0].lbp_cksum); in l2arc_log_blk_commit()
10608 abd_free(abd_buf->abd); in l2arc_log_blk_commit()
10611 abd_buf->abd = abd; in l2arc_log_blk_commit()
10612 wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand, in l2arc_log_blk_commit()
10613 asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL, in l2arc_log_blk_commit()
10615 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio); in l2arc_log_blk_commit()
10618 dev->l2ad_hand += asize; in l2arc_log_blk_commit()
10619 vdev_space_update(dev->l2ad_vdev, asize, 0, 0); in l2arc_log_blk_commit()
10625 memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0], in l2arc_log_blk_commit()
10627 mutex_enter(&dev->l2ad_mtx); in l2arc_log_blk_commit()
10628 list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf); in l2arc_log_blk_commit()
10631 zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf); in l2arc_log_blk_commit()
10632 zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf); in l2arc_log_blk_commit()
10633 mutex_exit(&dev->l2ad_mtx); in l2arc_log_blk_commit()
10640 dev->l2ad_log_blk_payload_asize / asize); in l2arc_log_blk_commit()
10643 dev->l2ad_log_ent_idx = 0; in l2arc_log_blk_commit()
10644 dev->l2ad_log_blk_payload_asize = 0; in l2arc_log_blk_commit()
10645 dev->l2ad_log_blk_payload_start = 0; in l2arc_log_blk_commit()
10655 l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp) in l2arc_log_blkptr_valid() argument
10658 uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop); in l2arc_log_blkptr_valid()
10659 uint64_t end = lbp->lbp_daddr + asize - 1; in l2arc_log_blkptr_valid()
10660 uint64_t start = lbp->lbp_payload_start; in l2arc_log_blkptr_valid()
10665 * - it fits entirely (including its payload) between l2ad_start and in l2arc_log_blkptr_valid()
10667 * - it has a valid size in l2arc_log_blkptr_valid()
10668 * - neither the log block itself nor part of its payload was evicted in l2arc_log_blkptr_valid()
10677 * --------------------------|||| in l2arc_log_blkptr_valid()
10684 l2arc_range_check_overlap(start, end, dev->l2ad_hand) || in l2arc_log_blkptr_valid()
10685 l2arc_range_check_overlap(start, end, dev->l2ad_evict) || in l2arc_log_blkptr_valid()
10686 l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) || in l2arc_log_blkptr_valid()
10687 l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end); in l2arc_log_blkptr_valid()
10689 return (start >= dev->l2ad_start && end <= dev->l2ad_end && in l2arc_log_blkptr_valid()
10691 (!evicted || dev->l2ad_first)); in l2arc_log_blkptr_valid()
10703 l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk; in l2arc_log_blk_insert()
10706 if (dev->l2ad_log_entries == 0) in l2arc_log_blk_insert()
10709 int index = dev->l2ad_log_ent_idx++; in l2arc_log_blk_insert()
10711 ASSERT3S(index, <, dev->l2ad_log_entries); in l2arc_log_blk_insert()
10714 le = &lb->lb_entries[index]; in l2arc_log_blk_insert()
10716 le->le_dva = hdr->b_dva; in l2arc_log_blk_insert()
10717 le->le_birth = hdr->b_birth; in l2arc_log_blk_insert()
10718 le->le_daddr = hdr->b_l2hdr.b_daddr; in l2arc_log_blk_insert()
10720 dev->l2ad_log_blk_payload_start = le->le_daddr; in l2arc_log_blk_insert()
10721 L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr)); in l2arc_log_blk_insert()
10722 L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr)); in l2arc_log_blk_insert()
10723 L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr)); in l2arc_log_blk_insert()
10724 le->le_complevel = hdr->b_complevel; in l2arc_log_blk_insert()
10725 L2BLK_SET_TYPE((le)->le_prop, hdr->b_type); in l2arc_log_blk_insert()
10726 L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr))); in l2arc_log_blk_insert()
10727 L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr))); in l2arc_log_blk_insert()
10728 L2BLK_SET_STATE((le)->le_prop, hdr->b_l2hdr.b_arcs_state); in l2arc_log_blk_insert()
10730 dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev, in l2arc_log_blk_insert()
10733 return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries); in l2arc_log_blk_insert()
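/*
 * Usage sketch (signatures assumed from the fragments above, not taken
 * verbatim from the write path): the writer appends one entry per
 * committed buffer and flushes the in-core log block the moment insert
 * reports it full.
 */
static void
l2arc_log_append_sketch(l2arc_dev_t *dev, arc_buf_hdr_t *hdr, zio_t *pio,
    l2arc_write_callback_t *cb)
{
	if (l2arc_log_blk_insert(dev, hdr))	/* B_TRUE == block now full */
		(void) l2arc_log_blk_commit(dev, pio, cb);
}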
10737 * Checks whether a given L2ARC device address sits in a time-sequential
10741 * bottom -- Lower end of the range to check (written to earlier).
10742 * top -- Upper end of the range to check (written to later).
10743 * check -- The address for which we want to determine if it sits in
10746 * The 3-way conditional below represents the following cases:
10749 * <check>--------+-------------------+
10752 * |---------------<bottom>============<top>--------------|
10754 * bottom > top: Looped-around case:
10755 * <check>--------+------------------+
10758 * |===============<top>---------------<bottom>===========|
10761 * +---------------+---------<check>
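/*
 * Sketch consistent with the three cases diagrammed above (the third,
 * truncated case is assumed to be bottom == top; this is not the
 * verbatim upstream body):
 */
static boolean_t
l2arc_range_check_overlap_sketch(uint64_t bottom, uint64_t top,
    uint64_t check)
{
	if (bottom < top)			/* sequentially ordered range */
		return (bottom <= check && check <= top);
	else if (bottom > top)			/* looped-around range */
		return (check <= top || bottom <= check);
	else					/* bottom == top */
		return (check == top);
}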
10846 "Percent of ARC size allowed for L2ARC-only headers");