1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
26 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28 * Copyright (c) 2019, Klara Inc.
29 * Copyright (c) 2019, Allan Jude
30 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
31 */
32
33 #include <sys/zfs_context.h>
34 #include <sys/arc.h>
35 #include <sys/dmu.h>
36 #include <sys/dmu_send.h>
37 #include <sys/dmu_impl.h>
38 #include <sys/dbuf.h>
39 #include <sys/dmu_objset.h>
40 #include <sys/dsl_dataset.h>
41 #include <sys/dsl_dir.h>
42 #include <sys/dmu_tx.h>
43 #include <sys/spa.h>
44 #include <sys/zio.h>
45 #include <sys/dmu_zfetch.h>
46 #include <sys/sa.h>
47 #include <sys/sa_impl.h>
48 #include <sys/zfeature.h>
49 #include <sys/blkptr.h>
50 #include <sys/range_tree.h>
51 #include <sys/trace_zfs.h>
52 #include <sys/callb.h>
53 #include <sys/abd.h>
54 #include <sys/brt.h>
55 #include <sys/vdev.h>
56 #include <cityhash.h>
57 #include <sys/spa_impl.h>
58 #include <sys/wmsum.h>
59 #include <sys/vdev_impl.h>
60
61 static kstat_t *dbuf_ksp;
62
63 typedef struct dbuf_stats {
64 /*
65 * Various statistics about the size of the dbuf cache.
66 */
67 kstat_named_t cache_count;
68 kstat_named_t cache_size_bytes;
69 kstat_named_t cache_size_bytes_max;
70 /*
71 * Statistics regarding the bounds on the dbuf cache size.
72 */
73 kstat_named_t cache_target_bytes;
74 kstat_named_t cache_lowater_bytes;
75 kstat_named_t cache_hiwater_bytes;
76 /*
77 * Total number of dbuf cache evictions that have occurred.
78 */
79 kstat_named_t cache_total_evicts;
80 /*
81 * The distribution of dbuf levels in the dbuf cache and
82 * the total size of all dbufs at each level.
83 */
84 kstat_named_t cache_levels[DN_MAX_LEVELS];
85 kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
86 /*
87 * Statistics about the dbuf hash table.
88 */
89 kstat_named_t hash_hits;
90 kstat_named_t hash_misses;
91 kstat_named_t hash_collisions;
92 kstat_named_t hash_elements;
93 /*
94 * Number of sublists containing more than one dbuf in the dbuf
95 * hash table. Keep track of the longest hash chain.
96 */
97 kstat_named_t hash_chains;
98 kstat_named_t hash_chain_max;
99 /*
100 * Number of times a dbuf_create() discovers that a dbuf was
101 * already created and in the dbuf hash table.
102 */
103 kstat_named_t hash_insert_race;
104 /*
105 * Number of entries in the hash table dbuf and mutex arrays.
106 */
107 kstat_named_t hash_table_count;
108 kstat_named_t hash_mutex_count;
109 /*
110 * Statistics about the size of the metadata dbuf cache.
111 */
112 kstat_named_t metadata_cache_count;
113 kstat_named_t metadata_cache_size_bytes;
114 kstat_named_t metadata_cache_size_bytes_max;
115 /*
116 * For diagnostic purposes, this is incremented whenever we can't add
117 * something to the metadata cache because it's full, and instead put
118 * the data in the regular dbuf cache.
119 */
120 kstat_named_t metadata_cache_overflow;
121 } dbuf_stats_t;
122
123 dbuf_stats_t dbuf_stats = {
124 { "cache_count", KSTAT_DATA_UINT64 },
125 { "cache_size_bytes", KSTAT_DATA_UINT64 },
126 { "cache_size_bytes_max", KSTAT_DATA_UINT64 },
127 { "cache_target_bytes", KSTAT_DATA_UINT64 },
128 { "cache_lowater_bytes", KSTAT_DATA_UINT64 },
129 { "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
130 { "cache_total_evicts", KSTAT_DATA_UINT64 },
131 { { "cache_levels_N", KSTAT_DATA_UINT64 } },
132 { { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
133 { "hash_hits", KSTAT_DATA_UINT64 },
134 { "hash_misses", KSTAT_DATA_UINT64 },
135 { "hash_collisions", KSTAT_DATA_UINT64 },
136 { "hash_elements", KSTAT_DATA_UINT64 },
137 { "hash_chains", KSTAT_DATA_UINT64 },
138 { "hash_chain_max", KSTAT_DATA_UINT64 },
139 { "hash_insert_race", KSTAT_DATA_UINT64 },
140 { "hash_table_count", KSTAT_DATA_UINT64 },
141 { "hash_mutex_count", KSTAT_DATA_UINT64 },
142 { "metadata_cache_count", KSTAT_DATA_UINT64 },
143 { "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
144 { "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
145 { "metadata_cache_overflow", KSTAT_DATA_UINT64 }
146 };
147
148 struct {
149 wmsum_t cache_count;
150 wmsum_t cache_total_evicts;
151 wmsum_t cache_levels[DN_MAX_LEVELS];
152 wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
153 wmsum_t hash_hits;
154 wmsum_t hash_misses;
155 wmsum_t hash_collisions;
156 wmsum_t hash_elements;
157 wmsum_t hash_chains;
158 wmsum_t hash_insert_race;
159 wmsum_t metadata_cache_count;
160 wmsum_t metadata_cache_overflow;
161 } dbuf_sums;
162
163 #define DBUF_STAT_INCR(stat, val) \
164 wmsum_add(&dbuf_sums.stat, val)
165 #define DBUF_STAT_DECR(stat, val) \
166 DBUF_STAT_INCR(stat, -(val))
167 #define DBUF_STAT_BUMP(stat) \
168 DBUF_STAT_INCR(stat, 1)
169 #define DBUF_STAT_BUMPDOWN(stat) \
170 DBUF_STAT_INCR(stat, -1)
171 #define DBUF_STAT_MAX(stat, v) { \
172 uint64_t _m; \
173 while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
174 (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
175 continue; \
176 }
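/*
 * Note on DBUF_STAT_MAX(): it records a new maximum without taking a
 * lock. The loop rereads the current value and retries the
 * compare-and-swap until either the stored maximum is already at least
 * as large as the new value or the CAS succeeds. For example,
 * dbuf_hash_insert() uses DBUF_STAT_MAX(hash_chain_max, i) to track
 * the longest hash chain it has walked.
 */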
177
178 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
179 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
180
181 /*
182 * Global data structures and functions for the dbuf cache.
183 */
184 static kmem_cache_t *dbuf_kmem_cache;
185 kmem_cache_t *dbuf_dirty_kmem_cache;
186 static taskq_t *dbu_evict_taskq;
187
188 static kthread_t *dbuf_cache_evict_thread;
189 static kmutex_t dbuf_evict_lock;
190 static kcondvar_t dbuf_evict_cv;
191 static boolean_t dbuf_evict_thread_exit;
192
193 /*
194 * There are two dbuf caches; each dbuf can only be in one of them at a time.
195 *
196 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
197 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
198 * that represent the metadata that describes filesystems/snapshots/
199 * bookmarks/properties/etc. We only evict from this cache when we export a
200 * pool, to short-circuit as much I/O as possible for all administrative
201 * commands that need the metadata. There is no eviction policy for this
202 * cache, because we try to only include types in it which would occupy a
203 * very small amount of space per object but create a large impact on the
204 * performance of these commands. Instead, after it reaches a maximum size
205 * (which should only happen on very small memory systems with a very large
206 * number of filesystem objects), we stop taking new dbufs into the
207 * metadata cache, instead putting them in the normal dbuf cache.
208 *
209 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
210 * are not currently held but have been recently released. These dbufs
211 * are not eligible for arc eviction until they are aged out of the cache.
212 * Dbufs that are aged out of the cache will be immediately destroyed and
213 * become eligible for arc eviction.
214 *
215 * Dbufs are added to these caches once the last hold is released. If a dbuf is
216 * later accessed and still exists in the dbuf cache, then it will be removed
217 * from the cache and later re-added to the head of the cache.
218 *
219 * If a given dbuf meets the requirements for the metadata cache, it will go
220 * there, otherwise it will be considered for the generic LRU dbuf cache. The
221 * caches and the refcounts tracking their sizes are stored in an array indexed
222 * by those caches' matching enum values (from dbuf_cached_state_t).
223 */
224 typedef struct dbuf_cache {
225 multilist_t cache;
226 zfs_refcount_t size ____cacheline_aligned;
227 } dbuf_cache_t;
228 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
229
230 /* Size limits for the caches */
231 static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
232 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
233
234 /* Set the default sizes of the caches to log2 fraction of arc size */
235 static uint_t dbuf_cache_shift = 5;
236 static uint_t dbuf_metadata_cache_shift = 6;
237
238 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
239 static uint_t dbuf_mutex_cache_shift = 0;
240
241 static unsigned long dbuf_cache_target_bytes(void);
242 static unsigned long dbuf_metadata_cache_target_bytes(void);
243
244 /*
245 * The LRU dbuf cache uses a three-stage eviction policy:
246 * - A low water marker designates when the dbuf eviction thread
247 * should stop evicting from the dbuf cache.
248 * - When we reach the maximum size (aka mid water mark), we
249 * signal the eviction thread to run.
250 * - The high water mark indicates when the eviction thread
251 * is unable to keep up with the incoming load and eviction must
252 * happen in the context of the calling thread.
253 *
254 * The dbuf cache:
255 * (max size)
256 * low water mid water hi water
257 * +----------------------------------------+----------+----------+
258 * | | | |
259 * | | | |
260 * | | | |
261 * | | | |
262 * +----------------------------------------+----------+----------+
263 * stop signal evict
264 * evicting eviction directly
265 * thread
266 *
267 * The high and low water marks indicate the operating range for the eviction
268 * thread. The low water mark is, by default, 90% of the total size of the
269 * cache and the high water mark is at 110% (both of these percentages can be
270 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
271 * respectively). The eviction thread will try to ensure that the cache remains
272 * within this range by waking up every second and checking if the cache is
273 * above the low water mark. The thread can also be woken up by callers adding
274 * elements into the cache if the cache is larger than the mid water (i.e max
275 * cache size). Once the eviction thread is woken up and eviction is required,
276 * it will continue evicting buffers until it's able to reduce the cache size
277 * to the low water mark. If the cache size continues to grow and hits the high
278 * water mark, then callers adding elements to the cache will begin to evict
279 * directly from the cache until the cache is no longer above the high water
280 * mark.
281 */
282
283 /*
284 * The percentage above and below the maximum cache size.
285 */
286 static uint_t dbuf_cache_hiwater_pct = 10;
287 static uint_t dbuf_cache_lowater_pct = 10;
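/*
 * Worked example with the default tunables above: if
 * dbuf_cache_target_bytes() is 100 MiB, then dbuf_cache_hiwater_bytes()
 * is 110 MiB and dbuf_cache_lowater_bytes() is 90 MiB. The eviction
 * thread is signalled once the cache grows past 100 MiB, keeps evicting
 * until the size drops below 90 MiB, and callers only evict
 * synchronously while the size exceeds 110 MiB.
 */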
288
289 static int
290 dbuf_cons(void *vdb, void *unused, int kmflag)
291 {
292 (void) unused, (void) kmflag;
293 dmu_buf_impl_t *db = vdb;
294 memset(db, 0, sizeof (dmu_buf_impl_t));
295
296 mutex_init(&db->db_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
297 rw_init(&db->db_rwlock, NULL, RW_NOLOCKDEP, NULL);
298 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
299 multilist_link_init(&db->db_cache_link);
300 zfs_refcount_create(&db->db_holds);
301
302 return (0);
303 }
304
305 static void
306 dbuf_dest(void *vdb, void *unused)
307 {
308 (void) unused;
309 dmu_buf_impl_t *db = vdb;
310 mutex_destroy(&db->db_mtx);
311 rw_destroy(&db->db_rwlock);
312 cv_destroy(&db->db_changed);
313 ASSERT(!multilist_link_active(&db->db_cache_link));
314 zfs_refcount_destroy(&db->db_holds);
315 }
316
317 /*
318 * dbuf hash table routines
319 */
320 static dbuf_hash_table_t dbuf_hash_table;
321
322 /*
323 * We use Cityhash for this. It's fast, and has good hash properties without
324 * requiring any large static buffers.
325 */
326 static uint64_t
327 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
328 {
329 return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
330 }
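/*
 * Sketch of how the hash value is consumed (see dbuf_find() and
 * dbuf_hash_insert() below, with h = &dbuf_hash_table): the 64-bit hash
 * is masked down to a bucket index, and the bucket's mutex is derived
 * from the same index:
 *
 *	uint64_t hv = dbuf_hash(os, obj, level, blkid);
 *	uint64_t idx = hv & h->hash_table_mask;
 *	mutex_enter(DBUF_HASH_MUTEX(h, idx));
 *
 * hash_table_mask is a power of two minus one, so the mask is a cheap
 * modulo by the table size.
 */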
331
332 #define DTRACE_SET_STATE(db, why) \
333 DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
334 const char *, why)
335
336 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
337 ((dbuf)->db.db_object == (obj) && \
338 (dbuf)->db_objset == (os) && \
339 (dbuf)->db_level == (level) && \
340 (dbuf)->db_blkid == (blkid))
341
342 dmu_buf_impl_t *
343 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
344 uint64_t *hash_out)
345 {
346 dbuf_hash_table_t *h = &dbuf_hash_table;
347 uint64_t hv;
348 uint64_t idx;
349 dmu_buf_impl_t *db;
350
351 hv = dbuf_hash(os, obj, level, blkid);
352 idx = hv & h->hash_table_mask;
353
354 mutex_enter(DBUF_HASH_MUTEX(h, idx));
355 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
356 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
357 mutex_enter(&db->db_mtx);
358 if (db->db_state != DB_EVICTING) {
359 mutex_exit(DBUF_HASH_MUTEX(h, idx));
360 return (db);
361 }
362 mutex_exit(&db->db_mtx);
363 }
364 }
365 mutex_exit(DBUF_HASH_MUTEX(h, idx));
366 if (hash_out != NULL)
367 *hash_out = hv;
368 return (NULL);
369 }
370
371 static dmu_buf_impl_t *
372 dbuf_find_bonus(objset_t *os, uint64_t object)
373 {
374 dnode_t *dn;
375 dmu_buf_impl_t *db = NULL;
376
377 if (dnode_hold(os, object, FTAG, &dn) == 0) {
378 rw_enter(&dn->dn_struct_rwlock, RW_READER);
379 if (dn->dn_bonus != NULL) {
380 db = dn->dn_bonus;
381 mutex_enter(&db->db_mtx);
382 }
383 rw_exit(&dn->dn_struct_rwlock);
384 dnode_rele(dn, FTAG);
385 }
386 return (db);
387 }
388
389 /*
390 * Insert an entry into the hash table. If there is already an element
391 * equal to elem in the hash table, then the already existing element
392 * will be returned and the new element will not be inserted.
393 * Otherwise returns NULL.
394 */
395 static dmu_buf_impl_t *
396 dbuf_hash_insert(dmu_buf_impl_t *db)
397 {
398 dbuf_hash_table_t *h = &dbuf_hash_table;
399 objset_t *os = db->db_objset;
400 uint64_t obj = db->db.db_object;
401 int level = db->db_level;
402 uint64_t blkid, idx;
403 dmu_buf_impl_t *dbf;
404 uint32_t i;
405
406 blkid = db->db_blkid;
407 ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
408 idx = db->db_hash & h->hash_table_mask;
409
410 mutex_enter(DBUF_HASH_MUTEX(h, idx));
411 for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
412 dbf = dbf->db_hash_next, i++) {
413 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
414 mutex_enter(&dbf->db_mtx);
415 if (dbf->db_state != DB_EVICTING) {
416 mutex_exit(DBUF_HASH_MUTEX(h, idx));
417 return (dbf);
418 }
419 mutex_exit(&dbf->db_mtx);
420 }
421 }
422
423 if (i > 0) {
424 DBUF_STAT_BUMP(hash_collisions);
425 if (i == 1)
426 DBUF_STAT_BUMP(hash_chains);
427
428 DBUF_STAT_MAX(hash_chain_max, i);
429 }
430
431 mutex_enter(&db->db_mtx);
432 db->db_hash_next = h->hash_table[idx];
433 h->hash_table[idx] = db;
434 mutex_exit(DBUF_HASH_MUTEX(h, idx));
435 DBUF_STAT_BUMP(hash_elements);
436
437 return (NULL);
438 }
439
440 /*
441 * This returns whether this dbuf should be stored in the metadata cache, which
442 * is based on whether it's from one of the dnode types that store data related
443 * to traversing dataset hierarchies.
444 */
445 static boolean_t
446 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
447 {
448 DB_DNODE_ENTER(db);
449 dmu_object_type_t type = DB_DNODE(db)->dn_type;
450 DB_DNODE_EXIT(db);
451
452 /* Check if this dbuf is one of the types we care about */
453 if (DMU_OT_IS_METADATA_CACHED(type)) {
454 /* If we hit this, then we set something up wrong in dmu_ot */
455 ASSERT(DMU_OT_IS_METADATA(type));
456
457 /*
458 * Sanity check for small-memory systems: don't allocate too
459 * much memory for this purpose.
460 */
461 if (zfs_refcount_count(
462 &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
463 dbuf_metadata_cache_target_bytes()) {
464 DBUF_STAT_BUMP(metadata_cache_overflow);
465 return (B_FALSE);
466 }
467
468 return (B_TRUE);
469 }
470
471 return (B_FALSE);
472 }
473
474 /*
475 * Remove an entry from the hash table. It must be in the EVICTING state.
476 */
477 static void
478 dbuf_hash_remove(dmu_buf_impl_t *db)
479 {
480 dbuf_hash_table_t *h = &dbuf_hash_table;
481 uint64_t idx;
482 dmu_buf_impl_t *dbf, **dbp;
483
484 ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
485 db->db_blkid), ==, db->db_hash);
486 idx = db->db_hash & h->hash_table_mask;
487
488 /*
489 * We mustn't hold db_mtx to maintain lock ordering:
490 * DBUF_HASH_MUTEX > db_mtx.
491 */
492 ASSERT(zfs_refcount_is_zero(&db->db_holds));
493 ASSERT(db->db_state == DB_EVICTING);
494 ASSERT(!MUTEX_HELD(&db->db_mtx));
495
496 mutex_enter(DBUF_HASH_MUTEX(h, idx));
497 dbp = &h->hash_table[idx];
498 while ((dbf = *dbp) != db) {
499 dbp = &dbf->db_hash_next;
500 ASSERT(dbf != NULL);
501 }
502 *dbp = db->db_hash_next;
503 db->db_hash_next = NULL;
504 if (h->hash_table[idx] &&
505 h->hash_table[idx]->db_hash_next == NULL)
506 DBUF_STAT_BUMPDOWN(hash_chains);
507 mutex_exit(DBUF_HASH_MUTEX(h, idx));
508 DBUF_STAT_BUMPDOWN(hash_elements);
509 }
510
511 typedef enum {
512 DBVU_EVICTING,
513 DBVU_NOT_EVICTING
514 } dbvu_verify_type_t;
515
516 static void
517 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
518 {
519 #ifdef ZFS_DEBUG
520 int64_t holds;
521
522 if (db->db_user == NULL)
523 return;
524
525 /* Only data blocks support the attachment of user data. */
526 ASSERT0(db->db_level);
527
528 /* Clients must resolve a dbuf before attaching user data. */
529 ASSERT(db->db.db_data != NULL);
530 ASSERT3U(db->db_state, ==, DB_CACHED);
531
532 holds = zfs_refcount_count(&db->db_holds);
533 if (verify_type == DBVU_EVICTING) {
534 /*
535 * Immediate eviction occurs when holds == dirtycnt.
536 * For normal eviction buffers, holds is zero on
537 * eviction, except when dbuf_fix_old_data() calls
538 * dbuf_clear_data(). However, the hold count can grow
539 * during eviction even though db_mtx is held (see
540 * dmu_bonus_hold() for an example), so we can only
541 * test the generic invariant that holds >= dirtycnt.
542 */
543 ASSERT3U(holds, >=, db->db_dirtycnt);
544 } else {
545 if (db->db_user_immediate_evict == TRUE)
546 ASSERT3U(holds, >=, db->db_dirtycnt);
547 else
548 ASSERT3U(holds, >, 0);
549 }
550 #endif
551 }
552
553 static void
554 dbuf_evict_user(dmu_buf_impl_t *db)
555 {
556 dmu_buf_user_t *dbu = db->db_user;
557
558 ASSERT(MUTEX_HELD(&db->db_mtx));
559
560 if (dbu == NULL)
561 return;
562
563 dbuf_verify_user(db, DBVU_EVICTING);
564 db->db_user = NULL;
565
566 #ifdef ZFS_DEBUG
567 if (dbu->dbu_clear_on_evict_dbufp != NULL)
568 *dbu->dbu_clear_on_evict_dbufp = NULL;
569 #endif
570
571 if (db->db_caching_status != DB_NO_CACHE) {
572 /*
573 * This is a cached dbuf, so the size of the user data is
574 * included in its cached amount. We adjust it here because the
575 * user data has already been detached from the dbuf, and the
576 * sync functions are not supposed to touch it (the dbuf might
577 * not exist anymore by the time the sync functions run).
578 */
579 uint64_t size = dbu->dbu_size;
580 (void) zfs_refcount_remove_many(
581 &dbuf_caches[db->db_caching_status].size, size, dbu);
582 if (db->db_caching_status == DB_DBUF_CACHE)
583 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
584 }
585
586 /*
587 * There are two eviction callbacks - one that we call synchronously
588 * and one that we invoke via a taskq. The async one is useful for
589 * avoiding lock order reversals and limiting stack depth.
590 *
591 * Note that if we have a sync callback but no async callback,
592 * it's likely that the sync callback will free the structure
593 * containing the dbu. In that case we need to take care to not
594 * dereference dbu after calling the sync evict func.
595 */
596 boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
597
598 if (dbu->dbu_evict_func_sync != NULL)
599 dbu->dbu_evict_func_sync(dbu);
600
601 if (has_async) {
602 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
603 dbu, 0, &dbu->dbu_tqent);
604 }
605 }
606
607 boolean_t
608 dbuf_is_metadata(dmu_buf_impl_t *db)
609 {
610 /*
611 * Consider indirect blocks and spill blocks to be meta data.
612 */
613 if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
614 return (B_TRUE);
615 } else {
616 boolean_t is_metadata;
617
618 DB_DNODE_ENTER(db);
619 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
620 DB_DNODE_EXIT(db);
621
622 return (is_metadata);
623 }
624 }
625
626 /*
627 * We want to exclude buffers that are on a special allocation class from
628 * L2ARC.
629 */
630 boolean_t
631 dbuf_is_l2cacheable(dmu_buf_impl_t *db, blkptr_t *bp)
632 {
633 if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
634 (db->db_objset->os_secondary_cache ==
635 ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
636 if (l2arc_exclude_special == 0)
637 return (B_TRUE);
638
639 /*
640 * bp must be checked in the event it was passed from
641 * dbuf_read_impl() as the result of the BP being set from
642 * a Direct I/O write in dbuf_read(). See comments in
643 * dbuf_read().
644 */
645 blkptr_t *db_bp = bp == NULL ? db->db_blkptr : bp;
646
647 if (db_bp == NULL || BP_IS_HOLE(db_bp))
648 return (B_FALSE);
649 uint64_t vdev = DVA_GET_VDEV(db_bp->blk_dva);
650 vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
651 vdev_t *vd = NULL;
652
653 if (vdev < rvd->vdev_children)
654 vd = rvd->vdev_child[vdev];
655
656 if (vd == NULL)
657 return (B_TRUE);
658
659 if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
660 vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
661 return (B_TRUE);
662 }
663 return (B_FALSE);
664 }
665
666 static inline boolean_t
667 dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
668 {
669 if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
670 (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
671 (level > 0 ||
672 DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
673 if (l2arc_exclude_special == 0)
674 return (B_TRUE);
675
676 if (bp == NULL || BP_IS_HOLE(bp))
677 return (B_FALSE);
678 uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
679 vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
680 vdev_t *vd = NULL;
681
682 if (vdev < rvd->vdev_children)
683 vd = rvd->vdev_child[vdev];
684
685 if (vd == NULL)
686 return (B_TRUE);
687
688 if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
689 vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
690 return (B_TRUE);
691 }
692 return (B_FALSE);
693 }
694
695
696 /*
697 * This function *must* return indices evenly distributed between all
698 * sublists of the multilist. This is needed due to how the dbuf eviction
699 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
700 * distributed between all sublists and uses this assumption when
701 * deciding which sublist to evict from and how much to evict from it.
702 */
703 static unsigned int
704 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
705 {
706 dmu_buf_impl_t *db = obj;
707
708 /*
709 * The assumption here is that the hash value for a given
710 * dmu_buf_impl_t will remain constant throughout its lifetime
711 * (i.e. its objset, object, level and blkid fields don't change).
712 * Thus, we don't need to store the dbuf's sublist index
713 * on insertion, as this index can be recalculated on removal.
714 *
715 * Also, the low order bits of the hash value are thought to be
716 * distributed evenly. Otherwise, in the case that the multilist
717 * has a power of two number of sublists, each sublist's usage
718 * would not be evenly distributed. In this context full 64bit
719 * division would be a waste of time, so limit it to 32 bits.
720 */
721 return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
722 db->db_level, db->db_blkid) %
723 multilist_get_num_sublists(ml));
724 }
725
726 /*
727 * The target size of the dbuf cache can grow with the ARC target,
728 * unless limited by the tunable dbuf_cache_max_bytes.
729 */
730 static inline unsigned long
731 dbuf_cache_target_bytes(void)
732 {
733 return (MIN(dbuf_cache_max_bytes,
734 arc_target_bytes() >> dbuf_cache_shift));
735 }
736
737 /*
738 * The target size of the dbuf metadata cache can grow with the ARC target,
739 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
740 */
741 static inline unsigned long
742 dbuf_metadata_cache_target_bytes(void)
743 {
744 return (MIN(dbuf_metadata_cache_max_bytes,
745 arc_target_bytes() >> dbuf_metadata_cache_shift));
746 }
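/*
 * Example with the default shifts above: for an ARC target of 4 GiB,
 * the dbuf cache target is 4 GiB >> 5 = 128 MiB and the metadata cache
 * target is 4 GiB >> 6 = 64 MiB, unless dbuf_cache_max_bytes or
 * dbuf_metadata_cache_max_bytes clamps them lower.
 */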
747
748 static inline uint64_t
749 dbuf_cache_hiwater_bytes(void)
750 {
751 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
752 return (dbuf_cache_target +
753 (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
754 }
755
756 static inline uint64_t
757 dbuf_cache_lowater_bytes(void)
758 {
759 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
760 return (dbuf_cache_target -
761 (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
762 }
763
764 static inline boolean_t
765 dbuf_cache_above_lowater(void)
766 {
767 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
768 dbuf_cache_lowater_bytes());
769 }
770
771 /*
772 * Evict the oldest eligible dbuf from the dbuf cache.
773 */
774 static void
775 dbuf_evict_one(void)
776 {
777 int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
778 multilist_sublist_t *mls = multilist_sublist_lock_idx(
779 &dbuf_caches[DB_DBUF_CACHE].cache, idx);
780
781 ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
782
783 dmu_buf_impl_t *db = multilist_sublist_tail(mls);
784 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
785 db = multilist_sublist_prev(mls, db);
786 }
787
788 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
789 multilist_sublist_t *, mls);
790
791 if (db != NULL) {
792 multilist_sublist_remove(mls, db);
793 multilist_sublist_unlock(mls);
794 uint64_t size = db->db.db_size;
795 uint64_t usize = dmu_buf_user_size(&db->db);
796 (void) zfs_refcount_remove_many(
797 &dbuf_caches[DB_DBUF_CACHE].size, size, db);
798 (void) zfs_refcount_remove_many(
799 &dbuf_caches[DB_DBUF_CACHE].size, usize, db->db_user);
800 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
801 DBUF_STAT_BUMPDOWN(cache_count);
802 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size + usize);
803 ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
804 db->db_caching_status = DB_NO_CACHE;
805 dbuf_destroy(db);
806 DBUF_STAT_BUMP(cache_total_evicts);
807 } else {
808 multilist_sublist_unlock(mls);
809 }
810 }
811
812 /*
813 * The dbuf evict thread is responsible for aging out dbufs from the
814 * cache. Once the cache has reached its maximum size, dbufs are removed
815 * and destroyed. The eviction thread will continue running until the size
816 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
817 * out of the cache it is destroyed and becomes eligible for arc eviction.
818 */
819 static __attribute__((noreturn)) void
820 dbuf_evict_thread(void *unused)
821 {
822 (void) unused;
823 callb_cpr_t cpr;
824
825 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
826
827 mutex_enter(&dbuf_evict_lock);
828 while (!dbuf_evict_thread_exit) {
829 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
830 CALLB_CPR_SAFE_BEGIN(&cpr);
831 (void) cv_timedwait_idle_hires(&dbuf_evict_cv,
832 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
833 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
834 }
835 mutex_exit(&dbuf_evict_lock);
836
837 /*
838 * Keep evicting as long as we're above the low water mark
839 * for the cache. We do this without holding the locks to
840 * minimize lock contention.
841 */
842 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
843 dbuf_evict_one();
844 }
845
846 mutex_enter(&dbuf_evict_lock);
847 }
848
849 dbuf_evict_thread_exit = B_FALSE;
850 cv_broadcast(&dbuf_evict_cv);
851 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
852 thread_exit();
853 }
854
855 /*
856 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
857 * If the dbuf cache is at its high water mark, then evict a dbuf from the
858 * dbuf cache using the caller's context.
859 */
860 static void
861 dbuf_evict_notify(uint64_t size)
862 {
863 /*
864 * We check if we should evict without holding the dbuf_evict_lock,
865 * because it's OK to occasionally make the wrong decision here,
866 * and grabbing the lock results in massive lock contention.
867 */
868 if (size > dbuf_cache_target_bytes()) {
869 /*
870 * Avoid calling dbuf_evict_one() from memory reclaim context
871 * (e.g. Linux kswapd, FreeBSD pagedaemon) to prevent deadlocks.
872 * Memory reclaim threads can get stuck waiting for the dbuf
873 * hash lock.
874 */
875 if (size > dbuf_cache_hiwater_bytes() &&
876 !current_is_reclaim_thread()) {
877 dbuf_evict_one();
878 }
879 cv_signal(&dbuf_evict_cv);
880 }
881 }
882
883 /*
884 * Since dbuf cache size is a fraction of target ARC size, ARC calls this when
885 * its target size is reduced due to memory pressure.
886 */
887 void
888 dbuf_cache_reduce_target_size(void)
889 {
890 uint64_t size = zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
891
892 if (size > dbuf_cache_target_bytes())
893 cv_signal(&dbuf_evict_cv);
894 }
895
896 static int
897 dbuf_kstat_update(kstat_t *ksp, int rw)
898 {
899 dbuf_stats_t *ds = ksp->ks_data;
900 dbuf_hash_table_t *h = &dbuf_hash_table;
901
902 if (rw == KSTAT_WRITE)
903 return (SET_ERROR(EACCES));
904
905 ds->cache_count.value.ui64 =
906 wmsum_value(&dbuf_sums.cache_count);
907 ds->cache_size_bytes.value.ui64 =
908 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
909 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
910 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
911 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
912 ds->cache_total_evicts.value.ui64 =
913 wmsum_value(&dbuf_sums.cache_total_evicts);
914 for (int i = 0; i < DN_MAX_LEVELS; i++) {
915 ds->cache_levels[i].value.ui64 =
916 wmsum_value(&dbuf_sums.cache_levels[i]);
917 ds->cache_levels_bytes[i].value.ui64 =
918 wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
919 }
920 ds->hash_hits.value.ui64 =
921 wmsum_value(&dbuf_sums.hash_hits);
922 ds->hash_misses.value.ui64 =
923 wmsum_value(&dbuf_sums.hash_misses);
924 ds->hash_collisions.value.ui64 =
925 wmsum_value(&dbuf_sums.hash_collisions);
926 ds->hash_elements.value.ui64 =
927 wmsum_value(&dbuf_sums.hash_elements);
928 ds->hash_chains.value.ui64 =
929 wmsum_value(&dbuf_sums.hash_chains);
930 ds->hash_insert_race.value.ui64 =
931 wmsum_value(&dbuf_sums.hash_insert_race);
932 ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
933 ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
934 ds->metadata_cache_count.value.ui64 =
935 wmsum_value(&dbuf_sums.metadata_cache_count);
936 ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
937 &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
938 ds->metadata_cache_overflow.value.ui64 =
939 wmsum_value(&dbuf_sums.metadata_cache_overflow);
940 return (0);
941 }
942
943 void
944 dbuf_init(void)
945 {
946 uint64_t hmsize, hsize = 1ULL << 16;
947 dbuf_hash_table_t *h = &dbuf_hash_table;
948
949 /*
950 * The hash table is big enough to fill one eighth of physical memory
951 * with an average block size of zfs_arc_average_blocksize (default 8K).
952 * By default, the table will take up
953 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
954 */
955 while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
956 hsize <<= 1;
957
958 h->hash_table = NULL;
959 while (h->hash_table == NULL) {
960 h->hash_table_mask = hsize - 1;
961
962 h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
963 if (h->hash_table == NULL)
964 hsize >>= 1;
965
966 ASSERT3U(hsize, >=, 1ULL << 10);
967 }
968
969 /*
970 * The hash table buckets are protected by an array of mutexes where
971 * each mutex is responsible for protecting 128 buckets. A minimum
972 * array size of 8192 is targeted to avoid contention.
973 */
974 if (dbuf_mutex_cache_shift == 0)
975 hmsize = MAX(hsize >> 7, 1ULL << 13);
976 else
977 hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
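	/*
	 * For example, with the default dynamic sizing a 1M-bucket hash
	 * table gets 1M >> 7 = 8192 mutexes, and smaller tables still
	 * get the 8192 minimum.
	 */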
978
979 h->hash_mutexes = NULL;
980 while (h->hash_mutexes == NULL) {
981 h->hash_mutex_mask = hmsize - 1;
982
983 h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
984 KM_SLEEP);
985 if (h->hash_mutexes == NULL)
986 hmsize >>= 1;
987 }
988
989 dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
990 sizeof (dmu_buf_impl_t),
991 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
992 dbuf_dirty_kmem_cache = kmem_cache_create("dbuf_dirty_record_t",
993 sizeof (dbuf_dirty_record_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
994
995 for (int i = 0; i < hmsize; i++)
996 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_NOLOCKDEP, NULL);
997
998 dbuf_stats_init(h);
999
1000 /*
1001 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
1002 * configuration is not required.
1003 */
1004 dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
1005
1006 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
1007 multilist_create(&dbuf_caches[dcs].cache,
1008 sizeof (dmu_buf_impl_t),
1009 offsetof(dmu_buf_impl_t, db_cache_link),
1010 dbuf_cache_multilist_index_func);
1011 zfs_refcount_create(&dbuf_caches[dcs].size);
1012 }
1013
1014 dbuf_evict_thread_exit = B_FALSE;
1015 mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1016 cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
1017 dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
1018 NULL, 0, &p0, TS_RUN, minclsyspri);
1019
1020 wmsum_init(&dbuf_sums.cache_count, 0);
1021 wmsum_init(&dbuf_sums.cache_total_evicts, 0);
1022 for (int i = 0; i < DN_MAX_LEVELS; i++) {
1023 wmsum_init(&dbuf_sums.cache_levels[i], 0);
1024 wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
1025 }
1026 wmsum_init(&dbuf_sums.hash_hits, 0);
1027 wmsum_init(&dbuf_sums.hash_misses, 0);
1028 wmsum_init(&dbuf_sums.hash_collisions, 0);
1029 wmsum_init(&dbuf_sums.hash_elements, 0);
1030 wmsum_init(&dbuf_sums.hash_chains, 0);
1031 wmsum_init(&dbuf_sums.hash_insert_race, 0);
1032 wmsum_init(&dbuf_sums.metadata_cache_count, 0);
1033 wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
1034
1035 dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
1036 KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
1037 KSTAT_FLAG_VIRTUAL);
1038 if (dbuf_ksp != NULL) {
1039 for (int i = 0; i < DN_MAX_LEVELS; i++) {
1040 snprintf(dbuf_stats.cache_levels[i].name,
1041 KSTAT_STRLEN, "cache_level_%d", i);
1042 dbuf_stats.cache_levels[i].data_type =
1043 KSTAT_DATA_UINT64;
1044 snprintf(dbuf_stats.cache_levels_bytes[i].name,
1045 KSTAT_STRLEN, "cache_level_%d_bytes", i);
1046 dbuf_stats.cache_levels_bytes[i].data_type =
1047 KSTAT_DATA_UINT64;
1048 }
1049 dbuf_ksp->ks_data = &dbuf_stats;
1050 dbuf_ksp->ks_update = dbuf_kstat_update;
1051 kstat_install(dbuf_ksp);
1052 }
1053 }
1054
1055 void
1056 dbuf_fini(void)
1057 {
1058 dbuf_hash_table_t *h = &dbuf_hash_table;
1059
1060 dbuf_stats_destroy();
1061
1062 for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
1063 mutex_destroy(&h->hash_mutexes[i]);
1064
1065 vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
1066 vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
1067 sizeof (kmutex_t));
1068
1069 kmem_cache_destroy(dbuf_kmem_cache);
1070 kmem_cache_destroy(dbuf_dirty_kmem_cache);
1071 taskq_destroy(dbu_evict_taskq);
1072
1073 mutex_enter(&dbuf_evict_lock);
1074 dbuf_evict_thread_exit = B_TRUE;
1075 while (dbuf_evict_thread_exit) {
1076 cv_signal(&dbuf_evict_cv);
1077 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
1078 }
1079 mutex_exit(&dbuf_evict_lock);
1080
1081 mutex_destroy(&dbuf_evict_lock);
1082 cv_destroy(&dbuf_evict_cv);
1083
1084 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
1085 zfs_refcount_destroy(&dbuf_caches[dcs].size);
1086 multilist_destroy(&dbuf_caches[dcs].cache);
1087 }
1088
1089 if (dbuf_ksp != NULL) {
1090 kstat_delete(dbuf_ksp);
1091 dbuf_ksp = NULL;
1092 }
1093
1094 wmsum_fini(&dbuf_sums.cache_count);
1095 wmsum_fini(&dbuf_sums.cache_total_evicts);
1096 for (int i = 0; i < DN_MAX_LEVELS; i++) {
1097 wmsum_fini(&dbuf_sums.cache_levels[i]);
1098 wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
1099 }
1100 wmsum_fini(&dbuf_sums.hash_hits);
1101 wmsum_fini(&dbuf_sums.hash_misses);
1102 wmsum_fini(&dbuf_sums.hash_collisions);
1103 wmsum_fini(&dbuf_sums.hash_elements);
1104 wmsum_fini(&dbuf_sums.hash_chains);
1105 wmsum_fini(&dbuf_sums.hash_insert_race);
1106 wmsum_fini(&dbuf_sums.metadata_cache_count);
1107 wmsum_fini(&dbuf_sums.metadata_cache_overflow);
1108 }
1109
1110 /*
1111 * Other stuff.
1112 */
1113
1114 #ifdef ZFS_DEBUG
1115 static void
1116 dbuf_verify(dmu_buf_impl_t *db)
1117 {
1118 dnode_t *dn;
1119 dbuf_dirty_record_t *dr;
1120 uint32_t txg_prev;
1121
1122 ASSERT(MUTEX_HELD(&db->db_mtx));
1123
1124 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
1125 return;
1126
1127 ASSERT(db->db_objset != NULL);
1128 DB_DNODE_ENTER(db);
1129 dn = DB_DNODE(db);
1130 if (dn == NULL) {
1131 ASSERT0P(db->db_parent);
1132 ASSERT0P(db->db_blkptr);
1133 } else {
1134 ASSERT3U(db->db.db_object, ==, dn->dn_object);
1135 ASSERT3P(db->db_objset, ==, dn->dn_objset);
1136 ASSERT3U(db->db_level, <, dn->dn_nlevels);
1137 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
1138 db->db_blkid == DMU_SPILL_BLKID ||
1139 !avl_is_empty(&dn->dn_dbufs));
1140 }
1141 if (db->db_blkid == DMU_BONUS_BLKID) {
1142 ASSERT(dn != NULL);
1143 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1144 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
1145 } else if (db->db_blkid == DMU_SPILL_BLKID) {
1146 ASSERT(dn != NULL);
1147 ASSERT0(db->db.db_offset);
1148 } else {
1149 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
1150 }
1151
1152 if ((dr = list_head(&db->db_dirty_records)) != NULL) {
1153 ASSERT(dr->dr_dbuf == db);
1154 txg_prev = dr->dr_txg;
1155 for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
1156 dr = list_next(&db->db_dirty_records, dr)) {
1157 ASSERT(dr->dr_dbuf == db);
1158 ASSERT(txg_prev > dr->dr_txg);
1159 txg_prev = dr->dr_txg;
1160 }
1161 }
1162
1163 /*
1164 * We can't assert that db_size matches dn_datablksz because it
1165 * can be momentarily different when another thread is doing
1166 * dnode_set_blksz().
1167 */
1168 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
1169 dr = db->db_data_pending;
1170 /*
1171 * It should only be modified in syncing context, so
1172 * make sure we only have one copy of the data.
1173 */
1174 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
1175 }
1176
1177 /* verify db->db_blkptr */
1178 if (db->db_blkptr) {
1179 if (db->db_parent == dn->dn_dbuf) {
1180 /* db is pointed to by the dnode */
1181 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1182 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1183 ASSERT0P(db->db_parent);
1184 else
1185 ASSERT(db->db_parent != NULL);
1186 if (db->db_blkid != DMU_SPILL_BLKID)
1187 ASSERT3P(db->db_blkptr, ==,
1188 &dn->dn_phys->dn_blkptr[db->db_blkid]);
1189 } else {
1190 /* db is pointed to by an indirect block */
1191 int epb __maybe_unused = db->db_parent->db.db_size >>
1192 SPA_BLKPTRSHIFT;
1193 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1194 ASSERT3U(db->db_parent->db.db_object, ==,
1195 db->db.db_object);
1196 ASSERT3P(db->db_blkptr, ==,
1197 ((blkptr_t *)db->db_parent->db.db_data +
1198 db->db_blkid % epb));
1199 }
1200 }
1201 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1202 (db->db_buf == NULL || db->db_buf->b_data) &&
1203 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1204 db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
1205 /*
1206 * If the blkptr isn't set but they have nonzero data,
1207 * it had better be dirty, otherwise we'll lose that
1208 * data when we evict this buffer.
1209 *
1210 * There is an exception to this rule for indirect blocks; in
1211 * this case, if the indirect block is a hole, we fill in a few
1212 * fields on each of the child blocks (importantly, birth time)
1213 * to prevent hole birth times from being lost when you
1214 * partially fill in a hole.
1215 */
1216 if (db->db_dirtycnt == 0) {
1217 if (db->db_level == 0) {
1218 uint64_t *buf = db->db.db_data;
1219 int i;
1220
1221 for (i = 0; i < db->db.db_size >> 3; i++) {
1222 ASSERT0(buf[i]);
1223 }
1224 } else {
1225 blkptr_t *bps = db->db.db_data;
1226 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1227 db->db.db_size);
1228 /*
1229 * We want to verify that all the blkptrs in the
1230 * indirect block are holes, but we may have
1231 * automatically set up a few fields for them.
1232 * We iterate through each blkptr and verify
1233 * they only have those fields set.
1234 */
1235 for (int i = 0;
1236 i < db->db.db_size / sizeof (blkptr_t);
1237 i++) {
1238 blkptr_t *bp = &bps[i];
1239 ASSERT(ZIO_CHECKSUM_IS_ZERO(
1240 &bp->blk_cksum));
1241 ASSERT(
1242 DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1243 DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1244 DVA_IS_EMPTY(&bp->blk_dva[2]));
1245 ASSERT0(bp->blk_fill);
1246 ASSERT(!BP_IS_EMBEDDED(bp));
1247 ASSERT(BP_IS_HOLE(bp));
1248 ASSERT0(BP_GET_RAW_PHYSICAL_BIRTH(bp));
1249 }
1250 }
1251 }
1252 }
1253 DB_DNODE_EXIT(db);
1254 }
1255 #endif
1256
1257 static void
1258 dbuf_clear_data(dmu_buf_impl_t *db)
1259 {
1260 ASSERT(MUTEX_HELD(&db->db_mtx));
1261 dbuf_evict_user(db);
1262 ASSERT0P(db->db_buf);
1263 db->db.db_data = NULL;
1264 if (db->db_state != DB_NOFILL) {
1265 db->db_state = DB_UNCACHED;
1266 DTRACE_SET_STATE(db, "clear data");
1267 }
1268 }
1269
1270 static void
1271 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1272 {
1273 ASSERT(MUTEX_HELD(&db->db_mtx));
1274 ASSERT(buf != NULL);
1275
1276 db->db_buf = buf;
1277 ASSERT(buf->b_data != NULL);
1278 db->db.db_data = buf->b_data;
1279 }
1280
1281 static arc_buf_t *
1282 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1283 {
1284 spa_t *spa = db->db_objset->os_spa;
1285
1286 return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1287 }
1288
1289 /*
1290 * Calculate which level n block references the data at the level 0 offset
1291 * provided.
1292 */
1293 uint64_t
1294 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1295 {
1296 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1297 /*
1298 * The level n blkid is equal to the level 0 blkid divided by
1299 * the number of level 0s in a level n block.
1300 *
1301 * The level 0 blkid is offset >> datablkshift =
1302 * offset / 2^datablkshift.
1303 *
1304 * The number of level 0s in a level n is the number of block
1305 * pointers in an indirect block, raised to the power of level.
1306 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1307 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1308 *
1309 * Thus, the level n blkid is: offset /
1310 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1311 * = offset / 2^(datablkshift + level *
1312 * (indblkshift - SPA_BLKPTRSHIFT))
1313 * = offset >> (datablkshift + level *
1314 * (indblkshift - SPA_BLKPTRSHIFT))
1315 */
1316
1317 const unsigned exp = dn->dn_datablkshift +
1318 level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1319
1320 if (exp >= 8 * sizeof (offset)) {
1321 /* This only happens on the highest indirection level */
1322 ASSERT3U(level, ==, dn->dn_nlevels - 1);
1323 return (0);
1324 }
1325
1326 ASSERT3U(exp, <, 8 * sizeof (offset));
1327
1328 return (offset >> exp);
1329 } else {
1330 ASSERT3U(offset, <, dn->dn_datablksz);
1331 return (0);
1332 }
1333 }
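/*
 * Worked example (a hypothetical dnode with 128K data blocks and 128K
 * indirect blocks, i.e. datablkshift = 17, indblkshift = 17 and
 * SPA_BLKPTRSHIFT = 7, so 1024 block pointers per indirect block). For
 * an offset whose level 0 blkid is 2000000 (offset = 2000000 << 17):
 *
 *	dbuf_whichblock(dn, 0, offset) == 2000000
 *	dbuf_whichblock(dn, 1, offset) == 1953	(2000000 >> 10)
 *	dbuf_whichblock(dn, 2, offset) == 1	(2000000 >> 20)
 *
 * Each additional level shifts the level 0 blkid right by another
 * (indblkshift - SPA_BLKPTRSHIFT) bits.
 */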
1334
1335 /*
1336 * This function is used to lock the parent of the provided dbuf. This should be
1337 * used when modifying or reading db_blkptr.
1338 */
1339 db_lock_type_t
1340 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
1341 {
1342 enum db_lock_type ret = DLT_NONE;
1343 if (db->db_parent != NULL) {
1344 rw_enter(&db->db_parent->db_rwlock, rw);
1345 ret = DLT_PARENT;
1346 } else if (dmu_objset_ds(db->db_objset) != NULL) {
1347 rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1348 tag);
1349 ret = DLT_OBJSET;
1350 }
1351 /*
1352 * We only return a DLT_NONE lock when it's the top-most indirect block
1353 * of the meta-dnode of the MOS.
1354 */
1355 return (ret);
1356 }
1357
1358 /*
1359 * We need to pass the lock type in because it's possible that the block will
1360 * move from being the topmost indirect block in a dnode (and thus, have no
1361 * parent) to not the top-most via an indirection increase. This would cause a
1362 * panic if we didn't pass the lock type in.
1363 */
1364 void
1365 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
1366 {
1367 if (type == DLT_PARENT)
1368 rw_exit(&db->db_parent->db_rwlock);
1369 else if (type == DLT_OBJSET)
1370 rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1371 }
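/*
 * A minimal usage sketch, pairing the two calls with the same tag while
 * db_blkptr is examined:
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	... read or update db->db_blkptr ...
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 */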
1372
1373 static void
1374 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1375 arc_buf_t *buf, void *vdb)
1376 {
1377 (void) zb, (void) bp;
1378 dmu_buf_impl_t *db = vdb;
1379
1380 mutex_enter(&db->db_mtx);
1381 ASSERT3U(db->db_state, ==, DB_READ);
1382
1383 /*
1384 * All reads are synchronous, so we must have a hold on the dbuf
1385 */
1386 ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1387 ASSERT0P(db->db_buf);
1388 ASSERT0P(db->db.db_data);
1389 if (buf == NULL) {
1390 /* i/o error */
1391 ASSERT(zio == NULL || zio->io_error != 0);
1392 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1393 ASSERT0P(db->db_buf);
1394 db->db_state = DB_UNCACHED;
1395 DTRACE_SET_STATE(db, "i/o error");
1396 } else if (db->db_level == 0 && db->db_freed_in_flight) {
1397 /* freed in flight */
1398 ASSERT(zio == NULL || zio->io_error == 0);
1399 arc_release(buf, db);
1400 memset(buf->b_data, 0, db->db.db_size);
1401 arc_buf_freeze(buf);
1402 db->db_freed_in_flight = FALSE;
1403 dbuf_set_data(db, buf);
1404 db->db_state = DB_CACHED;
1405 DTRACE_SET_STATE(db, "freed in flight");
1406 } else {
1407 /* success */
1408 ASSERT(zio == NULL || zio->io_error == 0);
1409 dbuf_set_data(db, buf);
1410 db->db_state = DB_CACHED;
1411 DTRACE_SET_STATE(db, "successful read");
1412 }
1413 cv_broadcast(&db->db_changed);
1414 dbuf_rele_and_unlock(db, NULL, B_FALSE);
1415 }
1416
1417 /*
1418 * Shortcut for performing reads on bonus dbufs. Returns
1419 * an error if we fail to verify the dnode associated with
1420 * a decrypted block. Otherwise success.
1421 */
1422 static int
1423 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn)
1424 {
1425 void *db_data;
1426 int bonuslen, max_bonuslen;
1427
1428 bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1429 max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1430 ASSERT(MUTEX_HELD(&db->db_mtx));
1431 ASSERT(DB_DNODE_HELD(db));
1432 ASSERT3U(bonuslen, <=, db->db.db_size);
1433 db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1434 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1435 if (bonuslen < max_bonuslen)
1436 memset(db_data, 0, max_bonuslen);
1437 if (bonuslen)
1438 memcpy(db_data, DN_BONUS(dn->dn_phys), bonuslen);
1439 db->db.db_data = db_data;
1440 db->db_state = DB_CACHED;
1441 DTRACE_SET_STATE(db, "bonus buffer filled");
1442 return (0);
1443 }
1444
1445 static void
1446 dbuf_handle_indirect_hole(void *data, dnode_t *dn, blkptr_t *dbbp)
1447 {
1448 blkptr_t *bps = data;
1449 uint32_t indbs = 1ULL << dn->dn_indblkshift;
1450 int n_bps = indbs >> SPA_BLKPTRSHIFT;
1451
1452 for (int i = 0; i < n_bps; i++) {
1453 blkptr_t *bp = &bps[i];
1454
1455 ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
1456 BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
1457 dn->dn_datablksz : BP_GET_LSIZE(dbbp));
1458 BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
1459 BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
1460 BP_SET_BIRTH(bp, BP_GET_LOGICAL_BIRTH(dbbp), 0);
1461 }
1462 }
1463
1464 /*
1465 * Handle reads on dbufs that are holes, if necessary. This function
1466 * requires that the dbuf's mutex is held. Returns success (0) if action
1467 * was taken, ENOENT if no action was taken.
1468 */
1469 static int
1470 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
1471 {
1472 ASSERT(MUTEX_HELD(&db->db_mtx));
1473 arc_buf_t *db_data;
1474
1475 int is_hole = bp == NULL || BP_IS_HOLE(bp);
1476 /*
1477 * For level 0 blocks only, if the above check fails:
1478 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1479 * processes the delete record and clears the bp while we are waiting
1480 * for the dn_mtx (resulting in a "no" from block_freed).
1481 */
1482 if (!is_hole && db->db_level == 0)
1483 is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
1484
1485 if (is_hole) {
1486 db_data = dbuf_alloc_arcbuf(db);
1487 memset(db_data->b_data, 0, db->db.db_size);
1488
1489 if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
1490 BP_GET_LOGICAL_BIRTH(bp) != 0) {
1491 dbuf_handle_indirect_hole(db_data->b_data, dn, bp);
1492 }
1493 dbuf_set_data(db, db_data);
1494 db->db_state = DB_CACHED;
1495 DTRACE_SET_STATE(db, "hole read satisfied");
1496 return (0);
1497 }
1498 return (ENOENT);
1499 }
1500
1501 /*
1502 * This function ensures that, when doing a decrypting read of a block,
1503 * we make sure we have decrypted the dnode associated with it. We must do
1504 * this so that we ensure we are fully authenticating the checksum-of-MACs
1505 * tree from the root of the objset down to this block. Indirect blocks are
1506 * always verified against their secure checksum-of-MACs assuming that the
1507 * dnode containing them is correct. Now that we are doing a decrypting read,
1508 * we can be sure that the key is loaded and verify that assumption. This is
1509 * especially important considering that we always read encrypted dnode
1510 * blocks as raw data (without verifying their MACs) to start, and
1511 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1512 */
1513 static int
1514 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, dnode_t *dn,
1515 dmu_flags_t flags)
1516 {
1517 objset_t *os = db->db_objset;
1518 dmu_buf_impl_t *dndb;
1519 arc_buf_t *dnbuf;
1520 zbookmark_phys_t zb;
1521 int err;
1522
1523 if ((flags & DMU_READ_NO_DECRYPT) != 0 ||
1524 !os->os_encrypted || os->os_raw_receive ||
1525 (dndb = dn->dn_dbuf) == NULL)
1526 return (0);
1527
1528 dnbuf = dndb->db_buf;
1529 if (!arc_is_encrypted(dnbuf))
1530 return (0);
1531
1532 mutex_enter(&dndb->db_mtx);
1533
1534 /*
1535 * Since dnode buffer is modified by sync process, there can be only
1536 * one copy of it. It means we can not modify (decrypt) it while it
1537 * is being written. I don't see how this may happen now, since
1538 * encrypted dnode writes by receive should be completed before any
1539 * plain-text reads due to txg wait, but better be safe than sorry.
1540 */
1541 while (1) {
1542 if (!arc_is_encrypted(dnbuf)) {
1543 mutex_exit(&dndb->db_mtx);
1544 return (0);
1545 }
1546 dbuf_dirty_record_t *dr = dndb->db_data_pending;
1547 if (dr == NULL || dr->dt.dl.dr_data != dnbuf)
1548 break;
1549 cv_wait(&dndb->db_changed, &dndb->db_mtx);
1550 };
1551
1552 SET_BOOKMARK(&zb, dmu_objset_id(os),
1553 DMU_META_DNODE_OBJECT, 0, dndb->db_blkid);
1554 err = arc_untransform(dnbuf, os->os_spa, &zb, B_TRUE);
1555
1556 /*
1557 * An error code of EACCES tells us that the key is still not
1558 * available. This is ok if we are only reading authenticated
1559 * (and therefore non-encrypted) blocks.
1560 */
1561 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1562 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1563 (db->db_blkid == DMU_BONUS_BLKID &&
1564 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1565 err = 0;
1566
1567 mutex_exit(&dndb->db_mtx);
1568
1569 return (err);
1570 }
1571
1572 /*
1573 * Drops db_mtx and the parent lock specified by dblt and tag before
1574 * returning.
1575 */
1576 static int
1577 dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, dmu_flags_t flags,
1578 db_lock_type_t dblt, blkptr_t *bp, const void *tag)
1579 {
1580 zbookmark_phys_t zb;
1581 uint32_t aflags = ARC_FLAG_NOWAIT;
1582 int err, zio_flags;
1583
1584 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1585 ASSERT(MUTEX_HELD(&db->db_mtx));
1586 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1587 ASSERT0P(db->db_buf);
1588 ASSERT(db->db_parent == NULL ||
1589 RW_LOCK_HELD(&db->db_parent->db_rwlock));
1590
1591 if (db->db_blkid == DMU_BONUS_BLKID) {
1592 err = dbuf_read_bonus(db, dn);
1593 goto early_unlock;
1594 }
1595
1596 err = dbuf_read_hole(db, dn, bp);
1597 if (err == 0)
1598 goto early_unlock;
1599
1600 ASSERT(bp != NULL);
1601
1602 /*
1603 * Any attempt to read a redacted block should result in an error. This
1604 * will never happen under normal conditions, but can be useful for
1605 * debugging purposes.
1606 */
1607 if (BP_IS_REDACTED(bp)) {
1608 ASSERT(dsl_dataset_feature_is_active(
1609 db->db_objset->os_dsl_dataset,
1610 SPA_FEATURE_REDACTED_DATASETS));
1611 err = SET_ERROR(EIO);
1612 goto early_unlock;
1613 }
1614
1615 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1616 db->db.db_object, db->db_level, db->db_blkid);
1617
1618 /*
1619 * All bps of an encrypted os should have the encryption bit set.
1620 * If this is not true it indicates tampering and we report an error.
1621 */
1622 if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bp)) {
1623 spa_log_error(db->db_objset->os_spa, &zb,
1624 BP_GET_PHYSICAL_BIRTH(bp));
1625 err = SET_ERROR(EIO);
1626 goto early_unlock;
1627 }
1628
1629 db->db_state = DB_READ;
1630 DTRACE_SET_STATE(db, "read issued");
1631 mutex_exit(&db->db_mtx);
1632
1633 if (!DBUF_IS_CACHEABLE(db))
1634 aflags |= ARC_FLAG_UNCACHED;
1635 else if (dbuf_is_l2cacheable(db, bp))
1636 aflags |= ARC_FLAG_L2CACHE;
1637
1638 dbuf_add_ref(db, NULL);
1639
1640 zio_flags = (flags & DB_RF_CANFAIL) ?
1641 ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1642
1643 if ((flags & DMU_READ_NO_DECRYPT) && BP_IS_PROTECTED(bp))
1644 zio_flags |= ZIO_FLAG_RAW;
1645
1646 /*
1647 * The zio layer will copy the provided blkptr later, but we need to
1648 * do this now so that we can release the parent's rwlock. We have to
1649 * do that now so that if dbuf_read_done is called synchronously (on
1650 * an l1 cache hit) we don't acquire the db_mtx while holding the
1651 * parent's rwlock, which would be a lock ordering violation.
1652 */
1653 blkptr_t copy = *bp;
1654 dmu_buf_unlock_parent(db, dblt, tag);
1655 return (arc_read(zio, db->db_objset->os_spa, &copy,
1656 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1657 &aflags, &zb));
1658
1659 early_unlock:
1660 mutex_exit(&db->db_mtx);
1661 dmu_buf_unlock_parent(db, dblt, tag);
1662 return (err);
1663 }
1664
1665 /*
1666 * This is our just-in-time copy function. It makes a copy of buffers that
1667 * have been modified in a previous transaction group before we access them in
1668 * the current active group.
1669 *
1670 * This function is used in three places: when we are dirtying a buffer for the
1671 * first time in a txg, when we are freeing a range in a dnode that includes
1672 * this buffer, and when we are accessing a buffer which was received compressed
1673 * and later referenced in a WRITE_BYREF record.
1674 *
1675 * Note that when we are called from dbuf_free_range() we do not put a hold on
1676 * the buffer, we just traverse the active dbuf list for the dnode.
1677 */
1678 static void
1679 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1680 {
1681 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1682
1683 ASSERT(MUTEX_HELD(&db->db_mtx));
1684 ASSERT(db->db.db_data != NULL);
1685 ASSERT0(db->db_level);
1686 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1687
1688 if (dr == NULL ||
1689 (dr->dt.dl.dr_data !=
1690 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1691 return;
1692
1693 /*
1694 * If the last dirty record for this dbuf has not yet synced
1695 * and it is referencing the dbuf data, either:
1696 * reset the reference to point to a new copy,
1697 * or (if there are no active holders)
1698 * just null out the current db_data pointer.
1699 */
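/*
 * The un-synced dirty record must belong to one of the (at most three)
 * txgs that can be in flight at once, so it can be no more than two
 * txgs older than the txg being dirtied.
 */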
1700 ASSERT3U(dr->dr_txg, >=, txg - 2);
1701 if (db->db_blkid == DMU_BONUS_BLKID) {
1702 dnode_t *dn = DB_DNODE(db);
1703 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1704 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1705 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1706 memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1707 } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1708 dnode_t *dn = DB_DNODE(db);
1709 int size = arc_buf_size(db->db_buf);
1710 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1711 spa_t *spa = db->db_objset->os_spa;
1712 enum zio_compress compress_type =
1713 arc_get_compression(db->db_buf);
1714 uint8_t complevel = arc_get_complevel(db->db_buf);
1715
1716 if (arc_is_encrypted(db->db_buf)) {
1717 boolean_t byteorder;
1718 uint8_t salt[ZIO_DATA_SALT_LEN];
1719 uint8_t iv[ZIO_DATA_IV_LEN];
1720 uint8_t mac[ZIO_DATA_MAC_LEN];
1721
1722 arc_get_raw_params(db->db_buf, &byteorder, salt,
1723 iv, mac);
1724 dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1725 dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1726 mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1727 compress_type, complevel);
1728 } else if (compress_type != ZIO_COMPRESS_OFF) {
1729 ASSERT3U(type, ==, ARC_BUFC_DATA);
1730 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1731 size, arc_buf_lsize(db->db_buf), compress_type,
1732 complevel);
1733 } else {
1734 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1735 }
1736 memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1737 } else {
1738 db->db_buf = NULL;
1739 dbuf_clear_data(db);
1740 }
1741 }
1742
1743 int
1744 dbuf_read(dmu_buf_impl_t *db, zio_t *pio, dmu_flags_t flags)
1745 {
1746 dnode_t *dn;
1747 boolean_t miss = B_TRUE, need_wait = B_FALSE, prefetch;
1748 int err;
1749
1750 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1751
1752 DB_DNODE_ENTER(db);
1753 dn = DB_DNODE(db);
1754
1755 /*
1756 * Ensure that this block's dnode has been decrypted if the caller
1757 * has requested decrypted data.
1758 */
1759 err = dbuf_read_verify_dnode_crypt(db, dn, flags);
1760 if (err != 0)
1761 goto done;
1762
1763 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1764 (flags & DMU_READ_NO_PREFETCH) == 0;
1765
1766 mutex_enter(&db->db_mtx);
1767 if (!(flags & (DMU_UNCACHEDIO | DMU_KEEP_CACHING)))
1768 db->db_pending_evict = B_FALSE;
1769 if (flags & DMU_PARTIAL_FIRST)
1770 db->db_partial_read = B_TRUE;
1771 else if (!(flags & (DMU_PARTIAL_MORE | DMU_KEEP_CACHING)))
1772 db->db_partial_read = B_FALSE;
1773 miss = (db->db_state != DB_CACHED);
1774
1775 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1776 /*
1777 * Another reader came in while the dbuf was in flight between
1778 * UNCACHED and CACHED. Either a writer will finish filling
1779 * the buffer, sending the dbuf to CACHED, or the first reader's
1780 * request will reach the read_done callback and send the dbuf
1781 * to CACHED. Otherwise, a failure occurred and the dbuf will
1782 * be sent to UNCACHED.
1783 */
1784 if (flags & DB_RF_NEVERWAIT) {
1785 mutex_exit(&db->db_mtx);
1786 DB_DNODE_EXIT(db);
1787 goto done;
1788 }
1789 do {
1790 ASSERT(db->db_state == DB_READ ||
1791 (flags & DB_RF_HAVESTRUCT) == 0);
1792 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, db,
1793 zio_t *, pio);
1794 cv_wait(&db->db_changed, &db->db_mtx);
1795 } while (db->db_state == DB_READ || db->db_state == DB_FILL);
1796 if (db->db_state == DB_UNCACHED) {
1797 err = SET_ERROR(EIO);
1798 mutex_exit(&db->db_mtx);
1799 DB_DNODE_EXIT(db);
1800 goto done;
1801 }
1802 }
1803
1804 if (db->db_state == DB_CACHED) {
1805 /*
1806 * If the arc buf is compressed or encrypted and the caller
1807 * requested uncompressed data, we need to untransform it
1808 * before returning. We also call arc_untransform() on any
1809 * unauthenticated blocks, which will verify their MAC if
1810 * the key is now available.
1811 */
1812 if ((flags & DMU_READ_NO_DECRYPT) == 0 && db->db_buf != NULL &&
1813 (arc_is_encrypted(db->db_buf) ||
1814 arc_is_unauthenticated(db->db_buf) ||
1815 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1816 spa_t *spa = dn->dn_objset->os_spa;
1817 zbookmark_phys_t zb;
1818
1819 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1820 db->db.db_object, db->db_level, db->db_blkid);
1821 dbuf_fix_old_data(db, spa_syncing_txg(spa));
1822 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1823 dbuf_set_data(db, db->db_buf);
1824 }
1825 mutex_exit(&db->db_mtx);
1826 } else {
1827 ASSERT(db->db_state == DB_UNCACHED ||
1828 db->db_state == DB_NOFILL);
1829 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1830 blkptr_t *bp;
1831
1832 /*
1833 * If a block clone or Direct I/O write has occurred we will
1834 * get the dirty records overridden BP so we get the most
1835 * recent data.
1836 */
1837 err = dmu_buf_get_bp_from_dbuf(db, &bp);
1838
1839 if (!err) {
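/*
 * If the caller did not supply a parent zio but we are about to
 * issue real I/O, create a root zio here so we can wait for the
 * read to complete below.
 */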
1840 if (pio == NULL && (db->db_state == DB_NOFILL ||
1841 (bp != NULL && !BP_IS_HOLE(bp)))) {
1842 spa_t *spa = dn->dn_objset->os_spa;
1843 pio =
1844 zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1845 need_wait = B_TRUE;
1846 }
1847
1848 err =
1849 dbuf_read_impl(db, dn, pio, flags, dblt, bp, FTAG);
1850 } else {
1851 mutex_exit(&db->db_mtx);
1852 dmu_buf_unlock_parent(db, dblt, FTAG);
1853 }
1854 /* dbuf_read_impl drops db_mtx and parent's rwlock. */
1855 miss = (db->db_state != DB_CACHED);
1856 }
1857
1858 if (err == 0 && prefetch) {
1859 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, miss,
1860 flags & DB_RF_HAVESTRUCT, (flags & DMU_UNCACHEDIO) ||
1861 db->db_pending_evict);
1862 }
1863 DB_DNODE_EXIT(db);
1864
1865 /*
1866 * If we created a zio we must execute it to avoid leaking it, even if
1867 * it isn't attached to any work due to an error in dbuf_read_impl().
1868 */
1869 if (need_wait) {
1870 if (err == 0)
1871 err = zio_wait(pio);
1872 else
1873 (void) zio_wait(pio);
1874 pio = NULL;
1875 }
1876
1877 done:
1878 if (miss)
1879 DBUF_STAT_BUMP(hash_misses);
1880 else
1881 DBUF_STAT_BUMP(hash_hits);
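/*
 * If the caller provided a parent zio, propagate our error to it
 * through a dummy child zio so the failure is observed when the
 * caller eventually waits on the parent.
 */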
1882 if (pio && err != 0) {
1883 zio_t *zio = zio_null(pio, pio->io_spa, NULL, NULL, NULL,
1884 ZIO_FLAG_CANFAIL);
1885 zio->io_error = err;
1886 zio_nowait(zio);
1887 }
1888
1889 return (err);
1890 }
1891
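/*
 * Prepare a dbuf to be overwritten without first reading the existing
 * block from disk: wait for any in-flight read or fill, then attach an
 * empty buffer if the dbuf is not already cached.
 */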
1892 static void
1893 dbuf_noread(dmu_buf_impl_t *db, dmu_flags_t flags)
1894 {
1895 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1896 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1897 mutex_enter(&db->db_mtx);
1898 if (!(flags & (DMU_UNCACHEDIO | DMU_KEEP_CACHING)))
1899 db->db_pending_evict = B_FALSE;
1900 db->db_partial_read = B_FALSE;
1901 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1902 cv_wait(&db->db_changed, &db->db_mtx);
1903 if (db->db_state == DB_UNCACHED) {
1904 ASSERT0P(db->db_buf);
1905 ASSERT0P(db->db.db_data);
1906 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1907 db->db_state = DB_FILL;
1908 DTRACE_SET_STATE(db, "assigning filled buffer");
1909 } else if (db->db_state == DB_NOFILL) {
1910 dbuf_clear_data(db);
1911 } else {
1912 ASSERT3U(db->db_state, ==, DB_CACHED);
1913 }
1914 mutex_exit(&db->db_mtx);
1915 }
1916
1917 void
1918 dbuf_unoverride(dbuf_dirty_record_t *dr)
1919 {
1920 dmu_buf_impl_t *db = dr->dr_dbuf;
1921 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1922 uint64_t txg = dr->dr_txg;
1923
1924 ASSERT(MUTEX_HELD(&db->db_mtx));
1925
1926 /*
1927 * This assert is valid because dmu_sync() expects to be called by
1928 * a zilog's get_data while holding a range lock. This call only
1929 * comes from dbuf_dirty() callers who must also hold a range lock.
1930 */
1931 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1932 ASSERT0(db->db_level);
1933
1934 if (db->db_blkid == DMU_BONUS_BLKID ||
1935 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1936 return;
1937
1938 ASSERT(db->db_data_pending != dr);
1939
1940 /* free this block */
1941 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1942 zio_free(db->db_objset->os_spa, txg, bp);
1943
1944 if (dr->dt.dl.dr_brtwrite || dr->dt.dl.dr_diowrite) {
1945 ASSERT0P(dr->dt.dl.dr_data);
1946 dr->dt.dl.dr_data = db->db_buf;
1947 }
1948 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1949 dr->dt.dl.dr_nopwrite = B_FALSE;
1950 dr->dt.dl.dr_brtwrite = B_FALSE;
1951 dr->dt.dl.dr_diowrite = B_FALSE;
1952 dr->dt.dl.dr_has_raw_params = B_FALSE;
1953
1954 /*
1955 * In the event that Direct I/O was used, we do not
1956 * need to release the buffer from the ARC.
1957 *
1958 * Release the already-written buffer, so we leave it in
1959 * a consistent dirty state. Note that all callers are
1960 * modifying the buffer, so they will immediately do
1961 * another (redundant) arc_release(). Therefore, leave
1962 * the buf thawed to save the effort of freezing &
1963 * immediately re-thawing it.
1964 */
1965 if (dr->dt.dl.dr_data)
1966 arc_release(dr->dt.dl.dr_data, db);
1967 }
1968
1969 /*
1970 * Evict (if it is unreferenced) or clear (if it is referenced) any level-0
1971 * data blocks in the free range, so that any future readers will find
1972 * empty blocks.
1973 */
1974 void
1975 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1976 dmu_tx_t *tx)
1977 {
1978 dmu_buf_impl_t *db_search;
1979 dmu_buf_impl_t *db, *db_next;
1980 uint64_t txg = tx->tx_txg;
1981 avl_index_t where;
1982 dbuf_dirty_record_t *dr;
1983
1984 if (end_blkid > dn->dn_maxblkid &&
1985 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1986 end_blkid = dn->dn_maxblkid;
1987 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1988 (u_longlong_t)end_blkid);
1989
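/*
 * Allocate a search key; DB_SEARCH marks it as a stand-in used only to
 * locate the first level-0 dbuf at or after start_blkid in the AVL tree.
 */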
1990 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1991 db_search->db_level = 0;
1992 db_search->db_blkid = start_blkid;
1993 db_search->db_state = DB_SEARCH;
1994
1995 mutex_enter(&dn->dn_dbufs_mtx);
1996 db = avl_find(&dn->dn_dbufs, db_search, &where);
1997 ASSERT0P(db);
1998
1999 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
2000
2001 for (; db != NULL; db = db_next) {
2002 db_next = AVL_NEXT(&dn->dn_dbufs, db);
2003 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2004
2005 if (db->db_level != 0 || db->db_blkid > end_blkid) {
2006 break;
2007 }
2008 ASSERT3U(db->db_blkid, >=, start_blkid);
2009
2010 /* found a level 0 buffer in the range */
2011 mutex_enter(&db->db_mtx);
2012 if (dbuf_undirty(db, tx)) {
2013 /* mutex has been dropped and dbuf destroyed */
2014 continue;
2015 }
2016
2017 if (db->db_state == DB_UNCACHED ||
2018 db->db_state == DB_NOFILL ||
2019 db->db_state == DB_EVICTING) {
2020 ASSERT0P(db->db.db_data);
2021 mutex_exit(&db->db_mtx);
2022 continue;
2023 }
2024 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2025 /* will be handled in dbuf_read_done or dbuf_rele */
2026 db->db_freed_in_flight = TRUE;
2027 mutex_exit(&db->db_mtx);
2028 continue;
2029 }
2030 if (zfs_refcount_count(&db->db_holds) == 0) {
2031 ASSERT(db->db_buf);
2032 dbuf_destroy(db);
2033 continue;
2034 }
2035 /* The dbuf is referenced */
2036
2037 dr = list_head(&db->db_dirty_records);
2038 if (dr != NULL) {
2039 if (dr->dr_txg == txg) {
2040 /*
2041 * This buffer is "in-use", re-adjust the file
2042 * size to reflect that this buffer may
2043 * contain new data when we sync.
2044 */
2045 if (db->db_blkid != DMU_SPILL_BLKID &&
2046 db->db_blkid > dn->dn_maxblkid)
2047 dn->dn_maxblkid = db->db_blkid;
2048 dbuf_unoverride(dr);
2049 } else {
2050 /*
2051 * This dbuf is not dirty in the open context.
2052 * Either uncache it (if it is not referenced in
2053 * the open context) or reset its contents to
2054 * empty.
2055 */
2056 dbuf_fix_old_data(db, txg);
2057 }
2058 }
2059 /* clear the contents if it is cached */
2060 if (db->db_state == DB_CACHED) {
2061 ASSERT(db->db.db_data != NULL);
2062 arc_release(db->db_buf, db);
2063 rw_enter(&db->db_rwlock, RW_WRITER);
2064 memset(db->db.db_data, 0, db->db.db_size);
2065 rw_exit(&db->db_rwlock);
2066 arc_buf_freeze(db->db_buf);
2067 }
2068
2069 mutex_exit(&db->db_mtx);
2070 }
2071
2072 mutex_exit(&dn->dn_dbufs_mtx);
2073 kmem_free(db_search, sizeof (dmu_buf_impl_t));
2074 }
2075
2076 void
2077 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2078 {
2079 arc_buf_t *buf, *old_buf;
2080 dbuf_dirty_record_t *dr;
2081 int osize = db->db.db_size;
2082 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2083 dnode_t *dn;
2084
2085 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2086
2087 DB_DNODE_ENTER(db);
2088 dn = DB_DNODE(db);
2089
2090 /*
2091 * XXX we should be doing a dbuf_read, checking the return
2092 * value and returning that up to our callers
2093 */
2094 dmu_buf_will_dirty(&db->db, tx);
2095
2096 VERIFY3P(db->db_buf, !=, NULL);
2097
2098 /* create the data buffer for the new block */
2099 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2100
2101 /* copy old block data to the new block */
2102 old_buf = db->db_buf;
2103 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2104 /* zero the remainder */
2105 if (size > osize)
2106 memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2107
2108 mutex_enter(&db->db_mtx);
2109 dbuf_set_data(db, buf);
2110 arc_buf_destroy(old_buf, db);
2111 db->db.db_size = size;
2112
2113 dr = list_head(&db->db_dirty_records);
2114 /* dirty record added by dmu_buf_will_dirty() */
2115 VERIFY(dr != NULL);
2116 if (db->db_level == 0)
2117 dr->dt.dl.dr_data = buf;
2118 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2119 ASSERT3U(dr->dr_accounted, ==, osize);
2120 dr->dr_accounted = size;
2121 mutex_exit(&db->db_mtx);
2122
2123 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2124 DB_DNODE_EXIT(db);
2125 }
2126
2127 void
2128 dbuf_release_bp(dmu_buf_impl_t *db)
2129 {
2130 objset_t *os __maybe_unused = db->db_objset;
2131
2132 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2133 ASSERT(arc_released(os->os_phys_buf) ||
2134 list_link_active(&os->os_dsl_dataset->ds_synced_link));
2135 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2136
2137 (void) arc_release(db->db_buf, db);
2138 }
2139
2140 /*
2141 * We already have a dirty record for this TXG, and we are being
2142 * dirtied again.
2143 */
2144 static void
2145 dbuf_redirty(dbuf_dirty_record_t *dr)
2146 {
2147 dmu_buf_impl_t *db = dr->dr_dbuf;
2148
2149 ASSERT(MUTEX_HELD(&db->db_mtx));
2150
2151 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2152 /*
2153 * If this buffer has already been written out,
2154 * we now need to reset its state.
2155 */
2156 dbuf_unoverride(dr);
2157 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2158 db->db_state != DB_NOFILL) {
2159 /* Already released on initial dirty, so just thaw. */
2160 ASSERT(arc_released(db->db_buf));
2161 arc_buf_thaw(db->db_buf);
2162 }
2163
2164 /*
2165 * Clear the rewrite flag since this is now a logical
2166 * modification.
2167 */
2168 dr->dt.dl.dr_rewrite = B_FALSE;
2169 }
2170 }
2171
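/*
 * Dirty a block without instantiating a dbuf for it. The returned
 * lightweight dirty record carries the block id directly (dr->dt.dll)
 * and is linked under the dirtied parent. Returns NULL if the parent
 * indirect block could not be held or read.
 */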
2172 dbuf_dirty_record_t *
2173 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2174 {
2175 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2176 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2177 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2178 ASSERT(dn->dn_maxblkid >= blkid);
2179
2180 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2181 list_link_init(&dr->dr_dirty_node);
2182 list_link_init(&dr->dr_dbuf_node);
2183 dr->dr_dnode = dn;
2184 dr->dr_txg = tx->tx_txg;
2185 dr->dt.dll.dr_blkid = blkid;
2186 dr->dr_accounted = dn->dn_datablksz;
2187
2188 /*
2189 * There should not be any dbuf for the block that we're dirtying.
2190 * Otherwise the buffer contents could be inconsistent between the
2191 * dbuf and the lightweight dirty record.
2192 */
2193 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2194 NULL));
2195
2196 mutex_enter(&dn->dn_mtx);
2197 int txgoff = tx->tx_txg & TXG_MASK;
2198 if (dn->dn_free_ranges[txgoff] != NULL) {
2199 zfs_range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2200 }
2201
2202 if (dn->dn_nlevels == 1) {
2203 ASSERT3U(blkid, <, dn->dn_nblkptr);
2204 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2205 mutex_exit(&dn->dn_mtx);
2206 rw_exit(&dn->dn_struct_rwlock);
2207 dnode_setdirty(dn, tx);
2208 } else {
2209 mutex_exit(&dn->dn_mtx);
2210
2211 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2212 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2213 1, blkid >> epbs, FTAG);
2214 rw_exit(&dn->dn_struct_rwlock);
2215 if (parent_db == NULL) {
2216 kmem_free(dr, sizeof (*dr));
2217 return (NULL);
2218 }
2219 int err = dbuf_read(parent_db, NULL, DB_RF_CANFAIL |
2220 DMU_READ_NO_PREFETCH);
2221 if (err != 0) {
2222 dbuf_rele(parent_db, FTAG);
2223 kmem_free(dr, sizeof (*dr));
2224 return (NULL);
2225 }
2226
2227 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2228 dbuf_rele(parent_db, FTAG);
2229 mutex_enter(&parent_dr->dt.di.dr_mtx);
2230 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2231 list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2232 mutex_exit(&parent_dr->dt.di.dr_mtx);
2233 dr->dr_parent = parent_dr;
2234 }
2235
2236 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2237
2238 return (dr);
2239 }
2240
2241 dbuf_dirty_record_t *
2242 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2243 {
2244 dnode_t *dn;
2245 objset_t *os;
2246 dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2247 int txgoff = tx->tx_txg & TXG_MASK;
2248 boolean_t drop_struct_rwlock = B_FALSE;
2249
2250 ASSERT(tx->tx_txg != 0);
2251 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2252 DMU_TX_DIRTY_BUF(tx, db);
2253
2254 DB_DNODE_ENTER(db);
2255 dn = DB_DNODE(db);
2256 /*
2257 * Shouldn't dirty a regular buffer in syncing context. Private
2258 * objects may be dirtied in syncing context, but only if they
2259 * were already pre-dirtied in open context.
2260 */
2261 #ifdef ZFS_DEBUG
2262 if (dn->dn_objset->os_dsl_dataset != NULL) {
2263 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2264 RW_READER, FTAG);
2265 }
2266 ASSERT(!dmu_tx_is_syncing(tx) ||
2267 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2268 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2269 dn->dn_objset->os_dsl_dataset == NULL);
2270 if (dn->dn_objset->os_dsl_dataset != NULL)
2271 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2272 #endif
2273
2274 mutex_enter(&db->db_mtx);
2275 /*
2276 * XXX make this true for indirects too? The problem is that
2277 * transactions created with dmu_tx_create_assigned() from
2278 * syncing context don't bother holding ahead.
2279 */
2280 ASSERT(db->db_level != 0 ||
2281 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2282 db->db_state == DB_NOFILL);
2283
2284 if (db->db_blkid == DMU_SPILL_BLKID)
2285 dn->dn_have_spill = B_TRUE;
2286
2287 /*
2288 * If this buffer is already dirty, we're done.
2289 */
2290 dr_head = list_head(&db->db_dirty_records);
2291 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2292 db->db.db_object == DMU_META_DNODE_OBJECT);
2293 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2294 if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2295 DB_DNODE_EXIT(db);
2296
2297 dbuf_redirty(dr_next);
2298 mutex_exit(&db->db_mtx);
2299 return (dr_next);
2300 }
2301
2302 ASSERT3U(dn->dn_nlevels, >, db->db_level);
2303
2304 /*
2305 * We should only be dirtying in syncing context if it's the
2306 * mos or we're initializing the os or it's a special object.
2307 * However, we are allowed to dirty in syncing context provided
2308 * we already dirtied it in open context. Hence we must make
2309 * this assertion only if we're not already dirty.
2310 */
2311 os = dn->dn_objset;
2312 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2313 #ifdef ZFS_DEBUG
2314 if (dn->dn_objset->os_dsl_dataset != NULL)
2315 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2316 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2317 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2318 if (dn->dn_objset->os_dsl_dataset != NULL)
2319 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2320 #endif
2321 ASSERT(db->db.db_size != 0);
2322
2323 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2324
2325 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2326 dmu_objset_willuse_space(os, db->db.db_size, tx);
2327 }
2328
2329 /*
2330 * If this buffer is dirty in an old transaction group we need
2331 * to make a copy of it so that the changes we make in this
2332 * transaction group won't leak out when we sync the older txg.
2333 */
2334 dr = kmem_cache_alloc(dbuf_dirty_kmem_cache, KM_SLEEP);
2335 memset(dr, 0, sizeof (*dr));
2336 list_link_init(&dr->dr_dirty_node);
2337 list_link_init(&dr->dr_dbuf_node);
2338 dr->dr_dnode = dn;
2339 if (db->db_level == 0) {
2340 void *data_old = db->db_buf;
2341
2342 if (db->db_state != DB_NOFILL) {
2343 if (db->db_blkid == DMU_BONUS_BLKID) {
2344 dbuf_fix_old_data(db, tx->tx_txg);
2345 data_old = db->db.db_data;
2346 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2347 /*
2348 * Release the data buffer from the cache so
2349 * that we can modify it without impacting
2350 * possible other users of this cached data
2351 * block. Note that indirect blocks and
2352 * private objects are not released until the
2353 * syncing state (since they are only modified
2354 * then).
2355 */
2356 arc_release(db->db_buf, db);
2357 dbuf_fix_old_data(db, tx->tx_txg);
2358 data_old = db->db_buf;
2359 }
2360 ASSERT(data_old != NULL);
2361 }
2362 dr->dt.dl.dr_data = data_old;
2363 } else {
2364 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2365 list_create(&dr->dt.di.dr_children,
2366 sizeof (dbuf_dirty_record_t),
2367 offsetof(dbuf_dirty_record_t, dr_dirty_node));
2368 }
2369 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2370 dr->dr_accounted = db->db.db_size;
2371 }
2372 dr->dr_dbuf = db;
2373 dr->dr_txg = tx->tx_txg;
2374 list_insert_before(&db->db_dirty_records, dr_next, dr);
2375
2376 /*
2377 * We could have been freed_in_flight between the dbuf_noread
2378 * and dbuf_dirty. We win, as though the dbuf_noread() had
2379 * happened after the free.
2380 */
2381 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2382 db->db_blkid != DMU_SPILL_BLKID) {
2383 mutex_enter(&dn->dn_mtx);
2384 if (dn->dn_free_ranges[txgoff] != NULL) {
2385 zfs_range_tree_clear(dn->dn_free_ranges[txgoff],
2386 db->db_blkid, 1);
2387 }
2388 mutex_exit(&dn->dn_mtx);
2389 db->db_freed_in_flight = FALSE;
2390 }
2391
2392 /*
2393 * This buffer is now part of this txg
2394 */
2395 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2396 db->db_dirtycnt += 1;
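/*
 * A dbuf can have at most one dirty record per txg, and at most three
 * txgs (open, quiescing, and syncing) can be in flight at once.
 */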
2397 ASSERT3U(db->db_dirtycnt, <=, 3);
2398
2399 mutex_exit(&db->db_mtx);
2400
2401 if (db->db_blkid == DMU_BONUS_BLKID ||
2402 db->db_blkid == DMU_SPILL_BLKID) {
2403 mutex_enter(&dn->dn_mtx);
2404 ASSERT(!list_link_active(&dr->dr_dirty_node));
2405 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2406 mutex_exit(&dn->dn_mtx);
2407 dnode_setdirty(dn, tx);
2408 DB_DNODE_EXIT(db);
2409 return (dr);
2410 }
2411
2412 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2413 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2414 drop_struct_rwlock = B_TRUE;
2415 }
2416
2417 /*
2418 * If we are overwriting a dedup BP, then unless it is snapshotted,
2419 * when we get to syncing context we will need to decrement its
2420 * refcount in the DDT. Prefetch the relevant DDT block so that
2421 * syncing context won't have to wait for the i/o.
2422 */
2423 if (db->db_blkptr != NULL) {
2424 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2425 ddt_prefetch(os->os_spa, db->db_blkptr);
2426 dmu_buf_unlock_parent(db, dblt, FTAG);
2427 }
2428
2429 /*
2430 * We need to hold the dn_struct_rwlock to make this assertion,
2431 * because it protects dn_phys / dn_next_nlevels from changing.
2432 */
2433 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2434 dn->dn_phys->dn_nlevels > db->db_level ||
2435 dn->dn_next_nlevels[txgoff] > db->db_level ||
2436 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2437 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2438
2439
2440 if (db->db_level == 0) {
2441 ASSERT(!db->db_objset->os_raw_receive ||
2442 dn->dn_maxblkid >= db->db_blkid);
2443 dnode_new_blkid(dn, db->db_blkid, tx,
2444 drop_struct_rwlock, B_FALSE);
2445 ASSERT(dn->dn_maxblkid >= db->db_blkid);
2446 }
2447
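/*
 * Link the new dirty record into the dirty tree: under the parent
 * indirect block's dirty record if one exists, otherwise directly on
 * the dnode's dirty list for this txg.
 */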
2448 if (db->db_level+1 < dn->dn_nlevels) {
2449 dmu_buf_impl_t *parent = db->db_parent;
2450 dbuf_dirty_record_t *di;
2451 int parent_held = FALSE;
2452
2453 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2454 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2455 parent = dbuf_hold_level(dn, db->db_level + 1,
2456 db->db_blkid >> epbs, FTAG);
2457 ASSERT(parent != NULL);
2458 parent_held = TRUE;
2459 }
2460 if (drop_struct_rwlock)
2461 rw_exit(&dn->dn_struct_rwlock);
2462 ASSERT3U(db->db_level + 1, ==, parent->db_level);
2463 di = dbuf_dirty(parent, tx);
2464 if (parent_held)
2465 dbuf_rele(parent, FTAG);
2466
2467 mutex_enter(&db->db_mtx);
2468 /*
2469 * Since we've dropped the mutex, it's possible that
2470 * dbuf_undirty() might have changed this out from under us.
2471 */
2472 if (list_head(&db->db_dirty_records) == dr ||
2473 dn->dn_object == DMU_META_DNODE_OBJECT) {
2474 mutex_enter(&di->dt.di.dr_mtx);
2475 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2476 ASSERT(!list_link_active(&dr->dr_dirty_node));
2477 list_insert_tail(&di->dt.di.dr_children, dr);
2478 mutex_exit(&di->dt.di.dr_mtx);
2479 dr->dr_parent = di;
2480 }
2481 mutex_exit(&db->db_mtx);
2482 } else {
2483 ASSERT(db->db_level + 1 == dn->dn_nlevels);
2484 ASSERT(db->db_blkid < dn->dn_nblkptr);
2485 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2486 mutex_enter(&dn->dn_mtx);
2487 ASSERT(!list_link_active(&dr->dr_dirty_node));
2488 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2489 mutex_exit(&dn->dn_mtx);
2490 if (drop_struct_rwlock)
2491 rw_exit(&dn->dn_struct_rwlock);
2492 }
2493
2494 dnode_setdirty(dn, tx);
2495 DB_DNODE_EXIT(db);
2496 return (dr);
2497 }
2498
2499 static void
2500 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2501 {
2502 dmu_buf_impl_t *db = dr->dr_dbuf;
2503
2504 ASSERT(MUTEX_HELD(&db->db_mtx));
2505 if (dr->dt.dl.dr_data != db->db.db_data) {
2506 struct dnode *dn = dr->dr_dnode;
2507 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2508
2509 kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2510 arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2511 }
2512 db->db_data_pending = NULL;
2513 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2514 list_remove(&db->db_dirty_records, dr);
2515 if (dr->dr_dbuf->db_level != 0) {
2516 mutex_destroy(&dr->dt.di.dr_mtx);
2517 list_destroy(&dr->dt.di.dr_children);
2518 }
2519 kmem_cache_free(dbuf_dirty_kmem_cache, dr);
2520 ASSERT3U(db->db_dirtycnt, >, 0);
2521 db->db_dirtycnt -= 1;
2522 }
2523
2524 /*
2525 * Undirty a buffer in the transaction group referenced by the given
2526 * transaction. Return whether this evicted the dbuf.
2527 */
2528 boolean_t
2529 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2530 {
2531 uint64_t txg = tx->tx_txg;
2532 boolean_t brtwrite;
2533 boolean_t diowrite;
2534
2535 ASSERT(txg != 0);
2536
2537 /*
2538 * Due to our use of dn_nlevels below, this can only be called
2539 * in open context, unless we are operating on the MOS or it's
2540 * a special object. From syncing context, dn_nlevels may be
2541 * different from the dn_nlevels used when dbuf was dirtied.
2542 */
2543 ASSERT(db->db_objset ==
2544 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2545 DMU_OBJECT_IS_SPECIAL(db->db.db_object) ||
2546 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2547 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2548 ASSERT0(db->db_level);
2549 ASSERT(MUTEX_HELD(&db->db_mtx));
2550
2551 /*
2552 * If this buffer is not dirty, we're done.
2553 */
2554 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2555 if (dr == NULL)
2556 return (B_FALSE);
2557 ASSERT(dr->dr_dbuf == db);
2558
2559 brtwrite = dr->dt.dl.dr_brtwrite;
2560 diowrite = dr->dt.dl.dr_diowrite;
2561 if (brtwrite) {
2562 ASSERT3B(diowrite, ==, B_FALSE);
2563 /*
2564 * We are freeing a block that we cloned in the same
2565 * transaction group.
2566 */
2567 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
2568 if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
2569 brt_pending_remove(dmu_objset_spa(db->db_objset),
2570 bp, tx);
2571 }
2572 }
2573
2574 dnode_t *dn = dr->dr_dnode;
2575
2576 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2577
2578 ASSERT(db->db.db_size != 0);
2579
2580 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2581 dr->dr_accounted, txg);
2582
2583 list_remove(&db->db_dirty_records, dr);
2584
2585 /*
2586 * Note that there are three places in dbuf_dirty()
2587 * where this dirty record may be put on a list.
2588 * Make sure to do a list_remove corresponding to
2589 * every one of those list_insert calls.
2590 */
2591 if (dr->dr_parent) {
2592 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2593 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2594 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2595 } else if (db->db_blkid == DMU_SPILL_BLKID ||
2596 db->db_level + 1 == dn->dn_nlevels) {
2597 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2598 mutex_enter(&dn->dn_mtx);
2599 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2600 mutex_exit(&dn->dn_mtx);
2601 }
2602
2603 if (db->db_state != DB_NOFILL && !brtwrite) {
2604 dbuf_unoverride(dr);
2605
2606 if (dr->dt.dl.dr_data != db->db_buf) {
2607 ASSERT(db->db_buf != NULL);
2608 ASSERT(dr->dt.dl.dr_data != NULL);
2609 arc_buf_destroy(dr->dt.dl.dr_data, db);
2610 }
2611 }
2612
2613 kmem_cache_free(dbuf_dirty_kmem_cache, dr);
2614
2615 ASSERT(db->db_dirtycnt > 0);
2616 db->db_dirtycnt -= 1;
2617
2618 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2619 ASSERT(db->db_state == DB_NOFILL || brtwrite || diowrite ||
2620 arc_released(db->db_buf));
2621 dbuf_destroy(db);
2622 return (B_TRUE);
2623 }
2624
2625 return (B_FALSE);
2626 }
2627
2628 void
2629 dmu_buf_will_dirty_flags(dmu_buf_t *db_fake, dmu_tx_t *tx, dmu_flags_t flags)
2630 {
2631 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2632 boolean_t undirty = B_FALSE;
2633
2634 ASSERT(tx->tx_txg != 0);
2635 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2636
2637 /*
2638 * Quick check for dirtiness to improve performance for some workloads
2639 * (e.g. file deletion with indirect blocks cached).
2640 */
2641 mutex_enter(&db->db_mtx);
2642 if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
2643 /*
2644 * It's possible that the dbuf is already dirty but not cached,
2645 * because there are some calls to dbuf_dirty() that don't
2646 * go through dmu_buf_will_dirty().
2647 */
2648 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2649 if (dr != NULL) {
2650 if (db->db_level == 0 &&
2651 dr->dt.dl.dr_brtwrite) {
2652 /*
2653 * Block cloning: If we are dirtying a cloned
2654 * level 0 block, we cannot simply redirty it,
2655 * because this dr has no associated data.
2656 * We will go through a full undirtying below,
2657 * before dirtying it again.
2658 */
2659 undirty = B_TRUE;
2660 } else {
2661 /* This dbuf is already dirty and cached. */
2662 dbuf_redirty(dr);
2663 mutex_exit(&db->db_mtx);
2664 return;
2665 }
2666 }
2667 }
2668 mutex_exit(&db->db_mtx);
2669
2670 DB_DNODE_ENTER(db);
2671 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2672 flags |= DB_RF_HAVESTRUCT;
2673 DB_DNODE_EXIT(db);
2674
2675 /*
2676 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
2677 * want to make sure dbuf_read() will read the pending cloned block and
2678 * not the underlying block that is being replaced. dbuf_undirty() will
2679 * do brt_pending_remove() before removing the dirty record.
2680 */
2681 (void) dbuf_read(db, NULL, flags | DB_RF_MUST_SUCCEED);
2682 if (undirty) {
2683 mutex_enter(&db->db_mtx);
2684 VERIFY(!dbuf_undirty(db, tx));
2685 mutex_exit(&db->db_mtx);
2686 }
2687 (void) dbuf_dirty(db, tx);
2688 }
2689
2690 void
2691 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2692 {
2693 dmu_buf_will_dirty_flags(db_fake, tx, DMU_READ_NO_PREFETCH);
2694 }
2695
2696 void
2697 dmu_buf_will_rewrite(dmu_buf_t *db_fake, dmu_tx_t *tx)
2698 {
2699 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2700
2701 ASSERT(tx->tx_txg != 0);
2702 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2703
2704 /*
2705 * If the dbuf is already dirty in this txg, it will be written
2706 * anyway, so there's nothing to do.
2707 */
2708 mutex_enter(&db->db_mtx);
2709 if (dbuf_find_dirty_eq(db, tx->tx_txg) != NULL) {
2710 mutex_exit(&db->db_mtx);
2711 return;
2712 }
2713 mutex_exit(&db->db_mtx);
2714
2715 /*
2716 * The dbuf is not dirty, so we need to make it dirty and
2717 * mark it for rewrite (preserve logical birth time).
2718 */
2719 dmu_buf_will_dirty_flags(db_fake, tx, DMU_READ_NO_PREFETCH);
2720
2721 mutex_enter(&db->db_mtx);
2722 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2723 if (dr != NULL && db->db_level == 0)
2724 dr->dt.dl.dr_rewrite = B_TRUE;
2725 mutex_exit(&db->db_mtx);
2726 }
2727
2728 boolean_t
2729 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2730 {
2731 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2732 dbuf_dirty_record_t *dr;
2733
2734 mutex_enter(&db->db_mtx);
2735 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2736 mutex_exit(&db->db_mtx);
2737 return (dr != NULL);
2738 }
2739
2740 /*
2741 * Normally the db_blkptr points to the most recent on-disk content for the
2742 * dbuf (and anything newer will be cached in the dbuf). However, a pending
2743 * block clone or not yet synced Direct I/O write will have a dirty record BP
2744 * pointing to the most recent data.
2745 */
2746 int
2747 dmu_buf_get_bp_from_dbuf(dmu_buf_impl_t *db, blkptr_t **bp)
2748 {
2749 ASSERT(MUTEX_HELD(&db->db_mtx));
2750 int error = 0;
2751
2752 if (db->db_level != 0) {
2753 *bp = db->db_blkptr;
2754 return (0);
2755 }
2756
2757 *bp = db->db_blkptr;
2758 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2759 if (dr && db->db_state == DB_NOFILL) {
2760 /* Block clone */
2761 if (!dr->dt.dl.dr_brtwrite)
2762 error = EIO;
2763 else
2764 *bp = &dr->dt.dl.dr_overridden_by;
2765 } else if (dr && db->db_state == DB_UNCACHED) {
2766 /* Direct I/O write */
2767 if (dr->dt.dl.dr_diowrite)
2768 *bp = &dr->dt.dl.dr_overridden_by;
2769 }
2770
2771 return (error);
2772 }
2773
2774 /*
2775 * Direct I/O reads can read directly from the ARC, but the data has
2776 * to be untransformed in order to copy it over into user pages.
2777 */
2778 int
2779 dmu_buf_untransform_direct(dmu_buf_impl_t *db, spa_t *spa)
2780 {
2781 int err = 0;
2782 DB_DNODE_ENTER(db);
2783 dnode_t *dn = DB_DNODE(db);
2784
2785 ASSERT3S(db->db_state, ==, DB_CACHED);
2786 ASSERT(MUTEX_HELD(&db->db_mtx));
2787
2788 /*
2789 * Ensure that this block's dnode has been decrypted if
2790 * the caller has requested decrypted data.
2791 */
2792 err = dbuf_read_verify_dnode_crypt(db, dn, 0);
2793
2794 /*
2795 * If the arc buf is compressed or encrypted and the caller
2796 * requested uncompressed data, we need to untransform it
2797 * before returning. We also call arc_untransform() on any
2798 * unauthenticated blocks, which will verify their MAC if
2799 * the key is now available.
2800 */
2801 if (err == 0 && db->db_buf != NULL &&
2802 (arc_is_encrypted(db->db_buf) ||
2803 arc_is_unauthenticated(db->db_buf) ||
2804 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
2805 zbookmark_phys_t zb;
2806
2807 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
2808 db->db.db_object, db->db_level, db->db_blkid);
2809 dbuf_fix_old_data(db, spa_syncing_txg(spa));
2810 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
2811 dbuf_set_data(db, db->db_buf);
2812 }
2813 DB_DNODE_EXIT(db);
2814 DBUF_STAT_BUMP(hash_hits);
2815
2816 return (err);
2817 }
2818
2819 void
2820 dmu_buf_will_clone_or_dio(dmu_buf_t *db_fake, dmu_tx_t *tx)
2821 {
2822 /*
2823 * Block clones and Direct I/O writes always happen in open-context.
2824 */
2825 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2826 ASSERT0(db->db_level);
2827 ASSERT(!dmu_tx_is_syncing(tx));
2828 ASSERT0(db->db_level);
2829 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2830 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
2831
2832 mutex_enter(&db->db_mtx);
2833 DBUF_VERIFY(db);
2834
2835 /*
2836 * We are going to clone or issue a Direct I/O write on this block, so
2837 * undirty modifications done to this block so far in this txg. This
2838 * includes writes and clones into this block.
2839 *
2840 * If there is a dirty record associated with this txg from a previous
2841 * Direct I/O write, then space accounting cleanup takes place. It is
2842 * important to go ahead and free up the space accounting through
2843 * dbuf_undirty() -> dbuf_unoverride() -> zio_free(). Space accounting
2844 * for determining if a write can occur in zfs_write() happens through
2845 * dmu_tx_assign(). This can cause an issue with Direct I/O writes in
2846 * the case of overwriting the same block, because all DVA allocations
2847 * are done in open-context. Constantly allowing Direct I/O overwrites
2848 * to the same block can exhaust the pool's available space, leading to
2849 * ENOSPC errors at the DVA allocation part of the ZIO pipeline, which
2850 * will eventually suspend the pool. By cleaning up space accounting
2851 * now, the ENOSPC error can be avoided.
2852 *
2853 * Since we are undirtying the record in open-context, we must have a
2854 * hold on the db, so it should never be evicted after calling
2855 * dbuf_undirty().
2856 */
2857 VERIFY3B(dbuf_undirty(db, tx), ==, B_FALSE);
2858 ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg));
2859
2860 if (db->db_buf != NULL) {
2861 /*
2862 * If there is an associated ARC buffer with this dbuf we can
2863 * only destroy it if the previous dirty record does not
2864 * reference it.
2865 */
2866 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2867 if (dr == NULL || dr->dt.dl.dr_data != db->db_buf)
2868 arc_buf_destroy(db->db_buf, db);
2869
2870 /*
2871 * Setting the dbuf's data pointers to NULL will force all
2872 * future reads down to the devices to get the most up to date
2873 * version of the data after a Direct I/O write has completed.
2874 */
2875 db->db_buf = NULL;
2876 dbuf_clear_data(db);
2877 }
2878
2879 ASSERT0P(db->db_buf);
2880 ASSERT0P(db->db.db_data);
2881
2882 db->db_state = DB_NOFILL;
2883 DTRACE_SET_STATE(db,
2884 "allocating NOFILL buffer for clone or direct I/O write");
2885
2886 DBUF_VERIFY(db);
2887 mutex_exit(&db->db_mtx);
2888
2889 dbuf_noread(db, DMU_KEEP_CACHING);
2890 (void) dbuf_dirty(db, tx);
2891 }
2892
2893 void
2894 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2895 {
2896 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2897
2898 mutex_enter(&db->db_mtx);
2899 db->db_state = DB_NOFILL;
2900 DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2901 mutex_exit(&db->db_mtx);
2902
2903 dbuf_noread(db, DMU_KEEP_CACHING);
2904 (void) dbuf_dirty(db, tx);
2905 }
2906
2907 void
2908 dmu_buf_will_fill_flags(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail,
2909 dmu_flags_t flags)
2910 {
2911 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2912
2913 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2914 ASSERT(tx->tx_txg != 0);
2915 ASSERT0(db->db_level);
2916 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2917
2918 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2919 dmu_tx_private_ok(tx));
2920
2921 mutex_enter(&db->db_mtx);
2922 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2923 if (db->db_state == DB_NOFILL ||
2924 (db->db_state == DB_UNCACHED && dr && dr->dt.dl.dr_diowrite)) {
2925 /*
2926 * If the fill can fail, we should have a way to return to the
2927 * cloned or Direct I/O write data.
2928 */
2929 if (canfail && dr) {
2930 mutex_exit(&db->db_mtx);
2931 dmu_buf_will_dirty_flags(db_fake, tx, flags);
2932 return;
2933 }
2934 /*
2935 * Block cloning: We will be completely overwriting a block
2936 * cloned in this transaction group, so let's undirty the
2937 * pending clone and mark the block as uncached. This will be
2938 * as if the clone was never done.
2939 */
2940 if (db->db_state == DB_NOFILL) {
2941 VERIFY(!dbuf_undirty(db, tx));
2942 db->db_state = DB_UNCACHED;
2943 }
2944 }
2945 mutex_exit(&db->db_mtx);
2946
2947 dbuf_noread(db, flags);
2948 (void) dbuf_dirty(db, tx);
2949 }
2950
2951 void
2952 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail)
2953 {
2954 dmu_buf_will_fill_flags(db_fake, tx, canfail, DMU_READ_NO_PREFETCH);
2955 }
2956
2957 /*
2958 * This function is effectively the same as dmu_buf_will_dirty(), but
2959 * indicates the caller expects raw encrypted data in the db, and provides
2960 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2961 * blkptr_t when this dbuf is written. This is only used for blocks of
2962 * dnodes, during raw receive.
2963 */
2964 void
2965 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2966 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2967 {
2968 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2969 dbuf_dirty_record_t *dr;
2970
2971 /*
2972 * dr_has_raw_params is only processed for blocks of dnodes
2973 * (see dbuf_sync_dnode_leaf_crypt()).
2974 */
2975 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2976 ASSERT0(db->db_level);
2977 ASSERT(db->db_objset->os_raw_receive);
2978
2979 dmu_buf_will_dirty_flags(db_fake, tx,
2980 DMU_READ_NO_PREFETCH | DMU_READ_NO_DECRYPT);
2981
2982 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2983
2984 ASSERT3P(dr, !=, NULL);
2985 ASSERT3U(dr->dt.dl.dr_override_state, ==, DR_NOT_OVERRIDDEN);
2986
2987 dr->dt.dl.dr_has_raw_params = B_TRUE;
2988 dr->dt.dl.dr_byteorder = byteorder;
2989 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2990 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2991 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2992 }
2993
2994 static void
2995 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2996 {
2997 struct dirty_leaf *dl;
2998 dbuf_dirty_record_t *dr;
2999
3000 ASSERT3U(db->db.db_object, !=, DMU_META_DNODE_OBJECT);
3001 ASSERT0(db->db_level);
3002
3003 dr = list_head(&db->db_dirty_records);
3004 ASSERT3P(dr, !=, NULL);
3005 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
3006 dl = &dr->dt.dl;
3007 ASSERT0(dl->dr_has_raw_params);
3008 dl->dr_overridden_by = *bp;
3009 dl->dr_override_state = DR_OVERRIDDEN;
3010 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
3011 }
3012
3013 boolean_t
3014 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed)
3015 {
3016 (void) tx;
3017 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
3018 mutex_enter(&db->db_mtx);
3019 DBUF_VERIFY(db);
3020
3021 if (db->db_state == DB_FILL) {
3022 if (db->db_level == 0 && db->db_freed_in_flight) {
3023 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3024 /* we were freed while filling */
3025 /* XXX dbuf_undirty? */
3026 memset(db->db.db_data, 0, db->db.db_size);
3027 db->db_freed_in_flight = FALSE;
3028 db->db_state = DB_CACHED;
3029 DTRACE_SET_STATE(db,
3030 "fill done handling freed in flight");
3031 failed = B_FALSE;
3032 } else if (failed) {
3033 VERIFY(!dbuf_undirty(db, tx));
3034 arc_buf_destroy(db->db_buf, db);
3035 db->db_buf = NULL;
3036 dbuf_clear_data(db);
3037 DTRACE_SET_STATE(db, "fill failed");
3038 } else {
3039 db->db_state = DB_CACHED;
3040 DTRACE_SET_STATE(db, "fill done");
3041 }
3042 cv_broadcast(&db->db_changed);
3043 } else {
3044 db->db_state = DB_CACHED;
3045 failed = B_FALSE;
3046 }
3047 mutex_exit(&db->db_mtx);
3048 return (failed);
3049 }
3050
3051 void
3052 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
3053 bp_embedded_type_t etype, enum zio_compress comp,
3054 int uncompressed_size, int compressed_size, int byteorder,
3055 dmu_tx_t *tx)
3056 {
3057 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
3058 struct dirty_leaf *dl;
3059 dmu_object_type_t type;
3060 dbuf_dirty_record_t *dr;
3061
3062 if (etype == BP_EMBEDDED_TYPE_DATA) {
3063 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
3064 SPA_FEATURE_EMBEDDED_DATA));
3065 }
3066
3067 DB_DNODE_ENTER(db);
3068 type = DB_DNODE(db)->dn_type;
3069 DB_DNODE_EXIT(db);
3070
3071 ASSERT0(db->db_level);
3072 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3073
3074 dmu_buf_will_not_fill(dbuf, tx);
3075
3076 dr = list_head(&db->db_dirty_records);
3077 ASSERT3P(dr, !=, NULL);
3078 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
3079 dl = &dr->dt.dl;
3080 ASSERT0(dl->dr_has_raw_params);
3081 encode_embedded_bp_compressed(&dl->dr_overridden_by,
3082 data, comp, uncompressed_size, compressed_size);
3083 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
3084 BP_SET_TYPE(&dl->dr_overridden_by, type);
3085 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
3086 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
3087
3088 dl->dr_override_state = DR_OVERRIDDEN;
3089 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
3090 }
3091
3092 void
3093 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
3094 {
3095 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
3096 dmu_object_type_t type;
3097 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
3098 SPA_FEATURE_REDACTED_DATASETS));
3099
3100 DB_DNODE_ENTER(db);
3101 type = DB_DNODE(db)->dn_type;
3102 DB_DNODE_EXIT(db);
3103
3104 ASSERT0(db->db_level);
3105 dmu_buf_will_not_fill(dbuf, tx);
3106
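/*
 * Construct an override BP marked as redacted; it carries only the
 * logical size and birth txg and points at no on-disk data.
 */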
3107 blkptr_t bp = { { { {0} } } };
3108 BP_SET_TYPE(&bp, type);
3109 BP_SET_LEVEL(&bp, 0);
3110 BP_SET_BIRTH(&bp, tx->tx_txg, 0);
3111 BP_SET_REDACTED(&bp);
3112 BPE_SET_LSIZE(&bp, dbuf->db_size);
3113
3114 dbuf_override_impl(db, &bp, tx);
3115 }
3116
3117 /*
3118 * Directly assign a provided arc buf to a given dbuf if it's not referenced
3119 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
3120 */
3121 void
3122 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx,
3123 dmu_flags_t flags)
3124 {
3125 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
3126 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3127 ASSERT0(db->db_level);
3128 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
3129 ASSERT(buf != NULL);
3130 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
3131 ASSERT(tx->tx_txg != 0);
3132
3133 arc_return_buf(buf, db);
3134 ASSERT(arc_released(buf));
3135
3136 mutex_enter(&db->db_mtx);
3137 if (!(flags & (DMU_UNCACHEDIO | DMU_KEEP_CACHING)))
3138 db->db_pending_evict = B_FALSE;
3139 db->db_partial_read = B_FALSE;
3140
3141 while (db->db_state == DB_READ || db->db_state == DB_FILL)
3142 cv_wait(&db->db_changed, &db->db_mtx);
3143
3144 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
3145 db->db_state == DB_NOFILL);
3146
3147 if (db->db_state == DB_CACHED &&
3148 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
3149 /*
3150 * In practice, we will never have a case where we have an
3151 * encrypted arc buffer while additional holds exist on the
3152 * dbuf. We don't handle this here so we simply assert that
3153 * fact instead.
3154 */
3155 ASSERT(!arc_is_encrypted(buf));
3156 mutex_exit(&db->db_mtx);
3157 (void) dbuf_dirty(db, tx);
3158 memcpy(db->db.db_data, buf->b_data, db->db.db_size);
3159 arc_buf_destroy(buf, db);
3160 return;
3161 }
3162
3163 if (db->db_state == DB_CACHED) {
3164 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
3165
3166 ASSERT(db->db_buf != NULL);
3167 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
3168 ASSERT(dr->dt.dl.dr_data == db->db_buf);
3169
3170 if (!arc_released(db->db_buf)) {
3171 ASSERT(dr->dt.dl.dr_override_state ==
3172 DR_OVERRIDDEN);
3173 arc_release(db->db_buf, db);
3174 }
3175 dr->dt.dl.dr_data = buf;
3176 arc_buf_destroy(db->db_buf, db);
3177 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
3178 arc_release(db->db_buf, db);
3179 arc_buf_destroy(db->db_buf, db);
3180 }
3181 db->db_buf = NULL;
3182 } else if (db->db_state == DB_NOFILL) {
3183 /*
3184 * We will be completely replacing the cloned block. In case
3185 * it was cloned in this transaction group, let's undirty the
3186 * pending clone and mark the block as uncached. This will be
3187 * as if the clone was never done.
3188 */
3189 VERIFY(!dbuf_undirty(db, tx));
3190 db->db_state = DB_UNCACHED;
3191 }
3192 ASSERT0P(db->db_buf);
3193 dbuf_set_data(db, buf);
3194 db->db_state = DB_FILL;
3195 DTRACE_SET_STATE(db, "filling assigned arcbuf");
3196 mutex_exit(&db->db_mtx);
3197 (void) dbuf_dirty(db, tx);
3198 dmu_buf_fill_done(&db->db, tx, B_FALSE);
3199 }
3200
3201 void
3202 dbuf_destroy(dmu_buf_impl_t *db)
3203 {
3204 dnode_t *dn;
3205 dmu_buf_impl_t *parent = db->db_parent;
3206 dmu_buf_impl_t *dndb;
3207
3208 ASSERT(MUTEX_HELD(&db->db_mtx));
3209 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3210
3211 if (db->db_buf != NULL) {
3212 arc_buf_destroy(db->db_buf, db);
3213 db->db_buf = NULL;
3214 }
3215
3216 if (db->db_blkid == DMU_BONUS_BLKID) {
3217 int slots = DB_DNODE(db)->dn_num_slots;
3218 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
3219 if (db->db.db_data != NULL) {
3220 kmem_free(db->db.db_data, bonuslen);
3221 arc_space_return(bonuslen, ARC_SPACE_BONUS);
3222 db->db_state = DB_UNCACHED;
3223 DTRACE_SET_STATE(db, "buffer cleared");
3224 }
3225 }
3226
3227 dbuf_clear_data(db);
3228
3229 if (multilist_link_active(&db->db_cache_link)) {
3230 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3231 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3232
3233 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3234
3235 ASSERT0(dmu_buf_user_size(&db->db));
3236 (void) zfs_refcount_remove_many(
3237 &dbuf_caches[db->db_caching_status].size,
3238 db->db.db_size, db);
3239
3240 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3241 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3242 } else {
3243 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3244 DBUF_STAT_BUMPDOWN(cache_count);
3245 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3246 db->db.db_size);
3247 }
3248 db->db_caching_status = DB_NO_CACHE;
3249 }
3250
3251 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
3252 ASSERT0P(db->db_data_pending);
3253 ASSERT(list_is_empty(&db->db_dirty_records));
3254
3255 db->db_state = DB_EVICTING;
3256 DTRACE_SET_STATE(db, "buffer eviction started");
3257 db->db_blkptr = NULL;
3258
3259 /*
3260 * Now that db_state is DB_EVICTING, nobody else can find this via
3261 * the hash table. We can now drop db_mtx, which allows us to
3262 * acquire the dn_dbufs_mtx.
3263 */
3264 mutex_exit(&db->db_mtx);
3265
3266 DB_DNODE_ENTER(db);
3267 dn = DB_DNODE(db);
3268 dndb = dn->dn_dbuf;
3269 if (db->db_blkid != DMU_BONUS_BLKID) {
3270 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
3271 if (needlock)
3272 mutex_enter_nested(&dn->dn_dbufs_mtx,
3273 NESTED_SINGLE);
3274 avl_remove(&dn->dn_dbufs, db);
3275 membar_producer();
3276 DB_DNODE_EXIT(db);
3277 if (needlock)
3278 mutex_exit(&dn->dn_dbufs_mtx);
3279 /*
3280 * Decrementing the dbuf count means that the hold corresponding
3281 * to the removed dbuf is no longer discounted in dnode_move(),
3282 * so the dnode cannot be moved until after we release the hold.
3283 * The membar_producer() ensures visibility of the decremented
3284 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
3285 * release any lock.
3286 */
3287 mutex_enter(&dn->dn_mtx);
3288 dnode_rele_and_unlock(dn, db, B_TRUE);
3289 #ifdef USE_DNODE_HANDLE
3290 db->db_dnode_handle = NULL;
3291 #else
3292 db->db_dnode = NULL;
3293 #endif
3294
3295 dbuf_hash_remove(db);
3296 } else {
3297 DB_DNODE_EXIT(db);
3298 }
3299
3300 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3301
3302 db->db_parent = NULL;
3303
3304 ASSERT0P(db->db_buf);
3305 ASSERT0P(db->db.db_data);
3306 ASSERT0P(db->db_hash_next);
3307 ASSERT0P(db->db_blkptr);
3308 ASSERT0P(db->db_data_pending);
3309 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3310 ASSERT(!multilist_link_active(&db->db_cache_link));
3311
3312 /*
3313 * If this dbuf is referenced from an indirect dbuf,
3314 * decrement the ref count on the indirect dbuf.
3315 */
3316 if (parent && parent != dndb) {
3317 mutex_enter(&parent->db_mtx);
3318 dbuf_rele_and_unlock(parent, db, B_TRUE);
3319 }
3320
3321 kmem_cache_free(dbuf_kmem_cache, db);
3322 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3323 }
3324
3325 /*
3326 * Note: While bpp will always be updated if the function returns success,
3327 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3328 * this happens when the dnode is the meta-dnode, or {user|group|project}used
3329 * object.
3330 */
3331 __attribute__((always_inline))
3332 static inline int
3333 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3334 dmu_buf_impl_t **parentp, blkptr_t **bpp)
3335 {
3336 *parentp = NULL;
3337 *bpp = NULL;
3338
3339 ASSERT(blkid != DMU_BONUS_BLKID);
3340
3341 if (blkid == DMU_SPILL_BLKID) {
3342 mutex_enter(&dn->dn_mtx);
3343 if (dn->dn_have_spill &&
3344 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3345 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3346 else
3347 *bpp = NULL;
3348 dbuf_add_ref(dn->dn_dbuf, NULL);
3349 *parentp = dn->dn_dbuf;
3350 mutex_exit(&dn->dn_mtx);
3351 return (0);
3352 }
3353
3354 int nlevels =
3355 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3356 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3357
3358 ASSERT3U(level * epbs, <, 64);
3359 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3360 /*
3361 * This assertion shouldn't trip as long as the max indirect block size
3362 * is less than 1M. The reason for this is that up to that point,
3363 * the number of levels required to address an entire object with blocks
3364 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
3365 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3366 * (i.e. we can address the entire object), objects will all use at most
3367 * N-1 levels and the assertion won't overflow. However, once epbs is
3368 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
3369 * enough to address an entire object, so objects will have 5 levels,
3370 * but then this assertion will overflow.
3371 *
3372 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3373 * need to redo this logic to handle overflows.
3374 */
3375 ASSERT(level >= nlevels ||
3376 ((nlevels - level - 1) * epbs) +
3377 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3378 if (level >= nlevels ||
3379 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3380 ((nlevels - level - 1) * epbs)) ||
3381 (fail_sparse &&
3382 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3383 /* the buffer has no parent yet */
3384 return (SET_ERROR(ENOENT));
3385 } else if (level < nlevels-1) {
3386 /* this block is referenced from an indirect block */
3387 int err;
3388
3389 err = dbuf_hold_impl(dn, level + 1,
3390 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3391
3392 if (err)
3393 return (err);
3394 err = dbuf_read(*parentp, NULL, DB_RF_CANFAIL |
3395 DB_RF_HAVESTRUCT | DMU_READ_NO_PREFETCH);
3396 if (err) {
3397 dbuf_rele(*parentp, NULL);
3398 *parentp = NULL;
3399 return (err);
3400 }
3401 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3402 (blkid & ((1ULL << epbs) - 1));
3403 return (0);
3404 } else {
3405 /* the block is referenced from the dnode */
3406 ASSERT3U(level, ==, nlevels-1);
3407 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3408 blkid < dn->dn_phys->dn_nblkptr);
3409 if (dn->dn_dbuf) {
3410 dbuf_add_ref(dn->dn_dbuf, NULL);
3411 *parentp = dn->dn_dbuf;
3412 }
3413 *bpp = &dn->dn_phys->dn_blkptr[blkid];
3414 return (0);
3415 }
3416 }
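
/*
 * Worked example of the blkid arithmetic above, assuming a 128K indirect
 * block (epbs = 17 - SPA_BLKPTRSHIFT = 10): the level-1 parent of level-0
 * blkid 12345 is blkid 12345 >> 10 = 12, and the child's block pointer
 * sits at index 12345 & ((1ULL << 10) - 1) = 57 of that indirect block's
 * blkptr_t array.
 */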
3417
3418 static dmu_buf_impl_t *
3419 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3420 dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3421 {
3422 objset_t *os = dn->dn_objset;
3423 dmu_buf_impl_t *db, *odb;
3424
3425 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3426 ASSERT(dn->dn_type != DMU_OT_NONE);
3427
3428 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3429
3430 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3431 offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3432
3433 db->db_objset = os;
3434 db->db.db_object = dn->dn_object;
3435 db->db_level = level;
3436 db->db_blkid = blkid;
3437 db->db_dirtycnt = 0;
3438 #ifdef USE_DNODE_HANDLE
3439 db->db_dnode_handle = dn->dn_handle;
3440 #else
3441 db->db_dnode = dn;
3442 #endif
3443 db->db_parent = parent;
3444 db->db_blkptr = blkptr;
3445 db->db_hash = hash;
3446
3447 db->db_user = NULL;
3448 db->db_user_immediate_evict = FALSE;
3449 db->db_freed_in_flight = FALSE;
3450 db->db_pending_evict = TRUE;
3451 db->db_partial_read = FALSE;
3452
3453 if (blkid == DMU_BONUS_BLKID) {
3454 ASSERT3P(parent, ==, dn->dn_dbuf);
3455 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3456 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3457 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3458 db->db.db_offset = DMU_BONUS_BLKID;
3459 db->db_state = DB_UNCACHED;
3460 DTRACE_SET_STATE(db, "bonus buffer created");
3461 db->db_caching_status = DB_NO_CACHE;
3462 /* the bonus dbuf is not placed in the hash table */
3463 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3464 return (db);
3465 } else if (blkid == DMU_SPILL_BLKID) {
3466 db->db.db_size = (blkptr != NULL) ?
3467 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3468 db->db.db_offset = 0;
3469 } else {
3470 int blocksize =
3471 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3472 db->db.db_size = blocksize;
3473 db->db.db_offset = db->db_blkid * blocksize;
3474 }
3475
3476 /*
3477 * Hold the dn_dbufs_mtx while we get the new dbuf
3478 * in the hash table *and* added to the dbufs list.
3479 * This prevents a possible deadlock with someone
3480 * trying to look up this dbuf before it's added to the
3481 * dn_dbufs list.
3482 */
3483 mutex_enter(&dn->dn_dbufs_mtx);
3484 db->db_state = DB_EVICTING; /* not worth logging this state change */
3485 if ((odb = dbuf_hash_insert(db)) != NULL) {
3486 /* someone else inserted it first */
3487 mutex_exit(&dn->dn_dbufs_mtx);
3488 kmem_cache_free(dbuf_kmem_cache, db);
3489 DBUF_STAT_BUMP(hash_insert_race);
3490 return (odb);
3491 }
3492 avl_add(&dn->dn_dbufs, db);
3493
3494 db->db_state = DB_UNCACHED;
3495 DTRACE_SET_STATE(db, "regular buffer created");
3496 db->db_caching_status = DB_NO_CACHE;
3497 mutex_exit(&dn->dn_dbufs_mtx);
3498 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3499
3500 if (parent && parent != dn->dn_dbuf)
3501 dbuf_add_ref(parent, db);
3502
3503 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3504 zfs_refcount_count(&dn->dn_holds) > 0);
3505 (void) zfs_refcount_add(&dn->dn_holds, db);
3506
3507 dprintf_dbuf(db, "db=%p\n", db);
3508
3509 return (db);
3510 }
3511
3512 /*
3513 * This function returns a block pointer and information about the object,
3514 * given a dnode and a block. This is a publicly accessible version of
3515 * dbuf_findbp that only returns some information, rather than the
3516 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
3517 * should be locked as (at least) a reader.
3518 */
3519 int
3520 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3521 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3522 {
3523 dmu_buf_impl_t *dbp = NULL;
3524 blkptr_t *bp2;
3525 int err = 0;
3526 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3527
3528 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3529 if (err == 0) {
3530 ASSERT3P(bp2, !=, NULL);
3531 *bp = *bp2;
3532 if (dbp != NULL)
3533 dbuf_rele(dbp, NULL);
3534 if (datablkszsec != NULL)
3535 *datablkszsec = dn->dn_phys->dn_datablkszsec;
3536 if (indblkshift != NULL)
3537 *indblkshift = dn->dn_phys->dn_indblkshift;
3538 }
3539
3540 return (err);
3541 }
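
/*
 * Example use of dbuf_dnode_findbp() (an illustrative sketch, not taken
 * from an in-tree caller): look up the on-disk block pointer for a
 * level-0 block without taking a dbuf hold.  The dnode is assumed held.
 *
 *	blkptr_t bp;
 *	uint16_t datablkszsec;
 *	uint8_t indblkshift;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	int err = dbuf_dnode_findbp(dn, 0, blkid, &bp, &datablkszsec,
 *	    &indblkshift);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (err == 0 && !BP_IS_HOLE(&bp))
 *		... the block exists on disk; bp may be inspected ...
 */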
3542
3543 typedef struct dbuf_prefetch_arg {
3544 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
3545 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3546 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3547 int dpa_curlevel; /* The current level that we're reading */
3548 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3549 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3550 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3551 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3552 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3553 void *dpa_arg; /* prefetch completion arg */
3554 } dbuf_prefetch_arg_t;
3555
3556 static void
3557 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3558 {
3559 if (dpa->dpa_cb != NULL) {
3560 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3561 dpa->dpa_zb.zb_blkid, io_done);
3562 }
3563 kmem_free(dpa, sizeof (*dpa));
3564 }
3565
3566 static void
3567 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3568 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3569 {
3570 (void) zio, (void) zb, (void) iobp;
3571 dbuf_prefetch_arg_t *dpa = private;
3572
3573 if (abuf != NULL)
3574 arc_buf_destroy(abuf, private);
3575
3576 dbuf_prefetch_fini(dpa, B_TRUE);
3577 }
3578
3579 /*
3580 * Actually issue the prefetch read for the block given.
3581 */
3582 static void
3583 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3584 {
3585 ASSERT(!BP_IS_HOLE(bp));
3586 ASSERT(!BP_IS_REDACTED(bp));
3587 if (BP_IS_EMBEDDED(bp))
3588 return (dbuf_prefetch_fini(dpa, B_FALSE));
3589
3590 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3591 arc_flags_t aflags =
3592 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3593 ARC_FLAG_NO_BUF;
3594
3595 /* dnodes are always read as raw and then converted later */
3596 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3597 dpa->dpa_curlevel == 0)
3598 zio_flags |= ZIO_FLAG_RAW;
3599
3600 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3601 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3602 ASSERT(dpa->dpa_zio != NULL);
3603 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3604 dbuf_issue_final_prefetch_done, dpa,
3605 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3606 }
3607
3608 /*
3609 * Called when an indirect block above our prefetch target is read in. This
3610 * will either read in the next indirect block down the tree or issue the actual
3611 * prefetch if the next block down is our target.
3612 */
3613 static void
3614 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3615 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3616 {
3617 (void) zb, (void) iobp;
3618 dbuf_prefetch_arg_t *dpa = private;
3619
3620 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3621 ASSERT3S(dpa->dpa_curlevel, >, 0);
3622
3623 if (abuf == NULL) {
3624 ASSERT(zio == NULL || zio->io_error != 0);
3625 dbuf_prefetch_fini(dpa, B_TRUE);
3626 return;
3627 }
3628 ASSERT(zio == NULL || zio->io_error == 0);
3629
3630 /*
3631 * The dpa_dnode is only valid if we are called with a NULL
3632 * zio. This indicates that the arc_read() returned without
3633 * first calling zio_read() to issue a physical read. Once
3634 * a physical read is made the dpa_dnode must be invalidated
3635 * as the locks guarding it may have been dropped. If the
3636 * dpa_dnode is still valid, then we want to add it to the dbuf
3637 * cache. To do so, we must hold the dbuf associated with the block
3638 * we just prefetched, read its contents so that we associate it
3639 * with an arc_buf_t, and then release it.
3640 */
3641 if (zio != NULL) {
3642 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3643 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3644 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3645 } else {
3646 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3647 }
3648 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3649
3650 dpa->dpa_dnode = NULL;
3651 } else if (dpa->dpa_dnode != NULL) {
3652 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3653 (dpa->dpa_epbs * (dpa->dpa_curlevel -
3654 dpa->dpa_zb.zb_level));
3655 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3656 dpa->dpa_curlevel, curblkid, FTAG);
3657 if (db == NULL) {
3658 arc_buf_destroy(abuf, private);
3659 dbuf_prefetch_fini(dpa, B_TRUE);
3660 return;
3661 }
3662 (void) dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT |
3663 DMU_READ_NO_PREFETCH);
3664 dbuf_rele(db, FTAG);
3665 }
3666
3667 dpa->dpa_curlevel--;
3668 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3669 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3670 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3671 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3672
3673 ASSERT(!BP_IS_REDACTED(bp) || dpa->dpa_dnode == NULL ||
3674 dsl_dataset_feature_is_active(
3675 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3676 SPA_FEATURE_REDACTED_DATASETS));
3677 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3678 arc_buf_destroy(abuf, private);
3679 dbuf_prefetch_fini(dpa, B_TRUE);
3680 return;
3681 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3682 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3683 dbuf_issue_final_prefetch(dpa, bp);
3684 } else {
3685 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3686 zbookmark_phys_t zb;
3687
3688 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3689 if (dpa->dpa_dnode) {
3690 if (dnode_level_is_l2cacheable(bp, dpa->dpa_dnode,
3691 dpa->dpa_curlevel))
3692 iter_aflags |= ARC_FLAG_L2CACHE;
3693 } else {
3694 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3695 iter_aflags |= ARC_FLAG_L2CACHE;
3696 }
3697
3698 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3699
3700 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3701 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3702
3703 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3704 bp, dbuf_prefetch_indirect_done, dpa,
3705 ZIO_PRIORITY_SYNC_READ,
3706 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3707 &iter_aflags, &zb);
3708 }
3709
3710 arc_buf_destroy(abuf, private);
3711 }
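
/*
 * Worked example of the descent above, assuming epbs = 10 and a
 * three-level object: for a target at level 0, blkid 1000000, the walk
 * reads the level-2 indirect (blkid 1000000 >> 20 = 0), then the level-1
 * indirect found at slot 1000000 >> 10 = 976 of that block, and finally
 * issues the prefetch for slot 1000000 & 1023 = 576 of the level-1 block.
 */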
3712
3713 /*
3714 * Issue prefetch reads for the given block on the given level. If the indirect
3715 * blocks above that block are not in memory, we will read them in
3716 * asynchronously. As a result, this call never blocks waiting for a read to
3717 * complete. Note that the prefetch might fail if the dataset is encrypted and
3718 * the encryption key is unmapped before the IO completes.
3719 */
3720 int
3721 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3722 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3723 void *arg)
3724 {
3725 blkptr_t bp;
3726 int epbs, nlevels, curlevel;
3727 uint64_t curblkid;
3728
3729 ASSERT(blkid != DMU_BONUS_BLKID);
3730 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3731
3732 if (blkid > dn->dn_maxblkid)
3733 goto no_issue;
3734
3735 if (level == 0 && dnode_block_freed(dn, blkid))
3736 goto no_issue;
3737
3738 /*
3739 * This dnode hasn't been written to disk yet, so there's nothing to
3740 * prefetch.
3741 */
3742 nlevels = dn->dn_phys->dn_nlevels;
3743 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3744 goto no_issue;
3745
3746 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3747 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3748 goto no_issue;
3749
3750 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3751 level, blkid, NULL);
3752 if (db != NULL) {
3753 mutex_exit(&db->db_mtx);
3754 /*
3755 * This dbuf already exists. It is either CACHED, or
3756 * (we assume) about to be read or filled.
3757 */
3758 goto no_issue;
3759 }
3760
3761 /*
3762 * Find the closest ancestor (indirect block) of the target block
3763 * that is present in the cache. In this indirect block, we will
3764 * find the bp that is at curlevel, curblkid.
3765 */
3766 curlevel = level;
3767 curblkid = blkid;
3768 while (curlevel < nlevels - 1) {
3769 int parent_level = curlevel + 1;
3770 uint64_t parent_blkid = curblkid >> epbs;
3771 dmu_buf_impl_t *db;
3772
3773 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3774 FALSE, TRUE, FTAG, &db) == 0) {
3775 blkptr_t *bpp = db->db_buf->b_data;
3776 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3777 dbuf_rele(db, FTAG);
3778 break;
3779 }
3780
3781 curlevel = parent_level;
3782 curblkid = parent_blkid;
3783 }
3784
3785 if (curlevel == nlevels - 1) {
3786 /* No cached indirect blocks found. */
3787 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3788 bp = dn->dn_phys->dn_blkptr[curblkid];
3789 }
3790 ASSERT(!BP_IS_REDACTED(&bp) ||
3791 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3792 SPA_FEATURE_REDACTED_DATASETS));
3793 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3794 goto no_issue;
3795
3796 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3797
3798 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3799 ZIO_FLAG_CANFAIL);
3800
3801 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3802 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3803 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3804 dn->dn_object, level, blkid);
3805 dpa->dpa_curlevel = curlevel;
3806 dpa->dpa_prio = prio;
3807 dpa->dpa_aflags = aflags;
3808 dpa->dpa_spa = dn->dn_objset->os_spa;
3809 dpa->dpa_dnode = dn;
3810 dpa->dpa_epbs = epbs;
3811 dpa->dpa_zio = pio;
3812 dpa->dpa_cb = cb;
3813 dpa->dpa_arg = arg;
3814
3815 if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3816 dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3817 else if (dnode_level_is_l2cacheable(&bp, dn, level))
3818 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3819
3820 /*
3821 * If we have the indirect just above us, no need to do the asynchronous
3822 * prefetch chain; we'll just run the last step ourselves. If we're at
3823 * a higher level, though, we want to issue the prefetches for all the
3824 * indirect blocks asynchronously, so we can go on with whatever we were
3825 * doing.
3826 */
3827 if (curlevel == level) {
3828 ASSERT3U(curblkid, ==, blkid);
3829 dbuf_issue_final_prefetch(dpa, &bp);
3830 } else {
3831 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3832 zbookmark_phys_t zb;
3833
3834 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3835 if (dnode_level_is_l2cacheable(&bp, dn, curlevel))
3836 iter_aflags |= ARC_FLAG_L2CACHE;
3837
3838 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3839 dn->dn_object, curlevel, curblkid);
3840 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3841 &bp, dbuf_prefetch_indirect_done, dpa,
3842 ZIO_PRIORITY_SYNC_READ,
3843 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3844 &iter_aflags, &zb);
3845 }
3846 /*
3847 * We use pio here instead of dpa_zio since it's possible that
3848 * dpa may have already been freed.
3849 */
3850 zio_nowait(pio);
3851 return (1);
3852 no_issue:
3853 if (cb != NULL)
3854 cb(arg, level, blkid, B_FALSE);
3855 return (0);
3856 }
3857
3858 int
3859 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3860 arc_flags_t aflags)
3861 {
3862
3863 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3864 }
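
/*
 * Example use of dbuf_prefetch() (an illustrative sketch; "start" and
 * "nblks" are hypothetical): prefetch a run of level-0 blocks ahead of a
 * sequential read.  The dnode must be held and dn_struct_rwlock held at
 * least as reader, as asserted above.
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	for (uint64_t b = start; b < start + nblks; b++)
 *		(void) dbuf_prefetch(dn, 0, b, ZIO_PRIORITY_ASYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 */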
3865
3866 /*
3867 * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3868 * the case of encrypted, compressed and uncompressed buffers by
3869 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3870 * arc_alloc_compressed_buf() or arc_alloc_buf().
3871 *
3872 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3873 */
3874 noinline static void
3875 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3876 {
3877 dbuf_dirty_record_t *dr = db->db_data_pending;
3878 arc_buf_t *data = dr->dt.dl.dr_data;
3879 arc_buf_t *db_data;
3880 enum zio_compress compress_type = arc_get_compression(data);
3881 uint8_t complevel = arc_get_complevel(data);
3882
3883 if (arc_is_encrypted(data)) {
3884 boolean_t byteorder;
3885 uint8_t salt[ZIO_DATA_SALT_LEN];
3886 uint8_t iv[ZIO_DATA_IV_LEN];
3887 uint8_t mac[ZIO_DATA_MAC_LEN];
3888
3889 arc_get_raw_params(data, &byteorder, salt, iv, mac);
3890 db_data = arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3891 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3892 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3893 compress_type, complevel);
3894 } else if (compress_type != ZIO_COMPRESS_OFF) {
3895 db_data = arc_alloc_compressed_buf(
3896 dn->dn_objset->os_spa, db, arc_buf_size(data),
3897 arc_buf_lsize(data), compress_type, complevel);
3898 } else {
3899 db_data = arc_alloc_buf(dn->dn_objset->os_spa, db,
3900 DBUF_GET_BUFC_TYPE(db), db->db.db_size);
3901 }
3902 memcpy(db_data->b_data, data->b_data, arc_buf_size(data));
3903
3904 dbuf_set_data(db, db_data);
3905 }
3906
3907 /*
3908 * Returns with db_holds incremented, and db_mtx not held.
3909 * Note: dn_struct_rwlock must be held.
3910 */
3911 int
3912 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3913 boolean_t fail_sparse, boolean_t fail_uncached,
3914 const void *tag, dmu_buf_impl_t **dbp)
3915 {
3916 dmu_buf_impl_t *db, *parent = NULL;
3917 uint64_t hv;
3918
3919 /* If the pool has been created, verify the tx_sync_lock is not held */
3920 spa_t *spa = dn->dn_objset->os_spa;
3921 dsl_pool_t *dp = spa->spa_dsl_pool;
3922 if (dp != NULL) {
3923 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3924 }
3925
3926 ASSERT(blkid != DMU_BONUS_BLKID);
3927 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3928 if (!fail_sparse)
3929 ASSERT3U(dn->dn_nlevels, >, level);
3930
3931 *dbp = NULL;
3932
3933 /* dbuf_find() returns with db_mtx held */
3934 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3935
3936 if (db == NULL) {
3937 blkptr_t *bp = NULL;
3938 int err;
3939
3940 if (fail_uncached)
3941 return (SET_ERROR(ENOENT));
3942
3943 ASSERT0P(parent);
3944 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3945 if (fail_sparse) {
3946 if (err == 0 && bp && BP_IS_HOLE(bp))
3947 err = SET_ERROR(ENOENT);
3948 if (err) {
3949 if (parent)
3950 dbuf_rele(parent, NULL);
3951 return (err);
3952 }
3953 }
3954 if (err && err != ENOENT)
3955 return (err);
3956 db = dbuf_create(dn, level, blkid, parent, bp, hv);
3957 }
3958
3959 if (fail_uncached && db->db_state != DB_CACHED) {
3960 mutex_exit(&db->db_mtx);
3961 return (SET_ERROR(ENOENT));
3962 }
3963
3964 if (db->db_buf != NULL) {
3965 arc_buf_access(db->db_buf);
3966 ASSERT(MUTEX_HELD(&db->db_mtx));
3967 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3968 }
3969
3970 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3971
3972 /*
3973 * If this buffer is currently syncing out, and we are
3974 * still referencing it from db_data, we need to make a copy
3975 * of it in case we decide we want to dirty it again in this txg.
3976 */
3977 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3978 dn->dn_object != DMU_META_DNODE_OBJECT &&
3979 db->db_state == DB_CACHED && db->db_data_pending) {
3980 dbuf_dirty_record_t *dr = db->db_data_pending;
3981 if (dr->dt.dl.dr_data == db->db_buf) {
3982 ASSERT3P(db->db_buf, !=, NULL);
3983 dbuf_hold_copy(dn, db);
3984 }
3985 }
3986
3987 if (multilist_link_active(&db->db_cache_link)) {
3988 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3989 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3990 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3991
3992 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3993
3994 uint64_t size = db->db.db_size;
3995 uint64_t usize = dmu_buf_user_size(&db->db);
3996 (void) zfs_refcount_remove_many(
3997 &dbuf_caches[db->db_caching_status].size, size, db);
3998 (void) zfs_refcount_remove_many(
3999 &dbuf_caches[db->db_caching_status].size, usize,
4000 db->db_user);
4001
4002 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
4003 DBUF_STAT_BUMPDOWN(metadata_cache_count);
4004 } else {
4005 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
4006 DBUF_STAT_BUMPDOWN(cache_count);
4007 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
4008 size + usize);
4009 }
4010 db->db_caching_status = DB_NO_CACHE;
4011 }
4012 (void) zfs_refcount_add(&db->db_holds, tag);
4013 DBUF_VERIFY(db);
4014 mutex_exit(&db->db_mtx);
4015
4016 /* NOTE: we can't rele the parent until after we drop the db_mtx */
4017 if (parent)
4018 dbuf_rele(parent, NULL);
4019
4020 ASSERT3P(DB_DNODE(db), ==, dn);
4021 ASSERT3U(db->db_blkid, ==, blkid);
4022 ASSERT3U(db->db_level, ==, level);
4023 *dbp = db;
4024
4025 return (0);
4026 }
4027
4028 dmu_buf_impl_t *
4029 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
4030 {
4031 return (dbuf_hold_level(dn, 0, blkid, tag));
4032 }
4033
4034 dmu_buf_impl_t *
4035 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
4036 {
4037 dmu_buf_impl_t *db;
4038 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
4039 return (err ? NULL : db);
4040 }
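
/*
 * Example of the common hold/read/release pattern built on dbuf_hold()
 * and dbuf_rele() (an illustrative sketch; example_read_block() is a
 * hypothetical helper, not part of this file):
 *
 *	static int
 *	example_read_block(dnode_t *dn, uint64_t blkid)
 *	{
 *		dmu_buf_impl_t *db;
 *		int err;
 *
 *		rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *		db = dbuf_hold(dn, blkid, FTAG);
 *		rw_exit(&dn->dn_struct_rwlock);
 *		if (db == NULL)
 *			return (SET_ERROR(EIO));
 *		err = dbuf_read(db, NULL, DB_RF_CANFAIL);
 *		dbuf_rele(db, FTAG);
 *		return (err);
 *	}
 */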
4041
4042 void
4043 dbuf_create_bonus(dnode_t *dn)
4044 {
4045 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
4046
4047 ASSERT0P(dn->dn_bonus);
4048 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
4049 dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
4050 dn->dn_bonus->db_pending_evict = FALSE;
4051 }
4052
4053 int
4054 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
4055 {
4056 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4057
4058 if (db->db_blkid != DMU_SPILL_BLKID)
4059 return (SET_ERROR(ENOTSUP));
4060 if (blksz == 0)
4061 blksz = SPA_MINBLOCKSIZE;
4062 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
4063 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
4064
4065 dbuf_new_size(db, blksz, tx);
4066
4067 return (0);
4068 }
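
/*
 * For example, a requested spill block size of 3000 bytes is rounded up
 * by the P2ROUNDUP() above to 3072 bytes (six SPA_MINBLOCKSIZE units of
 * 512 bytes) before the dbuf is resized.
 */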
4069
4070 void
4071 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
4072 {
4073 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
4074 }
4075
4076 #pragma weak dmu_buf_add_ref = dbuf_add_ref
4077 void
4078 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
4079 {
4080 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
4081 VERIFY3S(holds, >, 1);
4082 }
4083
4084 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
4085 boolean_t
4086 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
4087 const void *tag)
4088 {
4089 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4090 dmu_buf_impl_t *found_db;
4091 boolean_t result = B_FALSE;
4092
4093 if (blkid == DMU_BONUS_BLKID)
4094 found_db = dbuf_find_bonus(os, obj);
4095 else
4096 found_db = dbuf_find(os, obj, 0, blkid, NULL);
4097
4098 if (found_db != NULL) {
4099 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
4100 (void) zfs_refcount_add(&db->db_holds, tag);
4101 result = B_TRUE;
4102 }
4103 mutex_exit(&found_db->db_mtx);
4104 }
4105 return (result);
4106 }
4107
4108 /*
4109 * If you call dbuf_rele() you had better not be referencing the dnode handle
4110 * unless you have some other direct or indirect hold on the dnode. (An indirect
4111 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
4112 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
4113 * dnode's parent dbuf evicting its dnode handles.
4114 */
4115 void
4116 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
4117 {
4118 mutex_enter(&db->db_mtx);
4119 dbuf_rele_and_unlock(db, tag, B_FALSE);
4120 }
4121
4122 void
4123 dmu_buf_rele(dmu_buf_t *db, const void *tag)
4124 {
4125 dbuf_rele((dmu_buf_impl_t *)db, tag);
4126 }
4127
4128 /*
4129 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
4130 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
4131 * argument should be set if we are already in the dbuf-evicting code
4132 * path, in which case we don't want to recursively evict. This allows us to
4133 * avoid deeply nested stacks that would have a call flow similar to this:
4134 *
4135 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
4136 * ^ |
4137 * | |
4138 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+
4139 *
4140 */
4141 void
4142 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
4143 {
4144 int64_t holds;
4145 uint64_t size;
4146
4147 ASSERT(MUTEX_HELD(&db->db_mtx));
4148 DBUF_VERIFY(db);
4149
4150 /*
4151 * Remove the reference to the dbuf before removing its hold on the
4152 * dnode so we can guarantee in dnode_move() that a referenced bonus
4153 * buffer has a corresponding dnode hold.
4154 */
4155 holds = zfs_refcount_remove(&db->db_holds, tag);
4156 ASSERT(holds >= 0);
4157
4158 /*
4159 * We can't freeze indirects if there is a possibility that they
4160 * may be modified in the current syncing context.
4161 */
4162 if (db->db_buf != NULL &&
4163 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
4164 arc_buf_freeze(db->db_buf);
4165 }
4166
4167 if (holds == db->db_dirtycnt &&
4168 db->db_level == 0 && db->db_user_immediate_evict)
4169 dbuf_evict_user(db);
4170
4171 if (holds == 0) {
4172 if (db->db_blkid == DMU_BONUS_BLKID) {
4173 dnode_t *dn;
4174 boolean_t evict_dbuf = db->db_pending_evict;
4175
4176 /*
4177 * If the dnode moves here, we cannot cross this
4178 * barrier until the move completes.
4179 */
4180 DB_DNODE_ENTER(db);
4181
4182 dn = DB_DNODE(db);
4183 atomic_dec_32(&dn->dn_dbufs_count);
4184
4185 /*
4186 * Decrementing the dbuf count means that the bonus
4187 * buffer's dnode hold is no longer discounted in
4188 * dnode_move(). The dnode cannot move until after
4189 * the dnode_rele() below.
4190 */
4191 DB_DNODE_EXIT(db);
4192
4193 /*
4194 * Do not reference db after its lock is dropped.
4195 * Another thread may evict it.
4196 */
4197 mutex_exit(&db->db_mtx);
4198
4199 if (evict_dbuf)
4200 dnode_evict_bonus(dn);
4201
4202 dnode_rele(dn, db);
4203 } else if (db->db_buf == NULL) {
4204 /*
4205 * This is a special case: we never associated this
4206 * dbuf with any data allocated from the ARC.
4207 */
4208 ASSERT(db->db_state == DB_UNCACHED ||
4209 db->db_state == DB_NOFILL);
4210 dbuf_destroy(db);
4211 } else if (arc_released(db->db_buf)) {
4212 /*
4213 * This dbuf has anonymous data associated with it.
4214 */
4215 dbuf_destroy(db);
4216 } else if (!db->db_partial_read && !DBUF_IS_CACHEABLE(db)) {
4217 /*
4218 * We don't expect more accesses to the dbuf, and it
4219 * is either not cacheable or was marked for eviction.
4220 */
4221 dbuf_destroy(db);
4222 } else if (!multilist_link_active(&db->db_cache_link)) {
4223 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4224
4225 dbuf_cached_state_t dcs =
4226 dbuf_include_in_metadata_cache(db) ?
4227 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
4228 db->db_caching_status = dcs;
4229
4230 multilist_insert(&dbuf_caches[dcs].cache, db);
4231 uint64_t db_size = db->db.db_size;
4232 uint64_t dbu_size = dmu_buf_user_size(&db->db);
4233 (void) zfs_refcount_add_many(
4234 &dbuf_caches[dcs].size, db_size, db);
4235 size = zfs_refcount_add_many(
4236 &dbuf_caches[dcs].size, dbu_size, db->db_user);
4237 uint8_t db_level = db->db_level;
4238 mutex_exit(&db->db_mtx);
4239
4240 if (dcs == DB_DBUF_METADATA_CACHE) {
4241 DBUF_STAT_BUMP(metadata_cache_count);
4242 DBUF_STAT_MAX(metadata_cache_size_bytes_max,
4243 size);
4244 } else {
4245 DBUF_STAT_BUMP(cache_count);
4246 DBUF_STAT_MAX(cache_size_bytes_max, size);
4247 DBUF_STAT_BUMP(cache_levels[db_level]);
4248 DBUF_STAT_INCR(cache_levels_bytes[db_level],
4249 db_size + dbu_size);
4250 }
4251
4252 if (dcs == DB_DBUF_CACHE && !evicting)
4253 dbuf_evict_notify(size);
4254 }
4255 } else {
4256 mutex_exit(&db->db_mtx);
4257 }
4258 }
4259
4260 #pragma weak dmu_buf_refcount = dbuf_refcount
4261 uint64_t
4262 dbuf_refcount(dmu_buf_impl_t *db)
4263 {
4264 return (zfs_refcount_count(&db->db_holds));
4265 }
4266
4267 uint64_t
4268 dmu_buf_user_refcount(dmu_buf_t *db_fake)
4269 {
4270 uint64_t holds;
4271 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4272
4273 mutex_enter(&db->db_mtx);
4274 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
4275 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
4276 mutex_exit(&db->db_mtx);
4277
4278 return (holds);
4279 }
4280
4281 void *
4282 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
4283 dmu_buf_user_t *new_user)
4284 {
4285 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4286
4287 mutex_enter(&db->db_mtx);
4288 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4289 if (db->db_user == old_user)
4290 db->db_user = new_user;
4291 else
4292 old_user = db->db_user;
4293 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4294 mutex_exit(&db->db_mtx);
4295
4296 return (old_user);
4297 }
4298
4299 void *
4300 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4301 {
4302 return (dmu_buf_replace_user(db_fake, NULL, user));
4303 }
4304
4305 void *
4306 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4307 {
4308 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4309
4310 db->db_user_immediate_evict = TRUE;
4311 return (dmu_buf_set_user(db_fake, user));
4312 }
4313
4314 void *
4315 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4316 {
4317 return (dmu_buf_replace_user(db_fake, user, NULL));
4318 }
4319
4320 void *
4321 dmu_buf_get_user(dmu_buf_t *db_fake)
4322 {
4323 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4324
4325 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4326 return (db->db_user);
4327 }
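
/*
 * Example use of the dbuf user API (an illustrative sketch; my_state_t
 * and my_state_evict() are hypothetical, and dmu_buf_init_user() is the
 * helper declared in dmu.h): attach per-consumer state to a held
 * dmu_buf_t *db and free it when the user is evicted.  In this sketch
 * the dmu_buf_user_t is the first member, so the eviction callback can
 * free the containing structure directly.
 *
 *	typedef struct my_state {
 *		dmu_buf_user_t ms_dbu;
 *		dmu_buf_t *ms_db;
 *	} my_state_t;
 *
 *	static void
 *	my_state_evict(void *arg)
 *	{
 *		kmem_free(arg, sizeof (my_state_t));
 *	}
 *
 *	my_state_t *ms = kmem_zalloc(sizeof (*ms), KM_SLEEP);
 *	ms->ms_db = db;
 *	dmu_buf_init_user(&ms->ms_dbu, my_state_evict, NULL, &ms->ms_db);
 *
 *	If dmu_buf_set_user() returns non-NULL, another user was already
 *	attached and ms should simply be freed again:
 *
 *	if (dmu_buf_set_user(db, &ms->ms_dbu) != NULL)
 *		kmem_free(ms, sizeof (*ms));
 */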
4328
4329 uint64_t
4330 dmu_buf_user_size(dmu_buf_t *db_fake)
4331 {
4332 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4333 if (db->db_user == NULL)
4334 return (0);
4335 return (atomic_load_64(&db->db_user->dbu_size));
4336 }
4337
4338 void
4339 dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd)
4340 {
4341 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4342 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4343 ASSERT3P(db->db_user, !=, NULL);
4344 ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd);
4345 atomic_add_64(&db->db_user->dbu_size, nadd);
4346 }
4347
4348 void
4349 dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub)
4350 {
4351 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4352 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4353 ASSERT3P(db->db_user, !=, NULL);
4354 ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub);
4355 atomic_sub_64(&db->db_user->dbu_size, nsub);
4356 }
4357
4358 void
4359 dmu_buf_user_evict_wait(void)
4360 {
4361 taskq_wait(dbu_evict_taskq);
4362 }
4363
4364 blkptr_t *
4365 dmu_buf_get_blkptr(dmu_buf_t *db)
4366 {
4367 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4368 return (dbi->db_blkptr);
4369 }
4370
4371 objset_t *
4372 dmu_buf_get_objset(dmu_buf_t *db)
4373 {
4374 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4375 return (dbi->db_objset);
4376 }
4377
4378 static void
4379 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4380 {
4381 /* ASSERT(dmu_tx_is_syncing(tx) */
4382 ASSERT(MUTEX_HELD(&db->db_mtx));
4383
4384 if (db->db_blkptr != NULL)
4385 return;
4386
4387 if (db->db_blkid == DMU_SPILL_BLKID) {
4388 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4389 BP_ZERO(db->db_blkptr);
4390 return;
4391 }
4392 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4393 /*
4394 * This buffer was allocated at a time when there was
4395 * no available blkptrs from the dnode, or it was
4396 * inappropriate to hook it in (i.e., nlevels mismatch).
4397 */
4398 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4399 ASSERT0P(db->db_parent);
4400 db->db_parent = dn->dn_dbuf;
4401 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4402 DBUF_VERIFY(db);
4403 } else {
4404 dmu_buf_impl_t *parent = db->db_parent;
4405 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4406
4407 ASSERT(dn->dn_phys->dn_nlevels > 1);
4408 if (parent == NULL) {
4409 mutex_exit(&db->db_mtx);
4410 rw_enter(&dn->dn_struct_rwlock, RW_READER);
4411 parent = dbuf_hold_level(dn, db->db_level + 1,
4412 db->db_blkid >> epbs, db);
4413 rw_exit(&dn->dn_struct_rwlock);
4414 mutex_enter(&db->db_mtx);
4415 db->db_parent = parent;
4416 }
4417 db->db_blkptr = (blkptr_t *)parent->db.db_data +
4418 (db->db_blkid & ((1ULL << epbs) - 1));
4419 DBUF_VERIFY(db);
4420 }
4421 }
4422
4423 static void
4424 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4425 {
4426 dmu_buf_impl_t *db = dr->dr_dbuf;
4427 void *data = dr->dt.dl.dr_data;
4428
4429 ASSERT0(db->db_level);
4430 ASSERT(MUTEX_HELD(&db->db_mtx));
4431 ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4432 ASSERT(data != NULL);
4433
4434 dnode_t *dn = dr->dr_dnode;
4435 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4436 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4437 memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4438
4439 dbuf_sync_leaf_verify_bonus_dnode(dr);
4440
4441 dbuf_undirty_bonus(dr);
4442 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4443 }
4444
4445 /*
4446 * When syncing out a block of dnodes, adjust the block to deal with
4447 * encryption. Normally, we make sure the block is decrypted before writing
4448 * it. If we have crypt params, then we are writing a raw (encrypted) block,
4449 * from a raw receive. In this case, set the ARC buf's crypt params so
4450 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4451 */
4452 static void
4453 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4454 {
4455 int err;
4456 dmu_buf_impl_t *db = dr->dr_dbuf;
4457
4458 ASSERT(MUTEX_HELD(&db->db_mtx));
4459 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4460 ASSERT0(db->db_level);
4461
4462 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4463 zbookmark_phys_t zb;
4464
4465 /*
4466 * Unfortunately, there is currently no mechanism for
4467 * syncing context to handle decryption errors. An error
4468 * here is only possible if an attacker maliciously
4469 * changed a dnode block and updated the associated
4470 * checksums going up the block tree.
4471 */
4472 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4473 db->db.db_object, db->db_level, db->db_blkid);
4474 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4475 &zb, B_TRUE);
4476 if (err)
4477 panic("Invalid dnode block MAC");
4478 } else if (dr->dt.dl.dr_has_raw_params) {
4479 (void) arc_release(dr->dt.dl.dr_data, db);
4480 arc_convert_to_raw(dr->dt.dl.dr_data,
4481 dmu_objset_id(db->db_objset),
4482 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4483 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4484 }
4485 }
4486
4487 /*
4488 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4489 * is critical that we not allow the compiler to inline this function into
4490 * dbuf_sync_list() thereby drastically bloating the stack usage.
4491 */
4492 noinline static void
4493 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4494 {
4495 dmu_buf_impl_t *db = dr->dr_dbuf;
4496 dnode_t *dn = dr->dr_dnode;
4497
4498 ASSERT(dmu_tx_is_syncing(tx));
4499
4500 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4501
4502 mutex_enter(&db->db_mtx);
4503
4504 ASSERT(db->db_level > 0);
4505 DBUF_VERIFY(db);
4506
4507 /* Read the block if it hasn't been read yet. */
4508 if (db->db_buf == NULL) {
4509 mutex_exit(&db->db_mtx);
4510 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4511 mutex_enter(&db->db_mtx);
4512 }
4513 ASSERT3U(db->db_state, ==, DB_CACHED);
4514 ASSERT(db->db_buf != NULL);
4515
4516 /* Indirect block size must match what the dnode thinks it is. */
4517 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4518 dbuf_check_blkptr(dn, db);
4519
4520 /* Provide the pending dirty record to child dbufs */
4521 db->db_data_pending = dr;
4522
4523 mutex_exit(&db->db_mtx);
4524
4525 dbuf_write(dr, db->db_buf, tx);
4526
4527 zio_t *zio = dr->dr_zio;
4528 mutex_enter(&dr->dt.di.dr_mtx);
4529 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4530 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4531 mutex_exit(&dr->dt.di.dr_mtx);
4532 zio_nowait(zio);
4533 }
4534
4535 /*
4536 * Verify that the size of the data in our bonus buffer does not exceed
4537 * its recorded size.
4538 *
4539 * The purpose of this verification is to catch any cases in development
4540 * where the size of a phys structure (e.g. space_map_phys_t) grows and,
4541 * due to incorrect feature management, older pools expect to read more
4542 * data even though they didn't actually write it to begin with.
4543 *
4544 * For example, this would catch an error in the feature logic where we
4545 * open an older pool and we expect to write the space map histogram of
4546 * a space map with size SPACE_MAP_SIZE_V0.
4547 */
4548 static void
4549 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4550 {
4551 #ifdef ZFS_DEBUG
4552 dnode_t *dn = dr->dr_dnode;
4553
4554 /*
4555 * Encrypted bonus buffers can have data past their bonuslen.
4556 * Skip the verification of these blocks.
4557 */
4558 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4559 return;
4560
4561 uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4562 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4563 ASSERT3U(bonuslen, <=, maxbonuslen);
4564
4565 arc_buf_t *datap = dr->dt.dl.dr_data;
4566 char *datap_end = ((char *)datap) + bonuslen;
4567 char *datap_max = ((char *)datap) + maxbonuslen;
4568
4569 /* ensure that everything is zero after our data */
4570 for (; datap_end < datap_max; datap_end++)
4571 ASSERT0(*datap_end);
4572 #endif
4573 }
4574
4575 static blkptr_t *
4576 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4577 {
4578 /* This must be a lightweight dirty record. */
4579 ASSERT0P(dr->dr_dbuf);
4580 dnode_t *dn = dr->dr_dnode;
4581
4582 if (dn->dn_phys->dn_nlevels == 1) {
4583 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4584 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4585 } else {
4586 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4587 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4588 VERIFY3U(parent_db->db_level, ==, 1);
4589 VERIFY3P(DB_DNODE(parent_db), ==, dn);
4590 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4591 blkptr_t *bp = parent_db->db.db_data;
4592 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4593 }
4594 }
4595
4596 static void
4597 dbuf_lightweight_ready(zio_t *zio)
4598 {
4599 dbuf_dirty_record_t *dr = zio->io_private;
4600 blkptr_t *bp = zio->io_bp;
4601
4602 if (zio->io_error != 0)
4603 return;
4604
4605 dnode_t *dn = dr->dr_dnode;
4606
4607 blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4608 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4609 int64_t delta = bp_get_dsize_sync(spa, bp) -
4610 bp_get_dsize_sync(spa, bp_orig);
4611 dnode_diduse_space(dn, delta);
4612
4613 uint64_t blkid = dr->dt.dll.dr_blkid;
4614 mutex_enter(&dn->dn_mtx);
4615 if (blkid > dn->dn_phys->dn_maxblkid) {
4616 ASSERT0(dn->dn_objset->os_raw_receive);
4617 dn->dn_phys->dn_maxblkid = blkid;
4618 }
4619 mutex_exit(&dn->dn_mtx);
4620
4621 if (!BP_IS_EMBEDDED(bp)) {
4622 uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4623 BP_SET_FILL(bp, fill);
4624 }
4625
4626 dmu_buf_impl_t *parent_db;
4627 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4628 if (dr->dr_parent == NULL) {
4629 parent_db = dn->dn_dbuf;
4630 } else {
4631 parent_db = dr->dr_parent->dr_dbuf;
4632 }
4633 rw_enter(&parent_db->db_rwlock, RW_WRITER);
4634 *bp_orig = *bp;
4635 rw_exit(&parent_db->db_rwlock);
4636 }
4637
4638 static void
4639 dbuf_lightweight_done(zio_t *zio)
4640 {
4641 dbuf_dirty_record_t *dr = zio->io_private;
4642
4643 VERIFY0(zio->io_error);
4644
4645 objset_t *os = dr->dr_dnode->dn_objset;
4646 dmu_tx_t *tx = os->os_synctx;
4647
4648 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4649 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4650 } else {
4651 dsl_dataset_t *ds = os->os_dsl_dataset;
4652 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4653 dsl_dataset_block_born(ds, zio->io_bp, tx);
4654 }
4655
4656 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4657 zio->io_txg);
4658
4659 abd_free(dr->dt.dll.dr_abd);
4660 kmem_free(dr, sizeof (*dr));
4661 }
4662
4663 noinline static void
4664 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4665 {
4666 dnode_t *dn = dr->dr_dnode;
4667 zio_t *pio;
4668 if (dn->dn_phys->dn_nlevels == 1) {
4669 pio = dn->dn_zio;
4670 } else {
4671 pio = dr->dr_parent->dr_zio;
4672 }
4673
4674 zbookmark_phys_t zb = {
4675 .zb_objset = dmu_objset_id(dn->dn_objset),
4676 .zb_object = dn->dn_object,
4677 .zb_level = 0,
4678 .zb_blkid = dr->dt.dll.dr_blkid,
4679 };
4680
4681 /*
4682 * See comment in dbuf_write(). This is so that zio->io_bp_orig
4683 * will have the old BP in dbuf_lightweight_done().
4684 */
4685 dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4686
4687 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4688 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4689 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4690 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4691 dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
4692 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4693
4694 zio_nowait(dr->dr_zio);
4695 }
4696
4697 /*
4698 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4699 * critical that we not allow the compiler to inline this function into
4700 * dbuf_sync_list() thereby drastically bloating the stack usage.
4701 */
4702 noinline static void
4703 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4704 {
4705 arc_buf_t **datap = &dr->dt.dl.dr_data;
4706 dmu_buf_impl_t *db = dr->dr_dbuf;
4707 dnode_t *dn = dr->dr_dnode;
4708 objset_t *os;
4709 uint64_t txg = tx->tx_txg;
4710
4711 ASSERT(dmu_tx_is_syncing(tx));
4712
4713 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4714
4715 mutex_enter(&db->db_mtx);
4716 /*
4717 * To be synced, we must be dirtied. But we might have been freed
4718 * after the dirty.
4719 */
4720 if (db->db_state == DB_UNCACHED) {
4721 /* This buffer has been freed since it was dirtied */
4722 ASSERT0P(db->db.db_data);
4723 } else if (db->db_state == DB_FILL) {
4724 /* This buffer was freed and is now being re-filled */
4725 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4726 } else if (db->db_state == DB_READ) {
4727 /*
4728 * This buffer was either cloned or had a Direct I/O write
4729 * occur and has an in-flight read on the BP. It is safe to
4730 * issue the write here, because the read has already been
4731 * issued and the contents won't change.
4732 *
4733 * We can verify the case of both the clone and Direct I/O
4734 * write by making sure the first dirty record for the dbuf
4735 * has no ARC buffer associated with it.
4736 */
4737 dbuf_dirty_record_t *dr_head =
4738 list_head(&db->db_dirty_records);
4739 ASSERT0P(db->db_buf);
4740 ASSERT0P(db->db.db_data);
4741 ASSERT0P(dr_head->dt.dl.dr_data);
4742 ASSERT3U(dr_head->dt.dl.dr_override_state, ==, DR_OVERRIDDEN);
4743 } else {
4744 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4745 }
4746 DBUF_VERIFY(db);
4747
4748 if (db->db_blkid == DMU_SPILL_BLKID) {
4749 mutex_enter(&dn->dn_mtx);
4750 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4751 /*
4752 * In the previous transaction group, the bonus buffer
4753 * was entirely used to store the attributes for the
4754 * dnode which overrode the dn_spill field. However,
4755 * when adding more attributes to the file a spill
4756 * block was required to hold the extra attributes.
4757 *
4758 * Make sure to clear the garbage left in the dn_spill
4759 * field from the previous attributes in the bonus
4760 * buffer. Otherwise, after writing out the spill
4761 * block to the new allocated dva, it will free
4762 * the old block pointed to by the invalid dn_spill.
4763 */
4764 db->db_blkptr = NULL;
4765 }
4766 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4767 mutex_exit(&dn->dn_mtx);
4768 }
4769
4770 /*
4771 * If this is a bonus buffer, simply copy the bonus data into the
4772 * dnode. It will be written out when the dnode is synced (and it
4773 * will be synced, since it must have been dirty for dbuf_sync to
4774 * be called).
4775 */
4776 if (db->db_blkid == DMU_BONUS_BLKID) {
4777 ASSERT(dr->dr_dbuf == db);
4778 dbuf_sync_bonus(dr, tx);
4779 return;
4780 }
4781
4782 os = dn->dn_objset;
4783
4784 /*
4785 * This function may have dropped the db_mtx lock allowing a dmu_sync
4786 * operation to sneak in. As a result, we need to ensure that we
4787 * don't check the dr_override_state until we have returned from
4788 * dbuf_check_blkptr.
4789 */
4790 dbuf_check_blkptr(dn, db);
4791
4792 /*
4793 * If this buffer is in the middle of an immediate write, wait for the
4794 * synchronous IO to complete.
4795 *
4796 * This is also valid even with Direct I/O writes setting a dirty
4797 * record's override state to DR_IN_DMU_SYNC, because all
4798 * Direct I/O writes happen in open-context.
4799 */
4800 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4801 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4802 cv_wait(&db->db_changed, &db->db_mtx);
4803 }
4804
4805 /*
4806 * If this is a dnode block, ensure it is appropriately encrypted
4807 * or decrypted, depending on what we are writing to it this txg.
4808 */
4809 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4810 dbuf_prepare_encrypted_dnode_leaf(dr);
4811
4812 if (*datap != NULL && *datap == db->db_buf &&
4813 dn->dn_object != DMU_META_DNODE_OBJECT &&
4814 zfs_refcount_count(&db->db_holds) > 1) {
4815 /*
4816 * If this buffer is currently "in use" (i.e., there
4817 * are active holds and db_data still references it),
4818 * then make a copy before we start the write so that
4819 * any modifications from the open txg will not leak
4820 * into this write.
4821 *
4822 * NOTE: this copy does not need to be made for
4823 * objects only modified in the syncing context (e.g.
4824 * meta-dnode blocks).
4825 */
4826 int psize = arc_buf_size(*datap);
4827 int lsize = arc_buf_lsize(*datap);
4828 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4829 enum zio_compress compress_type = arc_get_compression(*datap);
4830 uint8_t complevel = arc_get_complevel(*datap);
4831
4832 if (arc_is_encrypted(*datap)) {
4833 boolean_t byteorder;
4834 uint8_t salt[ZIO_DATA_SALT_LEN];
4835 uint8_t iv[ZIO_DATA_IV_LEN];
4836 uint8_t mac[ZIO_DATA_MAC_LEN];
4837
4838 arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4839 *datap = arc_alloc_raw_buf(os->os_spa, db,
4840 dmu_objset_id(os), byteorder, salt, iv, mac,
4841 dn->dn_type, psize, lsize, compress_type,
4842 complevel);
4843 } else if (compress_type != ZIO_COMPRESS_OFF) {
4844 ASSERT3U(type, ==, ARC_BUFC_DATA);
4845 *datap = arc_alloc_compressed_buf(os->os_spa, db,
4846 psize, lsize, compress_type, complevel);
4847 } else {
4848 *datap = arc_alloc_buf(os->os_spa, db, type, psize);
4849 }
4850 memcpy((*datap)->b_data, db->db.db_data, psize);
4851 }
4852 db->db_data_pending = dr;
4853
4854 mutex_exit(&db->db_mtx);
4855
4856 dbuf_write(dr, *datap, tx);
4857
4858 ASSERT(!list_link_active(&dr->dr_dirty_node));
4859 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4860 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4861 } else {
4862 zio_nowait(dr->dr_zio);
4863 }
4864 }
4865
4866 /*
4867 * Syncs out a range of dirty records for indirect or leaf dbufs. May be
4868 * called recursively from dbuf_sync_indirect().
4869 */
4870 void
4871 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4872 {
4873 dbuf_dirty_record_t *dr;
4874
4875 while ((dr = list_head(list))) {
4876 if (dr->dr_zio != NULL) {
4877 /*
4878 * If we find an already initialized zio then we
4879 * are processing the meta-dnode, and we have finished.
4880 * The dbufs for all dnodes are put back on the list
4881 * during processing, so that we can zio_wait()
4882 * these IOs after initiating all child IOs.
4883 */
4884 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4885 DMU_META_DNODE_OBJECT);
4886 break;
4887 }
4888 list_remove(list, dr);
4889 if (dr->dr_dbuf == NULL) {
4890 dbuf_sync_lightweight(dr, tx);
4891 } else {
4892 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4893 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4894 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4895 }
4896 if (dr->dr_dbuf->db_level > 0)
4897 dbuf_sync_indirect(dr, tx);
4898 else
4899 dbuf_sync_leaf(dr, tx);
4900 }
4901 }
4902 }
4903
4904 static void
4905 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4906 {
4907 (void) buf;
4908 dmu_buf_impl_t *db = vdb;
4909 dnode_t *dn;
4910 blkptr_t *bp = zio->io_bp;
4911 blkptr_t *bp_orig = &zio->io_bp_orig;
4912 spa_t *spa = zio->io_spa;
4913 int64_t delta;
4914 uint64_t fill = 0;
4915 int i;
4916
4917 ASSERT3P(db->db_blkptr, !=, NULL);
4918 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4919
4920 DB_DNODE_ENTER(db);
4921 dn = DB_DNODE(db);
4922 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4923 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4924 zio->io_prev_space_delta = delta;
4925
4926 if (BP_GET_BIRTH(bp) != 0) {
4927 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4928 BP_GET_TYPE(bp) == dn->dn_type) ||
4929 (db->db_blkid == DMU_SPILL_BLKID &&
4930 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4931 BP_IS_EMBEDDED(bp));
4932 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4933 }
4934
4935 mutex_enter(&db->db_mtx);
4936
4937 #ifdef ZFS_DEBUG
4938 if (db->db_blkid == DMU_SPILL_BLKID) {
4939 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4940 ASSERT(!(BP_IS_HOLE(bp)) &&
4941 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4942 }
4943 #endif
4944
4945 if (db->db_level == 0) {
4946 mutex_enter(&dn->dn_mtx);
4947 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4948 db->db_blkid != DMU_SPILL_BLKID) {
4949 ASSERT0(db->db_objset->os_raw_receive);
4950 dn->dn_phys->dn_maxblkid = db->db_blkid;
4951 }
4952 mutex_exit(&dn->dn_mtx);
4953
4954 if (dn->dn_type == DMU_OT_DNODE) {
4955 i = 0;
4956 while (i < db->db.db_size) {
4957 dnode_phys_t *dnp =
4958 (void *)(((char *)db->db.db_data) + i);
4959
4960 i += DNODE_MIN_SIZE;
4961 if (dnp->dn_type != DMU_OT_NONE) {
4962 fill++;
4963 for (int j = 0; j < dnp->dn_nblkptr;
4964 j++) {
4965 (void) zfs_blkptr_verify(spa,
4966 &dnp->dn_blkptr[j],
4967 BLK_CONFIG_SKIP,
4968 BLK_VERIFY_HALT);
4969 }
4970 if (dnp->dn_flags &
4971 DNODE_FLAG_SPILL_BLKPTR) {
4972 (void) zfs_blkptr_verify(spa,
4973 DN_SPILL_BLKPTR(dnp),
4974 BLK_CONFIG_SKIP,
4975 BLK_VERIFY_HALT);
4976 }
4977 i += dnp->dn_extra_slots *
4978 DNODE_MIN_SIZE;
4979 }
4980 }
4981 } else {
4982 if (BP_IS_HOLE(bp)) {
4983 fill = 0;
4984 } else {
4985 fill = 1;
4986 }
4987 }
4988 } else {
4989 blkptr_t *ibp = db->db.db_data;
4990 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4991 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4992 if (BP_IS_HOLE(ibp))
4993 continue;
4994 (void) zfs_blkptr_verify(spa, ibp,
4995 BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
4996 fill += BP_GET_FILL(ibp);
4997 }
4998 }
4999 DB_DNODE_EXIT(db);
5000
5001 if (!BP_IS_EMBEDDED(bp))
5002 BP_SET_FILL(bp, fill);
5003
5004 mutex_exit(&db->db_mtx);
5005
5006 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
5007 *db->db_blkptr = *bp;
5008 dmu_buf_unlock_parent(db, dblt, FTAG);
5009 }
5010
5011 /*
5012 * This function gets called just prior to running through the compression
5013 * stage of the zio pipeline. If we're an indirect block comprised of only
5014 * holes, then we want this indirect to be compressed away to a hole. In
5015 * order to do that we must zero out any information about the holes that
5016 * this indirect points to before we try to compress it.
5017 */
5018 static void
5019 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
5020 {
5021 (void) zio, (void) buf;
5022 dmu_buf_impl_t *db = vdb;
5023 blkptr_t *bp;
5024 unsigned int epbs, i;
5025
5026 ASSERT3U(db->db_level, >, 0);
5027 DB_DNODE_ENTER(db);
5028 epbs = DB_DNODE(db)->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
5029 DB_DNODE_EXIT(db);
5030 ASSERT3U(epbs, <, 31);
5031
5032 /* Determine if all our children are holes */
5033 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
5034 if (!BP_IS_HOLE(bp))
5035 break;
5036 }
5037
5038 /*
5039 * If all the children are holes, then zero them all out so that
5040 * this indirect block can be compressed away to a hole.
5041 */
5042 if (i == 1ULL << epbs) {
5043 /*
5044 * We only found holes. Grab the rwlock to prevent
5045 * anybody from reading the blocks we're about to
5046 * zero out.
5047 */
5048 rw_enter(&db->db_rwlock, RW_WRITER);
5049 memset(db->db.db_data, 0, db->db.db_size);
5050 rw_exit(&db->db_rwlock);
5051 }
5052 }
5053
5054 static void
5055 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
5056 {
5057 (void) buf;
5058 dmu_buf_impl_t *db = vdb;
5059 blkptr_t *bp_orig = &zio->io_bp_orig;
5060 blkptr_t *bp = db->db_blkptr;
5061 objset_t *os = db->db_objset;
5062 dmu_tx_t *tx = os->os_synctx;
5063
5064 ASSERT0(zio->io_error);
5065 ASSERT(db->db_blkptr == bp);
5066
5067 /*
5068 * For nopwrites and rewrites we ensure that the bp matches our
5069 * original and bypass all the accounting.
5070 */
5071 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
5072 ASSERT(BP_EQUAL(bp, bp_orig));
5073 } else {
5074 dsl_dataset_t *ds = os->os_dsl_dataset;
5075 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
5076 dsl_dataset_block_born(ds, bp, tx);
5077 }
5078
5079 mutex_enter(&db->db_mtx);
5080
5081 DBUF_VERIFY(db);
5082
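/*
 * The record we just wrote must be the tail of this dbuf's dirty
 * record list; detach it now that its data has been written.
 */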
5083 dbuf_dirty_record_t *dr = db->db_data_pending;
5084 dnode_t *dn = dr->dr_dnode;
5085 ASSERT(!list_link_active(&dr->dr_dirty_node));
5086 ASSERT(dr->dr_dbuf == db);
5087 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
5088 list_remove(&db->db_dirty_records, dr);
5089
5090 #ifdef ZFS_DEBUG
5091 if (db->db_blkid == DMU_SPILL_BLKID) {
5092 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
5093 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
5094 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
5095 }
5096 #endif
5097
5098 if (db->db_level == 0) {
5099 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
5100 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
5101
5102 /* no dr_data if this is a NO_FILL or Direct I/O */
5103 if (dr->dt.dl.dr_data != NULL &&
5104 dr->dt.dl.dr_data != db->db_buf) {
5105 ASSERT3B(dr->dt.dl.dr_brtwrite, ==, B_FALSE);
5106 ASSERT3B(dr->dt.dl.dr_diowrite, ==, B_FALSE);
5107 arc_buf_destroy(dr->dt.dl.dr_data, db);
5108 }
5109 } else {
5110 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
5111 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
5112 if (!BP_IS_HOLE(db->db_blkptr)) {
5113 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
5114 SPA_BLKPTRSHIFT;
5115 ASSERT3U(db->db_blkid, <=,
5116 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
5117 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
5118 db->db.db_size);
5119 }
5120 mutex_destroy(&dr->dt.di.dr_mtx);
5121 list_destroy(&dr->dt.di.dr_children);
5122 }
5123
5124 cv_broadcast(&db->db_changed);
5125 ASSERT(db->db_dirtycnt > 0);
5126 db->db_dirtycnt -= 1;
5127 db->db_data_pending = NULL;
5128 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
5129
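/*
 * Return this record's accounted dirty space to the pool now that the
 * write has completed, then free the dirty record itself.
 */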
5130 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
5131 zio->io_txg);
5132
5133 kmem_cache_free(dbuf_dirty_kmem_cache, dr);
5134 }
5135
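/*
 * Thin zio callbacks for no-fill and override writes; they forward to
 * the ARC-style dbuf_write_ready()/dbuf_write_done() callbacks with a
 * NULL arc_buf_t, since these writes do not go through arc_write().
 */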
5136 static void
5137 dbuf_write_nofill_ready(zio_t *zio)
5138 {
5139 dbuf_write_ready(zio, NULL, zio->io_private);
5140 }
5141
5142 static void
5143 dbuf_write_nofill_done(zio_t *zio)
5144 {
5145 dbuf_write_done(zio, NULL, zio->io_private);
5146 }
5147
5148 static void
5149 dbuf_write_override_ready(zio_t *zio)
5150 {
5151 dbuf_dirty_record_t *dr = zio->io_private;
5152 dmu_buf_impl_t *db = dr->dr_dbuf;
5153
5154 dbuf_write_ready(zio, NULL, db);
5155 }
5156
5157 static void
5158 dbuf_write_override_done(zio_t *zio)
5159 {
5160 dbuf_dirty_record_t *dr = zio->io_private;
5161 dmu_buf_impl_t *db = dr->dr_dbuf;
5162 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
5163
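/*
 * If the written BP does not match the override BP supplied in open
 * context, the overridden block is no longer referenced: free it
 * (unless it is a hole) and release the ARC buffer.
 */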
5164 mutex_enter(&db->db_mtx);
5165 if (!BP_EQUAL(zio->io_bp, obp)) {
5166 if (!BP_IS_HOLE(obp))
5167 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
5168 arc_release(dr->dt.dl.dr_data, db);
5169 }
5170 mutex_exit(&db->db_mtx);
5171
5172 dbuf_write_done(zio, NULL, db);
5173
5174 if (zio->io_abd != NULL)
5175 abd_free(zio->io_abd);
5176 }
5177
5178 typedef struct dbuf_remap_impl_callback_arg {
5179 objset_t *drica_os;
5180 uint64_t drica_blk_birth;
5181 dmu_tx_t *drica_tx;
5182 } dbuf_remap_impl_callback_arg_t;
5183
5184 static void
5185 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
5186 void *arg)
5187 {
5188 dbuf_remap_impl_callback_arg_t *drica = arg;
5189 objset_t *os = drica->drica_os;
5190 spa_t *spa = dmu_objset_spa(os);
5191 dmu_tx_t *tx = drica->drica_tx;
5192
5193 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5194
5195 if (os == spa_meta_objset(spa)) {
5196 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
5197 } else {
5198 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
5199 size, drica->drica_blk_birth, tx);
5200 }
5201 }
5202
5203 static void
5204 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
5205 {
5206 blkptr_t bp_copy = *bp;
5207 spa_t *spa = dmu_objset_spa(dn->dn_objset);
5208 dbuf_remap_impl_callback_arg_t drica;
5209
5210 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5211
5212 drica.drica_os = dn->dn_objset;
5213 drica.drica_blk_birth = BP_GET_BIRTH(bp);
5214 drica.drica_tx = tx;
5215 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
5216 &drica)) {
5217 /*
5218 * If the blkptr being remapped is tracked by a livelist,
5219 * then we need to make sure the livelist reflects the update.
5220 * First, cancel out the old blkptr by appending a 'FREE'
5221 * entry. Next, add an 'ALLOC' to track the new version. This
5222 * way we avoid trying to free an inaccurate blkptr at delete.
5223 * Note that embedded blkptrs are not tracked in livelists.
5224 */
5225 if (dn->dn_objset != spa_meta_objset(spa)) {
5226 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
5227 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
5228 BP_GET_BIRTH(bp) > ds->ds_dir->dd_origin_txg) {
5229 ASSERT(!BP_IS_EMBEDDED(bp));
5230 ASSERT(dsl_dir_is_clone(ds->ds_dir));
5231 ASSERT(spa_feature_is_enabled(spa,
5232 SPA_FEATURE_LIVELIST));
5233 bplist_append(&ds->ds_dir->dd_pending_frees,
5234 bp);
5235 bplist_append(&ds->ds_dir->dd_pending_allocs,
5236 &bp_copy);
5237 }
5238 }
5239
5240 /*
5241 * The db_rwlock prevents dbuf_read_impl() from
5242 * dereferencing the BP while we are changing it. To
5243 * avoid lock contention, only grab it when we are actually
5244 * changing the BP.
5245 */
5246 if (rw != NULL)
5247 rw_enter(rw, RW_WRITER);
5248 *bp = bp_copy;
5249 if (rw != NULL)
5250 rw_exit(rw);
5251 }
5252 }
5253
5254 /*
5255 * Remap any existing BP's to concrete vdevs, if possible.
5256 */
5257 static void
5258 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
5259 {
5260 spa_t *spa = dmu_objset_spa(db->db_objset);
5261 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5262
5263 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
5264 return;
5265
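/*
 * Indirect blocks hold an array of child block pointers to remap;
 * meta-dnode blocks hold dnodes whose embedded block pointers must
 * each be remapped individually.
 */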
5266 if (db->db_level > 0) {
5267 blkptr_t *bp = db->db.db_data;
5268 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
5269 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
5270 }
5271 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
5272 dnode_phys_t *dnp = db->db.db_data;
5273 ASSERT3U(dn->dn_type, ==, DMU_OT_DNODE);
5274 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
5275 i += dnp[i].dn_extra_slots + 1) {
5276 for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
5277 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
5278 &dn->dn_dbuf->db_rwlock);
5279 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
5280 tx);
5281 }
5282 }
5283 }
5284 }
5285
5287 /*
5288 * Populate dr->dr_zio with a zio to commit a dirty buffer to disk.
5289 * Caller is responsible for issuing the zio_[no]wait(dr->dr_zio).
5290 */
5291 static void
5292 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
5293 {
5294 dmu_buf_impl_t *db = dr->dr_dbuf;
5295 dnode_t *dn = dr->dr_dnode;
5296 objset_t *os;
5297 dmu_buf_impl_t *parent = db->db_parent;
5298 uint64_t txg = tx->tx_txg;
5299 zbookmark_phys_t zb;
5300 zio_prop_t zp;
5301 zio_t *pio; /* parent I/O */
5302 int wp_flag = 0;
5303
5304 ASSERT(dmu_tx_is_syncing(tx));
5305
5306 os = dn->dn_objset;
5307
5308 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
5309 /*
5310 * Private object buffers are released here rather than in
5311 * dbuf_dirty() since they are only modified in the syncing
5312 * context and we don't want the overhead of making multiple
5313 * copies of the data.
5314 */
5315 if (BP_IS_HOLE(db->db_blkptr))
5316 arc_buf_thaw(data);
5317 else
5318 dbuf_release_bp(db);
5319 dbuf_remap(dn, db, tx);
5320 }
5321
5322 if (parent != dn->dn_dbuf) {
5323 /* Our parent is an indirect block. */
5324 /* We have a dirty parent that has been scheduled for write. */
5325 ASSERT(parent && parent->db_data_pending);
5326 /* Our parent's buffer is one level closer to the dnode. */
5327 ASSERT(db->db_level == parent->db_level-1);
5328 /*
5329 * We're about to modify our parent's db_data by modifying
5330 * our block pointer, so the parent must be released.
5331 */
5332 ASSERT(arc_released(parent->db_buf));
5333 pio = parent->db_data_pending->dr_zio;
5334 } else {
5335 /* Our parent is the dnode itself. */
5336 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
5337 db->db_blkid != DMU_SPILL_BLKID) ||
5338 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
5339 if (db->db_blkid != DMU_SPILL_BLKID)
5340 ASSERT3P(db->db_blkptr, ==,
5341 &dn->dn_phys->dn_blkptr[db->db_blkid]);
5342 pio = dn->dn_zio;
5343 }
5344
5345 ASSERT(db->db_level == 0 || data == db->db_buf);
5346 ASSERT3U(BP_GET_BIRTH(db->db_blkptr), <=, txg);
5347 ASSERT(pio);
5348
5349 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
5350 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
5351 db->db.db_object, db->db_level, db->db_blkid);
5352
5353 if (db->db_blkid == DMU_SPILL_BLKID)
5354 wp_flag = WP_SPILL;
5355 wp_flag |= (data == NULL) ? WP_NOFILL : 0;
5356
5357 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
5358
5359 /*
5360 * Set rewrite properties for zfs_rewrite() operations.
5361 */
5362 if (db->db_level == 0 && dr->dt.dl.dr_rewrite) {
5363 zp.zp_rewrite = B_TRUE;
5364
5365 /*
5366 * Mark the physical rewrite feature for activation; it will be
5367 * activated automatically during dataset sync.
5368 */
5369 dsl_dataset_t *ds = os->os_dsl_dataset;
5370 if (!dsl_dataset_feature_is_active(ds,
5371 SPA_FEATURE_PHYSICAL_REWRITE)) {
5372 ds->ds_feature_activation[
5373 SPA_FEATURE_PHYSICAL_REWRITE] = (void *)B_TRUE;
5374 }
5375 }
5376
5377 /*
5378 * We copy the blkptr now (rather than when we instantiate the dirty
5379 * record), because its value can change between open context and
5380 * syncing context. We do not need to hold dn_struct_rwlock to read
5381 * db_blkptr because we are in syncing context.
5382 */
5383 dr->dr_bp_copy = *db->db_blkptr;
5384
5385 if (db->db_level == 0 &&
5386 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
5387 /*
5388 * The BP for this block has been provided by open context
5389 * (by dmu_sync(), dmu_write_direct(),
5390 * or dmu_buf_write_embedded()).
5391 */
5392 abd_t *contents = (data != NULL) ?
5393 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
5394
5395 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
5396 contents, db->db.db_size, db->db.db_size, &zp,
5397 dbuf_write_override_ready, NULL,
5398 dbuf_write_override_done,
5399 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5400 mutex_enter(&db->db_mtx);
5401 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
5402 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
5403 dr->dt.dl.dr_copies, dr->dt.dl.dr_gang_copies,
5404 dr->dt.dl.dr_nopwrite, dr->dt.dl.dr_brtwrite);
5405 mutex_exit(&db->db_mtx);
5406 } else if (data == NULL) {
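/*
 * No-fill write: there is no data to write, so issue a plain
 * zio_write() with a NULL ABD and the NODATA flag.
 */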
5407 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
5408 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
5409 dr->dr_zio = zio_write(pio, os->os_spa, txg,
5410 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
5411 dbuf_write_nofill_ready, NULL,
5412 dbuf_write_nofill_done, db,
5413 ZIO_PRIORITY_ASYNC_WRITE,
5414 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
5415 } else {
5416 ASSERT(arc_released(data));
5417
5418 /*
5419 * For indirect blocks, we want to set up the children
5420 * ready callback so that we can properly handle an indirect
5421 * block that only contains holes.
5422 */
5423 arc_write_done_func_t *children_ready_cb = NULL;
5424 if (db->db_level != 0)
5425 children_ready_cb = dbuf_write_children_ready;
5426
5427 dr->dr_zio = arc_write(pio, os->os_spa, txg,
5428 &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
5429 dbuf_is_l2cacheable(db, NULL), &zp, dbuf_write_ready,
5430 children_ready_cb, dbuf_write_done, db,
5431 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5432 }
5433 }
5434
5435 EXPORT_SYMBOL(dbuf_find);
5436 EXPORT_SYMBOL(dbuf_is_metadata);
5437 EXPORT_SYMBOL(dbuf_destroy);
5438 EXPORT_SYMBOL(dbuf_whichblock);
5439 EXPORT_SYMBOL(dbuf_read);
5440 EXPORT_SYMBOL(dbuf_unoverride);
5441 EXPORT_SYMBOL(dbuf_free_range);
5442 EXPORT_SYMBOL(dbuf_new_size);
5443 EXPORT_SYMBOL(dbuf_release_bp);
5444 EXPORT_SYMBOL(dbuf_dirty);
5445 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5446 EXPORT_SYMBOL(dmu_buf_will_dirty);
5447 EXPORT_SYMBOL(dmu_buf_will_rewrite);
5448 EXPORT_SYMBOL(dmu_buf_is_dirty);
5449 EXPORT_SYMBOL(dmu_buf_will_clone_or_dio);
5450 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5451 EXPORT_SYMBOL(dmu_buf_will_fill);
5452 EXPORT_SYMBOL(dmu_buf_fill_done);
5453 EXPORT_SYMBOL(dmu_buf_rele);
5454 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5455 EXPORT_SYMBOL(dbuf_prefetch);
5456 EXPORT_SYMBOL(dbuf_hold_impl);
5457 EXPORT_SYMBOL(dbuf_hold);
5458 EXPORT_SYMBOL(dbuf_hold_level);
5459 EXPORT_SYMBOL(dbuf_create_bonus);
5460 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5461 EXPORT_SYMBOL(dbuf_rm_spill);
5462 EXPORT_SYMBOL(dbuf_add_ref);
5463 EXPORT_SYMBOL(dbuf_rele);
5464 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5465 EXPORT_SYMBOL(dbuf_refcount);
5466 EXPORT_SYMBOL(dbuf_sync_list);
5467 EXPORT_SYMBOL(dmu_buf_set_user);
5468 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5469 EXPORT_SYMBOL(dmu_buf_get_user);
5470 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5471
5472 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
5473 "Maximum size in bytes of the dbuf cache.");
5474
5475 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5476 "Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
5477
5478 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5479 "Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
5480
5481 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
5482 "Maximum size in bytes of dbuf metadata cache.");
5483
5484 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
5485 "Set size of dbuf cache to log2 fraction of arc size.");
5486
5487 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
5488 "Set size of dbuf metadata cache to log2 fraction of arc size.");
5489
5490 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
5491 "Set size of dbuf cache mutex array as log2 shift.");
5492