/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <sys/cityhash.h>
#include <sys/spa_impl.h>

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The "dbuf cache" maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t *cache;
	zfs_refcount_t size;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
uint64_t dbuf_cache_max_bytes = 0;
uint64_t dbuf_metadata_cache_max_bytes = 0;
/* Set the default sizes of the caches to log2 fraction of arc size */
int dbuf_cache_shift = 5;
int dbuf_metadata_cache_shift = 6;
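
/*
 * For example (illustrative numbers, only the shifts above come from this
 * file): with a 4 GB ARC, dbuf_init() below would default the dbuf cache
 * to 4 GB >> 5 = 128 MB and the metadata cache to 4 GB >> 6 = 64 MB. The
 * defaults are applied only when these tunables are unset or exceed the
 * ARC size.
 */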

/*
 * For diagnostic purposes, this is incremented whenever we can't add
 * something to the metadata cache because it's full, and instead put
 * the data in the regular dbuf cache.
 */
uint64_t dbuf_metadata_cache_overflow;

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
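
/*
 * A worked instance of the policy above: with dbuf_cache_max_bytes at
 * 100 MB and the default 10% watermarks, callers signal the eviction
 * thread once the cache exceeds 100 MB, the thread then evicts down to
 * the 90 MB low water mark, and callers evict directly themselves while
 * the cache is above 110 MB.
 */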

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_max_bytes) {
			dbuf_metadata_cache_overflow++;
			DTRACE_PROBE1(dbuf__metadata__cache__overflow,
			    dmu_buf_impl_t *, db);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size,
		    db->db.db_size, db);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size. The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;
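
	/*
	 * e.g., with 8 GB of physical memory the loop above settles on
	 * 2^21 (2M) buckets, i.e. a 16 MB table with 8-byte pointers,
	 * which matches the 2MB/GB estimate in the comment.
	 */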

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Setup the parameters for the dbuf caches. We set the sizes of the
	 * dbuf cache and the metadata cache to 1/32nd and 1/16th (default)
	 * of the size of the ARC, respectively. If the values are set in
	 * /etc/system and they're not greater than the size of the ARC, then
	 * we honor that value.
	 */
	if (dbuf_cache_max_bytes == 0 ||
	    dbuf_cache_max_bytes >= arc_max_bytes()) {
		dbuf_cache_max_bytes = arc_max_bytes() >> dbuf_cache_shift;
	}
	if (dbuf_metadata_cache_max_bytes == 0 ||
	    dbuf_metadata_cache_max_bytes >= arc_max_bytes()) {
		dbuf_metadata_cache_max_bytes =
		    arc_max_bytes() >> dbuf_metadata_cache_shift;
	}

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		dbuf_caches[dcs].cache =
		    multilist_create(sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(dbuf_caches[dcs].cache);
	}
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

/*
 * This function is used to lock the parent of the provided dbuf. This
 * should be used when modifying or reading db_blkptr.
 */
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
{
	enum db_lock_type ret = DLT_NONE;
	if (db->db_parent != NULL) {
		rw_enter(&db->db_parent->db_rwlock, rw);
		ret = DLT_PARENT;
	} else if (dmu_objset_ds(db->db_objset) != NULL) {
		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
		    tag);
		ret = DLT_OBJSET;
	}
	/*
	 * We only return a DLT_NONE lock when it's the top-most indirect block
	 * of the meta-dnode of the MOS.
	 */
	return (ret);
}

/*
 * We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus, have no
 * parent) to not the top-most via an indirection increase. This would cause a
 * panic if we didn't pass the lock type in.
 */
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
{
	if (type == DLT_PARENT)
		rw_exit(&db->db_parent->db_rwlock);
	else if (type == DLT_OBJSET)
		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 */
		return (offset >> (dn->dn_datablkshift + level *
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
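
/*
 * A worked instance of the math above (shift values assumed for
 * illustration): with 128K data blocks (datablkshift = 17) and 128K
 * indirect blocks (indblkshift = 17), an indirect block holds
 * 2^(17 - SPA_BLKPTRSHIFT) = 2^10 block pointers, so the level-1 blkid
 * for an offset is offset >> (17 + 1 * 10) and the level-2 blkid is
 * offset >> (17 + 2 * 10).
 */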

/* ARGSUSED */
static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (buf == NULL) {
		/* i/o error */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
	} else if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		ASSERT(zio == NULL || zio->io_error == 0);
		if (buf == NULL) {
			buf = arc_alloc_buf(db->db_objset->os_spa,
			    db, DBUF_GET_BUFC_TYPE(db), db->db.db_size);
		}
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (buf != NULL) {
		/* success */
		ASSERT(zio == NULL || zio->io_error == 0);
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL, B_FALSE);
}


/*
 * This function ensures that, when doing a decrypting read of a block,
 * we make sure we have decrypted the dnode associated with it. We must do
 * this so that we ensure we are fully authenticating the checksum-of-MACs
 * tree from the root of the objset down to this block. Indirect blocks are
 * always verified against their secure checksum-of-MACs assuming that the
 * dnode containing them is correct. Now that we are doing a decrypting read,
 * we can be sure that the key is loaded and verify that assumption. This is
 * especially important considering that we always read encrypted dnode
 * blocks as raw data (without verifying their MACs) to start, and
 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
 */
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
	int err = 0;
	objset_t *os = db->db_objset;
	arc_buf_t *dnode_abuf;
	dnode_t *dn;
	zbookmark_phys_t zb;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!os->os_encrypted || os->os_raw_receive ||
	    (flags & DB_RF_NO_DECRYPT) != 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;

	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
		DB_DNODE_EXIT(db);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(os),
	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);

	/*
	 * An error code of EACCES tells us that the key is still not
	 * available. This is ok if we are only reading authenticated
	 * (and therefore non-encrypted) blocks.
	 */
	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
	    (db->db_blkid == DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
		err = 0;

	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Drops db_mtx and the parent lock specified by dblt and tag before
 * returning.
 */
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
    db_lock_type_t dblt, void *tag)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	arc_flags_t aflags = ARC_FLAG_NOWAIT;
	int err, zio_flags = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_parent == NULL ||
	    RW_LOCK_HELD(&db->db_parent->db_rwlock));

	if (db->db_blkid == DMU_BONUS_BLKID) {
		/*
		 * The bonus length stored in the dnode may be less than
		 * the maximum available space in the bonus buffer.
		 */
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);

		/* if the underlying dnode block is encrypted, decrypt it */
		err = dbuf_read_verify_dnode_crypt(db, flags);
		if (err != 0) {
			DB_DNODE_EXIT(db);
			mutex_exit(&db->db_mtx);
			return (err);
		}

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(max_bonuslen);
		arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
		if (bonuslen < max_bonuslen)
			bzero(db->db.db_data, max_bonuslen);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		dmu_buf_unlock_parent(db, dblt, tag);
		return (0);
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
		    db->db.db_size));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			for (int i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		dmu_buf_unlock_parent(db, dblt, tag);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
	    db->db.db_object, db->db_level, db->db_blkid);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
		spa_log_error(db->db_objset->os_spa, &zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		DB_DNODE_EXIT(db);
		mutex_exit(&db->db_mtx);
		dmu_buf_unlock_parent(db, dblt, tag);
		return (SET_ERROR(EIO));
	}

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err != 0) {
		DB_DNODE_EXIT(db);
		dmu_buf_unlock_parent(db, dblt, tag);
		mutex_exit(&db->db_mtx);
		return (err);
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;
	/*
	 * The zio layer will copy the provided blkptr later, but we need to
	 * do this now so that we can release the parent's rwlock. We have to
	 * do that now so that if dbuf_read_done is called synchronously (on
	 * an l1 cache hit) we don't acquire the db_mtx while holding the
	 * parent's rwlock, which would be a lock ordering violation.
	 */
	blkptr_t bp = *db->db_blkptr;
	dmu_buf_unlock_parent(db, dblt, tag);
	(void) arc_read(zio, db->db_objset->os_spa, &bp,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);
	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT3U(dr->dr_txg, >=, txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = zio_buf_alloc(bonuslen);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
		dnode_t *dn = DB_DNODE(db);
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (arc_is_encrypted(db->db_buf)) {
			boolean_t byteorder;
			uint8_t salt[ZIO_DATA_SALT_LEN];
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t mac[ZIO_DATA_MAC_LEN];

			arc_get_raw_params(db->db_buf, &byteorder, salt,
			    iv, mac);
			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
			    compress_type);
		} else if (compress_type != ZIO_COMPRESS_OFF) {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		} else {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		/*
		 * Ensure that this block's dnode has been decrypted if
		 * the caller has requested decrypted data.
		 */
		err = dbuf_read_verify_dnode_crypt(db, flags);

		/*
		 * If the arc buf is compressed or encrypted and the caller
		 * requested uncompressed data, we need to untransform it
		 * before returning. We also call arc_untransform() on any
		 * unauthenticated blocks, which will verify their MAC if
		 * the key is now available.
		 */
		if (err == 0 && db->db_buf != NULL &&
		    (flags & DB_RF_NO_DECRYPT) == 0 &&
		    (arc_is_encrypted(db->db_buf) ||
		    arc_is_unauthenticated(db->db_buf) ||
		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
			zbookmark_phys_t zb;

			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
			    db->db.db_object, db->db_level, db->db_blkid);
			dbuf_fix_old_data(db, spa_syncing_txg(spa));
			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
			dbuf_set_data(db, db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		if (err == 0 && prefetch) {
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
			    flags & DB_RF_HAVESTRUCT);
		}
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;
		boolean_t need_wait = B_FALSE;

		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);

		if (zio == NULL &&
		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
			need_wait = B_TRUE;
		}
		err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
		/*
		 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
		 * for us
		 */
		if (!err && prefetch) {
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
			    flags & DB_RF_HAVESTRUCT);
		}

		DB_DNODE_EXIT(db);

		if (!err && need_wait)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED. Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED. Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch) {
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
			    flags & DB_RF_HAVESTRUCT);
		}
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	return (err);
}

static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	/*
	 * This assert is valid because dmu_sync() expects to be called by
	 * a zilog's get_data while holding a range lock. This call only
	 * comes from dbuf_dirty() callers who must also hold a range lock.
	 */
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;
	dr->dt.dl.dr_has_raw_params = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t db_search;
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	avl_index_t where;

	if (end_blkid > dn->dn_maxblkid &&
	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
		end_blkid = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);

	db_search.db_level = 0;
	db_search.db_blkid = start_blkid;
	db_search.db_state = DB_SEARCH;

	mutex_enter(&dn->dn_dbufs_mtx);
	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	ASSERT3P(db, ==, NULL);

	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);

	for (; db != NULL; db = db_next) {
		db_next = AVL_NEXT(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0 || db->db_blkid > end_blkid) {
			break;
		}
		ASSERT3U(db->db_blkid, >=, start_blkid);

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (zfs_refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_destroy(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			rw_enter(&db->db_rwlock, RW_WRITER);
			bzero(db->db.db_data, db->db.db_size);
			rw_exit(&db->db_rwlock);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	arc_buf_destroy(obuf, db);
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

/*
 * We already have a dirty record for this TXG, and we are being
 * dirtied again.
 */
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this buffer has already been written out,
		 * we now need to reset its state.
		 */
		dbuf_unoverride(dr);
		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
		    db->db_state != DB_NOFILL) {
			/* Already released on initial dirty, so just thaw. */
			ASSERT(arc_released(db->db_buf));
			arc_buf_thaw(db->db_buf);
		}
	}
}

dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int txgoff = tx->tx_txg & TXG_MASK;
	boolean_t drop_struct_rwlock = B_FALSE;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context. Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
#ifdef DEBUG
	if (dn->dn_objset->os_dsl_dataset != NULL) {
		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
		    RW_READER, FTAG);
	}
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	if (dn->dn_objset->os_dsl_dataset != NULL)
		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty. They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too? The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED) {
		if (dn->dn_objset->os_dsl_dataset != NULL) {
			rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
			    RW_READER, FTAG);
		}
		if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
			dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ?
			    DN_DIRTY_SYNC : DN_DIRTY_OPEN);
			ASSERT(dn->dn_dirtyctx_firstset == NULL);
			dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
		}
		if (dn->dn_objset->os_dsl_dataset != NULL) {
			rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
			    FTAG);
		}
	}

	if (tx->tx_txg > dn->dn_dirty_txg)
		dn->dn_dirty_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		dbuf_redirty(dr);
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context. Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef DEBUG
	if (dn->dn_objset->os_dsl_dataset != NULL)
		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1800 if (dn->dn_objset->os_dsl_dataset != NULL)
1801 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
1802 #endif
1803 ASSERT(db->db.db_size != 0);
1804
1805 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1806
1807 if (db->db_blkid != DMU_BONUS_BLKID) {
1808 dmu_objset_willuse_space(os, db->db.db_size, tx);
1809 }
1810
1811 /*
1812 * If this buffer is dirty in an old transaction group we need
1813 * to make a copy of it so that the changes we make in this
1814 * transaction group won't leak out when we sync the older txg.
1815 */
1816 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
1817 if (db->db_level == 0) {
1818 void *data_old = db->db_buf;
1819
1820 if (db->db_state != DB_NOFILL) {
1821 if (db->db_blkid == DMU_BONUS_BLKID) {
1822 dbuf_fix_old_data(db, tx->tx_txg);
1823 data_old = db->db.db_data;
1824 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1825 /*
1826 * Release the data buffer from the cache so
1827 * that we can modify it without impacting
1828 * possible other users of this cached data
1829 * block. Note that indirect blocks and
1830 * private objects are not released until the
1831 * syncing state (since they are only modified
1832 * then).
1833 */
1834 arc_release(db->db_buf, db);
1835 dbuf_fix_old_data(db, tx->tx_txg);
1836 data_old = db->db_buf;
1837 }
1838 ASSERT(data_old != NULL);
1839 }
1840 dr->dt.dl.dr_data = data_old;
1841 } else {
1842 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1843 list_create(&dr->dt.di.dr_children,
1844 sizeof (dbuf_dirty_record_t),
1845 offsetof(dbuf_dirty_record_t, dr_dirty_node));
1846 }
1847 if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
1848 dr->dr_accounted = db->db.db_size;
1849 dr->dr_dbuf = db;
1850 dr->dr_txg = tx->tx_txg;
1851 dr->dr_next = *drp;
1852 *drp = dr;
1853
1854 /*
1855 * We could have been freed_in_flight between the dbuf_noread
1856 * and dbuf_dirty. We win, as though the dbuf_noread() had
1857 * happened after the free.
1858 */
1859 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1860 db->db_blkid != DMU_SPILL_BLKID) {
1861 mutex_enter(&dn->dn_mtx);
1862 if (dn->dn_free_ranges[txgoff] != NULL) {
1863 range_tree_clear(dn->dn_free_ranges[txgoff],
1864 db->db_blkid, 1);
1865 }
1866 mutex_exit(&dn->dn_mtx);
1867 db->db_freed_in_flight = FALSE;
1868 }
1869
1870 /*
1871 * This buffer is now part of this txg
1872 */
1873 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1874 db->db_dirtycnt += 1;
1875 ASSERT3U(db->db_dirtycnt, <=, 3);
1876
1877 mutex_exit(&db->db_mtx);
1878
1879 if (db->db_blkid == DMU_BONUS_BLKID ||
1880 db->db_blkid == DMU_SPILL_BLKID) {
1881 mutex_enter(&dn->dn_mtx);
1882 ASSERT(!list_link_active(&dr->dr_dirty_node));
1883 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1884 mutex_exit(&dn->dn_mtx);
1885 dnode_setdirty(dn, tx);
1886 DB_DNODE_EXIT(db);
1887 return (dr);
1888 }
1889
1890 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1891 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1892 drop_struct_rwlock = B_TRUE;
1893 }
1894
1895 /*
1896 * If we are overwriting a dedup BP, then unless it is snapshotted,
1897 * when we get to syncing context we will need to decrement its
1898 * refcount in the DDT. Prefetch the relevant DDT block so that
1899 * syncing context won't have to wait for the i/o.
1900 */
1901 if (db->db_blkptr != NULL) {
1902 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1903 ddt_prefetch(os->os_spa, db->db_blkptr);
1904 dmu_buf_unlock_parent(db, dblt, FTAG);
1905 }
1906
1907 /*
1908 * We need to hold the dn_struct_rwlock to make this assertion,
1909 * because it protects dn_phys / dn_next_nlevels from changing.
1910 */
1911 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1912 dn->dn_phys->dn_nlevels > db->db_level ||
1913 dn->dn_next_nlevels[txgoff] > db->db_level ||
1914 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1915 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1916
1917
1918 if (db->db_level == 0) {
1919 ASSERT(!db->db_objset->os_raw_receive ||
1920 dn->dn_maxblkid >= db->db_blkid);
1921 dnode_new_blkid(dn, db->db_blkid, tx,
1922 drop_struct_rwlock, B_FALSE);
1923 ASSERT(dn->dn_maxblkid >= db->db_blkid);
1924 }
1925
1926 if (db->db_level+1 < dn->dn_nlevels) {
1927 dmu_buf_impl_t *parent = db->db_parent;
1928 dbuf_dirty_record_t *di;
1929 int parent_held = FALSE;
1930
1931 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1932 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1933 parent = dbuf_hold_level(dn, db->db_level + 1,
1934 db->db_blkid >> epbs, FTAG);
1935 ASSERT(parent != NULL);
1936 parent_held = TRUE;
1937 }
1938 if (drop_struct_rwlock)
1939 rw_exit(&dn->dn_struct_rwlock);
1940 ASSERT3U(db->db_level + 1, ==, parent->db_level);
1941 di = dbuf_dirty(parent, tx);
1942 if (parent_held)
1943 dbuf_rele(parent, FTAG);
1944
1945 mutex_enter(&db->db_mtx);
1946 /*
1947 * Since we've dropped the mutex, it's possible that
1948 * dbuf_undirty() might have changed this out from under us.
1949 */
1950 if (db->db_last_dirty == dr ||
1951 dn->dn_object == DMU_META_DNODE_OBJECT) {
1952 mutex_enter(&di->dt.di.dr_mtx);
1953 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1954 ASSERT(!list_link_active(&dr->dr_dirty_node));
1955 list_insert_tail(&di->dt.di.dr_children, dr);
1956 mutex_exit(&di->dt.di.dr_mtx);
1957 dr->dr_parent = di;
1958 }
1959 mutex_exit(&db->db_mtx);
1960 } else {
1961 ASSERT(db->db_level + 1 == dn->dn_nlevels);
1962 ASSERT(db->db_blkid < dn->dn_nblkptr);
1963 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1964 mutex_enter(&dn->dn_mtx);
1965 ASSERT(!list_link_active(&dr->dr_dirty_node));
1966 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1967 mutex_exit(&dn->dn_mtx);
1968 if (drop_struct_rwlock)
1969 rw_exit(&dn->dn_struct_rwlock);
1970 }
1971
1972 dnode_setdirty(dn, tx);
1973 DB_DNODE_EXIT(db);
1974 return (dr);
1975 }
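
/*
 * For illustration: dirtying a level-0 dbuf recursively dirties the chain of
 * indirect blocks above it, so a single dbuf_dirty(db, tx) produces linked
 * dirty records ending at the dnode:
 *
 *	L0 dr --dr_parent--> L1 dr --dr_parent--> ... --> top-level dr
 *	    (the top-level dr sits on dn->dn_dirty_records[txgoff])
 *
 * dbuf_undirty() below must undo exactly one link of this chain per call,
 * which is why it matches each list_insert with a corresponding list_remove.
 */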
1976
1977 /*
1978 * Undirty a buffer in the transaction group referenced by the given
1979 * transaction. Return whether this evicted the dbuf.
1980 */
1981 static boolean_t
1982 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1983 {
1984 dnode_t *dn;
1985 uint64_t txg = tx->tx_txg;
1986 dbuf_dirty_record_t *dr, **drp;
1987
1988 ASSERT(txg != 0);
1989
1990 /*
1991 * Due to our use of dn_nlevels below, this can only be called
1992 * in open context, unless we are operating on the MOS.
1993 * From syncing context, dn_nlevels may be different from the
1994 * dn_nlevels used when dbuf was dirtied.
1995 */
1996 ASSERT(db->db_objset ==
1997 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
1998 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
1999 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2000 ASSERT0(db->db_level);
2001 ASSERT(MUTEX_HELD(&db->db_mtx));
2002
2003 /*
2004 * If this buffer is not dirty, we're done.
2005 */
2006 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
2007 if (dr->dr_txg <= txg)
2008 break;
2009 if (dr == NULL || dr->dr_txg < txg)
2010 return (B_FALSE);
2011 ASSERT(dr->dr_txg == txg);
2012 ASSERT(dr->dr_dbuf == db);
2013
2014 DB_DNODE_ENTER(db);
2015 dn = DB_DNODE(db);
2016
2017 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2018
2019 ASSERT(db->db.db_size != 0);
2020
2021 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2022 dr->dr_accounted, txg);
2023
2024 *drp = dr->dr_next;
2025
2026 /*
2027 * Note that there are three places in dbuf_dirty()
2028 * where this dirty record may be put on a list.
2029 * Make sure to do a list_remove corresponding to
2030 * every one of those list_insert calls.
2031 */
2032 if (dr->dr_parent) {
2033 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2034 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2035 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2036 } else if (db->db_blkid == DMU_SPILL_BLKID ||
2037 db->db_level + 1 == dn->dn_nlevels) {
2038 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2039 mutex_enter(&dn->dn_mtx);
2040 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2041 mutex_exit(&dn->dn_mtx);
2042 }
2043 DB_DNODE_EXIT(db);
2044
2045 if (db->db_state != DB_NOFILL) {
2046 dbuf_unoverride(dr);
2047
2048 ASSERT(db->db_buf != NULL);
2049 ASSERT(dr->dt.dl.dr_data != NULL);
2050 if (dr->dt.dl.dr_data != db->db_buf)
2051 arc_buf_destroy(dr->dt.dl.dr_data, db);
2052 }
2053
2054 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2055
2056 ASSERT(db->db_dirtycnt > 0);
2057 db->db_dirtycnt -= 1;
2058
2059 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2060 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
2061 dbuf_destroy(db);
2062 return (B_TRUE);
2063 }
2064
2065 return (B_FALSE);
2066 }
2067
2068 static void
2069 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2070 {
2071 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2072
2073 ASSERT(tx->tx_txg != 0);
2074 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2075
2076 /*
2077	 * Quick check for dirtiness. For already dirty blocks, this
2078 * reduces runtime of this function by >90%, and overall performance
2079 * by 50% for some workloads (e.g. file deletion with indirect blocks
2080 * cached).
2081 */
2082 mutex_enter(&db->db_mtx);
2083 dbuf_dirty_record_t *dr;
2084 for (dr = db->db_last_dirty;
2085 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
2086 /*
2087 * It's possible that it is already dirty but not cached,
2088 * because there are some calls to dbuf_dirty() that don't
2089 * go through dmu_buf_will_dirty().
2090 */
2091 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
2092 /* This dbuf is already dirty and cached. */
2093 dbuf_redirty(dr);
2094 mutex_exit(&db->db_mtx);
2095 return;
2096 }
2097 }
2098 mutex_exit(&db->db_mtx);
2099
2100 DB_DNODE_ENTER(db);
2101 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2102 flags |= DB_RF_HAVESTRUCT;
2103 DB_DNODE_EXIT(db);
2104 (void) dbuf_read(db, NULL, flags);
2105 (void) dbuf_dirty(db, tx);
2106 }
2107
2108 void
2109 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2110 {
2111 dmu_buf_will_dirty_impl(db_fake,
2112 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2113 }
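
/*
 * A hedged caller-side sketch of the read-modify-write protocol this
 * function anchors (my_phys_t and its field are hypothetical names, and the
 * tx is assumed to be assigned):
 *
 *	VERIFY0(dmu_bonus_hold(os, object, FTAG, &db));
 *	dmu_buf_will_dirty(db, tx);
 *	((my_phys_t *)db->db_data)->field = value;
 *	dmu_buf_rele(db, FTAG);
 *
 * The buffer must be marked dirty *before* its contents are modified, so
 * that dbuf_dirty() can copy the old data aside if an earlier txg is still
 * syncing it out.
 */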
2114
2115 void
2116 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2117 {
2118 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2119
2120 db->db_state = DB_NOFILL;
2121
2122 dmu_buf_will_fill(db_fake, tx);
2123 }
2124
2125 void
2126 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2127 {
2128 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2129
2130 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2131 ASSERT(tx->tx_txg != 0);
2132 ASSERT(db->db_level == 0);
2133 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2134
2135 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2136 dmu_tx_private_ok(tx));
2137
2138 dbuf_noread(db);
2139 (void) dbuf_dirty(db, tx);
2140 }
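
/*
 * A minimal sketch of the fill protocol, assuming the caller intends to
 * overwrite the entire block (src is a hypothetical source buffer):
 *
 *	dmu_buf_will_fill(db, tx);		// DB_FILL; no read issued
 *	bcopy(src, db->db_data, db->db_size);
 *	dmu_buf_fill_done(db, tx);		// DB_CACHED; wake waiters
 *
 * Because every byte will be overwritten, dbuf_noread() above skips reading
 * the old contents from disk.
 */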
2141
2142 /*
2143 * This function is effectively the same as dmu_buf_will_dirty(), but
2144 * indicates the caller expects raw encrypted data in the db, and provides
2145 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2146 * blkptr_t when this dbuf is written. This is only used for blocks of
2147 * dnodes during a raw receive.
2148 */
2149 void
2150 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2151 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2152 {
2153 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2154 dbuf_dirty_record_t *dr;
2155
2156 /*
2157 * dr_has_raw_params is only processed for blocks of dnodes
2158 * (see dbuf_sync_dnode_leaf_crypt()).
2159 */
2160 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2161 ASSERT3U(db->db_level, ==, 0);
2162
2163 dmu_buf_will_dirty_impl(db_fake,
2164 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2165
2166 dr = db->db_last_dirty;
2167 while (dr != NULL && dr->dr_txg > tx->tx_txg)
2168 dr = dr->dr_next;
2169
2170 ASSERT3P(dr, !=, NULL);
2171 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2172
2173 dr->dt.dl.dr_has_raw_params = B_TRUE;
2174 dr->dt.dl.dr_byteorder = byteorder;
2175 bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
2176 bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
2177 bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
2178 }
2179
2180 #pragma weak dmu_buf_fill_done = dbuf_fill_done
2181 /* ARGSUSED */
2182 void
2183 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
2184 {
2185 mutex_enter(&db->db_mtx);
2186 DBUF_VERIFY(db);
2187
2188 if (db->db_state == DB_FILL) {
2189 if (db->db_level == 0 && db->db_freed_in_flight) {
2190 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2191 /* we were freed while filling */
2192 /* XXX dbuf_undirty? */
2193 bzero(db->db.db_data, db->db.db_size);
2194 db->db_freed_in_flight = FALSE;
2195 }
2196 db->db_state = DB_CACHED;
2197 cv_broadcast(&db->db_changed);
2198 }
2199 mutex_exit(&db->db_mtx);
2200 }
2201
2202 void
2203 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2204 bp_embedded_type_t etype, enum zio_compress comp,
2205 int uncompressed_size, int compressed_size, int byteorder,
2206 dmu_tx_t *tx)
2207 {
2208 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2209 struct dirty_leaf *dl;
2210 dmu_object_type_t type;
2211
2212 if (etype == BP_EMBEDDED_TYPE_DATA) {
2213 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2214 SPA_FEATURE_EMBEDDED_DATA));
2215 }
2216
2217 DB_DNODE_ENTER(db);
2218 type = DB_DNODE(db)->dn_type;
2219 DB_DNODE_EXIT(db);
2220
2221 ASSERT0(db->db_level);
2222 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2223
2224 dmu_buf_will_not_fill(dbuf, tx);
2225
2226 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
2227 dl = &db->db_last_dirty->dt.dl;
2228 encode_embedded_bp_compressed(&dl->dr_overridden_by,
2229 data, comp, uncompressed_size, compressed_size);
2230 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2231 BP_SET_TYPE(&dl->dr_overridden_by, type);
2232 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2233 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2234
2235 dl->dr_override_state = DR_OVERRIDDEN;
2236 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
2237 }
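
/*
 * For reference (cf. dmu_write_embedded() in dmu.c): the caller hands in
 * data that is already compressed with `comp`, and sync context then writes
 * no block at all for this dbuf -- the override BP built above, with the
 * payload embedded in it, is copied into the parent as-is.
 */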
2238
2239 /*
2240 * Directly assign a provided arc buf to a given dbuf if it's not referenced
2241 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2242 */
2243 void
2244 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2245 {
2246 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2247 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2248 ASSERT(db->db_level == 0);
2249 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2250 ASSERT(buf != NULL);
2251 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2252 ASSERT(tx->tx_txg != 0);
2253
2254 arc_return_buf(buf, db);
2255 ASSERT(arc_released(buf));
2256
2257 mutex_enter(&db->db_mtx);
2258
2259 while (db->db_state == DB_READ || db->db_state == DB_FILL)
2260 cv_wait(&db->db_changed, &db->db_mtx);
2261
2262 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2263
2264 if (db->db_state == DB_CACHED &&
2265 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2266 /*
2267 * In practice, we will never have a case where we have an
2268 * encrypted arc buffer while additional holds exist on the
2269 * dbuf. We don't handle this here so we simply assert that
2270 * fact instead.
2271 */
2272 ASSERT(!arc_is_encrypted(buf));
2273 mutex_exit(&db->db_mtx);
2274 (void) dbuf_dirty(db, tx);
2275 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
2276 arc_buf_destroy(buf, db);
2277 xuio_stat_wbuf_copied();
2278 return;
2279 }
2280
2281 xuio_stat_wbuf_nocopy();
2282 if (db->db_state == DB_CACHED) {
2283 dbuf_dirty_record_t *dr = db->db_last_dirty;
2284
2285 ASSERT(db->db_buf != NULL);
2286 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2287 ASSERT(dr->dt.dl.dr_data == db->db_buf);
2288
2289 if (!arc_released(db->db_buf)) {
2290 ASSERT(dr->dt.dl.dr_override_state ==
2291 DR_OVERRIDDEN);
2292 arc_release(db->db_buf, db);
2293 }
2294 dr->dt.dl.dr_data = buf;
2295 arc_buf_destroy(db->db_buf, db);
2296 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2297 arc_release(db->db_buf, db);
2298 arc_buf_destroy(db->db_buf, db);
2299 }
2300 db->db_buf = NULL;
2301 }
2302 ASSERT(db->db_buf == NULL);
2303 dbuf_set_data(db, buf);
2304 db->db_state = DB_FILL;
2305 mutex_exit(&db->db_mtx);
2306 (void) dbuf_dirty(db, tx);
2307 dmu_buf_fill_done(&db->db, tx);
2308 }
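
/*
 * Hedged caller-side sketch (cf. dmu_assign_arcbuf()): a loaned arc buf is
 * filled by the caller and then handed off, avoiding a copy on the common
 * path (src, size, and offset are assumptions for illustration):
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, size);
 *	bcopy(src, abuf->b_data, size);
 *	dmu_assign_arcbuf(bonus_db, offset, abuf, tx);
 *
 * Only when extra holds exist on the target dbuf does the code above fall
 * back to copying into the existing buffer.
 */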
2309
2310 void
2311 dbuf_destroy(dmu_buf_impl_t *db)
2312 {
2313 dnode_t *dn;
2314 dmu_buf_impl_t *parent = db->db_parent;
2315 dmu_buf_impl_t *dndb;
2316
2317 ASSERT(MUTEX_HELD(&db->db_mtx));
2318 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2319
2320 if (db->db_buf != NULL) {
2321 arc_buf_destroy(db->db_buf, db);
2322 db->db_buf = NULL;
2323 }
2324
2325 if (db->db_blkid == DMU_BONUS_BLKID) {
2326 int slots = DB_DNODE(db)->dn_num_slots;
2327 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2328 if (db->db.db_data != NULL) {
2329 zio_buf_free(db->db.db_data, bonuslen);
2330 arc_space_return(bonuslen, ARC_SPACE_BONUS);
2331 db->db_state = DB_UNCACHED;
2332 }
2333 }
2334
2335 dbuf_clear_data(db);
2336
2337 if (multilist_link_active(&db->db_cache_link)) {
2338 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2339 db->db_caching_status == DB_DBUF_METADATA_CACHE);
2340
2341 multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
2342 (void) zfs_refcount_remove_many(
2343 &dbuf_caches[db->db_caching_status].size,
2344 db->db.db_size, db);
2345
2346 db->db_caching_status = DB_NO_CACHE;
2347 }
2348
2349 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2350 ASSERT(db->db_data_pending == NULL);
2351
2352 db->db_state = DB_EVICTING;
2353 db->db_blkptr = NULL;
2354
2355 /*
2356 * Now that db_state is DB_EVICTING, nobody else can find this via
2357 * the hash table. We can now drop db_mtx, which allows us to
2358 * acquire the dn_dbufs_mtx.
2359 */
2360 mutex_exit(&db->db_mtx);
2361
2362 DB_DNODE_ENTER(db);
2363 dn = DB_DNODE(db);
2364 dndb = dn->dn_dbuf;
2365 if (db->db_blkid != DMU_BONUS_BLKID) {
2366 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2367 if (needlock)
2368 mutex_enter(&dn->dn_dbufs_mtx);
2369 avl_remove(&dn->dn_dbufs, db);
2370 atomic_dec_32(&dn->dn_dbufs_count);
2371 membar_producer();
2372 DB_DNODE_EXIT(db);
2373 if (needlock)
2374 mutex_exit(&dn->dn_dbufs_mtx);
2375 /*
2376 * Decrementing the dbuf count means that the hold corresponding
2377 * to the removed dbuf is no longer discounted in dnode_move(),
2378 * so the dnode cannot be moved until after we release the hold.
2379 * The membar_producer() ensures visibility of the decremented
2380 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
2381 * release any lock.
2382 */
2383 mutex_enter(&dn->dn_mtx);
2384 dnode_rele_and_unlock(dn, db, B_TRUE);
2385 db->db_dnode_handle = NULL;
2386
2387 dbuf_hash_remove(db);
2388 } else {
2389 DB_DNODE_EXIT(db);
2390 }
2391
2392 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2393
2394 db->db_parent = NULL;
2395
2396 ASSERT(db->db_buf == NULL);
2397 ASSERT(db->db.db_data == NULL);
2398 ASSERT(db->db_hash_next == NULL);
2399 ASSERT(db->db_blkptr == NULL);
2400 ASSERT(db->db_data_pending == NULL);
2401 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
2402 ASSERT(!multilist_link_active(&db->db_cache_link));
2403
2404 kmem_cache_free(dbuf_kmem_cache, db);
2405 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
2406
2407 /*
2408 * If this dbuf is referenced from an indirect dbuf,
2409 * decrement the ref count on the indirect dbuf.
2410 */
2411 if (parent && parent != dndb) {
2412 mutex_enter(&parent->db_mtx);
2413 dbuf_rele_and_unlock(parent, db, B_TRUE);
2414 }
2415 }
2416
2417 /*
2418 * Note: While bpp will always be updated if the function returns success,
2419 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
2420 * this happens when the dnode is the meta-dnode, or {user|group|project}used
2421 * object.
2422 */
2423 static int
2424 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
2425 dmu_buf_impl_t **parentp, blkptr_t **bpp)
2426 {
2427 *parentp = NULL;
2428 *bpp = NULL;
2429
2430 ASSERT(blkid != DMU_BONUS_BLKID);
2431
2432 if (blkid == DMU_SPILL_BLKID) {
2433 mutex_enter(&dn->dn_mtx);
2434 if (dn->dn_have_spill &&
2435 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
2436 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
2437 else
2438 *bpp = NULL;
2439 dbuf_add_ref(dn->dn_dbuf, NULL);
2440 *parentp = dn->dn_dbuf;
2441 mutex_exit(&dn->dn_mtx);
2442 return (0);
2443 }
2444
2445 int nlevels =
2446 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
2447 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2448
2449 ASSERT3U(level * epbs, <, 64);
2450 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2451 /*
2452 * This assertion shouldn't trip as long as the max indirect block size
2453 * is less than 1M. The reason for this is that up to that point,
2454 * the number of levels required to address an entire object with blocks
2455 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
2456	 * other words, if N * epbs + 1 > 64, then as long as (N-1) * epbs + 1 > 55
2457 * (i.e. we can address the entire object), objects will all use at most
2458 * N-1 levels and the assertion won't overflow. However, once epbs is
2459 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
2460 * enough to address an entire object, so objects will have 5 levels,
2461 * but then this assertion will overflow.
2462 *
2463 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
2464 * need to redo this logic to handle overflows.
2465 */
2466 ASSERT(level >= nlevels ||
2467 ((nlevels - level - 1) * epbs) +
2468 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
2469 if (level >= nlevels ||
2470 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
2471 ((nlevels - level - 1) * epbs)) ||
2472 (fail_sparse &&
2473 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
2474 /* the buffer has no parent yet */
2475 return (SET_ERROR(ENOENT));
2476 } else if (level < nlevels-1) {
2477 /* this block is referenced from an indirect block */
2478 int err = dbuf_hold_impl(dn, level+1,
2479 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
2480 if (err)
2481 return (err);
2482 err = dbuf_read(*parentp, NULL,
2483 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2484 if (err) {
2485 dbuf_rele(*parentp, NULL);
2486 *parentp = NULL;
2487 return (err);
2488 }
2489 rw_enter(&(*parentp)->db_rwlock, RW_READER);
2490 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
2491 (blkid & ((1ULL << epbs) - 1));
2492 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
2493 ASSERT(BP_IS_HOLE(*bpp));
2494 rw_exit(&(*parentp)->db_rwlock);
2495 return (0);
2496 } else {
2497 /* the block is referenced from the dnode */
2498 ASSERT3U(level, ==, nlevels-1);
2499 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
2500 blkid < dn->dn_phys->dn_nblkptr);
2501 if (dn->dn_dbuf) {
2502 dbuf_add_ref(dn->dn_dbuf, NULL);
2503 *parentp = dn->dn_dbuf;
2504 }
2505 *bpp = &dn->dn_phys->dn_blkptr[blkid];
2506 return (0);
2507 }
2508 }
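
/*
 * Worked example of the indirection math above: with 128K indirect blocks,
 * epbs = 17 - SPA_BLKPTRSHIFT = 10, so each indirect block holds 1024 block
 * pointers. The level-0 blkid 5000 is then slot 5000 & 1023 = 904 within
 * the level-1 block whose blkid is 5000 >> 10 = 4.
 */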
2509
2510 static dmu_buf_impl_t *
2511 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
2512 dmu_buf_impl_t *parent, blkptr_t *blkptr)
2513 {
2514 objset_t *os = dn->dn_objset;
2515 dmu_buf_impl_t *db, *odb;
2516
2517 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2518 ASSERT(dn->dn_type != DMU_OT_NONE);
2519
2520 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
2521
2522 db->db_objset = os;
2523 db->db.db_object = dn->dn_object;
2524 db->db_level = level;
2525 db->db_blkid = blkid;
2526 db->db_last_dirty = NULL;
2527 db->db_dirtycnt = 0;
2528 db->db_dnode_handle = dn->dn_handle;
2529 db->db_parent = parent;
2530 db->db_blkptr = blkptr;
2531
2532 db->db_user = NULL;
2533 db->db_user_immediate_evict = FALSE;
2534 db->db_freed_in_flight = FALSE;
2535 db->db_pending_evict = FALSE;
2536
2537 if (blkid == DMU_BONUS_BLKID) {
2538 ASSERT3P(parent, ==, dn->dn_dbuf);
2539 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
2540 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
2541 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
2542 db->db.db_offset = DMU_BONUS_BLKID;
2543 db->db_state = DB_UNCACHED;
2544 db->db_caching_status = DB_NO_CACHE;
2545 /* the bonus dbuf is not placed in the hash table */
2546 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
2547 return (db);
2548 } else if (blkid == DMU_SPILL_BLKID) {
2549 db->db.db_size = (blkptr != NULL) ?
2550 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
2551 db->db.db_offset = 0;
2552 } else {
2553 int blocksize =
2554 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
2555 db->db.db_size = blocksize;
2556 db->db.db_offset = db->db_blkid * blocksize;
2557 }
2558
2559 /*
2560	 * Hold the dn_dbufs_mtx while we add the new dbuf
2561	 * to the hash table *and* to the dbufs list.
2562	 * This prevents a possible deadlock with someone
2563	 * trying to look up this dbuf before it's added to the
2564	 * dn_dbufs list.
2565 */
2566 mutex_enter(&dn->dn_dbufs_mtx);
2567 db->db_state = DB_EVICTING;
2568 if ((odb = dbuf_hash_insert(db)) != NULL) {
2569 /* someone else inserted it first */
2570 kmem_cache_free(dbuf_kmem_cache, db);
2571 mutex_exit(&dn->dn_dbufs_mtx);
2572 return (odb);
2573 }
2574 avl_add(&dn->dn_dbufs, db);
2575
2576 db->db_state = DB_UNCACHED;
2577 db->db_caching_status = DB_NO_CACHE;
2578 mutex_exit(&dn->dn_dbufs_mtx);
2579 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
2580
2581 if (parent && parent != dn->dn_dbuf)
2582 dbuf_add_ref(parent, db);
2583
2584 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2585 zfs_refcount_count(&dn->dn_holds) > 0);
2586 (void) zfs_refcount_add(&dn->dn_holds, db);
2587 atomic_inc_32(&dn->dn_dbufs_count);
2588
2589 dprintf_dbuf(db, "db=%p\n", db);
2590
2591 return (db);
2592 }
2593
2594 typedef struct dbuf_prefetch_arg {
2595 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
2596 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
2597 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
2598 int dpa_curlevel; /* The current level that we're reading */
2599 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
2600 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
2601 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
2602 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
2603 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
2604 void *dpa_arg; /* prefetch completion arg */
2605 } dbuf_prefetch_arg_t;
2606
2607 static void
2608 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
2609 {
2610 if (dpa->dpa_cb != NULL)
2611 dpa->dpa_cb(dpa->dpa_arg, io_done);
2612 kmem_free(dpa, sizeof (*dpa));
2613 }
2614
2615 static void
2616 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
2617 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
2618 {
2619 dbuf_prefetch_arg_t *dpa = private;
2620
2621 dbuf_prefetch_fini(dpa, B_TRUE);
2622 if (abuf != NULL)
2623 arc_buf_destroy(abuf, private);
2624 }
2625
2626 /*
2627 * Actually issue the prefetch read for the block given.
2628 */
2629 static void
2630 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
2631 {
2632 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
2633 return (dbuf_prefetch_fini(dpa, B_FALSE));
2634
2635 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
2636 arc_flags_t aflags =
2637 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
2638
2639 /* dnodes are always read as raw and then converted later */
2640 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
2641 dpa->dpa_curlevel == 0)
2642 zio_flags |= ZIO_FLAG_RAW;
2643
2644 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2645 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
2646 ASSERT(dpa->dpa_zio != NULL);
2647 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
2648 dbuf_issue_final_prefetch_done, dpa,
2649 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
2650 }
2651
2652 /*
2653 * Called when an indirect block above our prefetch target is read in. This
2654 * will either read in the next indirect block down the tree or issue the actual
2655 * prefetch if the next block down is our target.
2656 */
2657 /* ARGSUSED */
2658 static void
2659 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
2660 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
2661 {
2662 dbuf_prefetch_arg_t *dpa = private;
2663
2664 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
2665 ASSERT3S(dpa->dpa_curlevel, >, 0);
2666
2667 if (abuf == NULL) {
2668 ASSERT(zio == NULL || zio->io_error != 0);
2669 return (dbuf_prefetch_fini(dpa, B_TRUE));
2670 }
2671 ASSERT(zio == NULL || zio->io_error == 0);
2672
2673 /*
2674 * The dpa_dnode is only valid if we are called with a NULL
2675 * zio. This indicates that the arc_read() returned without
2676 * first calling zio_read() to issue a physical read. Once
2677 * a physical read is made the dpa_dnode must be invalidated
2678 * as the locks guarding it may have been dropped. If the
2679 * dpa_dnode is still valid, then we want to add it to the dbuf
2680 * cache. To do so, we must hold the dbuf associated with the block
2681 * we just prefetched, read its contents so that we associate it
2682 * with an arc_buf_t, and then release it.
2683 */
2684 if (zio != NULL) {
2685 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
2686 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
2687 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
2688 } else {
2689 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
2690 }
2691 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
2692
2693 dpa->dpa_dnode = NULL;
2694 } else if (dpa->dpa_dnode != NULL) {
2695 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
2696 (dpa->dpa_epbs * (dpa->dpa_curlevel -
2697 dpa->dpa_zb.zb_level));
2698 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
2699 dpa->dpa_curlevel, curblkid, FTAG);
2700 if (db == NULL) {
2701 arc_buf_destroy(abuf, private);
2702 return (dbuf_prefetch_fini(dpa, B_TRUE));
2703 }
2704 (void) dbuf_read(db, NULL,
2705 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
2706 dbuf_rele(db, FTAG);
2707 }
2708
2709 dpa->dpa_curlevel--;
2710 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
2711 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
2712 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
2713 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
2714
2715 if (BP_IS_HOLE(bp)) {
2716 dbuf_prefetch_fini(dpa, B_TRUE);
2717 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
2718 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
2719 dbuf_issue_final_prefetch(dpa, bp);
2720 } else {
2721 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2722 zbookmark_phys_t zb;
2723
2724 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
2725 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
2726 iter_aflags |= ARC_FLAG_L2CACHE;
2727
2728 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2729
2730 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
2731 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
2732
2733 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2734 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
2735 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2736 &iter_aflags, &zb);
2737 }
2738
2739 arc_buf_destroy(abuf, private);
2740 }
2741
2742 /*
2743 * Issue prefetch reads for the given block on the given level. If the indirect
2744 * blocks above that block are not in memory, we will read them in
2745 * asynchronously. As a result, this call never blocks waiting for a read to
2746 * complete. Note that the prefetch might fail if the dataset is encrypted and
2747 * the encryption key is unmapped before the IO completes.
2748 */
2749 int
2750 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
2751 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
2752 void *arg)
2753 {
2754 blkptr_t bp;
2755 int epbs, nlevels, curlevel;
2756 uint64_t curblkid;
2757
2758 ASSERT(blkid != DMU_BONUS_BLKID);
2759 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2760
2761 if (blkid > dn->dn_maxblkid)
2762 goto no_issue;
2763
2764 if (level == 0 && dnode_block_freed(dn, blkid))
2765 goto no_issue;
2766
2767 /*
2768 * This dnode hasn't been written to disk yet, so there's nothing to
2769 * prefetch.
2770 */
2771 nlevels = dn->dn_phys->dn_nlevels;
2772 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
2773 goto no_issue;
2774
2775 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2776 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
2777 goto no_issue;
2778
2779 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
2780 level, blkid);
2781 if (db != NULL) {
2782 mutex_exit(&db->db_mtx);
2783 /*
2784 * This dbuf already exists. It is either CACHED, or
2785 * (we assume) about to be read or filled.
2786 */
2787 goto no_issue;
2788 }
2789
2790 /*
2791 * Find the closest ancestor (indirect block) of the target block
2792 * that is present in the cache. In this indirect block, we will
2793 * find the bp that is at curlevel, curblkid.
2794 */
2795 curlevel = level;
2796 curblkid = blkid;
2797 while (curlevel < nlevels - 1) {
2798 int parent_level = curlevel + 1;
2799 uint64_t parent_blkid = curblkid >> epbs;
2800 dmu_buf_impl_t *db;
2801
2802 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
2803 FALSE, TRUE, FTAG, &db) == 0) {
2804 blkptr_t *bpp = db->db_buf->b_data;
2805 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
2806 dbuf_rele(db, FTAG);
2807 break;
2808 }
2809
2810 curlevel = parent_level;
2811 curblkid = parent_blkid;
2812 }
2813
2814 if (curlevel == nlevels - 1) {
2815 /* No cached indirect blocks found. */
2816 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
2817 bp = dn->dn_phys->dn_blkptr[curblkid];
2818 }
2819 if (BP_IS_HOLE(&bp))
2820 goto no_issue;
2821
2822 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
2823
2824 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
2825 ZIO_FLAG_CANFAIL);
2826
2827 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
2828 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
2829 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2830 dn->dn_object, level, blkid);
2831 dpa->dpa_curlevel = curlevel;
2832 dpa->dpa_prio = prio;
2833 dpa->dpa_aflags = aflags;
2834 dpa->dpa_spa = dn->dn_objset->os_spa;
2835 dpa->dpa_dnode = dn;
2836 dpa->dpa_epbs = epbs;
2837 dpa->dpa_zio = pio;
2838 dpa->dpa_cb = cb;
2839 dpa->dpa_arg = arg;
2840
2841 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
2842 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
2843 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
2844
2845 /*
2846 * If we have the indirect just above us, no need to do the asynchronous
2847 * prefetch chain; we'll just run the last step ourselves. If we're at
2848 * a higher level, though, we want to issue the prefetches for all the
2849 * indirect blocks asynchronously, so we can go on with whatever we were
2850 * doing.
2851 */
2852 if (curlevel == level) {
2853 ASSERT3U(curblkid, ==, blkid);
2854 dbuf_issue_final_prefetch(dpa, &bp);
2855 } else {
2856 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2857 zbookmark_phys_t zb;
2858
2859 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
2860 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
2861 iter_aflags |= ARC_FLAG_L2CACHE;
2862
2863 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2864 dn->dn_object, curlevel, curblkid);
2865 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2866 &bp, dbuf_prefetch_indirect_done, dpa, prio,
2867 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2868 &iter_aflags, &zb);
2869 }
2870 /*
2871 * We use pio here instead of dpa_zio since it's possible that
2872 * dpa may have already been freed.
2873 */
2874 zio_nowait(pio);
2875 return (1);
2876 no_issue:
2877 if (cb != NULL)
2878 cb(arg, B_FALSE);
2879 return (0);
2880 }
2881
2882 int
2883 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
2884 arc_flags_t aflags)
2885 {
2886
2887 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
2888 }
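
/*
 * Hedged usage sketch (cf. dmu_prefetch()): callers take dn_struct_rwlock
 * and fire-and-forget level-0 prefetches; start and end are assumptions for
 * illustration:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	for (uint64_t blkid = start; blkid <= end; blkid++) {
 *		(void) dbuf_prefetch(dn, 0, blkid,
 *		    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
 *	}
 *	rw_exit(&dn->dn_struct_rwlock);
 */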
2889
2890 /*
2891 * Helper function for dbuf_hold_impl() to copy a buffer. Handles
2892 * the cases of encrypted, compressed, and uncompressed buffers by
2893 * allocating the new buffer with arc_alloc_raw_buf(),
2894 * arc_alloc_compressed_buf(), or arc_alloc_buf(), respectively.
2895 *
2896 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
2897 */
2898 static void
2899 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db, dbuf_dirty_record_t *dr)
2900 {
2901 arc_buf_t *data = dr->dt.dl.dr_data;
2902 enum zio_compress compress_type = arc_get_compression(data);
2903
2904 if (arc_is_encrypted(data)) {
2905 boolean_t byteorder;
2906 uint8_t salt[ZIO_DATA_SALT_LEN];
2907 uint8_t iv[ZIO_DATA_IV_LEN];
2908 uint8_t mac[ZIO_DATA_MAC_LEN];
2909
2910 arc_get_raw_params(data, &byteorder, salt, iv, mac);
2911 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
2912 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
2913 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
2914 compress_type));
2915 } else if (compress_type != ZIO_COMPRESS_OFF) {
2916 dbuf_set_data(db, arc_alloc_compressed_buf(
2917 dn->dn_objset->os_spa, db, arc_buf_size(data),
2918 arc_buf_lsize(data), compress_type));
2919 } else {
2920 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
2921 DBUF_GET_BUFC_TYPE(db), db->db.db_size));
2922 }
2923
2924 rw_enter(&db->db_rwlock, RW_WRITER);
2925 bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
2926 rw_exit(&db->db_rwlock);
2927 }
2928
2929 /*
2930 * Returns with db_holds incremented, and db_mtx not held.
2931 * Note: dn_struct_rwlock must be held.
2932 */
2933 int
2934 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
2935 boolean_t fail_sparse, boolean_t fail_uncached,
2936 void *tag, dmu_buf_impl_t **dbp)
2937 {
2938 dmu_buf_impl_t *db, *parent = NULL;
2939
2940 ASSERT(blkid != DMU_BONUS_BLKID);
2941 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2942 ASSERT3U(dn->dn_nlevels, >, level);
2943
2944 *dbp = NULL;
2945 top:
2946 /* dbuf_find() returns with db_mtx held */
2947 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
2948
2949 if (db == NULL) {
2950 blkptr_t *bp = NULL;
2951 int err;
2952
2953 if (fail_uncached)
2954 return (SET_ERROR(ENOENT));
2955
2956 ASSERT3P(parent, ==, NULL);
2957 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
2958 if (fail_sparse) {
2959 if (err == 0 && bp && BP_IS_HOLE(bp))
2960 err = SET_ERROR(ENOENT);
2961 if (err) {
2962 if (parent)
2963 dbuf_rele(parent, NULL);
2964 return (err);
2965 }
2966 }
2967 if (err && err != ENOENT)
2968 return (err);
2969 db = dbuf_create(dn, level, blkid, parent, bp);
2970 }
2971
2972 if (fail_uncached && db->db_state != DB_CACHED) {
2973 mutex_exit(&db->db_mtx);
2974 return (SET_ERROR(ENOENT));
2975 }
2976
2977 if (db->db_buf != NULL) {
2978 arc_buf_access(db->db_buf);
2979 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
2980 }
2981
2982 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
2983
2984 /*
2985	 * If this buffer is currently syncing out, and we are
2986 * still referencing it from db_data, we need to make a copy
2987 * of it in case we decide we want to dirty it again in this txg.
2988 */
2989 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2990 dn->dn_object != DMU_META_DNODE_OBJECT &&
2991 db->db_state == DB_CACHED && db->db_data_pending) {
2992 dbuf_dirty_record_t *dr = db->db_data_pending;
2993 if (dr->dt.dl.dr_data == db->db_buf)
2994 dbuf_hold_copy(dn, db, dr);
2995 }
2996
2997 if (multilist_link_active(&db->db_cache_link)) {
2998 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2999 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3000 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3001
3002 multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
3003 (void) zfs_refcount_remove_many(
3004 &dbuf_caches[db->db_caching_status].size,
3005 db->db.db_size, db);
3006
3007 db->db_caching_status = DB_NO_CACHE;
3008 }
3009 (void) zfs_refcount_add(&db->db_holds, tag);
3010 DBUF_VERIFY(db);
3011 mutex_exit(&db->db_mtx);
3012
3013 /* NOTE: we can't rele the parent until after we drop the db_mtx */
3014 if (parent)
3015 dbuf_rele(parent, NULL);
3016
3017 ASSERT3P(DB_DNODE(db), ==, dn);
3018 ASSERT3U(db->db_blkid, ==, blkid);
3019 ASSERT3U(db->db_level, ==, level);
3020 *dbp = db;
3021
3022 return (0);
3023 }
3024
3025 dmu_buf_impl_t *
3026 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
3027 {
3028 return (dbuf_hold_level(dn, 0, blkid, tag));
3029 }
3030
3031 dmu_buf_impl_t *
3032 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
3033 {
3034 dmu_buf_impl_t *db;
3035 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3036 return (err ? NULL : db);
3037 }
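
/*
 * Illustrative hold/rele pairing: every successful hold must be released
 * with the same tag, e.g.:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (db != NULL) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */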
3038
3039 void
3040 dbuf_create_bonus(dnode_t *dn)
3041 {
3042 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3043
3044 ASSERT(dn->dn_bonus == NULL);
3045 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
3046 }
3047
3048 int
3049 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3050 {
3051 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3052
3053 if (db->db_blkid != DMU_SPILL_BLKID)
3054 return (SET_ERROR(ENOTSUP));
3055 if (blksz == 0)
3056 blksz = SPA_MINBLOCKSIZE;
3057 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3058 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3059
3060 dbuf_new_size(db, blksz, tx);
3061
3062 return (0);
3063 }
3064
3065 void
3066 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3067 {
3068 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3069 }
3070
3071 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3072 void
3073 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
3074 {
3075 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3076 ASSERT3S(holds, >, 1);
3077 }
3078
3079 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3080 boolean_t
3081 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3082 void *tag)
3083 {
3084 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3085 dmu_buf_impl_t *found_db;
3086 boolean_t result = B_FALSE;
3087
3088 if (blkid == DMU_BONUS_BLKID)
3089 found_db = dbuf_find_bonus(os, obj);
3090 else
3091 found_db = dbuf_find(os, obj, 0, blkid);
3092
3093 if (found_db != NULL) {
3094 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3095 (void) zfs_refcount_add(&db->db_holds, tag);
3096 result = B_TRUE;
3097 }
3098 mutex_exit(&found_db->db_mtx);
3099 }
3100 return (result);
3101 }
3102
3103 /*
3104 * If you call dbuf_rele() you had better not be referencing the dnode handle
3105 * unless you have some other direct or indirect hold on the dnode. (An indirect
3106 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3107 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3108 * dnode's parent dbuf evicting its dnode handles.
3109 */
3110 void
3111 dbuf_rele(dmu_buf_impl_t *db, void *tag)
3112 {
3113 mutex_enter(&db->db_mtx);
3114 dbuf_rele_and_unlock(db, tag, B_FALSE);
3115 }
3116
3117 void
3118 dmu_buf_rele(dmu_buf_t *db, void *tag)
3119 {
3120 dbuf_rele((dmu_buf_impl_t *)db, tag);
3121 }
3122
3123 /*
3124 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
3125 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
3126 * argument should be set if we are already in the dbuf-evicting code
3127 * path, in which case we don't want to recursively evict. This allows us to
3128 * avoid deeply nested stacks that would have a call flow similar to this:
3129 *
3130 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3131 * ^ |
3132 * | |
3133 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+
3134 *
3135 */
3136 void
3137 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
3138 {
3139 int64_t holds;
3140
3141 ASSERT(MUTEX_HELD(&db->db_mtx));
3142 DBUF_VERIFY(db);
3143
3144 /*
3145 * Remove the reference to the dbuf before removing its hold on the
3146 * dnode so we can guarantee in dnode_move() that a referenced bonus
3147 * buffer has a corresponding dnode hold.
3148 */
3149 holds = zfs_refcount_remove(&db->db_holds, tag);
3150 ASSERT(holds >= 0);
3151
3152 /*
3153 * We can't freeze indirects if there is a possibility that they
3154 * may be modified in the current syncing context.
3155 */
3156 if (db->db_buf != NULL &&
3157 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3158 arc_buf_freeze(db->db_buf);
3159 }
3160
3161 if (holds == db->db_dirtycnt &&
3162 db->db_level == 0 && db->db_user_immediate_evict)
3163 dbuf_evict_user(db);
3164
3165 if (holds == 0) {
3166 if (db->db_blkid == DMU_BONUS_BLKID) {
3167 dnode_t *dn;
3168 boolean_t evict_dbuf = db->db_pending_evict;
3169
3170 /*
3171 * If the dnode moves here, we cannot cross this
3172 * barrier until the move completes.
3173 */
3174 DB_DNODE_ENTER(db);
3175
3176 dn = DB_DNODE(db);
3177 atomic_dec_32(&dn->dn_dbufs_count);
3178
3179 /*
3180 * Decrementing the dbuf count means that the bonus
3181 * buffer's dnode hold is no longer discounted in
3182 * dnode_move(). The dnode cannot move until after
3183 * the dnode_rele() below.
3184 */
3185 DB_DNODE_EXIT(db);
3186
3187 /*
3188 * Do not reference db after its lock is dropped.
3189 * Another thread may evict it.
3190 */
3191 mutex_exit(&db->db_mtx);
3192
3193 if (evict_dbuf)
3194 dnode_evict_bonus(dn);
3195
3196 dnode_rele(dn, db);
3197 } else if (db->db_buf == NULL) {
3198 /*
3199 * This is a special case: we never associated this
3200 * dbuf with any data allocated from the ARC.
3201 */
3202 ASSERT(db->db_state == DB_UNCACHED ||
3203 db->db_state == DB_NOFILL);
3204 dbuf_destroy(db);
3205 } else if (arc_released(db->db_buf)) {
3206 /*
3207 * This dbuf has anonymous data associated with it.
3208 */
3209 dbuf_destroy(db);
3210 } else {
3211 boolean_t do_arc_evict = B_FALSE;
3212 blkptr_t bp;
3213 spa_t *spa = dmu_objset_spa(db->db_objset);
3214
3215 if (!DBUF_IS_CACHEABLE(db) &&
3216 db->db_blkptr != NULL &&
3217 !BP_IS_HOLE(db->db_blkptr) &&
3218 !BP_IS_EMBEDDED(db->db_blkptr)) {
3219 do_arc_evict = B_TRUE;
3220 bp = *db->db_blkptr;
3221 }
3222
3223 if (!DBUF_IS_CACHEABLE(db) ||
3224 db->db_pending_evict) {
3225 dbuf_destroy(db);
3226 } else if (!multilist_link_active(&db->db_cache_link)) {
3227 ASSERT3U(db->db_caching_status, ==,
3228 DB_NO_CACHE);
3229
3230 dbuf_cached_state_t dcs =
3231 dbuf_include_in_metadata_cache(db) ?
3232 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3233 db->db_caching_status = dcs;
3234
3235 multilist_insert(dbuf_caches[dcs].cache, db);
3236 (void) zfs_refcount_add_many(
3237 &dbuf_caches[dcs].size, db->db.db_size, db);
3238 mutex_exit(&db->db_mtx);
3239
3240 if (db->db_caching_status == DB_DBUF_CACHE &&
3241 !evicting) {
3242 dbuf_evict_notify();
3243 }
3244 }
3245
3246 if (do_arc_evict)
3247 arc_freed(spa, &bp);
3248 }
3249 } else {
3250 mutex_exit(&db->db_mtx);
3251 }
3252
3253 }
3254
3255 #pragma weak dmu_buf_refcount = dbuf_refcount
3256 uint64_t
3257 dbuf_refcount(dmu_buf_impl_t *db)
3258 {
3259 return (zfs_refcount_count(&db->db_holds));
3260 }
3261
3262 uint64_t
3263 dmu_buf_user_refcount(dmu_buf_t *db_fake)
3264 {
3265 uint64_t holds;
3266 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3267
3268 mutex_enter(&db->db_mtx);
3269 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3270 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
3271 mutex_exit(&db->db_mtx);
3272
3273 return (holds);
3274 }
3275
3276 void *
3277 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3278 dmu_buf_user_t *new_user)
3279 {
3280 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3281
3282 mutex_enter(&db->db_mtx);
3283 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3284 if (db->db_user == old_user)
3285 db->db_user = new_user;
3286 else
3287 old_user = db->db_user;
3288 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3289 mutex_exit(&db->db_mtx);
3290
3291 return (old_user);
3292 }
3293
3294 void *
3295 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3296 {
3297 return (dmu_buf_replace_user(db_fake, NULL, user));
3298 }
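
/*
 * A hedged sketch of the dbuf-user protocol (obj, my_evict_sync, and
 * my_evict_async are hypothetical): embed a dmu_buf_user_t in the in-core
 * structure, initialize it, and attach it; the async eviction callback later
 * runs on dbu_evict_taskq when the dbuf goes away.
 *
 *	dmu_buf_init_user(&obj->obj_dbu, my_evict_sync, my_evict_async,
 *	    &obj->obj_db);
 *	if (dmu_buf_set_user(db, &obj->obj_dbu) != NULL)
 *		... lost the race; another user was already attached ...
 */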
3299
3300 void *
3301 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3302 {
3303 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3304
3305 db->db_user_immediate_evict = TRUE;
3306 return (dmu_buf_set_user(db_fake, user));
3307 }
3308
3309 void *
3310 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3311 {
3312 return (dmu_buf_replace_user(db_fake, user, NULL));
3313 }
3314
3315 void *
3316 dmu_buf_get_user(dmu_buf_t *db_fake)
3317 {
3318 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3319
3320 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3321 return (db->db_user);
3322 }
3323
3324 void
3325 dmu_buf_user_evict_wait()
3326 {
3327 taskq_wait(dbu_evict_taskq);
3328 }
3329
3330 blkptr_t *
3331 dmu_buf_get_blkptr(dmu_buf_t *db)
3332 {
3333 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3334 return (dbi->db_blkptr);
3335 }
3336
3337 objset_t *
3338 dmu_buf_get_objset(dmu_buf_t *db)
3339 {
3340 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3341 return (dbi->db_objset);
3342 }
3343
3344 dnode_t *
3345 dmu_buf_dnode_enter(dmu_buf_t *db)
3346 {
3347 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3348 DB_DNODE_ENTER(dbi);
3349 return (DB_DNODE(dbi));
3350 }
3351
3352 void
3353 dmu_buf_dnode_exit(dmu_buf_t *db)
3354 {
3355 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3356 DB_DNODE_EXIT(dbi);
3357 }
3358
3359 static void
3360 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
3361 {
3362	/* ASSERT(dmu_tx_is_syncing(tx)) */
3363 ASSERT(MUTEX_HELD(&db->db_mtx));
3364
3365 if (db->db_blkptr != NULL)
3366 return;
3367
3368 if (db->db_blkid == DMU_SPILL_BLKID) {
3369 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
3370 BP_ZERO(db->db_blkptr);
3371 return;
3372 }
3373 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
3374 /*
3375		 * This buffer was allocated at a time when there were
3376		 * no available blkptrs from the dnode, or it was
3377		 * inappropriate to hook it in (i.e., nlevels mismatch).
3378 */
3379 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
3380 ASSERT(db->db_parent == NULL);
3381 db->db_parent = dn->dn_dbuf;
3382 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
3383 DBUF_VERIFY(db);
3384 } else {
3385 dmu_buf_impl_t *parent = db->db_parent;
3386 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3387
3388 ASSERT(dn->dn_phys->dn_nlevels > 1);
3389 if (parent == NULL) {
3390 mutex_exit(&db->db_mtx);
3391 rw_enter(&dn->dn_struct_rwlock, RW_READER);
3392 parent = dbuf_hold_level(dn, db->db_level + 1,
3393 db->db_blkid >> epbs, db);
3394 rw_exit(&dn->dn_struct_rwlock);
3395 mutex_enter(&db->db_mtx);
3396 db->db_parent = parent;
3397 }
3398 db->db_blkptr = (blkptr_t *)parent->db.db_data +
3399 (db->db_blkid & ((1ULL << epbs) - 1));
3400 DBUF_VERIFY(db);
3401 }
3402 }
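
/*
 * Worked example of the parent/slot arithmetic above (illustrative
 * numbers, assuming the common 128K indirect block size): with
 * dn_indblkshift == 17 and SPA_BLKPTRSHIFT == 7, epbs is 10, so each
 * indirect block holds 1 << 10 == 1024 block pointers.  A level-0
 * dbuf with db_blkid == 3000 therefore hangs off the level-1 dbuf
 * with blkid 3000 >> 10 == 2, at slot 3000 & 1023 == 952 within that
 * parent's db_data.
 */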

/*
 * When syncing out blocks of dnodes, adjust the block to deal with
 * encryption. Normally, we make sure the block is decrypted before writing
 * it. If we have crypt params, then we are writing a raw (encrypted) block
 * from a raw receive. In this case, set the ARC buf's crypt params so
 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
 *
 * XXX we should handle decrypting the dnode block in dbuf_dirty().
 */
static void
dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
{
	int err;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
	ASSERT3U(db->db_level, ==, 0);

	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
		zbookmark_phys_t zb;

		/*
		 * Unfortunately, there is currently no mechanism for
		 * syncing context to handle decryption errors. An error
		 * here is only possible if an attacker maliciously
		 * changed a dnode block and updated the associated
		 * checksums going up the block tree.
		 */
		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
		    db->db.db_object, db->db_level, db->db_blkid);
		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
		    &zb, B_TRUE);
		if (err)
			panic("Invalid dnode block MAC");
	} else if (dr->dt.dl.dr_has_raw_params) {
		(void) arc_release(dr->dt.dl.dr_data, db);
		arc_convert_to_raw(dr->dt.dl.dr_data,
		    dmu_objset_id(db->db_objset),
		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
	}
}

static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	/* Read the block if it hasn't been read yet. */
	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/* Indirect block size must match what the dnode thinks it is. */
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);
	DB_DNODE_EXIT(db);

	/* Provide the pending dirty record to child dbufs */
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}

static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode. It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT0(db->db_level);
		ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
		    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
		bcopy(*datap, DN_BONUS(dn->dn_phys),
		    DN_MAX_BONUS_LEN(dn->dn_phys));
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			int slots = DB_DNODE(db)->dn_num_slots;
			int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
			zio_buf_free(*datap, bonuslen);
			arc_space_return(bonuslen, ARC_SPACE_BONUS);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in. As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	/*
	 * If this is a dnode block, ensure it is appropriately encrypted
	 * or decrypted, depending on what we are writing to it this txg.
	 */
	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
		dbuf_prepare_encrypted_dnode_leaf(dr);

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    zfs_refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DMU_OT_DNODE blocks).
		 */
		int psize = arc_buf_size(*datap);
		int lsize = arc_buf_lsize(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		enum zio_compress compress_type = arc_get_compression(*datap);

		if (arc_is_encrypted(*datap)) {
			boolean_t byteorder;
			uint8_t salt[ZIO_DATA_SALT_LEN];
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t mac[ZIO_DATA_MAC_LEN];

			arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
			*datap = arc_alloc_raw_buf(os->os_spa, db,
			    dmu_objset_id(os), byteorder, salt, iv, mac,
			    dn->dn_type, psize, lsize, compress_type);
		} else if (compress_type != ZIO_COMPRESS_OFF) {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			*datap = arc_alloc_compressed_buf(os->os_spa, db,
			    psize, lsize, compress_type);
		} else {
			*datap = arc_alloc_buf(os->os_spa, db, type, psize);
		}
		bcopy(db->db.db_data, (*datap)->b_data, psize);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO. If this is an empty write it seems
		 * plausible that the IO could actually be completed before
		 * the nowait returns. We need to DB_DNODE_EXIT() first in
		 * case zio_nowait() invalidates the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}
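
/*
 * Walk-through of the in-use copy condition above (illustrative, not
 * new behavior): zfs_refcount_count(&db->db_holds) > 1 means someone
 * besides the syncing context still holds the dbuf, and
 * *datap == db->db_buf means the open txg is still looking at the very
 * buffer we are about to hand to the ARC for checksumming and
 * compression.  Copying first lets a concurrent write(2) in the open
 * txg dirty db_buf without corrupting the txg being synced: the
 * syncing write proceeds from the copy while db_buf absorbs the new
 * data for the next txg.
 */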

void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
		    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
			VERIFY3U(dr->dr_dbuf->db_level, ==, level);
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}

/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT3P(db->db_blkptr, !=, NULL);
	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (bp->blk_birth != 0) {
		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
		    BP_GET_TYPE(bp) == dn->dn_type) ||
		    (db->db_blkid == DMU_SPILL_BLKID &&
		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
		    BP_IS_EMBEDDED(bp));
		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
	}

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(bp)) &&
		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID) {
			ASSERT0(db->db_objset->os_raw_receive);
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		}
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			i = 0;
			while (i < db->db.db_size) {
				dnode_phys_t *dnp =
				    (void *)(((char *)db->db.db_data) + i);

				i += DNODE_MIN_SIZE;
				if (dnp->dn_type != DMU_OT_NONE) {
					fill++;
					i += dnp->dn_extra_slots *
					    DNODE_MIN_SIZE;
				}
			}
		} else {
			if (BP_IS_HOLE(bp)) {
				fill = 0;
			} else {
				fill = 1;
			}
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += BP_GET_FILL(ibp);
		}
	}
	DB_DNODE_EXIT(db);

	if (!BP_IS_EMBEDDED(bp))
		BP_SET_FILL(bp, fill);

	mutex_exit(&db->db_mtx);

	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
	*db->db_blkptr = *bp;
	dmu_buf_unlock_parent(db, dblt, FTAG);
}
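
/*
 * Worked example of the fill computation above (illustrative numbers):
 * for a 16K dnode block the scan advances in DNODE_MIN_SIZE (512-byte)
 * slots; a large dnode with dn_extra_slots == 1 occupies two slots but
 * increments fill only once, so a block holding 8 such dnodes plus 16
 * plain ones fills all 32 slots and yields fill == 24.  For an
 * indirect block, fill is instead the sum of BP_GET_FILL() over its
 * non-hole children, so a level-1 block pointing at 1000 written L0
 * blocks and 24 holes gets fill == 1000.
 */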

/* ARGSUSED */
/*
 * This function gets called just prior to running through the compression
 * stage of the zio pipeline. If we're an indirect block comprised of only
 * holes, then we want this indirect to be compressed away to a hole. In
 * order to do that we must zero out any information about the holes that
 * this indirect points to before we try to compress it.
 */
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp;
	unsigned int epbs, i;

	ASSERT3U(db->db_level, >, 0);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);

	/* Determine if all our children are holes */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}

	/*
	 * If all the children are holes, then zero them all out so that
	 * we may get compressed away.
	 */
	if (i == 1 << epbs) {
		/*
		 * We only found holes. Grab the rwlock to prevent
		 * anybody from reading the blocks we're about to
		 * zero out.
		 */
		rw_enter(&db->db_rwlock, RW_WRITER);
		bzero(db->db.db_data, db->db.db_size);
		rw_exit(&db->db_rwlock);
	}
	DB_DNODE_EXIT(db);
}
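
/*
 * Example of why the zeroing above matters (illustrative): a hole bp
 * is not necessarily all-zero bytes; with hole birth tracking it can
 * carry a birth txg.  A 128K indirect block whose 1024 children were
 * all freed may still contain that residue, which would keep the
 * buffer from compressing to nothing.  After the bzero(), the
 * compression stage sees a buffer of zeros and can represent the
 * entire indirect as a hole bp, so no block is allocated for it.
 */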

/*
 * The SPA will call this callback several times for each zio - once
 * for every physical child i/o (zio->io_phys_children times). This
 * allows the DMU to monitor the progress of each logical i/o. For example,
 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
 * block. There may be a long delay before all copies/fragments are completed,
 * so this callback allows us to retire dirty space gradually, as the physical
 * i/os complete.
 */
/* ARGSUSED */
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
	dmu_buf_impl_t *db = arg;
	objset_t *os = db->db_objset;
	dsl_pool_t *dp = dmu_objset_pool(os);
	dbuf_dirty_record_t *dr;
	int delta = 0;

	dr = db->db_data_pending;
	ASSERT3U(dr->dr_txg, ==, zio->io_txg);

	/*
	 * The callback will be called io_phys_children times. Retire one
	 * portion of our dirty space each time we are called. Any rounding
	 * error will be cleaned up by dsl_pool_sync()'s call to
	 * dsl_pool_undirty_space().
	 */
	delta = dr->dr_accounted / zio->io_phys_children;
	dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
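
/*
 * Worked example of the gradual retirement above (illustrative
 * numbers): with dr_accounted == 393216 bytes and io_phys_children ==
 * 3 (say, three DVA copies of a metadata block), each invocation
 * retires 393216 / 3 == 131072 bytes.  With io_phys_children == 7 the
 * integer division leaves 393216 - 7 * 56173 == 5 bytes unretired;
 * that remainder is the rounding error dsl_pool_sync() later cleans
 * up via dsl_pool_undirty_space().
 */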

/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	blkptr_t *bp = db->db_blkptr;
	objset_t *os = db->db_objset;
	dmu_tx_t *tx = os->os_synctx;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT0(zio->io_error);
	ASSERT(db->db_blkptr == bp);

	/*
	 * For nopwrites and rewrites we ensure that the bp matches our
	 * original and bypass all the accounting.
	 */
	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
		DB_DNODE_EXIT(db);
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				arc_buf_destroy(dr->dt.dl.dr_data, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs =
			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_blkid, <=,
			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);
	dbuf_write_done(zio, NULL, db);

	if (zio->io_abd != NULL)
		abd_put(zio->io_abd);
}

typedef struct dbuf_remap_impl_callback_arg {
	objset_t	*drica_os;
	uint64_t	drica_blk_birth;
	dmu_tx_t	*drica_tx;
} dbuf_remap_impl_callback_arg_t;

static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
    void *arg)
{
	dbuf_remap_impl_callback_arg_t *drica = arg;
	objset_t *os = drica->drica_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_tx_t *tx = drica->drica_tx;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (os == spa_meta_objset(spa)) {
		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
	} else {
		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
		    size, drica->drica_blk_birth, tx);
	}
}

static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
	blkptr_t bp_copy = *bp;
	spa_t *spa = dmu_objset_spa(dn->dn_objset);
	dbuf_remap_impl_callback_arg_t drica;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	drica.drica_os = dn->dn_objset;
	drica.drica_blk_birth = bp->blk_birth;
	drica.drica_tx = tx;
	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
	    &drica)) {
		/*
		 * The db_rwlock prevents dbuf_read_impl() from
		 * dereferencing the BP while we are changing it. To
		 * avoid lock contention, only grab it when we are actually
		 * changing the BP.
		 */
		if (rw != NULL)
			rw_enter(rw, RW_WRITER);
		*bp = bp_copy;
		if (rw != NULL)
			rw_exit(rw);
	}
}

/*
 * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting
 * to remap a copy of every bp in the dbuf.
 */
boolean_t
dbuf_can_remap(const dmu_buf_impl_t *db)
{
	spa_t *spa = dmu_objset_spa(db->db_objset);
	blkptr_t *bp = db->db.db_data;
	boolean_t ret = B_FALSE;

	ASSERT3U(db->db_level, >, 0);
	ASSERT3S(db->db_state, ==, DB_CACHED);

	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
		blkptr_t bp_copy = bp[i];
		if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
			ret = B_TRUE;
			break;
		}
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (ret);
}

boolean_t
dnode_needs_remap(const dnode_t *dn)
{
	spa_t *spa = dmu_objset_spa(dn->dn_objset);
	boolean_t ret = B_FALSE;

	if (dn->dn_phys->dn_nlevels == 0) {
		return (B_FALSE);
	}

	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) {
		blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j];
		if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
			ret = B_TRUE;
			break;
		}
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (ret);
}

/*
 * Remap any existing BPs to concrete vdevs, if possible.
 */
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(db->db_objset);
	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return;

	if (db->db_level > 0) {
		blkptr_t *bp = db->db.db_data;
		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
		}
	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		dnode_phys_t *dnp = db->db.db_data;
		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
		    DMU_OT_DNODE);
		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) {
			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
				    &dn->dn_dbuf->db_rwlock);
				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
				    tx);
			}
		}
	}
}
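
/*
 * Worked example of the iteration bounds above (illustrative numbers):
 * a 128K indirect block yields db_size >> SPA_BLKPTRSHIFT ==
 * 131072 >> 7 == 1024 block pointers to consider, while a 16K
 * meta-dnode block yields db_size >> DNODE_SHIFT == 16384 >> 9 == 32
 * dnode slots, each contributing up to dn_nblkptr (at most 3) blkptrs.
 * Every candidate is remapped via a copy in dbuf_remap_impl(), and the
 * stored BP is only overwritten (under the parent's rwlock) when
 * spa_remap_blkptr() actually changed that copy.
 */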

/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	ASSERT(dmu_tx_is_syncing(tx));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
			dbuf_remap(dn, db, tx);
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);

	DB_DNODE_EXIT(db);

	/*
	 * We copy the blkptr now (rather than when we instantiate the dirty
	 * record), because its value can change between open context and
	 * syncing context. We do not need to hold dn_struct_rwlock to read
	 * db_blkptr because we are in syncing context.
	 */
	dr->dr_bp_copy = *db->db_blkptr;

	if (db->db_level == 0 &&
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * The BP for this block has been provided by open context
		 * (by dmu_sync() or dmu_buf_write_embedded()).
		 */
		abd_t *contents = (data != NULL) ?
		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;

		dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy,
		    contents, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_override_ready, NULL, NULL,
		    dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL, NULL,
		    dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));

		/*
		 * For indirect blocks, we want to set up the children
		 * ready callback so that we can properly handle an indirect
		 * block that only contains holes.
		 */
		arc_write_done_func_t *children_ready_cb = NULL;
		if (db->db_level != 0)
			children_ready_cb = dbuf_write_children_ready;

		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
		    &zp, dbuf_write_ready, children_ready_cb,
		    dbuf_write_physdone, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}