1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 */
28
29 #ifndef _SYS_DBUF_H
30 #define _SYS_DBUF_H
31
32 #include <sys/dmu.h>
33 #include <sys/spa.h>
34 #include <sys/txg.h>
35 #include <sys/zio.h>
36 #include <sys/arc.h>
37 #include <sys/zfs_context.h>
38 #include <sys/zfs_refcount.h>
39 #include <sys/zrlock.h>
40 #include <sys/multilist.h>
41
42 #ifdef __cplusplus
43 extern "C" {
44 #endif
45
/*
 * Sentinel value associated with the dmu_sync() write path (cf.
 * DR_IN_DMU_SYNC below).  NOTE(review): see dbuf.c/dmu.c for usage.
 */
#define	IN_DMU_SYNC	2
47
48 /*
49 * The simplified state transition diagram for dbufs looks like:
50 *
51 * +-------> READ ------+
52 * | |
53 * | V
54 * (alloc)-->UNCACHED CACHED-->EVICTING-->(free)
55 * ^ | ^ ^
56 * | | | |
57 * | +-------> FILL ------+ |
58 * | | | |
59 * | | | |
60 * | +------> NOFILL -----+-----> UNCACHED
61 * | | (Direct I/O)
62 * +---------------+
63 *
64 * DB_SEARCH is an invalid state for a dbuf. It is used by dbuf_free_range
65 * to find all dbufs in a range of a dnode and must be less than any other
66 * dbuf_states_t (see comment on dn_dbufs in dnode.h).
67 */
/*
 * DB_MARKER and DB_SEARCH are sentinels, never actual dbuf states; their
 * negative values keep them ordered before every real state (see the
 * DB_SEARCH comment above).
 */
typedef enum dbuf_states {
	DB_MARKER = -2,		/* sentinel; must sort below DB_SEARCH */
	DB_SEARCH = -1,		/* search sentinel for dbuf_free_range() */
	DB_UNCACHED,		/* freshly allocated; no valid contents */
	DB_FILL,		/* being filled with newly written data */
	DB_NOFILL,		/* contents intentionally not read in */
	DB_READ,		/* disk read in progress */
	DB_CACHED,		/* holds valid data */
	DB_EVICTING		/* being torn down, about to be freed */
} dbuf_states_t;
78
/*
 * Which dbuf cache (if any) currently holds this dbuf; stored in
 * db_caching_status and used together with db_cache_link below.
 */
typedef enum dbuf_cached_state {
	DB_NO_CACHE = -1,		/* not on any dbuf cache list */
	DB_DBUF_CACHE,			/* on the regular dbuf cache */
	DB_DBUF_METADATA_CACHE,		/* on the dbuf metadata cache */
	DB_CACHE_MAX			/* count of caches; not a valid state */
} dbuf_cached_state_t;
85
86 struct dnode;
87 struct dmu_tx;
88
89 /*
90 * level = 0 means the user data
91 * level = 1 means the single indirect block
92 * etc.
93 */
94
95 struct dmu_buf_impl;
96
/*
 * Override state of a dirty leaf's block pointer (dr_override_state in
 * struct dirty_leaf below).  When DR_OVERRIDDEN, dr_overridden_by holds
 * the block pointer to use in place of a normal write.
 */
typedef enum override_states {
	DR_NOT_OVERRIDDEN,	/* normal write path */
	DR_IN_DMU_SYNC,		/* dmu_sync() write is in flight */
	DR_OVERRIDDEN		/* dr_overridden_by is valid */
} override_states_t;
102
/*
 * Which lock dmu_buf_lock_parent() acquired; the caller passes it back
 * to dmu_buf_unlock_parent() so the matching lock can be dropped.
 */
typedef enum db_lock_type {
	DLT_NONE,	/* no lock was taken */
	DLT_PARENT,	/* the parent dbuf's db_rwlock */
	DLT_OBJSET	/* objset-level lock — NOTE(review): confirm in dbuf.c */
} db_lock_type_t;
108
/*
 * One dirty record exists per dbuf (or lightweight leaf block) per TXG in
 * which it is dirty; a dbuf's records are linked on db_dirty_records.
 */
typedef struct dbuf_dirty_record {
	/* link on our parent's dirty list */
	list_node_t dr_dirty_node;

	/* transaction group this data will sync in */
	uint64_t dr_txg;

	/* zio of outstanding write IO */
	zio_t *dr_zio;

	/* pointer back to our dbuf */
	struct dmu_buf_impl *dr_dbuf;

	/* list link for dbuf dirty records */
	list_node_t dr_dbuf_node;

	/*
	 * The dnode we are part of.  Note that the dnode can not be moved or
	 * evicted due to the hold that's added by dnode_setdirty() or
	 * dmu_objset_sync_dnodes(), and released by dnode_rele_task() or
	 * userquota_updates_task().  This hold is necessary for
	 * dirty_lightweight_leaf-type dirty records, which don't have a hold
	 * on a dbuf.
	 */
	dnode_t *dr_dnode;

	/* pointer to parent dirty record */
	struct dbuf_dirty_record *dr_parent;

	/* How much space was charged to dsl_pool_dirty_space() for this? */
	unsigned int dr_accounted;

	/* A copy of the bp that points to us */
	blkptr_t dr_bp_copy;

	/* Per-kind payload; which member is valid depends on the record. */
	union dirty_types {
		/* An indirect block: tracks its dirty children. */
		struct dirty_indirect {

			/* protect access to list */
			kmutex_t dr_mtx;

			/* Our list of dirty children */
			list_t dr_children;
		} di;
		/* A leaf (level-0) block with an instantiated dbuf. */
		struct dirty_leaf {

			/*
			 * dr_data is set when we dirty the buffer
			 * so that we can retain the pointer even if it
			 * gets COW'd in a subsequent transaction group.
			 */
			arc_buf_t *dr_data;
			override_states_t dr_override_state;
			/* requested number of block copies (DVAs) */
			uint8_t dr_copies;
			/* copies for gang blocks — NOTE(review): confirm */
			uint8_t dr_gang_copies;
			boolean_t dr_nopwrite;	/* nopwrite optimization */
			boolean_t dr_brtwrite;	/* block-clone (BRT) write */
			boolean_t dr_diowrite;	/* Direct I/O write */
			boolean_t dr_rewrite;	/* rewrite of existing block */
			boolean_t dr_has_raw_params;

			/* Override and raw params are mutually exclusive. */
			union {
				blkptr_t dr_overridden_by;
				struct {
					/*
					 * If dr_has_raw_params is set, the
					 * following crypt params will be set
					 * on the BP that's written.
					 */
					boolean_t dr_byteorder;
					uint8_t	dr_salt[ZIO_DATA_SALT_LEN];
					uint8_t	dr_iv[ZIO_DATA_IV_LEN];
					uint8_t	dr_mac[ZIO_DATA_MAC_LEN];
				};
			};
		} dl;
		struct dirty_lightweight_leaf {
			/*
			 * This dirty record refers to a leaf (level=0)
			 * block, whose dbuf has not been instantiated for
			 * performance reasons.
			 */
			uint64_t dr_blkid;
			abd_t *dr_abd;
			zio_prop_t dr_props;
			zio_flag_t dr_flags;
		} dll;
	} dt;
} dbuf_dirty_record_t;
199
typedef struct dmu_buf_impl {
	/*
	 * The following members are immutable, with the exception of
	 * db.db_data, which is protected by db_mtx.
	 */

	/* the publicly visible structure */
	dmu_buf_t db;

	/* the objset we belong to */
	struct objset *db_objset;

	/*
	 * Handle to safely access the dnode we belong to (NULL when evicted)
	 * if dnode_move() is used on the platform, or just dnode otherwise.
	 */
#if !defined(__linux__) && !defined(__FreeBSD__)
#define	USE_DNODE_HANDLE	1
	struct dnode_handle *db_dnode_handle;
#else
	struct dnode *db_dnode;
#endif

	/*
	 * our parent buffer; if the dnode points to us directly,
	 * db_parent == db_dnode_handle->dnh_dnode->dn_dbuf
	 * only accessed by sync thread ???
	 * (NULL when evicted)
	 * May change from NULL to non-NULL under the protection of db_mtx
	 * (see dbuf_check_blkptr())
	 */
	struct dmu_buf_impl *db_parent;

	/*
	 * link for hash table of all dmu_buf_impl_t's
	 */
	struct dmu_buf_impl *db_hash_next;

	/*
	 * Our link on the owner dnode's dn_dbufs list.
	 * Protected by its dn_dbufs_mtx. Should be on the same cache line
	 * as db_level and db_blkid for the best avl_add() performance.
	 */
	avl_node_t db_link;

	/* our block number */
	uint64_t db_blkid;

	/*
	 * Pointer to the blkptr_t which points to us. May be NULL if we
	 * don't have one yet. (NULL when evicted)
	 */
	blkptr_t *db_blkptr;

	/*
	 * Our indirection level. Data buffers have db_level==0.
	 * Indirect buffers which point to data buffers have
	 * db_level==1. etc. Buffers which contain dnodes have
	 * db_level==0, since the dnodes are stored in a file.
	 */
	uint8_t db_level;

	/* This block was freed while a read or write was active. */
	uint8_t db_freed_in_flight;

	/*
	 * Evict user data as soon as the dirty and reference counts are equal.
	 */
	uint8_t db_user_immediate_evict;

	/*
	 * dnode_evict_dbufs() or dnode_evict_bonus() tried to evict this dbuf,
	 * but couldn't due to outstanding references. Evict once the refcount
	 * drops to 0.
	 */
	uint8_t db_pending_evict;

	/* Number of TXGs in which this buffer is dirty. */
	uint8_t db_dirtycnt;

	/* The buffer was partially read. More reads may follow. */
	uint8_t db_partial_read;

	/*
	 * Protects db_buf's contents if they contain an indirect block or data
	 * block of the meta-dnode. We use this lock to protect the structure of
	 * the block tree. This means that when modifying this dbuf's data, we
	 * grab its rwlock. When modifying its parent's data (including the
	 * blkptr to this dbuf), we grab the parent's rwlock. The lock ordering
	 * for this lock is:
	 * 1) dn_struct_rwlock
	 * 2) db_rwlock
	 * We don't currently grab multiple dbufs' db_rwlocks at once.
	 */
	krwlock_t db_rwlock;

	/* buffer holding our data */
	arc_buf_t *db_buf;

	/* db_mtx protects the members below */
	kmutex_t db_mtx;

	/*
	 * Current state of the buffer
	 */
	dbuf_states_t db_state;

	/* In which dbuf cache this dbuf is, if any. */
	dbuf_cached_state_t db_caching_status;

	/*
	 * Refcount accessed by dmu_buf_{hold,rele}.
	 * If nonzero, the buffer can't be destroyed.
	 * Protected by db_mtx.
	 */
	zfs_refcount_t db_holds;

	/* CV for waiting on db_state changes; used together with db_mtx. */
	kcondvar_t db_changed;

	/*
	 * Dirty record whose data is currently being written out, if any.
	 * NOTE(review): confirm lifecycle against dbuf.c sync path.
	 */
	dbuf_dirty_record_t *db_data_pending;

	/* List of dirty records for the buffer sorted newest to oldest. */
	list_t db_dirty_records;

	/* Link in dbuf_cache or dbuf_metadata_cache */
	multilist_node_t db_cache_link;

	/* Cached hash value for the dbuf hash table (cf. dbuf_find()). */
	uint64_t db_hash;

	/* User callback information. */
	dmu_buf_user_t *db_user;
} dmu_buf_impl_t;
331
/*
 * Select the striped lock guarding the given hash bucket index; the mask
 * requires the mutex count to be a power of two.
 */
#define	DBUF_HASH_MUTEX(h, idx) \
	(&(h)->hash_mutexes[(idx) & ((h)->hash_mutex_mask)])

/*
 * Global dbuf hash table: open chaining via db_hash_next, with an array
 * of striped mutexes (see DBUF_HASH_MUTEX() above).
 */
typedef struct dbuf_hash_table {
	uint64_t hash_table_mask;	/* # buckets - 1 */
	uint64_t hash_mutex_mask;	/* # mutexes - 1 (power of two) */
	dmu_buf_impl_t **hash_table;	/* bucket array of chains */
	kmutex_t *hash_mutexes;		/* striped bucket locks */
} dbuf_hash_table_t;
341
342 typedef void (*dbuf_prefetch_fn)(void *, uint64_t, uint64_t, boolean_t);
343
344 extern kmem_cache_t *dbuf_dirty_kmem_cache;
345
346 uint64_t dbuf_whichblock(const struct dnode *di, const int64_t level,
347 const uint64_t offset);
348
349 void dbuf_create_bonus(struct dnode *dn);
350 int dbuf_spill_set_blksz(dmu_buf_t *db, uint64_t blksz, dmu_tx_t *tx);
351
352 void dbuf_rm_spill(struct dnode *dn, dmu_tx_t *tx);
353
354 dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, const void *tag);
355 dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid,
356 const void *tag);
357 int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid,
358 boolean_t fail_sparse, boolean_t fail_uncached,
359 const void *tag, dmu_buf_impl_t **dbp);
360
361 int dbuf_prefetch_impl(struct dnode *dn, int64_t level, uint64_t blkid,
362 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
363 void *arg);
364 int dbuf_prefetch(struct dnode *dn, int64_t level, uint64_t blkid,
365 zio_priority_t prio, arc_flags_t aflags);
366
367 void dbuf_add_ref(dmu_buf_impl_t *db, const void *tag);
368 boolean_t dbuf_try_add_ref(dmu_buf_t *db, objset_t *os, uint64_t obj,
369 uint64_t blkid, const void *tag);
370 uint64_t dbuf_refcount(dmu_buf_impl_t *db);
371
372 void dbuf_rele(dmu_buf_impl_t *db, const void *tag);
373 void dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag,
374 boolean_t evicting);
375
376 dmu_buf_impl_t *dbuf_find(struct objset *os, uint64_t object, uint8_t level,
377 uint64_t blkid, uint64_t *hash_out);
378
379 int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, dmu_flags_t flags);
380 void dmu_buf_will_clone_or_dio(dmu_buf_t *db, dmu_tx_t *tx);
381 void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx);
382 void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx, boolean_t canfail);
383 void dmu_buf_will_fill_flags(dmu_buf_t *db, dmu_tx_t *tx, boolean_t canfail,
384 dmu_flags_t flags);
385 boolean_t dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx, boolean_t failed);
386 void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx,
387 dmu_flags_t flags);
388 dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
389 dbuf_dirty_record_t *dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid,
390 dmu_tx_t *tx);
391 boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
392 int dmu_buf_get_bp_from_dbuf(dmu_buf_impl_t *db, blkptr_t **bp);
393 int dmu_buf_untransform_direct(dmu_buf_impl_t *db, spa_t *spa);
394 void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
395 bp_embedded_type_t etype, enum zio_compress comp,
396 int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
397
398 int dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
399 const struct zio_prop *zp, zio_flag_t flags, dmu_tx_t *tx);
400
401 void dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx);
402 void dbuf_destroy(dmu_buf_impl_t *db);
403
404 void dbuf_unoverride(dbuf_dirty_record_t *dr);
405 void dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx);
406 void dbuf_release_bp(dmu_buf_impl_t *db);
407 db_lock_type_t dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw,
408 const void *tag);
409 void dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type,
410 const void *tag);
411
412 void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end,
413 struct dmu_tx *);
414
415 void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx);
416
417 void dbuf_stats_init(dbuf_hash_table_t *hash);
418 void dbuf_stats_destroy(void);
419
420 int dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
421 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift);
422
/*
 * Accessors for the dnode this dbuf belongs to.
 *
 * When USE_DNODE_HANDLE is defined (platforms where dnode_move() may
 * relocate dnodes; see db_dnode_handle above), DB_DNODE_ENTER/EXIT take
 * a zrlock on the handle so the dnode cannot be moved while in use.
 * Otherwise the raw dnode pointer is stable and the lock macros expand
 * to nothing.
 */
#ifdef USE_DNODE_HANDLE
#define	DB_DNODE(_db)		((_db)->db_dnode_handle->dnh_dnode)
#define	DB_DNODE_LOCK(_db)	((_db)->db_dnode_handle->dnh_zrlock)
#define	DB_DNODE_ENTER(_db)	(zrl_add(&DB_DNODE_LOCK(_db)))
#define	DB_DNODE_EXIT(_db)	(zrl_remove(&DB_DNODE_LOCK(_db)))
#define	DB_DNODE_HELD(_db)	(!zrl_is_zero(&DB_DNODE_LOCK(_db)))
#else
#define	DB_DNODE(_db)		((_db)->db_dnode)
#define	DB_DNODE_LOCK(_db)
#define	DB_DNODE_ENTER(_db)
#define	DB_DNODE_EXIT(_db)
#define	DB_DNODE_HELD(_db)	(B_TRUE)
#endif
436
437 void dbuf_init(void);
438 void dbuf_fini(void);
439 void dbuf_cache_reduce_target_size(void);
440
441 boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
442
443 static inline dbuf_dirty_record_t *
dbuf_find_dirty_lte(dmu_buf_impl_t * db,uint64_t txg)444 dbuf_find_dirty_lte(dmu_buf_impl_t *db, uint64_t txg)
445 {
446 dbuf_dirty_record_t *dr;
447
448 for (dr = list_head(&db->db_dirty_records);
449 dr != NULL && dr->dr_txg > txg;
450 dr = list_next(&db->db_dirty_records, dr))
451 continue;
452 return (dr);
453 }
454
455 static inline dbuf_dirty_record_t *
dbuf_find_dirty_eq(dmu_buf_impl_t * db,uint64_t txg)456 dbuf_find_dirty_eq(dmu_buf_impl_t *db, uint64_t txg)
457 {
458 dbuf_dirty_record_t *dr;
459
460 dr = dbuf_find_dirty_lte(db, txg);
461 if (dr && dr->dr_txg == txg)
462 return (dr);
463 return (NULL);
464 }
465
/* ARC buffer type (metadata vs. data) to use for this dbuf's contents. */
#define	DBUF_GET_BUFC_TYPE(_db)	\
	(dbuf_is_metadata(_db) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)

/*
 * May this dbuf be kept in the dbuf cache?  True when no eviction is
 * pending and the objset's primary cache policy admits it: either
 * everything is cacheable (ZFS_CACHE_ALL), or this is a metadata dbuf
 * and metadata caching is enabled (ZFS_CACHE_METADATA).
 */
#define	DBUF_IS_CACHEABLE(_db)	(!(_db)->db_pending_evict && \
	((_db)->db_objset->os_primary_cache == ZFS_CACHE_ALL || \
	(dbuf_is_metadata(_db) && \
	((_db)->db_objset->os_primary_cache == ZFS_CACHE_METADATA))))
473
474 boolean_t dbuf_is_l2cacheable(dmu_buf_impl_t *db, blkptr_t *db_bp);
475
476 #ifdef ZFS_DEBUG
477
478 /*
479 * There should be a ## between the string literal and fmt, to make it
480 * clear that we're joining two strings together, but gcc does not
481 * support that preprocessor token.
482 */
/*
 * Log a debug message tagged with the dbuf's object number, level and
 * blkid.  Only emits output when ZFS_DEBUG_DPRINTF is set in zfs_flags.
 * The meta-dnode object is printed as "mdn" instead of its number.
 */
#define	dprintf_dbuf(dbuf, fmt, ...) do { \
	if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
	char __db_buf[32]; \
	uint64_t __db_obj = (dbuf)->db.db_object; \
	if (__db_obj == DMU_META_DNODE_OBJECT) \
		(void) strlcpy(__db_buf, "mdn", sizeof (__db_buf)); \
	else \
		(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
		    (u_longlong_t)__db_obj); \
	dprintf_ds((dbuf)->db_objset->os_dsl_dataset, \
	    "obj=%s lvl=%u blkid=%lld " fmt, \
	    __db_buf, (dbuf)->db_level, \
	    (u_longlong_t)(dbuf)->db_blkid, __VA_ARGS__); \
	} \
} while (0)

/*
 * As dprintf_dbuf(), but additionally renders and appends the given
 * block pointer.  Allocates a temporary buffer (KM_SLEEP) to do so.
 */
#define	dprintf_dbuf_bp(db, bp, fmt, ...) do { \
	if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
	char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
	snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, bp); \
	dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf); \
	kmem_free(__blkbuf, BP_SPRINTF_LEN); \
	} \
} while (0)
507
508 #define DBUF_VERIFY(db) dbuf_verify(db)
509
510 #else
511
512 #define dprintf_dbuf(db, fmt, ...)
513 #define dprintf_dbuf_bp(db, bp, fmt, ...)
514 #define DBUF_VERIFY(db)
515
516 #endif
517
518
519 #ifdef __cplusplus
520 }
521 #endif
522
523 #endif /* _SYS_DBUF_H */
524