1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27 * Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
28 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
29 * Copyright (c) 2019 Datto Inc.
30 * Copyright (c) 2019, 2023, Klara Inc.
31 * Copyright (c) 2019, Allan Jude
32 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
33 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
34 */
35
36 #include <sys/dmu.h>
37 #include <sys/dmu_impl.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/dbuf.h>
40 #include <sys/dnode.h>
41 #include <sys/zfs_context.h>
42 #include <sys/dmu_objset.h>
43 #include <sys/dmu_traverse.h>
44 #include <sys/dsl_dataset.h>
45 #include <sys/dsl_dir.h>
46 #include <sys/dsl_pool.h>
47 #include <sys/dsl_synctask.h>
48 #include <sys/dsl_prop.h>
49 #include <sys/dmu_zfetch.h>
50 #include <sys/zfs_ioctl.h>
51 #include <sys/zap.h>
52 #include <sys/zio_checksum.h>
53 #include <sys/zio_compress.h>
54 #include <sys/sa.h>
55 #include <sys/zfeature.h>
56 #include <sys/abd.h>
57 #include <sys/brt.h>
58 #include <sys/trace_zfs.h>
59 #include <sys/zfs_racct.h>
60 #include <sys/zfs_rlock.h>
61 #ifdef _KERNEL
62 #include <sys/vmsystm.h>
63 #include <sys/zfs_znode.h>
64 #endif
65
66 /*
67 * Enable/disable nopwrite feature.
68 */
69 static int zfs_nopwrite_enabled = 1;
70
71 /*
72 * Tunable to control percentage of dirtied L1 blocks from frees allowed into
73 * one TXG. After this threshold is crossed, additional dirty blocks from frees
74 * will wait until the next TXG.
75 * A value of zero will disable this throttle.
76 */
77 static uint_t zfs_per_txg_dirty_frees_percent = 30;
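
/*
 * Worked example with hypothetical (not default) numbers: if
 * zfs_dirty_data_max is 4 GiB and this tunable is 30, then
 * dmu_free_long_range_impl() allows roughly 30% * 4 GiB = 1.2 GiB of L1
 * blocks dirtied by frees per TXG before further frees wait for the next
 * TXG.
 */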
78
79 /*
80 * Enable/disable forcing txg sync when dirty checking for holes with lseek().
81 * By default this is enabled to ensure accurate hole reporting, but it can
82 * result in a significant performance penalty for lseek(SEEK_HOLE) heavy
83 * workloads. Disabling this option will result in holes never being reported
84 * in dirty files, which is always safe.
85 */
86 static int zfs_dmu_offset_next_sync = 1;
87
88 /*
89 * Limit the amount we can prefetch with one call to this amount. This
90 * helps to limit the amount of memory that can be used by prefetching.
91 * Larger objects should be prefetched a bit at a time.
92 */
93 #ifdef _ILP32
94 uint_t dmu_prefetch_max = 8 * 1024 * 1024;
95 #else
96 uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
97 #endif
98
99 /*
100 * Override copies= for dedup state objects. 0 means the traditional behaviour
101 * (i.e. the default for the containing objset, which is 3 for the MOS).
102 */
103 uint_t dmu_ddt_copies = 0;
104
105 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
106 {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
107 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
108 {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
109 {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
110 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
111 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
112 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
113 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
114 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
115 {DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
116 {DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
117 {DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
118 {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
119 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
120 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
121 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
122 {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
123 {DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
124 {DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
125 {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
126 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
127 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
128 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
129 {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
130 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
131 {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
132 {DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
133 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
134 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
135 {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
136 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
137 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
138 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
139 {DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
140 {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
141 {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
142 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
143 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
144 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
145 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
146 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
147 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
148 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
149 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
150 {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
151 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
152 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
153 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
154 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
155 {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
156 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
157 {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
158 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
159 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
160 };
161
162 dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
163 { byteswap_uint8_array, "uint8" },
164 { byteswap_uint16_array, "uint16" },
165 { byteswap_uint32_array, "uint32" },
166 { byteswap_uint64_array, "uint64" },
167 { zap_byteswap, "zap" },
168 { dnode_buf_byteswap, "dnode" },
169 { dmu_objset_byteswap, "objset" },
170 { zfs_znode_byteswap, "znode" },
171 { zfs_oldacl_byteswap, "oldacl" },
172 { zfs_acl_byteswap, "acl" }
173 };
174
175 int
176 dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
177 const void *tag, dmu_buf_t **dbp)
178 {
179 uint64_t blkid;
180 dmu_buf_impl_t *db;
181
182 rw_enter(&dn->dn_struct_rwlock, RW_READER);
183 blkid = dbuf_whichblock(dn, 0, offset);
184 db = dbuf_hold(dn, blkid, tag);
185 rw_exit(&dn->dn_struct_rwlock);
186
187 if (db == NULL) {
188 *dbp = NULL;
189 return (SET_ERROR(EIO));
190 }
191
192 *dbp = &db->db;
193 return (0);
194 }
195
196 int
197 dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
198 const void *tag, dmu_buf_t **dbp)
199 {
200 dnode_t *dn;
201 uint64_t blkid;
202 dmu_buf_impl_t *db;
203 int err;
204
205 err = dnode_hold(os, object, FTAG, &dn);
206 if (err)
207 return (err);
208 rw_enter(&dn->dn_struct_rwlock, RW_READER);
209 blkid = dbuf_whichblock(dn, 0, offset);
210 db = dbuf_hold(dn, blkid, tag);
211 rw_exit(&dn->dn_struct_rwlock);
212 dnode_rele(dn, FTAG);
213
214 if (db == NULL) {
215 *dbp = NULL;
216 return (SET_ERROR(EIO));
217 }
218
219 *dbp = &db->db;
220 return (err);
221 }
222
223 int
224 dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
225 const void *tag, dmu_buf_t **dbp, dmu_flags_t flags)
226 {
227 int err;
228
229 err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
230 if (err == 0) {
231 dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
232 err = dbuf_read(db, NULL, flags | DB_RF_CANFAIL);
233 if (err != 0) {
234 dbuf_rele(db, tag);
235 *dbp = NULL;
236 }
237 }
238
239 return (err);
240 }
241
242 int
243 dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
244 const void *tag, dmu_buf_t **dbp, dmu_flags_t flags)
245 {
246 int err;
247
248 err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
249 if (err == 0) {
250 dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
251 err = dbuf_read(db, NULL, flags | DB_RF_CANFAIL);
252 if (err != 0) {
253 dbuf_rele(db, tag);
254 *dbp = NULL;
255 }
256 }
257
258 return (err);
259 }
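
/*
 * Illustrative usage sketch (object, offset, len and buf are arbitrary
 * example values): hold the buffer covering offset, copy data out of
 * db->db_data, which stays valid until the release, then drop the hold.
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		uint64_t off_in_blk = offset - db->db_offset;
 *		memcpy(buf, (char *)db->db_data + off_in_blk,
 *		    MIN(len, db->db_size - off_in_blk));
 *		dmu_buf_rele(db, FTAG);
 *	}
 */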
260
261 int
262 dmu_bonus_max(void)
263 {
264 return (DN_OLD_MAX_BONUSLEN);
265 }
266
267 int
268 dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
269 {
270 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
271 dnode_t *dn;
272 int error;
273
274 if (newsize < 0 || newsize > db_fake->db_size)
275 return (SET_ERROR(EINVAL));
276
277 DB_DNODE_ENTER(db);
278 dn = DB_DNODE(db);
279
280 if (dn->dn_bonus != db) {
281 error = SET_ERROR(EINVAL);
282 } else {
283 dnode_setbonuslen(dn, newsize, tx);
284 error = 0;
285 }
286
287 DB_DNODE_EXIT(db);
288 return (error);
289 }
290
291 int
292 dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
293 {
294 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
295 dnode_t *dn;
296 int error;
297
298 if (!DMU_OT_IS_VALID(type))
299 return (SET_ERROR(EINVAL));
300
301 DB_DNODE_ENTER(db);
302 dn = DB_DNODE(db);
303
304 if (dn->dn_bonus != db) {
305 error = SET_ERROR(EINVAL);
306 } else {
307 dnode_setbonus_type(dn, type, tx);
308 error = 0;
309 }
310
311 DB_DNODE_EXIT(db);
312 return (error);
313 }
314
315 dmu_object_type_t
316 dmu_get_bonustype(dmu_buf_t *db_fake)
317 {
318 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
319 dmu_object_type_t type;
320
321 DB_DNODE_ENTER(db);
322 type = DB_DNODE(db)->dn_bonustype;
323 DB_DNODE_EXIT(db);
324
325 return (type);
326 }
327
328 int
329 dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
330 {
331 dnode_t *dn;
332 int error;
333
334 error = dnode_hold(os, object, FTAG, &dn);
335 dbuf_rm_spill(dn, tx);
336 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
337 dnode_rm_spill(dn, tx);
338 rw_exit(&dn->dn_struct_rwlock);
339 dnode_rele(dn, FTAG);
340 return (error);
341 }
342
343 /*
344 * Lookup and hold the bonus buffer for the provided dnode. If the dnode
345 * has not yet been allocated a bonus dbuf, a new one will be allocated.
346 * Returns ENOENT, EIO, or 0.
347 */
348 int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
349 dmu_flags_t flags)
350 {
351 dmu_buf_impl_t *db;
352 int error;
353
354 rw_enter(&dn->dn_struct_rwlock, RW_READER);
355 if (dn->dn_bonus == NULL) {
356 if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
357 rw_exit(&dn->dn_struct_rwlock);
358 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
359 }
360 if (dn->dn_bonus == NULL)
361 dbuf_create_bonus(dn);
362 }
363 db = dn->dn_bonus;
364
365 /* as long as the bonus buf is held, the dnode will be held */
366 if (zfs_refcount_add(&db->db_holds, tag) == 1) {
367 VERIFY(dnode_add_ref(dn, db));
368 atomic_inc_32(&dn->dn_dbufs_count);
369 }
370
371 /*
372 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
373 * hold and incrementing the dbuf count to ensure that dnode_move() sees
374 * a dnode hold for every dbuf.
375 */
376 rw_exit(&dn->dn_struct_rwlock);
377
378 error = dbuf_read(db, NULL, flags | DB_RF_CANFAIL);
379 if (error) {
380 dnode_evict_bonus(dn);
381 dbuf_rele(db, tag);
382 *dbp = NULL;
383 return (error);
384 }
385
386 *dbp = &db->db;
387 return (0);
388 }
389
390 int
391 dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
392 {
393 dnode_t *dn;
394 int error;
395
396 error = dnode_hold(os, object, FTAG, &dn);
397 if (error)
398 return (error);
399
400 error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
401 dnode_rele(dn, FTAG);
402
403 return (error);
404 }
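
/*
 * Illustrative usage sketch: the bonus buffer typically holds per-object
 * metadata (e.g. a znode_phys_t or SA data for ZPL files). While the hold
 * is kept, bonus->db_data points at bonus->db_size bytes of bonus data.
 * "object" is an arbitrary example value.
 *
 *	dmu_buf_t *bonus;
 *	int err = dmu_bonus_hold(os, object, FTAG, &bonus);
 *	if (err == 0)
 *		dmu_buf_rele(bonus, FTAG);
 */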
405
406 /*
407 * returns ENOENT, EIO, or 0.
408 *
409 * This interface will allocate a blank spill dbuf when a spill blk
410 * doesn't already exist on the dnode.
411 *
412 * if you only want to find an already existing spill db, then
413 * dmu_spill_hold_existing() should be used.
414 */
415 int
416 dmu_spill_hold_by_dnode(dnode_t *dn, dmu_flags_t flags, const void *tag,
417 dmu_buf_t **dbp)
418 {
419 dmu_buf_impl_t *db = NULL;
420 int err;
421
422 if ((flags & DB_RF_HAVESTRUCT) == 0)
423 rw_enter(&dn->dn_struct_rwlock, RW_READER);
424
425 db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
426
427 if ((flags & DB_RF_HAVESTRUCT) == 0)
428 rw_exit(&dn->dn_struct_rwlock);
429
430 if (db == NULL) {
431 *dbp = NULL;
432 return (SET_ERROR(EIO));
433 }
434 err = dbuf_read(db, NULL, flags);
435 if (err == 0)
436 *dbp = &db->db;
437 else {
438 dbuf_rele(db, tag);
439 *dbp = NULL;
440 }
441 return (err);
442 }
443
444 int
445 dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp)
446 {
447 dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
448 dnode_t *dn;
449 int err;
450
451 DB_DNODE_ENTER(db);
452 dn = DB_DNODE(db);
453
454 if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
455 err = SET_ERROR(EINVAL);
456 } else {
457 rw_enter(&dn->dn_struct_rwlock, RW_READER);
458
459 if (!dn->dn_have_spill) {
460 err = SET_ERROR(ENOENT);
461 } else {
462 err = dmu_spill_hold_by_dnode(dn,
463 DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
464 }
465
466 rw_exit(&dn->dn_struct_rwlock);
467 }
468
469 DB_DNODE_EXIT(db);
470 return (err);
471 }
472
473 int
474 dmu_spill_hold_by_bonus(dmu_buf_t *bonus, dmu_flags_t flags, const void *tag,
475 dmu_buf_t **dbp)
476 {
477 dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
478 int err;
479
480 DB_DNODE_ENTER(db);
481 err = dmu_spill_hold_by_dnode(DB_DNODE(db), flags, tag, dbp);
482 DB_DNODE_EXIT(db);
483
484 return (err);
485 }
486
487 /*
488 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
489 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
490 * and can induce severe lock contention when writing to several files
491 * whose dnodes are in the same block.
492 */
493 int
494 dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
495 boolean_t read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp,
496 dmu_flags_t flags)
497 {
498 dmu_buf_t **dbp;
499 zstream_t *zs = NULL;
500 uint64_t blkid, nblks, i;
501 dmu_flags_t dbuf_flags;
502 int err;
503 zio_t *zio = NULL;
504 boolean_t missed = B_FALSE;
505
506 ASSERT(!read || length <= DMU_MAX_ACCESS);
507
508 /*
509 * Note: We directly notify the prefetch code of this read, so that
510 * we can tell it about the multi-block read. dbuf_read() only knows
511 * about the one block it is accessing.
512 */
513 dbuf_flags = (flags & ~DMU_READ_PREFETCH) | DMU_READ_NO_PREFETCH |
514 DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
515
516 rw_enter(&dn->dn_struct_rwlock, RW_READER);
517 if (dn->dn_datablkshift) {
518 int blkshift = dn->dn_datablkshift;
519 nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
520 P2ALIGN_TYPED(offset, 1ULL << blkshift, uint64_t))
521 >> blkshift;
522 } else {
523 if (offset + length > dn->dn_datablksz) {
524 zfs_panic_recover("zfs: accessing past end of object "
525 "%llx/%llx (size=%u access=%llu+%llu)",
526 (longlong_t)dn->dn_objset->
527 os_dsl_dataset->ds_object,
528 (longlong_t)dn->dn_object, dn->dn_datablksz,
529 (longlong_t)offset, (longlong_t)length);
530 rw_exit(&dn->dn_struct_rwlock);
531 return (SET_ERROR(EIO));
532 }
533 nblks = 1;
534 }
535 dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
536
537 if (read)
538 zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
539 ZIO_FLAG_CANFAIL);
540 blkid = dbuf_whichblock(dn, 0, offset);
541 if ((flags & DMU_READ_NO_PREFETCH) == 0) {
542 /*
543 * Prepare the zfetch before initiating the demand reads, so
544 * that if multiple threads block on the same indirect block, we
545 * base predictions on the original less racy request order.
546 */
547 zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks,
548 read && !(flags & DMU_DIRECTIO), B_TRUE);
549 }
550 for (i = 0; i < nblks; i++) {
551 dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
552 if (db == NULL) {
553 if (zs) {
554 dmu_zfetch_run(&dn->dn_zfetch, zs, missed,
555 B_TRUE, (flags & DMU_UNCACHEDIO));
556 }
557 rw_exit(&dn->dn_struct_rwlock);
558 dmu_buf_rele_array(dbp, nblks, tag);
559 if (read)
560 zio_nowait(zio);
561 return (SET_ERROR(EIO));
562 }
563
564 /*
565 * Initiate async demand data read.
566 * We check the db_state after calling dbuf_read() because
567 * (1) dbuf_read() may change the state to CACHED due to a
568 * hit in the ARC, and (2) on a cache miss, a child will
569 * have been added to "zio" but not yet completed, so the
570 * state will not yet be CACHED.
571 */
572 if (read) {
573 if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
574 offset + length < db->db.db_offset +
575 db->db.db_size) {
576 if (offset <= db->db.db_offset)
577 dbuf_flags |= DMU_PARTIAL_FIRST;
578 else
579 dbuf_flags |= DMU_PARTIAL_MORE;
580 }
581 (void) dbuf_read(db, zio, dbuf_flags);
582 if (db->db_state != DB_CACHED)
583 missed = B_TRUE;
584 }
585 dbp[i] = &db->db;
586 }
587
588 /*
589 * If we are doing O_DIRECT we still hold the dbufs, even for reads,
590 * but we do not issue any reads here. We do not want to account for
591 * writes in this case.
592 *
593 * O_DIRECT write/read accounting takes place in
594 * dmu_{write/read}_abd().
595 */
596 if (!read && ((flags & DMU_DIRECTIO) == 0))
597 zfs_racct_write(dn->dn_objset->os_spa, length, nblks, flags);
598
599 if (zs) {
600 dmu_zfetch_run(&dn->dn_zfetch, zs, missed, B_TRUE,
601 (flags & DMU_UNCACHEDIO));
602 }
603 rw_exit(&dn->dn_struct_rwlock);
604
605 if (read) {
606 /* wait for async read i/o */
607 err = zio_wait(zio);
608 if (err) {
609 dmu_buf_rele_array(dbp, nblks, tag);
610 return (err);
611 }
612
613 /* wait for other io to complete */
614 for (i = 0; i < nblks; i++) {
615 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
616 mutex_enter(&db->db_mtx);
617 while (db->db_state == DB_READ ||
618 db->db_state == DB_FILL)
619 cv_wait(&db->db_changed, &db->db_mtx);
620 if (db->db_state == DB_UNCACHED)
621 err = SET_ERROR(EIO);
622 mutex_exit(&db->db_mtx);
623 if (err) {
624 dmu_buf_rele_array(dbp, nblks, tag);
625 return (err);
626 }
627 }
628 }
629
630 *numbufsp = nblks;
631 *dbpp = dbp;
632 return (0);
633 }
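
/*
 * Illustrative usage sketch for the array interface: hold every buffer
 * covering [offset, offset + length), process each one, then release them
 * together. Parameters are arbitrary example values and consume() stands
 * for whatever per-buffer processing the caller needs.
 *
 *	dmu_buf_t **dbp;
 *	int numbufs;
 *	int err = dmu_buf_hold_array_by_dnode(dn, offset, length, TRUE, FTAG,
 *	    &numbufs, &dbp, DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		for (int i = 0; i < numbufs; i++)
 *			consume(dbp[i]);
 *		dmu_buf_rele_array(dbp, numbufs, FTAG);
 *	}
 */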
634
635 int
636 dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
637 uint64_t length, int read, const void *tag, int *numbufsp,
638 dmu_buf_t ***dbpp, dmu_flags_t flags)
639 {
640 dnode_t *dn;
641 int err;
642
643 err = dnode_hold(os, object, FTAG, &dn);
644 if (err)
645 return (err);
646
647 err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
648 numbufsp, dbpp, flags);
649
650 dnode_rele(dn, FTAG);
651
652 return (err);
653 }
654
655 int
656 dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
657 uint64_t length, boolean_t read, const void *tag, int *numbufsp,
658 dmu_buf_t ***dbpp, dmu_flags_t flags)
659 {
660 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
661 int err;
662
663 DB_DNODE_ENTER(db);
664 err = dmu_buf_hold_array_by_dnode(DB_DNODE(db), offset, length, read,
665 tag, numbufsp, dbpp, flags);
666 DB_DNODE_EXIT(db);
667
668 return (err);
669 }
670
671 void
672 dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, const void *tag)
673 {
674 int i;
675 dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
676
677 if (numbufs == 0)
678 return;
679
680 for (i = 0; i < numbufs; i++) {
681 if (dbp[i])
682 dbuf_rele(dbp[i], tag);
683 }
684
685 kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
686 }
687
688 /*
689 * Issue prefetch I/Os for the given blocks. If level is greater than 0, the
690 * indirect blocks prefetched will be those that point to the blocks containing
691 * the data starting at offset, and continuing to offset + len. If the range
692 * is too long, prefetch only the first dmu_prefetch_max bytes at the requested
693 * level; for the rest prefetch only a higher level, also fitting within
694 * dmu_prefetch_max. This should primarily help random reads, since for long
695 * sequential reads there is a speculative prefetcher.
696 *
697 * Note that if the indirect blocks above the blocks being prefetched are not
698 * in cache, they will be asynchronously read in. Dnode read by dnode_hold()
699 * is currently synchronous.
700 */
701 void
702 dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
703 uint64_t len, zio_priority_t pri)
704 {
705 dnode_t *dn;
706
707 if (dmu_prefetch_max == 0 || len == 0) {
708 dmu_prefetch_dnode(os, object, pri);
709 return;
710 }
711
712 if (dnode_hold(os, object, FTAG, &dn) != 0)
713 return;
714
715 dmu_prefetch_by_dnode(dn, level, offset, len, pri);
716
717 dnode_rele(dn, FTAG);
718 }
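
/*
 * Illustrative usage sketch: start read-ahead of the first 1 MiB of an
 * object's level-0 data blocks before a planned read; "object" is an
 * arbitrary example value.
 *
 *	dmu_prefetch(os, object, 0, 0, 1 << 20, ZIO_PRIORITY_ASYNC_READ);
 */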
719
720 void
721 dmu_prefetch_by_dnode(dnode_t *dn, int64_t level, uint64_t offset,
722 uint64_t len, zio_priority_t pri)
723 {
724 int64_t level2 = level;
725 uint64_t start, end, start2, end2;
726
727 /*
728 * Depending on len we may do two prefetches: blocks [start, end) at
729 * level, and following blocks [start2, end2) at higher level2.
730 */
731 rw_enter(&dn->dn_struct_rwlock, RW_READER);
732 if (dn->dn_datablkshift != 0) {
733
734 /*
735 * Limit prefetch to present blocks.
736 */
737 uint64_t size = (dn->dn_maxblkid + 1) << dn->dn_datablkshift;
738 if (offset >= size) {
739 rw_exit(&dn->dn_struct_rwlock);
740 return;
741 }
742 if (offset + len < offset || offset + len > size)
743 len = size - offset;
744
745 /*
746 * The object has multiple blocks. Calculate the full range
747 * of blocks [start, end2) and then split it into two parts,
748 * so that the first [start, end) fits into dmu_prefetch_max.
749 */
750 start = dbuf_whichblock(dn, level, offset);
751 end2 = dbuf_whichblock(dn, level, offset + len - 1) + 1;
752 uint8_t ibs = dn->dn_indblkshift;
753 uint8_t bs = (level == 0) ? dn->dn_datablkshift : ibs;
754 uint_t limit = P2ROUNDUP(dmu_prefetch_max, 1 << bs) >> bs;
755 start2 = end = MIN(end2, start + limit);
756
757 /*
758 * Find level2 where [start2, end2) fits into dmu_prefetch_max.
759 */
760 uint8_t ibps = ibs - SPA_BLKPTRSHIFT;
761 limit = P2ROUNDUP(dmu_prefetch_max, 1 << ibs) >> ibs;
762 if (limit == 0)
763 end2 = start2;
764 do {
765 level2++;
766 start2 = P2ROUNDUP(start2, 1 << ibps) >> ibps;
767 end2 = P2ROUNDUP(end2, 1 << ibps) >> ibps;
768 } while (end2 - start2 > limit);
769 } else {
770 /* There is only one block. Prefetch it or nothing. */
771 start = start2 = end2 = 0;
772 end = start + (level == 0 && offset < dn->dn_datablksz);
773 }
774
775 for (uint64_t i = start; i < end; i++)
776 dbuf_prefetch(dn, level, i, pri, 0);
777 for (uint64_t i = start2; i < end2; i++)
778 dbuf_prefetch(dn, level2, i, pri, 0);
779 rw_exit(&dn->dn_struct_rwlock);
780 }
781
782 /*
783 * Prime a prefetch for sequential accesses from offset for at least len bytes.
784 */
785 void
786 dmu_prefetch_stream(objset_t *os, uint64_t object, uint64_t offset,
787 uint64_t len, boolean_t start_now)
788 {
789 dnode_t *dn;
790
791 if (dnode_hold(os, object, FTAG, &dn) != 0)
792 return;
793 dmu_prefetch_stream_by_dnode(dn, offset, len, start_now);
794 dnode_rele(dn, FTAG);
795 }
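
/*
 * Illustrative usage sketch: prime the speculative prefetcher for an
 * expected sequential read of the next 8 MiB, issuing the first reads
 * immediately.
 *
 *	dmu_prefetch_stream(os, object, offset, 8 << 20, B_TRUE);
 */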
796
797 void
798 dmu_prefetch_stream_by_dnode(dnode_t *dn, uint64_t offset, uint64_t len,
799 boolean_t start_now)
800 {
801 rw_enter(&dn->dn_struct_rwlock, RW_READER);
802 if (dn->dn_datablkshift != 0) {
803 uint64_t start = dbuf_whichblock(dn, 0, offset);
804 if (len == 0) {
805 if (dmu_zfetch_prime(&dn->dn_zfetch, start, start) &&
806 start_now) {
807 dmu_zfetch(&dn->dn_zfetch, start, 0, B_TRUE,
808 B_TRUE, B_TRUE, B_FALSE);
809 }
810 } else {
811 uint64_t end = dbuf_whichblock(dn, 0, offset + len - 1);
812 if (start == end) {
813 if (start_now) {
814 dbuf_prefetch(dn, 0, start,
815 ZIO_PRIORITY_ASYNC_READ, 0);
816 }
817 } else if (
818 dmu_zfetch_prime(&dn->dn_zfetch, start, end + 1) &&
819 start_now) {
820 dmu_zfetch(&dn->dn_zfetch, start, 0, B_TRUE,
821 B_TRUE, B_TRUE, B_FALSE);
822 }
823 }
824 } else if (offset < dn->dn_datablksz && start_now) {
825 dbuf_prefetch(dn, 0, 0, ZIO_PRIORITY_ASYNC_READ, 0);
826 }
827 rw_exit(&dn->dn_struct_rwlock);
828 }
829
830 typedef struct {
831 kmutex_t dpa_lock;
832 kcondvar_t dpa_cv;
833 uint64_t dpa_pending_io;
834 } dmu_prefetch_arg_t;
835
836 static void
837 dmu_prefetch_done(void *arg, uint64_t level, uint64_t blkid, boolean_t issued)
838 {
839 (void) level; (void) blkid; (void) issued;
840 dmu_prefetch_arg_t *dpa = arg;
841
842 ASSERT0(level);
843
844 mutex_enter(&dpa->dpa_lock);
845 ASSERT3U(dpa->dpa_pending_io, >, 0);
846 if (--dpa->dpa_pending_io == 0)
847 cv_broadcast(&dpa->dpa_cv);
848 mutex_exit(&dpa->dpa_lock);
849 }
850
851 static void
852 dmu_prefetch_wait_by_dnode(dnode_t *dn, uint64_t offset, uint64_t len)
853 {
854 dmu_prefetch_arg_t dpa;
855
856 mutex_init(&dpa.dpa_lock, NULL, MUTEX_DEFAULT, NULL);
857 cv_init(&dpa.dpa_cv, NULL, CV_DEFAULT, NULL);
858
859 rw_enter(&dn->dn_struct_rwlock, RW_READER);
860
861 uint64_t start = dbuf_whichblock(dn, 0, offset);
862 uint64_t end = dbuf_whichblock(dn, 0, offset + len - 1) + 1;
863 dpa.dpa_pending_io = end - start;
864
865 for (uint64_t blk = start; blk < end; blk++) {
866 (void) dbuf_prefetch_impl(dn, 0, blk, ZIO_PRIORITY_ASYNC_READ,
867 0, dmu_prefetch_done, &dpa);
868 }
869
870 rw_exit(&dn->dn_struct_rwlock);
871
872 /* wait for prefetch L0 reads to finish */
873 mutex_enter(&dpa.dpa_lock);
874 while (dpa.dpa_pending_io > 0) {
875 cv_wait(&dpa.dpa_cv, &dpa.dpa_lock);
876
877 }
878 mutex_exit(&dpa.dpa_lock);
879
880 mutex_destroy(&dpa.dpa_lock);
881 cv_destroy(&dpa.dpa_cv);
882 }
883
884 /*
885 * Issue prefetch I/Os for the given L0 block range and wait for the I/O
886 * to complete. This does not enforce dmu_prefetch_max and will prefetch
887 * the entire range. The blocks are read from disk into the ARC but no
888 * decompression occurs (i.e., the dbuf cache is not required).
889 */
890 int
891 dmu_prefetch_wait(objset_t *os, uint64_t object, uint64_t offset, uint64_t size)
892 {
893 dnode_t *dn;
894 int err = 0;
895
896 err = dnode_hold(os, object, FTAG, &dn);
897 if (err != 0)
898 return (err);
899
900 /*
901 * Chunk the requests (16 indirects worth) so that we can be
902 * interrupted. Prefetch at least SPA_MAXBLOCKSIZE at a time
903 * to better utilize pools with smaller block sizes.
904 */
905 uint64_t chunksize;
906 if (dn->dn_indblkshift) {
907 uint64_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
908 chunksize = (nbps * 16) << dn->dn_datablkshift;
909 chunksize = MAX(chunksize, SPA_MAXBLOCKSIZE);
910 } else {
911 chunksize = dn->dn_datablksz;
912 }
913
914 while (size > 0) {
915 uint64_t mylen = MIN(size, chunksize);
916
917 dmu_prefetch_wait_by_dnode(dn, offset, mylen);
918
919 offset += mylen;
920 size -= mylen;
921
922 if (issig()) {
923 err = SET_ERROR(EINTR);
924 break;
925 }
926 }
927
928 dnode_rele(dn, FTAG);
929
930 return (err);
931 }
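
/*
 * Illustrative usage sketch: synchronously warm the ARC with an object's
 * first 16 MiB before a latency-sensitive scan. A return of EINTR means a
 * signal interrupted the wait and the tail of the range was not prefetched.
 *
 *	int err = dmu_prefetch_wait(os, object, 0, 16 << 20);
 */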
932
933 /*
934 * Issue prefetch I/Os for the given object's dnode.
935 */
936 void
937 dmu_prefetch_dnode(objset_t *os, uint64_t object, zio_priority_t pri)
938 {
939 if (object == 0 || object >= DN_MAX_OBJECT)
940 return;
941
942 dnode_t *dn = DMU_META_DNODE(os);
943 rw_enter(&dn->dn_struct_rwlock, RW_READER);
944 uint64_t blkid = dbuf_whichblock(dn, 0, object * sizeof (dnode_phys_t));
945 dbuf_prefetch(dn, 0, blkid, pri, 0);
946 rw_exit(&dn->dn_struct_rwlock);
947 }
948
949 /*
950 * Advisory cache eviction for a byte range of an object.
951 */
952 void
953 dmu_evict_range(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
954 {
955 dnode_t *dn;
956
957 if (len == 0)
958 return;
959 if (dnode_hold(os, object, FTAG, &dn) != 0)
960 return;
961
962 /*
963 * Exclude the last block if the range end is not block-aligned:
964 * a sequential access may continue into that block. The first
965 * block is included even when partially covered since backwards
966 * access patterns are rare.
967 */
968 rw_enter(&dn->dn_struct_rwlock, RW_READER);
969 uint64_t start, end;
970 if (dn->dn_datablkshift != 0) {
971 start = dbuf_whichblock(dn, 0, offset);
972 end = dbuf_whichblock(dn, 0, offset + len);
973 } else {
974 start = (offset >= dn->dn_datablksz);
975 end = (offset + len >= dn->dn_datablksz);
976 }
977 if (end > start)
978 dbuf_evict_range(dn, start, end - 1);
979 rw_exit(&dn->dn_struct_rwlock);
980
981 dnode_rele(dn, FTAG);
982 }
983
984 /*
985 * Get the next "chunk" of file data to free. We traverse the file from
986 * the end so that the file gets shorter over time (if we crash in the
987 * middle, this will leave us in a better state). We find allocated file
988 * data by simply searching the allocated level 1 indirects.
989 *
990 * On input, *start should be the first offset that does not need to be
991 * freed (e.g. "offset + length"). On return, *start will be the first
992 * offset that should be freed and l1blks is set to the number of level 1
993 * indirect blocks found within the chunk.
994 */
995 static int
996 get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
997 {
998 uint64_t blks;
999 uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
1000 /* bytes of data covered by a level-1 indirect block */
1001 uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
1002 EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
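
/*
 * Worked example, assuming common (but not universal) block sizes: with
 * 128 KiB data blocks and 128 KiB indirect blocks, an L1 block holds
 * 2^(17 - 7) = 1024 block pointers, so iblkrange = 128 KiB * 1024 =
 * 128 MiB of file data per L1 indirect block.
 */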
1003
1004 ASSERT3U(minimum, <=, *start);
1005
1006 /* dn_nlevels == 1 means we don't have any L1 blocks */
1007 if (dn->dn_nlevels <= 1) {
1008 *l1blks = 0;
1009 *start = minimum;
1010 return (0);
1011 }
1012
1013 /*
1014 * Check if we can free the entire range assuming that all of the
1015 * L1 blocks in this range have data. If we can, we use this
1016 * worst case value as an estimate so we can avoid having to look
1017 * at the object's actual data.
1018 */
1019 uint64_t total_l1blks =
1020 (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
1021 iblkrange;
1022 if (total_l1blks <= maxblks) {
1023 *l1blks = total_l1blks;
1024 *start = minimum;
1025 return (0);
1026 }
1027 ASSERT(ISP2(iblkrange));
1028
1029 for (blks = 0; *start > minimum && blks < maxblks; blks++) {
1030 int err;
1031
1032 /*
1033 * dnode_next_offset(BACKWARDS) will find an allocated L1
1034 * indirect block at or before the input offset. We must
1035 * decrement *start so that it is at the end of the region
1036 * to search.
1037 */
1038 (*start)--;
1039
1040 err = dnode_next_offset(dn,
1041 DNODE_FIND_BACKWARDS, start, 2, 1, 0);
1042
1043 /* if there are no indirect blocks before start, we are done */
1044 if (err == ESRCH) {
1045 *start = minimum;
1046 break;
1047 } else if (err != 0) {
1048 *l1blks = blks;
1049 return (err);
1050 }
1051
1052 /* set start to the beginning of this L1 indirect */
1053 *start = P2ALIGN_TYPED(*start, iblkrange, uint64_t);
1054 }
1055 if (*start < minimum)
1056 *start = minimum;
1057 *l1blks = blks;
1058
1059 return (0);
1060 }
1061
1062 /*
1063 * If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
1064 * flag is set; otherwise return false.
1065 * Used below in dmu_free_long_range_impl() to enable aborting when unmounting.
1066 */
1067 static boolean_t
1068 dmu_objset_zfs_unmounting(objset_t *os)
1069 {
1070 #ifdef _KERNEL
1071 if (dmu_objset_type(os) == DMU_OST_ZFS)
1072 return (zfs_get_vfs_flag_unmounted(os));
1073 #else
1074 (void) os;
1075 #endif
1076 return (B_FALSE);
1077 }
1078
1079 static int
1080 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
1081 uint64_t length)
1082 {
1083 uint64_t object_size;
1084 int err;
1085 uint64_t dirty_frees_threshold;
1086 dsl_pool_t *dp = dmu_objset_pool(os);
1087
1088 if (dn == NULL)
1089 return (SET_ERROR(EINVAL));
1090
1091 object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
1092 if (offset >= object_size)
1093 return (0);
1094
1095 if (zfs_per_txg_dirty_frees_percent <= 100)
1096 dirty_frees_threshold =
1097 zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
1098 else
1099 dirty_frees_threshold = zfs_dirty_data_max / 20;
1100
1101 if (length == DMU_OBJECT_END || offset + length > object_size)
1102 length = object_size - offset;
1103
1104 while (length != 0) {
1105 uint64_t chunk_end, chunk_begin, chunk_len;
1106 uint64_t l1blks;
1107 dmu_tx_t *tx;
1108
1109 if (dmu_objset_zfs_unmounting(dn->dn_objset))
1110 return (SET_ERROR(EINTR));
1111
1112 chunk_end = chunk_begin = offset + length;
1113
1114 /* move chunk_begin backwards to the beginning of this chunk */
1115 err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
1116 if (err)
1117 return (err);
1118 ASSERT3U(chunk_begin, >=, offset);
1119 ASSERT3U(chunk_begin, <=, chunk_end);
1120
1121 chunk_len = chunk_end - chunk_begin;
1122
1123 tx = dmu_tx_create(os);
1124 dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
1125
1126 /*
1127 * Mark this transaction as typically resulting in a net
1128 * reduction in space used.
1129 */
1130 dmu_tx_mark_netfree(tx);
1131 err = dmu_tx_assign(tx, DMU_TX_WAIT);
1132 if (err) {
1133 dmu_tx_abort(tx);
1134 return (err);
1135 }
1136
1137 uint64_t txg = dmu_tx_get_txg(tx);
1138
1139 mutex_enter(&dp->dp_lock);
1140 uint64_t long_free_dirty =
1141 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
1142 mutex_exit(&dp->dp_lock);
1143
1144 /*
1145 * To avoid filling up a TXG with just frees, wait for
1146 * the next TXG to open before freeing more chunks if
1147 * we have reached the threshold of frees.
1148 */
1149 if (dirty_frees_threshold != 0 &&
1150 long_free_dirty >= dirty_frees_threshold) {
1151 DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
1152 dmu_tx_commit(tx);
1153 txg_wait_open(dp, 0, B_TRUE);
1154 continue;
1155 }
1156
1157 /*
1158 * In order to prevent unnecessary write throttling, for each
1159 * TXG, we track the cumulative size of L1 blocks being dirtied
1160 * in dnode_free_range() below. We compare this number to a
1161 * tunable threshold, past which we prevent new L1 dirty freeing
1162 * blocks from being added into the open TXG. See
1163 * dmu_free_long_range_impl() for details. The threshold
1164 * prevents write throttle activation due to dirty freeing L1
1165 * blocks taking up a large percentage of zfs_dirty_data_max.
1166 */
1167 mutex_enter(&dp->dp_lock);
1168 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
1169 l1blks << dn->dn_indblkshift;
1170 mutex_exit(&dp->dp_lock);
1171 DTRACE_PROBE3(free__long__range,
1172 uint64_t, long_free_dirty, uint64_t, chunk_len,
1173 uint64_t, txg);
1174 dnode_free_range(dn, chunk_begin, chunk_len, tx);
1175
1176 dmu_tx_commit(tx);
1177
1178 length -= chunk_len;
1179 }
1180 return (0);
1181 }
1182
1183 int
1184 dmu_free_long_range(objset_t *os, uint64_t object,
1185 uint64_t offset, uint64_t length)
1186 {
1187 dnode_t *dn;
1188 int err;
1189
1190 err = dnode_hold(os, object, FTAG, &dn);
1191 if (err != 0)
1192 return (err);
1193 err = dmu_free_long_range_impl(os, dn, offset, length);
1194
1195 /*
1196 * It is important to zero out the maxblkid when freeing the entire
1197 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
1198 * will take the fast path, and (b) dnode_reallocate() can verify
1199 * that the entire file has been freed.
1200 */
1201 if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
1202 dn->dn_maxblkid = 0;
1203
1204 dnode_rele(dn, FTAG);
1205 return (err);
1206 }
1207
1208 int
1209 dmu_free_long_object(objset_t *os, uint64_t object)
1210 {
1211 dmu_tx_t *tx;
1212 int err;
1213
1214 err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
1215 if (err != 0)
1216 return (err);
1217
1218 tx = dmu_tx_create(os);
1219 dmu_tx_hold_bonus(tx, object);
1220 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1221 dmu_tx_mark_netfree(tx);
1222 err = dmu_tx_assign(tx, DMU_TX_WAIT);
1223 if (err == 0) {
1224 err = dmu_object_free(os, object, tx);
1225 dmu_tx_commit(tx);
1226 } else {
1227 dmu_tx_abort(tx);
1228 }
1229
1230 return (err);
1231 }
1232
1233 int
1234 dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
1235 uint64_t size, dmu_tx_t *tx)
1236 {
1237 dnode_t *dn;
1238 int err = dnode_hold(os, object, FTAG, &dn);
1239 if (err)
1240 return (err);
1241 ASSERT(offset < UINT64_MAX);
1242 ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
1243 dnode_free_range(dn, offset, size, tx);
1244 dnode_rele(dn, FTAG);
1245 return (0);
1246 }
1247
1248 static int
1249 dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
1250 void *buf, dmu_flags_t flags)
1251 {
1252 dmu_buf_t **dbp;
1253 int numbufs, err = 0;
1254
1255 /*
1256 * Deal with odd block sizes, where there can't be data past the first
1257 * block. If we ever do the tail block optimization, we will need to
1258 * handle that here as well.
1259 */
1260 if (dn->dn_maxblkid == 0) {
1261 uint64_t newsz = offset > dn->dn_datablksz ? 0 :
1262 MIN(size, dn->dn_datablksz - offset);
1263 memset((char *)buf + newsz, 0, size - newsz);
1264 size = newsz;
1265 }
1266
1267 if (size == 0)
1268 return (0);
1269
1270 /* Allow Direct I/O when requested and properly aligned */
1271 if ((flags & DMU_DIRECTIO) && zfs_dio_page_aligned(buf) &&
1272 zfs_dio_aligned(offset, size, PAGESIZE)) {
1273 abd_t *data = abd_get_from_buf(buf, size);
1274 err = dmu_read_abd(dn, offset, size, data, flags);
1275 abd_free(data);
1276 return (err);
1277 }
1278 flags &= ~DMU_DIRECTIO;
1279
1280 while (size > 0) {
1281 uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
1282 int i;
1283
1284 /*
1285 * NB: we could do this block-at-a-time, but it's nice
1286 * to be reading in parallel.
1287 */
1288 err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
1289 TRUE, FTAG, &numbufs, &dbp, flags);
1290 if (err)
1291 break;
1292
1293 for (i = 0; i < numbufs; i++) {
1294 uint64_t tocpy;
1295 int64_t bufoff;
1296 dmu_buf_t *db = dbp[i];
1297
1298 ASSERT(size > 0);
1299
1300 bufoff = offset - db->db_offset;
1301 tocpy = MIN(db->db_size - bufoff, size);
1302
1303 ASSERT(db->db_data != NULL);
1304 (void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);
1305
1306 offset += tocpy;
1307 size -= tocpy;
1308 buf = (char *)buf + tocpy;
1309 }
1310 dmu_buf_rele_array(dbp, numbufs, FTAG);
1311 }
1312 return (err);
1313 }
1314
1315 int
1316 dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1317 void *buf, dmu_flags_t flags)
1318 {
1319 dnode_t *dn;
1320 int err;
1321
1322 err = dnode_hold(os, object, FTAG, &dn);
1323 if (err != 0)
1324 return (err);
1325
1326 err = dmu_read_impl(dn, offset, size, buf, flags);
1327 dnode_rele(dn, FTAG);
1328 return (err);
1329 }
1330
1331 int
1332 dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
1333 dmu_flags_t flags)
1334 {
1335 return (dmu_read_impl(dn, offset, size, buf, flags));
1336 }
1337
1338 static void
1339 dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
1340 const void *buf, dmu_tx_t *tx, dmu_flags_t flags)
1341 {
1342 int i;
1343
1344 for (i = 0; i < numbufs; i++) {
1345 uint64_t tocpy;
1346 int64_t bufoff;
1347 dmu_buf_t *db = dbp[i];
1348
1349 ASSERT(size > 0);
1350
1351 bufoff = offset - db->db_offset;
1352 tocpy = MIN(db->db_size - bufoff, size);
1353
1354 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1355
1356 if (tocpy == db->db_size) {
1357 dmu_buf_will_fill_flags(db, tx, B_FALSE, flags);
1358 } else {
1359 if (i == numbufs - 1 && bufoff + tocpy < db->db_size) {
1360 if (bufoff == 0)
1361 flags |= DMU_PARTIAL_FIRST;
1362 else
1363 flags |= DMU_PARTIAL_MORE;
1364 }
1365 dmu_buf_will_dirty_flags(db, tx, flags);
1366 }
1367
1368 ASSERT(db->db_data != NULL);
1369 (void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
1370
1371 if (tocpy == db->db_size)
1372 dmu_buf_fill_done(db, tx, B_FALSE);
1373
1374 offset += tocpy;
1375 size -= tocpy;
1376 buf = (char *)buf + tocpy;
1377 }
1378 }
1379
1380 void
1381 dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1382 const void *buf, dmu_tx_t *tx, dmu_flags_t flags)
1383 {
1384 dmu_buf_t **dbp;
1385 int numbufs;
1386
1387 if (size == 0)
1388 return;
1389
1390 VERIFY0(dmu_buf_hold_array(os, object, offset, size,
1391 FALSE, FTAG, &numbufs, &dbp, flags));
1392 dmu_write_impl(dbp, numbufs, offset, size, buf, tx, flags);
1393 dmu_buf_rele_array(dbp, numbufs, FTAG);
1394 }
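
/*
 * Illustrative usage sketch (error handling abbreviated): a caller wraps
 * dmu_write() in a transaction that has reserved the written range.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	int err = dmu_tx_assign(tx, DMU_TX_WAIT);
 *	if (err == 0) {
 *		dmu_write(os, object, offset, size, buf, tx,
 *		    DMU_READ_PREFETCH);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */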
1395
1396 int
1397 dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
1398 const void *buf, dmu_tx_t *tx, dmu_flags_t flags)
1399 {
1400 dmu_buf_t **dbp;
1401 int numbufs;
1402 int error;
1403
1404 if (size == 0)
1405 return (0);
1406
1407 /* Allow Direct I/O when requested and properly aligned */
1408 if ((flags & DMU_DIRECTIO) && zfs_dio_page_aligned((void *)buf) &&
1409 zfs_dio_aligned(offset, size, dn->dn_datablksz)) {
1410 abd_t *data = abd_get_from_buf((void *)buf, size);
1411 error = dmu_write_abd(dn, offset, size, data, flags, tx);
1412 abd_free(data);
1413 return (error);
1414 }
1415 flags &= ~DMU_DIRECTIO;
1416
1417 VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
1418 FALSE, FTAG, &numbufs, &dbp, flags));
1419 dmu_write_impl(dbp, numbufs, offset, size, buf, tx, flags);
1420 dmu_buf_rele_array(dbp, numbufs, FTAG);
1421 return (0);
1422 }
1423
1424 void
1425 dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1426 dmu_tx_t *tx)
1427 {
1428 dmu_buf_t **dbp;
1429 int numbufs, i;
1430
1431 if (size == 0)
1432 return;
1433
1434 VERIFY0(dmu_buf_hold_array(os, object, offset, size,
1435 FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
1436
1437 for (i = 0; i < numbufs; i++) {
1438 dmu_buf_t *db = dbp[i];
1439
1440 dmu_buf_will_not_fill(db, tx);
1441 }
1442 dmu_buf_rele_array(dbp, numbufs, FTAG);
1443 }
1444
1445 void
1446 dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
1447 void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
1448 int compressed_size, int byteorder, dmu_tx_t *tx)
1449 {
1450 dmu_buf_t *db;
1451
1452 ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
1453 ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
1454 VERIFY0(dmu_buf_hold_noread(os, object, offset,
1455 FTAG, &db));
1456
1457 dmu_buf_write_embedded(db,
1458 data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
1459 uncompressed_size, compressed_size, byteorder, tx);
1460
1461 dmu_buf_rele(db, FTAG);
1462 }
1463
1464 void
1465 dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1466 dmu_tx_t *tx)
1467 {
1468 int numbufs, i;
1469 dmu_buf_t **dbp;
1470
1471 VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
1472 &numbufs, &dbp, DMU_READ_PREFETCH));
1473 for (i = 0; i < numbufs; i++)
1474 dmu_buf_redact(dbp[i], tx);
1475 dmu_buf_rele_array(dbp, numbufs, FTAG);
1476 }
1477
1478 #ifdef _KERNEL
1479 int
1480 dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size,
1481 dmu_flags_t flags)
1482 {
1483 dmu_buf_t **dbp;
1484 int numbufs, i, err;
1485
1486 if ((flags & DMU_DIRECTIO) && (uio->uio_extflg & UIO_DIRECT))
1487 return (dmu_read_uio_direct(dn, uio, size, flags));
1488 flags &= ~DMU_DIRECTIO;
1489
1490 /*
1491 * NB: we could do this block-at-a-time, but it's nice
1492 * to be reading in parallel.
1493 */
1494 err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
1495 TRUE, FTAG, &numbufs, &dbp, flags);
1496 if (err)
1497 return (err);
1498
1499 for (i = 0; i < numbufs; i++) {
1500 uint64_t tocpy;
1501 int64_t bufoff;
1502 dmu_buf_t *db = dbp[i];
1503
1504 ASSERT(size > 0);
1505
1506 bufoff = zfs_uio_offset(uio) - db->db_offset;
1507 tocpy = MIN(db->db_size - bufoff, size);
1508
1509 ASSERT(db->db_data != NULL);
1510 err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
1511 UIO_READ, uio);
1512
1513 if (err)
1514 break;
1515
1516 size -= tocpy;
1517 }
1518 dmu_buf_rele_array(dbp, numbufs, FTAG);
1519
1520 return (err);
1521 }
1522
1523 /*
1524 * Read 'size' bytes into the uio buffer.
1525 * From object zdb->db_object.
1526 * Starting at zfs_uio_offset(uio).
1527 *
1528 * If the caller already has a dbuf in the target object
1529 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
1530 * because we don't have to find the dnode_t for the object.
1531 */
1532 int
1533 dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
1534 dmu_flags_t flags)
1535 {
1536 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1537 int err;
1538
1539 if (size == 0)
1540 return (0);
1541
1542 DB_DNODE_ENTER(db);
1543 err = dmu_read_uio_dnode(DB_DNODE(db), uio, size, flags);
1544 DB_DNODE_EXIT(db);
1545
1546 return (err);
1547 }
1548
1549 /*
1550 * Read 'size' bytes into the uio buffer.
1551 * From the specified object
1552 * Starting at offset zfs_uio_offset(uio).
1553 */
1554 int
1555 dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
1556 dmu_flags_t flags)
1557 {
1558 dnode_t *dn;
1559 int err;
1560
1561 if (size == 0)
1562 return (0);
1563
1564 err = dnode_hold(os, object, FTAG, &dn);
1565 if (err)
1566 return (err);
1567
1568 err = dmu_read_uio_dnode(dn, uio, size, flags);
1569
1570 dnode_rele(dn, FTAG);
1571
1572 return (err);
1573 }
1574
1575 int
1576 dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx,
1577 dmu_flags_t flags)
1578 {
1579 dmu_buf_t **dbp;
1580 int numbufs;
1581 int err = 0;
1582 uint64_t write_size;
1583 dmu_flags_t oflags = flags;
1584
1585 top:
1586 write_size = size;
1587
1588 /*
1589 * We only allow Direct I/O writes to happen if they are block
1590 * size aligned. Otherwise, we pass the write off to the ARC.
1591 */
1592 if ((flags & DMU_DIRECTIO) && (uio->uio_extflg & UIO_DIRECT) &&
1593 (write_size >= dn->dn_datablksz)) {
1594 if (zfs_dio_aligned(zfs_uio_offset(uio), write_size,
1595 dn->dn_datablksz)) {
1596 return (dmu_write_uio_direct(dn, uio, size, flags, tx));
1597 } else if (write_size > dn->dn_datablksz &&
1598 zfs_dio_offset_aligned(zfs_uio_offset(uio),
1599 dn->dn_datablksz)) {
1600 write_size =
1601 dn->dn_datablksz * (write_size / dn->dn_datablksz);
1602 err = dmu_write_uio_direct(dn, uio, write_size, flags,
1603 tx);
1604 if (err == 0) {
1605 size -= write_size;
1606 goto top;
1607 } else {
1608 return (err);
1609 }
1610 } else {
1611 write_size =
1612 P2PHASE(zfs_uio_offset(uio), dn->dn_datablksz);
1613 }
1614 }
1615 flags &= ~DMU_DIRECTIO;
1616
1617 err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), write_size,
1618 FALSE, FTAG, &numbufs, &dbp, flags);
1619 if (err)
1620 return (err);
1621
1622 for (int i = 0; i < numbufs; i++) {
1623 uint64_t tocpy;
1624 int64_t bufoff;
1625 dmu_buf_t *db = dbp[i];
1626
1627 ASSERT(write_size > 0);
1628
1629 offset_t off = zfs_uio_offset(uio);
1630 bufoff = off - db->db_offset;
1631 tocpy = MIN(db->db_size - bufoff, write_size);
1632
1633 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1634
1635 if (tocpy == db->db_size) {
1636 dmu_buf_will_fill_flags(db, tx, B_TRUE, flags);
1637 } else {
1638 if (i == numbufs - 1 && bufoff + tocpy < db->db_size) {
1639 if (bufoff == 0)
1640 flags |= DMU_PARTIAL_FIRST;
1641 else
1642 flags |= DMU_PARTIAL_MORE;
1643 }
1644 dmu_buf_will_dirty_flags(db, tx, flags);
1645 }
1646
1647 ASSERT(db->db_data != NULL);
1648 err = zfs_uio_fault_move((char *)db->db_data + bufoff,
1649 tocpy, UIO_WRITE, uio);
1650
1651 if (tocpy == db->db_size && dmu_buf_fill_done(db, tx, err)) {
1652 /* The fill was reverted. Undo any uio progress. */
1653 zfs_uio_advance(uio, off - zfs_uio_offset(uio));
1654 }
1655
1656 if (err)
1657 break;
1658
1659 write_size -= tocpy;
1660 size -= tocpy;
1661 }
1662
1663 IMPLY(err == 0, write_size == 0);
1664
1665 dmu_buf_rele_array(dbp, numbufs, FTAG);
1666
1667 if ((oflags & DMU_DIRECTIO) && (uio->uio_extflg & UIO_DIRECT) &&
1668 err == 0 && size > 0) {
1669 flags = oflags;
1670 goto top;
1671 }
1672 IMPLY(err == 0, size == 0);
1673
1674 return (err);
1675 }
1676
1677 /*
1678 * Write 'size' bytes from the uio buffer.
1679 * To object zdb->db_object.
1680 * Starting at offset zfs_uio_offset(uio).
1681 *
1682 * If the caller already has a dbuf in the target object
1683 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
1684 * because we don't have to find the dnode_t for the object.
1685 */
1686 int
1687 dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
1688 dmu_tx_t *tx, dmu_flags_t flags)
1689 {
1690 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1691 int err;
1692
1693 if (size == 0)
1694 return (0);
1695
1696 DB_DNODE_ENTER(db);
1697 err = dmu_write_uio_dnode(DB_DNODE(db), uio, size, tx, flags);
1698 DB_DNODE_EXIT(db);
1699
1700 return (err);
1701 }
1702
1703 /*
1704 * Write 'size' bytes from the uio buffer.
1705 * To the specified object.
1706 * Starting at offset zfs_uio_offset(uio).
1707 */
1708 int
1709 dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
1710 dmu_tx_t *tx, dmu_flags_t flags)
1711 {
1712 dnode_t *dn;
1713 int err;
1714
1715 if (size == 0)
1716 return (0);
1717
1718 err = dnode_hold(os, object, FTAG, &dn);
1719 if (err)
1720 return (err);
1721
1722 err = dmu_write_uio_dnode(dn, uio, size, tx, flags);
1723
1724 dnode_rele(dn, FTAG);
1725
1726 return (err);
1727 }
1728 #endif /* _KERNEL */
1729
1730 static void
1731 dmu_cached_bps(spa_t *spa, blkptr_t *bps, uint_t nbps,
1732 uint64_t *l1sz, uint64_t *l2sz)
1733 {
1734 int cached_flags;
1735
1736 if (bps == NULL)
1737 return;
1738
1739 for (size_t blk_off = 0; blk_off < nbps; blk_off++) {
1740 blkptr_t *bp = &bps[blk_off];
1741
1742 if (BP_IS_HOLE(bp))
1743 continue;
1744
1745 cached_flags = arc_cached(spa, bp);
1746 if (cached_flags == 0)
1747 continue;
1748
1749 if ((cached_flags & (ARC_CACHED_IN_L1 | ARC_CACHED_IN_L2)) ==
1750 ARC_CACHED_IN_L2)
1751 *l2sz += BP_GET_LSIZE(bp);
1752 else
1753 *l1sz += BP_GET_LSIZE(bp);
1754 }
1755 }
1756
1757 /*
1758 * Estimate DMU object cached size.
1759 */
1760 int
1761 dmu_object_cached_size(objset_t *os, uint64_t object,
1762 uint64_t *l1sz, uint64_t *l2sz)
1763 {
1764 dnode_t *dn;
1765 dmu_object_info_t doi;
1766 int err = 0;
1767
1768 *l1sz = *l2sz = 0;
1769
1770 if (dnode_hold(os, object, FTAG, &dn) != 0)
1771 return (0);
1772
1773 if (dn->dn_nlevels < 2) {
1774 dnode_rele(dn, FTAG);
1775 return (0);
1776 }
1777
1778 dmu_object_info_from_dnode(dn, &doi);
1779
1780 for (uint64_t off = 0; off < doi.doi_max_offset &&
1781 dmu_prefetch_max > 0; off += dmu_prefetch_max) {
1782 /* dbuf_read doesn't prefetch L1 blocks. */
1783 dmu_prefetch_by_dnode(dn, 1, off,
1784 dmu_prefetch_max, ZIO_PRIORITY_SYNC_READ);
1785 }
1786
1787 /*
1788 * Hold all valid L1 blocks, asking ARC the status of each BP
1789 * contained in each such L1 block.
1790 */
1791 uint_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
1792 uint64_t l1blks = 1 + (dn->dn_maxblkid / nbps);
1793
1794 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1795 for (uint64_t blk = 0; blk < l1blks; blk++) {
1796 dmu_buf_impl_t *db = NULL;
1797
1798 if (issig()) {
1799 /*
1800 * On interrupt, get out, and bubble up EINTR
1801 */
1802 err = EINTR;
1803 break;
1804 }
1805
1806 /*
1807 * If we get an i/o error here, the L1 can't be read,
1808 * and nothing under it could be cached, so we just
1809 * continue. Ignoring the error from dbuf_hold_impl
1810 * or from dbuf_read is then a reasonable choice.
1811 */
1812 err = dbuf_hold_impl(dn, 1, blk, B_TRUE, B_FALSE, FTAG, &db);
1813 if (err != 0) {
1814 /*
1815 * ignore error and continue
1816 */
1817 err = 0;
1818 continue;
1819 }
1820
1821 err = dbuf_read(db, NULL, DB_RF_CANFAIL);
1822 if (err == 0) {
1823 dmu_cached_bps(dmu_objset_spa(os), db->db.db_data,
1824 nbps, l1sz, l2sz);
1825 }
1826 /*
1827 * error may be ignored, and we continue
1828 */
1829 err = 0;
1830 dbuf_rele(db, FTAG);
1831 }
1832 rw_exit(&dn->dn_struct_rwlock);
1833
1834 dnode_rele(dn, FTAG);
1835 return (err);
1836 }
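
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer of
 * the estimator above. The two outputs partition the cached logical size:
 * blocks resident in the L1 ARC (whether or not they are also in L2) count
 * toward l1sz, blocks resident only in L2ARC count toward l2sz, and
 * uncached blocks count toward neither.
 *
 *	uint64_t l1sz = 0, l2sz = 0;
 *	(void) dmu_object_cached_size(os, object, &l1sz, &l2sz);
 *	report_cached_bytes(l1sz, l2sz);	(hypothetical reporting helper)
 */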
1837
1838 /*
1839 * Allocate a loaned anonymous arc buffer.
1840 */
1841 arc_buf_t *
1842 dmu_request_arcbuf(dmu_buf_t *handle, int size)
1843 {
1844 dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
1845
1846 return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
1847 }
1848
1849 /*
1850 * Free a loaned arc buffer.
1851 */
1852 void
1853 dmu_return_arcbuf(arc_buf_t *buf)
1854 {
1855 arc_return_buf(buf, FTAG);
1856 arc_buf_destroy(buf, FTAG);
1857 }
1858
1859 /*
1860 * A "lightweight" write is faster than a regular write (e.g.
1861 * dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
1862 * CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]t. However, the
1863 * data cannot be read or overwritten until the transaction's txg has been
1864 * synced. This makes it appropriate for workloads that are known to be
1865 * (temporarily) write-only, like "zfs receive".
1866 *
1867 * A single block is written, starting at the specified offset in bytes. If
1868 * the call is successful, it returns 0 and the provided abd has been
1869 * consumed (the caller should not free it).
1870 */
1871 int
1872 dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
1873 const zio_prop_t *zp, zio_flag_t flags, dmu_tx_t *tx)
1874 {
1875 dbuf_dirty_record_t *dr =
1876 dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
1877 if (dr == NULL)
1878 return (SET_ERROR(EIO));
1879 dr->dt.dll.dr_abd = abd;
1880 dr->dt.dll.dr_props = *zp;
1881 dr->dt.dll.dr_flags = flags;
1882 return (0);
1883 }
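
/*
 * Illustrative sketch (not part of this file): the expected calling pattern
 * for a lightweight write, assuming the caller already holds the dnode and
 * has assigned a transaction covering the write. On success the abd is
 * consumed, so the caller must not free or reuse it; on failure it still
 * belongs to the caller.
 *
 *	zio_prop_t zp;
 *	dmu_write_policy(dn->dn_objset, dn, 0, 0, &zp);
 *	error = dmu_lightweight_write_by_dnode(dn, offset, abd, &zp, 0, tx);
 *	if (error != 0)
 *		abd_free(abd);
 */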
1884
1885 /*
1886 * When possible directly assign passed loaned arc buffer to a dbuf.
1887 * If this is not possible copy the contents of passed arc buf via
1888 * dmu_write().
1889 */
1890 int
1891 dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
1892 dmu_tx_t *tx, dmu_flags_t flags)
1893 {
1894 dmu_buf_impl_t *db;
1895 objset_t *os = dn->dn_objset;
1896 uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
1897 uint64_t blkid;
1898
1899 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1900 blkid = dbuf_whichblock(dn, 0, offset);
1901 db = dbuf_hold(dn, blkid, FTAG);
1902 rw_exit(&dn->dn_struct_rwlock);
1903 if (db == NULL)
1904 return (SET_ERROR(EIO));
1905
1906 /*
1907 * We can only assign if the offset is aligned and the arc buf is the
1908 * same size as the dbuf.
1909 */
1910 if (offset == db->db.db_offset && blksz == db->db.db_size) {
1911 zfs_racct_write(os->os_spa, blksz, 1, flags);
1912 dbuf_assign_arcbuf(db, buf, tx, flags);
1913 dbuf_rele(db, FTAG);
1914 } else {
1915 /* compressed bufs must always be assignable to their dbuf */
1916 ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
1917 ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
1918
1919 dbuf_rele(db, FTAG);
1920 dmu_write_by_dnode(dn, offset, blksz, buf->b_data, tx, flags);
1921 dmu_return_arcbuf(buf);
1922 }
1923
1924 return (0);
1925 }
1926
1927 int
1928 dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
1929 dmu_tx_t *tx, dmu_flags_t flags)
1930 {
1931 int err;
1932 dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
1933
1934 DB_DNODE_ENTER(db);
1935 err = dmu_assign_arcbuf_by_dnode(DB_DNODE(db), offset, buf, tx, flags);
1936 DB_DNODE_EXIT(db);
1937
1938 return (err);
1939 }
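
/*
 * Illustrative sketch (not part of this file): the loan/fill/assign pattern
 * the arc buf loan functions above are designed for. The buffer is filled
 * outside of any transaction, then either attached directly to the dbuf
 * (fast path) or copied via dmu_write_by_dnode() (slow path); in both cases
 * ownership passes to the DMU on success.
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db_handle, blksz);
 *	(void) memcpy(abuf->b_data, src, blksz);
 *	if (dmu_assign_arcbuf_by_dbuf(db_handle, offset, abuf, tx, flags) != 0)
 *		dmu_return_arcbuf(abuf);
 */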
1940
1941 void
1942 dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1943 {
1944 (void) buf;
1945 dmu_sync_arg_t *dsa = varg;
1946
1947 if (zio->io_error == 0) {
1948 dbuf_dirty_record_t *dr = dsa->dsa_dr;
1949 blkptr_t *bp = zio->io_bp;
1950
1951 if (BP_IS_HOLE(bp)) {
1952 dmu_buf_t *db = NULL;
1953 if (dr)
1954 db = &(dr->dr_dbuf->db);
1955 else
1956 db = dsa->dsa_zgd->zgd_db;
1957 /*
1958 * A block of zeros may compress to a hole, but the
1959 * block size still needs to be known for replay.
1960 */
1961 BP_SET_LSIZE(bp, db->db_size);
1962 } else if (!BP_IS_EMBEDDED(bp)) {
1963 ASSERT0(BP_GET_LEVEL(bp));
1964 BP_SET_FILL(bp, 1);
1965 }
1966 }
1967 }
1968
1969 static void
1970 dmu_sync_late_arrival_ready(zio_t *zio)
1971 {
1972 dmu_sync_ready(zio, NULL, zio->io_private);
1973 }
1974
1975 void
1976 dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1977 {
1978 (void) buf;
1979 dmu_sync_arg_t *dsa = varg;
1980 dbuf_dirty_record_t *dr = dsa->dsa_dr;
1981 dmu_buf_impl_t *db = dr->dr_dbuf;
1982 zgd_t *zgd = dsa->dsa_zgd;
1983
1984 /*
1985 * Record the vdev(s) backing this blkptr so they can be flushed after
1986 * the writes for the lwb have completed.
1987 */
1988 if (zgd && zio->io_error == 0) {
1989 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1990 }
1991
1992 mutex_enter(&db->db_mtx);
1993 ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1994 if (zio->io_error == 0) {
1995 ASSERT0(dr->dt.dl.dr_has_raw_params);
1996 dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1997 if (dr->dt.dl.dr_nopwrite) {
1998 blkptr_t *bp = zio->io_bp;
1999 blkptr_t *bp_orig = &zio->io_bp_orig;
2000 uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
2001
2002 ASSERT(BP_EQUAL(bp, bp_orig));
2003 VERIFY(BP_EQUAL(bp, db->db_blkptr));
2004 ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
2005 VERIFY(zio_checksum_table[chksum].ci_flags &
2006 ZCHECKSUM_FLAG_NOPWRITE);
2007 }
2008 dr->dt.dl.dr_overridden_by = *zio->io_bp;
2009 dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
2010 dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
2011 dr->dt.dl.dr_gang_copies = zio->io_prop.zp_gang_copies;
2012
2013 /*
2014 * Old style holes are filled with all zeros, whereas
2015 * new-style holes maintain their lsize, type, level,
2016 * and birth time (see zio_write_compress). While we
2017 * need to reset the BP_SET_LSIZE() call that happened
2018 * in dmu_sync_ready for old style holes, we do *not*
2019 * want to wipe out the information contained in new
2020 * style holes. Thus, only zero out the block pointer if
2021 * it's an old style hole.
2022 */
2023 if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
2024 BP_GET_LOGICAL_BIRTH(&dr->dt.dl.dr_overridden_by) == 0)
2025 BP_ZERO(&dr->dt.dl.dr_overridden_by);
2026 } else {
2027 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2028 }
2029
2030 cv_broadcast(&db->db_changed);
2031 mutex_exit(&db->db_mtx);
2032
2033 if (dsa->dsa_done)
2034 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
2035
2036 kmem_free(dsa, sizeof (*dsa));
2037 }
2038
2039 static void
2040 dmu_sync_late_arrival_done(zio_t *zio)
2041 {
2042 blkptr_t *bp = zio->io_bp;
2043 dmu_sync_arg_t *dsa = zio->io_private;
2044 zgd_t *zgd = dsa->dsa_zgd;
2045
2046 if (zio->io_error == 0) {
2047 /*
2048 * Record the vdev(s) backing this blkptr so they can be
2049 * flushed after the writes for the lwb have completed.
2050 */
2051 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
2052
2053 if (!BP_IS_HOLE(bp)) {
2054 blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
2055 ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
2056 ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
2057 ASSERT(BP_GET_BIRTH(zio->io_bp) == zio->io_txg);
2058 ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
2059 zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
2060 }
2061 }
2062
2063 dmu_tx_commit(dsa->dsa_tx);
2064
2065 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
2066
2067 abd_free(zio->io_abd);
2068 kmem_free(dsa, sizeof (*dsa));
2069 }
2070
2071 static int
2072 dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
2073 zio_prop_t *zp, zbookmark_phys_t *zb)
2074 {
2075 dmu_sync_arg_t *dsa;
2076 dmu_tx_t *tx;
2077 int error;
2078
2079 error = dbuf_read((dmu_buf_impl_t *)zgd->zgd_db, NULL,
2080 DB_RF_CANFAIL | DMU_READ_NO_PREFETCH | DMU_KEEP_CACHING);
2081 if (error != 0)
2082 return (error);
2083
2084 tx = dmu_tx_create(os);
2085 dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
2086 /*
2087 * This transaction does not produce any dirty data or log blocks, so
2088 * it should not be throttled. All other cases wait for TXG sync, by
2089 * which time the log block we are writing will be obsolete, so we can
2090 * skip waiting and just return error here instead.
2091 */
2092 if (dmu_tx_assign(tx, DMU_TX_NOWAIT | DMU_TX_NOTHROTTLE) != 0) {
2093 dmu_tx_abort(tx);
2094 /* Make zl_get_data do txg_wait_synced() */
2095 return (SET_ERROR(EIO));
2096 }
2097
2098 /*
2099 * In order to prevent the zgd's lwb from being free'd prior to
2100 * dmu_sync_late_arrival_done() being called, we have to ensure
2101 * the lwb's "max txg" takes this tx's txg into account.
2102 */
2103 zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
2104
2105 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
2106 dsa->dsa_dr = NULL;
2107 dsa->dsa_done = done;
2108 dsa->dsa_zgd = zgd;
2109 dsa->dsa_tx = tx;
2110
2111 /*
2112 * Since we are currently syncing this txg, it's nontrivial to
2113 * determine what BP to nopwrite against, so we disable nopwrite.
2114 *
2115 * When syncing, the db_blkptr is initially the BP of the previous
2116 * txg. We can not nopwrite against it because it will be changed
2117 * (this is similar to the non-late-arrival case where the dbuf is
2118 * dirty in a future txg).
2119 *
2120 * Then dbuf_write_ready() sets db_blkptr to the location we will write.
2121 * We can not nopwrite against it because although the BP will not
2122 * (typically) be changed, the data has not yet been persisted to this
2123 * location.
2124 *
2125 * Finally, when dbuf_write_done() is called, it is theoretically
2126 * possible to always nopwrite, because the data that was written in
2127 * this txg is the same data that we are trying to write. However we
2128 * would need to check that this dbuf is not dirty in any future
2129 * txgs (as we do in the normal dmu_sync() path). For simplicity, we
2130 * don't nopwrite in this case.
2131 */
2132 zp->zp_nopwrite = B_FALSE;
2133
2134 zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
2135 abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
2136 zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
2137 dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done,
2138 dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
2139
2140 return (0);
2141 }
2142
2143 /*
2144 * Intent log support: sync the block associated with db to disk.
2145 * N.B. and XXX: the caller is responsible for making sure that the
2146 * data isn't changing while dmu_sync() is writing it.
2147 *
2148 * Return values:
2149 *
2150 * EEXIST: this txg has already been synced, so there's nothing to do.
2151 * The caller should not log the write.
2152 *
2153 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
2154 * The caller should not log the write.
2155 *
2156 * EALREADY: this block is already in the process of being synced.
2157 * The caller should track its progress (somehow).
2158 *
2159 * EIO: could not do the I/O.
2160 * The caller should do a txg_wait_synced().
2161 *
2162 * 0: the I/O has been initiated.
2163 * The caller should log this blkptr in the done callback.
2164 * It is possible that the I/O will fail, in which case
2165 * the error will be reported to the done callback and
2166 * propagated to pio from zio_done().
2167 */
2168 int
2169 dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
2170 {
2171 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
2172 objset_t *os = db->db_objset;
2173 dsl_dataset_t *ds = os->os_dsl_dataset;
2174 dbuf_dirty_record_t *dr, *dr_next;
2175 dmu_sync_arg_t *dsa;
2176 zbookmark_phys_t zb;
2177 zio_prop_t zp;
2178
2179 ASSERT(pio != NULL);
2180 ASSERT(txg != 0);
2181
2182 SET_BOOKMARK(&zb, ds->ds_object,
2183 db->db.db_object, db->db_level, db->db_blkid);
2184
2185 DB_DNODE_ENTER(db);
2186 dmu_write_policy(os, DB_DNODE(db), db->db_level, WP_DMU_SYNC, &zp);
2187 DB_DNODE_EXIT(db);
2188
2189 /*
2190 * If we're frozen (running ziltest), we always need to generate a bp.
2191 */
2192 if (txg > spa_freeze_txg(os->os_spa))
2193 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
2194
2195 /*
2196 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
2197 * and us. If we determine that this txg is not yet syncing,
2198 * but it begins to sync a moment later, that's OK because the
2199 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
2200 */
2201 mutex_enter(&db->db_mtx);
2202
2203 if (txg <= spa_last_synced_txg(os->os_spa)) {
2204 /*
2205 * This txg has already synced. There's nothing to do.
2206 */
2207 mutex_exit(&db->db_mtx);
2208 return (SET_ERROR(EEXIST));
2209 }
2210
2211 if (txg <= spa_syncing_txg(os->os_spa)) {
2212 /*
2213 * This txg is currently syncing, so we can't mess with
2214 * the dirty record anymore; just write a new log block.
2215 */
2216 mutex_exit(&db->db_mtx);
2217 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
2218 }
2219
2220 dr = dbuf_find_dirty_eq(db, txg);
2221
2222 if (dr == NULL) {
2223 /*
2224 * There's no dr for this dbuf, so it must have been freed.
2225 * There's no need to log writes to freed blocks, so we're done.
2226 */
2227 mutex_exit(&db->db_mtx);
2228 return (SET_ERROR(ENOENT));
2229 }
2230
2231 dr_next = list_next(&db->db_dirty_records, dr);
2232 ASSERT(dr_next == NULL || dr_next->dr_txg < txg);
2233
2234 if (db->db_blkptr != NULL) {
2235 /*
2236 * We need to fill in zgd_bp with the current blkptr so that
2237 * the nopwrite code can check if we're writing the same
2238 * data that's already on disk. We can only nopwrite if we
2239 * are sure that after making the copy, db_blkptr will not
2240 * change until our i/o completes. We ensure this by
2241 * holding the db_mtx, and only allowing nopwrite if the
2242 * block is not already dirty (see below). This is verified
2243 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
2244 * not changed.
2245 */
2246 *zgd->zgd_bp = *db->db_blkptr;
2247 }
2248
2249 /*
2250 * Assume the on-disk data is X, the current syncing data (in
2251 * txg - 1) is Y, and the current in-memory data is Z (currently
2252 * in dmu_sync).
2253 *
2254 * We usually want to perform a nopwrite if X and Z are the
2255 * same. However, if Y is different (i.e. the BP is going to
2256 * change before this write takes effect), then a nopwrite will
2257 * be incorrect - we would override with X, which could have
2258 * been freed when Y was written.
2259 *
2260 * (Note that this is not a concern when we are nop-writing from
2261 * syncing context, because X and Y must be identical, because
2262 * all previous txgs have been synced.)
2263 *
2264 * Therefore, we disable nopwrite if the current BP could change
2265 * before this TXG. There are two ways it could change: by
2266 * being dirty (dr_next is non-NULL), or by being freed
2267 * (dnode_block_freed()). This behavior is verified by
2268 * zio_done(), which VERIFYs that the override BP is identical
2269 * to the on-disk BP.
2270 */
2271 if (dr_next != NULL) {
2272 zp.zp_nopwrite = B_FALSE;
2273 } else {
2274 DB_DNODE_ENTER(db);
2275 if (dnode_block_freed(DB_DNODE(db), db->db_blkid))
2276 zp.zp_nopwrite = B_FALSE;
2277 DB_DNODE_EXIT(db);
2278 }
2279
2280 ASSERT(dr->dr_txg == txg);
2281 if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
2282 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2283 /*
2284 * We have already issued a sync write for this buffer,
2285 * or this buffer has already been synced. It could not
2286 * have been dirtied since, or we would have cleared the state.
2287 */
2288 mutex_exit(&db->db_mtx);
2289 return (SET_ERROR(EALREADY));
2290 }
2291
2292 ASSERT0(dr->dt.dl.dr_has_raw_params);
2293 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2294 dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
2295 mutex_exit(&db->db_mtx);
2296
2297 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
2298 dsa->dsa_dr = dr;
2299 dsa->dsa_done = done;
2300 dsa->dsa_zgd = zgd;
2301 dsa->dsa_tx = NULL;
2302
2303 zio_nowait(arc_write(pio, os->os_spa, txg, zgd->zgd_bp,
2304 dr->dt.dl.dr_data, !DBUF_IS_CACHEABLE(db),
2305 dbuf_is_l2cacheable(db, NULL), &zp, dmu_sync_ready, NULL,
2306 dmu_sync_done, dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL,
2307 &zb));
2308
2309 return (0);
2310 }
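
/*
 * Illustrative sketch (not part of this file): how a ZIL get_data callback
 * might dispatch on dmu_sync()'s return values, per the comment above
 * dmu_sync(). The surrounding names (zio, lr, done_cb, zgd) are placeholders.
 *
 *	error = dmu_sync(zio, lr->lr_common.lrc_txg, done_cb, zgd);
 *	if (error == 0) {
 *		(I/O issued; log the blkptr from the done callback)
 *	} else if (error == EEXIST || error == ENOENT) {
 *		(already synced, or the block was freed; do not log the write)
 *	} else if (error == EALREADY) {
 *		(another sync of this block is already in flight; track it)
 *	} else {
 *		(e.g. EIO; fall back to txg_wait_synced())
 *	}
 */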
2311
2312 int
2313 dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
2314 {
2315 dnode_t *dn;
2316 int err;
2317
2318 err = dnode_hold(os, object, FTAG, &dn);
2319 if (err)
2320 return (err);
2321 err = dnode_set_nlevels(dn, nlevels, tx);
2322 dnode_rele(dn, FTAG);
2323 return (err);
2324 }
2325
2326 int
2327 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
2328 dmu_tx_t *tx)
2329 {
2330 dnode_t *dn;
2331 int err;
2332
2333 err = dnode_hold(os, object, FTAG, &dn);
2334 if (err)
2335 return (err);
2336 err = dnode_set_blksz(dn, size, ibs, tx);
2337 dnode_rele(dn, FTAG);
2338 return (err);
2339 }
2340
2341 int
2342 dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
2343 dmu_tx_t *tx)
2344 {
2345 dnode_t *dn;
2346 int err;
2347
2348 err = dnode_hold(os, object, FTAG, &dn);
2349 if (err)
2350 return (err);
2351 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2352 dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
2353 rw_exit(&dn->dn_struct_rwlock);
2354 dnode_rele(dn, FTAG);
2355 return (0);
2356 }
2357
2358 void
2359 dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
2360 dmu_tx_t *tx)
2361 {
2362 dnode_t *dn;
2363
2364 /*
2365 * Send streams include each object's checksum function. This
2366 * check ensures that the receiving system can understand the
2367 * checksum function transmitted.
2368 */
2369 ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
2370
2371 VERIFY0(dnode_hold(os, object, FTAG, &dn));
2372 ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
2373 dn->dn_checksum = checksum;
2374 dnode_setdirty(dn, tx);
2375 dnode_rele(dn, FTAG);
2376 }
2377
2378 void
2379 dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
2380 dmu_tx_t *tx)
2381 {
2382 dnode_t *dn;
2383
2384 /*
2385 * Send streams include each object's compression function. This
2386 * check ensures that the receiving system can understand the
2387 * compression function transmitted.
2388 */
2389 ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
2390
2391 VERIFY0(dnode_hold(os, object, FTAG, &dn));
2392 dn->dn_compress = compress;
2393 dnode_setdirty(dn, tx);
2394 dnode_rele(dn, FTAG);
2395 }
2396
2397 /*
2398 * When the "redundant_metadata" property is set to "most", only indirect
2399 * blocks of this level and higher will have an additional ditto block.
2400 */
2401 static const int zfs_redundant_metadata_most_ditto_level = 2;
2402
2403 void
2404 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
2405 {
2406 dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
2407 boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
2408 (wp & WP_SPILL));
2409 enum zio_checksum checksum = os->os_checksum;
2410 enum zio_compress compress = os->os_compress;
2411 uint8_t complevel = os->os_complevel;
2412 enum zio_checksum dedup_checksum = os->os_dedup_checksum;
2413 boolean_t dedup = B_FALSE;
2414 boolean_t nopwrite = B_FALSE;
2415 boolean_t dedup_verify = os->os_dedup_verify;
2416 boolean_t encrypt = B_FALSE;
2417 int copies = os->os_copies;
2418 int gang_copies = os->os_copies;
2419
2420 /*
2421 * We maintain different write policies for each of the following
2422 * types of data:
2423 * 1. metadata
2424 * 2. preallocated blocks (i.e. level-0 blocks of a dump device)
2425 * 3. all other level 0 blocks
2426 */
2427 if (ismd) {
2428 /*
2429 * XXX -- we should design a compression algorithm
2430 * that specializes in arrays of bps.
2431 */
2432 compress = zio_compress_select(os->os_spa,
2433 ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
2434
2435 /*
2436 * Metadata always gets checksummed. If the data
2437 * checksum is multi-bit correctable, and it's not a
2438 * ZBT-style checksum, then it's suitable for metadata
2439 * as well. Otherwise, the metadata checksum defaults
2440 * to fletcher4.
2441 */
2442 if (!(zio_checksum_table[checksum].ci_flags &
2443 ZCHECKSUM_FLAG_METADATA) ||
2444 (zio_checksum_table[checksum].ci_flags &
2445 ZCHECKSUM_FLAG_EMBEDDED))
2446 checksum = ZIO_CHECKSUM_FLETCHER_4;
2447
2448 switch (os->os_redundant_metadata) {
2449 case ZFS_REDUNDANT_METADATA_ALL:
2450 copies++;
2451 gang_copies++;
2452 break;
2453 case ZFS_REDUNDANT_METADATA_MOST:
2454 if (level >= zfs_redundant_metadata_most_ditto_level ||
2455 DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))
2456 copies++;
2457 if (level + 1 >=
2458 zfs_redundant_metadata_most_ditto_level ||
2459 DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))
2460 gang_copies++;
2461 break;
2462 case ZFS_REDUNDANT_METADATA_SOME:
2463 if (DMU_OT_IS_CRITICAL(type, level)) {
2464 copies++;
2465 gang_copies++;
2466 } else if (DMU_OT_IS_METADATA(type)) {
2467 gang_copies++;
2468 }
2469 break;
2470 case ZFS_REDUNDANT_METADATA_NONE:
2471 break;
2472 }
2473
2474 if (dmu_ddt_copies > 0) {
2475 /*
2476 * If this tunable is set, and this is a write for a
2477 * dedup entry store (zap or log), then we treat it
2478 * something like ZFS_REDUNDANT_METADATA_MOST on a
2479 * regular dataset: this many copies, and one more for
2480 * "higher" indirect blocks. This specific exception is
2481 * necessary because dedup objects are stored in the
2482 * MOS, which always has the highest possible copies.
2483 */
2484 dmu_object_type_t stype =
2485 dn ? dn->dn_storage_type : DMU_OT_NONE;
2486 if (stype == DMU_OT_NONE)
2487 stype = type;
2488 if (stype == DMU_OT_DDT_ZAP) {
2489 copies = dmu_ddt_copies;
2490 if (level >=
2491 zfs_redundant_metadata_most_ditto_level)
2492 copies++;
2493 }
2494 }
2495 } else if (wp & WP_NOFILL) {
2496 ASSERT0(level);
2497
2498 /*
2499 * If we're writing preallocated blocks, we aren't actually
2500 * writing them so don't set any policy properties. These
2501 * blocks are currently only used by an external subsystem
2502 * outside of zfs (i.e. dump) and not written by the zio
2503 * pipeline.
2504 */
2505 compress = ZIO_COMPRESS_OFF;
2506 checksum = ZIO_CHECKSUM_OFF;
2507 } else {
2508 compress = zio_compress_select(os->os_spa, dn->dn_compress,
2509 compress);
2510 complevel = zio_complevel_select(os->os_spa, compress,
2511 complevel, complevel);
2512
2513 /*
2514 * Storing many references to an all zeros block in the dedup
2515 * table would be expensive. Instead, if dedup is enabled,
2516 * store them as holes even if compression is not enabled.
2517 */
2518 if (compress == ZIO_COMPRESS_OFF &&
2519 dedup_checksum != ZIO_CHECKSUM_OFF)
2520 compress = ZIO_COMPRESS_EMPTY;
2521
2522 checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
2523 zio_checksum_select(dn->dn_checksum, checksum) :
2524 dedup_checksum;
2525
2526 /*
2527 * Determine dedup setting. If we are in dmu_sync(),
2528 * we won't actually dedup now because that's all
2529 * done in syncing context; but we do want to use the
2530 * dedup checksum. If the checksum is not strong
2531 * enough to ensure unique signatures, force
2532 * dedup_verify.
2533 */
2534 if (dedup_checksum != ZIO_CHECKSUM_OFF) {
2535 dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
2536 if (!(zio_checksum_table[checksum].ci_flags &
2537 ZCHECKSUM_FLAG_DEDUP))
2538 dedup_verify = B_TRUE;
2539 }
2540
2541 /*
2542 * Enable nopwrite if we have a secure enough checksum
2543 * algorithm (see comment in zio_nop_write) and
2544 * compression is enabled. We don't enable nopwrite if
2545 * dedup is enabled as the two features are mutually
2546 * exclusive.
2547 */
2548 nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
2549 ZCHECKSUM_FLAG_NOPWRITE) &&
2550 compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
2551
2552 if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
2553 (os->os_redundant_metadata ==
2554 ZFS_REDUNDANT_METADATA_MOST &&
2555 zfs_redundant_metadata_most_ditto_level <= 1))
2556 gang_copies++;
2557 }
2558
2559 /*
2560 * All objects in an encrypted objset are protected from modification
2561 * via a MAC. Encrypted objects store their IV and salt in the last DVA
2562 * in the bp, so we cannot use all copies. Encrypted objects are also
2563 * not subject to nopwrite since writing the same data will still
2564 * result in a new ciphertext. Only encrypted blocks can be dedup'd
2565 * to avoid ambiguity in the dedup code since the DDT does not store
2566 * object types.
2567 */
2568 if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
2569 encrypt = B_TRUE;
2570
2571 if (DMU_OT_IS_ENCRYPTED(type)) {
2572 copies = MIN(copies, SPA_DVAS_PER_BP - 1);
2573 gang_copies = MIN(gang_copies, SPA_DVAS_PER_BP - 1);
2574 nopwrite = B_FALSE;
2575 } else {
2576 dedup = B_FALSE;
2577 }
2578
2579 if (level <= 0 &&
2580 (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
2581 compress = ZIO_COMPRESS_EMPTY;
2582 }
2583 }
2584
2585 zp->zp_compress = compress;
2586 zp->zp_complevel = complevel;
2587 zp->zp_checksum = checksum;
2588 zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2589 zp->zp_level = level;
2590 zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
2591 zp->zp_gang_copies = MIN(MAX(gang_copies, copies),
2592 spa_max_replication(os->os_spa));
2593 zp->zp_dedup = dedup;
2594 zp->zp_dedup_verify = dedup && dedup_verify;
2595 zp->zp_nopwrite = nopwrite;
2596 zp->zp_encrypt = encrypt;
2597 zp->zp_byteorder = ZFS_HOST_BYTEORDER;
2598 zp->zp_direct_write = (wp & WP_DIRECT_WR) ? B_TRUE : B_FALSE;
2599 zp->zp_rewrite = B_FALSE;
2600 memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN);
2601 memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN);
2602 memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN);
2603 zp->zp_zpl_smallblk = os->os_zpl_special_smallblock;
2604 zp->zp_storage_type = dn ? dn->dn_storage_type : DMU_OT_NONE;
2605
2606 ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
2607 }
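
/*
 * Illustrative sketch (not part of this file): dmu_write_policy() is where
 * dataset properties (checksum, compression, copies, redundant_metadata,
 * dedup, encryption) are folded into the zio_prop_t consumed by the write
 * pipeline. A caller typically does something like the following, where the
 * names other than dmu_write_policy() and zio_write() are placeholders:
 *
 *	zio_prop_t zp;
 *	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
 *	zio = zio_write(pio, os->os_spa, txg, bp, data, lsize, psize, &zp,
 *	    ready_cb, NULL, done_cb, cb_arg, ZIO_PRIORITY_SYNC_WRITE,
 *	    ZIO_FLAG_CANFAIL, &zb);
 */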
2608
2609 /*
2610 * Reports the location of data and holes in an object. In order to
2611 * accurately report holes, all dirty data must be synced to disk. This
2612 * causes extremely poor performance when seeking for holes in a dirty file.
2613 * As a compromise, only provide hole data when the dnode is clean. When
2614 * a dnode is dirty, report it as having no holes by returning EBUSY,
2615 * which is always safe to do.
2616 */
2617 int
2618 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
2619 {
2620 dnode_t *dn;
2621 uint64_t txg, maxtxg = 0;
2622 int err;
2623
2624 restart:
2625 err = dnode_hold(os, object, FTAG, &dn);
2626 if (err)
2627 return (err);
2628
2629 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2630
2631 if (dnode_is_dirty(dn)) {
2632 /*
2633 * If the zfs_dmu_offset_next_sync module option is enabled
2634 * then hole reporting has been requested. Dirty dnodes
2635 * must be synced to disk to accurately report holes.
2636 *
2637 * Provided a RL_READER rangelock spanning 0-UINT64_MAX is
2638 * held by the caller only limited restarts will be required.
2639 * We tolerate callers which do not hold the rangelock by
2640 * returning EBUSY and not reporting holes after at most
2641 * TXG_CONCURRENT_STATES (3) restarts.
2642 */
2643 if (zfs_dmu_offset_next_sync) {
2644 rw_exit(&dn->dn_struct_rwlock);
2645 dnode_rele(dn, FTAG);
2646
2647 if (maxtxg == 0) {
2648 txg = spa_last_synced_txg(dmu_objset_spa(os));
2649 maxtxg = txg + TXG_CONCURRENT_STATES;
2650 } else if (txg >= maxtxg)
2651 return (SET_ERROR(EBUSY));
2652
2653 txg_wait_synced(dmu_objset_pool(os), ++txg);
2654 goto restart;
2655 }
2656
2657 err = SET_ERROR(EBUSY);
2658 } else {
2659 err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK |
2660 (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
2661 }
2662
2663 rw_exit(&dn->dn_struct_rwlock);
2664 dnode_rele(dn, FTAG);
2665
2666 return (err);
2667 }
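
/*
 * Illustrative sketch (not part of this file): a SEEK_HOLE-style caller
 * treating both "not found" and "dirty dnode" as a virtual hole at end of
 * file, consistent with the comment above. Names are placeholders.
 *
 *	uint64_t noff = start;
 *	error = dmu_offset_next(os, object, B_TRUE, &noff);
 *	if (error == ESRCH || error == EBUSY)
 *		noff = file_size;	(no hole found, or holes not reported)
 *	else if (error != 0)
 *		return (error);
 *	*off = noff;
 */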
2668
2669 int
2670 dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
2671 blkptr_t *bps, size_t *nbpsp)
2672 {
2673 dmu_buf_t **dbp, *dbuf;
2674 dmu_buf_impl_t *db;
2675 blkptr_t *bp;
2676 int error, numbufs;
2677
2678 error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
2679 &numbufs, &dbp, DMU_READ_PREFETCH);
2680 if (error != 0) {
2681 if (error == ESRCH) {
2682 error = SET_ERROR(ENXIO);
2683 }
2684 return (error);
2685 }
2686
2687 ASSERT3U(numbufs, <=, *nbpsp);
2688
2689 for (int i = 0; i < numbufs; i++) {
2690 dbuf = dbp[i];
2691 db = (dmu_buf_impl_t *)dbuf;
2692
2693 mutex_enter(&db->db_mtx);
2694
2695 if (!list_is_empty(&db->db_dirty_records)) {
2696 dbuf_dirty_record_t *dr;
2697
2698 dr = list_head(&db->db_dirty_records);
2699 if (dr->dt.dl.dr_brtwrite) {
2700 /*
2701 * This is a very special case: we clone a block
2702 * and, in the same transaction group, we read
2703 * its BP (most likely to clone the clone).
2704 */
2705 bp = &dr->dt.dl.dr_overridden_by;
2706 } else {
2707 /*
2708 * The block was modified in the same
2709 * transaction group.
2710 */
2711 mutex_exit(&db->db_mtx);
2712 error = SET_ERROR(EAGAIN);
2713 goto out;
2714 }
2715 } else {
2716 bp = db->db_blkptr;
2717 }
2718
2719 mutex_exit(&db->db_mtx);
2720
2721 if (bp == NULL) {
2722 /*
2723 * The file size was increased, but the block was never
2724 * written; otherwise we would either have the block
2725 * pointer or the dirty record and would not get here.
2726 * It is effectively a hole, so report it as such.
2727 */
2728 BP_ZERO(&bps[i]);
2729 continue;
2730 }
2731 /*
2732 * Make sure we clone only data blocks.
2733 */
2734 if (BP_IS_METADATA(bp) && !BP_IS_HOLE(bp)) {
2735 error = SET_ERROR(EINVAL);
2736 goto out;
2737 }
2738
2739 /*
2740 * If the block was allocated in transaction group that is not
2741 * yet synced, we could clone it, but we couldn't write this
2742 * operation into the ZIL, or it may be impossible to replay, since
2743 * the block may appear not yet allocated at that point.
2744 */
2745 if (BP_GET_PHYSICAL_BIRTH(bp) > spa_freeze_txg(os->os_spa)) {
2746 error = SET_ERROR(EINVAL);
2747 goto out;
2748 }
2749 if (BP_GET_PHYSICAL_BIRTH(bp) >
2750 spa_last_synced_txg(os->os_spa)) {
2751 error = SET_ERROR(EAGAIN);
2752 goto out;
2753 }
2754
2755 bps[i] = *bp;
2756 }
2757
2758 *nbpsp = numbufs;
2759 out:
2760 dmu_buf_rele_array(dbp, numbufs, FTAG);
2761
2762 return (error);
2763 }
2764
2765 int
2766 dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
2767 dmu_tx_t *tx, const blkptr_t *bps, size_t nbps)
2768 {
2769 spa_t *spa;
2770 dmu_buf_t **dbp, *dbuf;
2771 dmu_buf_impl_t *db;
2772 struct dirty_leaf *dl;
2773 dbuf_dirty_record_t *dr;
2774 const blkptr_t *bp;
2775 int error = 0, i, numbufs;
2776
2777 spa = os->os_spa;
2778
2779 VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
2780 &numbufs, &dbp, DMU_READ_PREFETCH));
2781 ASSERT3U(nbps, ==, numbufs);
2782
2783 /*
2784 * Before we start cloning, make sure that the dbuf sizes match the new
2785 * BPs' sizes. If they don't, that's a no-go, as we are not able to
2786 * shrink dbufs.
2787 */
2788 for (i = 0; i < numbufs; i++) {
2789 dbuf = dbp[i];
2790 db = (dmu_buf_impl_t *)dbuf;
2791 bp = &bps[i];
2792
2793 ASSERT3U(db->db.db_object, !=, DMU_META_DNODE_OBJECT);
2794 ASSERT0(db->db_level);
2795 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2796 ASSERT(db->db_blkid != DMU_SPILL_BLKID);
2797
2798 if (!BP_IS_HOLE(bp) && BP_GET_LSIZE(bp) != dbuf->db_size) {
2799 error = SET_ERROR(EXDEV);
2800 goto out;
2801 }
2802 }
2803
2804 for (i = 0; i < numbufs; i++) {
2805 dbuf = dbp[i];
2806 db = (dmu_buf_impl_t *)dbuf;
2807 bp = &bps[i];
2808
2809 dmu_buf_will_clone_or_dio(dbuf, tx);
2810
2811 mutex_enter(&db->db_mtx);
2812
2813 dr = list_head(&db->db_dirty_records);
2814 VERIFY(dr != NULL);
2815 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2816 dl = &dr->dt.dl;
2817 ASSERT0(dl->dr_has_raw_params);
2818 dl->dr_overridden_by = *bp;
2819 if (!BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) != 0) {
2820 if (!BP_IS_EMBEDDED(bp)) {
2821 BP_SET_BIRTH(&dl->dr_overridden_by, dr->dr_txg,
2822 BP_GET_PHYSICAL_BIRTH(bp));
2823 BP_SET_REWRITE(&dl->dr_overridden_by, 0);
2824 } else {
2825 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by,
2826 dr->dr_txg);
2827 }
2828 }
2829 dl->dr_brtwrite = B_TRUE;
2830 dl->dr_override_state = DR_OVERRIDDEN;
2831
2832 mutex_exit(&db->db_mtx);
2833
2834 /*
2835 * When data is embedded into the BP there is no need to create
2836 * a BRT entry, as there is no data block. Just copy the BP as
2837 * it contains the data.
2838 */
2839 if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
2840 brt_pending_add(spa, bp, tx);
2841 }
2842 }
2843 out:
2844 dmu_buf_rele_array(dbp, numbufs, FTAG);
2845
2846 return (error);
2847 }
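
/*
 * Illustrative sketch (not part of this file): block cloning is a two-step
 * operation built from the two functions above. The source BPs are read
 * first (no transaction required), then installed into the destination
 * object under an assigned transaction. Range locking, length clamping and
 * retry on EAGAIN are omitted here; the names are placeholders.
 *
 *	blkptr_t *bps = kmem_alloc(sizeof (bps[0]) * maxblocks, KM_SLEEP);
 *	size_t nbps = maxblocks;
 *	error = dmu_read_l0_bps(inos, inobj, inoff, len, bps, &nbps);
 *	if (error == 0)
 *		error = dmu_brt_clone(outos, outobj, outoff, len, tx,
 *		    bps, nbps);
 *	kmem_free(bps, sizeof (bps[0]) * maxblocks);
 */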
2848
2849 void
2850 __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2851 {
2852 dnode_phys_t *dnp = dn->dn_phys;
2853
2854 doi->doi_data_block_size = dn->dn_datablksz;
2855 doi->doi_metadata_block_size = dn->dn_indblkshift ?
2856 1ULL << dn->dn_indblkshift : 0;
2857 doi->doi_type = dn->dn_type;
2858 doi->doi_bonus_type = dn->dn_bonustype;
2859 doi->doi_bonus_size = dn->dn_bonuslen;
2860 doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
2861 doi->doi_indirection = dn->dn_nlevels;
2862 doi->doi_checksum = dn->dn_checksum;
2863 doi->doi_compress = dn->dn_compress;
2864 doi->doi_nblkptr = dn->dn_nblkptr;
2865 doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
2866 doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
2867 doi->doi_fill_count = 0;
2868 for (int i = 0; i < dnp->dn_nblkptr; i++)
2869 doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
2870 }
2871
2872 void
2873 dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2874 {
2875 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2876 mutex_enter(&dn->dn_mtx);
2877
2878 __dmu_object_info_from_dnode(dn, doi);
2879
2880 mutex_exit(&dn->dn_mtx);
2881 rw_exit(&dn->dn_struct_rwlock);
2882 }
2883
2884 /*
2885 * Get information on a DMU object.
2886 * If doi is NULL, just indicates whether the object exists.
2887 */
2888 int
2889 dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
2890 {
2891 dnode_t *dn;
2892 int err = dnode_hold(os, object, FTAG, &dn);
2893
2894 if (err)
2895 return (err);
2896
2897 if (doi != NULL)
2898 dmu_object_info_from_dnode(dn, doi);
2899
2900 dnode_rele(dn, FTAG);
2901 return (0);
2902 }
2903
2904 /*
2905 * As above, but faster; can be used when you have a held dbuf in hand.
2906 */
2907 void
2908 dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
2909 {
2910 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2911
2912 DB_DNODE_ENTER(db);
2913 dmu_object_info_from_dnode(DB_DNODE(db), doi);
2914 DB_DNODE_EXIT(db);
2915 }
2916
2917 /*
2918 * Faster still when you only care about the size.
2919 */
2920 void
2921 dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
2922 u_longlong_t *nblk512)
2923 {
2924 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2925 dnode_t *dn;
2926
2927 DB_DNODE_ENTER(db);
2928 dn = DB_DNODE(db);
2929
2930 *blksize = dn->dn_datablksz;
2931 /* add in number of slots used for the dnode itself */
2932 *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
2933 SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
2934 DB_DNODE_EXIT(db);
2935 }
2936
2937 void
2938 dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
2939 {
2940 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2941
2942 DB_DNODE_ENTER(db);
2943 *dnsize = DB_DNODE(db)->dn_num_slots << DNODE_SHIFT;
2944 DB_DNODE_EXIT(db);
2945 }
2946
2947 void
2948 byteswap_uint64_array(void *vbuf, size_t size)
2949 {
2950 uint64_t *buf = vbuf;
2951 size_t count = size >> 3;
2952 int i;
2953
2954 ASSERT0((size & 7));
2955
2956 for (i = 0; i < count; i++)
2957 buf[i] = BSWAP_64(buf[i]);
2958 }
2959
2960 void
2961 byteswap_uint32_array(void *vbuf, size_t size)
2962 {
2963 uint32_t *buf = vbuf;
2964 size_t count = size >> 2;
2965 int i;
2966
2967 ASSERT0((size & 3));
2968
2969 for (i = 0; i < count; i++)
2970 buf[i] = BSWAP_32(buf[i]);
2971 }
2972
2973 void
2974 byteswap_uint16_array(void *vbuf, size_t size)
2975 {
2976 uint16_t *buf = vbuf;
2977 size_t count = size >> 1;
2978 int i;
2979
2980 ASSERT0((size & 1));
2981
2982 for (i = 0; i < count; i++)
2983 buf[i] = BSWAP_16(buf[i]);
2984 }
2985
2986 void
2987 byteswap_uint8_array(void *vbuf, size_t size)
2988 {
2989 (void) vbuf, (void) size;
2990 }
2991
2992 void
2993 dmu_init(void)
2994 {
2995 abd_init();
2996 zfs_dbgmsg_init();
2997 sa_cache_init();
2998 dmu_objset_init();
2999 dnode_init();
3000 zfetch_init();
3001 dmu_tx_init();
3002 l2arc_init();
3003 arc_init();
3004 dbuf_init();
3005 }
3006
3007 void
3008 dmu_fini(void)
3009 {
3010 arc_fini(); /* arc depends on l2arc, so arc must go first */
3011 l2arc_fini();
3012 dmu_tx_fini();
3013 zfetch_fini();
3014 dbuf_fini();
3015 dnode_fini();
3016 dmu_objset_fini();
3017 sa_cache_fini();
3018 zfs_dbgmsg_fini();
3019 abd_fini();
3020 }
3021
3022 EXPORT_SYMBOL(dmu_bonus_hold);
3023 EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
3024 EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
3025 EXPORT_SYMBOL(dmu_buf_rele_array);
3026 EXPORT_SYMBOL(dmu_prefetch);
3027 EXPORT_SYMBOL(dmu_prefetch_by_dnode);
3028 EXPORT_SYMBOL(dmu_prefetch_dnode);
3029 EXPORT_SYMBOL(dmu_prefetch_stream);
3030 EXPORT_SYMBOL(dmu_prefetch_stream_by_dnode);
3031 EXPORT_SYMBOL(dmu_free_range);
3032 EXPORT_SYMBOL(dmu_free_long_range);
3033 EXPORT_SYMBOL(dmu_free_long_object);
3034 EXPORT_SYMBOL(dmu_read);
3035 EXPORT_SYMBOL(dmu_read_by_dnode);
3036 EXPORT_SYMBOL(dmu_read_uio);
3037 EXPORT_SYMBOL(dmu_read_uio_dbuf);
3038 EXPORT_SYMBOL(dmu_read_uio_dnode);
3039 EXPORT_SYMBOL(dmu_write);
3040 EXPORT_SYMBOL(dmu_write_by_dnode);
3041 EXPORT_SYMBOL(dmu_write_uio);
3042 EXPORT_SYMBOL(dmu_write_uio_dbuf);
3043 EXPORT_SYMBOL(dmu_write_uio_dnode);
3044 EXPORT_SYMBOL(dmu_prealloc);
3045 EXPORT_SYMBOL(dmu_object_info);
3046 EXPORT_SYMBOL(dmu_object_info_from_dnode);
3047 EXPORT_SYMBOL(dmu_object_info_from_db);
3048 EXPORT_SYMBOL(dmu_object_size_from_db);
3049 EXPORT_SYMBOL(dmu_object_dnsize_from_db);
3050 EXPORT_SYMBOL(dmu_object_set_nlevels);
3051 EXPORT_SYMBOL(dmu_object_set_blocksize);
3052 EXPORT_SYMBOL(dmu_object_set_maxblkid);
3053 EXPORT_SYMBOL(dmu_object_set_checksum);
3054 EXPORT_SYMBOL(dmu_object_set_compress);
3055 EXPORT_SYMBOL(dmu_offset_next);
3056 EXPORT_SYMBOL(dmu_write_policy);
3057 EXPORT_SYMBOL(dmu_sync);
3058 EXPORT_SYMBOL(dmu_request_arcbuf);
3059 EXPORT_SYMBOL(dmu_return_arcbuf);
3060 EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
3061 EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
3062 EXPORT_SYMBOL(dmu_buf_hold);
3063 EXPORT_SYMBOL(dmu_ot);
3064
3065 ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
3066 "Enable NOP writes");
3067
3068 ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, UINT, ZMOD_RW,
3069 "Percentage of dirtied blocks from frees in one TXG");
3070
3071 ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
3072 "Enable forcing txg sync to find holes");
3073
3074 ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW,
3075 "Limit one prefetch call to this size");
3076
3077 ZFS_MODULE_PARAM(zfs, , dmu_ddt_copies, UINT, ZMOD_RW,
3078 "Override copies= for dedup objects");
3079