xref: /titanic_51/usr/src/uts/common/fs/zfs/dmu.c (revision e52fb54bb8f22da555df8e240ebd249941b0ed95)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  */
25 
26 #include <sys/dmu.h>
27 #include <sys/dmu_impl.h>
28 #include <sys/dmu_tx.h>
29 #include <sys/dbuf.h>
30 #include <sys/dnode.h>
31 #include <sys/zfs_context.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dmu_traverse.h>
34 #include <sys/dsl_dataset.h>
35 #include <sys/dsl_dir.h>
36 #include <sys/dsl_pool.h>
37 #include <sys/dsl_synctask.h>
38 #include <sys/dsl_prop.h>
39 #include <sys/dmu_zfetch.h>
40 #include <sys/zfs_ioctl.h>
41 #include <sys/zap.h>
42 #include <sys/zio_checksum.h>
43 #include <sys/sa.h>
44 #ifdef _KERNEL
45 #include <sys/vmsystm.h>
46 #include <sys/zfs_znode.h>
47 #endif
48 
49 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
50 	{	DMU_BSWAP_UINT8,	TRUE,	"unallocated"		},
51 	{	DMU_BSWAP_ZAP,		TRUE,	"object directory"	},
52 	{	DMU_BSWAP_UINT64,	TRUE,	"object array"		},
53 	{	DMU_BSWAP_UINT8,	TRUE,	"packed nvlist"		},
54 	{	DMU_BSWAP_UINT64,	TRUE,	"packed nvlist size"	},
55 	{	DMU_BSWAP_UINT64,	TRUE,	"bpobj"			},
56 	{	DMU_BSWAP_UINT64,	TRUE,	"bpobj header"		},
57 	{	DMU_BSWAP_UINT64,	TRUE,	"SPA space map header"	},
58 	{	DMU_BSWAP_UINT64,	TRUE,	"SPA space map"		},
59 	{	DMU_BSWAP_UINT64,	TRUE,	"ZIL intent log"	},
60 	{	DMU_BSWAP_DNODE,	TRUE,	"DMU dnode"		},
61 	{	DMU_BSWAP_OBJSET,	TRUE,	"DMU objset"		},
62 	{	DMU_BSWAP_UINT64,	TRUE,	"DSL directory"		},
63 	{	DMU_BSWAP_ZAP,		TRUE,	"DSL directory child map"},
64 	{	DMU_BSWAP_ZAP,		TRUE,	"DSL dataset snap map"	},
65 	{	DMU_BSWAP_ZAP,		TRUE,	"DSL props"		},
66 	{	DMU_BSWAP_UINT64,	TRUE,	"DSL dataset"		},
67 	{	DMU_BSWAP_ZNODE,	TRUE,	"ZFS znode"		},
68 	{	DMU_BSWAP_OLDACL,	TRUE,	"ZFS V0 ACL"		},
69 	{	DMU_BSWAP_UINT8,	FALSE,	"ZFS plain file"	},
70 	{	DMU_BSWAP_ZAP,		TRUE,	"ZFS directory"		},
71 	{	DMU_BSWAP_ZAP,		TRUE,	"ZFS master node"	},
72 	{	DMU_BSWAP_ZAP,		TRUE,	"ZFS delete queue"	},
73 	{	DMU_BSWAP_UINT8,	FALSE,	"zvol object"		},
74 	{	DMU_BSWAP_ZAP,		TRUE,	"zvol prop"		},
75 	{	DMU_BSWAP_UINT8,	FALSE,	"other uint8[]"		},
76 	{	DMU_BSWAP_UINT64,	FALSE,	"other uint64[]"	},
77 	{	DMU_BSWAP_ZAP,		TRUE,	"other ZAP"		},
78 	{	DMU_BSWAP_ZAP,		TRUE,	"persistent error log"	},
79 	{	DMU_BSWAP_UINT8,	TRUE,	"SPA history"		},
80 	{	DMU_BSWAP_UINT64,	TRUE,	"SPA history offsets"	},
81 	{	DMU_BSWAP_ZAP,		TRUE,	"Pool properties"	},
82 	{	DMU_BSWAP_ZAP,		TRUE,	"DSL permissions"	},
83 	{	DMU_BSWAP_ACL,		TRUE,	"ZFS ACL"		},
84 	{	DMU_BSWAP_UINT8,	TRUE,	"ZFS SYSACL"		},
85 	{	DMU_BSWAP_UINT8,	TRUE,	"FUID table"		},
86 	{	DMU_BSWAP_UINT64,	TRUE,	"FUID table size"	},
87 	{	DMU_BSWAP_ZAP,		TRUE,	"DSL dataset next clones"},
88 	{	DMU_BSWAP_ZAP,		TRUE,	"scan work queue"	},
89 	{	DMU_BSWAP_ZAP,		TRUE,	"ZFS user/group used"	},
90 	{	DMU_BSWAP_ZAP,		TRUE,	"ZFS user/group quota"	},
91 	{	DMU_BSWAP_ZAP,		TRUE,	"snapshot refcount tags"},
92 	{	DMU_BSWAP_ZAP,		TRUE,	"DDT ZAP algorithm"	},
93 	{	DMU_BSWAP_ZAP,		TRUE,	"DDT statistics"	},
94 	{	DMU_BSWAP_UINT8,	TRUE,	"System attributes"	},
95 	{	DMU_BSWAP_ZAP,		TRUE,	"SA master node"	},
96 	{	DMU_BSWAP_ZAP,		TRUE,	"SA attr registration"	},
97 	{	DMU_BSWAP_ZAP,		TRUE,	"SA attr layouts"	},
98 	{	DMU_BSWAP_ZAP,		TRUE,	"scan translations"	},
99 	{	DMU_BSWAP_UINT8,	FALSE,	"deduplicated block"	},
100 	{	DMU_BSWAP_ZAP,		TRUE,	"DSL deadlist map"	},
101 	{	DMU_BSWAP_UINT64,	TRUE,	"DSL deadlist map hdr"	},
102 	{	DMU_BSWAP_ZAP,		TRUE,	"DSL dir clones"	},
103 	{	DMU_BSWAP_UINT64,	TRUE,	"bpobj subobj"		}
104 };
105 
106 const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
107 	{	byteswap_uint8_array,	"uint8"		},
108 	{	byteswap_uint16_array,	"uint16"	},
109 	{	byteswap_uint32_array,	"uint32"	},
110 	{	byteswap_uint64_array,	"uint64"	},
111 	{	zap_byteswap,		"zap"		},
112 	{	dnode_buf_byteswap,	"dnode"		},
113 	{	dmu_objset_byteswap,	"objset"	},
114 	{	zfs_znode_byteswap,	"znode"		},
115 	{	zfs_oldacl_byteswap,	"oldacl"	},
116 	{	zfs_acl_byteswap,	"acl"		}
117 };
118 
119 int
120 dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
121     void *tag, dmu_buf_t **dbp, int flags)
122 {
123 	dnode_t *dn;
124 	uint64_t blkid;
125 	dmu_buf_impl_t *db;
126 	int err;
127 	int db_flags = DB_RF_CANFAIL;
128 
129 	if (flags & DMU_READ_NO_PREFETCH)
130 		db_flags |= DB_RF_NOPREFETCH;
131 
132 	err = dnode_hold(os, object, FTAG, &dn);
133 	if (err)
134 		return (err);
135 	blkid = dbuf_whichblock(dn, offset);
136 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
137 	db = dbuf_hold(dn, blkid, tag);
138 	rw_exit(&dn->dn_struct_rwlock);
139 	if (db == NULL) {
140 		err = EIO;
141 	} else {
142 		err = dbuf_read(db, NULL, db_flags);
143 		if (err) {
144 			dbuf_rele(db, tag);
145 			db = NULL;
146 		}
147 	}
148 
149 	dnode_rele(dn, FTAG);
150 	*dbp = &db->db; /* NULL db plus first field offset is NULL */
151 	return (err);
152 }
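
/*
 * Illustrative usage sketch (editorial addition, not part of this file):
 * a hypothetical caller reading one block through dmu_buf_hold().  The
 * local buffer and its length are assumptions for the example.
 *
 *	dmu_buf_t *db;
 *	int err;
 *
 *	err = dmu_buf_hold(os, object, offset, FTAG, &db, DMU_READ_PREFETCH);
 *	if (err != 0)
 *		return (err);
 *	bcopy(db->db_data, local_buf, MIN(local_len, db->db_size));
 *	dmu_buf_rele(db, FTAG);
 */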
153 
154 int
155 dmu_bonus_max(void)
156 {
157 	return (DN_MAX_BONUSLEN);
158 }
159 
160 int
161 dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
162 {
163 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
164 	dnode_t *dn;
165 	int error;
166 
167 	DB_DNODE_ENTER(db);
168 	dn = DB_DNODE(db);
169 
170 	if (dn->dn_bonus != db) {
171 		error = EINVAL;
172 	} else if (newsize < 0 || newsize > db_fake->db_size) {
173 		error = EINVAL;
174 	} else {
175 		dnode_setbonuslen(dn, newsize, tx);
176 		error = 0;
177 	}
178 
179 	DB_DNODE_EXIT(db);
180 	return (error);
181 }
182 
183 int
184 dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
185 {
186 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
187 	dnode_t *dn;
188 	int error;
189 
190 	DB_DNODE_ENTER(db);
191 	dn = DB_DNODE(db);
192 
193 	if (!DMU_OT_IS_VALID(type)) {
194 		error = EINVAL;
195 	} else if (dn->dn_bonus != db) {
196 		error = EINVAL;
197 	} else {
198 		dnode_setbonus_type(dn, type, tx);
199 		error = 0;
200 	}
201 
202 	DB_DNODE_EXIT(db);
203 	return (error);
204 }
205 
206 dmu_object_type_t
207 dmu_get_bonustype(dmu_buf_t *db_fake)
208 {
209 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
210 	dnode_t *dn;
211 	dmu_object_type_t type;
212 
213 	DB_DNODE_ENTER(db);
214 	dn = DB_DNODE(db);
215 	type = dn->dn_bonustype;
216 	DB_DNODE_EXIT(db);
217 
218 	return (type);
219 }
220 
221 int
222 dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
223 {
224 	dnode_t *dn;
225 	int error;
226 
227 	error = dnode_hold(os, object, FTAG, &dn);
227a	if (error != 0)
227b		return (error);
228 	dbuf_rm_spill(dn, tx);
229 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
230 	dnode_rm_spill(dn, tx);
231 	rw_exit(&dn->dn_struct_rwlock);
232 	dnode_rele(dn, FTAG);
233 	return (error);
234 }
235 
236 /*
237  * returns ENOENT, EIO, or 0.
238  */
239 int
240 dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
241 {
242 	dnode_t *dn;
243 	dmu_buf_impl_t *db;
244 	int error;
245 
246 	error = dnode_hold(os, object, FTAG, &dn);
247 	if (error)
248 		return (error);
249 
250 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
251 	if (dn->dn_bonus == NULL) {
252 		rw_exit(&dn->dn_struct_rwlock);
253 		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
254 		if (dn->dn_bonus == NULL)
255 			dbuf_create_bonus(dn);
256 	}
257 	db = dn->dn_bonus;
258 
259 	/* as long as the bonus buf is held, the dnode will be held */
260 	if (refcount_add(&db->db_holds, tag) == 1) {
261 		VERIFY(dnode_add_ref(dn, db));
262 		(void) atomic_inc_32_nv(&dn->dn_dbufs_count);
263 	}
264 
265 	/*
266 	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
267 	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
268 	 * a dnode hold for every dbuf.
269 	 */
270 	rw_exit(&dn->dn_struct_rwlock);
271 
272 	dnode_rele(dn, FTAG);
273 
274 	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));
275 
276 	*dbp = &db->db;
277 	return (0);
278 }
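
/*
 * Usage sketch (editorial addition): the bonus buffer carries an object's
 * inline metadata (e.g. a znode_phys_t for a pre-SA ZPL file).  A
 * hypothetical caller:
 *
 *	dmu_buf_t *db;
 *
 *	if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *		znode_phys_t *zp = db->db_data;
 *		... inspect up to db->db_size bytes ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */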
279 
280 /*
281  * returns ENOENT, EIO, or 0.
282  *
283  * This interface will allocate a blank spill dbuf when a spill blk
284  * doesn't already exist on the dnode.
285  *
286  * if you only want to find an already existing spill db, then
287  * dmu_spill_hold_existing() should be used.
288  */
289 int
290 dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
291 {
292 	dmu_buf_impl_t *db = NULL;
293 	int err;
294 
295 	if ((flags & DB_RF_HAVESTRUCT) == 0)
296 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
297 
298 	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
299 
300 	if ((flags & DB_RF_HAVESTRUCT) == 0)
301 		rw_exit(&dn->dn_struct_rwlock);
302 
303 	ASSERT(db != NULL);
304 	err = dbuf_read(db, NULL, flags);
305 	if (err == 0)
306 		*dbp = &db->db;
307 	else
308 		dbuf_rele(db, tag);
309 	return (err);
310 }
311 
312 int
313 dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
314 {
315 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
316 	dnode_t *dn;
317 	int err;
318 
319 	DB_DNODE_ENTER(db);
320 	dn = DB_DNODE(db);
321 
322 	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
323 		err = EINVAL;
324 	} else {
325 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
326 
327 		if (!dn->dn_have_spill) {
328 			err = ENOENT;
329 		} else {
330 			err = dmu_spill_hold_by_dnode(dn,
331 			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
332 		}
333 
334 		rw_exit(&dn->dn_struct_rwlock);
335 	}
336 
337 	DB_DNODE_EXIT(db);
338 	return (err);
339 }
340 
341 int
342 dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
343 {
344 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
345 	dnode_t *dn;
346 	int err;
347 
348 	DB_DNODE_ENTER(db);
349 	dn = DB_DNODE(db);
350 	err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
351 	DB_DNODE_EXIT(db);
352 
353 	return (err);
354 }
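
/*
 * Usage sketch (editorial addition): given a held bonus buffer, look up
 * the spill block only if one exists; ENOENT means the dnode carries no
 * spill block, and a successful hold is released with dmu_buf_rele().
 *
 *	dmu_buf_t *spill_db;
 *
 *	if (dmu_spill_hold_existing(bonus_db, FTAG, &spill_db) == 0) {
 *		... read overflow attributes from spill_db->db_data ...
 *		dmu_buf_rele(spill_db, FTAG);
 *	}
 */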
355 
356 /*
357  * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
358  * to take a held dnode rather than <os, object> -- the lookup is wasteful,
359  * and can induce severe lock contention when writing to several files
360  * whose dnodes are in the same block.
361  */
362 static int
363 dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
364     int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
365 {
366 	dsl_pool_t *dp = NULL;
367 	dmu_buf_t **dbp;
368 	uint64_t blkid, nblks, i;
369 	uint32_t dbuf_flags;
370 	int err;
371 	zio_t *zio;
372 	hrtime_t start;
373 
374 	ASSERT(length <= DMU_MAX_ACCESS);
375 
376 	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
377 	if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
378 		dbuf_flags |= DB_RF_NOPREFETCH;
379 
380 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
381 	if (dn->dn_datablkshift) {
382 		int blkshift = dn->dn_datablkshift;
383 		nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
384 		    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
385 	} else {
386 		if (offset + length > dn->dn_datablksz) {
387 			zfs_panic_recover("zfs: accessing past end of object "
388 			    "%llx/%llx (size=%u access=%llu+%llu)",
389 			    (longlong_t)dn->dn_objset->
390 			    os_dsl_dataset->ds_object,
391 			    (longlong_t)dn->dn_object, dn->dn_datablksz,
392 			    (longlong_t)offset, (longlong_t)length);
393 			rw_exit(&dn->dn_struct_rwlock);
394 			return (EIO);
395 		}
396 		nblks = 1;
397 	}
398 	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
399 
400 	if (dn->dn_objset->os_dsl_dataset)
401 		dp = dn->dn_objset->os_dsl_dataset->ds_dir->dd_pool;
402 	if (dp && dsl_pool_sync_context(dp))
403 		start = gethrtime();
404 	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
405 	blkid = dbuf_whichblock(dn, offset);
406 	for (i = 0; i < nblks; i++) {
407 		dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
408 		if (db == NULL) {
409 			rw_exit(&dn->dn_struct_rwlock);
410 			dmu_buf_rele_array(dbp, nblks, tag);
411 			zio_nowait(zio);
412 			return (EIO);
413 		}
414 		/* initiate async i/o */
415 		if (read) {
416 			(void) dbuf_read(db, zio, dbuf_flags);
417 		}
418 		dbp[i] = &db->db;
419 	}
420 	rw_exit(&dn->dn_struct_rwlock);
421 
422 	/* wait for async i/o */
423 	err = zio_wait(zio);
424 	/* track read overhead when we are in sync context */
425 	if (dp && dsl_pool_sync_context(dp))
426 		dp->dp_read_overhead += gethrtime() - start;
427 	if (err) {
428 		dmu_buf_rele_array(dbp, nblks, tag);
429 		return (err);
430 	}
431 
432 	/* wait for other io to complete */
433 	if (read) {
434 		for (i = 0; i < nblks; i++) {
435 			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
436 			mutex_enter(&db->db_mtx);
437 			while (db->db_state == DB_READ ||
438 			    db->db_state == DB_FILL)
439 				cv_wait(&db->db_changed, &db->db_mtx);
440 			if (db->db_state == DB_UNCACHED)
441 				err = EIO;
442 			mutex_exit(&db->db_mtx);
443 			if (err) {
444 				dmu_buf_rele_array(dbp, nblks, tag);
445 				return (err);
446 			}
447 		}
448 	}
449 
450 	*numbufsp = nblks;
451 	*dbpp = dbp;
452 	return (0);
453 }
454 
455 static int
456 dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
457     uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
458 {
459 	dnode_t *dn;
460 	int err;
461 
462 	err = dnode_hold(os, object, FTAG, &dn);
463 	if (err)
464 		return (err);
465 
466 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
467 	    numbufsp, dbpp, DMU_READ_PREFETCH);
468 
469 	dnode_rele(dn, FTAG);
470 
471 	return (err);
472 }
473 
474 int
475 dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
476     uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
477 {
478 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
479 	dnode_t *dn;
480 	int err;
481 
482 	DB_DNODE_ENTER(db);
483 	dn = DB_DNODE(db);
484 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
485 	    numbufsp, dbpp, DMU_READ_PREFETCH);
486 	DB_DNODE_EXIT(db);
487 
488 	return (err);
489 }
490 
491 void
492 dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
493 {
494 	int i;
495 	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
496 
497 	if (numbufs == 0)
498 		return;
499 
500 	for (i = 0; i < numbufs; i++) {
501 		if (dbp[i])
502 			dbuf_rele(dbp[i], tag);
503 	}
504 
505 	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
506 }
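
/*
 * Usage sketch (editorial addition): the hold-array interfaces pair with
 * dmu_buf_rele_array(), which drops every hold and frees the array itself.
 *
 *	dmu_buf_t **dbp;
 *	int numbufs, i;
 *
 *	if (dmu_buf_hold_array_by_bonus(bonus_db, offset, length, TRUE,
 *	    FTAG, &numbufs, &dbp) == 0) {
 *		for (i = 0; i < numbufs; i++)
 *			... read dbp[i]->db_data ...
 *		dmu_buf_rele_array(dbp, numbufs, FTAG);
 *	}
 */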
507 
508 void
509 dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
510 {
511 	dnode_t *dn;
512 	uint64_t blkid;
513 	int nblks, i, err;
514 
515 	if (zfs_prefetch_disable)
516 		return;
517 
518 	if (len == 0) {  /* they're interested in the bonus buffer */
519 		dn = DMU_META_DNODE(os);
520 
521 		if (object == 0 || object >= DN_MAX_OBJECT)
522 			return;
523 
524 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
525 		blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
526 		dbuf_prefetch(dn, blkid);
527 		rw_exit(&dn->dn_struct_rwlock);
528 		return;
529 	}
530 
531 	/*
532 	 * XXX - Note, if the dnode for the requested object is not
533 	 * already cached, we will do a *synchronous* read in the
534 	 * dnode_hold() call.  The same is true for any indirects.
535 	 */
536 	err = dnode_hold(os, object, FTAG, &dn);
537 	if (err != 0)
538 		return;
539 
540 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
541 	if (dn->dn_datablkshift) {
542 		int blkshift = dn->dn_datablkshift;
543 		nblks = (P2ROUNDUP(offset+len, 1<<blkshift) -
544 		    P2ALIGN(offset, 1<<blkshift)) >> blkshift;
545 	} else {
546 		nblks = (offset < dn->dn_datablksz);
547 	}
548 
549 	if (nblks != 0) {
550 		blkid = dbuf_whichblock(dn, offset);
551 		for (i = 0; i < nblks; i++)
552 			dbuf_prefetch(dn, blkid+i);
553 	}
554 
555 	rw_exit(&dn->dn_struct_rwlock);
556 
557 	dnode_rele(dn, FTAG);
558 }
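
/*
 * Usage sketch (editorial addition): prefetch is advisory and asynchronous;
 * there is nothing to release.  A sequential reader might warm the cache
 * ahead of itself (len == 0 would instead prefetch the dnode/bonus area):
 *
 *	dmu_prefetch(os, object, next_offset, readahead_bytes);
 */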
559 
560 /*
561  * Get the next "chunk" of file data to free.  We traverse the file from
562  * the end so that the file gets shorter over time (if we crash in the
563  * middle, this will leave us in a better state).  We find allocated file
564  * data by simply searching the allocated level 1 indirects.
565  */
566 static int
567 get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t limit)
568 {
569 	uint64_t len = *start - limit;
570 	uint64_t blkcnt = 0;
571 	uint64_t maxblks = DMU_MAX_ACCESS / (1ULL << (dn->dn_indblkshift + 1));
572 	uint64_t iblkrange =
573 	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
574 
575 	ASSERT(limit <= *start);
576 
577 	if (len <= iblkrange * maxblks) {
578 		*start = limit;
579 		return (0);
580 	}
581 	ASSERT(ISP2(iblkrange));
582 
583 	while (*start > limit && blkcnt < maxblks) {
584 		int err;
585 
586 		/* find next allocated L1 indirect */
587 		err = dnode_next_offset(dn,
588 		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);
589 
590 		/* if there are no more, then we are done */
591 		if (err == ESRCH) {
592 			*start = limit;
593 			return (0);
594 		} else if (err) {
595 			return (err);
596 		}
597 		blkcnt += 1;
598 
599 		/* reset offset to end of "next" block back */
600 		*start = P2ALIGN(*start, iblkrange);
601 		if (*start <= limit)
602 			*start = limit;
603 		else
604 			*start -= 1;
605 	}
606 	return (0);
607 }
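
/*
 * Worked example (editorial addition) of the arithmetic above: with 128K
 * data blocks and 16K indirect blocks, EPB(14, SPA_BLKPTRSHIFT) is
 * 1 << (14 - 7) = 128 block pointers per indirect, so each L1 indirect
 * spans iblkrange = 128K * 128 = 16M of file data.  A chunk is capped at
 * maxblks such ranges, bounding how much one free transaction can dirty.
 */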
608 
609 static int
610 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
611     uint64_t length, boolean_t free_dnode)
612 {
613 	dmu_tx_t *tx;
614 	uint64_t object_size, start, end, len;
615 	boolean_t trunc = (length == DMU_OBJECT_END);
616 	int align, err;
617 
618 	align = 1 << dn->dn_datablkshift;
619 	ASSERT(align > 0);
620 	object_size = align == 1 ? dn->dn_datablksz :
621 	    (dn->dn_maxblkid + 1) << dn->dn_datablkshift;
622 
623 	end = offset + length;
624 	if (trunc || end > object_size)
625 		end = object_size;
626 	if (end <= offset)
627 		return (0);
628 	length = end - offset;
629 
630 	while (length) {
631 		start = end;
632 		/* assert(offset <= start) */
633 		err = get_next_chunk(dn, &start, offset);
634 		if (err)
635 			return (err);
636 		len = trunc ? DMU_OBJECT_END : end - start;
637 
638 		tx = dmu_tx_create(os);
639 		dmu_tx_hold_free(tx, dn->dn_object, start, len);
640 		err = dmu_tx_assign(tx, TXG_WAIT);
641 		if (err) {
642 			dmu_tx_abort(tx);
643 			return (err);
644 		}
645 
646 		dnode_free_range(dn, start, trunc ? -1 : len, tx);
647 
648 		if (start == 0 && free_dnode) {
649 			ASSERT(trunc);
650 			dnode_free(dn, tx);
651 		}
652 
653 		length -= end - start;
654 
655 		dmu_tx_commit(tx);
656 		end = start;
657 	}
658 	return (0);
659 }
660 
661 int
662 dmu_free_long_range(objset_t *os, uint64_t object,
663     uint64_t offset, uint64_t length)
664 {
665 	dnode_t *dn;
666 	int err;
667 
668 	err = dnode_hold(os, object, FTAG, &dn);
669 	if (err != 0)
670 		return (err);
671 	err = dmu_free_long_range_impl(os, dn, offset, length, FALSE);
672 	dnode_rele(dn, FTAG);
673 	return (err);
674 }
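
/*
 * Usage sketch (editorial addition): truncating an object to new_size
 * frees everything past it in restartable chunks:
 *
 *	err = dmu_free_long_range(os, object, new_size, DMU_OBJECT_END);
 */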
675 
676 int
677 dmu_free_object(objset_t *os, uint64_t object)
678 {
679 	dnode_t *dn;
680 	dmu_tx_t *tx;
681 	int err;
682 
683 	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
684 	    FTAG, &dn);
685 	if (err != 0)
686 		return (err);
687 	if (dn->dn_nlevels == 1) {
688 		tx = dmu_tx_create(os);
689 		dmu_tx_hold_bonus(tx, object);
690 		dmu_tx_hold_free(tx, dn->dn_object, 0, DMU_OBJECT_END);
691 		err = dmu_tx_assign(tx, TXG_WAIT);
692 		if (err == 0) {
693 			dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
694 			dnode_free(dn, tx);
695 			dmu_tx_commit(tx);
696 		} else {
697 			dmu_tx_abort(tx);
698 		}
699 	} else {
700 		err = dmu_free_long_range_impl(os, dn, 0, DMU_OBJECT_END, TRUE);
701 	}
702 	dnode_rele(dn, FTAG);
703 	return (err);
704 }
705 
706 int
707 dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
708     uint64_t size, dmu_tx_t *tx)
709 {
710 	dnode_t *dn;
711 	int err = dnode_hold(os, object, FTAG, &dn);
712 	if (err)
713 		return (err);
714 	ASSERT(offset < UINT64_MAX);
715 	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
716 	dnode_free_range(dn, offset, size, tx);
717 	dnode_rele(dn, FTAG);
718 	return (0);
719 }
720 
721 int
722 dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
723     void *buf, uint32_t flags)
724 {
725 	dnode_t *dn;
726 	dmu_buf_t **dbp;
727 	int numbufs, err;
728 
729 	err = dnode_hold(os, object, FTAG, &dn);
730 	if (err)
731 		return (err);
732 
733 	/*
734 	 * Deal with odd block sizes, where there can't be data past the first
735 	 * block.  If we ever do the tail block optimization, we will need to
736 	 * handle that here as well.
737 	 */
738 	if (dn->dn_maxblkid == 0) {
739 		int newsz = offset > dn->dn_datablksz ? 0 :
740 		    MIN(size, dn->dn_datablksz - offset);
741 		bzero((char *)buf + newsz, size - newsz);
742 		size = newsz;
743 	}
744 
745 	while (size > 0) {
746 		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
747 		int i;
748 
749 		/*
750 		 * NB: we could do this block-at-a-time, but it's nice
751 		 * to be reading in parallel.
752 		 */
753 		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
754 		    TRUE, FTAG, &numbufs, &dbp, flags);
755 		if (err)
756 			break;
757 
758 		for (i = 0; i < numbufs; i++) {
759 			int tocpy;
760 			int bufoff;
761 			dmu_buf_t *db = dbp[i];
762 
763 			ASSERT(size > 0);
764 
765 			bufoff = offset - db->db_offset;
766 			tocpy = (int)MIN(db->db_size - bufoff, size);
767 
768 			bcopy((char *)db->db_data + bufoff, buf, tocpy);
769 
770 			offset += tocpy;
771 			size -= tocpy;
772 			buf = (char *)buf + tocpy;
773 		}
774 		dmu_buf_rele_array(dbp, numbufs, FTAG);
775 	}
776 	dnode_rele(dn, FTAG);
777 	return (err);
778 }
779 
780 void
781 dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
782     const void *buf, dmu_tx_t *tx)
783 {
784 	dmu_buf_t **dbp;
785 	int numbufs, i;
786 
787 	if (size == 0)
788 		return;
789 
790 	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
791 	    FALSE, FTAG, &numbufs, &dbp));
792 
793 	for (i = 0; i < numbufs; i++) {
794 		int tocpy;
795 		int bufoff;
796 		dmu_buf_t *db = dbp[i];
797 
798 		ASSERT(size > 0);
799 
800 		bufoff = offset - db->db_offset;
801 		tocpy = (int)MIN(db->db_size - bufoff, size);
802 
803 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
804 
805 		if (tocpy == db->db_size)
806 			dmu_buf_will_fill(db, tx);
807 		else
808 			dmu_buf_will_dirty(db, tx);
809 
810 		bcopy(buf, (char *)db->db_data + bufoff, tocpy);
811 
812 		if (tocpy == db->db_size)
813 			dmu_buf_fill_done(db, tx);
814 
815 		offset += tocpy;
816 		size -= tocpy;
817 		buf = (char *)buf + tocpy;
818 	}
819 	dmu_buf_rele_array(dbp, numbufs, FTAG);
820 }
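
/*
 * Usage sketch (editorial addition): dmu_write() must be called inside an
 * assigned transaction covering the range.  A minimal caller:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 */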
821 
822 void
823 dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
824     dmu_tx_t *tx)
825 {
826 	dmu_buf_t **dbp;
827 	int numbufs, i;
828 
829 	if (size == 0)
830 		return;
831 
832 	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
833 	    FALSE, FTAG, &numbufs, &dbp));
834 
835 	for (i = 0; i < numbufs; i++) {
836 		dmu_buf_t *db = dbp[i];
837 
838 		dmu_buf_will_not_fill(db, tx);
839 	}
840 	dmu_buf_rele_array(dbp, numbufs, FTAG);
841 }
842 
843 /*
844  * DMU support for xuio
845  */
846 kstat_t *xuio_ksp = NULL;
847 
848 int
849 dmu_xuio_init(xuio_t *xuio, int nblk)
850 {
851 	dmu_xuio_t *priv;
852 	uio_t *uio = &xuio->xu_uio;
853 
854 	uio->uio_iovcnt = nblk;
855 	uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
856 
857 	priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
858 	priv->cnt = nblk;
859 	priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
860 	priv->iovp = uio->uio_iov;
861 	XUIO_XUZC_PRIV(xuio) = priv;
862 
863 	if (XUIO_XUZC_RW(xuio) == UIO_READ)
864 		XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
865 	else
866 		XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);
867 
868 	return (0);
869 }
870 
871 void
872 dmu_xuio_fini(xuio_t *xuio)
873 {
874 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
875 	int nblk = priv->cnt;
876 
877 	kmem_free(priv->iovp, nblk * sizeof (iovec_t));
878 	kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
879 	kmem_free(priv, sizeof (dmu_xuio_t));
880 
881 	if (XUIO_XUZC_RW(xuio) == UIO_READ)
882 		XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
883 	else
884 		XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
885 }
886 
887 /*
888  * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
889  * and increase priv->next by 1.
890  */
891 int
892 dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
893 {
894 	struct iovec *iov;
895 	uio_t *uio = &xuio->xu_uio;
896 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
897 	int i = priv->next++;
898 
899 	ASSERT(i < priv->cnt);
900 	ASSERT(off + n <= arc_buf_size(abuf));
901 	iov = uio->uio_iov + i;
902 	iov->iov_base = (char *)abuf->b_data + off;
903 	iov->iov_len = n;
904 	priv->bufs[i] = abuf;
905 	return (0);
906 }
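
/*
 * Usage sketch (editorial addition): a zero-copy read fills an xuio with
 * one loaned arc buf per block; cf. the xuio branch of dmu_read_uio()
 * below, which is the in-file consumer of this interface.
 *
 *	(void) dmu_xuio_init(xuio, nblk);
 *	for each block:
 *		abuf = dbuf_loan_arcbuf(dbi);
 *		(void) dmu_xuio_add(xuio, abuf, bufoff, tocpy);
 */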
907 
908 int
909 dmu_xuio_cnt(xuio_t *xuio)
910 {
911 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
912 	return (priv->cnt);
913 }
914 
915 arc_buf_t *
916 dmu_xuio_arcbuf(xuio_t *xuio, int i)
917 {
918 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
919 
920 	ASSERT(i < priv->cnt);
921 	return (priv->bufs[i]);
922 }
923 
924 void
925 dmu_xuio_clear(xuio_t *xuio, int i)
926 {
927 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
928 
929 	ASSERT(i < priv->cnt);
930 	priv->bufs[i] = NULL;
931 }
932 
933 static void
934 xuio_stat_init(void)
935 {
936 	xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
937 	    KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
938 	    KSTAT_FLAG_VIRTUAL);
939 	if (xuio_ksp != NULL) {
940 		xuio_ksp->ks_data = &xuio_stats;
941 		kstat_install(xuio_ksp);
942 	}
943 }
944 
945 static void
946 xuio_stat_fini(void)
947 {
948 	if (xuio_ksp != NULL) {
949 		kstat_delete(xuio_ksp);
950 		xuio_ksp = NULL;
951 	}
952 }
953 
954 void
955 xuio_stat_wbuf_copied()
956 {
957 	XUIOSTAT_BUMP(xuiostat_wbuf_copied);
958 }
959 
960 void
961 xuio_stat_wbuf_nocopy()
962 {
963 	XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
964 }
965 
966 #ifdef _KERNEL
967 int
968 dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
969 {
970 	dmu_buf_t **dbp;
971 	int numbufs, i, err;
972 	xuio_t *xuio = NULL;
973 
974 	/*
975 	 * NB: we could do this block-at-a-time, but it's nice
976 	 * to be reading in parallel.
977 	 */
978 	err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
979 	    &numbufs, &dbp);
980 	if (err)
981 		return (err);
982 
983 	if (uio->uio_extflg == UIO_XUIO)
984 		xuio = (xuio_t *)uio;
985 
986 	for (i = 0; i < numbufs; i++) {
987 		int tocpy;
988 		int bufoff;
989 		dmu_buf_t *db = dbp[i];
990 
991 		ASSERT(size > 0);
992 
993 		bufoff = uio->uio_loffset - db->db_offset;
994 		tocpy = (int)MIN(db->db_size - bufoff, size);
995 
996 		if (xuio) {
997 			dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
998 			arc_buf_t *dbuf_abuf = dbi->db_buf;
999 			arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
1000 			err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
1001 			if (!err) {
1002 				uio->uio_resid -= tocpy;
1003 				uio->uio_loffset += tocpy;
1004 			}
1005 
1006 			if (abuf == dbuf_abuf)
1007 				XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
1008 			else
1009 				XUIOSTAT_BUMP(xuiostat_rbuf_copied);
1010 		} else {
1011 			err = uiomove((char *)db->db_data + bufoff, tocpy,
1012 			    UIO_READ, uio);
1013 		}
1014 		if (err)
1015 			break;
1016 
1017 		size -= tocpy;
1018 	}
1019 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1020 
1021 	return (err);
1022 }
1023 
1024 static int
1025 dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
1026 {
1027 	dmu_buf_t **dbp;
1028 	int numbufs;
1029 	int err = 0;
1030 	int i;
1031 
1032 	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
1033 	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
1034 	if (err)
1035 		return (err);
1036 
1037 	for (i = 0; i < numbufs; i++) {
1038 		int tocpy;
1039 		int bufoff;
1040 		dmu_buf_t *db = dbp[i];
1041 
1042 		ASSERT(size > 0);
1043 
1044 		bufoff = uio->uio_loffset - db->db_offset;
1045 		tocpy = (int)MIN(db->db_size - bufoff, size);
1046 
1047 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1048 
1049 		if (tocpy == db->db_size)
1050 			dmu_buf_will_fill(db, tx);
1051 		else
1052 			dmu_buf_will_dirty(db, tx);
1053 
1054 		/*
1055 		 * XXX uiomove could block forever (eg. nfs-backed
1056 		 * pages).  There needs to be a uiolockdown() function
1057 		 * to lock the pages in memory, so that uiomove won't
1058 		 * block.
1059 		 */
1060 		err = uiomove((char *)db->db_data + bufoff, tocpy,
1061 		    UIO_WRITE, uio);
1062 
1063 		if (tocpy == db->db_size)
1064 			dmu_buf_fill_done(db, tx);
1065 
1066 		if (err)
1067 			break;
1068 
1069 		size -= tocpy;
1070 	}
1071 
1072 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1073 	return (err);
1074 }
1075 
1076 int
1077 dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
1078     dmu_tx_t *tx)
1079 {
1080 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1081 	dnode_t *dn;
1082 	int err;
1083 
1084 	if (size == 0)
1085 		return (0);
1086 
1087 	DB_DNODE_ENTER(db);
1088 	dn = DB_DNODE(db);
1089 	err = dmu_write_uio_dnode(dn, uio, size, tx);
1090 	DB_DNODE_EXIT(db);
1091 
1092 	return (err);
1093 }
1094 
1095 int
1096 dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
1097     dmu_tx_t *tx)
1098 {
1099 	dnode_t *dn;
1100 	int err;
1101 
1102 	if (size == 0)
1103 		return (0);
1104 
1105 	err = dnode_hold(os, object, FTAG, &dn);
1106 	if (err)
1107 		return (err);
1108 
1109 	err = dmu_write_uio_dnode(dn, uio, size, tx);
1110 
1111 	dnode_rele(dn, FTAG);
1112 
1113 	return (err);
1114 }
1115 
1116 int
1117 dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1118     page_t *pp, dmu_tx_t *tx)
1119 {
1120 	dmu_buf_t **dbp;
1121 	int numbufs, i;
1122 	int err;
1123 
1124 	if (size == 0)
1125 		return (0);
1126 
1127 	err = dmu_buf_hold_array(os, object, offset, size,
1128 	    FALSE, FTAG, &numbufs, &dbp);
1129 	if (err)
1130 		return (err);
1131 
1132 	for (i = 0; i < numbufs; i++) {
1133 		int tocpy, copied, thiscpy;
1134 		int bufoff;
1135 		dmu_buf_t *db = dbp[i];
1136 		caddr_t va;
1137 
1138 		ASSERT(size > 0);
1139 		ASSERT3U(db->db_size, >=, PAGESIZE);
1140 
1141 		bufoff = offset - db->db_offset;
1142 		tocpy = (int)MIN(db->db_size - bufoff, size);
1143 
1144 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1145 
1146 		if (tocpy == db->db_size)
1147 			dmu_buf_will_fill(db, tx);
1148 		else
1149 			dmu_buf_will_dirty(db, tx);
1150 
1151 		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
1152 			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
1153 			thiscpy = MIN(PAGESIZE, tocpy - copied);
1154 			va = zfs_map_page(pp, S_READ);
1155 			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
1156 			zfs_unmap_page(pp, va);
1157 			pp = pp->p_next;
1158 			bufoff += PAGESIZE;
1159 		}
1160 
1161 		if (tocpy == db->db_size)
1162 			dmu_buf_fill_done(db, tx);
1163 
1164 		offset += tocpy;
1165 		size -= tocpy;
1166 	}
1167 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1168 	return (err);
1169 }
1170 #endif
1171 
1172 /*
1173  * Allocate a loaned anonymous arc buffer.
1174  */
1175 arc_buf_t *
1176 dmu_request_arcbuf(dmu_buf_t *handle, int size)
1177 {
1178 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
1179 	spa_t *spa;
1180 
1181 	DB_GET_SPA(&spa, db);
1182 	return (arc_loan_buf(spa, size));
1183 }
1184 
1185 /*
1186  * Free a loaned arc buffer.
1187  */
1188 void
1189 dmu_return_arcbuf(arc_buf_t *buf)
1190 {
1191 	arc_return_buf(buf, FTAG);
1192 	VERIFY(arc_buf_remove_ref(buf, FTAG) == 1);
1193 }
1194 
1195 /*
1196  * When possible, directly assign the passed loaned arc buffer to a dbuf.
1197  * If this is not possible, copy the contents of the passed arc buf via
1198  * dmu_write().
1199  */
1200 void
1201 dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
1202     dmu_tx_t *tx)
1203 {
1204 	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
1205 	dnode_t *dn;
1206 	dmu_buf_impl_t *db;
1207 	uint32_t blksz = (uint32_t)arc_buf_size(buf);
1208 	uint64_t blkid;
1209 
1210 	DB_DNODE_ENTER(dbuf);
1211 	dn = DB_DNODE(dbuf);
1212 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
1213 	blkid = dbuf_whichblock(dn, offset);
1214 	VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
1215 	rw_exit(&dn->dn_struct_rwlock);
1216 	DB_DNODE_EXIT(dbuf);
1217 
1218 	if (offset == db->db.db_offset && blksz == db->db.db_size) {
1219 		dbuf_assign_arcbuf(db, buf, tx);
1220 		dbuf_rele(db, FTAG);
1221 	} else {
1222 		objset_t *os;
1223 		uint64_t object;
1224 
1225 		DB_DNODE_ENTER(dbuf);
1226 		dn = DB_DNODE(dbuf);
1227 		os = dn->dn_objset;
1228 		object = dn->dn_object;
1229 		DB_DNODE_EXIT(dbuf);
1230 
1231 		dbuf_rele(db, FTAG);
1232 		dmu_write(os, object, offset, blksz, buf->b_data, tx);
1233 		dmu_return_arcbuf(buf);
1234 		XUIOSTAT_BUMP(xuiostat_wbuf_copied);
1235 	}
1236 }
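
/*
 * Usage sketch (editorial addition): the loaned-buffer write path.  Here
 * db_handle is some held dbuf of the target object and blksz its block
 * size; when offset and size line up with the block, the buffer is
 * adopted directly, otherwise dmu_assign_arcbuf() copies and returns it.
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db_handle, blksz);
 *
 *	... fill abuf->b_data with blksz bytes ...
 *	dmu_assign_arcbuf(db_handle, offset, abuf, tx);
 */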
1237 
1238 typedef struct {
1239 	dbuf_dirty_record_t	*dsa_dr;
1240 	dmu_sync_cb_t		*dsa_done;
1241 	zgd_t			*dsa_zgd;
1242 	dmu_tx_t		*dsa_tx;
1243 } dmu_sync_arg_t;
1244 
1245 /* ARGSUSED */
1246 static void
1247 dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1248 {
1249 	dmu_sync_arg_t *dsa = varg;
1250 	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
1251 	blkptr_t *bp = zio->io_bp;
1252 
1253 	if (zio->io_error == 0) {
1254 		if (BP_IS_HOLE(bp)) {
1255 			/*
1256 			 * A block of zeros may compress to a hole, but the
1257 			 * block size still needs to be known for replay.
1258 			 */
1259 			BP_SET_LSIZE(bp, db->db_size);
1260 		} else {
1261 			ASSERT(BP_GET_LEVEL(bp) == 0);
1262 			bp->blk_fill = 1;
1263 		}
1264 	}
1265 }
1266 
1267 static void
1268 dmu_sync_late_arrival_ready(zio_t *zio)
1269 {
1270 	dmu_sync_ready(zio, NULL, zio->io_private);
1271 }
1272 
1273 /* ARGSUSED */
1274 static void
1275 dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1276 {
1277 	dmu_sync_arg_t *dsa = varg;
1278 	dbuf_dirty_record_t *dr = dsa->dsa_dr;
1279 	dmu_buf_impl_t *db = dr->dr_dbuf;
1280 
1281 	mutex_enter(&db->db_mtx);
1282 	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1283 	if (zio->io_error == 0) {
1284 		dr->dt.dl.dr_overridden_by = *zio->io_bp;
1285 		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1286 		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
1287 		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by))
1288 			BP_ZERO(&dr->dt.dl.dr_overridden_by);
1289 	} else {
1290 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1291 	}
1292 	cv_broadcast(&db->db_changed);
1293 	mutex_exit(&db->db_mtx);
1294 
1295 	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1296 
1297 	kmem_free(dsa, sizeof (*dsa));
1298 }
1299 
1300 static void
1301 dmu_sync_late_arrival_done(zio_t *zio)
1302 {
1303 	blkptr_t *bp = zio->io_bp;
1304 	dmu_sync_arg_t *dsa = zio->io_private;
1305 
1306 	if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
1307 		ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1308 		ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1309 		zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1310 	}
1311 
1312 	dmu_tx_commit(dsa->dsa_tx);
1313 
1314 	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1315 
1316 	kmem_free(dsa, sizeof (*dsa));
1317 }
1318 
1319 static int
1320 dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
1321     zio_prop_t *zp, zbookmark_t *zb)
1322 {
1323 	dmu_sync_arg_t *dsa;
1324 	dmu_tx_t *tx;
1325 
1326 	tx = dmu_tx_create(os);
1327 	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
1328 	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1329 		dmu_tx_abort(tx);
1330 		return (EIO);	/* Make zl_get_data do txg_wait_synced() */
1331 	}
1332 
1333 	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1334 	dsa->dsa_dr = NULL;
1335 	dsa->dsa_done = done;
1336 	dsa->dsa_zgd = zgd;
1337 	dsa->dsa_tx = tx;
1338 
1339 	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1340 	    zgd->zgd_db->db_data, zgd->zgd_db->db_size, zp,
1341 	    dmu_sync_late_arrival_ready, dmu_sync_late_arrival_done, dsa,
1342 	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
1343 
1344 	return (0);
1345 }
1346 
1347 /*
1348  * Intent log support: sync the block associated with db to disk.
1349  * N.B. and XXX: the caller is responsible for making sure that the
1350  * data isn't changing while dmu_sync() is writing it.
1351  *
1352  * Return values:
1353  *
1354  *	EEXIST: this txg has already been synced, so there's nothing to do.
1355  *		The caller should not log the write.
1356  *
1357  *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1358  *		The caller should not log the write.
1359  *
1360  *	EALREADY: this block is already in the process of being synced.
1361  *		The caller should track its progress (somehow).
1362  *
1363  *	EIO: could not do the I/O.
1364  *		The caller should do a txg_wait_synced().
1365  *
1366  *	0: the I/O has been initiated.
1367  *		The caller should log this blkptr in the done callback.
1368  *		It is possible that the I/O will fail, in which case
1369  *		the error will be reported to the done callback and
1370  *		propagated to pio from zio_done().
1371  */
1372 int
1373 dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1374 {
1375 	blkptr_t *bp = zgd->zgd_bp;
1376 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1377 	objset_t *os = db->db_objset;
1378 	dsl_dataset_t *ds = os->os_dsl_dataset;
1379 	dbuf_dirty_record_t *dr;
1380 	dmu_sync_arg_t *dsa;
1381 	zbookmark_t zb;
1382 	zio_prop_t zp;
1383 	dnode_t *dn;
1384 
1385 	ASSERT(pio != NULL);
1386 	ASSERT(BP_IS_HOLE(bp));
1387 	ASSERT(txg != 0);
1388 
1389 	SET_BOOKMARK(&zb, ds->ds_object,
1390 	    db->db.db_object, db->db_level, db->db_blkid);
1391 
1392 	DB_DNODE_ENTER(db);
1393 	dn = DB_DNODE(db);
1394 	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
1395 	DB_DNODE_EXIT(db);
1396 
1397 	/*
1398 	 * If we're frozen (running ziltest), we always need to generate a bp.
1399 	 */
1400 	if (txg > spa_freeze_txg(os->os_spa))
1401 		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1402 
1403 	/*
1404 	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1405 	 * and us.  If we determine that this txg is not yet syncing,
1406 	 * but it begins to sync a moment later, that's OK because the
1407 	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1408 	 */
1409 	mutex_enter(&db->db_mtx);
1410 
1411 	if (txg <= spa_last_synced_txg(os->os_spa)) {
1412 		/*
1413 		 * This txg has already synced.  There's nothing to do.
1414 		 */
1415 		mutex_exit(&db->db_mtx);
1416 		return (EEXIST);
1417 	}
1418 
1419 	if (txg <= spa_syncing_txg(os->os_spa)) {
1420 		/*
1421 		 * This txg is currently syncing, so we can't mess with
1422 		 * the dirty record anymore; just write a new log block.
1423 		 */
1424 		mutex_exit(&db->db_mtx);
1425 		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1426 	}
1427 
1428 	dr = db->db_last_dirty;
1429 	while (dr && dr->dr_txg != txg)
1430 		dr = dr->dr_next;
1431 
1432 	if (dr == NULL) {
1433 		/*
1434 		 * There's no dr for this dbuf, so it must have been freed.
1435 		 * There's no need to log writes to freed blocks, so we're done.
1436 		 */
1437 		mutex_exit(&db->db_mtx);
1438 		return (ENOENT);
1439 	}
1440 
1441 	ASSERT(dr->dr_txg == txg);
1442 	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
1443 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1444 		/*
1445 		 * We have already issued a sync write for this buffer,
1446 		 * or this buffer has already been synced.  It could not
1447 		 * have been dirtied since, or we would have cleared the state.
1448 		 */
1449 		mutex_exit(&db->db_mtx);
1450 		return (EALREADY);
1451 	}
1452 
1453 	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
1454 	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1455 	mutex_exit(&db->db_mtx);
1456 
1457 	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1458 	dsa->dsa_dr = dr;
1459 	dsa->dsa_done = done;
1460 	dsa->dsa_zgd = zgd;
1461 	dsa->dsa_tx = NULL;
1462 
1463 	zio_nowait(arc_write(pio, os->os_spa, txg,
1464 	    bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db), &zp,
1465 	    dmu_sync_ready, dmu_sync_done, dsa,
1466 	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
1467 
1468 	return (0);
1469 }
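
/*
 * Usage sketch (editorial addition): a zl_get_data-style caller dispatching
 * on the return values documented above; my_done_cb and zgd stand in for
 * the caller's callback and state.
 *
 *	error = dmu_sync(zio, txg, my_done_cb, zgd);
 *	switch (error) {
 *	case 0:		break;	(in flight; done callback logs the blkptr)
 *	case EEXIST:
 *	case ENOENT:	break;	(nothing to log)
 *	case EALREADY:	...	(track the in-flight sync)
 *	case EIO:	txg_wait_synced(dmu_objset_pool(os), txg); break;
 *	}
 */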
1470 
1471 int
1472 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
1473 	dmu_tx_t *tx)
1474 {
1475 	dnode_t *dn;
1476 	int err;
1477 
1478 	err = dnode_hold(os, object, FTAG, &dn);
1479 	if (err)
1480 		return (err);
1481 	err = dnode_set_blksz(dn, size, ibs, tx);
1482 	dnode_rele(dn, FTAG);
1483 	return (err);
1484 }
1485 
1486 void
1487 dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
1488 	dmu_tx_t *tx)
1489 {
1490 	dnode_t *dn;
1491 
1492 	/* XXX assumes dnode_hold will not get an i/o error */
1493 	(void) dnode_hold(os, object, FTAG, &dn);
1494 	ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
1495 	dn->dn_checksum = checksum;
1496 	dnode_setdirty(dn, tx);
1497 	dnode_rele(dn, FTAG);
1498 }
1499 
1500 void
1501 dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
1502 	dmu_tx_t *tx)
1503 {
1504 	dnode_t *dn;
1505 
1506 	/* XXX assumes dnode_hold will not get an i/o error */
1507 	(void) dnode_hold(os, object, FTAG, &dn);
1508 	ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
1509 	dn->dn_compress = compress;
1510 	dnode_setdirty(dn, tx);
1511 	dnode_rele(dn, FTAG);
1512 }
1513 
1514 int zfs_mdcomp_disable = 0;
1515 
1516 void
1517 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
1518 {
1519 	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
1520 	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
1521 	    (wp & WP_SPILL));
1522 	enum zio_checksum checksum = os->os_checksum;
1523 	enum zio_compress compress = os->os_compress;
1524 	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
1525 	boolean_t dedup;
1526 	boolean_t dedup_verify = os->os_dedup_verify;
1527 	int copies = os->os_copies;
1528 
1529 	/*
1530 	 * Determine checksum setting.
1531 	 */
1532 	if (ismd) {
1533 		/*
1534 		 * Metadata always gets checksummed.  If the data
1535 		 * checksum is multi-bit correctable, and it's not a
1536 		 * ZBT-style checksum, then it's suitable for metadata
1537 		 * as well.  Otherwise, the metadata checksum defaults
1538 		 * to fletcher4.
1539 		 */
1540 		if (zio_checksum_table[checksum].ci_correctable < 1 ||
1541 		    zio_checksum_table[checksum].ci_eck)
1542 			checksum = ZIO_CHECKSUM_FLETCHER_4;
1543 	} else {
1544 		checksum = zio_checksum_select(dn->dn_checksum, checksum);
1545 	}
1546 
1547 	/*
1548 	 * Determine compression setting.
1549 	 */
1550 	if (ismd) {
1551 		/*
1552 		 * XXX -- we should design a compression algorithm
1553 		 * that specializes in arrays of bps.
1554 		 */
1555 		compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY :
1556 		    ZIO_COMPRESS_LZJB;
1557 	} else {
1558 		compress = zio_compress_select(dn->dn_compress, compress);
1559 	}
1560 
1561 	/*
1562 	 * Determine dedup setting.  If we are in dmu_sync(), we won't
1563 	 * actually dedup now because that's all done in syncing context;
1564  * but we do want to use the dedup checksum.  If the checksum is not
1565 	 * strong enough to ensure unique signatures, force dedup_verify.
1566 	 */
1567 	dedup = (!ismd && dedup_checksum != ZIO_CHECKSUM_OFF);
1568 	if (dedup) {
1569 		checksum = dedup_checksum;
1570 		if (!zio_checksum_table[checksum].ci_dedup)
1571 			dedup_verify = 1;
1572 	}
1573 
1574 	if (wp & WP_DMU_SYNC)
1575 		dedup = 0;
1576 
1577 	if (wp & WP_NOFILL) {
1578 		ASSERT(!ismd && level == 0);
1579 		checksum = ZIO_CHECKSUM_OFF;
1580 		compress = ZIO_COMPRESS_OFF;
1581 		dedup = B_FALSE;
1582 	}
1583 
1584 	zp->zp_checksum = checksum;
1585 	zp->zp_compress = compress;
1586 	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
1587 	zp->zp_level = level;
1588 	zp->zp_copies = MIN(copies + ismd, spa_max_replication(os->os_spa));
1589 	zp->zp_dedup = dedup;
1590 	zp->zp_dedup_verify = dedup && dedup_verify;
1591 }
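
/*
 * Worked example (editorial addition): on a dataset with copies=1, a
 * level-0 plain-file block gets zp_copies = 1 and the inherited checksum
 * and compression, while a metadata block (ismd) gets an extra ditto copy,
 * zp_copies = MIN(1 + 1, spa_max_replication()) (normally 2), and falls
 * back to fletcher4 if the configured checksum is unsuitable for metadata.
 */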
1592 
1593 int
1594 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
1595 {
1596 	dnode_t *dn;
1597 	int i, err;
1598 
1599 	err = dnode_hold(os, object, FTAG, &dn);
1600 	if (err)
1601 		return (err);
1602 	/*
1603 	 * Sync any current changes before
1604 	 * we go trundling through the block pointers.
1605 	 */
1606 	for (i = 0; i < TXG_SIZE; i++) {
1607 		if (list_link_active(&dn->dn_dirty_link[i]))
1608 			break;
1609 	}
1610 	if (i != TXG_SIZE) {
1611 		dnode_rele(dn, FTAG);
1612 		txg_wait_synced(dmu_objset_pool(os), 0);
1613 		err = dnode_hold(os, object, FTAG, &dn);
1614 		if (err)
1615 			return (err);
1616 	}
1617 
1618 	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
1619 	dnode_rele(dn, FTAG);
1620 
1621 	return (err);
1622 }
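
/*
 * Usage sketch (editorial addition): this is the engine behind
 * SEEK_HOLE/SEEK_DATA style lookups.  *off is both the starting offset
 * and the result; ESRCH (from dnode_next_offset()) means there is no
 * hole (or data) at or beyond the starting offset.
 *
 *	uint64_t off = start;
 *
 *	err = dmu_offset_next(os, object, B_TRUE, &off);
 *	if (err == 0)
 *		... off is the start of the next hole ...
 */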
1623 
1624 void
1625 dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
1626 {
1627 	dnode_phys_t *dnp;
1628 
1629 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
1630 	mutex_enter(&dn->dn_mtx);
1631 
1632 	dnp = dn->dn_phys;
1633 
1634 	doi->doi_data_block_size = dn->dn_datablksz;
1635 	doi->doi_metadata_block_size = dn->dn_indblkshift ?
1636 	    1ULL << dn->dn_indblkshift : 0;
1637 	doi->doi_type = dn->dn_type;
1638 	doi->doi_bonus_type = dn->dn_bonustype;
1639 	doi->doi_bonus_size = dn->dn_bonuslen;
1640 	doi->doi_indirection = dn->dn_nlevels;
1641 	doi->doi_checksum = dn->dn_checksum;
1642 	doi->doi_compress = dn->dn_compress;
1643 	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
1644 	doi->doi_max_offset = (dnp->dn_maxblkid + 1) * dn->dn_datablksz;
1645 	doi->doi_fill_count = 0;
1646 	for (int i = 0; i < dnp->dn_nblkptr; i++)
1647 		doi->doi_fill_count += dnp->dn_blkptr[i].blk_fill;
1648 
1649 	mutex_exit(&dn->dn_mtx);
1650 	rw_exit(&dn->dn_struct_rwlock);
1651 }
1652 
1653 /*
1654  * Get information on a DMU object.
1655  * If doi is NULL, just indicates whether the object exists.
1656  */
1657 int
1658 dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
1659 {
1660 	dnode_t *dn;
1661 	int err = dnode_hold(os, object, FTAG, &dn);
1662 
1663 	if (err)
1664 		return (err);
1665 
1666 	if (doi != NULL)
1667 		dmu_object_info_from_dnode(dn, doi);
1668 
1669 	dnode_rele(dn, FTAG);
1670 	return (0);
1671 }
1672 
1673 /*
1674  * As above, but faster; can be used when you have a held dbuf in hand.
1675  */
1676 void
1677 dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
1678 {
1679 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1680 
1681 	DB_DNODE_ENTER(db);
1682 	dmu_object_info_from_dnode(DB_DNODE(db), doi);
1683 	DB_DNODE_EXIT(db);
1684 }
1685 
1686 /*
1687  * Faster still when you only care about the size.
1688  * This is specifically optimized for zfs_getattr().
1689  */
1690 void
1691 dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
1692     u_longlong_t *nblk512)
1693 {
1694 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1695 	dnode_t *dn;
1696 
1697 	DB_DNODE_ENTER(db);
1698 	dn = DB_DNODE(db);
1699 
1700 	*blksize = dn->dn_datablksz;
1701 	/* add 1 for dnode space */
1702 	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
1703 	    SPA_MINBLOCKSHIFT) + 1;
1704 	DB_DNODE_EXIT(db);
1705 }
1706 
1707 void
1708 byteswap_uint64_array(void *vbuf, size_t size)
1709 {
1710 	uint64_t *buf = vbuf;
1711 	size_t count = size >> 3;
1712 	int i;
1713 
1714 	ASSERT((size & 7) == 0);
1715 
1716 	for (i = 0; i < count; i++)
1717 		buf[i] = BSWAP_64(buf[i]);
1718 }
1719 
1720 void
1721 byteswap_uint32_array(void *vbuf, size_t size)
1722 {
1723 	uint32_t *buf = vbuf;
1724 	size_t count = size >> 2;
1725 	int i;
1726 
1727 	ASSERT((size & 3) == 0);
1728 
1729 	for (i = 0; i < count; i++)
1730 		buf[i] = BSWAP_32(buf[i]);
1731 }
1732 
1733 void
1734 byteswap_uint16_array(void *vbuf, size_t size)
1735 {
1736 	uint16_t *buf = vbuf;
1737 	size_t count = size >> 1;
1738 	int i;
1739 
1740 	ASSERT((size & 1) == 0);
1741 
1742 	for (i = 0; i < count; i++)
1743 		buf[i] = BSWAP_16(buf[i]);
1744 }
1745 
1746 /* ARGSUSED */
1747 void
1748 byteswap_uint8_array(void *vbuf, size_t size)
1749 {
1750 }
1751 
1752 void
1753 dmu_init(void)
1754 {
1755 	zfs_dbgmsg_init();
1756 	sa_cache_init();
1757 	xuio_stat_init();
1758 	dmu_objset_init();
1759 	dnode_init();
1760 	dbuf_init();
1761 	zfetch_init();
1762 	l2arc_init();
1763 	arc_init();
1764 }
1765 
1766 void
1767 dmu_fini(void)
1768 {
1769 	arc_fini();
1770 	l2arc_fini();
1771 	zfetch_fini();
1772 	dbuf_fini();
1773 	dnode_fini();
1774 	dmu_objset_fini();
1775 	xuio_stat_fini();
1776 	sa_cache_fini();
1777 	zfs_dbgmsg_fini();
1778 }
1779