xref: /titanic_51/usr/src/uts/common/fs/zfs/dmu_send.c (revision 70163ac57e58ace1c5c94dfbe85dca5a974eff36)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24  * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25  * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26  * Copyright 2014 HybridCluster. All rights reserved.
27  */
28 
29 #include <sys/dmu.h>
30 #include <sys/dmu_impl.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dbuf.h>
33 #include <sys/dnode.h>
34 #include <sys/zfs_context.h>
35 #include <sys/dmu_objset.h>
36 #include <sys/dmu_traverse.h>
37 #include <sys/dsl_dataset.h>
38 #include <sys/dsl_dir.h>
39 #include <sys/dsl_prop.h>
40 #include <sys/dsl_pool.h>
41 #include <sys/dsl_synctask.h>
42 #include <sys/zfs_ioctl.h>
43 #include <sys/zap.h>
44 #include <sys/zio_checksum.h>
45 #include <sys/zfs_znode.h>
46 #include <zfs_fletcher.h>
47 #include <sys/avl.h>
48 #include <sys/ddt.h>
49 #include <sys/zfs_onexit.h>
50 #include <sys/dmu_send.h>
51 #include <sys/dsl_destroy.h>
52 #include <sys/blkptr.h>
53 #include <sys/dsl_bookmark.h>
54 #include <sys/zfeature.h>
55 
56 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
57 int zfs_send_corrupt_data = B_FALSE;
58 
59 static char *dmu_recv_tag = "dmu_recv_tag";
60 static const char *recv_clone_name = "%recv";
61 
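/*
 * Write out len bytes of stream data, folding them into the running
 * fletcher-4 checksum (dsa_zc) first.  Every record is a multiple of
 * 8 bytes, which keeps the incremental checksum computation aligned.
 * The accumulated checksum is eventually emitted in the DRR_END record,
 * so a receiver can verify the stream end-to-end.  The output offset is
 * advanced under ds_sendstream_lock, which also protects the dataset's
 * list of active send streams.
 */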
62 static int
63 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
64 {
65 	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
66 	ssize_t resid; /* have to get resid to get detailed errno */
67 	ASSERT0(len % 8);
68 
69 	fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
70 	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
71 	    (caddr_t)buf, len,
72 	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
73 
74 	mutex_enter(&ds->ds_sendstream_lock);
75 	*dsp->dsa_off += len;
76 	mutex_exit(&ds->ds_sendstream_lock);
77 
78 	return (dsp->dsa_err);
79 }
80 
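/*
 * Queue a DRR_FREE record, merging it into a pending free record when
 * the two are physically adjacent.  For example, frees of (object 5,
 * offset 0, length 128K) and (object 5, offset 128K, length 128K)
 * coalesce into a single 256K DRR_FREE record.  A length of -1ULL means
 * "free to the end of the object" and is written out immediately, since
 * nothing can be aggregated onto it.
 */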
81 static int
82 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
83     uint64_t length)
84 {
85 	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
86 
87 	/*
88 	 * When we receive a free record, dbuf_free_range() assumes
89 	 * that the receiving system doesn't have any dbufs in the range
90 	 * being freed.  This is always true because there is a one-record
91 	 * constraint: we only send one WRITE record for any given
92 	 * object+offset.  We know that the one-record constraint is
93 	 * true because we always send data in increasing order by
94 	 * object,offset.
95 	 *
96 	 * If the increasing-order constraint ever changes, we should find
97 	 * another way to assert that the one-record constraint is still
98 	 * satisfied.
99 	 */
100 	ASSERT(object > dsp->dsa_last_data_object ||
101 	    (object == dsp->dsa_last_data_object &&
102 	    offset > dsp->dsa_last_data_offset));
103 
104 	/*
105 	 * If we are doing a non-incremental send, then there can't
106 	 * be any data in the dataset we're receiving into.  Therefore
107 	 * a free record would simply be a no-op.  Save space by not
108 	 * sending it to begin with.
109 	 */
110 	if (!dsp->dsa_incremental)
111 		return (0);
112 
113 	if (length != -1ULL && offset + length < offset)
114 		length = -1ULL;
115 
116 	/*
117 	 * If there is a pending op, but it's not PENDING_FREE, push it out,
118 	 * since free block aggregation can only be done for blocks of the
119 	 * same type (i.e., DRR_FREE records can only be aggregated with
120 	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
121 	 * aggregated with other DRR_FREEOBJECTS records).
122 	 */
123 	if (dsp->dsa_pending_op != PENDING_NONE &&
124 	    dsp->dsa_pending_op != PENDING_FREE) {
125 		if (dump_bytes(dsp, dsp->dsa_drr,
126 		    sizeof (dmu_replay_record_t)) != 0)
127 			return (SET_ERROR(EINTR));
128 		dsp->dsa_pending_op = PENDING_NONE;
129 	}
130 
131 	if (dsp->dsa_pending_op == PENDING_FREE) {
132 		/*
133 		 * There should never be a PENDING_FREE if length is -1
134 		 * (because dump_dnode is the only place where this
135 		 * function is called with a -1, and only after flushing
136 		 * any pending record).
137 		 */
138 		ASSERT(length != -1ULL);
139 		/*
140 		 * Check to see whether this free block can be aggregated
141 		 * with the pending one.
142 		 */
143 		if (drrf->drr_object == object && drrf->drr_offset +
144 		    drrf->drr_length == offset) {
145 			drrf->drr_length += length;
146 			return (0);
147 		} else {
148 			/* not a continuation.  Push out pending record */
149 			if (dump_bytes(dsp, dsp->dsa_drr,
150 			    sizeof (dmu_replay_record_t)) != 0)
151 				return (SET_ERROR(EINTR));
152 			dsp->dsa_pending_op = PENDING_NONE;
153 		}
154 	}
155 	/* create a FREE record and make it pending */
156 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
157 	dsp->dsa_drr->drr_type = DRR_FREE;
158 	drrf->drr_object = object;
159 	drrf->drr_offset = offset;
160 	drrf->drr_length = length;
161 	drrf->drr_toguid = dsp->dsa_toguid;
162 	if (length == -1ULL) {
163 		if (dump_bytes(dsp, dsp->dsa_drr,
164 		    sizeof (dmu_replay_record_t)) != 0)
165 			return (SET_ERROR(EINTR));
166 	} else {
167 		dsp->dsa_pending_op = PENDING_FREE;
168 	}
169 
170 	return (0);
171 }
172 
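/*
 * Emit a DRR_WRITE record: a fixed-size header followed by blksz bytes
 * of payload.  When the block pointer carries a dedup-capable checksum,
 * the checksum and the LSIZE/PSIZE/compression values are copied into
 * drr_key and DRR_CHECKSUM_DEDUP is set, so a stream consumer can
 * deduplicate this block without recomputing its checksum.
 */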
173 static int
174 dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
175     uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
176 {
177 	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
178 
179 	/*
180 	 * We send data in increasing object, offset order.
181 	 * See comment in dump_free() for details.
182 	 */
183 	ASSERT(object > dsp->dsa_last_data_object ||
184 	    (object == dsp->dsa_last_data_object &&
185 	    offset > dsp->dsa_last_data_offset));
186 	dsp->dsa_last_data_object = object;
187 	dsp->dsa_last_data_offset = offset + blksz - 1;
188 
189 	/*
190 	 * If there is any kind of pending aggregation (currently either
191 	 * a grouping of free objects or free blocks), push it out to
192 	 * the stream, since aggregation can't be done across operations
193 	 * of different types.
194 	 */
195 	if (dsp->dsa_pending_op != PENDING_NONE) {
196 		if (dump_bytes(dsp, dsp->dsa_drr,
197 		    sizeof (dmu_replay_record_t)) != 0)
198 			return (SET_ERROR(EINTR));
199 		dsp->dsa_pending_op = PENDING_NONE;
200 	}
201 	/* write a DATA record */
202 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
203 	dsp->dsa_drr->drr_type = DRR_WRITE;
204 	drrw->drr_object = object;
205 	drrw->drr_type = type;
206 	drrw->drr_offset = offset;
207 	drrw->drr_length = blksz;
208 	drrw->drr_toguid = dsp->dsa_toguid;
209 	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
210 		/*
211 		 * There's no pre-computed checksum for partial-block
212 		 * writes or embedded BP's, so (like
213 		 * fletcher4-checksummed blocks) userland will have to
214 		 * compute a dedup-capable checksum itself.
215 		 */
216 		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
217 	} else {
218 		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
219 		if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
220 			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
221 		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
222 		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
223 		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
224 		drrw->drr_key.ddk_cksum = bp->blk_cksum;
225 	}
226 
227 	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
228 		return (SET_ERROR(EINTR));
229 	if (dump_bytes(dsp, data, blksz) != 0)
230 		return (SET_ERROR(EINTR));
231 	return (0);
232 }
233 
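/*
 * Emit a DRR_WRITE_EMBEDDED record.  For an embedded block pointer the
 * (compressed) payload is stored in the bp itself, so it is decoded
 * from the bp rather than read from disk; the payload (at most
 * BPE_PAYLOAD_SIZE bytes) is padded out to an 8-byte boundary in the
 * stream.
 */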
234 static int
235 dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
236     int blksz, const blkptr_t *bp)
237 {
238 	char buf[BPE_PAYLOAD_SIZE];
239 	struct drr_write_embedded *drrw =
240 	    &(dsp->dsa_drr->drr_u.drr_write_embedded);
241 
242 	if (dsp->dsa_pending_op != PENDING_NONE) {
243 		if (dump_bytes(dsp, dsp->dsa_drr,
244 		    sizeof (dmu_replay_record_t)) != 0)
245 			return (SET_ERROR(EINTR));
246 		dsp->dsa_pending_op = PENDING_NONE;
247 	}
248 
249 	ASSERT(BP_IS_EMBEDDED(bp));
250 
251 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
252 	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
253 	drrw->drr_object = object;
254 	drrw->drr_offset = offset;
255 	drrw->drr_length = blksz;
256 	drrw->drr_toguid = dsp->dsa_toguid;
257 	drrw->drr_compression = BP_GET_COMPRESS(bp);
258 	drrw->drr_etype = BPE_GET_ETYPE(bp);
259 	drrw->drr_lsize = BPE_GET_LSIZE(bp);
260 	drrw->drr_psize = BPE_GET_PSIZE(bp);
261 
262 	decode_embedded_bp_compressed(bp, buf);
263 
264 	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
265 		return (SET_ERROR(EINTR));
266 	if (dump_bytes(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
267 		return (SET_ERROR(EINTR));
268 	return (0);
269 }
270 
271 static int
272 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
273 {
274 	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
275 
276 	if (dsp->dsa_pending_op != PENDING_NONE) {
277 		if (dump_bytes(dsp, dsp->dsa_drr,
278 		    sizeof (dmu_replay_record_t)) != 0)
279 			return (SET_ERROR(EINTR));
280 		dsp->dsa_pending_op = PENDING_NONE;
281 	}
282 
283 	/* write a SPILL record */
284 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
285 	dsp->dsa_drr->drr_type = DRR_SPILL;
286 	drrs->drr_object = object;
287 	drrs->drr_length = blksz;
288 	drrs->drr_toguid = dsp->dsa_toguid;
289 
290 	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
291 		return (SET_ERROR(EINTR));
292 	if (dump_bytes(dsp, data, blksz) != 0)
293 		return (SET_ERROR(EINTR));
294 	return (0);
295 }
296 
297 static int
298 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
299 {
300 	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
301 
302 	/* See comment in dump_free(). */
303 	if (!dsp->dsa_incremental)
304 		return (0);
305 
306 	/*
307 	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
308 	 * push it out, since free block aggregation can only be done for
309 	 * blocks of the same type (i.e., DRR_FREE records can only be
310 	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
311 	 * can only be aggregated with other DRR_FREEOBJECTS records).
312 	 */
313 	if (dsp->dsa_pending_op != PENDING_NONE &&
314 	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
315 		if (dump_bytes(dsp, dsp->dsa_drr,
316 		    sizeof (dmu_replay_record_t)) != 0)
317 			return (SET_ERROR(EINTR));
318 		dsp->dsa_pending_op = PENDING_NONE;
319 	}
320 	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
321 		/*
322 		 * See whether this free object array can be aggregated
323 		 * with the pending one.
324 		 */
325 		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
326 			drrfo->drr_numobjs += numobjs;
327 			return (0);
328 		} else {
329 			/* can't be aggregated.  Push out pending record */
330 			if (dump_bytes(dsp, dsp->dsa_drr,
331 			    sizeof (dmu_replay_record_t)) != 0)
332 				return (SET_ERROR(EINTR));
333 			dsp->dsa_pending_op = PENDING_NONE;
334 		}
335 	}
336 
337 	/* write a FREEOBJECTS record */
338 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
339 	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
340 	drrfo->drr_firstobj = firstobj;
341 	drrfo->drr_numobjs = numobjs;
342 	drrfo->drr_toguid = dsp->dsa_toguid;
343 
344 	dsp->dsa_pending_op = PENDING_FREEOBJECTS;
345 
346 	return (0);
347 }
348 
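/*
 * Emit a DRR_OBJECT record describing one dnode, followed by its bonus
 * buffer (padded to 8 bytes).  A freed or nonexistent dnode becomes a
 * one-object DRR_FREEOBJECTS record instead, and any data beyond the
 * object's last block is released with an open-ended (length -1ULL)
 * free record.
 */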
349 static int
350 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
351 {
352 	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
353 
354 	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
355 		return (dump_freeobjects(dsp, object, 1));
356 
357 	if (dsp->dsa_pending_op != PENDING_NONE) {
358 		if (dump_bytes(dsp, dsp->dsa_drr,
359 		    sizeof (dmu_replay_record_t)) != 0)
360 			return (SET_ERROR(EINTR));
361 		dsp->dsa_pending_op = PENDING_NONE;
362 	}
363 
364 	/* write an OBJECT record */
365 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
366 	dsp->dsa_drr->drr_type = DRR_OBJECT;
367 	drro->drr_object = object;
368 	drro->drr_type = dnp->dn_type;
369 	drro->drr_bonustype = dnp->dn_bonustype;
370 	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
371 	drro->drr_bonuslen = dnp->dn_bonuslen;
372 	drro->drr_checksumtype = dnp->dn_checksum;
373 	drro->drr_compress = dnp->dn_compress;
374 	drro->drr_toguid = dsp->dsa_toguid;
375 
376 	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
377 	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
378 		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
379 
380 	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
381 		return (SET_ERROR(EINTR));
382 
383 	if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
384 		return (SET_ERROR(EINTR));
385 
386 	/* Free anything past the end of the file. */
387 	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
388 	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
389 		return (SET_ERROR(EINTR));
390 	if (dsp->dsa_err != 0)
391 		return (SET_ERROR(EINTR));
392 	return (0);
393 }
394 
395 static boolean_t
396 backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
397 {
398 	if (!BP_IS_EMBEDDED(bp))
399 		return (B_FALSE);
400 
401 	/*
402 	 * Compression function must be legacy or explicitly enabled.
403 	 */
404 	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
405 	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
406 		return (B_FALSE);
407 
408 	/*
409 	 * Embed type must be explicitly enabled.
410 	 */
411 	switch (BPE_GET_ETYPE(bp)) {
412 	case BP_EMBEDDED_TYPE_DATA:
413 		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
414 			return (B_TRUE);
415 		break;
416 	default:
417 		return (B_FALSE);
418 	}
419 	return (B_FALSE);
420 }
421 
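/*
 * BP_SPAN computes how many bytes of object data one block pointer at
 * the given indirection level covers.  For example, with 128K data
 * blocks (dn_datablkszsec == 256), 16K indirect blocks
 * (dn_indblkshift == 14), and 128-byte block pointers
 * (SPA_BLKPTRSHIFT == 7), a level-1 block pointer spans
 * 128K << 7 == 16M.
 */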
422 #define	BP_SPAN(dnp, level) \
423 	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
424 	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
425 
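/*
 * Callback invoked by traverse_dataset() for each block pointer
 * visited.  Holes in the meta-dnode become DRR_FREEOBJECTS records,
 * ordinary holes become DRR_FREE records, meta-dnode data blocks are
 * decomposed into one DRR_OBJECT record per dnode, SA spill blocks
 * become DRR_SPILL records, embeddable bps become DRR_WRITE_EMBEDDED
 * records, and remaining level-0 data blocks are read through the ARC
 * and emitted as DRR_WRITE records (split into SPA_OLD_MAXBLOCKSIZE
 * chunks if the stream can't carry large blocks).
 */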
426 /* ARGSUSED */
427 static int
428 backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
429     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
430 {
431 	dmu_sendarg_t *dsp = arg;
432 	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
433 	int err = 0;
434 
435 	if (issig(JUSTLOOKING) && issig(FORREAL))
436 		return (SET_ERROR(EINTR));
437 
438 	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
439 	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
440 		return (0);
441 	} else if (zb->zb_level == ZB_ZIL_LEVEL) {
442 		/*
443 		 * If we are sending a non-snapshot (which is allowed on
444 		 * read-only pools), it may have a ZIL, which must be ignored.
445 		 */
446 		return (0);
447 	} else if (BP_IS_HOLE(bp) &&
448 	    zb->zb_object == DMU_META_DNODE_OBJECT) {
449 		uint64_t span = BP_SPAN(dnp, zb->zb_level);
450 		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
451 		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
452 	} else if (BP_IS_HOLE(bp)) {
453 		uint64_t span = BP_SPAN(dnp, zb->zb_level);
454 		err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
455 	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
456 		return (0);
457 	} else if (type == DMU_OT_DNODE) {
458 		dnode_phys_t *blk;
459 		int i;
460 		int blksz = BP_GET_LSIZE(bp);
461 		arc_flags_t aflags = ARC_FLAG_WAIT;
462 		arc_buf_t *abuf;
463 
464 		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
465 		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
466 		    &aflags, zb) != 0)
467 			return (SET_ERROR(EIO));
468 
469 		blk = abuf->b_data;
470 		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
471 			uint64_t dnobj = (zb->zb_blkid <<
472 			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
473 			err = dump_dnode(dsp, dnobj, blk+i);
474 			if (err != 0)
475 				break;
476 		}
477 		(void) arc_buf_remove_ref(abuf, &abuf);
478 	} else if (type == DMU_OT_SA) {
479 		arc_flags_t aflags = ARC_FLAG_WAIT;
480 		arc_buf_t *abuf;
481 		int blksz = BP_GET_LSIZE(bp);
482 
483 		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
484 		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
485 		    &aflags, zb) != 0)
486 			return (SET_ERROR(EIO));
487 
488 		err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
489 		(void) arc_buf_remove_ref(abuf, &abuf);
490 	} else if (backup_do_embed(dsp, bp)) {
491 		/* it's an embedded level-0 block of a regular object */
492 		int blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
493 		err = dump_write_embedded(dsp, zb->zb_object,
494 		    zb->zb_blkid * blksz, blksz, bp);
495 	} else { /* it's a level-0 block of a regular object */
496 		arc_flags_t aflags = ARC_FLAG_WAIT;
497 		arc_buf_t *abuf;
498 		int blksz = BP_GET_LSIZE(bp);
499 		uint64_t offset;
500 
501 		ASSERT3U(blksz, ==, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
502 		ASSERT0(zb->zb_level);
503 		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
504 		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
505 		    &aflags, zb) != 0) {
506 			if (zfs_send_corrupt_data) {
507 				/* Send a block filled with 0x"zfs badd bloc" */
508 				abuf = arc_buf_alloc(spa, blksz, &abuf,
509 				    ARC_BUFC_DATA);
510 				uint64_t *ptr;
511 				for (ptr = abuf->b_data;
512 				    (char *)ptr < (char *)abuf->b_data + blksz;
513 				    ptr++)
514 					*ptr = 0x2f5baddb10c;
515 			} else {
516 				return (SET_ERROR(EIO));
517 			}
518 		}
519 
520 		offset = zb->zb_blkid * blksz;
521 
522 		if (!(dsp->dsa_featureflags &
523 		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
524 		    blksz > SPA_OLD_MAXBLOCKSIZE) {
525 			char *buf = abuf->b_data;
526 			while (blksz > 0 && err == 0) {
527 				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
528 				err = dump_write(dsp, type, zb->zb_object,
529 				    offset, n, NULL, buf);
530 				offset += n;
531 				buf += n;
532 				blksz -= n;
533 			}
534 		} else {
535 			err = dump_write(dsp, type, zb->zb_object,
536 			    offset, blksz, bp, abuf->b_data);
537 		}
538 		(void) arc_buf_remove_ref(abuf, &abuf);
539 	}
540 
541 	ASSERT(err == 0 || err == EINTR);
542 	return (err);
543 }
544 
545 /*
546  * Releases dp using the specified tag.
547  */
548 static int
549 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
550     zfs_bookmark_phys_t *fromzb, boolean_t is_clone, boolean_t embedok,
551     boolean_t large_block_ok, int outfd, vnode_t *vp, offset_t *off)
552 {
553 	objset_t *os;
554 	dmu_replay_record_t *drr;
555 	dmu_sendarg_t *dsp;
556 	int err;
557 	uint64_t fromtxg = 0;
558 	uint64_t featureflags = 0;
559 
560 	err = dmu_objset_from_ds(ds, &os);
561 	if (err != 0) {
562 		dsl_pool_rele(dp, tag);
563 		return (err);
564 	}
565 
566 	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
567 	drr->drr_type = DRR_BEGIN;
568 	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
569 	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
570 	    DMU_SUBSTREAM);
571 
572 #ifdef _KERNEL
573 	if (dmu_objset_type(os) == DMU_OST_ZFS) {
574 		uint64_t version;
575 		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
576 			kmem_free(drr, sizeof (dmu_replay_record_t));
577 			dsl_pool_rele(dp, tag);
578 			return (SET_ERROR(EINVAL));
579 		}
580 		if (version >= ZPL_VERSION_SA) {
581 			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
582 		}
583 	}
584 #endif
585 
586 	if (large_block_ok && ds->ds_large_blocks)
587 		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
588 	if (embedok &&
589 	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
590 		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
591 		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
592 			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
593 	} else {
594 		embedok = B_FALSE;
595 	}
596 
597 	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
598 	    featureflags);
599 
600 	drr->drr_u.drr_begin.drr_creation_time =
601 	    dsl_dataset_phys(ds)->ds_creation_time;
602 	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
603 	if (is_clone)
604 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
605 	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(ds)->ds_guid;
606 	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
607 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
608 
609 	if (fromzb != NULL) {
610 		drr->drr_u.drr_begin.drr_fromguid = fromzb->zbm_guid;
611 		fromtxg = fromzb->zbm_creation_txg;
612 	}
613 	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
614 	if (!ds->ds_is_snapshot) {
615 		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
616 		    sizeof (drr->drr_u.drr_begin.drr_toname));
617 	}
618 
619 	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
620 
621 	dsp->dsa_drr = drr;
622 	dsp->dsa_vp = vp;
623 	dsp->dsa_outfd = outfd;
624 	dsp->dsa_proc = curproc;
625 	dsp->dsa_os = os;
626 	dsp->dsa_off = off;
627 	dsp->dsa_toguid = dsl_dataset_phys(ds)->ds_guid;
628 	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
629 	dsp->dsa_pending_op = PENDING_NONE;
630 	dsp->dsa_incremental = (fromzb != NULL);
631 	dsp->dsa_featureflags = featureflags;
632 
633 	mutex_enter(&ds->ds_sendstream_lock);
634 	list_insert_head(&ds->ds_sendstreams, dsp);
635 	mutex_exit(&ds->ds_sendstream_lock);
636 
637 	dsl_dataset_long_hold(ds, FTAG);
638 	dsl_pool_rele(dp, tag);
639 
640 	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
641 		err = dsp->dsa_err;
642 		goto out;
643 	}
644 
645 	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
646 	    backup_cb, dsp);
647 
648 	if (dsp->dsa_pending_op != PENDING_NONE)
649 		if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
650 			err = SET_ERROR(EINTR);
651 
652 	if (err != 0) {
653 		if (err == EINTR && dsp->dsa_err != 0)
654 			err = dsp->dsa_err;
655 		goto out;
656 	}
657 
658 	bzero(drr, sizeof (dmu_replay_record_t));
659 	drr->drr_type = DRR_END;
660 	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
661 	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
662 
663 	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
664 		err = dsp->dsa_err;
665 		goto out;
666 	}
667 
668 out:
669 	mutex_enter(&ds->ds_sendstream_lock);
670 	list_remove(&ds->ds_sendstreams, dsp);
671 	mutex_exit(&ds->ds_sendstream_lock);
672 
673 	kmem_free(drr, sizeof (dmu_replay_record_t));
674 	kmem_free(dsp, sizeof (dmu_sendarg_t));
675 
676 	dsl_dataset_long_rele(ds, FTAG);
677 
678 	return (err);
679 }
680 
681 int
682 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
683     boolean_t embedok, boolean_t large_block_ok,
684     int outfd, vnode_t *vp, offset_t *off)
685 {
686 	dsl_pool_t *dp;
687 	dsl_dataset_t *ds;
688 	dsl_dataset_t *fromds = NULL;
689 	int err;
690 
691 	err = dsl_pool_hold(pool, FTAG, &dp);
692 	if (err != 0)
693 		return (err);
694 
695 	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
696 	if (err != 0) {
697 		dsl_pool_rele(dp, FTAG);
698 		return (err);
699 	}
700 
701 	if (fromsnap != 0) {
702 		zfs_bookmark_phys_t zb;
703 		boolean_t is_clone;
704 
705 		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
706 		if (err != 0) {
707 			dsl_dataset_rele(ds, FTAG);
708 			dsl_pool_rele(dp, FTAG);
709 			return (err);
710 		}
711 		if (!dsl_dataset_is_before(ds, fromds, 0))
712 			err = SET_ERROR(EXDEV);
713 		zb.zbm_creation_time =
714 		    dsl_dataset_phys(fromds)->ds_creation_time;
715 		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
716 		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
717 		is_clone = (fromds->ds_dir != ds->ds_dir);
718 		dsl_dataset_rele(fromds, FTAG);
719 		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
720 		    embedok, large_block_ok, outfd, vp, off);
721 	} else {
722 		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
723 		    embedok, large_block_ok, outfd, vp, off);
724 	}
725 	dsl_dataset_rele(ds, FTAG);
726 	return (err);
727 }
728 
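/*
 * Like dmu_send_obj(), but identifies the datasets by name.  When
 * sending from a filesystem or volume head (no '@' in tosnap) on a
 * writeable pool, the dataset is owned for the duration of the send so
 * that it can't change underneath us; fromsnap may name either a
 * snapshot ('@') or a bookmark ('#').
 */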
729 int
730 dmu_send(const char *tosnap, const char *fromsnap,
731     boolean_t embedok, boolean_t large_block_ok,
732     int outfd, vnode_t *vp, offset_t *off)
733 {
734 	dsl_pool_t *dp;
735 	dsl_dataset_t *ds;
736 	int err;
737 	boolean_t owned = B_FALSE;
738 
739 	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
740 		return (SET_ERROR(EINVAL));
741 
742 	err = dsl_pool_hold(tosnap, FTAG, &dp);
743 	if (err != 0)
744 		return (err);
745 
746 	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
747 		/*
748 		 * We are sending a filesystem or volume.  Ensure
749 		 * that it doesn't change by owning the dataset.
750 		 */
751 		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
752 		owned = B_TRUE;
753 	} else {
754 		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
755 	}
756 	if (err != 0) {
757 		dsl_pool_rele(dp, FTAG);
758 		return (err);
759 	}
760 
761 	if (fromsnap != NULL) {
762 		zfs_bookmark_phys_t zb;
763 		boolean_t is_clone = B_FALSE;
764 		int fsnamelen = strchr(tosnap, '@') - tosnap;
765 
766 		/*
767 		 * If the fromsnap is in a different filesystem, then
768 		 * mark the send stream as a clone.
769 		 */
770 		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
771 		    (fromsnap[fsnamelen] != '@' &&
772 		    fromsnap[fsnamelen] != '#')) {
773 			is_clone = B_TRUE;
774 		}
775 
776 		if (strchr(fromsnap, '@')) {
777 			dsl_dataset_t *fromds;
778 			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
779 			if (err == 0) {
780 				if (!dsl_dataset_is_before(ds, fromds, 0))
781 					err = SET_ERROR(EXDEV);
782 				zb.zbm_creation_time =
783 				    dsl_dataset_phys(fromds)->ds_creation_time;
784 				zb.zbm_creation_txg =
785 				    dsl_dataset_phys(fromds)->ds_creation_txg;
786 				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
787 				is_clone = (ds->ds_dir != fromds->ds_dir);
788 				dsl_dataset_rele(fromds, FTAG);
789 			}
790 		} else {
791 			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
792 		}
793 		if (err != 0) {
794 			dsl_dataset_rele(ds, FTAG);
795 			dsl_pool_rele(dp, FTAG);
796 			return (err);
797 		}
798 		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
799 		    embedok, large_block_ok, outfd, vp, off);
800 	} else {
801 		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
802 		    embedok, large_block_ok, outfd, vp, off);
803 	}
804 	if (owned)
805 		dsl_dataset_disown(ds, FTAG);
806 	else
807 		dsl_dataset_rele(ds, FTAG);
808 	return (err);
809 }
810 
811 int
812 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
813 {
814 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
815 	int err;
816 	uint64_t size;
817 
818 	ASSERT(dsl_pool_config_held(dp));
819 
820 	/* tosnap must be a snapshot */
821 	if (!ds->ds_is_snapshot)
822 		return (SET_ERROR(EINVAL));
823 
824 	/*
825 	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
826 	 * or the origin's fs.
827 	 */
828 	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
829 		return (SET_ERROR(EXDEV));
830 
831 	/* Get uncompressed size estimate of changed data. */
832 	if (fromds == NULL) {
833 		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
834 	} else {
835 		uint64_t used, comp;
836 		err = dsl_dataset_space_written(fromds, ds,
837 		    &used, &comp, &size);
838 		if (err != 0)
839 			return (err);
840 	}
841 
842 	/*
843 	 * Assume that space (both on-disk and in-stream) is dominated by
844 	 * data.  We will adjust for indirect blocks and the copies property,
845 	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
846 	 */
847 
848 	/*
849 	 * Subtract out approximate space used by indirect blocks.
850 	 * Assume most space is used by data blocks (non-indirect, non-dnode).
851 	 * Assume all blocks are recordsize.  Assume ditto blocks and
852 	 * internal fragmentation cancel out compression.
853 	 *
854 	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
855 	 * block, which we observe in practice.
856 	 */
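	/*
	 * Worked example (assuming the default 128K recordsize and
	 * 128-byte block pointers): the subtraction below removes
	 * sizeof (blkptr_t) / recordsize == 128 / 131072, i.e. roughly
	 * 0.1% of the estimate, and the addition then charges one
	 * dmu_replay_record_t (a few hundred bytes) per 128K record.
	 */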
857 	uint64_t recordsize;
858 	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
859 	if (err != 0)
860 		return (err);
861 	size -= size / recordsize * sizeof (blkptr_t);
862 
863 	/* Add in the space for the record associated with each block. */
864 	size += size / recordsize * sizeof (dmu_replay_record_t);
865 
866 	*sizep = size;
867 
868 	return (0);
869 }
870 
871 typedef struct dmu_recv_begin_arg {
872 	const char *drba_origin;
873 	dmu_recv_cookie_t *drba_cookie;
874 	cred_t *drba_cred;
875 	uint64_t drba_snapobj;
876 } dmu_recv_begin_arg_t;
877 
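/*
 * Validate a receive into an existing filesystem: the temporary %recv
 * clone and the target snapshot name must not already exist, the
 * snapshot limit must allow one more snapshot, and for an incremental
 * stream a snapshot matching fromguid must exist in this dataset's
 * directory, with (unless the receive is forced) no changes on top of
 * it.
 */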
878 static int
879 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
880     uint64_t fromguid)
881 {
882 	uint64_t val;
883 	int error;
884 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
885 
886 	/* temporary clone name must not exist */
887 	error = zap_lookup(dp->dp_meta_objset,
888 	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
889 	    8, 1, &val);
890 	if (error != ENOENT)
891 		return (error == 0 ? EBUSY : error);
892 
893 	/* new snapshot name must not exist */
894 	error = zap_lookup(dp->dp_meta_objset,
895 	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
896 	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
897 	if (error != ENOENT)
898 		return (error == 0 ? EEXIST : error);
899 
900 	/*
901 	 * Check snapshot limit before receiving. We'll recheck at the
902 	 * end, but might as well abort before receiving if we're already over
903 	 * the limit.
904 	 *
905 	 * Note that we do not check the file system limit with
906 	 * dsl_dir_fscount_check because the temporary %clones don't count
907 	 * against that limit.
908 	 */
909 	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
910 	    NULL, drba->drba_cred);
911 	if (error != 0)
912 		return (error);
913 
914 	if (fromguid != 0) {
915 		dsl_dataset_t *snap;
916 		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
917 
918 		/* Find snapshot in this dir that matches fromguid. */
919 		while (obj != 0) {
920 			error = dsl_dataset_hold_obj(dp, obj, FTAG,
921 			    &snap);
922 			if (error != 0)
923 				return (SET_ERROR(ENODEV));
924 			if (snap->ds_dir != ds->ds_dir) {
925 				dsl_dataset_rele(snap, FTAG);
926 				return (SET_ERROR(ENODEV));
927 			}
928 			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
929 				break;
930 			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
931 			dsl_dataset_rele(snap, FTAG);
932 		}
933 		if (obj == 0)
934 			return (SET_ERROR(ENODEV));
935 
936 		if (drba->drba_cookie->drc_force) {
937 			drba->drba_snapobj = obj;
938 		} else {
939 			/*
940 			 * If we are not forcing, there must be no
941 			 * changes since fromsnap.
942 			 */
943 			if (dsl_dataset_modified_since_snap(ds, snap)) {
944 				dsl_dataset_rele(snap, FTAG);
945 				return (SET_ERROR(ETXTBSY));
946 			}
947 			drba->drba_snapobj = ds->ds_prev->ds_object;
948 		}
949 
950 		dsl_dataset_rele(snap, FTAG);
951 	} else {
952 		/* if full, most recent snapshot must be $ORIGIN */
953 		if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= TXG_INITIAL)
954 			return (SET_ERROR(ENODEV));
955 		drba->drba_snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
956 	}
957 
958 	return (0);
959 
960 }
961 
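/*
 * Check half of the dmu_recv_begin() sync task: verify the stream
 * header against the pool (feature flags such as SA_SPILL,
 * EMBEDDED_DATA, and LARGE_BLOCKS must be supported) and against the
 * target dataset, which either already exists (receive into a temporary
 * clone) or doesn't (full backup or clone receive into a new dataset).
 */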
962 static int
963 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
964 {
965 	dmu_recv_begin_arg_t *drba = arg;
966 	dsl_pool_t *dp = dmu_tx_pool(tx);
967 	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
968 	uint64_t fromguid = drrb->drr_fromguid;
969 	int flags = drrb->drr_flags;
970 	int error;
971 	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
972 	dsl_dataset_t *ds;
973 	const char *tofs = drba->drba_cookie->drc_tofs;
974 
975 	/* already checked */
976 	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
977 
978 	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
979 	    DMU_COMPOUNDSTREAM ||
980 	    drrb->drr_type >= DMU_OST_NUMTYPES ||
981 	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
982 		return (SET_ERROR(EINVAL));
983 
984 	/* Verify pool version supports SA if SA_SPILL feature set */
985 	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
986 	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
987 		return (SET_ERROR(ENOTSUP));
988 
989 	/*
990 	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
991 	 * record to a plain WRITE record, so the pool must have the
992 	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
993 	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
994 	 */
995 	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
996 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
997 		return (SET_ERROR(ENOTSUP));
998 	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
999 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1000 		return (SET_ERROR(ENOTSUP));
1001 
1002 	/*
1003 	 * The receiving code doesn't know how to translate large blocks
1004 	 * to smaller ones, so the pool must have the LARGE_BLOCKS
1005 	 * feature enabled if the stream has LARGE_BLOCKS.
1006 	 */
1007 	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1008 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1009 		return (SET_ERROR(ENOTSUP));
1010 
1011 	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1012 	if (error == 0) {
1013 		/* target fs already exists; recv into temp clone */
1014 
1015 		/* Can't recv a clone into an existing fs */
1016 		if (flags & DRR_FLAG_CLONE) {
1017 			dsl_dataset_rele(ds, FTAG);
1018 			return (SET_ERROR(EINVAL));
1019 		}
1020 
1021 		error = recv_begin_check_existing_impl(drba, ds, fromguid);
1022 		dsl_dataset_rele(ds, FTAG);
1023 	} else if (error == ENOENT) {
1024 		/* target fs does not exist; must be a full backup or clone */
1025 		char buf[MAXNAMELEN];
1026 
1027 		/*
1028 		 * If it's a non-clone incremental, we are missing the
1029 		 * target fs, so fail the recv.
1030 		 */
1031 		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
1032 			return (SET_ERROR(ENOENT));
1033 
1034 		/* Open the parent of tofs */
1035 		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
1036 		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
1037 		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
1038 		if (error != 0)
1039 			return (error);
1040 
1041 		/*
1042 		 * Check filesystem and snapshot limits before receiving. We'll
1043 	 * recheck snapshot limits at the end (we create the
1044 		 * filesystems and increment those counts during begin_sync).
1045 		 */
1046 		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1047 		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
1048 		if (error != 0) {
1049 			dsl_dataset_rele(ds, FTAG);
1050 			return (error);
1051 		}
1052 
1053 		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1054 		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
1055 		if (error != 0) {
1056 			dsl_dataset_rele(ds, FTAG);
1057 			return (error);
1058 		}
1059 
1060 		if (drba->drba_origin != NULL) {
1061 			dsl_dataset_t *origin;
1062 			error = dsl_dataset_hold(dp, drba->drba_origin,
1063 			    FTAG, &origin);
1064 			if (error != 0) {
1065 				dsl_dataset_rele(ds, FTAG);
1066 				return (error);
1067 			}
1068 			if (!origin->ds_is_snapshot) {
1069 				dsl_dataset_rele(origin, FTAG);
1070 				dsl_dataset_rele(ds, FTAG);
1071 				return (SET_ERROR(EINVAL));
1072 			}
1073 			if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
1074 				dsl_dataset_rele(origin, FTAG);
1075 				dsl_dataset_rele(ds, FTAG);
1076 				return (SET_ERROR(ENODEV));
1077 			}
1078 			dsl_dataset_rele(origin, FTAG);
1079 		}
1080 		dsl_dataset_rele(ds, FTAG);
1081 		error = 0;
1082 	}
1083 	return (error);
1084 }
1085 
1086 static void
1087 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1088 {
1089 	dmu_recv_begin_arg_t *drba = arg;
1090 	dsl_pool_t *dp = dmu_tx_pool(tx);
1091 	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1092 	const char *tofs = drba->drba_cookie->drc_tofs;
1093 	dsl_dataset_t *ds, *newds;
1094 	uint64_t dsobj;
1095 	int error;
1096 	uint64_t crflags;
1097 
1098 	crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
1099 	    DS_FLAG_CI_DATASET : 0;
1100 
1101 	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1102 	if (error == 0) {
1103 		/* create temporary clone */
1104 		dsl_dataset_t *snap = NULL;
1105 		if (drba->drba_snapobj != 0) {
1106 			VERIFY0(dsl_dataset_hold_obj(dp,
1107 			    drba->drba_snapobj, FTAG, &snap));
1108 		}
1109 		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1110 		    snap, crflags, drba->drba_cred, tx);
1111 		dsl_dataset_rele(snap, FTAG);
1112 		dsl_dataset_rele(ds, FTAG);
1113 	} else {
1114 		dsl_dir_t *dd;
1115 		const char *tail;
1116 		dsl_dataset_t *origin = NULL;
1117 
1118 		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1119 
1120 		if (drba->drba_origin != NULL) {
1121 			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1122 			    FTAG, &origin));
1123 		}
1124 
1125 		/* Create new dataset. */
1126 		dsobj = dsl_dataset_create_sync(dd,
1127 		    strrchr(tofs, '/') + 1,
1128 		    origin, crflags, drba->drba_cred, tx);
1129 		if (origin != NULL)
1130 			dsl_dataset_rele(origin, FTAG);
1131 		dsl_dir_rele(dd, FTAG);
1132 		drba->drba_cookie->drc_newfs = B_TRUE;
1133 	}
1134 	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
1135 
1136 	if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1137 	    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1138 	    !newds->ds_large_blocks) {
1139 		dsl_dataset_activate_large_blocks_sync_impl(dsobj, tx);
1140 		newds->ds_large_blocks = B_TRUE;
1141 	}
1142 
1143 	dmu_buf_will_dirty(newds->ds_dbuf, tx);
1144 	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1145 
1146 	/*
1147 	 * If we actually created a non-clone, we need to create the
1148 	 * objset in our new dataset.
1149 	 */
1150 	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
1151 		(void) dmu_objset_create_impl(dp->dp_spa,
1152 		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1153 	}
1154 
1155 	drba->drba_cookie->drc_ds = newds;
1156 
1157 	spa_history_log_internal_ds(newds, "receive", tx, "");
1158 }
1159 
1160 /*
1161  * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1162  * succeeds; otherwise we will leak the holds on the datasets.
1163  */
1164 int
1165 dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
1166     boolean_t force, char *origin, dmu_recv_cookie_t *drc)
1167 {
1168 	dmu_recv_begin_arg_t drba = { 0 };
1169 	dmu_replay_record_t *drr;
1170 
1171 	bzero(drc, sizeof (dmu_recv_cookie_t));
1172 	drc->drc_drrb = drrb;
1173 	drc->drc_tosnap = tosnap;
1174 	drc->drc_tofs = tofs;
1175 	drc->drc_force = force;
1176 	drc->drc_cred = CRED();
1177 
1178 	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
1179 		drc->drc_byteswap = B_TRUE;
1180 	else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
1181 		return (SET_ERROR(EINVAL));
1182 
1183 	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
1184 	drr->drr_type = DRR_BEGIN;
1185 	drr->drr_u.drr_begin = *drc->drc_drrb;
1186 	if (drc->drc_byteswap) {
1187 		fletcher_4_incremental_byteswap(drr,
1188 		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
1189 	} else {
1190 		fletcher_4_incremental_native(drr,
1191 		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
1192 	}
1193 	kmem_free(drr, sizeof (dmu_replay_record_t));
1194 
1195 	if (drc->drc_byteswap) {
1196 		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
1197 		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
1198 		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
1199 		drrb->drr_type = BSWAP_32(drrb->drr_type);
1200 		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
1201 		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
1202 	}
1203 
1204 	drba.drba_origin = origin;
1205 	drba.drba_cookie = drc;
1206 	drba.drba_cred = CRED();
1207 
1208 	return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
1209 	    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1210 }
1211 
1212 struct restorearg {
1213 	int err;
1214 	boolean_t byteswap;
1215 	vnode_t *vp;
1216 	char *buf;
1217 	uint64_t voff;
1218 	int bufsize; /* amount of memory allocated for buf */
1219 	zio_cksum_t cksum;
1220 	avl_tree_t *guid_to_ds_map;
1221 };
1222 
1223 typedef struct guid_map_entry {
1224 	uint64_t	guid;
1225 	dsl_dataset_t	*gme_ds;
1226 	avl_node_t	avlnode;
1227 } guid_map_entry_t;
1228 
1229 static int
1230 guid_compare(const void *arg1, const void *arg2)
1231 {
1232 	const guid_map_entry_t *gmep1 = arg1;
1233 	const guid_map_entry_t *gmep2 = arg2;
1234 
1235 	if (gmep1->guid < gmep2->guid)
1236 		return (-1);
1237 	else if (gmep1->guid > gmep2->guid)
1238 		return (1);
1239 	return (0);
1240 }
1241 
1242 static void
1243 free_guid_map_onexit(void *arg)
1244 {
1245 	avl_tree_t *ca = arg;
1246 	void *cookie = NULL;
1247 	guid_map_entry_t *gmep;
1248 
1249 	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1250 		dsl_dataset_long_rele(gmep->gme_ds, gmep);
1251 		dsl_dataset_rele(gmep->gme_ds, gmep);
1252 		kmem_free(gmep, sizeof (guid_map_entry_t));
1253 	}
1254 	avl_destroy(ca);
1255 	kmem_free(ca, sizeof (avl_tree_t));
1256 }
1257 
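/*
 * Read exactly len bytes of stream data into buf (or into ra->buf when
 * buf is NULL), folding them into the running checksum with the same
 * fletcher-4 variant the sender used; this mirrors dump_bytes() on the
 * send side.  Returns NULL and sets ra->err on failure, including the
 * short-read (EINVAL) case.
 */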
1258 static void *
1259 restore_read(struct restorearg *ra, int len, char *buf)
1260 {
1261 	int done = 0;
1262 
1263 	if (buf == NULL)
1264 		buf = ra->buf;
1265 
1266 	/* some things will require 8-byte alignment, so everything must */
1267 	ASSERT0(len % 8);
1268 	ASSERT3U(len, <=, ra->bufsize);
1269 
1270 	while (done < len) {
1271 		ssize_t resid;
1272 
1273 		ra->err = vn_rdwr(UIO_READ, ra->vp,
1274 		    buf + done, len - done,
1275 		    ra->voff, UIO_SYSSPACE, FAPPEND,
1276 		    RLIM64_INFINITY, CRED(), &resid);
1277 
1278 		if (resid == len - done)
1279 			ra->err = SET_ERROR(EINVAL);
1280 		ra->voff += len - done - resid;
1281 		done = len - resid;
1282 		if (ra->err != 0)
1283 			return (NULL);
1284 	}
1285 
1286 	ASSERT3U(done, ==, len);
1287 	if (ra->byteswap)
1288 		fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
1289 	else
1290 		fletcher_4_incremental_native(buf, len, &ra->cksum);
1291 	return (buf);
1292 }
1293 
1294 static void
1295 backup_byteswap(dmu_replay_record_t *drr)
1296 {
1297 #define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1298 #define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1299 	drr->drr_type = BSWAP_32(drr->drr_type);
1300 	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1301 	switch (drr->drr_type) {
1302 	case DRR_BEGIN:
1303 		DO64(drr_begin.drr_magic);
1304 		DO64(drr_begin.drr_versioninfo);
1305 		DO64(drr_begin.drr_creation_time);
1306 		DO32(drr_begin.drr_type);
1307 		DO32(drr_begin.drr_flags);
1308 		DO64(drr_begin.drr_toguid);
1309 		DO64(drr_begin.drr_fromguid);
1310 		break;
1311 	case DRR_OBJECT:
1312 		DO64(drr_object.drr_object);
1313 		DO32(drr_object.drr_type);
1314 		DO32(drr_object.drr_bonustype);
1315 		DO32(drr_object.drr_blksz);
1316 		DO32(drr_object.drr_bonuslen);
1317 		DO64(drr_object.drr_toguid);
1318 		break;
1319 	case DRR_FREEOBJECTS:
1320 		DO64(drr_freeobjects.drr_firstobj);
1321 		DO64(drr_freeobjects.drr_numobjs);
1322 		DO64(drr_freeobjects.drr_toguid);
1323 		break;
1324 	case DRR_WRITE:
1325 		DO64(drr_write.drr_object);
1326 		DO32(drr_write.drr_type);
1327 		DO64(drr_write.drr_offset);
1328 		DO64(drr_write.drr_length);
1329 		DO64(drr_write.drr_toguid);
1330 		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
1331 		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
1332 		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
1333 		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
1334 		DO64(drr_write.drr_key.ddk_prop);
1335 		break;
1336 	case DRR_WRITE_BYREF:
1337 		DO64(drr_write_byref.drr_object);
1338 		DO64(drr_write_byref.drr_offset);
1339 		DO64(drr_write_byref.drr_length);
1340 		DO64(drr_write_byref.drr_toguid);
1341 		DO64(drr_write_byref.drr_refguid);
1342 		DO64(drr_write_byref.drr_refobject);
1343 		DO64(drr_write_byref.drr_refoffset);
1344 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
1345 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
1346 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
1347 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
1348 		DO64(drr_write_byref.drr_key.ddk_prop);
1349 		break;
1350 	case DRR_WRITE_EMBEDDED:
1351 		DO64(drr_write_embedded.drr_object);
1352 		DO64(drr_write_embedded.drr_offset);
1353 		DO64(drr_write_embedded.drr_length);
1354 		DO64(drr_write_embedded.drr_toguid);
1355 		DO32(drr_write_embedded.drr_lsize);
1356 		DO32(drr_write_embedded.drr_psize);
1357 		break;
1358 	case DRR_FREE:
1359 		DO64(drr_free.drr_object);
1360 		DO64(drr_free.drr_offset);
1361 		DO64(drr_free.drr_length);
1362 		DO64(drr_free.drr_toguid);
1363 		break;
1364 	case DRR_SPILL:
1365 		DO64(drr_spill.drr_object);
1366 		DO64(drr_spill.drr_length);
1367 		DO64(drr_spill.drr_toguid);
1368 		break;
1369 	case DRR_END:
1370 		DO64(drr_end.drr_checksum.zc_word[0]);
1371 		DO64(drr_end.drr_checksum.zc_word[1]);
1372 		DO64(drr_end.drr_checksum.zc_word[2]);
1373 		DO64(drr_end.drr_checksum.zc_word[3]);
1374 		DO64(drr_end.drr_toguid);
1375 		break;
1376 	}
1377 #undef DO64
1378 #undef DO32
1379 }
1380 
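/*
 * Reconstruct how many block pointers the sender's dnode must have had.
 * An SA bonus implies a spill-capable layout with a single blkptr;
 * otherwise the bonus space left over from DN_MAX_BONUSLEN (320 bytes
 * here) is carved into 128-byte blkptrs.  For example, a 64-byte bonus
 * yields 1 + ((320 - 64) >> 7) == 3 block pointers.
 */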
1381 static inline uint8_t
1382 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1383 {
1384 	if (bonus_type == DMU_OT_SA) {
1385 		return (1);
1386 	} else {
1387 		return (1 +
1388 		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
1389 	}
1390 }
1391 
1392 static int
1393 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
1394 {
1395 	dmu_object_info_t doi;
1396 	dmu_tx_t *tx;
1397 	void *data = NULL;
1398 	uint64_t object;
1399 	int err;
1400 
1401 	if (drro->drr_type == DMU_OT_NONE ||
1402 	    !DMU_OT_IS_VALID(drro->drr_type) ||
1403 	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1404 	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1405 	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1406 	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1407 	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
1408 	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(os)) ||
1409 	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1410 		return (SET_ERROR(EINVAL));
1411 	}
1412 
1413 	err = dmu_object_info(os, drro->drr_object, &doi);
1414 
1415 	if (err != 0 && err != ENOENT)
1416 		return (SET_ERROR(EINVAL));
1417 	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1418 
1419 	if (drro->drr_bonuslen) {
1420 		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8), NULL);
1421 		if (ra->err != 0)
1422 			return (ra->err);
1423 	}
1424 
1425 	/*
1426 	 * If we are losing blkptrs or changing the block size, this must
1427 	 * be a new file instance.  We must clear out the previous file
1428 	 * contents before we can change this type of metadata in the dnode.
1429 	 */
1430 	if (err == 0) {
1431 		int nblkptr;
1432 
1433 		nblkptr = deduce_nblkptr(drro->drr_bonustype,
1434 		    drro->drr_bonuslen);
1435 
1436 		if (drro->drr_blksz != doi.doi_data_block_size ||
1437 		    nblkptr < doi.doi_nblkptr) {
1438 			err = dmu_free_long_range(os, drro->drr_object,
1439 			    0, DMU_OBJECT_END);
1440 			if (err != 0)
1441 				return (SET_ERROR(EINVAL));
1442 		}
1443 	}
1444 
1445 	tx = dmu_tx_create(os);
1446 	dmu_tx_hold_bonus(tx, object);
1447 	err = dmu_tx_assign(tx, TXG_WAIT);
1448 	if (err != 0) {
1449 		dmu_tx_abort(tx);
1450 		return (err);
1451 	}
1452 
1453 	if (object == DMU_NEW_OBJECT) {
1454 		/* currently free, want to be allocated */
1455 		err = dmu_object_claim(os, drro->drr_object,
1456 		    drro->drr_type, drro->drr_blksz,
1457 		    drro->drr_bonustype, drro->drr_bonuslen, tx);
1458 	} else if (drro->drr_type != doi.doi_type ||
1459 	    drro->drr_blksz != doi.doi_data_block_size ||
1460 	    drro->drr_bonustype != doi.doi_bonus_type ||
1461 	    drro->drr_bonuslen != doi.doi_bonus_size) {
1462 		/* currently allocated, but with different properties */
1463 		err = dmu_object_reclaim(os, drro->drr_object,
1464 		    drro->drr_type, drro->drr_blksz,
1465 		    drro->drr_bonustype, drro->drr_bonuslen, tx);
1466 	}
1467 	if (err != 0) {
1468 		dmu_tx_commit(tx);
1469 		return (SET_ERROR(EINVAL));
1470 	}
1471 
1472 	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1473 	    tx);
1474 	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1475 
1476 	if (data != NULL) {
1477 		dmu_buf_t *db;
1478 
1479 		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1480 		dmu_buf_will_dirty(db, tx);
1481 
1482 		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1483 		bcopy(data, db->db_data, drro->drr_bonuslen);
1484 		if (ra->byteswap) {
1485 			dmu_object_byteswap_t byteswap =
1486 			    DMU_OT_BYTESWAP(drro->drr_bonustype);
1487 			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1488 			    drro->drr_bonuslen);
1489 		}
1490 		dmu_buf_rele(db, FTAG);
1491 	}
1492 	dmu_tx_commit(tx);
1493 	return (0);
1494 }
1495 
1496 /* ARGSUSED */
1497 static int
1498 restore_freeobjects(struct restorearg *ra, objset_t *os,
1499     struct drr_freeobjects *drrfo)
1500 {
1501 	uint64_t obj;
1502 
1503 	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1504 		return (SET_ERROR(EINVAL));
1505 
1506 	for (obj = drrfo->drr_firstobj;
1507 	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1508 	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
1509 		int err;
1510 
1511 		if (dmu_object_info(os, obj, NULL) != 0)
1512 			continue;
1513 
1514 		err = dmu_free_long_object(os, obj);
1515 		if (err != 0)
1516 			return (err);
1517 	}
1518 	return (0);
1519 }
1520 
1521 static int
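/*
 * Replay a DRR_WRITE record.  The payload is read directly into a
 * loaned ARC buffer (dmu_request_arcbuf()) and handed to the DMU with
 * dmu_assign_arcbuf(), avoiding an extra copy of the data on its way
 * into the target object.
 */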
1522 restore_write(struct restorearg *ra, objset_t *os,
1523     struct drr_write *drrw)
1524 {
1525 	dmu_tx_t *tx;
1526 	void *data;
1527 	int err;
1528 
1529 	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1530 	    !DMU_OT_IS_VALID(drrw->drr_type))
1531 		return (SET_ERROR(EINVAL));
1532 
1533 	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1534 		return (SET_ERROR(EINVAL));
1535 
1536 	dmu_buf_t *bonus;
1537 	if (dmu_bonus_hold(os, drrw->drr_object, FTAG, &bonus) != 0)
1538 		return (SET_ERROR(EINVAL));
1539 
1540 	arc_buf_t *abuf = dmu_request_arcbuf(bonus, drrw->drr_length);
1541 
1542 	data = restore_read(ra, drrw->drr_length, abuf->b_data);
1543 	if (data == NULL) {
1544 		dmu_return_arcbuf(abuf);
1545 		dmu_buf_rele(bonus, FTAG);
1546 		return (ra->err);
1547 	}
1548 
1549 	tx = dmu_tx_create(os);
1550 
1551 	dmu_tx_hold_write(tx, drrw->drr_object,
1552 	    drrw->drr_offset, drrw->drr_length);
1553 	err = dmu_tx_assign(tx, TXG_WAIT);
1554 	if (err != 0) {
1555 		dmu_return_arcbuf(abuf);
1556 		dmu_buf_rele(bonus, FTAG);
1557 		dmu_tx_abort(tx);
1558 		return (err);
1559 	}
1560 	if (ra->byteswap) {
1561 		dmu_object_byteswap_t byteswap =
1562 		    DMU_OT_BYTESWAP(drrw->drr_type);
1563 		dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1564 	}
1565 	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
1566 	dmu_tx_commit(tx);
1567 	dmu_buf_rele(bonus, FTAG);
1568 	return (0);
1569 }
1570 
1571 /*
1572  * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
1573  * streams to refer to a copy of the data that is already on the
1574  * system because it came in earlier in the stream.  This function
1575  * finds the earlier copy of the data, and uses that copy instead of
1576  * data from the stream to fulfill this write.
1577  */
1578 static int
1579 restore_write_byref(struct restorearg *ra, objset_t *os,
1580     struct drr_write_byref *drrwbr)
1581 {
1582 	dmu_tx_t *tx;
1583 	int err;
1584 	guid_map_entry_t gmesrch;
1585 	guid_map_entry_t *gmep;
1586 	avl_index_t where;
1587 	objset_t *ref_os = NULL;
1588 	dmu_buf_t *dbp;
1589 
1590 	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1591 		return (SET_ERROR(EINVAL));
1592 
1593 	/*
1594 	 * If the GUID of the referenced dataset is different from the
1595 	 * GUID of the target dataset, find the referenced dataset.
1596 	 */
1597 	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1598 		gmesrch.guid = drrwbr->drr_refguid;
1599 		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1600 		    &where)) == NULL) {
1601 			return (SET_ERROR(EINVAL));
1602 		}
1603 		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1604 			return (SET_ERROR(EINVAL));
1605 	} else {
1606 		ref_os = os;
1607 	}
1608 
1609 	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1610 	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
1611 	if (err != 0)
1612 		return (err);
1613 
1614 	tx = dmu_tx_create(os);
1615 
1616 	dmu_tx_hold_write(tx, drrwbr->drr_object,
1617 	    drrwbr->drr_offset, drrwbr->drr_length);
1618 	err = dmu_tx_assign(tx, TXG_WAIT);
1619 	if (err != 0) {
1620 		dmu_tx_abort(tx);
1621 		return (err);
1622 	}
1623 	dmu_write(os, drrwbr->drr_object,
1624 	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1625 	dmu_buf_rele(dbp, FTAG);
1626 	dmu_tx_commit(tx);
1627 	return (0);
1628 }
1629 
1630 static int
1631 restore_write_embedded(struct restorearg *ra, objset_t *os,
1632     struct drr_write_embedded *drrwnp)
1633 {
1634 	dmu_tx_t *tx;
1635 	int err;
1636 	void *data;
1637 
1638 	if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
1639 		return (SET_ERROR(EINVAL));
1640 
1641 	if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
1642 		return (SET_ERROR(EINVAL));
1643 
1644 	if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
1645 		return (SET_ERROR(EINVAL));
1646 	if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
1647 		return (SET_ERROR(EINVAL));
1648 
1649 	data = restore_read(ra, P2ROUNDUP(drrwnp->drr_psize, 8), NULL);
1650 	if (data == NULL)
1651 		return (ra->err);
1652 
1653 	tx = dmu_tx_create(os);
1654 
1655 	dmu_tx_hold_write(tx, drrwnp->drr_object,
1656 	    drrwnp->drr_offset, drrwnp->drr_length);
1657 	err = dmu_tx_assign(tx, TXG_WAIT);
1658 	if (err != 0) {
1659 		dmu_tx_abort(tx);
1660 		return (err);
1661 	}
1662 
1663 	dmu_write_embedded(os, drrwnp->drr_object,
1664 	    drrwnp->drr_offset, data, drrwnp->drr_etype,
1665 	    drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
1666 	    ra->byteswap ^ ZFS_HOST_BYTEORDER, tx);
1667 
1668 	dmu_tx_commit(tx);
1669 	return (0);
1670 }
1671 
1672 static int
1673 restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1674 {
1675 	dmu_tx_t *tx;
1676 	void *data;
1677 	dmu_buf_t *db, *db_spill;
1678 	int err;
1679 
1680 	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1681 	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(os)))
1682 		return (SET_ERROR(EINVAL));
1683 
1684 	data = restore_read(ra, drrs->drr_length, NULL);
1685 	if (data == NULL)
1686 		return (ra->err);
1687 
1688 	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1689 		return (SET_ERROR(EINVAL));
1690 
1691 	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1692 	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1693 		dmu_buf_rele(db, FTAG);
1694 		return (err);
1695 	}
1696 
1697 	tx = dmu_tx_create(os);
1698 
1699 	dmu_tx_hold_spill(tx, db->db_object);
1700 
1701 	err = dmu_tx_assign(tx, TXG_WAIT);
1702 	if (err != 0) {
1703 		dmu_buf_rele(db, FTAG);
1704 		dmu_buf_rele(db_spill, FTAG);
1705 		dmu_tx_abort(tx);
1706 		return (err);
1707 	}
1708 	dmu_buf_will_dirty(db_spill, tx);
1709 
1710 	if (db_spill->db_size < drrs->drr_length)
1711 		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1712 		    drrs->drr_length, tx));
1713 	bcopy(data, db_spill->db_data, drrs->drr_length);
1714 
1715 	dmu_buf_rele(db, FTAG);
1716 	dmu_buf_rele(db_spill, FTAG);
1717 
1718 	dmu_tx_commit(tx);
1719 	return (0);
1720 }
1721 
1722 /* ARGSUSED */
1723 static int
1724 restore_free(struct restorearg *ra, objset_t *os,
1725     struct drr_free *drrf)
1726 {
1727 	int err;
1728 
1729 	if (drrf->drr_length != -1ULL &&
1730 	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1731 		return (SET_ERROR(EINVAL));
1732 
1733 	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1734 		return (SET_ERROR(EINVAL));
1735 
1736 	err = dmu_free_long_range(os, drrf->drr_object,
1737 	    drrf->drr_offset, drrf->drr_length);
1738 	return (err);
1739 }
1740 
1741 /* used to destroy the drc_ds on error */
1742 static void
1743 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1744 {
1745 	char name[MAXNAMELEN];
1746 	dsl_dataset_name(drc->drc_ds, name);
1747 	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1748 	(void) dsl_destroy_head(name);
1749 }
1750 
/*
 * Read a send stream from *vp and apply its records to the dataset
 * prepared by dmu_recv_begin().
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
1754 int
1755 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
1756     int cleanup_fd, uint64_t *action_handlep)
1757 {
1758 	struct restorearg ra = { 0 };
1759 	dmu_replay_record_t *drr;
1760 	objset_t *os;
1761 	zio_cksum_t pcksum;
1762 	int featureflags;
1763 
1764 	ra.byteswap = drc->drc_byteswap;
1765 	ra.cksum = drc->drc_cksum;
1766 	ra.vp = vp;
1767 	ra.voff = *voffp;
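	/* Scratch buffer large enough for any record's payload. */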
1768 	ra.bufsize = SPA_MAXBLOCKSIZE;
1769 	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1770 
1771 	/* these were verified in dmu_recv_begin */
1772 	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
1773 	    DMU_SUBSTREAM);
1774 	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
1775 
1776 	/*
1777 	 * Open the objset we are modifying.
1778 	 */
1779 	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));
1780 
1781 	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
1782 
1783 	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1784 
1785 	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
1786 	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1787 		minor_t minor;
1788 
1789 		if (cleanup_fd == -1) {
1790 			ra.err = SET_ERROR(EBADF);
1791 			goto out;
1792 		}
1793 		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1794 		if (ra.err != 0) {
1795 			cleanup_fd = -1;
1796 			goto out;
1797 		}
1798 
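		/*
		 * The guid map is shared by all substreams of a single
		 * receive: the first substream creates it and registers
		 * an on-exit callback (keyed by cleanup_fd) to free it;
		 * later substreams look it up via the action handle.
		 */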
1799 		if (*action_handlep == 0) {
1800 			ra.guid_to_ds_map =
1801 			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1802 			avl_create(ra.guid_to_ds_map, guid_compare,
1803 			    sizeof (guid_map_entry_t),
1804 			    offsetof(guid_map_entry_t, avlnode));
1805 			ra.err = zfs_onexit_add_cb(minor,
1806 			    free_guid_map_onexit, ra.guid_to_ds_map,
1807 			    action_handlep);
1808 			if (ra.err != 0)
1809 				goto out;
1810 		} else {
1811 			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1812 			    (void **)&ra.guid_to_ds_map);
1813 			if (ra.err != 0)
1814 				goto out;
1815 		}
1816 
1817 		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1818 	}
1819 
1820 	/*
1821 	 * Read records and process them.
1822 	 */
1823 	pcksum = ra.cksum;
1824 	while (ra.err == 0 &&
1825 	    NULL != (drr = restore_read(&ra, sizeof (*drr), NULL))) {
1826 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
1827 			ra.err = SET_ERROR(EINTR);
1828 			goto out;
1829 		}
1830 
1831 		if (ra.byteswap)
1832 			backup_byteswap(drr);
1833 
1834 		switch (drr->drr_type) {
1835 		case DRR_OBJECT:
1836 		{
1837 			/*
1838 			 * We need to make a copy of the record header,
1839 			 * because restore_{object,write} may need to
1840 			 * restore_read(), which will invalidate drr.
1841 			 */
1842 			struct drr_object drro = drr->drr_u.drr_object;
1843 			ra.err = restore_object(&ra, os, &drro);
1844 			break;
1845 		}
1846 		case DRR_FREEOBJECTS:
1847 		{
1848 			struct drr_freeobjects drrfo =
1849 			    drr->drr_u.drr_freeobjects;
1850 			ra.err = restore_freeobjects(&ra, os, &drrfo);
1851 			break;
1852 		}
1853 		case DRR_WRITE:
1854 		{
1855 			struct drr_write drrw = drr->drr_u.drr_write;
1856 			ra.err = restore_write(&ra, os, &drrw);
1857 			break;
1858 		}
1859 		case DRR_WRITE_BYREF:
1860 		{
1861 			struct drr_write_byref drrwbr =
1862 			    drr->drr_u.drr_write_byref;
1863 			ra.err = restore_write_byref(&ra, os, &drrwbr);
1864 			break;
1865 		}
1866 		case DRR_WRITE_EMBEDDED:
1867 		{
1868 			struct drr_write_embedded drrwe =
1869 			    drr->drr_u.drr_write_embedded;
1870 			ra.err = restore_write_embedded(&ra, os, &drrwe);
1871 			break;
1872 		}
1873 		case DRR_FREE:
1874 		{
1875 			struct drr_free drrf = drr->drr_u.drr_free;
1876 			ra.err = restore_free(&ra, os, &drrf);
1877 			break;
1878 		}
1879 		case DRR_END:
1880 		{
1881 			struct drr_end drre = drr->drr_u.drr_end;
1882 			/*
1883 			 * We compare against the *previous* checksum
1884 			 * value, because the stored checksum is of
1885 			 * everything before the DRR_END record.
1886 			 */
1887 			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1888 				ra.err = SET_ERROR(ECKSUM);
1889 			goto out;
1890 		}
1891 		case DRR_SPILL:
1892 		{
1893 			struct drr_spill drrs = drr->drr_u.drr_spill;
1894 			ra.err = restore_spill(&ra, os, &drrs);
1895 			break;
1896 		}
1897 		default:
1898 			ra.err = SET_ERROR(EINVAL);
1899 			goto out;
1900 		}
1901 		pcksum = ra.cksum;
1902 	}
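	/*
	 * A clean stream leaves the loop via the goto in the DRR_END
	 * case above, so if we fall out here the stream ended
	 * prematurely and ra.err must already be set.
	 */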
1903 	ASSERT(ra.err != 0);
1904 
1905 out:
1906 	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1907 		zfs_onexit_fd_rele(cleanup_fd);
1908 
1909 	if (ra.err != 0) {
1910 		/*
1911 		 * destroy what we created, so we don't leave it in the
1912 		 * inconsistent restoring state.
1913 		 */
1914 		dmu_recv_cleanup_ds(drc);
1915 	}
1916 
1917 	kmem_free(ra.buf, ra.bufsize);
1918 	*voffp = ra.voff;
1919 	return (ra.err);
1920 }
1921 
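/*
 * Check phase of the dmu_recv_end() sync task.  For a receive into an
 * existing filesystem, verify that the %recv clone can be swapped with
 * the origin head (after force-destroying any snapshots that came after
 * the origin, if requested); for a brand-new filesystem, just check
 * that the final snapshot can be created.
 */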
1922 static int
1923 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
1924 {
1925 	dmu_recv_cookie_t *drc = arg;
1926 	dsl_pool_t *dp = dmu_tx_pool(tx);
1927 	int error;
1928 
1929 	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
1930 
1931 	if (!drc->drc_newfs) {
1932 		dsl_dataset_t *origin_head;
1933 
1934 		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
1935 		if (error != 0)
1936 			return (error);
1937 		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds cannot
			 * have any snaps of its own).
			 */
1944 			uint64_t obj;
1945 
1946 			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
1947 			while (obj !=
1948 			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
1949 				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			/*
			 * Don't leak the origin_head hold if anything
			 * above failed.
			 */
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
1965 		}
1966 		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
1967 		    origin_head, drc->drc_force, drc->drc_owner, tx);
1968 		if (error != 0) {
1969 			dsl_dataset_rele(origin_head, FTAG);
1970 			return (error);
1971 		}
1972 		error = dsl_dataset_snapshot_check_impl(origin_head,
1973 		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
1974 		dsl_dataset_rele(origin_head, FTAG);
1975 		if (error != 0)
1976 			return (error);
1977 
1978 		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
1979 	} else {
1980 		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
1981 		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
1982 	}
1983 	return (error);
1984 }
1985 
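/*
 * Sync phase of the dmu_recv_end() sync task.  Swap the received %recv
 * clone with the origin head (or, for a new filesystem, just snapshot
 * it), stamp the new snapshot with the stream's creation time and guid,
 * and clear DS_FLAG_INCONSISTENT now that the contents are complete.
 */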
1986 static void
1987 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
1988 {
1989 	dmu_recv_cookie_t *drc = arg;
1990 	dsl_pool_t *dp = dmu_tx_pool(tx);
1991 
1992 	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
1993 	    tx, "snap=%s", drc->drc_tosnap);
1994 
1995 	if (!drc->drc_newfs) {
1996 		dsl_dataset_t *origin_head;
1997 
1998 		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
1999 		    &origin_head));
2000 
2001 		if (drc->drc_force) {
2002 			/*
2003 			 * Destroy any snapshots of drc_tofs (origin_head)
2004 			 * after the origin (the snap before drc_ds).
2005 			 */
2006 			uint64_t obj;
2007 
2008 			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2009 			while (obj !=
2010 			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2011 				dsl_dataset_t *snap;
2012 				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
2013 				    &snap));
2014 				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
2015 				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2016 				dsl_destroy_snapshot_sync_impl(snap,
2017 				    B_FALSE, tx);
2018 				dsl_dataset_rele(snap, FTAG);
2019 			}
2020 		}
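		/*
		 * With any intervening snapshots destroyed, the clone and
		 * the origin head must share the same origin snapshot.
		 */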
		VERIFY3P(drc->drc_ds->ds_prev, ==, origin_head->ds_prev);
2023 
2024 		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
2025 		    origin_head, tx);
2026 		dsl_dataset_snapshot_sync_impl(origin_head,
2027 		    drc->drc_tosnap, tx);
2028 
2029 		/* set snapshot's creation time and guid */
2030 		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
2031 		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
2032 		    drc->drc_drrb->drr_creation_time;
2033 		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
2034 		    drc->drc_drrb->drr_toguid;
2035 		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
2036 		    ~DS_FLAG_INCONSISTENT;
2037 
2038 		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
2039 		dsl_dataset_phys(origin_head)->ds_flags &=
2040 		    ~DS_FLAG_INCONSISTENT;
2041 
2042 		dsl_dataset_rele(origin_head, FTAG);
2043 		dsl_destroy_head_sync_impl(drc->drc_ds, tx);
2044 
2045 		if (drc->drc_owner != NULL)
2046 			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
2047 	} else {
2048 		dsl_dataset_t *ds = drc->drc_ds;
2049 
2050 		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
2051 
2052 		/* set snapshot's creation time and guid */
2053 		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2054 		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
2055 		    drc->drc_drrb->drr_creation_time;
2056 		dsl_dataset_phys(ds->ds_prev)->ds_guid =
2057 		    drc->drc_drrb->drr_toguid;
2058 		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
2059 		    ~DS_FLAG_INCONSISTENT;
2060 
2061 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
2062 		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
2063 	}
2064 	drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
2065 	/*
2066 	 * Release the hold from dmu_recv_begin.  This must be done before
2067 	 * we return to open context, so that when we free the dataset's dnode,
2068 	 * we can evict its bonus buffer.
2069 	 */
2070 	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2071 	drc->drc_ds = NULL;
2072 }
2073 
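/*
 * Add the snapshot created by dmu_recv_end_sync() to the dedup guid
 * map, taking a long hold so it can satisfy WRITE_BYREF records from
 * later streams in the same receive operation.
 */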
2074 static int
2075 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
2076 {
2077 	dsl_pool_t *dp;
2078 	dsl_dataset_t *snapds;
2079 	guid_map_entry_t *gmep;
2080 	int err;
2081 
2082 	ASSERT(guid_map != NULL);
2083 
2084 	err = dsl_pool_hold(name, FTAG, &dp);
2085 	if (err != 0)
2086 		return (err);
2087 	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
2088 	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
2089 	if (err == 0) {
2090 		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
2091 		gmep->gme_ds = snapds;
2092 		avl_add(guid_map, gmep);
2093 		dsl_dataset_long_hold(snapds, gmep);
2094 	} else {
2095 		kmem_free(gmep, sizeof (*gmep));
2096 	}
2097 
2098 	dsl_pool_rele(dp, FTAG);
2099 	return (err);
2100 }
2101 
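/*
 * Estimated number of blocks dirtied by the end-of-receive sync task,
 * passed to dsl_sync_task() for its space check.
 */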
2102 static int dmu_recv_end_modified_blocks = 3;
2103 
2104 static int
2105 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
2106 {
	int error;

#ifdef _KERNEL
	char name[MAXNAMELEN];

	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif
2118 
2119 	error = dsl_sync_task(drc->drc_tofs,
2120 	    dmu_recv_end_check, dmu_recv_end_sync, drc,
2121 	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2122 
2123 	if (error != 0)
2124 		dmu_recv_cleanup_ds(drc);
2125 	return (error);
2126 }
2127 
2128 static int
2129 dmu_recv_new_end(dmu_recv_cookie_t *drc)
2130 {
2131 	int error;
2132 
2133 	error = dsl_sync_task(drc->drc_tofs,
2134 	    dmu_recv_end_check, dmu_recv_end_sync, drc,
2135 	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2136 
2137 	if (error != 0) {
2138 		dmu_recv_cleanup_ds(drc);
2139 	} else if (drc->drc_guid_to_ds_map != NULL) {
2140 		(void) add_ds_to_guidmap(drc->drc_tofs,
2141 		    drc->drc_guid_to_ds_map,
2142 		    drc->drc_newsnapobj);
2143 	}
2144 	return (error);
2145 }
2146 
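/*
 * Finish a receive: run the check/sync tasks above in syncing context,
 * then drop the hold taken by dmu_recv_begin().  On failure the
 * partially received dataset is destroyed.
 *
 * Typical caller sequence (a sketch; zfs_ioc_recv() is the real
 * driver):
 *
 *	dmu_recv_begin(tofs, tosnap, ..., &drc);
 *	dmu_recv_stream(&drc, vp, &voff, cleanup_fd, &action_handle);
 *	dmu_recv_end(&drc, owner);
 */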
2147 int
2148 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
2149 {
2150 	drc->drc_owner = owner;
2151 
2152 	if (drc->drc_newfs)
2153 		return (dmu_recv_new_end(drc));
2154 	else
2155 		return (dmu_recv_existing_end(drc));
2156 }
2157 
2158 /*
2159  * Return TRUE if this objset is currently being received into.
2160  */
2161 boolean_t
2162 dmu_objset_is_receiving(objset_t *os)
2163 {
2164 	return (os->os_dsl_dataset != NULL &&
2165 	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
2166 }
2167