xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_send.c (revision 32b706a1d3367746e0a3e15c957300631d4013d4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24  * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
25  * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26  * Copyright 2014 HybridCluster. All rights reserved.
27  * Copyright 2016 RackTop Systems.
28  * Copyright (c) 2014 Integros [integros.com]
29  */
30 
31 #include <sys/dmu.h>
32 #include <sys/dmu_impl.h>
33 #include <sys/dmu_tx.h>
34 #include <sys/dbuf.h>
35 #include <sys/dnode.h>
36 #include <sys/zfs_context.h>
37 #include <sys/dmu_objset.h>
38 #include <sys/dmu_traverse.h>
39 #include <sys/dsl_dataset.h>
40 #include <sys/dsl_dir.h>
41 #include <sys/dsl_prop.h>
42 #include <sys/dsl_pool.h>
43 #include <sys/dsl_synctask.h>
44 #include <sys/zfs_ioctl.h>
45 #include <sys/zap.h>
46 #include <sys/zio_checksum.h>
47 #include <sys/zfs_znode.h>
48 #include <zfs_fletcher.h>
49 #include <sys/avl.h>
50 #include <sys/ddt.h>
51 #include <sys/zfs_onexit.h>
52 #include <sys/dmu_send.h>
53 #include <sys/dsl_destroy.h>
54 #include <sys/blkptr.h>
55 #include <sys/dsl_bookmark.h>
56 #include <sys/zfeature.h>
57 #include <sys/bqueue.h>
58 
59 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
60 int zfs_send_corrupt_data = B_FALSE;
61 int zfs_send_queue_length = 16 * 1024 * 1024;
62 int zfs_recv_queue_length = 16 * 1024 * 1024;
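/*
 * Note: the two queue lengths above are measured in bytes of queued block
 * records (16 MB by default), not in record counts; see the bqueue_enqueue()
 * calls below, which charge each record by the size of the data it covers.
 */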
63 /* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
64 int zfs_send_set_freerecords_bit = B_TRUE;
65 
66 static char *dmu_recv_tag = "dmu_recv_tag";
67 const char *recv_clone_name = "%recv";
68 
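/*
 * BP_SPAN computes how many bytes of an object a block pointer at the given
 * level spans.  Worked example (assuming 128K data blocks, datablkszsec ==
 * 256, and 128K indirect blocks, indblkshift == 17): a level-0 bp spans
 * 256 << 9 == 128K, and each indirect block holds 1 << (17 - 7) == 1024
 * block pointers, so a level-1 bp spans 128K * 1024 == 128M.
 */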
69 #define	BP_SPAN(datablkszsec, indblkshift, level) \
70 	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
71 	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
72 
73 static void byteswap_record(dmu_replay_record_t *drr);
74 
75 struct send_thread_arg {
76 	bqueue_t	q;
77 	dsl_dataset_t	*ds;		/* Dataset to traverse */
78 	uint64_t	fromtxg;	/* Traverse from this txg */
79 	int		flags;		/* flags to pass to traverse_dataset */
80 	int		error_code;
81 	boolean_t	cancel;
82 	zbookmark_phys_t resume;
83 };
84 
85 struct send_block_record {
86 	boolean_t		eos_marker; /* Marks the end of the stream */
87 	blkptr_t		bp;
88 	zbookmark_phys_t	zb;
89 	uint8_t			indblkshift;
90 	uint16_t		datablkszsec;
91 	bqueue_node_t		ln;
92 };
93 
94 static int
95 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
96 {
97 	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
98 	ssize_t resid; /* have to get resid to get detailed errno */
99 
100 	/*
101 	 * The code does not rely on this (len being a multiple of 8).  We keep
102 	 * this assertion because of the corresponding assertion in
103 	 * receive_read().  Keeping this assertion ensures that we do not
104 	 * inadvertently break backwards compatibility (causing the assertion
105 	 * in receive_read() to trigger on old software).
106 	 *
107 	 * Removing the assertions could be rolled into a new feature that uses
108 	 * data that isn't 8-byte aligned; if the assertions were removed, a
109 	 * feature flag would have to be added.
110 	 */
111 
112 	ASSERT0(len % 8);
113 
114 	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
115 	    (caddr_t)buf, len,
116 	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
117 
118 	mutex_enter(&ds->ds_sendstream_lock);
119 	*dsp->dsa_off += len;
120 	mutex_exit(&ds->ds_sendstream_lock);
121 
122 	return (dsp->dsa_err);
123 }
124 
125 /*
126  * For all record types except BEGIN, fill in the checksum (overlaid in
127  * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
128  * up to the start of the checksum itself.
129  */
130 static int
131 dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
132 {
133 	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
134 	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
135 	fletcher_4_incremental_native(dsp->dsa_drr,
136 	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
137 	    &dsp->dsa_zc);
138 	if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
139 		dsp->dsa_sent_begin = B_TRUE;
140 	} else {
141 		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
142 		    drr_checksum.drr_checksum));
143 		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
144 	}
145 	if (dsp->dsa_drr->drr_type == DRR_END) {
146 		dsp->dsa_sent_end = B_TRUE;
147 	}
148 	fletcher_4_incremental_native(&dsp->dsa_drr->
149 	    drr_u.drr_checksum.drr_checksum,
150 	    sizeof (zio_cksum_t), &dsp->dsa_zc);
151 	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
152 		return (SET_ERROR(EINTR));
153 	if (payload_len != 0) {
154 		fletcher_4_incremental_native(payload, payload_len,
155 		    &dsp->dsa_zc);
156 		if (dump_bytes(dsp, payload, payload_len) != 0)
157 			return (SET_ERROR(EINTR));
158 	}
159 	return (0);
160 }
161 
162 /*
163  * Fill in the drr_free struct, or perform aggregation if the previous record
164  * is also a free record, and the two are adjacent.
165  *
166  * Note that we send free records even for a full send, because we want to be
167  * able to receive a full send as a clone, which requires a list of all the free
168  * and freeobject records that were generated on the source.
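 *
 * For example (illustrative): a free of (object 5, offset 0, length 128K)
 * followed by one of (object 5, offset 128K, length 128K) is coalesced into
 * a single pending DRR_FREE covering offsets 0-256K, which is only pushed
 * out once a record arrives that can't be aggregated with it.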
169  */
170 static int
171 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
172     uint64_t length)
173 {
174 	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
175 
176 	/*
177 	 * When we receive a free record, dbuf_free_range() assumes
178 	 * that the receiving system doesn't have any dbufs in the range
179 	 * being freed.  This is always true because there is a one-record
180 	 * constraint: we only send one WRITE record for any given
181 	 * object,offset.  We know that the one-record constraint is
182 	 * true because we always send data in increasing order by
183 	 * object,offset.
184 	 *
185 	 * If the increasing-order constraint ever changes, we should find
186 	 * another way to assert that the one-record constraint is still
187 	 * satisfied.
188 	 */
189 	ASSERT(object > dsp->dsa_last_data_object ||
190 	    (object == dsp->dsa_last_data_object &&
191 	    offset > dsp->dsa_last_data_offset));
192 
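	/*
	 * A length of -1ULL (UINT64_MAX) means "free to the end of the
	 * object"; if offset + length would overflow, clamp to that.
	 */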
193 	if (length != -1ULL && offset + length < offset)
194 		length = -1ULL;
195 
196 	/*
197 	 * If there is a pending op, but it's not PENDING_FREE, push it out,
198 	 * since free block aggregation can only be done for blocks of the
199 	 * same type (i.e., DRR_FREE records can only be aggregated with
200 	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
201 	 * aggregated with other DRR_FREEOBJECTS records).
202 	 */
203 	if (dsp->dsa_pending_op != PENDING_NONE &&
204 	    dsp->dsa_pending_op != PENDING_FREE) {
205 		if (dump_record(dsp, NULL, 0) != 0)
206 			return (SET_ERROR(EINTR));
207 		dsp->dsa_pending_op = PENDING_NONE;
208 	}
209 
210 	if (dsp->dsa_pending_op == PENDING_FREE) {
211 		/*
212 		 * There should never be a PENDING_FREE if length is -1
213 		 * (because dump_dnode is the only place where this
214 		 * function is called with a -1, and only after flushing
215 		 * any pending record).
216 		 */
217 		ASSERT(length != -1ULL);
218 		/*
219 		 * Check to see whether this free block can be aggregated
220 		 * with the pending one.
221 		 */
222 		if (drrf->drr_object == object && drrf->drr_offset +
223 		    drrf->drr_length == offset) {
224 			drrf->drr_length += length;
225 			return (0);
226 		} else {
227 			/* not a continuation.  Push out pending record */
228 			if (dump_record(dsp, NULL, 0) != 0)
229 				return (SET_ERROR(EINTR));
230 			dsp->dsa_pending_op = PENDING_NONE;
231 		}
232 	}
233 	/* create a FREE record and make it pending */
234 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
235 	dsp->dsa_drr->drr_type = DRR_FREE;
236 	drrf->drr_object = object;
237 	drrf->drr_offset = offset;
238 	drrf->drr_length = length;
239 	drrf->drr_toguid = dsp->dsa_toguid;
240 	if (length == -1ULL) {
241 		if (dump_record(dsp, NULL, 0) != 0)
242 			return (SET_ERROR(EINTR));
243 	} else {
244 		dsp->dsa_pending_op = PENDING_FREE;
245 	}
246 
247 	return (0);
248 }
249 
250 static int
251 dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
252     uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
253 {
254 	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
255 
256 	/*
257 	 * We send data in increasing object, offset order.
258 	 * See comment in dump_free() for details.
259 	 */
260 	ASSERT(object > dsp->dsa_last_data_object ||
261 	    (object == dsp->dsa_last_data_object &&
262 	    offset > dsp->dsa_last_data_offset));
263 	dsp->dsa_last_data_object = object;
264 	dsp->dsa_last_data_offset = offset + blksz - 1;
265 
266 	/*
267 	 * If there is any kind of pending aggregation (currently either
268 	 * a grouping of free objects or free blocks), push it out to
269 	 * the stream, since aggregation can't be done across operations
270 	 * of different types.
271 	 */
272 	if (dsp->dsa_pending_op != PENDING_NONE) {
273 		if (dump_record(dsp, NULL, 0) != 0)
274 			return (SET_ERROR(EINTR));
275 		dsp->dsa_pending_op = PENDING_NONE;
276 	}
277 	/* write a WRITE record */
278 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
279 	dsp->dsa_drr->drr_type = DRR_WRITE;
280 	drrw->drr_object = object;
281 	drrw->drr_type = type;
282 	drrw->drr_offset = offset;
283 	drrw->drr_length = blksz;
284 	drrw->drr_toguid = dsp->dsa_toguid;
285 	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
286 		/*
287 		 * There's no pre-computed checksum for partial-block
288 		 * writes or embedded BPs, so (like
289 		 * fletcher4-checksummed blocks) userland will have to
290 		 * compute a dedup-capable checksum itself.
291 		 */
292 		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
293 	} else {
294 		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
295 		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
296 		    ZCHECKSUM_FLAG_DEDUP)
297 			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
298 		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
299 		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
300 		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
301 		drrw->drr_key.ddk_cksum = bp->blk_cksum;
302 	}
303 
304 	if (dump_record(dsp, data, blksz) != 0)
305 		return (SET_ERROR(EINTR));
306 	return (0);
307 }
308 
309 static int
310 dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
311     int blksz, const blkptr_t *bp)
312 {
313 	char buf[BPE_PAYLOAD_SIZE];
314 	struct drr_write_embedded *drrw =
315 	    &(dsp->dsa_drr->drr_u.drr_write_embedded);
316 
317 	if (dsp->dsa_pending_op != PENDING_NONE) {
318 		if (dump_record(dsp, NULL, 0) != 0)
319 			return (EINTR);
320 		dsp->dsa_pending_op = PENDING_NONE;
321 	}
322 
323 	ASSERT(BP_IS_EMBEDDED(bp));
324 
325 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
326 	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
327 	drrw->drr_object = object;
328 	drrw->drr_offset = offset;
329 	drrw->drr_length = blksz;
330 	drrw->drr_toguid = dsp->dsa_toguid;
331 	drrw->drr_compression = BP_GET_COMPRESS(bp);
332 	drrw->drr_etype = BPE_GET_ETYPE(bp);
333 	drrw->drr_lsize = BPE_GET_LSIZE(bp);
334 	drrw->drr_psize = BPE_GET_PSIZE(bp);
335 
336 	decode_embedded_bp_compressed(bp, buf);
337 
338 	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
339 		return (EINTR);
340 	return (0);
341 }
342 
343 static int
344 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
345 {
346 	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
347 
348 	if (dsp->dsa_pending_op != PENDING_NONE) {
349 		if (dump_record(dsp, NULL, 0) != 0)
350 			return (SET_ERROR(EINTR));
351 		dsp->dsa_pending_op = PENDING_NONE;
352 	}
353 
354 	/* write a SPILL record */
355 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
356 	dsp->dsa_drr->drr_type = DRR_SPILL;
357 	drrs->drr_object = object;
358 	drrs->drr_length = blksz;
359 	drrs->drr_toguid = dsp->dsa_toguid;
360 
361 	if (dump_record(dsp, data, blksz) != 0)
362 		return (SET_ERROR(EINTR));
363 	return (0);
364 }
365 
366 static int
367 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
368 {
369 	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
370 
371 	/*
372 	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
373 	 * push it out, since free block aggregation can only be done for
374 	 * blocks of the same type (i.e., DRR_FREE records can only be
375 	 * aggregated with other DRR_FREE records.  DRR_FREEOBJECTS records
376 	 * can only be aggregated with other DRR_FREEOBJECTS records.
377 	 */
378 	if (dsp->dsa_pending_op != PENDING_NONE &&
379 	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
380 		if (dump_record(dsp, NULL, 0) != 0)
381 			return (SET_ERROR(EINTR));
382 		dsp->dsa_pending_op = PENDING_NONE;
383 	}
384 	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
385 		/*
386 		 * See whether this free object array can be aggregated
387 		 * with the pending one.
388 		 */
389 		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
390 			drrfo->drr_numobjs += numobjs;
391 			return (0);
392 		} else {
393 			/* can't be aggregated.  Push out pending record */
394 			if (dump_record(dsp, NULL, 0) != 0)
395 				return (SET_ERROR(EINTR));
396 			dsp->dsa_pending_op = PENDING_NONE;
397 		}
398 	}
399 
400 	/* write a FREEOBJECTS record */
401 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
402 	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
403 	drrfo->drr_firstobj = firstobj;
404 	drrfo->drr_numobjs = numobjs;
405 	drrfo->drr_toguid = dsp->dsa_toguid;
406 
407 	dsp->dsa_pending_op = PENDING_FREEOBJECTS;
408 
409 	return (0);
410 }
411 
412 static int
413 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
414 {
415 	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
416 
417 	if (object < dsp->dsa_resume_object) {
418 		/*
419 		 * Note: when resuming, we will visit all the dnodes in
420 		 * the block of dnodes that we are resuming from.  In
421 		 * this case it's unnecessary to send the dnodes prior to
422 		 * the one we are resuming from.  We should be at most one
423 		 * block's worth of dnodes behind the resume point.
424 		 */
425 		ASSERT3U(dsp->dsa_resume_object - object, <,
426 		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
427 		return (0);
428 	}
429 
430 	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
431 		return (dump_freeobjects(dsp, object, 1));
432 
433 	if (dsp->dsa_pending_op != PENDING_NONE) {
434 		if (dump_record(dsp, NULL, 0) != 0)
435 			return (SET_ERROR(EINTR));
436 		dsp->dsa_pending_op = PENDING_NONE;
437 	}
438 
439 	/* write an OBJECT record */
440 	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
441 	dsp->dsa_drr->drr_type = DRR_OBJECT;
442 	drro->drr_object = object;
443 	drro->drr_type = dnp->dn_type;
444 	drro->drr_bonustype = dnp->dn_bonustype;
445 	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
446 	drro->drr_bonuslen = dnp->dn_bonuslen;
447 	drro->drr_checksumtype = dnp->dn_checksum;
448 	drro->drr_compress = dnp->dn_compress;
449 	drro->drr_toguid = dsp->dsa_toguid;
450 
451 	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
452 	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
453 		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
454 
455 	if (dump_record(dsp, DN_BONUS(dnp),
456 	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
457 		return (SET_ERROR(EINTR));
458 	}
459 
460 	/* Free anything past the end of the file. */
461 	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
462 	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
463 		return (SET_ERROR(EINTR));
464 	if (dsp->dsa_err != 0)
465 		return (SET_ERROR(EINTR));
466 	return (0);
467 }
468 
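/*
 * Returns B_TRUE if this block pointer's data can be sent as a
 * DRR_WRITE_EMBEDDED record, i.e. the bp is embedded and the stream's
 * feature flags permit both its compression function and its embed type.
 */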
469 static boolean_t
470 backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
471 {
472 	if (!BP_IS_EMBEDDED(bp))
473 		return (B_FALSE);
474 
475 	/*
476 	 * Compression function must be legacy, or explicitly enabled.
477 	 */
478 	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
479 	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
480 		return (B_FALSE);
481 
482 	/*
483 	 * Embed type must be explicitly enabled.
484 	 */
485 	switch (BPE_GET_ETYPE(bp)) {
486 	case BP_EMBEDDED_TYPE_DATA:
487 		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
488 			return (B_TRUE);
489 		break;
490 	default:
491 		return (B_FALSE);
492 	}
493 	return (B_FALSE);
494 }
495 
496 /*
497  * This is the callback function for traverse_dataset; it runs in the worker
498  * thread for dmu_send_impl, enqueuing each block for the main thread to dump.
499  */
500 /*ARGSUSED*/
501 static int
502 send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
503     const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
504 {
505 	struct send_thread_arg *sta = arg;
506 	struct send_block_record *record;
507 	uint64_t record_size;
508 	int err = 0;
509 
510 	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
511 	    zb->zb_object >= sta->resume.zb_object);
512 
513 	if (sta->cancel)
514 		return (SET_ERROR(EINTR));
515 
516 	if (bp == NULL) {
517 		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
518 		return (0);
519 	} else if (zb->zb_level < 0) {
520 		return (0);
521 	}
522 
523 	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
524 	record->eos_marker = B_FALSE;
525 	record->bp = *bp;
526 	record->zb = *zb;
527 	record->indblkshift = dnp->dn_indblkshift;
528 	record->datablkszsec = dnp->dn_datablkszsec;
529 	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
530 	bqueue_enqueue(&sta->q, record, record_size);
531 
532 	return (err);
533 }
534 
535 /*
536  * This function kicks off the traverse_dataset.  It also handles setting the
537  * error code of the thread in case something goes wrong, and pushes the End of
538  * Stream record when the traverse_dataset call has finished.  If there is no
539  * dataset to traverse, the thread immediately pushes the End of Stream marker.
540  */
541 static void
542 send_traverse_thread(void *arg)
543 {
544 	struct send_thread_arg *st_arg = arg;
545 	int err;
546 	struct send_block_record *data;
547 
548 	if (st_arg->ds != NULL) {
549 		err = traverse_dataset_resume(st_arg->ds,
550 		    st_arg->fromtxg, &st_arg->resume,
551 		    st_arg->flags, send_cb, st_arg);
552 
553 		if (err != EINTR)
554 			st_arg->error_code = err;
555 	}
556 	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
557 	data->eos_marker = B_TRUE;
558 	bqueue_enqueue(&st_arg->q, data, 1);
559 }
560 
561 /*
562  * This function actually handles figuring out what kind of record needs to be
563  * dumped, reading the data (which has hopefully been prefetched), and calling
564  * the appropriate helper function.
565  */
566 static int
567 do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
568 {
569 	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
570 	const blkptr_t *bp = &data->bp;
571 	const zbookmark_phys_t *zb = &data->zb;
572 	uint8_t indblkshift = data->indblkshift;
573 	uint16_t dblkszsec = data->datablkszsec;
574 	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
575 	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
576 	int err = 0;
577 
578 	ASSERT3U(zb->zb_level, >=, 0);
579 
580 	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
581 	    zb->zb_object >= dsa->dsa_resume_object);
582 
583 	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
584 	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
585 		return (0);
586 	} else if (BP_IS_HOLE(bp) &&
587 	    zb->zb_object == DMU_META_DNODE_OBJECT) {
588 		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
589 		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
590 		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
591 	} else if (BP_IS_HOLE(bp)) {
592 		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
593 		uint64_t offset = zb->zb_blkid * span;
594 		err = dump_free(dsa, zb->zb_object, offset, span);
595 	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
596 		return (0);
597 	} else if (type == DMU_OT_DNODE) {
598 		int blksz = BP_GET_LSIZE(bp);
599 		arc_flags_t aflags = ARC_FLAG_WAIT;
600 		arc_buf_t *abuf;
601 
602 		ASSERT0(zb->zb_level);
603 
604 		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
605 		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
606 		    &aflags, zb) != 0)
607 			return (SET_ERROR(EIO));
608 
609 		dnode_phys_t *blk = abuf->b_data;
610 		uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
611 		for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
612 			err = dump_dnode(dsa, dnobj + i, blk + i);
613 			if (err != 0)
614 				break;
615 		}
616 		arc_buf_destroy(abuf, &abuf);
617 	} else if (type == DMU_OT_SA) {
618 		arc_flags_t aflags = ARC_FLAG_WAIT;
619 		arc_buf_t *abuf;
620 		int blksz = BP_GET_LSIZE(bp);
621 
622 		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
623 		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
624 		    &aflags, zb) != 0)
625 			return (SET_ERROR(EIO));
626 
627 		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
628 		arc_buf_destroy(abuf, &abuf);
629 	} else if (backup_do_embed(dsa, bp)) {
630 		/* it's an embedded level-0 block of a regular object */
631 		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
632 		ASSERT0(zb->zb_level);
633 		err = dump_write_embedded(dsa, zb->zb_object,
634 		    zb->zb_blkid * blksz, blksz, bp);
635 	} else {
636 		/* it's a level-0 block of a regular object */
637 		arc_flags_t aflags = ARC_FLAG_WAIT;
638 		arc_buf_t *abuf;
639 		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
640 		uint64_t offset;
641 
642 		ASSERT0(zb->zb_level);
643 		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
644 		    (zb->zb_object == dsa->dsa_resume_object &&
645 		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));
646 
647 		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
648 		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
649 		    &aflags, zb) != 0) {
650 			if (zfs_send_corrupt_data) {
651 				/* Send a block filled with 0x"zfs badd bloc" */
652 				abuf = arc_alloc_buf(spa, blksz, &abuf,
653 				    ARC_BUFC_DATA);
654 				uint64_t *ptr;
655 				for (ptr = abuf->b_data;
656 				    (char *)ptr < (char *)abuf->b_data + blksz;
657 				    ptr++)
658 					*ptr = 0x2f5baddb10cULL;
659 			} else {
660 				return (SET_ERROR(EIO));
661 			}
662 		}
663 
664 		offset = zb->zb_blkid * blksz;
665 
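		/*
		 * If the stream doesn't advertise LARGE_BLOCKS, the receiver
		 * can't accept blocks larger than SPA_OLD_MAXBLOCKSIZE (128K),
		 * so split this block into 128K writes.  There is no per-chunk
		 * block pointer, so these writes carry no precomputed checksum
		 * (bp == NULL).
		 */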
666 		if (!(dsa->dsa_featureflags &
667 		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
668 		    blksz > SPA_OLD_MAXBLOCKSIZE) {
669 			char *buf = abuf->b_data;
670 			while (blksz > 0 && err == 0) {
671 				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
672 				err = dump_write(dsa, type, zb->zb_object,
673 				    offset, n, NULL, buf);
674 				offset += n;
675 				buf += n;
676 				blksz -= n;
677 			}
678 		} else {
679 			err = dump_write(dsa, type, zb->zb_object,
680 			    offset, blksz, bp, abuf->b_data);
681 		}
682 		arc_buf_destroy(abuf, &abuf);
683 	}
684 
685 	ASSERT(err == 0 || err == EINTR);
686 	return (err);
687 }
688 
689 /*
690  * Pop the new data off the queue, and free the old data.
691  */
692 static struct send_block_record *
693 get_next_record(bqueue_t *bq, struct send_block_record *data)
694 {
695 	struct send_block_record *tmp = bqueue_dequeue(bq);
696 	kmem_free(data, sizeof (*data));
697 	return (tmp);
698 }
699 
700 /*
701  * Actually do the bulk of the work in a zfs send.
702  *
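 * The send is structured as a two-thread pipeline: send_traverse_thread()
 * walks the dataset and enqueues a send_block_record for each block onto a
 * bqueue, while this thread dequeues those records and converts each one
 * into stream records via do_dump().
 *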
703  * Note: Releases dp using the specified tag.
704  */
705 static int
706 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
707     zfs_bookmark_phys_t *ancestor_zb,
708     boolean_t is_clone, boolean_t embedok, boolean_t large_block_ok, int outfd,
709     uint64_t resumeobj, uint64_t resumeoff,
710     vnode_t *vp, offset_t *off)
711 {
712 	objset_t *os;
713 	dmu_replay_record_t *drr;
714 	dmu_sendarg_t *dsp;
715 	int err;
716 	uint64_t fromtxg = 0;
717 	uint64_t featureflags = 0;
718 	struct send_thread_arg to_arg = { 0 };
719 
720 	err = dmu_objset_from_ds(to_ds, &os);
721 	if (err != 0) {
722 		dsl_pool_rele(dp, tag);
723 		return (err);
724 	}
725 
726 	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
727 	drr->drr_type = DRR_BEGIN;
728 	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
729 	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
730 	    DMU_SUBSTREAM);
731 
732 #ifdef _KERNEL
733 	if (dmu_objset_type(os) == DMU_OST_ZFS) {
734 		uint64_t version;
735 		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
736 			kmem_free(drr, sizeof (dmu_replay_record_t));
737 			dsl_pool_rele(dp, tag);
738 			return (SET_ERROR(EINVAL));
739 		}
740 		if (version >= ZPL_VERSION_SA) {
741 			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
742 		}
743 	}
744 #endif
745 
746 	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
747 		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
748 	if (embedok &&
749 	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
750 		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
751 		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
752 			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
753 	}
754 
755 	if (resumeobj != 0 || resumeoff != 0) {
756 		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
757 	}
758 
759 	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
760 	    featureflags);
761 
762 	drr->drr_u.drr_begin.drr_creation_time =
763 	    dsl_dataset_phys(to_ds)->ds_creation_time;
764 	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
765 	if (is_clone)
766 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
767 	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
768 	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
769 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
770 	if (zfs_send_set_freerecords_bit)
771 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;
772 
773 	if (ancestor_zb != NULL) {
774 		drr->drr_u.drr_begin.drr_fromguid =
775 		    ancestor_zb->zbm_guid;
776 		fromtxg = ancestor_zb->zbm_creation_txg;
777 	}
778 	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
779 	if (!to_ds->ds_is_snapshot) {
780 		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
781 		    sizeof (drr->drr_u.drr_begin.drr_toname));
782 	}
783 
784 	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
785 
786 	dsp->dsa_drr = drr;
787 	dsp->dsa_vp = vp;
788 	dsp->dsa_outfd = outfd;
789 	dsp->dsa_proc = curproc;
790 	dsp->dsa_os = os;
791 	dsp->dsa_off = off;
792 	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
793 	dsp->dsa_pending_op = PENDING_NONE;
794 	dsp->dsa_featureflags = featureflags;
795 	dsp->dsa_resume_object = resumeobj;
796 	dsp->dsa_resume_offset = resumeoff;
797 
798 	mutex_enter(&to_ds->ds_sendstream_lock);
799 	list_insert_head(&to_ds->ds_sendstreams, dsp);
800 	mutex_exit(&to_ds->ds_sendstream_lock);
801 
802 	dsl_dataset_long_hold(to_ds, FTAG);
803 	dsl_pool_rele(dp, tag);
804 
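	/*
	 * For a resumed send, attach a packed nvlist payload to the BEGIN
	 * record identifying the resume point (resume_object, resume_offset).
	 */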
805 	void *payload = NULL;
806 	size_t payload_len = 0;
807 	if (resumeobj != 0 || resumeoff != 0) {
808 		dmu_object_info_t to_doi;
809 		err = dmu_object_info(os, resumeobj, &to_doi);
810 		if (err != 0)
811 			goto out;
812 		SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
813 		    resumeoff / to_doi.doi_data_block_size);
814 
815 		nvlist_t *nvl = fnvlist_alloc();
816 		fnvlist_add_uint64(nvl, "resume_object", resumeobj);
817 		fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
818 		payload = fnvlist_pack(nvl, &payload_len);
819 		drr->drr_payloadlen = payload_len;
820 		fnvlist_free(nvl);
821 	}
822 
823 	err = dump_record(dsp, payload, payload_len);
824 	fnvlist_pack_free(payload, payload_len);
825 	if (err != 0) {
826 		err = dsp->dsa_err;
827 		goto out;
828 	}
829 
830 	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
831 	    offsetof(struct send_block_record, ln));
832 	to_arg.error_code = 0;
833 	to_arg.cancel = B_FALSE;
834 	to_arg.ds = to_ds;
835 	to_arg.fromtxg = fromtxg;
836 	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
837 	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
838 	    TS_RUN, minclsyspri);
839 
840 	struct send_block_record *to_data;
841 	to_data = bqueue_dequeue(&to_arg.q);
842 
843 	while (!to_data->eos_marker && err == 0) {
844 		err = do_dump(dsp, to_data);
845 		to_data = get_next_record(&to_arg.q, to_data);
846 		if (issig(JUSTLOOKING) && issig(FORREAL))
847 			err = EINTR;
848 	}
849 
850 	if (err != 0) {
851 		to_arg.cancel = B_TRUE;
852 		while (!to_data->eos_marker) {
853 			to_data = get_next_record(&to_arg.q, to_data);
854 		}
855 	}
856 	kmem_free(to_data, sizeof (*to_data));
857 
858 	bqueue_destroy(&to_arg.q);
859 
860 	if (err == 0 && to_arg.error_code != 0)
861 		err = to_arg.error_code;
862 
863 	if (err != 0)
864 		goto out;
865 
866 	if (dsp->dsa_pending_op != PENDING_NONE)
867 		if (dump_record(dsp, NULL, 0) != 0)
868 			err = SET_ERROR(EINTR);
869 
870 	if (err != 0) {
871 		if (err == EINTR && dsp->dsa_err != 0)
872 			err = dsp->dsa_err;
873 		goto out;
874 	}
875 
876 	bzero(drr, sizeof (dmu_replay_record_t));
877 	drr->drr_type = DRR_END;
878 	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
879 	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
880 
881 	if (dump_record(dsp, NULL, 0) != 0)
882 		err = dsp->dsa_err;
883 
884 out:
885 	mutex_enter(&to_ds->ds_sendstream_lock);
886 	list_remove(&to_ds->ds_sendstreams, dsp);
887 	mutex_exit(&to_ds->ds_sendstream_lock);
888 
889 	VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));
890 
891 	kmem_free(drr, sizeof (dmu_replay_record_t));
892 	kmem_free(dsp, sizeof (dmu_sendarg_t));
893 
894 	dsl_dataset_long_rele(to_ds, FTAG);
895 
896 	return (err);
897 }
898 
899 int
900 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
901     boolean_t embedok, boolean_t large_block_ok,
902     int outfd, vnode_t *vp, offset_t *off)
903 {
904 	dsl_pool_t *dp;
905 	dsl_dataset_t *ds;
906 	dsl_dataset_t *fromds = NULL;
907 	int err;
908 
909 	err = dsl_pool_hold(pool, FTAG, &dp);
910 	if (err != 0)
911 		return (err);
912 
913 	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
914 	if (err != 0) {
915 		dsl_pool_rele(dp, FTAG);
916 		return (err);
917 	}
918 
919 	if (fromsnap != 0) {
920 		zfs_bookmark_phys_t zb;
921 		boolean_t is_clone;
922 
923 		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
924 		if (err != 0) {
925 			dsl_dataset_rele(ds, FTAG);
926 			dsl_pool_rele(dp, FTAG);
927 			return (err);
928 		}
929 		if (!dsl_dataset_is_before(ds, fromds, 0))
930 			err = SET_ERROR(EXDEV);
931 		zb.zbm_creation_time =
932 		    dsl_dataset_phys(fromds)->ds_creation_time;
933 		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
934 		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
935 		is_clone = (fromds->ds_dir != ds->ds_dir);
936 		dsl_dataset_rele(fromds, FTAG);
937 		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
938 		    embedok, large_block_ok, outfd, 0, 0, vp, off);
939 	} else {
940 		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
941 		    embedok, large_block_ok, outfd, 0, 0, vp, off);
942 	}
943 	dsl_dataset_rele(ds, FTAG);
944 	return (err);
945 }
946 
947 int
948 dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
949     boolean_t large_block_ok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
950     vnode_t *vp, offset_t *off)
951 {
952 	dsl_pool_t *dp;
953 	dsl_dataset_t *ds;
954 	int err;
955 	boolean_t owned = B_FALSE;
956 
957 	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
958 		return (SET_ERROR(EINVAL));
959 
960 	err = dsl_pool_hold(tosnap, FTAG, &dp);
961 	if (err != 0)
962 		return (err);
963 
964 	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
965 		/*
966 		 * We are sending a filesystem or volume.  Ensure
967 		 * that it doesn't change by owning the dataset.
968 		 */
969 		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
970 		owned = B_TRUE;
971 	} else {
972 		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
973 	}
974 	if (err != 0) {
975 		dsl_pool_rele(dp, FTAG);
976 		return (err);
977 	}
978 
979 	if (fromsnap != NULL) {
980 		zfs_bookmark_phys_t zb;
981 		boolean_t is_clone = B_FALSE;
982 		int fsnamelen = strchr(tosnap, '@') - tosnap;
983 
984 		/*
985 		 * If the fromsnap is in a different filesystem, then
986 		 * mark the send stream as a clone.
987 		 */
988 		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
989 		    (fromsnap[fsnamelen] != '@' &&
990 		    fromsnap[fsnamelen] != '#')) {
991 			is_clone = B_TRUE;
992 		}
993 
994 		if (strchr(fromsnap, '@')) {
995 			dsl_dataset_t *fromds;
996 			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
997 			if (err == 0) {
998 				if (!dsl_dataset_is_before(ds, fromds, 0))
999 					err = SET_ERROR(EXDEV);
1000 				zb.zbm_creation_time =
1001 				    dsl_dataset_phys(fromds)->ds_creation_time;
1002 				zb.zbm_creation_txg =
1003 				    dsl_dataset_phys(fromds)->ds_creation_txg;
1004 				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
1005 				is_clone = (ds->ds_dir != fromds->ds_dir);
1006 				dsl_dataset_rele(fromds, FTAG);
1007 			}
1008 		} else {
1009 			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
1010 		}
1011 		if (err != 0) {
1012 			dsl_dataset_rele(ds, FTAG);
1013 			dsl_pool_rele(dp, FTAG);
1014 			return (err);
1015 		}
1016 		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
1017 		    embedok, large_block_ok,
1018 		    outfd, resumeobj, resumeoff, vp, off);
1019 	} else {
1020 		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
1021 		    embedok, large_block_ok,
1022 		    outfd, resumeobj, resumeoff, vp, off);
1023 	}
1024 	if (owned)
1025 		dsl_dataset_disown(ds, FTAG);
1026 	else
1027 		dsl_dataset_rele(ds, FTAG);
1028 	return (err);
1029 }
1030 
1031 static int
1032 dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
1033     uint64_t *sizep)
1034 {
1035 	int err;
1036 	/*
1037 	 * Assume that space (both on-disk and in-stream) is dominated by
1038 	 * data.  We will adjust for indirect blocks and the copies property,
1039 	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
1040 	 */
1041 
1042 	/*
1043 	 * Subtract out approximate space used by indirect blocks.
1044 	 * Assume most space is used by data blocks (non-indirect, non-dnode).
1045 	 * Assume all blocks are recordsize.  Assume ditto blocks and
1046 	 * internal fragmentation cancel out compression.
1047 	 *
1048 	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
1049 	 * block, which we observe in practice.
1050 	 */
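	/*
	 * Rough illustration (assuming a 128K recordsize): each 128K of data
	 * costs one 128-byte blkptr_t of indirect space (subtracted below)
	 * and one dmu_replay_record_t of stream framing (added below), so the
	 * net adjustment is a fraction of a percent of the data size.
	 */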
1051 	uint64_t recordsize;
1052 	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
1053 	if (err != 0)
1054 		return (err);
1055 	size -= size / recordsize * sizeof (blkptr_t);
1056 
1057 	/* Add in the space for the record associated with each block. */
1058 	size += size / recordsize * sizeof (dmu_replay_record_t);
1059 
1060 	*sizep = size;
1061 
1062 	return (0);
1063 }
1064 
1065 int
1066 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
1067 {
1068 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1069 	int err;
1070 	uint64_t size;
1071 
1072 	ASSERT(dsl_pool_config_held(dp));
1073 
1074 	/* tosnap must be a snapshot */
1075 	if (!ds->ds_is_snapshot)
1076 		return (SET_ERROR(EINVAL));
1077 
1078 	/* fromsnap, if provided, must be a snapshot */
1079 	if (fromds != NULL && !fromds->ds_is_snapshot)
1080 		return (SET_ERROR(EINVAL));
1081 
1082 	/*
1083 	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
1084 	 * or the origin's fs.
1085 	 */
1086 	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
1087 		return (SET_ERROR(EXDEV));
1088 
1089 	/* Get uncompressed size estimate of changed data. */
1090 	if (fromds == NULL) {
1091 		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
1092 	} else {
1093 		uint64_t used, comp;
1094 		err = dsl_dataset_space_written(fromds, ds,
1095 		    &used, &comp, &size);
1096 		if (err != 0)
1097 			return (err);
1098 	}
1099 
1100 	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
1101 	return (err);
1102 }
1103 
1104 /*
1105  * Simple callback used to traverse the blocks of a snapshot and sum their
1106  * uncompressed size.
1107  */
1108 /* ARGSUSED */
1109 static int
1110 dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1111     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1112 {
1113 	uint64_t *spaceptr = arg;
1114 	if (bp != NULL && !BP_IS_HOLE(bp)) {
1115 		*spaceptr += BP_GET_UCSIZE(bp);
1116 	}
1117 	return (0);
1118 }
1119 
1120 /*
1121  * Given a destination snapshot and a TXG, calculate the approximate size of a
1122  * send stream sent from that TXG. from_txg may be zero, indicating that the
1123  * whole snapshot will be sent.
1124  */
1125 int
1126 dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
1127     uint64_t *sizep)
1128 {
1129 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1130 	int err;
1131 	uint64_t size = 0;
1132 
1133 	ASSERT(dsl_pool_config_held(dp));
1134 
1135 	/* tosnap must be a snapshot */
1136 	if (!dsl_dataset_is_snapshot(ds))
1137 		return (SET_ERROR(EINVAL));
1138 
1139 	/* verify that from_txg is before the provided snapshot was taken */
1140 	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
1141 		return (SET_ERROR(EXDEV));
1142 	}
1143 
1144 	/*
1145 	 * traverse the blocks of the snapshot with birth times after
1146 	 * from_txg, summing their uncompressed size
1147 	 */
1148 	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
1149 	    dmu_calculate_send_traversal, &size);
1150 	if (err)
1151 		return (err);
1152 
1153 	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
1154 	return (err);
1155 }
1156 
1157 typedef struct dmu_recv_begin_arg {
1158 	const char *drba_origin;
1159 	dmu_recv_cookie_t *drba_cookie;
1160 	cred_t *drba_cred;
1161 	uint64_t drba_snapobj;
1162 } dmu_recv_begin_arg_t;
1163 
1164 static int
1165 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
1166     uint64_t fromguid)
1167 {
1168 	uint64_t val;
1169 	int error;
1170 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1171 
1172 	/* temporary clone name must not exist */
1173 	error = zap_lookup(dp->dp_meta_objset,
1174 	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
1175 	    8, 1, &val);
1176 	if (error != ENOENT)
1177 		return (error == 0 ? EBUSY : error);
1178 
1179 	/* new snapshot name must not exist */
1180 	error = zap_lookup(dp->dp_meta_objset,
1181 	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
1182 	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
1183 	if (error != ENOENT)
1184 		return (error == 0 ? EEXIST : error);
1185 
1186 	/*
1187 	 * Check snapshot limit before receiving. We'll recheck at the
1188 	 * end, but might as well abort before receiving if we're already over
1189 	 * the limit.
1190 	 *
1191 	 * Note that we do not check the file system limit with
1192 	 * dsl_dir_fscount_check because the temporary %clones don't count
1193 	 * against that limit.
1194 	 */
1195 	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
1196 	    NULL, drba->drba_cred);
1197 	if (error != 0)
1198 		return (error);
1199 
1200 	if (fromguid != 0) {
1201 		dsl_dataset_t *snap;
1202 		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
1203 
1204 		/* Find snapshot in this dir that matches fromguid. */
1205 		while (obj != 0) {
1206 			error = dsl_dataset_hold_obj(dp, obj, FTAG,
1207 			    &snap);
1208 			if (error != 0)
1209 				return (SET_ERROR(ENODEV));
1210 			if (snap->ds_dir != ds->ds_dir) {
1211 				dsl_dataset_rele(snap, FTAG);
1212 				return (SET_ERROR(ENODEV));
1213 			}
1214 			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
1215 				break;
1216 			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
1217 			dsl_dataset_rele(snap, FTAG);
1218 		}
1219 		if (obj == 0)
1220 			return (SET_ERROR(ENODEV));
1221 
1222 		if (drba->drba_cookie->drc_force) {
1223 			drba->drba_snapobj = obj;
1224 		} else {
1225 			/*
1226 			 * If we are not forcing, there must be no
1227 			 * changes since fromsnap.
1228 			 */
1229 			if (dsl_dataset_modified_since_snap(ds, snap)) {
1230 				dsl_dataset_rele(snap, FTAG);
1231 				return (SET_ERROR(ETXTBSY));
1232 			}
1233 			drba->drba_snapobj = ds->ds_prev->ds_object;
1234 		}
1235 
1236 		dsl_dataset_rele(snap, FTAG);
1237 	} else {
1238 		/* if full, then must be forced */
1239 		if (!drba->drba_cookie->drc_force)
1240 			return (SET_ERROR(EEXIST));
1241 		/* start from $ORIGIN@$ORIGIN, if supported */
1242 		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
1243 		    dp->dp_origin_snap->ds_object : 0;
1244 	}
1245 
1246 	return (0);
1247 
1248 }
1249 
1250 static int
1251 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
1252 {
1253 	dmu_recv_begin_arg_t *drba = arg;
1254 	dsl_pool_t *dp = dmu_tx_pool(tx);
1255 	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1256 	uint64_t fromguid = drrb->drr_fromguid;
1257 	int flags = drrb->drr_flags;
1258 	int error;
1259 	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1260 	dsl_dataset_t *ds;
1261 	const char *tofs = drba->drba_cookie->drc_tofs;
1262 
1263 	/* already checked */
1264 	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1265 	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
1266 
1267 	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1268 	    DMU_COMPOUNDSTREAM ||
1269 	    drrb->drr_type >= DMU_OST_NUMTYPES ||
1270 	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
1271 		return (SET_ERROR(EINVAL));
1272 
1273 	/* Verify pool version supports SA if SA_SPILL feature set */
1274 	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1275 	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
1276 		return (SET_ERROR(ENOTSUP));
1277 
1278 	if (drba->drba_cookie->drc_resumable &&
1279 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
1280 		return (SET_ERROR(ENOTSUP));
1281 
1282 	/*
1283 	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1284  * record to a plain WRITE record, so the pool must have the
1285 	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1286 	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
1287 	 */
1288 	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1289 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1290 		return (SET_ERROR(ENOTSUP));
1291 	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
1292 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1293 		return (SET_ERROR(ENOTSUP));
1294 
1295 	/*
1296 	 * The receiving code doesn't know how to translate large blocks
1297 	 * to smaller ones, so the pool must have the LARGE_BLOCKS
1298 	 * feature enabled if the stream has LARGE_BLOCKS.
1299 	 */
1300 	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1301 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1302 		return (SET_ERROR(ENOTSUP));
1303 
1304 	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1305 	if (error == 0) {
1306 		/* target fs already exists; recv into temp clone */
1307 
1308 		/* Can't recv a clone into an existing fs */
1309 		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
1310 			dsl_dataset_rele(ds, FTAG);
1311 			return (SET_ERROR(EINVAL));
1312 		}
1313 
1314 		error = recv_begin_check_existing_impl(drba, ds, fromguid);
1315 		dsl_dataset_rele(ds, FTAG);
1316 	} else if (error == ENOENT) {
1317 		/* target fs does not exist; must be a full backup or clone */
1318 		char buf[ZFS_MAX_DATASET_NAME_LEN];
1319 
1320 		/*
1321 		 * If it's a non-clone incremental, we are missing the
1322 		 * target fs, so fail the recv.
1323 		 */
1324 		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
1325 		    drba->drba_origin))
1326 			return (SET_ERROR(ENOENT));
1327 
1328 		/*
1329 		 * If we're receiving a full send as a clone, and it doesn't
1330 		 * contain all the necessary free records and freeobject
1331 		 * records, reject it.
1332 		 */
1333 		if (fromguid == 0 && drba->drba_origin &&
1334 		    !(flags & DRR_FLAG_FREERECORDS))
1335 			return (SET_ERROR(EINVAL));
1336 
1337 		/* Open the parent of tofs */
1338 		ASSERT3U(strlen(tofs), <, sizeof (buf));
1339 		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
1340 		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
1341 		if (error != 0)
1342 			return (error);
1343 
1344 		/*
1345 		 * Check filesystem and snapshot limits before receiving. We'll
1346 		 * recheck snapshot limits again at the end (we create the
1347 		 * filesystems and increment those counts during begin_sync).
1348 		 */
1349 		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1350 		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
1351 		if (error != 0) {
1352 			dsl_dataset_rele(ds, FTAG);
1353 			return (error);
1354 		}
1355 
1356 		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1357 		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
1358 		if (error != 0) {
1359 			dsl_dataset_rele(ds, FTAG);
1360 			return (error);
1361 		}
1362 
1363 		if (drba->drba_origin != NULL) {
1364 			dsl_dataset_t *origin;
1365 			error = dsl_dataset_hold(dp, drba->drba_origin,
1366 			    FTAG, &origin);
1367 			if (error != 0) {
1368 				dsl_dataset_rele(ds, FTAG);
1369 				return (error);
1370 			}
1371 			if (!origin->ds_is_snapshot) {
1372 				dsl_dataset_rele(origin, FTAG);
1373 				dsl_dataset_rele(ds, FTAG);
1374 				return (SET_ERROR(EINVAL));
1375 			}
1376 			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
1377 			    fromguid != 0) {
1378 				dsl_dataset_rele(origin, FTAG);
1379 				dsl_dataset_rele(ds, FTAG);
1380 				return (SET_ERROR(ENODEV));
1381 			}
1382 			dsl_dataset_rele(origin, FTAG);
1383 		}
1384 		dsl_dataset_rele(ds, FTAG);
1385 		error = 0;
1386 	}
1387 	return (error);
1388 }
1389 
1390 static void
1391 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1392 {
1393 	dmu_recv_begin_arg_t *drba = arg;
1394 	dsl_pool_t *dp = dmu_tx_pool(tx);
1395 	objset_t *mos = dp->dp_meta_objset;
1396 	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1397 	const char *tofs = drba->drba_cookie->drc_tofs;
1398 	dsl_dataset_t *ds, *newds;
1399 	uint64_t dsobj;
1400 	int error;
1401 	uint64_t crflags = 0;
1402 
1403 	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
1404 		crflags |= DS_FLAG_CI_DATASET;
1405 
1406 	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1407 	if (error == 0) {
1408 		/* create temporary clone */
1409 		dsl_dataset_t *snap = NULL;
1410 		if (drba->drba_snapobj != 0) {
1411 			VERIFY0(dsl_dataset_hold_obj(dp,
1412 			    drba->drba_snapobj, FTAG, &snap));
1413 		}
1414 		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1415 		    snap, crflags, drba->drba_cred, tx);
1416 		if (drba->drba_snapobj != 0)
1417 			dsl_dataset_rele(snap, FTAG);
1418 		dsl_dataset_rele(ds, FTAG);
1419 	} else {
1420 		dsl_dir_t *dd;
1421 		const char *tail;
1422 		dsl_dataset_t *origin = NULL;
1423 
1424 		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1425 
1426 		if (drba->drba_origin != NULL) {
1427 			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1428 			    FTAG, &origin));
1429 		}
1430 
1431 		/* Create new dataset. */
1432 		dsobj = dsl_dataset_create_sync(dd,
1433 		    strrchr(tofs, '/') + 1,
1434 		    origin, crflags, drba->drba_cred, tx);
1435 		if (origin != NULL)
1436 			dsl_dataset_rele(origin, FTAG);
1437 		dsl_dir_rele(dd, FTAG);
1438 		drba->drba_cookie->drc_newfs = B_TRUE;
1439 	}
1440 	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
1441 
1442 	if (drba->drba_cookie->drc_resumable) {
1443 		dsl_dataset_zapify(newds, tx);
1444 		if (drrb->drr_fromguid != 0) {
1445 			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
1446 			    8, 1, &drrb->drr_fromguid, tx));
1447 		}
1448 		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
1449 		    8, 1, &drrb->drr_toguid, tx));
1450 		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1451 		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
1452 		uint64_t one = 1;
1453 		uint64_t zero = 0;
1454 		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
1455 		    8, 1, &one, tx));
1456 		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
1457 		    8, 1, &zero, tx));
1458 		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
1459 		    8, 1, &zero, tx));
1460 		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1461 		    DMU_BACKUP_FEATURE_EMBED_DATA) {
1462 			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
1463 			    8, 1, &one, tx));
1464 		}
1465 	}
1466 
1467 	dmu_buf_will_dirty(newds->ds_dbuf, tx);
1468 	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1469 
1470 	/*
1471 	 * If we actually created a non-clone, we need to create the
1472 	 * objset in our new dataset.
1473 	 */
1474 	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
1475 	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
1476 		(void) dmu_objset_create_impl(dp->dp_spa,
1477 		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1478 	}
1479 	rrw_exit(&newds->ds_bp_rwlock, FTAG);
1480 
1481 	drba->drba_cookie->drc_ds = newds;
1482 
1483 	spa_history_log_internal_ds(newds, "receive", tx, "");
1484 }
1485 
1486 static int
1487 dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
1488 {
1489 	dmu_recv_begin_arg_t *drba = arg;
1490 	dsl_pool_t *dp = dmu_tx_pool(tx);
1491 	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1492 	int error;
1493 	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1494 	dsl_dataset_t *ds;
1495 	const char *tofs = drba->drba_cookie->drc_tofs;
1496 
1497 	/* already checked */
1498 	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1499 	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);
1500 
1501 	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1502 	    DMU_COMPOUNDSTREAM ||
1503 	    drrb->drr_type >= DMU_OST_NUMTYPES)
1504 		return (SET_ERROR(EINVAL));
1505 
1506 	/* Verify pool version supports SA if SA_SPILL feature set */
1507 	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1508 	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
1509 		return (SET_ERROR(ENOTSUP));
1510 
1511 	/*
1512 	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1513 	 * record to a plain WRITE record, so the pool must have the
1514 	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1515 	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
1516 	 */
1517 	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1518 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1519 		return (SET_ERROR(ENOTSUP));
1520 	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
1521 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1522 		return (SET_ERROR(ENOTSUP));
1523 
1524 	/* 6 extra bytes for /%recv */
1525 	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
1526 
1527 	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
1528 	    tofs, recv_clone_name);
1529 
1530 	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
1531 		/* %recv does not exist; continue in tofs */
1532 		error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1533 		if (error != 0)
1534 			return (error);
1535 	}
1536 
1537 	/* check that ds is marked inconsistent */
1538 	if (!DS_IS_INCONSISTENT(ds)) {
1539 		dsl_dataset_rele(ds, FTAG);
1540 		return (SET_ERROR(EINVAL));
1541 	}
1542 
1543 	/* check that there is resuming data, and that the toguid matches */
1544 	if (!dsl_dataset_is_zapified(ds)) {
1545 		dsl_dataset_rele(ds, FTAG);
1546 		return (SET_ERROR(EINVAL));
1547 	}
1548 	uint64_t val;
1549 	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
1550 	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
1551 	if (error != 0 || drrb->drr_toguid != val) {
1552 		dsl_dataset_rele(ds, FTAG);
1553 		return (SET_ERROR(EINVAL));
1554 	}
1555 
1556 	/*
1557 	 * Check if the receive is still running.  If so, it will be owned.
1558 	 * Note that nothing else can own the dataset (e.g. after the receive
1559 	 * fails) because it will be marked inconsistent.
1560 	 */
1561 	if (dsl_dataset_has_owner(ds)) {
1562 		dsl_dataset_rele(ds, FTAG);
1563 		return (SET_ERROR(EBUSY));
1564 	}
1565 
1566 	/* There should not be any snapshots of this fs yet. */
1567 	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
1568 		dsl_dataset_rele(ds, FTAG);
1569 		return (SET_ERROR(EINVAL));
1570 	}
1571 
1572 	/*
1573 	 * Note: resume point will be checked when we process the first WRITE
1574 	 * record.
1575 	 */
1576 
1577 	/* check that the origin matches */
1578 	val = 0;
1579 	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
1580 	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
1581 	if (drrb->drr_fromguid != val) {
1582 		dsl_dataset_rele(ds, FTAG);
1583 		return (SET_ERROR(EINVAL));
1584 	}
1585 
1586 	dsl_dataset_rele(ds, FTAG);
1587 	return (0);
1588 }
1589 
1590 static void
1591 dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
1592 {
1593 	dmu_recv_begin_arg_t *drba = arg;
1594 	dsl_pool_t *dp = dmu_tx_pool(tx);
1595 	const char *tofs = drba->drba_cookie->drc_tofs;
1596 	dsl_dataset_t *ds;
1597 	uint64_t dsobj;
1598 	/* 6 extra bytes for /%recv */
1599 	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
1600 
1601 	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
1602 	    tofs, recv_clone_name);
1603 
1604 	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
1605 		/* %recv does not exist; continue in tofs */
1606 		VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
1607 		drba->drba_cookie->drc_newfs = B_TRUE;
1608 	}
1609 
1610 	/* clear the inconsistent flag so that we can own it */
1611 	ASSERT(DS_IS_INCONSISTENT(ds));
1612 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1613 	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
1614 	dsobj = ds->ds_object;
1615 	dsl_dataset_rele(ds, FTAG);
1616 
1617 	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));
1618 
1619 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1620 	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
1621 
1622 	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1623 	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
1624 	rrw_exit(&ds->ds_bp_rwlock, FTAG);
1625 
1626 	drba->drba_cookie->drc_ds = ds;
1627 
1628 	spa_history_log_internal_ds(ds, "resume receive", tx, "");
1629 }
1630 
1631 /*
1632  * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1633  * succeeds; otherwise we will leak the holds on the datasets.
1634  */
1635 int
1636 dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
1637     boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
1638 {
1639 	dmu_recv_begin_arg_t drba = { 0 };
1640 
1641 	bzero(drc, sizeof (dmu_recv_cookie_t));
1642 	drc->drc_drr_begin = drr_begin;
1643 	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
1644 	drc->drc_tosnap = tosnap;
1645 	drc->drc_tofs = tofs;
1646 	drc->drc_force = force;
1647 	drc->drc_resumable = resumable;
1648 	drc->drc_cred = CRED();
1649 
1650 	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
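	/*
	 * The magic number in the BEGIN record reveals whether the stream was
	 * generated on a machine of the opposite byte order; if it reads
	 * byte-swapped, checksum and byteswap the header accordingly.
	 */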
1651 		drc->drc_byteswap = B_TRUE;
1652 		fletcher_4_incremental_byteswap(drr_begin,
1653 		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
1654 		byteswap_record(drr_begin);
1655 	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
1656 		fletcher_4_incremental_native(drr_begin,
1657 		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
1658 	} else {
1659 		return (SET_ERROR(EINVAL));
1660 	}
1661 
1662 	drba.drba_origin = origin;
1663 	drba.drba_cookie = drc;
1664 	drba.drba_cred = CRED();
1665 
1666 	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
1667 	    DMU_BACKUP_FEATURE_RESUMING) {
1668 		return (dsl_sync_task(tofs,
1669 		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
1670 		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1671 	} else {
1672 		return (dsl_sync_task(tofs,
1673 		    dmu_recv_begin_check, dmu_recv_begin_sync,
1674 		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1675 	}
1676 }
1677 
1678 struct receive_record_arg {
1679 	dmu_replay_record_t header;
1680 	void *payload; /* Pointer to a buffer containing the payload */
1681 	/*
1682 	 * If the record is a write, pointer to the arc_buf_t containing the
1683 	 * payload.
1684 	 */
1685 	arc_buf_t *write_buf;
1686 	int payload_size;
1687 	uint64_t bytes_read; /* bytes read from stream when record created */
1688 	boolean_t eos_marker; /* Marks the end of the stream */
1689 	bqueue_node_t node;
1690 };
1691 
1692 struct receive_writer_arg {
1693 	objset_t *os;
1694 	boolean_t byteswap;
1695 	bqueue_t q;
1696 
1697 	/*
1698 	 * These three args are used to signal to the main thread that we're
1699 	 * done.
1700 	 */
1701 	kmutex_t mutex;
1702 	kcondvar_t cv;
1703 	boolean_t done;
1704 
1705 	int err;
1706 	/* A map from guid to dataset to help handle dedup'd streams. */
1707 	avl_tree_t *guid_to_ds_map;
1708 	boolean_t resumable;
1709 	uint64_t last_object, last_offset;
1710 	uint64_t bytes_read; /* bytes read when current record created */
1711 };
1712 
1713 struct objlist {
1714 	list_t list; /* List of struct receive_objnode. */
1715 	/*
1716 	 * Last object looked up. Used to assert that objects are being looked
1717 	 * up in ascending order.
1718 	 */
1719 	uint64_t last_lookup;
1720 };
1721 
1722 struct receive_objnode {
1723 	list_node_t node;
1724 	uint64_t object;
1725 };
1726 
1727 struct receive_arg  {
1728 	objset_t *os;
1729 	vnode_t *vp; /* The vnode to read the stream from */
1730 	uint64_t voff; /* The current offset in the stream */
1731 	uint64_t bytes_read;
1732 	/*
1733 	 * A record that has had its payload read in, but hasn't yet been handed
1734 	 * off to the worker thread.
1735 	 */
1736 	struct receive_record_arg *rrd;
1737 	/* A record that has had its header read in, but not its payload. */
1738 	struct receive_record_arg *next_rrd;
1739 	zio_cksum_t cksum;
1740 	zio_cksum_t prev_cksum;
1741 	int err;
1742 	boolean_t byteswap;
1743 	/* Sorted list of objects not to issue prefetches for. */
1744 	struct objlist ignore_objlist;
1745 };
1746 
1747 typedef struct guid_map_entry {
1748 	uint64_t	guid;
1749 	dsl_dataset_t	*gme_ds;
1750 	avl_node_t	avlnode;
1751 } guid_map_entry_t;
1752 
1753 static int
1754 guid_compare(const void *arg1, const void *arg2)
1755 {
1756 	const guid_map_entry_t *gmep1 = arg1;
1757 	const guid_map_entry_t *gmep2 = arg2;
1758 
1759 	if (gmep1->guid < gmep2->guid)
1760 		return (-1);
1761 	else if (gmep1->guid > gmep2->guid)
1762 		return (1);
1763 	return (0);
1764 }
1765 
1766 static void
1767 free_guid_map_onexit(void *arg)
1768 {
1769 	avl_tree_t *ca = arg;
1770 	void *cookie = NULL;
1771 	guid_map_entry_t *gmep;
1772 
1773 	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1774 		dsl_dataset_long_rele(gmep->gme_ds, gmep);
1775 		dsl_dataset_rele(gmep->gme_ds, gmep);
1776 		kmem_free(gmep, sizeof (guid_map_entry_t));
1777 	}
1778 	avl_destroy(ca);
1779 	kmem_free(ca, sizeof (avl_tree_t));
1780 }
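
/*
 * For reference, a sketch (not compiled) of how this guid map is wired
 * up.  The real allocation happens in dmu_recv_stream() below when the
 * first dedup'ed stream arrives for a given cleanup fd; entries are
 * added by add_ds_to_guidmap() at dmu_recv_end() time, and the whole
 * tree is torn down by free_guid_map_onexit() when the fd is closed.
 */
#if 0
	avl_tree_t *map = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);

	avl_create(map, guid_compare, sizeof (guid_map_entry_t),
	    offsetof(guid_map_entry_t, avlnode));
	/* ... receive_write_byref() looks entries up via avl_find() ... */
	free_guid_map_onexit(map);
#endif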
1781 
1782 static int
1783 receive_read(struct receive_arg *ra, int len, void *buf)
1784 {
1785 	int done = 0;
1786 
1787 	/*
1788 	 * The code doesn't rely on this (lengths being multiples of 8).  See
1789 	 * comment in dump_bytes.
1790 	 */
1791 	ASSERT0(len % 8);
1792 
1793 	while (done < len) {
1794 		ssize_t resid;
1795 
1796 		ra->err = vn_rdwr(UIO_READ, ra->vp,
1797 		    (char *)buf + done, len - done,
1798 		    ra->voff, UIO_SYSSPACE, FAPPEND,
1799 		    RLIM64_INFINITY, CRED(), &resid);
1800 
1801 		if (resid == len - done) {
1802 			/*
1803 			 * Note: ECKSUM indicates that the receive
1804 			 * was interrupted and can potentially be resumed.
1805 			 */
1806 			ra->err = SET_ERROR(ECKSUM);
1807 		}
1808 		ra->voff += len - done - resid;
1809 		done = len - resid;
1810 		if (ra->err != 0)
1811 			return (ra->err);
1812 	}
1813 
1814 	ra->bytes_read += len;
1815 
1816 	ASSERT3U(done, ==, len);
1817 	return (0);
1818 }
1819 
1820 static void
1821 byteswap_record(dmu_replay_record_t *drr)
1822 {
1823 #define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1824 #define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1825 	drr->drr_type = BSWAP_32(drr->drr_type);
1826 	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1827 
1828 	switch (drr->drr_type) {
1829 	case DRR_BEGIN:
1830 		DO64(drr_begin.drr_magic);
1831 		DO64(drr_begin.drr_versioninfo);
1832 		DO64(drr_begin.drr_creation_time);
1833 		DO32(drr_begin.drr_type);
1834 		DO32(drr_begin.drr_flags);
1835 		DO64(drr_begin.drr_toguid);
1836 		DO64(drr_begin.drr_fromguid);
1837 		break;
1838 	case DRR_OBJECT:
1839 		DO64(drr_object.drr_object);
1840 		DO32(drr_object.drr_type);
1841 		DO32(drr_object.drr_bonustype);
1842 		DO32(drr_object.drr_blksz);
1843 		DO32(drr_object.drr_bonuslen);
1844 		DO64(drr_object.drr_toguid);
1845 		break;
1846 	case DRR_FREEOBJECTS:
1847 		DO64(drr_freeobjects.drr_firstobj);
1848 		DO64(drr_freeobjects.drr_numobjs);
1849 		DO64(drr_freeobjects.drr_toguid);
1850 		break;
1851 	case DRR_WRITE:
1852 		DO64(drr_write.drr_object);
1853 		DO32(drr_write.drr_type);
1854 		DO64(drr_write.drr_offset);
1855 		DO64(drr_write.drr_length);
1856 		DO64(drr_write.drr_toguid);
1857 		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
1858 		DO64(drr_write.drr_key.ddk_prop);
1859 		break;
1860 	case DRR_WRITE_BYREF:
1861 		DO64(drr_write_byref.drr_object);
1862 		DO64(drr_write_byref.drr_offset);
1863 		DO64(drr_write_byref.drr_length);
1864 		DO64(drr_write_byref.drr_toguid);
1865 		DO64(drr_write_byref.drr_refguid);
1866 		DO64(drr_write_byref.drr_refobject);
1867 		DO64(drr_write_byref.drr_refoffset);
1868 		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
1869 		    drr_key.ddk_cksum);
1870 		DO64(drr_write_byref.drr_key.ddk_prop);
1871 		break;
1872 	case DRR_WRITE_EMBEDDED:
1873 		DO64(drr_write_embedded.drr_object);
1874 		DO64(drr_write_embedded.drr_offset);
1875 		DO64(drr_write_embedded.drr_length);
1876 		DO64(drr_write_embedded.drr_toguid);
1877 		DO32(drr_write_embedded.drr_lsize);
1878 		DO32(drr_write_embedded.drr_psize);
1879 		break;
1880 	case DRR_FREE:
1881 		DO64(drr_free.drr_object);
1882 		DO64(drr_free.drr_offset);
1883 		DO64(drr_free.drr_length);
1884 		DO64(drr_free.drr_toguid);
1885 		break;
1886 	case DRR_SPILL:
1887 		DO64(drr_spill.drr_object);
1888 		DO64(drr_spill.drr_length);
1889 		DO64(drr_spill.drr_toguid);
1890 		break;
1891 	case DRR_END:
1892 		DO64(drr_end.drr_toguid);
1893 		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
1894 		break;
1895 	}
1896 
1897 	if (drr->drr_type != DRR_BEGIN) {
1898 		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
1899 	}
1900 
1901 #undef DO64
1902 #undef DO32
1903 }
1904 
1905 static inline uint8_t
1906 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1907 {
1908 	if (bonus_type == DMU_OT_SA) {
1909 		return (1);
1910 	} else {
1911 		return (1 +
1912 		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
1913 	}
1914 }
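
/*
 * Worked example (assuming the historical 320-byte DN_MAX_BONUSLEN and
 * 128-byte block pointers, i.e. SPA_BLKPTRSHIFT == 7): an empty bonus
 * buffer leaves room for 1 + (320 >> 7) == 3 block pointers, a full
 * 320-byte bonus buffer leaves just the one embedded blkptr, and
 * DMU_OT_SA dnodes always report 1 because the spill blkptr may occupy
 * the tail of the bonus area.
 */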
1915 
1916 static void
1917 save_resume_state(struct receive_writer_arg *rwa,
1918     uint64_t object, uint64_t offset, dmu_tx_t *tx)
1919 {
1920 	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
1921 
1922 	if (!rwa->resumable)
1923 		return;
1924 
1925 	/*
1926 	 * We use ds_resume_bytes[] != 0 to indicate that we need to
1927 	 * update this on disk, so it must not be 0.
1928 	 */
1929 	ASSERT(rwa->bytes_read != 0);
1930 
1931 	/*
1932 	 * We only resume from write records, which have a valid
1933 	 * (non-meta-dnode) object number.
1934 	 */
1935 	ASSERT(object != 0);
1936 
1937 	/*
1938 	 * For resuming to work correctly, we must receive records in order,
1939 	 * sorted by object,offset.  This is checked by the callers, but
1940 	 * assert it here for good measure.
1941 	 */
1942 	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
1943 	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
1944 	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
1945 	ASSERT3U(rwa->bytes_read, >=,
1946 	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
1947 
1948 	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
1949 	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
1950 	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
1951 }
1952 
1953 static int
1954 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1955     void *data)
1956 {
1957 	dmu_object_info_t doi;
1958 	dmu_tx_t *tx;
1959 	uint64_t object;
1960 	int err;
1961 
1962 	if (drro->drr_type == DMU_OT_NONE ||
1963 	    !DMU_OT_IS_VALID(drro->drr_type) ||
1964 	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1965 	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1966 	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1967 	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1968 	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
1969 	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1970 	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1971 		return (SET_ERROR(EINVAL));
1972 	}
1973 
1974 	err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1975 
1976 	if (err != 0 && err != ENOENT)
1977 		return (SET_ERROR(EINVAL));
1978 	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1979 
1980 	/*
1981 	 * If we are losing blkptrs or changing the block size this must
1982 	 * be a new file instance.  We must clear out the previous file
1983 	 * contents before we can change this type of metadata in the dnode.
1984 	 */
1985 	if (err == 0) {
1986 		int nblkptr;
1987 
1988 		nblkptr = deduce_nblkptr(drro->drr_bonustype,
1989 		    drro->drr_bonuslen);
1990 
1991 		if (drro->drr_blksz != doi.doi_data_block_size ||
1992 		    nblkptr < doi.doi_nblkptr) {
1993 			err = dmu_free_long_range(rwa->os, drro->drr_object,
1994 			    0, DMU_OBJECT_END);
1995 			if (err != 0)
1996 				return (SET_ERROR(EINVAL));
1997 		}
1998 	}
1999 
2000 	tx = dmu_tx_create(rwa->os);
2001 	dmu_tx_hold_bonus(tx, object);
2002 	err = dmu_tx_assign(tx, TXG_WAIT);
2003 	if (err != 0) {
2004 		dmu_tx_abort(tx);
2005 		return (err);
2006 	}
2007 
2008 	if (object == DMU_NEW_OBJECT) {
2009 		/* currently free, want to be allocated */
2010 		err = dmu_object_claim(rwa->os, drro->drr_object,
2011 		    drro->drr_type, drro->drr_blksz,
2012 		    drro->drr_bonustype, drro->drr_bonuslen, tx);
2013 	} else if (drro->drr_type != doi.doi_type ||
2014 	    drro->drr_blksz != doi.doi_data_block_size ||
2015 	    drro->drr_bonustype != doi.doi_bonus_type ||
2016 	    drro->drr_bonuslen != doi.doi_bonus_size) {
2017 		/* currently allocated, but with different properties */
2018 		err = dmu_object_reclaim(rwa->os, drro->drr_object,
2019 		    drro->drr_type, drro->drr_blksz,
2020 		    drro->drr_bonustype, drro->drr_bonuslen, tx);
2021 	}
2022 	if (err != 0) {
2023 		dmu_tx_commit(tx);
2024 		return (SET_ERROR(EINVAL));
2025 	}
2026 
2027 	dmu_object_set_checksum(rwa->os, drro->drr_object,
2028 	    drro->drr_checksumtype, tx);
2029 	dmu_object_set_compress(rwa->os, drro->drr_object,
2030 	    drro->drr_compress, tx);
2031 
2032 	if (data != NULL) {
2033 		dmu_buf_t *db;
2034 
2035 		VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
2036 		dmu_buf_will_dirty(db, tx);
2037 
2038 		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
2039 		bcopy(data, db->db_data, drro->drr_bonuslen);
2040 		if (rwa->byteswap) {
2041 			dmu_object_byteswap_t byteswap =
2042 			    DMU_OT_BYTESWAP(drro->drr_bonustype);
2043 			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
2044 			    drro->drr_bonuslen);
2045 		}
2046 		dmu_buf_rele(db, FTAG);
2047 	}
2048 	dmu_tx_commit(tx);
2049 
2050 	return (0);
2051 }
2052 
2053 /* ARGSUSED */
2054 static int
2055 receive_freeobjects(struct receive_writer_arg *rwa,
2056     struct drr_freeobjects *drrfo)
2057 {
2058 	uint64_t obj;
2059 	int next_err = 0;
2060 
2061 	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2062 		return (SET_ERROR(EINVAL));
2063 
2064 	for (obj = drrfo->drr_firstobj;
2065 	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
2066 	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2067 		int err;
2068 
2069 		if (dmu_object_info(rwa->os, obj, NULL) != 0)
2070 			continue;
2071 
2072 		err = dmu_free_long_object(rwa->os, obj);
2073 		if (err != 0)
2074 			return (err);
2075 	}
2076 	if (next_err != ESRCH)
2077 		return (next_err);
2078 	return (0);
2079 }
2080 
2081 static int
2082 receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
2083     arc_buf_t *abuf)
2084 {
2085 	dmu_tx_t *tx;
2086 	int err;
2087 
2088 	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
2089 	    !DMU_OT_IS_VALID(drrw->drr_type))
2090 		return (SET_ERROR(EINVAL));
2091 
2092 	/*
2093 	 * For resuming to work, records must be in increasing order
2094 	 * by (object, offset).
2095 	 */
2096 	if (drrw->drr_object < rwa->last_object ||
2097 	    (drrw->drr_object == rwa->last_object &&
2098 	    drrw->drr_offset < rwa->last_offset)) {
2099 		return (SET_ERROR(EINVAL));
2100 	}
2101 	rwa->last_object = drrw->drr_object;
2102 	rwa->last_offset = drrw->drr_offset;
2103 
2104 	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
2105 		return (SET_ERROR(EINVAL));
2106 
2107 	tx = dmu_tx_create(rwa->os);
2108 
2109 	dmu_tx_hold_write(tx, drrw->drr_object,
2110 	    drrw->drr_offset, drrw->drr_length);
2111 	err = dmu_tx_assign(tx, TXG_WAIT);
2112 	if (err != 0) {
2113 		dmu_tx_abort(tx);
2114 		return (err);
2115 	}
2116 	if (rwa->byteswap) {
2117 		dmu_object_byteswap_t byteswap =
2118 		    DMU_OT_BYTESWAP(drrw->drr_type);
2119 		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2120 		    drrw->drr_length);
2121 	}
2122 
2123 	dmu_buf_t *bonus;
2124 	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) {
2125 		/* commit the already-assigned (empty) tx rather than leak it */
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}
2126 	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
2127 
2128 	/*
2129 	 * Note: If the receive fails, we want the resume stream to start
2130 	 * with the same record that we last successfully received (as opposed
2131 	 * to the next record), so that we can verify that we are
2132 	 * resuming from the correct location.
2133 	 */
2134 	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2135 	dmu_tx_commit(tx);
2136 	dmu_buf_rele(bonus, FTAG);
2137 
2138 	return (0);
2139 }
2140 
2141 /*
2142  * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
2143  * streams to refer to a copy of the data that is already on the
2144  * system because it came in earlier in the stream.  This function
2145  * finds the earlier copy of the data, and uses that copy instead of
2146  * data from the stream to fulfill this write.
2147  */
2148 static int
2149 receive_write_byref(struct receive_writer_arg *rwa,
2150     struct drr_write_byref *drrwbr)
2151 {
2152 	dmu_tx_t *tx;
2153 	int err;
2154 	guid_map_entry_t gmesrch;
2155 	guid_map_entry_t *gmep;
2156 	avl_index_t where;
2157 	objset_t *ref_os = NULL;
2158 	dmu_buf_t *dbp;
2159 
2160 	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
2161 		return (SET_ERROR(EINVAL));
2162 
2163 	/*
2164 	 * If the GUID of the referenced dataset is different from the
2165 	 * GUID of the target dataset, find the referenced dataset.
2166 	 */
2167 	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
2168 		gmesrch.guid = drrwbr->drr_refguid;
2169 		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
2170 		    &where)) == NULL) {
2171 			return (SET_ERROR(EINVAL));
2172 		}
2173 		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
2174 			return (SET_ERROR(EINVAL));
2175 	} else {
2176 		ref_os = rwa->os;
2177 	}
2178 
2179 	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
2180 	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
2181 	if (err != 0)
2182 		return (err);
2183 
2184 	tx = dmu_tx_create(rwa->os);
2185 
2186 	dmu_tx_hold_write(tx, drrwbr->drr_object,
2187 	    drrwbr->drr_offset, drrwbr->drr_length);
2188 	err = dmu_tx_assign(tx, TXG_WAIT);
2189 	if (err != 0) {
2190 		dmu_tx_abort(tx);
2191 		return (err);
2192 	}
2193 	dmu_write(rwa->os, drrwbr->drr_object,
2194 	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
2195 	dmu_buf_rele(dbp, FTAG);
2196 
2197 	/* See comment in receive_write. */
2198 	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
2199 	dmu_tx_commit(tx);
2200 	return (0);
2201 }
2202 
2203 static int
2204 receive_write_embedded(struct receive_writer_arg *rwa,
2205     struct drr_write_embedded *drrwe, void *data)
2206 {
2207 	dmu_tx_t *tx;
2208 	int err;
2209 
2210 	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2211 		return (SET_ERROR(EINVAL));
2212 
2213 	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2214 		return (SET_ERROR(EINVAL));
2215 
2216 	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2217 		return (SET_ERROR(EINVAL));
2218 	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2219 		return (SET_ERROR(EINVAL));
2220 
2221 	tx = dmu_tx_create(rwa->os);
2222 
2223 	dmu_tx_hold_write(tx, drrwe->drr_object,
2224 	    drrwe->drr_offset, drrwe->drr_length);
2225 	err = dmu_tx_assign(tx, TXG_WAIT);
2226 	if (err != 0) {
2227 		dmu_tx_abort(tx);
2228 		return (err);
2229 	}
2230 
2231 	dmu_write_embedded(rwa->os, drrwe->drr_object,
2232 	    drrwe->drr_offset, data, drrwe->drr_etype,
2233 	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2234 	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2235 
2236 	/* See comment in receive_write. */
2237 	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2238 	dmu_tx_commit(tx);
2239 	return (0);
2240 }
2241 
2242 static int
2243 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2244     void *data)
2245 {
2246 	dmu_tx_t *tx;
2247 	dmu_buf_t *db, *db_spill;
2248 	int err;
2249 
2250 	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2251 	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2252 		return (SET_ERROR(EINVAL));
2253 
2254 	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2255 		return (SET_ERROR(EINVAL));
2256 
2257 	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2258 	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2259 		dmu_buf_rele(db, FTAG);
2260 		return (err);
2261 	}
2262 
2263 	tx = dmu_tx_create(rwa->os);
2264 
2265 	dmu_tx_hold_spill(tx, db->db_object);
2266 
2267 	err = dmu_tx_assign(tx, TXG_WAIT);
2268 	if (err != 0) {
2269 		dmu_buf_rele(db, FTAG);
2270 		dmu_buf_rele(db_spill, FTAG);
2271 		dmu_tx_abort(tx);
2272 		return (err);
2273 	}
2274 	dmu_buf_will_dirty(db_spill, tx);
2275 
2276 	if (db_spill->db_size < drrs->drr_length)
2277 		VERIFY0(dbuf_spill_set_blksz(db_spill,
2278 		    drrs->drr_length, tx));
2279 	bcopy(data, db_spill->db_data, drrs->drr_length);
2280 
2281 	dmu_buf_rele(db, FTAG);
2282 	dmu_buf_rele(db_spill, FTAG);
2283 
2284 	dmu_tx_commit(tx);
2285 	return (0);
2286 }
2287 
2288 /* ARGSUSED */
2289 static int
2290 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2291 {
2292 	int err;
2293 
2294 	if (drrf->drr_length != -1ULL &&
2295 	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2296 		return (SET_ERROR(EINVAL));
2297 
2298 	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2299 		return (SET_ERROR(EINVAL));
2300 
2301 	err = dmu_free_long_range(rwa->os, drrf->drr_object,
2302 	    drrf->drr_offset, drrf->drr_length);
2303 
2304 	return (err);
2305 }
2306 
2307 /* used to destroy the drc_ds on error */
2308 static void
2309 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2310 {
2311 	if (drc->drc_resumable) {
2312 		/* wait for our resume state to be written to disk */
2313 		txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2314 		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2315 	} else {
2316 		char name[ZFS_MAX_DATASET_NAME_LEN];
2317 		dsl_dataset_name(drc->drc_ds, name);
2318 		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2319 		(void) dsl_destroy_head(name);
2320 	}
2321 }
2322 
2323 static void
2324 receive_cksum(struct receive_arg *ra, int len, void *buf)
2325 {
2326 	if (ra->byteswap) {
2327 		fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
2328 	} else {
2329 		fletcher_4_incremental_native(buf, len, &ra->cksum);
2330 	}
2331 }
2332 
2333 /*
2334  * Read the payload into a buffer of size len, and update the current record's
2335  * payload field.
2336  * Allocate ra->next_rrd and read the next record's header into
2337  * ra->next_rrd->header.
2338  * Verify checksum of payload and next record.
2339  */
2340 static int
2341 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2342 {
2343 	int err;
2344 
2345 	if (len != 0) {
2346 		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2347 		err = receive_read(ra, len, buf);
2348 		if (err != 0)
2349 			return (err);
2350 		receive_cksum(ra, len, buf);
2351 
2352 		/* note: rrd is NULL when reading the begin record's payload */
2353 		if (ra->rrd != NULL) {
2354 			ra->rrd->payload = buf;
2355 			ra->rrd->payload_size = len;
2356 			ra->rrd->bytes_read = ra->bytes_read;
2357 		}
2358 	}
2359 
2360 	ra->prev_cksum = ra->cksum;
2361 
2362 	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
2363 	err = receive_read(ra, sizeof (ra->next_rrd->header),
2364 	    &ra->next_rrd->header);
2365 	ra->next_rrd->bytes_read = ra->bytes_read;
2366 	if (err != 0) {
2367 		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2368 		ra->next_rrd = NULL;
2369 		return (err);
2370 	}
2371 	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
2372 		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2373 		ra->next_rrd = NULL;
2374 		return (SET_ERROR(EINVAL));
2375 	}
2376 
2377 	/*
2378 	 * Note: checksum is of everything up to but not including the
2379 	 * checksum itself.
2380 	 */
2381 	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2382 	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2383 	receive_cksum(ra,
2384 	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2385 	    &ra->next_rrd->header);
2386 
2387 	zio_cksum_t cksum_orig =
2388 	    ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2389 	zio_cksum_t *cksump =
2390 	    &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2391 
2392 	if (ra->byteswap)
2393 		byteswap_record(&ra->next_rrd->header);
2394 
2395 	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2396 	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
2397 		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2398 		ra->next_rrd = NULL;
2399 		return (SET_ERROR(ECKSUM));
2400 	}
2401 
2402 	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
2403 
2404 	return (0);
2405 }
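
/*
 * Stream framing, for reference: each record's trailing drr_checksum
 * holds a fletcher-4 of every stream byte that precedes it, payloads
 * included (a zero checksum is treated as "not present" above).  The
 * checksum field itself is folded into the running checksum only after
 * it has been verified, so the next record's checksum covers it too.
 */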
2406 
2407 static void
2408 objlist_create(struct objlist *list)
2409 {
2410 	list_create(&list->list, sizeof (struct receive_objnode),
2411 	    offsetof(struct receive_objnode, node));
2412 	list->last_lookup = 0;
2413 }
2414 
2415 static void
2416 objlist_destroy(struct objlist *list)
2417 {
2418 	for (struct receive_objnode *n = list_remove_head(&list->list);
2419 	    n != NULL; n = list_remove_head(&list->list)) {
2420 		kmem_free(n, sizeof (*n));
2421 	}
2422 	list_destroy(&list->list);
2423 }
2424 
2425 /*
2426  * This function looks through the objlist to see if the specified object number
2427  * is contained in the objlist.  In the process, it will remove all object
2428  * numbers in the list that are smaller than the specified object number.  Thus,
2429  * any lookup of an object number smaller than a previously looked up object
2430  * number will always return false; therefore, all lookups should be done in
2431  * ascending order.
2432  */
2433 static boolean_t
2434 objlist_exists(struct objlist *list, uint64_t object)
2435 {
2436 	struct receive_objnode *node = list_head(&list->list);
2437 	ASSERT3U(object, >=, list->last_lookup);
2438 	list->last_lookup = object;
2439 	while (node != NULL && node->object < object) {
2440 		VERIFY3P(node, ==, list_remove_head(&list->list));
2441 		kmem_free(node, sizeof (*node));
2442 		node = list_head(&list->list);
2443 	}
2444 	return (node != NULL && node->object == object);
2445 }
2446 
2447 /*
2448  * The objlist is a list of object numbers stored in ascending order.  However,
2449  * the insertion of new object numbers does not seek out the correct location to
2450  * store a new object number; instead, it appends it to the list for simplicity.
2451  * Thus, any users must take care to only insert new object numbers in ascending
2452  * order.
2453  */
2454 static void
2455 objlist_insert(struct objlist *list, uint64_t object)
2456 {
2457 	struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
2458 	node->object = object;
2459 #ifdef ZFS_DEBUG
2460 	struct receive_objnode *last_object = list_tail(&list->list);
2461 	uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
2462 	ASSERT3U(node->object, >, last_objnum);
2463 #endif
2464 	list_insert_tail(&list->list, node);
2465 }
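
/*
 * Illustrative use of the objlist helpers above (a sketch, not part of
 * the build); both insertion and lookup must proceed in ascending
 * object order, mirroring how receive_read_record() and
 * receive_read_prefetch() use the ignore list:
 */
#if 0
	struct objlist ol;

	objlist_create(&ol);
	objlist_insert(&ol, 5);
	objlist_insert(&ol, 9);		/* must be > 5 */
	(void) objlist_exists(&ol, 5);	/* B_TRUE; nothing trimmed */
	(void) objlist_exists(&ol, 7);	/* B_FALSE; node 5 is trimmed */
	(void) objlist_exists(&ol, 9);	/* B_TRUE */
	objlist_destroy(&ol);
#endif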
2466 
2467 /*
2468  * Issue the prefetch reads for any necessary indirect blocks.
2469  *
2470  * We use the object ignore list to tell us whether or not to issue prefetches
2471  * for a given object.  We do this for both correctness (in case the blocksize
2472  * of an object has changed) and performance (if the object doesn't exist, don't
2473  * needlessly try to issue prefetches).  We also trim the list as we go through
2474  * the stream to prevent it from growing to an unbounded size.
2475  *
2476  * The object numbers within will always be in sorted order, and any write
2477  * records we see will also be in sorted order, but they're not sorted with
2478  * respect to each other (i.e. we can get several object records before
2479  * receiving each object's write records).  As a result, once we've reached a
2480  * given object number, we can safely remove any reference to lower object
2481  * numbers in the ignore list. In practice, we receive up to 32 object records
2482  * before receiving write records, so the list can have up to 32 nodes in it.
2483  */
2484 /* ARGSUSED */
2485 static void
2486 receive_read_prefetch(struct receive_arg *ra,
2487     uint64_t object, uint64_t offset, uint64_t length)
2488 {
2489 	if (!objlist_exists(&ra->ignore_objlist, object)) {
2490 		dmu_prefetch(ra->os, object, 1, offset, length,
2491 		    ZIO_PRIORITY_SYNC_READ);
2492 	}
2493 }
2494 
2495 /*
2496  * Read records off the stream, issuing any necessary prefetches.
2497  */
2498 static int
2499 receive_read_record(struct receive_arg *ra)
2500 {
2501 	int err;
2502 
2503 	switch (ra->rrd->header.drr_type) {
2504 	case DRR_OBJECT:
2505 	{
2506 		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2507 		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
2508 		void *buf = kmem_zalloc(size, KM_SLEEP);
2509 		dmu_object_info_t doi;
2510 		err = receive_read_payload_and_next_header(ra, size, buf);
2511 		if (err != 0) {
2512 			kmem_free(buf, size);
2513 			return (err);
2514 		}
2515 		err = dmu_object_info(ra->os, drro->drr_object, &doi);
2516 		/*
2517 		 * See receive_read_prefetch for an explanation why we're
2518 		 * storing this object in the ignore_objlist.
2519 		 */
2520 		if (err == ENOENT ||
2521 		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2522 			objlist_insert(&ra->ignore_objlist, drro->drr_object);
2523 			err = 0;
2524 		}
2525 		return (err);
2526 	}
2527 	case DRR_FREEOBJECTS:
2528 	{
2529 		err = receive_read_payload_and_next_header(ra, 0, NULL);
2530 		return (err);
2531 	}
2532 	case DRR_WRITE:
2533 	{
2534 		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2535 		arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2536 		    drrw->drr_length);
2537 
2538 		err = receive_read_payload_and_next_header(ra,
2539 		    drrw->drr_length, abuf->b_data);
2540 		if (err != 0) {
2541 			dmu_return_arcbuf(abuf);
2542 			return (err);
2543 		}
2544 		ra->rrd->write_buf = abuf;
2545 		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2546 		    drrw->drr_length);
2547 		return (err);
2548 	}
2549 	case DRR_WRITE_BYREF:
2550 	{
2551 		struct drr_write_byref *drrwb =
2552 		    &ra->rrd->header.drr_u.drr_write_byref;
2553 		err = receive_read_payload_and_next_header(ra, 0, NULL);
2554 		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2555 		    drrwb->drr_length);
2556 		return (err);
2557 	}
2558 	case DRR_WRITE_EMBEDDED:
2559 	{
2560 		struct drr_write_embedded *drrwe =
2561 		    &ra->rrd->header.drr_u.drr_write_embedded;
2562 		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2563 		void *buf = kmem_zalloc(size, KM_SLEEP);
2564 
2565 		err = receive_read_payload_and_next_header(ra, size, buf);
2566 		if (err != 0) {
2567 			kmem_free(buf, size);
2568 			return (err);
2569 		}
2570 
2571 		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2572 		    drrwe->drr_length);
2573 		return (err);
2574 	}
2575 	case DRR_FREE:
2576 	{
2577 		/*
2578 		 * It might be beneficial to prefetch indirect blocks here, but
2579 		 * we don't really have the data to decide for sure.
2580 		 */
2581 		err = receive_read_payload_and_next_header(ra, 0, NULL);
2582 		return (err);
2583 	}
2584 	case DRR_END:
2585 	{
2586 		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
2587 		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
2588 			return (SET_ERROR(ECKSUM));
2589 		return (0);
2590 	}
2591 	case DRR_SPILL:
2592 	{
2593 		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
2594 		void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
2595 		err = receive_read_payload_and_next_header(ra, drrs->drr_length,
2596 		    buf);
2597 		if (err != 0)
2598 			kmem_free(buf, drrs->drr_length);
2599 		return (err);
2600 	}
2601 	default:
2602 		return (SET_ERROR(EINVAL));
2603 	}
2604 }
2605 
2606 /*
2607  * Commit the records to the pool.
2608  */
2609 static int
2610 receive_process_record(struct receive_writer_arg *rwa,
2611     struct receive_record_arg *rrd)
2612 {
2613 	int err;
2614 
2615 	/* Processing in order, therefore bytes_read should be increasing. */
2616 	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2617 	rwa->bytes_read = rrd->bytes_read;
2618 
2619 	switch (rrd->header.drr_type) {
2620 	case DRR_OBJECT:
2621 	{
2622 		struct drr_object *drro = &rrd->header.drr_u.drr_object;
2623 		err = receive_object(rwa, drro, rrd->payload);
2624 		kmem_free(rrd->payload, rrd->payload_size);
2625 		rrd->payload = NULL;
2626 		return (err);
2627 	}
2628 	case DRR_FREEOBJECTS:
2629 	{
2630 		struct drr_freeobjects *drrfo =
2631 		    &rrd->header.drr_u.drr_freeobjects;
2632 		return (receive_freeobjects(rwa, drrfo));
2633 	}
2634 	case DRR_WRITE:
2635 	{
2636 		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2637 		err = receive_write(rwa, drrw, rrd->write_buf);
2638 		/* if receive_write() is successful, it consumes the arc_buf */
2639 		if (err != 0)
2640 			dmu_return_arcbuf(rrd->write_buf);
2641 		rrd->write_buf = NULL;
2642 		rrd->payload = NULL;
2643 		return (err);
2644 	}
2645 	case DRR_WRITE_BYREF:
2646 	{
2647 		struct drr_write_byref *drrwbr =
2648 		    &rrd->header.drr_u.drr_write_byref;
2649 		return (receive_write_byref(rwa, drrwbr));
2650 	}
2651 	case DRR_WRITE_EMBEDDED:
2652 	{
2653 		struct drr_write_embedded *drrwe =
2654 		    &rrd->header.drr_u.drr_write_embedded;
2655 		err = receive_write_embedded(rwa, drrwe, rrd->payload);
2656 		kmem_free(rrd->payload, rrd->payload_size);
2657 		rrd->payload = NULL;
2658 		return (err);
2659 	}
2660 	case DRR_FREE:
2661 	{
2662 		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2663 		return (receive_free(rwa, drrf));
2664 	}
2665 	case DRR_SPILL:
2666 	{
2667 		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2668 		err = receive_spill(rwa, drrs, rrd->payload);
2669 		kmem_free(rrd->payload, rrd->payload_size);
2670 		rrd->payload = NULL;
2671 		return (err);
2672 	}
2673 	default:
2674 		return (SET_ERROR(EINVAL));
2675 	}
2676 }
2677 
2678 /*
2679  * dmu_recv_stream's worker thread; pull records off the queue, and then call
2680  * receive_process_record().  When done, signal the main thread and exit.
2681  */
2682 static void
2683 receive_writer_thread(void *arg)
2684 {
2685 	struct receive_writer_arg *rwa = arg;
2686 	struct receive_record_arg *rrd;
2687 	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2688 	    rrd = bqueue_dequeue(&rwa->q)) {
2689 		/*
2690 		 * If there's an error, the main thread will stop putting things
2691 		 * on the queue, but we need to clear everything in it before we
2692 		 * can exit.
2693 		 */
2694 		if (rwa->err == 0) {
2695 			rwa->err = receive_process_record(rwa, rrd);
2696 		} else if (rrd->write_buf != NULL) {
2697 			dmu_return_arcbuf(rrd->write_buf);
2698 			rrd->write_buf = NULL;
2699 			rrd->payload = NULL;
2700 		} else if (rrd->payload != NULL) {
2701 			kmem_free(rrd->payload, rrd->payload_size);
2702 			rrd->payload = NULL;
2703 		}
2704 		kmem_free(rrd, sizeof (*rrd));
2705 	}
2706 	kmem_free(rrd, sizeof (*rrd));
2707 	mutex_enter(&rwa->mutex);
2708 	rwa->done = B_TRUE;
2709 	cv_signal(&rwa->cv);
2710 	mutex_exit(&rwa->mutex);
2711 }
2712 
2713 static int
2714 resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
2715 {
2716 	uint64_t val;
2717 	objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
2718 	uint64_t dsobj = dmu_objset_id(ra->os);
2719 	uint64_t resume_obj, resume_off;
2720 
2721 	if (nvlist_lookup_uint64(begin_nvl,
2722 	    "resume_object", &resume_obj) != 0 ||
2723 	    nvlist_lookup_uint64(begin_nvl,
2724 	    "resume_offset", &resume_off) != 0) {
2725 		return (SET_ERROR(EINVAL));
2726 	}
2727 	VERIFY0(zap_lookup(mos, dsobj,
2728 	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
2729 	if (resume_obj != val)
2730 		return (SET_ERROR(EINVAL));
2731 	VERIFY0(zap_lookup(mos, dsobj,
2732 	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
2733 	if (resume_off != val)
2734 		return (SET_ERROR(EINVAL));
2735 
2736 	return (0);
2737 }
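
/*
 * The begin payload checked above is a packed nvlist built by the send
 * side.  A sketch (not compiled) of its construction, where resume_obj
 * and resume_off stand for the (object, offset) pair at which the
 * prior receive was interrupted:
 */
#if 0
	nvlist_t *nvl = fnvlist_alloc();

	fnvlist_add_uint64(nvl, "resume_object", resume_obj);
	fnvlist_add_uint64(nvl, "resume_offset", resume_off);
	/* packed into the drr_payloadlen bytes after the BEGIN record */
#endif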
2738 
2739 /*
2740  * Read in the stream's records, one by one, and apply them to the pool.  There
2741  * are two threads involved; the thread that calls this function will spin up a
2742  * worker thread, read the records off the stream one by one, and issue
2743  * prefetches for any necessary indirect blocks.  It will then push the records
2744  * onto an internal blocking queue.  The worker thread will pull the records off
2745  * the queue, and actually write the data into the DMU.  This way, the worker
2746  * thread doesn't have to wait for reads to complete, since everything it needs
2747  * (the indirect blocks) will be prefetched.
2748  *
2749  * NB: callers *must* call dmu_recv_end() if this succeeds.
2750  */
2751 int
2752 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
2753     int cleanup_fd, uint64_t *action_handlep)
2754 {
2755 	int err = 0;
2756 	struct receive_arg ra = { 0 };
2757 	struct receive_writer_arg rwa = { 0 };
2758 	int featureflags;
2759 	nvlist_t *begin_nvl = NULL;
2760 
2761 	ra.byteswap = drc->drc_byteswap;
2762 	ra.cksum = drc->drc_cksum;
2763 	ra.vp = vp;
2764 	ra.voff = *voffp;
2765 
2766 	if (dsl_dataset_is_zapified(drc->drc_ds)) {
2767 		(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
2768 		    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
2769 		    sizeof (ra.bytes_read), 1, &ra.bytes_read);
2770 	}
2771 
2772 	objlist_create(&ra.ignore_objlist);
2773 
2774 	/* these were verified in dmu_recv_begin */
2775 	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
2776 	    DMU_SUBSTREAM);
2777 	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
2778 
2779 	/*
2780 	 * Open the objset we are modifying.
2781 	 */
2782 	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2783 
2784 	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2785 
2786 	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2787 
2788 	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
2789 	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2790 		minor_t minor;
2791 
2792 		if (cleanup_fd == -1) {
2793 			err = SET_ERROR(EBADF);
2794 			goto out;
2795 		}
2796 		err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2797 		if (err != 0) {
2798 			cleanup_fd = -1;
2799 			goto out;
2800 		}
2801 
2802 		if (*action_handlep == 0) {
2803 			rwa.guid_to_ds_map =
2804 			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2805 			avl_create(rwa.guid_to_ds_map, guid_compare,
2806 			    sizeof (guid_map_entry_t),
2807 			    offsetof(guid_map_entry_t, avlnode));
2808 			err = zfs_onexit_add_cb(minor,
2809 			    free_guid_map_onexit, rwa.guid_to_ds_map,
2810 			    action_handlep);
2811 			if (err != 0)
2812 				goto out;
2813 		} else {
2814 			err = zfs_onexit_cb_data(minor, *action_handlep,
2815 			    (void **)&rwa.guid_to_ds_map);
2816 			if (err != 0)
2817 				goto out;
2818 		}
2819 
2820 		drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
2821 	}
2822 
2823 	uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
2824 	void *payload = NULL;
2825 	if (payloadlen != 0)
2826 		payload = kmem_alloc(payloadlen, KM_SLEEP);
2827 
2828 	err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
2829 	if (err != 0) {
2830 		if (payloadlen != 0)
2831 			kmem_free(payload, payloadlen);
2832 		goto out;
2833 	}
2834 	if (payloadlen != 0) {
2835 		err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
2836 		kmem_free(payload, payloadlen);
2837 		if (err != 0)
2838 			goto out;
2839 	}
2840 
2841 	if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
2842 		err = resume_check(&ra, begin_nvl);
2843 		if (err != 0)
2844 			goto out;
2845 	}
2846 
2847 	(void) bqueue_init(&rwa.q, zfs_recv_queue_length,
2848 	    offsetof(struct receive_record_arg, node));
2849 	cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
2850 	mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
2851 	rwa.os = ra.os;
2852 	rwa.byteswap = drc->drc_byteswap;
2853 	rwa.resumable = drc->drc_resumable;
2854 
2855 	(void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, curproc,
2856 	    TS_RUN, minclsyspri);
2857 	/*
2858 	 * We're reading rwa.err without locks, which is safe since we are the
2859 	 * only reader, and the worker thread is the only writer.  It's ok if we
2860 	 * miss a write for an iteration or two of the loop, since the writer
2861 	 * thread will keep freeing records we send it until we send it an eos
2862 	 * marker.
2863 	 *
2864 	 * We can leave this loop in 3 ways:  First, if rwa.err is
2865 	 * non-zero.  In that case, the writer thread will free the rrd we just
2866 	 * pushed.  Second, if we're interrupted; in that case, either it's the
2867 	 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
2868 	 * has been handed off to the writer thread who will free it.  Finally,
2869 	 * if receive_read_record fails or we're at the end of the stream, then
2870 	 * we free ra.rrd and exit.
2871 	 */
2872 	while (rwa.err == 0) {
2873 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
2874 			err = SET_ERROR(EINTR);
2875 			break;
2876 		}
2877 
2878 		ASSERT3P(ra.rrd, ==, NULL);
2879 		ra.rrd = ra.next_rrd;
2880 		ra.next_rrd = NULL;
2881 		/* Allocates and loads header into ra.next_rrd */
2882 		err = receive_read_record(&ra);
2883 
2884 		if (ra.rrd->header.drr_type == DRR_END || err != 0) {
2885 			kmem_free(ra.rrd, sizeof (*ra.rrd));
2886 			ra.rrd = NULL;
2887 			break;
2888 		}
2889 
2890 		bqueue_enqueue(&rwa.q, ra.rrd,
2891 		    sizeof (struct receive_record_arg) + ra.rrd->payload_size);
2892 		ra.rrd = NULL;
2893 	}
2894 	if (ra.next_rrd == NULL)
2895 		ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
2896 	ra.next_rrd->eos_marker = B_TRUE;
2897 	bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
2898 
2899 	mutex_enter(&rwa.mutex);
2900 	while (!rwa.done) {
2901 		cv_wait(&rwa.cv, &rwa.mutex);
2902 	}
2903 	mutex_exit(&rwa.mutex);
2904 
2905 	cv_destroy(&rwa.cv);
2906 	mutex_destroy(&rwa.mutex);
2907 	bqueue_destroy(&rwa.q);
2908 	if (err == 0)
2909 		err = rwa.err;
2910 
2911 out:
2912 	nvlist_free(begin_nvl);
2913 	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
2914 		zfs_onexit_fd_rele(cleanup_fd);
2915 
2916 	if (err != 0) {
2917 		/*
2918 		 * Clean up references. If receive is not resumable,
2919 		 * destroy what we created, so we don't leave it in
2920 		 * an inconsistent state.
2921 		 */
2922 		dmu_recv_cleanup_ds(drc);
2923 	}
2924 
2925 	*voffp = ra.voff;
2926 	objlist_destroy(&ra.ignore_objlist);
2927 	return (err);
2928 }
2929 
2930 static int
2931 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
2932 {
2933 	dmu_recv_cookie_t *drc = arg;
2934 	dsl_pool_t *dp = dmu_tx_pool(tx);
2935 	int error;
2936 
2937 	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
2938 
2939 	if (!drc->drc_newfs) {
2940 		dsl_dataset_t *origin_head;
2941 
2942 		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
2943 		if (error != 0)
2944 			return (error);
2945 		if (drc->drc_force) {
2946 			/*
2947 			 * We will destroy any snapshots in tofs (i.e. before
2948 			 * origin_head) that are after the origin (which is
2949 			 * the snap before drc_ds, because drc_ds can not
2950 			 * have any snaps of its own).
2951 			 */
2952 			uint64_t obj;
2953 
2954 			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2955 			while (obj !=
2956 			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2957 				dsl_dataset_t *snap;
2958 				error = dsl_dataset_hold_obj(dp, obj, FTAG,
2959 				    &snap);
2960 				if (error != 0)
2961 					break;
2962 				if (snap->ds_dir != origin_head->ds_dir)
2963 					error = SET_ERROR(EINVAL);
2964 				if (error == 0) {
2965 					error = dsl_destroy_snapshot_check_impl(
2966 					    snap, B_FALSE);
2967 				}
2968 				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2969 				dsl_dataset_rele(snap, FTAG);
2970 				if (error != 0)
2971 					break;
2972 			}
2973 			if (error != 0) {
2974 				dsl_dataset_rele(origin_head, FTAG);
2975 				return (error);
2976 			}
2977 		}
2978 		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
2979 		    origin_head, drc->drc_force, drc->drc_owner, tx);
2980 		if (error != 0) {
2981 			dsl_dataset_rele(origin_head, FTAG);
2982 			return (error);
2983 		}
2984 		error = dsl_dataset_snapshot_check_impl(origin_head,
2985 		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2986 		dsl_dataset_rele(origin_head, FTAG);
2987 		if (error != 0)
2988 			return (error);
2989 
2990 		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
2991 	} else {
2992 		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
2993 		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2994 	}
2995 	return (error);
2996 }
2997 
2998 static void
2999 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
3000 {
3001 	dmu_recv_cookie_t *drc = arg;
3002 	dsl_pool_t *dp = dmu_tx_pool(tx);
3003 
3004 	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
3005 	    tx, "snap=%s", drc->drc_tosnap);
3006 
3007 	if (!drc->drc_newfs) {
3008 		dsl_dataset_t *origin_head;
3009 
3010 		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
3011 		    &origin_head));
3012 
3013 		if (drc->drc_force) {
3014 			/*
3015 			 * Destroy any snapshots of drc_tofs (origin_head)
3016 			 * after the origin (the snap before drc_ds).
3017 			 */
3018 			uint64_t obj;
3019 
3020 			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3021 			while (obj !=
3022 			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3023 				dsl_dataset_t *snap;
3024 				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
3025 				    &snap));
3026 				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3027 				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3028 				dsl_destroy_snapshot_sync_impl(snap,
3029 				    B_FALSE, tx);
3030 				dsl_dataset_rele(snap, FTAG);
3031 			}
3032 		}
3033 		VERIFY3P(drc->drc_ds->ds_prev, ==,
3034 		    origin_head->ds_prev);
3035 
3036 		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3037 		    origin_head, tx);
3038 		dsl_dataset_snapshot_sync_impl(origin_head,
3039 		    drc->drc_tosnap, tx);
3040 
3041 		/* set snapshot's creation time and guid */
3042 		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3043 		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3044 		    drc->drc_drrb->drr_creation_time;
3045 		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3046 		    drc->drc_drrb->drr_toguid;
3047 		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3048 		    ~DS_FLAG_INCONSISTENT;
3049 
3050 		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3051 		dsl_dataset_phys(origin_head)->ds_flags &=
3052 		    ~DS_FLAG_INCONSISTENT;
3053 
3054 		drc->drc_newsnapobj =
3055 		    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3056 
3057 		dsl_dataset_rele(origin_head, FTAG);
3058 		dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3059 
3060 		if (drc->drc_owner != NULL)
3061 			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3062 	} else {
3063 		dsl_dataset_t *ds = drc->drc_ds;
3064 
3065 		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3066 
3067 		/* set snapshot's creation time and guid */
3068 		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3069 		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3070 		    drc->drc_drrb->drr_creation_time;
3071 		dsl_dataset_phys(ds->ds_prev)->ds_guid =
3072 		    drc->drc_drrb->drr_toguid;
3073 		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3074 		    ~DS_FLAG_INCONSISTENT;
3075 
3076 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3077 		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3078 		if (dsl_dataset_has_resume_receive_state(ds)) {
3079 			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3080 			    DS_FIELD_RESUME_FROMGUID, tx);
3081 			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3082 			    DS_FIELD_RESUME_OBJECT, tx);
3083 			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3084 			    DS_FIELD_RESUME_OFFSET, tx);
3085 			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3086 			    DS_FIELD_RESUME_BYTES, tx);
3087 			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3088 			    DS_FIELD_RESUME_TOGUID, tx);
3089 			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3090 			    DS_FIELD_RESUME_TONAME, tx);
3091 		}
3092 		drc->drc_newsnapobj =
3093 		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3094 	}
3095 	/*
3096 	 * Release the hold from dmu_recv_begin.  This must be done before
3097 	 * we return to open context, so that when we free the dataset's dnode,
3098 	 * we can evict its bonus buffer.
3099 	 */
3100 	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
3101 	drc->drc_ds = NULL;
3102 }
3103 
3104 static int
3105 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
3106 {
3107 	dsl_pool_t *dp;
3108 	dsl_dataset_t *snapds;
3109 	guid_map_entry_t *gmep;
3110 	int err;
3111 
3112 	ASSERT(guid_map != NULL);
3113 
3114 	err = dsl_pool_hold(name, FTAG, &dp);
3115 	if (err != 0)
3116 		return (err);
3117 	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
3118 	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
3119 	if (err == 0) {
3120 		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
3121 		gmep->gme_ds = snapds;
3122 		avl_add(guid_map, gmep);
3123 		dsl_dataset_long_hold(snapds, gmep);
3124 	} else {
3125 		kmem_free(gmep, sizeof (*gmep));
3126 	}
3127 
3128 	dsl_pool_rele(dp, FTAG);
3129 	return (err);
3130 }
3131 
3132 static int dmu_recv_end_modified_blocks = 3;
3133 
3134 static int
3135 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3136 {
3137 #ifdef _KERNEL
3138 	/*
3139 	 * We will be destroying the ds; make sure its origin is unmounted if
3140 	 * necessary.
3141 	 */
3142 	char name[ZFS_MAX_DATASET_NAME_LEN];
3143 	dsl_dataset_name(drc->drc_ds, name);
3144 	zfs_destroy_unmount_origin(name);
3145 #endif
3146 
3147 	return (dsl_sync_task(drc->drc_tofs,
3148 	    dmu_recv_end_check, dmu_recv_end_sync, drc,
3149 	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3150 }
3151 
3152 static int
3153 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3154 {
3155 	return (dsl_sync_task(drc->drc_tofs,
3156 	    dmu_recv_end_check, dmu_recv_end_sync, drc,
3157 	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3158 }
3159 
3160 int
3161 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3162 {
3163 	int error;
3164 
3165 	drc->drc_owner = owner;
3166 
3167 	if (drc->drc_newfs)
3168 		error = dmu_recv_new_end(drc);
3169 	else
3170 		error = dmu_recv_existing_end(drc);
3171 
3172 	if (error != 0) {
3173 		dmu_recv_cleanup_ds(drc);
3174 	} else if (drc->drc_guid_to_ds_map != NULL) {
3175 		(void) add_ds_to_guidmap(drc->drc_tofs,
3176 		    drc->drc_guid_to_ds_map,
3177 		    drc->drc_newsnapobj);
3178 	}
3179 	return (error);
3180 }
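
/*
 * Putting the pieces together, a consumer of this interface (e.g. the
 * zfs_ioc_recv() path) follows the shape sketched below; this is an
 * illustration only, with setup and error handling elided:
 */
#if 0
	dmu_recv_cookie_t drc;

	err = dmu_recv_begin(tofs, tosnap, drr_begin, force, resumable,
	    origin, &drc);
	if (err == 0) {
		/* must be called whenever dmu_recv_begin() succeeds */
		err = dmu_recv_stream(&drc, vp, &voff, cleanup_fd,
		    &action_handle);
		if (err == 0)
			err = dmu_recv_end(&drc, owner);
	}
#endif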
3181 
3182 /*
3183  * Return TRUE if this objset is currently being received into.
3184  */
3185 boolean_t
3186 dmu_objset_is_receiving(objset_t *os)
3187 {
3188 	return (os->os_dsl_dataset != NULL &&
3189 	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
3190 }
3191