1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/dmu.h>
27 #include <sys/dmu_impl.h>
28 #include <sys/dmu_tx.h>
29 #include <sys/dbuf.h>
30 #include <sys/dnode.h>
31 #include <sys/zfs_context.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dmu_traverse.h>
34 #include <sys/dsl_dataset.h>
35 #include <sys/dsl_dir.h>
36 #include <sys/dsl_prop.h>
37 #include <sys/dsl_pool.h>
38 #include <sys/dsl_synctask.h>
39 #include <sys/zfs_ioctl.h>
40 #include <sys/zap.h>
41 #include <sys/zio_checksum.h>
42 #include <sys/avl.h>
43 #include <sys/ddt.h>
44 
45 static char *dmu_recv_tag = "dmu_recv_tag";
46 
47 /*
48  * The kinds of data whose inclusion in a send stream can be pending from
49  * one call to backup_cb to another.  Multiple calls to dump_free() and
50  * dump_freeobjects() can be aggregated into a single DRR_FREE or
51  * DRR_FREEOBJECTS replay record.
52  */
53 typedef enum {
54 	PENDING_NONE,
55 	PENDING_FREE,
56 	PENDING_FREEOBJECTS
57 } pendop_t;
58 
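/*
 * State carried across backup_cb() callbacks while a send stream is
 * generated: the scratch replay record, the output vnode and offset,
 * the running fletcher-4 checksum, the destination snapshot's guid, a
 * sticky error from dump_bytes(), and whichever aggregatable free
 * operation (if any) is still pending.
 */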
59 struct backuparg {
60 	dmu_replay_record_t *drr;
61 	vnode_t *vp;
62 	offset_t *off;
63 	objset_t *os;
64 	zio_cksum_t zc;
65 	uint64_t toguid;
66 	int err;
67 	pendop_t pending_op;
68 };
69 
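/*
 * Write len bytes of stream data to the output vnode, folding them into
 * the running fletcher-4 checksum and advancing the caller's offset.
 * len must be a multiple of 8 so the stream stays 8-byte aligned.
 */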
70 static int
71 dump_bytes(struct backuparg *ba, void *buf, int len)
72 {
73 	ssize_t resid; /* have to get resid to get detailed errno */
74 	ASSERT3U(len % 8, ==, 0);
75 
76 	fletcher_4_incremental_native(buf, len, &ba->zc);
77 	ba->err = vn_rdwr(UIO_WRITE, ba->vp,
78 	    (caddr_t)buf, len,
79 	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
80 	*ba->off += len;
81 	return (ba->err);
82 }
83 
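/*
 * Record (or extend) a pending DRR_FREE record.  Frees of adjacent
 * ranges in the same object collapse into one record; e.g. (offsets
 * illustrative) dump_free(ba, obj, 0, 4096) followed by
 * dump_free(ba, obj, 4096, 4096) leaves a single pending DRR_FREE
 * covering bytes 0-8191.  A length of -1ULL means "free to the end of
 * the object" and is written out immediately rather than left pending.
 */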
84 static int
85 dump_free(struct backuparg *ba, uint64_t object, uint64_t offset,
86     uint64_t length)
87 {
88 	struct drr_free *drrf = &(ba->drr->drr_u.drr_free);
89 
90 	/*
91 	 * If there is a pending op, but it's not PENDING_FREE, push it out,
92 	 * since free block aggregation can only be done for blocks of the
93 	 * same type (i.e., DRR_FREE records can only be aggregated with
94 	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
95 	 * aggregated with other DRR_FREEOBJECTS records).
96 	 */
97 	if (ba->pending_op != PENDING_NONE && ba->pending_op != PENDING_FREE) {
98 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
99 			return (EINTR);
100 		ba->pending_op = PENDING_NONE;
101 	}
102 
103 	if (ba->pending_op == PENDING_FREE) {
104 		/*
105 		 * There should never be a PENDING_FREE if length is -1
106 		 * (because dump_dnode is the only place where this
107 		 * function is called with a -1, and only after flushing
108 		 * any pending record).
109 		 */
110 		ASSERT(length != -1ULL);
111 		/*
112 		 * Check to see whether this free block can be aggregated
113 		 * with the pending one.
114 		 */
115 		if (drrf->drr_object == object && drrf->drr_offset +
116 		    drrf->drr_length == offset) {
117 			drrf->drr_length += length;
118 			return (0);
119 		} else {
120 			/* not a continuation; push out the pending record */
121 			if (dump_bytes(ba, ba->drr,
122 			    sizeof (dmu_replay_record_t)) != 0)
123 				return (EINTR);
124 			ba->pending_op = PENDING_NONE;
125 		}
126 	}
127 	/* create a FREE record and make it pending */
128 	bzero(ba->drr, sizeof (dmu_replay_record_t));
129 	ba->drr->drr_type = DRR_FREE;
130 	drrf->drr_object = object;
131 	drrf->drr_offset = offset;
132 	drrf->drr_length = length;
133 	drrf->drr_toguid = ba->toguid;
134 	if (length == -1ULL) {
135 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
136 			return (EINTR);
137 	} else {
138 		ba->pending_op = PENDING_FREE;
139 	}
140 
141 	return (0);
142 }
143 
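/*
 * Emit a DRR_WRITE record followed by the block's payload.  The block
 * pointer's checksum type, logical/physical size, compression, and
 * checksum are copied into the record (with DRR_CHECKSUM_DEDUP set for
 * dedup-capable checksums) so that a deduplicated stream can later
 * reference this block with a DRR_WRITE_BYREF record.
 */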
144 static int
145 dump_data(struct backuparg *ba, dmu_object_type_t type,
146     uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
147 {
148 	struct drr_write *drrw = &(ba->drr->drr_u.drr_write);
149 
151 	/*
152 	 * If there is any kind of pending aggregation (currently either
153 	 * a grouping of free objects or free blocks), push it out to
154 	 * the stream, since aggregation can't be done across operations
155 	 * of different types.
156 	 */
157 	if (ba->pending_op != PENDING_NONE) {
158 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
159 			return (EINTR);
160 		ba->pending_op = PENDING_NONE;
161 	}
162 	/* write a DATA record */
163 	bzero(ba->drr, sizeof (dmu_replay_record_t));
164 	ba->drr->drr_type = DRR_WRITE;
165 	drrw->drr_object = object;
166 	drrw->drr_type = type;
167 	drrw->drr_offset = offset;
168 	drrw->drr_length = blksz;
169 	drrw->drr_toguid = ba->toguid;
170 	drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
171 	if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
172 		drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
173 	DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
174 	DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
175 	DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
176 	drrw->drr_key.ddk_cksum = bp->blk_cksum;
177 
178 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
179 		return (EINTR);
180 	if (dump_bytes(ba, data, blksz) != 0)
181 		return (EINTR);
182 	return (0);
183 }
184 
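/*
 * Emit a DRR_SPILL record and its payload for an object's spill block,
 * flushing any pending free aggregation first.
 */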
185 static int
186 dump_spill(struct backuparg *ba, uint64_t object, int blksz, void *data)
187 {
188 	struct drr_spill *drrs = &(ba->drr->drr_u.drr_spill);
189 
190 	if (ba->pending_op != PENDING_NONE) {
191 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
192 			return (EINTR);
193 		ba->pending_op = PENDING_NONE;
194 	}
195 
196 	/* write a SPILL record */
197 	bzero(ba->drr, sizeof (dmu_replay_record_t));
198 	ba->drr->drr_type = DRR_SPILL;
199 	drrs->drr_object = object;
200 	drrs->drr_length = blksz;
201 	drrs->drr_toguid = ba->toguid;
202 
203 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
204 		return (EINTR);
205 	if (dump_bytes(ba, data, blksz) != 0)
206 		return (EINTR);
207 	return (0);
208 }
209 
210 static int
211 dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs)
212 {
213 	struct drr_freeobjects *drrfo = &(ba->drr->drr_u.drr_freeobjects);
214 
215 	/*
216 	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
217 	 * push it out, since free block aggregation can only be done for
218 	 * blocks of the same type (i.e., DRR_FREE records can only be
219 	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
220 	 * can only be aggregated with other DRR_FREEOBJECTS records).
221 	 */
222 	if (ba->pending_op != PENDING_NONE &&
223 	    ba->pending_op != PENDING_FREEOBJECTS) {
224 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
225 			return (EINTR);
226 		ba->pending_op = PENDING_NONE;
227 	}
228 	if (ba->pending_op == PENDING_FREEOBJECTS) {
229 		/*
230 		 * See whether this free object array can be aggregated
231 		 * with the pending one.
232 		 */
233 		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
234 			drrfo->drr_numobjs += numobjs;
235 			return (0);
236 		} else {
237 			/* can't be aggregated.  Push out pending record */
238 			if (dump_bytes(ba, ba->drr,
239 			    sizeof (dmu_replay_record_t)) != 0)
240 				return (EINTR);
241 			ba->pending_op = PENDING_NONE;
242 		}
243 	}
244 
245 	/* write a FREEOBJECTS record */
246 	bzero(ba->drr, sizeof (dmu_replay_record_t));
247 	ba->drr->drr_type = DRR_FREEOBJECTS;
248 	drrfo->drr_firstobj = firstobj;
249 	drrfo->drr_numobjs = numobjs;
250 	drrfo->drr_toguid = ba->toguid;
251 
252 	ba->pending_op = PENDING_FREEOBJECTS;
253 
254 	return (0);
255 }
256 
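/*
 * Emit a DRR_OBJECT record describing a dnode, followed by its bonus
 * buffer (rounded up to 8 bytes).  An unallocated dnode becomes a
 * DRR_FREEOBJECTS record instead, and a trailing DRR_FREE of length
 * -1ULL truncates anything beyond the object's last block on the
 * receiving side.
 */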
257 static int
258 dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp)
259 {
260 	struct drr_object *drro = &(ba->drr->drr_u.drr_object);
261 
262 	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
263 		return (dump_freeobjects(ba, object, 1));
264 
265 	if (ba->pending_op != PENDING_NONE) {
266 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
267 			return (EINTR);
268 		ba->pending_op = PENDING_NONE;
269 	}
270 
271 	/* write an OBJECT record */
272 	bzero(ba->drr, sizeof (dmu_replay_record_t));
273 	ba->drr->drr_type = DRR_OBJECT;
274 	drro->drr_object = object;
275 	drro->drr_type = dnp->dn_type;
276 	drro->drr_bonustype = dnp->dn_bonustype;
277 	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
278 	drro->drr_bonuslen = dnp->dn_bonuslen;
279 	drro->drr_checksumtype = dnp->dn_checksum;
280 	drro->drr_compress = dnp->dn_compress;
281 	drro->drr_toguid = ba->toguid;
282 
283 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
284 		return (EINTR);
285 
286 	if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
287 		return (EINTR);
288 
289 	/* free anything past the end of the file */
290 	if (dump_free(ba, object, (dnp->dn_maxblkid + 1) *
291 	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
292 		return (EINTR);
293 	if (ba->err)
294 		return (EINTR);
295 	return (0);
296 }
297 
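/*
 * BP_SPAN() computes how many bytes of object data a block pointer at
 * the given indirection level covers.  For example (illustrative
 * values): with 128K data blocks (dn_datablkszsec == 256) and 16K
 * indirect blocks (dn_indblkshift == 14, i.e. 128 block pointers per
 * indirect block), a level-1 block pointer spans 128 * 128K == 16M of
 * the object.
 */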
298 #define	BP_SPAN(dnp, level) \
299 	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
300 	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
301 
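/*
 * traverse_dataset() callback that translates each visited block
 * pointer into stream records: holes in the meta-dnode become
 * DRR_FREEOBJECTS, holes in regular objects become DRR_FREE, dnode
 * blocks are decoded into one DRR_OBJECT record per dnode, spill (SA)
 * blocks become DRR_SPILL, and level-0 data blocks become DRR_WRITE.
 * Special objects and indirect blocks are skipped.
 */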
302 /* ARGSUSED */
303 static int
304 backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
305     const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
306 {
307 	struct backuparg *ba = arg;
308 	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
309 	int err = 0;
310 
311 	if (issig(JUSTLOOKING) && issig(FORREAL))
312 		return (EINTR);
313 
314 	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
315 	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
316 		return (0);
317 	} else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
318 		uint64_t span = BP_SPAN(dnp, zb->zb_level);
319 		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
320 		err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT);
321 	} else if (bp == NULL) {
322 		uint64_t span = BP_SPAN(dnp, zb->zb_level);
323 		err = dump_free(ba, zb->zb_object, zb->zb_blkid * span, span);
324 	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
325 		return (0);
326 	} else if (type == DMU_OT_DNODE) {
327 		dnode_phys_t *blk;
328 		int i;
329 		int blksz = BP_GET_LSIZE(bp);
330 		uint32_t aflags = ARC_WAIT;
331 		arc_buf_t *abuf;
332 
333 		if (arc_read_nolock(NULL, spa, bp,
334 		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
335 		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
336 			return (EIO);
337 
338 		blk = abuf->b_data;
339 		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
340 			uint64_t dnobj = (zb->zb_blkid <<
341 			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
342 			err = dump_dnode(ba, dnobj, blk+i);
343 			if (err)
344 				break;
345 		}
346 		(void) arc_buf_remove_ref(abuf, &abuf);
347 	} else if (type == DMU_OT_SA) {
348 		uint32_t aflags = ARC_WAIT;
349 		arc_buf_t *abuf;
350 		int blksz = BP_GET_LSIZE(bp);
351 
352 		if (arc_read_nolock(NULL, spa, bp,
353 		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
354 		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
355 			return (EIO);
356 
357 		err = dump_spill(ba, zb->zb_object, blksz, abuf->b_data);
358 		(void) arc_buf_remove_ref(abuf, &abuf);
359 	} else { /* it's a level-0 block of a regular object */
360 		uint32_t aflags = ARC_WAIT;
361 		arc_buf_t *abuf;
362 		int blksz = BP_GET_LSIZE(bp);
363 
364 		if (arc_read_nolock(NULL, spa, bp,
365 		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
366 		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
367 			return (EIO);
368 
369 		err = dump_data(ba, type, zb->zb_object, zb->zb_blkid * blksz,
370 		    blksz, bp, abuf->b_data);
371 		(void) arc_buf_remove_ref(abuf, &abuf);
372 	}
373 
374 	ASSERT(err == 0 || err == EINTR);
375 	return (err);
376 }
377 
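/*
 * Generate a send stream for the snapshot "tosnap" (optionally
 * incremental from "fromsnap", or from the clone origin if fromorigin
 * is set) and write it to vp: a DRR_BEGIN record, the records produced
 * by traversing the dataset, and a DRR_END record carrying the
 * accumulated checksum.
 */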
378 int
379 dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
380     vnode_t *vp, offset_t *off)
381 {
382 	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
383 	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
384 	dmu_replay_record_t *drr;
385 	struct backuparg ba;
386 	int err;
387 	uint64_t fromtxg = 0;
388 
389 	/* tosnap must be a snapshot */
390 	if (ds->ds_phys->ds_next_snap_obj == 0)
391 		return (EINVAL);
392 
393 	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
394 	if (fromds && (ds->ds_dir != fromds->ds_dir ||
395 	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
396 		return (EXDEV);
397 
398 	if (fromorigin) {
399 		dsl_pool_t *dp = ds->ds_dir->dd_pool;
400 
401 		if (fromsnap)
402 			return (EINVAL);
403 
404 		if (dsl_dir_is_clone(ds->ds_dir)) {
405 			rw_enter(&dp->dp_config_rwlock, RW_READER);
406 			err = dsl_dataset_hold_obj(dp,
407 			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
408 			rw_exit(&dp->dp_config_rwlock);
409 			if (err)
410 				return (err);
411 		} else {
412 			fromorigin = B_FALSE;
413 		}
414 	}
415 
417 	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
418 	drr->drr_type = DRR_BEGIN;
419 	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
420 	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
421 	    DMU_SUBSTREAM);
422 	drr->drr_u.drr_begin.drr_creation_time =
423 	    ds->ds_phys->ds_creation_time;
424 	drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
425 	if (fromorigin)
426 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
427 	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
428 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
429 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
430 
431 	if (fromds)
432 		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
433 	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
434 
435 	if (fromds)
436 		fromtxg = fromds->ds_phys->ds_creation_txg;
437 	if (fromorigin)
438 		dsl_dataset_rele(fromds, FTAG);
439 
440 	ba.drr = drr;
441 	ba.vp = vp;
442 	ba.os = tosnap;
443 	ba.off = off;
444 	ba.toguid = ds->ds_phys->ds_guid;
445 	ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0);
446 	ba.pending_op = PENDING_NONE;
447 
448 	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) {
449 		kmem_free(drr, sizeof (dmu_replay_record_t));
450 		return (ba.err);
451 	}
452 
453 	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
454 	    backup_cb, &ba);
455 
456 	if (ba.pending_op != PENDING_NONE)
457 		if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0)
458 			err = EINTR;
459 
460 	if (err) {
461 		if (err == EINTR && ba.err)
462 			err = ba.err;
463 		kmem_free(drr, sizeof (dmu_replay_record_t));
464 		return (err);
465 	}
466 
467 	bzero(drr, sizeof (dmu_replay_record_t));
468 	drr->drr_type = DRR_END;
469 	drr->drr_u.drr_end.drr_checksum = ba.zc;
470 	drr->drr_u.drr_end.drr_toguid = ba.toguid;
471 
472 	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) {
473 		kmem_free(drr, sizeof (dmu_replay_record_t));
474 		return (ba.err);
475 	}
476 
477 	kmem_free(drr, sizeof (dmu_replay_record_t));
478 
479 	return (0);
480 }
481 
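/*
 * Arguments passed to the recv_new_* and recv_existing_* sync tasks
 * that create the dataset a stream will be received into.
 */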
482 struct recvbeginsyncarg {
483 	const char *tofs;
484 	const char *tosnap;
485 	dsl_dataset_t *origin;
486 	uint64_t fromguid;
487 	dmu_objset_type_t type;
488 	void *tag;
489 	boolean_t force;
490 	uint64_t dsflags;
491 	char clonelastname[MAXNAMELEN];
492 	dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
493 };
494 
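/*
 * Check that the new dataset's name is not already in use and, for a
 * clone receive, that the origin is a snapshot in the same pool with
 * the guid the stream expects.
 */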
495 /* ARGSUSED */
496 static int
497 recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx)
498 {
499 	dsl_dir_t *dd = arg1;
500 	struct recvbeginsyncarg *rbsa = arg2;
501 	objset_t *mos = dd->dd_pool->dp_meta_objset;
502 	uint64_t val;
503 	int err;
504 
505 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
506 	    strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
507 
508 	if (err != ENOENT)
509 		return (err ? err : EEXIST);
510 
511 	if (rbsa->origin) {
512 		/* make sure it's a snap in the same pool */
513 		if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
514 			return (EXDEV);
515 		if (!dsl_dataset_is_snapshot(rbsa->origin))
516 			return (EINVAL);
517 		if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
518 			return (ENODEV);
519 	}
520 
521 	return (0);
522 }
523 
524 static void
525 recv_new_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
526 {
527 	dsl_dir_t *dd = arg1;
528 	struct recvbeginsyncarg *rbsa = arg2;
529 	uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
530 	uint64_t dsobj;
531 
532 	/* Create and open new dataset. */
533 	dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
534 	    rbsa->origin, flags, cr, tx);
535 	VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
536 	    B_TRUE, dmu_recv_tag, &rbsa->ds));
537 
538 	if (rbsa->origin == NULL) {
539 		(void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
540 		    rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
541 	}
542 
543 	spa_history_internal_log(LOG_DS_REPLAY_FULL_SYNC,
544 	    dd->dd_pool->dp_spa, tx, cr, "dataset = %lld", dsobj);
545 }
546 
547 /* ARGSUSED */
548 static int
549 recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
550 {
551 	dsl_dataset_t *ds = arg1;
552 	struct recvbeginsyncarg *rbsa = arg2;
553 	int err;
554 	uint64_t val;
555 
556 	/* must not have any changes since most recent snapshot */
557 	if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
558 		return (ETXTBSY);
559 
560 	if (rbsa->fromguid) {
561 		/* if incremental, most recent snapshot must match fromguid */
562 		if (ds->ds_prev == NULL)
563 			return (ENODEV);
564 
565 		/*
566 		 * The most recent snapshot must match fromguid, or there
567 		 * must be no changes since the fromguid snapshot.
568 		 */
569 		if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) {
570 			uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
571 			uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
572 			while (obj != 0) {
573 				dsl_dataset_t *snap;
574 				err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
575 				    obj, FTAG, &snap);
576 				if (err)
577 					return (ENODEV);
578 				if (snap->ds_phys->ds_creation_txg < birth) {
579 					dsl_dataset_rele(snap, FTAG);
580 					return (ENODEV);
581 				}
582 				if (snap->ds_phys->ds_guid == rbsa->fromguid) {
583 					dsl_dataset_rele(snap, FTAG);
584 					break; /* it's ok */
585 				}
586 				obj = snap->ds_phys->ds_prev_snap_obj;
587 				dsl_dataset_rele(snap, FTAG);
588 			}
589 			if (obj == 0)
590 				return (ENODEV);
591 		}
592 	} else {
593 		/* if full, most recent snapshot must be $ORIGIN */
594 		if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
595 			return (ENODEV);
596 	}
597 
598 	/* temporary clone name must not exist */
599 	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
600 	    ds->ds_dir->dd_phys->dd_child_dir_zapobj,
601 	    rbsa->clonelastname, 8, 1, &val);
602 	if (err == 0)
603 		return (EEXIST);
604 	if (err != ENOENT)
605 		return (err);
606 
607 	/* new snapshot name must not exist */
608 	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
609 	    ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
610 	if (err == 0)
611 		return (EEXIST);
612 	if (err != ENOENT)
613 		return (err);
614 	return (0);
615 }
616 
617 /* ARGSUSED */
618 static void
619 recv_existing_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
620 {
621 	dsl_dataset_t *ohds = arg1;
622 	struct recvbeginsyncarg *rbsa = arg2;
623 	dsl_pool_t *dp = ohds->ds_dir->dd_pool;
624 	dsl_dataset_t *cds;
625 	uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
626 	uint64_t dsobj;
627 
628 	/* create and open the temporary clone */
629 	dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
630 	    ohds->ds_prev, flags, cr, tx);
631 	VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));
632 
633 	/*
634 	 * If we actually created a non-clone, we need to create the
635 	 * objset in our new dataset.
636 	 */
637 	if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
638 		(void) dmu_objset_create_impl(dp->dp_spa,
639 		    cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
640 	}
641 
642 	rbsa->ds = cds;
643 
644 	spa_history_internal_log(LOG_DS_REPLAY_INC_SYNC,
645 	    dp->dp_spa, tx, cr, "dataset = %lld", dsobj);
646 }
647 
648 /*
649  * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
650  * succeeds; otherwise we will leak the holds on the datasets.
651  */
652 int
653 dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
654     boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc)
655 {
656 	int err = 0;
657 	boolean_t byteswap;
658 	struct recvbeginsyncarg rbsa = { 0 };
659 	uint64_t versioninfo;
660 	int flags;
661 	dsl_dataset_t *ds;
662 
663 	if (drrb->drr_magic == DMU_BACKUP_MAGIC)
664 		byteswap = FALSE;
665 	else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
666 		byteswap = TRUE;
667 	else
668 		return (EINVAL);
669 
670 	rbsa.tofs = tofs;
671 	rbsa.tosnap = tosnap;
672 	rbsa.origin = origin ? origin->os_dsl_dataset : NULL;
673 	rbsa.fromguid = drrb->drr_fromguid;
674 	rbsa.type = drrb->drr_type;
675 	rbsa.tag = FTAG;
676 	rbsa.dsflags = 0;
677 	versioninfo = drrb->drr_versioninfo;
678 	flags = drrb->drr_flags;
679 
680 	if (byteswap) {
681 		rbsa.type = BSWAP_32(rbsa.type);
682 		rbsa.fromguid = BSWAP_64(rbsa.fromguid);
683 		versioninfo = BSWAP_64(versioninfo);
684 		flags = BSWAP_32(flags);
685 	}
686 
687 	if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM ||
688 	    rbsa.type >= DMU_OST_NUMTYPES ||
689 	    ((flags & DRR_FLAG_CLONE) && origin == NULL))
690 		return (EINVAL);
691 
692 	if (flags & DRR_FLAG_CI_DATA)
693 		rbsa.dsflags = DS_FLAG_CI_DATASET;
694 
695 	bzero(drc, sizeof (dmu_recv_cookie_t));
696 	drc->drc_drrb = drrb;
697 	drc->drc_tosnap = tosnap;
698 	drc->drc_top_ds = top_ds;
699 	drc->drc_force = force;
700 
701 	/*
702 	 * Process the begin in syncing context.
703 	 */
704 
705 	/* open the dataset we are logically receiving into */
706 	err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
707 	if (err == 0) {
708 		/* target fs already exists; recv into temp clone */
709 
710 		/* Can't recv a clone into an existing fs */
711 		if (flags & DRR_FLAG_CLONE) {
712 			dsl_dataset_rele(ds, dmu_recv_tag);
713 			return (EINVAL);
714 		}
715 
716 		/* must not have an incremental recv already in progress */
717 		if (!mutex_tryenter(&ds->ds_recvlock)) {
718 			dsl_dataset_rele(ds, dmu_recv_tag);
719 			return (EBUSY);
720 		}
721 
722 		/* tmp clone name is: tofs/%tosnap */
723 		(void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
724 		    "%%%s", tosnap);
725 		rbsa.force = force;
726 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
727 		    recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
728 		if (err) {
729 			mutex_exit(&ds->ds_recvlock);
730 			dsl_dataset_rele(ds, dmu_recv_tag);
731 			return (err);
732 		}
733 		drc->drc_logical_ds = ds;
734 		drc->drc_real_ds = rbsa.ds;
735 	} else if (err == ENOENT) {
736 		/* target fs does not exist; must be a full backup or clone */
737 		char *cp;
738 
739 		/*
740 		 * If it's a non-clone incremental, we are missing the
741 		 * target fs, so fail the recv.
742 		 */
743 		if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
744 			return (ENOENT);
745 
746 		/* Open the parent of tofs */
747 		cp = strrchr(tofs, '/');
748 		*cp = '\0';
749 		err = dsl_dataset_hold(tofs, FTAG, &ds);
750 		*cp = '/';
751 		if (err)
752 			return (err);
753 
754 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
755 		    recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
756 		dsl_dataset_rele(ds, FTAG);
757 		if (err)
758 			return (err);
759 		drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
760 		drc->drc_newfs = B_TRUE;
761 	}
762 
763 	return (err);
764 }
765 
766 struct restorearg {
767 	int err;
768 	int byteswap;
769 	vnode_t *vp;
770 	char *buf;
771 	uint64_t voff;
772 	int bufsize; /* amount of memory allocated for buf */
773 	zio_cksum_t cksum;
774 	avl_tree_t guid_to_ds_map;
775 };
776 
777 typedef struct guid_map_entry {
778 	uint64_t	guid;
779 	dsl_dataset_t	*gme_ds;
780 	avl_node_t	avlnode;
781 } guid_map_entry_t;
782 
783 static int
784 guid_compare(const void *arg1, const void *arg2)
785 {
786 	const guid_map_entry_t *gmep1 = arg1;
787 	const guid_map_entry_t *gmep2 = arg2;
788 
789 	if (gmep1->guid < gmep2->guid)
790 		return (-1);
791 	else if (gmep1->guid > gmep2->guid)
792 		return (1);
793 	return (0);
794 }
795 
796 /*
797  * This function is a callback used by dmu_objset_find() (which
798  * enumerates the object sets) to build an avl tree that maps guids
799  * to datasets.  The resulting table is used when processing DRR_WRITE_BYREF
800  * send stream records.  These records, which are used in dedup'ed
801  * streams, do not contain data themselves, but refer to a copy
802  * of the data block that has already been written because it was
803  * earlier in the stream.  That previous copy is identified by the
804  * guid of the dataset with the referenced data.
805  */
806 int
807 find_ds_by_guid(const char *name, void *arg)
808 {
809 	avl_tree_t *guid_map = arg;
810 	dsl_dataset_t *ds, *snapds;
811 	guid_map_entry_t *gmep;
812 	dsl_pool_t *dp;
813 	int err;
814 	uint64_t lastobj, firstobj;
815 
816 	if (dsl_dataset_hold(name, FTAG, &ds) != 0)
817 		return (0);
818 
819 	dp = ds->ds_dir->dd_pool;
820 	rw_enter(&dp->dp_config_rwlock, RW_READER);
821 	firstobj = ds->ds_dir->dd_phys->dd_origin_obj;
822 	lastobj = ds->ds_phys->ds_prev_snap_obj;
823 
824 	while (lastobj != firstobj) {
825 		err = dsl_dataset_hold_obj(dp, lastobj, guid_map, &snapds);
826 		if (err) {
827 			/*
828 			 * Skip this snapshot and move on. It's not
829 			 * clear why this would ever happen, but the
830 			 * remainder of the snapshot stream can be
831 			 * processed.
832 			 */
833 			rw_exit(&dp->dp_config_rwlock);
834 			dsl_dataset_rele(ds, FTAG);
835 			return (0);
836 		}
837 
838 		gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
839 		gmep->guid = snapds->ds_phys->ds_guid;
840 		gmep->gme_ds = snapds;
841 		avl_add(guid_map, gmep);
842 		lastobj = snapds->ds_phys->ds_prev_snap_obj;
843 	}
844 
845 	rw_exit(&dp->dp_config_rwlock);
846 	dsl_dataset_rele(ds, FTAG);
847 
848 	return (0);
849 }
850 
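/*
 * Read the next len bytes of the stream into ra->buf, looping over
 * short reads and folding the data into the running checksum.  Returns
 * NULL with ra->err set on error or truncation; len must be a multiple
 * of 8.
 */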
851 static void *
852 restore_read(struct restorearg *ra, int len)
853 {
854 	void *rv;
855 	int done = 0;
856 
857 	/* some things will require 8-byte alignment, so everything must be aligned */
858 	ASSERT3U(len % 8, ==, 0);
859 
860 	while (done < len) {
861 		ssize_t resid;
862 
863 		ra->err = vn_rdwr(UIO_READ, ra->vp,
864 		    (caddr_t)ra->buf + done, len - done,
865 		    ra->voff, UIO_SYSSPACE, FAPPEND,
866 		    RLIM64_INFINITY, CRED(), &resid);
867 
868 		if (resid == len - done)
869 			ra->err = EINVAL;
870 		ra->voff += len - done - resid;
871 		done = len - resid;
872 		if (ra->err)
873 			return (NULL);
874 	}
875 
876 	ASSERT3U(done, ==, len);
877 	rv = ra->buf;
878 	if (ra->byteswap)
879 		fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
880 	else
881 		fletcher_4_incremental_native(rv, len, &ra->cksum);
882 	return (rv);
883 }
884 
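/*
 * Byteswap a replay record in place, field by field according to its
 * type, for streams written on a host of the opposite endianness (as
 * detected from drr_magic in the DRR_BEGIN record).
 */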
885 static void
886 backup_byteswap(dmu_replay_record_t *drr)
887 {
888 #define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
889 #define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
890 	drr->drr_type = BSWAP_32(drr->drr_type);
891 	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
892 	switch (drr->drr_type) {
893 	case DRR_BEGIN:
894 		DO64(drr_begin.drr_magic);
895 		DO64(drr_begin.drr_versioninfo);
896 		DO64(drr_begin.drr_creation_time);
897 		DO32(drr_begin.drr_type);
898 		DO32(drr_begin.drr_flags);
899 		DO64(drr_begin.drr_toguid);
900 		DO64(drr_begin.drr_fromguid);
901 		break;
902 	case DRR_OBJECT:
903 		DO64(drr_object.drr_object);
904 		/* DO64(drr_object.drr_allocation_txg); */
905 		DO32(drr_object.drr_type);
906 		DO32(drr_object.drr_bonustype);
907 		DO32(drr_object.drr_blksz);
908 		DO32(drr_object.drr_bonuslen);
909 		DO64(drr_object.drr_toguid);
910 		break;
911 	case DRR_FREEOBJECTS:
912 		DO64(drr_freeobjects.drr_firstobj);
913 		DO64(drr_freeobjects.drr_numobjs);
914 		DO64(drr_freeobjects.drr_toguid);
915 		break;
916 	case DRR_WRITE:
917 		DO64(drr_write.drr_object);
918 		DO32(drr_write.drr_type);
919 		DO64(drr_write.drr_offset);
920 		DO64(drr_write.drr_length);
921 		DO64(drr_write.drr_toguid);
922 		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
923 		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
924 		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
925 		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
926 		DO64(drr_write.drr_key.ddk_prop);
927 		break;
928 	case DRR_WRITE_BYREF:
929 		DO64(drr_write_byref.drr_object);
930 		DO64(drr_write_byref.drr_offset);
931 		DO64(drr_write_byref.drr_length);
932 		DO64(drr_write_byref.drr_toguid);
933 		DO64(drr_write_byref.drr_refguid);
934 		DO64(drr_write_byref.drr_refobject);
935 		DO64(drr_write_byref.drr_refoffset);
936 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
937 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
938 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
939 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
940 		DO64(drr_write_byref.drr_key.ddk_prop);
941 		break;
942 	case DRR_FREE:
943 		DO64(drr_free.drr_object);
944 		DO64(drr_free.drr_offset);
945 		DO64(drr_free.drr_length);
946 		DO64(drr_free.drr_toguid);
947 		break;
948 	case DRR_SPILL:
949 		DO64(drr_spill.drr_object);
950 		DO64(drr_spill.drr_length);
951 		DO64(drr_spill.drr_toguid);
952 		break;
953 	case DRR_END:
954 		DO64(drr_end.drr_checksum.zc_word[0]);
955 		DO64(drr_end.drr_checksum.zc_word[1]);
956 		DO64(drr_end.drr_checksum.zc_word[2]);
957 		DO64(drr_end.drr_checksum.zc_word[3]);
958 		DO64(drr_end.drr_toguid);
959 		break;
960 	}
961 #undef DO64
962 #undef DO32
963 }
964 
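/*
 * Apply a DRR_OBJECT record: validate it, claim the object number if it
 * is currently free (or reclaim it if allocated), set the checksum and
 * compression properties, and install the bonus buffer carried in the
 * stream.
 */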
965 static int
966 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
967 {
968 	int err;
969 	dmu_tx_t *tx;
970 	void *data = NULL;
971 
972 	if (drro->drr_type == DMU_OT_NONE ||
973 	    drro->drr_type >= DMU_OT_NUMTYPES ||
974 	    drro->drr_bonustype >= DMU_OT_NUMTYPES ||
975 	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
976 	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
977 	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
978 	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
979 	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
980 	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
981 		return (EINVAL);
982 	}
983 
984 	err = dmu_object_info(os, drro->drr_object, NULL);
985 
986 	if (err != 0 && err != ENOENT)
987 		return (EINVAL);
988 
989 	if (drro->drr_bonuslen) {
990 		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
991 		if (ra->err)
992 			return (ra->err);
993 	}
994 
995 	if (err == ENOENT) {
996 		/* currently free, want to be allocated */
997 		tx = dmu_tx_create(os);
998 		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
999 		err = dmu_tx_assign(tx, TXG_WAIT);
1000 		if (err) {
1001 			dmu_tx_abort(tx);
1002 			return (err);
1003 		}
1004 		err = dmu_object_claim(os, drro->drr_object,
1005 		    drro->drr_type, drro->drr_blksz,
1006 		    drro->drr_bonustype, drro->drr_bonuslen, tx);
1007 		dmu_tx_commit(tx);
1008 	} else {
1009 		/* currently allocated, want to be allocated */
1010 		err = dmu_object_reclaim(os, drro->drr_object,
1011 		    drro->drr_type, drro->drr_blksz,
1012 		    drro->drr_bonustype, drro->drr_bonuslen);
1013 	}
1014 	if (err) {
1015 		return (EINVAL);
1016 	}
1017 
1018 	tx = dmu_tx_create(os);
1019 	dmu_tx_hold_bonus(tx, drro->drr_object);
1020 	err = dmu_tx_assign(tx, TXG_WAIT);
1021 	if (err) {
1022 		dmu_tx_abort(tx);
1023 		return (err);
1024 	}
1025 
1026 	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1027 	    tx);
1028 	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1029 
1030 	if (data != NULL) {
1031 		dmu_buf_t *db;
1032 
1033 		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1034 		dmu_buf_will_dirty(db, tx);
1035 
1036 		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1037 		bcopy(data, db->db_data, drro->drr_bonuslen);
1038 		if (ra->byteswap) {
1039 			dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data,
1040 			    drro->drr_bonuslen);
1041 		}
1042 		dmu_buf_rele(db, FTAG);
1043 	}
1044 	dmu_tx_commit(tx);
1045 	return (0);
1046 }
1047 
1048 /* ARGSUSED */
1049 static int
1050 restore_freeobjects(struct restorearg *ra, objset_t *os,
1051     struct drr_freeobjects *drrfo)
1052 {
1053 	uint64_t obj;
1054 
1055 	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1056 		return (EINVAL);
1057 
1058 	for (obj = drrfo->drr_firstobj;
1059 	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1060 	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
1061 		int err;
1062 
1063 		if (dmu_object_info(os, obj, NULL) != 0)
1064 			continue;
1065 
1066 		err = dmu_free_object(os, obj);
1067 		if (err)
1068 			return (err);
1069 	}
1070 	return (0);
1071 }
1072 
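/*
 * Apply a DRR_WRITE record by reading its payload from the stream and
 * writing it to the target object at the recorded offset.
 */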
1073 static int
1074 restore_write(struct restorearg *ra, objset_t *os,
1075     struct drr_write *drrw)
1076 {
1077 	dmu_tx_t *tx;
1078 	void *data;
1079 	int err;
1080 
1081 	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1082 	    drrw->drr_type >= DMU_OT_NUMTYPES)
1083 		return (EINVAL);
1084 
1085 	data = restore_read(ra, drrw->drr_length);
1086 	if (data == NULL)
1087 		return (ra->err);
1088 
1089 	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1090 		return (EINVAL);
1091 
1092 	tx = dmu_tx_create(os);
1093 
1094 	dmu_tx_hold_write(tx, drrw->drr_object,
1095 	    drrw->drr_offset, drrw->drr_length);
1096 	err = dmu_tx_assign(tx, TXG_WAIT);
1097 	if (err) {
1098 		dmu_tx_abort(tx);
1099 		return (err);
1100 	}
1101 	if (ra->byteswap)
1102 		dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length);
1103 	dmu_write(os, drrw->drr_object,
1104 	    drrw->drr_offset, drrw->drr_length, data, tx);
1105 	dmu_tx_commit(tx);
1106 	return (0);
1107 }
1108 
1109 /*
1110  * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
1111  * streams to refer to a copy of the data that is already on the
1112  * system because it came in earlier in the stream.  This function
1113  * finds the earlier copy of the data, and uses that copy instead of
1114  * data from the stream to fulfill this write.
1115  */
1116 static int
1117 restore_write_byref(struct restorearg *ra, objset_t *os,
1118     struct drr_write_byref *drrwbr)
1119 {
1120 	dmu_tx_t *tx;
1121 	int err;
1122 	guid_map_entry_t gmesrch;
1123 	guid_map_entry_t *gmep;
1124 	avl_index_t	where;
1125 	objset_t *ref_os = NULL;
1126 	dmu_buf_t *dbp;
1127 
1128 	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1129 		return (EINVAL);
1130 
1131 	/*
1132 	 * If the GUID of the referenced dataset is different from the
1133 	 * GUID of the target dataset, find the referenced dataset.
1134 	 */
1135 	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1136 		gmesrch.guid = drrwbr->drr_refguid;
1137 		if ((gmep = avl_find(&ra->guid_to_ds_map, &gmesrch,
1138 		    &where)) == NULL) {
1139 			return (EINVAL);
1140 		}
1141 		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1142 			return (EINVAL);
1143 	} else {
1144 		ref_os = os;
1145 	}
1146 
1147 	if ((err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1148 	    drrwbr->drr_refoffset, FTAG, &dbp)) != 0)
1149 		return (err);
1150 
1151 	tx = dmu_tx_create(os);
1152 
1153 	dmu_tx_hold_write(tx, drrwbr->drr_object,
1154 	    drrwbr->drr_offset, drrwbr->drr_length);
1155 	err = dmu_tx_assign(tx, TXG_WAIT);
1156 	if (err) {
1157 		dmu_tx_abort(tx);
1158 		return (err);
1159 	}
1160 	dmu_write(os, drrwbr->drr_object,
1161 	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1162 	dmu_buf_rele(dbp, FTAG);
1163 	dmu_tx_commit(tx);
1164 	return (0);
1165 }
1166 
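/*
 * Apply a DRR_SPILL record: read the spill payload from the stream,
 * grow the object's spill block if it is too small, and copy the data
 * in.
 */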
1167 static int
1168 restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1169 {
1170 	dmu_tx_t *tx;
1171 	void *data;
1172 	dmu_buf_t *db, *db_spill;
1173 	int err;
1174 
1175 	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1176 	    drrs->drr_length > SPA_MAXBLOCKSIZE)
1177 		return (EINVAL);
1178 
1179 	data = restore_read(ra, drrs->drr_length);
1180 	if (data == NULL)
1181 		return (ra->err);
1182 
1183 	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1184 		return (EINVAL);
1185 
1186 	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1187 	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1188 		dmu_buf_rele(db, FTAG);
1189 		return (err);
1190 	}
1191 
1192 	tx = dmu_tx_create(os);
1193 
1194 	dmu_tx_hold_spill(tx, db->db_object);
1195 
1196 	err = dmu_tx_assign(tx, TXG_WAIT);
1197 	if (err) {
1198 		dmu_buf_rele(db, FTAG);
1199 		dmu_buf_rele(db_spill, FTAG);
1200 		dmu_tx_abort(tx);
1201 		return (err);
1202 	}
1203 	dmu_buf_will_dirty(db_spill, tx);
1204 
1205 	if (db_spill->db_size < drrs->drr_length)
1206 		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1207 		    drrs->drr_length, tx));
1208 	bcopy(data, db_spill->db_data, drrs->drr_length);
1209 
1210 	dmu_buf_rele(db, FTAG);
1211 	dmu_buf_rele(db_spill, FTAG);
1212 
1213 	dmu_tx_commit(tx);
1214 	return (0);
1215 }
1216 
1217 /* ARGSUSED */
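/*
 * Apply a DRR_FREE record; a drr_length of -1ULL means free everything
 * from drr_offset to the end of the object.
 */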
1218 static int
1219 restore_free(struct restorearg *ra, objset_t *os,
1220     struct drr_free *drrf)
1221 {
1222 	int err;
1223 
1224 	if (drrf->drr_length != -1ULL &&
1225 	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1226 		return (EINVAL);
1227 
1228 	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1229 		return (EINVAL);
1230 
1231 	err = dmu_free_long_range(os, drrf->drr_object,
1232 	    drrf->drr_offset, drrf->drr_length);
1233 	return (err);
1234 }
1235 
1236 /*
1237  * NB: callers *must* call dmu_recv_end() if this succeeds.
1238  */
1239 int
1240 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp)
1241 {
1242 	struct restorearg ra = { 0 };
1243 	dmu_replay_record_t *drr;
1244 	objset_t *os;
1245 	zio_cksum_t pcksum;
1246 	guid_map_entry_t *gmep;
1247 	int featureflags;
1248 
1249 	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
1250 		ra.byteswap = TRUE;
1251 
1252 	{
1253 		/* compute checksum of drr_begin record */
1254 		dmu_replay_record_t *drr;
1255 		drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
1256 
1257 		drr->drr_type = DRR_BEGIN;
1258 		drr->drr_u.drr_begin = *drc->drc_drrb;
1259 		if (ra.byteswap) {
1260 			fletcher_4_incremental_byteswap(drr,
1261 			    sizeof (dmu_replay_record_t), &ra.cksum);
1262 		} else {
1263 			fletcher_4_incremental_native(drr,
1264 			    sizeof (dmu_replay_record_t), &ra.cksum);
1265 		}
1266 		kmem_free(drr, sizeof (dmu_replay_record_t));
1267 	}
1268 
1269 	if (ra.byteswap) {
1270 		struct drr_begin *drrb = drc->drc_drrb;
1271 		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
1272 		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
1273 		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
1274 		drrb->drr_type = BSWAP_32(drrb->drr_type);
1275 		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
1276 		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
1277 	}
1278 
1279 	ra.vp = vp;
1280 	ra.voff = *voffp;
1281 	ra.bufsize = 1<<20;
1282 	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1283 
1284 	/* these were verified in dmu_recv_begin */
1285 	ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
1286 	    DMU_SUBSTREAM);
1287 	ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);
1288 
1289 	/*
1290 	 * Open the objset we are modifying.
1291 	 */
1292 	VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0);
1293 
1294 	ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
1295 
1296 	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1297 
1298 	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
1299 	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1300 		avl_create(&ra.guid_to_ds_map, guid_compare,
1301 		    sizeof (guid_map_entry_t),
1302 		    offsetof(guid_map_entry_t, avlnode));
1303 		(void) dmu_objset_find(drc->drc_top_ds, find_ds_by_guid,
1304 		    (void *)&ra.guid_to_ds_map,
1305 		    DS_FIND_CHILDREN);
1306 	}
1307 
1308 	/*
1309 	 * Read records and process them.
1310 	 */
1311 	pcksum = ra.cksum;
1312 	while (ra.err == 0 &&
1313 	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
1314 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
1315 			ra.err = EINTR;
1316 			goto out;
1317 		}
1318 
1319 		if (ra.byteswap)
1320 			backup_byteswap(drr);
1321 
1322 		switch (drr->drr_type) {
1323 		case DRR_OBJECT:
1324 		{
1325 			/*
1326 			 * We need to make a copy of the record header,
1327 			 * because restore_{object,write} may need to
1328 			 * restore_read(), which will invalidate drr.
1329 			 */
1330 			struct drr_object drro = drr->drr_u.drr_object;
1331 			ra.err = restore_object(&ra, os, &drro);
1332 			break;
1333 		}
1334 		case DRR_FREEOBJECTS:
1335 		{
1336 			struct drr_freeobjects drrfo =
1337 			    drr->drr_u.drr_freeobjects;
1338 			ra.err = restore_freeobjects(&ra, os, &drrfo);
1339 			break;
1340 		}
1341 		case DRR_WRITE:
1342 		{
1343 			struct drr_write drrw = drr->drr_u.drr_write;
1344 			ra.err = restore_write(&ra, os, &drrw);
1345 			break;
1346 		}
1347 		case DRR_WRITE_BYREF:
1348 		{
1349 			struct drr_write_byref drrwbr =
1350 			    drr->drr_u.drr_write_byref;
1351 			ra.err = restore_write_byref(&ra, os, &drrwbr);
1352 			break;
1353 		}
1354 		case DRR_FREE:
1355 		{
1356 			struct drr_free drrf = drr->drr_u.drr_free;
1357 			ra.err = restore_free(&ra, os, &drrf);
1358 			break;
1359 		}
1360 		case DRR_END:
1361 		{
1362 			struct drr_end drre = drr->drr_u.drr_end;
1363 			/*
1364 			 * We compare against the *previous* checksum
1365 			 * value, because the stored checksum is of
1366 			 * everything before the DRR_END record.
1367 			 */
1368 			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1369 				ra.err = ECKSUM;
1370 			goto out;
1371 		}
1372 		case DRR_SPILL:
1373 		{
1374 			struct drr_spill drrs = drr->drr_u.drr_spill;
1375 			ra.err = restore_spill(&ra, os, &drrs);
1376 			break;
1377 		}
1378 		default:
1379 			ra.err = EINVAL;
1380 			goto out;
1381 		}
1382 		pcksum = ra.cksum;
1383 	}
1384 	ASSERT(ra.err != 0);
1385 
1386 out:
1387 	if (ra.err != 0) {
1388 		/*
1389 		 * destroy what we created, so we don't leave it in the
1390 		 * inconsistent restoring state.
1391 		 */
1392 		txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
1393 
1394 		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1395 		    B_FALSE);
1396 		if (drc->drc_real_ds != drc->drc_logical_ds) {
1397 			mutex_exit(&drc->drc_logical_ds->ds_recvlock);
1398 			dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
1399 		}
1400 	}
1401 
1402 	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1403 		void *cookie = NULL;
1404 
1405 		while ((gmep = avl_destroy_nodes(&ra.guid_to_ds_map, &cookie)) != NULL) {
1406 			dsl_dataset_rele(gmep->gme_ds, &ra.guid_to_ds_map);
1407 			kmem_free(gmep, sizeof (guid_map_entry_t));
1408 		}
1409 		avl_destroy(&ra.guid_to_ds_map);
1410 	}
1411 
1412 	kmem_free(ra.buf, ra.bufsize);
1413 	*voffp = ra.voff;
1414 	return (ra.err);
1415 }
1416 
1417 struct recvendsyncarg {
1418 	char *tosnap;
1419 	uint64_t creation_time;
1420 	uint64_t toguid;
1421 };
1422 
1423 static int
1424 recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
1425 {
1426 	dsl_dataset_t *ds = arg1;
1427 	struct recvendsyncarg *resa = arg2;
1428 
1429 	return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
1430 }
1431 
1432 static void
1433 recv_end_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1434 {
1435 	dsl_dataset_t *ds = arg1;
1436 	struct recvendsyncarg *resa = arg2;
1437 
1438 	dsl_dataset_snapshot_sync(ds, resa->tosnap, cr, tx);
1439 
1440 	/* set snapshot's creation time and guid */
1441 	dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1442 	ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
1443 	ds->ds_prev->ds_phys->ds_guid = resa->toguid;
1444 	ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1445 
1446 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1447 	ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1448 }
1449 
1450 static int
1451 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1452 {
1453 	struct recvendsyncarg resa;
1454 	dsl_dataset_t *ds = drc->drc_logical_ds;
1455 	int err;
1456 
1457 	/*
1458 	 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1459 	 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1460 	 * can close it.
1461 	 */
1462 	txg_wait_synced(ds->ds_dir->dd_pool, 0);
1463 
1464 	if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
1465 		err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
1466 		    drc->drc_force);
1467 		if (err)
1468 			goto out;
1469 	} else {
1470 		mutex_exit(&ds->ds_recvlock);
1471 		dsl_dataset_rele(ds, dmu_recv_tag);
1472 		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1473 		    B_FALSE);
1474 		return (EBUSY);
1475 	}
1476 
1477 	resa.creation_time = drc->drc_drrb->drr_creation_time;
1478 	resa.toguid = drc->drc_drrb->drr_toguid;
1479 	resa.tosnap = drc->drc_tosnap;
1480 
1481 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1482 	    recv_end_check, recv_end_sync, ds, &resa, 3);
1483 	if (err) {
1484 		/* swap back */
1485 		(void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
1486 	}
1487 
1488 out:
1489 	mutex_exit(&ds->ds_recvlock);
1490 	dsl_dataset_disown(ds, dmu_recv_tag);
1491 	(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
1492 	return (err);
1493 }
1494 
1495 static int
1496 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1497 {
1498 	struct recvendsyncarg resa;
1499 	dsl_dataset_t *ds = drc->drc_logical_ds;
1500 	int err;
1501 
1502 	/*
1503 	 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1504 	 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1505 	 * can close it.
1506 	 */
1507 	txg_wait_synced(ds->ds_dir->dd_pool, 0);
1508 
1509 	resa.creation_time = drc->drc_drrb->drr_creation_time;
1510 	resa.toguid = drc->drc_drrb->drr_toguid;
1511 	resa.tosnap = drc->drc_tosnap;
1512 
1513 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1514 	    recv_end_check, recv_end_sync, ds, &resa, 3);
1515 	if (err) {
1516 		/* clean up the fs we just recv'd into */
1517 		(void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
1518 	} else {
1519 		/* release the hold from dmu_recv_begin */
1520 		dsl_dataset_disown(ds, dmu_recv_tag);
1521 	}
1522 	return (err);
1523 }
1524 
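/*
 * Finish a receive: snapshot the received data and clear the
 * inconsistent flag.  A receive into an existing filesystem first swaps
 * the temporary clone's contents into place.
 */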
1525 int
1526 dmu_recv_end(dmu_recv_cookie_t *drc)
1527 {
1528 	if (drc->drc_logical_ds != drc->drc_real_ds)
1529 		return (dmu_recv_existing_end(drc));
1530 	else
1531 		return (dmu_recv_new_end(drc));
1532 }
1533