xref: /titanic_51/usr/src/uts/common/fs/zfs/dmu_send.c (revision 6d0f2021fe1487a6c87087b48f3f0924f81b9859)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26  */
27 
28 #include <sys/dmu.h>
29 #include <sys/dmu_impl.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/dbuf.h>
32 #include <sys/dnode.h>
33 #include <sys/zfs_context.h>
34 #include <sys/dmu_objset.h>
35 #include <sys/dmu_traverse.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dsl_prop.h>
39 #include <sys/dsl_pool.h>
40 #include <sys/dsl_synctask.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/zap.h>
43 #include <sys/zio_checksum.h>
44 #include <sys/zfs_znode.h>
45 #include <zfs_fletcher.h>
46 #include <sys/avl.h>
47 #include <sys/ddt.h>
48 #include <sys/zfs_onexit.h>
49 
50 static char *dmu_recv_tag = "dmu_recv_tag";
51 
52 /*
53  * The types of stream record whose emission can be left pending from
54  * one call to backup_cb to another.  Multiple calls to dump_free() and
55  * dump_freeobjects() can be aggregated into a single DRR_FREE or
56  * DRR_FREEOBJECTS replay record.
57  */
58 typedef enum {
59 	PENDING_NONE,
60 	PENDING_FREE,
61 	PENDING_FREEOBJECTS
62 } pendop_t;
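
/*
 * For example, three consecutive dump_free() calls for object 5 covering
 * offsets 0-8K, 8K-16K and 16K-24K are merged into a single pending
 * DRR_FREE record spanning 0-24K; the record is only written to the
 * stream when a record of a different type (or a non-adjacent free)
 * comes along, or when the stream is finished.
 */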
63 
64 struct backuparg {
65 	dmu_replay_record_t *drr;
66 	vnode_t *vp;
67 	offset_t *off;
68 	objset_t *os;
69 	zio_cksum_t zc;
70 	uint64_t toguid;
71 	int err;
72 	pendop_t pending_op;
73 };
74 
75 static int
76 dump_bytes(struct backuparg *ba, void *buf, int len)
77 {
78 	ssize_t resid; /* have to get resid to get detailed errno */
79 	ASSERT3U(len % 8, ==, 0);
80 
81 	fletcher_4_incremental_native(buf, len, &ba->zc);
82 	ba->err = vn_rdwr(UIO_WRITE, ba->vp,
83 	    (caddr_t)buf, len,
84 	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
85 	*ba->off += len;
86 	return (ba->err);
87 }
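
/*
 * Every record header and payload goes through dump_bytes(), so ba->zc
 * always holds the fletcher-4 checksum of the stream written so far;
 * dmu_sendbackup() copies it into the DRR_END record.
 */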
88 
89 static int
90 dump_free(struct backuparg *ba, uint64_t object, uint64_t offset,
91     uint64_t length)
92 {
93 	struct drr_free *drrf = &(ba->drr->drr_u.drr_free);
94 
95 	/*
96 	 * If there is a pending op, but it's not PENDING_FREE, push it out,
97 	 * since free block aggregation can only be done for blocks of the
98 	 * same type (i.e., DRR_FREE records can only be aggregated with
99  * other DRR_FREE records; DRR_FREEOBJECTS records can only be
100  * aggregated with other DRR_FREEOBJECTS records).
101 	 */
102 	if (ba->pending_op != PENDING_NONE && ba->pending_op != PENDING_FREE) {
103 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
104 			return (EINTR);
105 		ba->pending_op = PENDING_NONE;
106 	}
107 
108 	if (ba->pending_op == PENDING_FREE) {
109 		/*
110 		 * There should never be a PENDING_FREE if length is -1
111 		 * (because dump_dnode is the only place where this
112 		 * function is called with a -1, and only after flushing
113 		 * any pending record).
114 		 */
115 		ASSERT(length != -1ULL);
116 		/*
117 		 * Check to see whether this free block can be aggregated
118  * with the pending one.
119 		 */
120 		if (drrf->drr_object == object && drrf->drr_offset +
121 		    drrf->drr_length == offset) {
122 			drrf->drr_length += length;
123 			return (0);
124 		} else {
125 			/* not a continuation.  Push out pending record */
126 			if (dump_bytes(ba, ba->drr,
127 			    sizeof (dmu_replay_record_t)) != 0)
128 				return (EINTR);
129 			ba->pending_op = PENDING_NONE;
130 		}
131 	}
132 	/* create a FREE record and make it pending */
133 	bzero(ba->drr, sizeof (dmu_replay_record_t));
134 	ba->drr->drr_type = DRR_FREE;
135 	drrf->drr_object = object;
136 	drrf->drr_offset = offset;
137 	drrf->drr_length = length;
138 	drrf->drr_toguid = ba->toguid;
139 	if (length == -1ULL) {
140 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
141 			return (EINTR);
142 	} else {
143 		ba->pending_op = PENDING_FREE;
144 	}
145 
146 	return (0);
147 }
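
/*
 * A length of -1ULL means "free from offset to the end of the object"
 * (used by dump_dnode() below); such records are written out immediately
 * rather than left pending for aggregation.
 */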
148 
149 static int
150 dump_data(struct backuparg *ba, dmu_object_type_t type,
151     uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
152 {
153 	struct drr_write *drrw = &(ba->drr->drr_u.drr_write);
154 
155 
156 	/*
157 	 * If there is any kind of pending aggregation (currently either
158 	 * a grouping of free objects or free blocks), push it out to
159 	 * the stream, since aggregation can't be done across operations
160 	 * of different types.
161 	 */
162 	if (ba->pending_op != PENDING_NONE) {
163 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
164 			return (EINTR);
165 		ba->pending_op = PENDING_NONE;
166 	}
167 	/* write a DATA record */
168 	bzero(ba->drr, sizeof (dmu_replay_record_t));
169 	ba->drr->drr_type = DRR_WRITE;
170 	drrw->drr_object = object;
171 	drrw->drr_type = type;
172 	drrw->drr_offset = offset;
173 	drrw->drr_length = blksz;
174 	drrw->drr_toguid = ba->toguid;
175 	drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
176 	if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
177 		drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
178 	DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
179 	DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
180 	DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
181 	drrw->drr_key.ddk_cksum = bp->blk_cksum;
182 
183 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
184 		return (EINTR);
185 	if (dump_bytes(ba, data, blksz) != 0)
186 		return (EINTR);
187 	return (0);
188 }
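
/*
 * Each DRR_WRITE record is immediately followed by its blksz bytes of
 * payload.  The drr_key fields copied from the block pointer record the
 * block's checksum, sizes and compression; when the checksum function is
 * dedup-capable (DRR_CHECKSUM_DEDUP), dedup-aware processing of the
 * stream can use them to replace duplicate writes with DRR_WRITE_BYREF
 * records, which restore_write_byref() below handles on receive.
 */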
189 
190 static int
191 dump_spill(struct backuparg *ba, uint64_t object, int blksz, void *data)
192 {
193 	struct drr_spill *drrs = &(ba->drr->drr_u.drr_spill);
194 
195 	if (ba->pending_op != PENDING_NONE) {
196 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
197 			return (EINTR);
198 		ba->pending_op = PENDING_NONE;
199 	}
200 
201 	/* write a SPILL record */
202 	bzero(ba->drr, sizeof (dmu_replay_record_t));
203 	ba->drr->drr_type = DRR_SPILL;
204 	drrs->drr_object = object;
205 	drrs->drr_length = blksz;
206 	drrs->drr_toguid = ba->toguid;
207 
208 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
209 		return (EINTR);
210 	if (dump_bytes(ba, data, blksz))
211 		return (EINTR);
212 	return (0);
213 }
214 
215 static int
216 dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs)
217 {
218 	struct drr_freeobjects *drrfo = &(ba->drr->drr_u.drr_freeobjects);
219 
220 	/*
221 	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
222 	 * push it out, since free block aggregation can only be done for
223 	 * blocks of the same type (i.e., DRR_FREE records can only be
224  * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
225  * can only be aggregated with other DRR_FREEOBJECTS records).
226 	 */
227 	if (ba->pending_op != PENDING_NONE &&
228 	    ba->pending_op != PENDING_FREEOBJECTS) {
229 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
230 			return (EINTR);
231 		ba->pending_op = PENDING_NONE;
232 	}
233 	if (ba->pending_op == PENDING_FREEOBJECTS) {
234 		/*
235 		 * See whether this free object array can be aggregated
236 		 * with pending one
237 		 */
238 		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
239 			drrfo->drr_numobjs += numobjs;
240 			return (0);
241 		} else {
242 			/* can't be aggregated.  Push out pending record */
243 			if (dump_bytes(ba, ba->drr,
244 			    sizeof (dmu_replay_record_t)) != 0)
245 				return (EINTR);
246 			ba->pending_op = PENDING_NONE;
247 		}
248 	}
249 
250 	/* write a FREEOBJECTS record */
251 	bzero(ba->drr, sizeof (dmu_replay_record_t));
252 	ba->drr->drr_type = DRR_FREEOBJECTS;
253 	drrfo->drr_firstobj = firstobj;
254 	drrfo->drr_numobjs = numobjs;
255 	drrfo->drr_toguid = ba->toguid;
256 
257 	ba->pending_op = PENDING_FREEOBJECTS;
258 
259 	return (0);
260 }
261 
262 static int
263 dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp)
264 {
265 	struct drr_object *drro = &(ba->drr->drr_u.drr_object);
266 
267 	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
268 		return (dump_freeobjects(ba, object, 1));
269 
270 	if (ba->pending_op != PENDING_NONE) {
271 		if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
272 			return (EINTR);
273 		ba->pending_op = PENDING_NONE;
274 	}
275 
276 	/* write an OBJECT record */
277 	bzero(ba->drr, sizeof (dmu_replay_record_t));
278 	ba->drr->drr_type = DRR_OBJECT;
279 	drro->drr_object = object;
280 	drro->drr_type = dnp->dn_type;
281 	drro->drr_bonustype = dnp->dn_bonustype;
282 	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
283 	drro->drr_bonuslen = dnp->dn_bonuslen;
284 	drro->drr_checksumtype = dnp->dn_checksum;
285 	drro->drr_compress = dnp->dn_compress;
286 	drro->drr_toguid = ba->toguid;
287 
288 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
289 		return (EINTR);
290 
291 	if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
292 		return (EINTR);
293 
294 	/* free anything past the end of the file */
295 	if (dump_free(ba, object, (dnp->dn_maxblkid + 1) *
296 	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
297 		return (EINTR);
298 	if (ba->err)
299 		return (EINTR);
300 	return (0);
301 }
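
/*
 * dump_dnode() emits a DRR_OBJECT record describing the dnode, then the
 * bonus buffer rounded up to a multiple of 8 bytes, and finally a
 * DRR_FREE with length -1ULL so the receiver frees anything past the
 * object's last block (dn_maxblkid).
 */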
302 
303 #define	BP_SPAN(dnp, level) \
304 	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
305 	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
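
/*
 * BP_SPAN(dnp, level) is the number of bytes of the object covered by a
 * single block pointer at the given indirection level: the data block
 * size multiplied once per level by the indirect-block fanout.
 */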
306 
307 /* ARGSUSED */
308 static int
309 backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
310     const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
311 {
312 	struct backuparg *ba = arg;
313 	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
314 	int err = 0;
315 
316 	if (issig(JUSTLOOKING) && issig(FORREAL))
317 		return (EINTR);
318 
319 	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
320 	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
321 		return (0);
322 	} else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
323 		uint64_t span = BP_SPAN(dnp, zb->zb_level);
324 		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
325 		err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT);
326 	} else if (bp == NULL) {
327 		uint64_t span = BP_SPAN(dnp, zb->zb_level);
328 		err = dump_free(ba, zb->zb_object, zb->zb_blkid * span, span);
329 	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
330 		return (0);
331 	} else if (type == DMU_OT_DNODE) {
332 		dnode_phys_t *blk;
333 		int i;
334 		int blksz = BP_GET_LSIZE(bp);
335 		uint32_t aflags = ARC_WAIT;
336 		arc_buf_t *abuf;
337 
338 		if (dsl_read(NULL, spa, bp, pbuf,
339 		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
340 		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
341 			return (EIO);
342 
343 		blk = abuf->b_data;
344 		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
345 			uint64_t dnobj = (zb->zb_blkid <<
346 			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
347 			err = dump_dnode(ba, dnobj, blk+i);
348 			if (err)
349 				break;
350 		}
351 		(void) arc_buf_remove_ref(abuf, &abuf);
352 	} else if (type == DMU_OT_SA) {
353 		uint32_t aflags = ARC_WAIT;
354 		arc_buf_t *abuf;
355 		int blksz = BP_GET_LSIZE(bp);
356 
357 		if (arc_read_nolock(NULL, spa, bp,
358 		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
359 		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
360 			return (EIO);
361 
362 		err = dump_spill(ba, zb->zb_object, blksz, abuf->b_data);
363 		(void) arc_buf_remove_ref(abuf, &abuf);
364 	} else { /* it's a level-0 block of a regular object */
365 		uint32_t aflags = ARC_WAIT;
366 		arc_buf_t *abuf;
367 		int blksz = BP_GET_LSIZE(bp);
368 
369 		if (dsl_read(NULL, spa, bp, pbuf,
370 		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
371 		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
372 			return (EIO);
373 
374 		err = dump_data(ba, type, zb->zb_object, zb->zb_blkid * blksz,
375 		    blksz, bp, abuf->b_data);
376 		(void) arc_buf_remove_ref(abuf, &abuf);
377 	}
378 
379 	ASSERT(err == 0 || err == EINTR);
380 	return (err);
381 }
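
/*
 * backup_cb() is the traverse_dataset() callback that turns the on-disk
 * block tree into stream records: holes in the meta-dnode become
 * DRR_FREEOBJECTS, holes in ordinary objects become DRR_FREE, dnode
 * blocks are decoded into one DRR_OBJECT per dnode, SA spill blocks
 * become DRR_SPILL, and level-0 data blocks become DRR_WRITE.  Special
 * metadata objects, indirect blocks and the objset block are skipped.
 */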
382 
383 int
384 dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
385     vnode_t *vp, offset_t *off)
386 {
387 	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
388 	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
389 	dmu_replay_record_t *drr;
390 	struct backuparg ba;
391 	int err;
392 	uint64_t fromtxg = 0;
393 
394 	/* tosnap must be a snapshot */
395 	if (ds->ds_phys->ds_next_snap_obj == 0)
396 		return (EINVAL);
397 
398 	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
399 	if (fromds && (ds->ds_dir != fromds->ds_dir ||
400 	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
401 		return (EXDEV);
402 
403 	if (fromorigin) {
404 		dsl_pool_t *dp = ds->ds_dir->dd_pool;
405 
406 		if (fromsnap)
407 			return (EINVAL);
408 
409 		if (dsl_dir_is_clone(ds->ds_dir)) {
410 			rw_enter(&dp->dp_config_rwlock, RW_READER);
411 			err = dsl_dataset_hold_obj(dp,
412 			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
413 			rw_exit(&dp->dp_config_rwlock);
414 			if (err)
415 				return (err);
416 		} else {
417 			fromorigin = B_FALSE;
418 		}
419 	}
420 
421 
422 	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
423 	drr->drr_type = DRR_BEGIN;
424 	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
425 	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
426 	    DMU_SUBSTREAM);
427 
428 #ifdef _KERNEL
429 	if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
430 		uint64_t version;
431 		if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0)
432 			return (EINVAL);
433 		if (version == ZPL_VERSION_SA) {
434 			DMU_SET_FEATUREFLAGS(
435 			    drr->drr_u.drr_begin.drr_versioninfo,
436 			    DMU_BACKUP_FEATURE_SA_SPILL);
437 		}
438 	}
439 #endif
440 
441 	drr->drr_u.drr_begin.drr_creation_time =
442 	    ds->ds_phys->ds_creation_time;
443 	drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
444 	if (fromorigin)
445 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
446 	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
447 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
448 		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
449 
450 	if (fromds)
451 		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
452 	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
453 
454 	if (fromds)
455 		fromtxg = fromds->ds_phys->ds_creation_txg;
456 	if (fromorigin)
457 		dsl_dataset_rele(fromds, FTAG);
458 
459 	ba.drr = drr;
460 	ba.vp = vp;
461 	ba.os = tosnap;
462 	ba.off = off;
463 	ba.toguid = ds->ds_phys->ds_guid;
464 	ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0);
465 	ba.pending_op = PENDING_NONE;
466 
467 	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) {
468 		kmem_free(drr, sizeof (dmu_replay_record_t));
469 		return (ba.err);
470 	}
471 
472 	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
473 	    backup_cb, &ba);
474 
475 	if (ba.pending_op != PENDING_NONE)
476 		if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0)
477 			err = EINTR;
478 
479 	if (err) {
480 		if (err == EINTR && ba.err)
481 			err = ba.err;
482 		kmem_free(drr, sizeof (dmu_replay_record_t));
483 		return (err);
484 	}
485 
486 	bzero(drr, sizeof (dmu_replay_record_t));
487 	drr->drr_type = DRR_END;
488 	drr->drr_u.drr_end.drr_checksum = ba.zc;
489 	drr->drr_u.drr_end.drr_toguid = ba.toguid;
490 
491 	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) {
492 		kmem_free(drr, sizeof (dmu_replay_record_t));
493 		return (ba.err);
494 	}
495 
496 	kmem_free(drr, sizeof (dmu_replay_record_t));
497 
498 	return (0);
499 }
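
/*
 * The stream produced above is framed as a DRR_BEGIN record, followed by
 * the DRR_OBJECT/DRR_FREEOBJECTS/DRR_WRITE/DRR_SPILL/DRR_FREE records
 * emitted by backup_cb(), and closed by a DRR_END record carrying the
 * fletcher-4 checksum of everything before it.  Typically the send ioctl
 * path in zfs_ioctl.c calls this with vp referring to the file descriptor
 * that "zfs send" is writing to and *off tracking the bytes written.
 */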
500 
501 struct recvbeginsyncarg {
502 	const char *tofs;
503 	const char *tosnap;
504 	dsl_dataset_t *origin;
505 	uint64_t fromguid;
506 	dmu_objset_type_t type;
507 	void *tag;
508 	boolean_t force;
509 	uint64_t dsflags;
510 	char clonelastname[MAXNAMELEN];
511 	dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
512 	cred_t *cr;
513 };
514 
515 /* ARGSUSED */
516 static int
517 recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx)
518 {
519 	dsl_dir_t *dd = arg1;
520 	struct recvbeginsyncarg *rbsa = arg2;
521 	objset_t *mos = dd->dd_pool->dp_meta_objset;
522 	uint64_t val;
523 	int err;
524 
525 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
526 	    strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
527 
528 	if (err != ENOENT)
529 		return (err ? err : EEXIST);
530 
531 	if (rbsa->origin) {
532 		/* make sure it's a snap in the same pool */
533 		if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
534 			return (EXDEV);
535 		if (!dsl_dataset_is_snapshot(rbsa->origin))
536 			return (EINVAL);
537 		if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
538 			return (ENODEV);
539 	}
540 
541 	return (0);
542 }
543 
544 static void
545 recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
546 {
547 	dsl_dir_t *dd = arg1;
548 	struct recvbeginsyncarg *rbsa = arg2;
549 	uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
550 	uint64_t dsobj;
551 
552 	/* Create and open new dataset. */
553 	dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
554 	    rbsa->origin, flags, rbsa->cr, tx);
555 	VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
556 	    B_TRUE, dmu_recv_tag, &rbsa->ds));
557 
558 	if (rbsa->origin == NULL) {
559 		(void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
560 		    rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
561 	}
562 
563 	spa_history_log_internal(LOG_DS_REPLAY_FULL_SYNC,
564 	    dd->dd_pool->dp_spa, tx, "dataset = %lld", dsobj);
565 }
566 
567 /* ARGSUSED */
568 static int
569 recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
570 {
571 	dsl_dataset_t *ds = arg1;
572 	struct recvbeginsyncarg *rbsa = arg2;
573 	int err;
574 	uint64_t val;
575 
576 	/* must not have any changes since most recent snapshot */
577 	if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
578 		return (ETXTBSY);
579 
580 	/* new snapshot name must not exist */
581 	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
582 	    ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
583 	if (err == 0)
584 		return (EEXIST);
585 	if (err != ENOENT)
586 		return (err);
587 
588 	if (rbsa->fromguid) {
589 		/* if incremental, most recent snapshot must match fromguid */
590 		if (ds->ds_prev == NULL)
591 			return (ENODEV);
592 
593 		/*
594 		 * most recent snapshot must match fromguid, or there are no
595 		 * changes since the fromguid one
596 		 */
597 		if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) {
598 			uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
599 			uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
600 			while (obj != 0) {
601 				dsl_dataset_t *snap;
602 				err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
603 				    obj, FTAG, &snap);
604 				if (err)
605 					return (ENODEV);
606 				if (snap->ds_phys->ds_creation_txg < birth) {
607 					dsl_dataset_rele(snap, FTAG);
608 					return (ENODEV);
609 				}
610 				if (snap->ds_phys->ds_guid == rbsa->fromguid) {
611 					dsl_dataset_rele(snap, FTAG);
612 					break; /* it's ok */
613 				}
614 				obj = snap->ds_phys->ds_prev_snap_obj;
615 				dsl_dataset_rele(snap, FTAG);
616 			}
617 			if (obj == 0)
618 				return (ENODEV);
619 		}
620 	} else {
621 		/* if full, most recent snapshot must be $ORIGIN */
622 		if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
623 			return (ENODEV);
624 	}
625 
626 	/* temporary clone name must not exist */
627 	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
628 	    ds->ds_dir->dd_phys->dd_child_dir_zapobj,
629 	    rbsa->clonelastname, 8, 1, &val);
630 	if (err == 0)
631 		return (EEXIST);
632 	if (err != ENOENT)
633 		return (err);
634 
635 	return (0);
636 }
637 
638 /* ARGSUSED */
639 static void
640 recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
641 {
642 	dsl_dataset_t *ohds = arg1;
643 	struct recvbeginsyncarg *rbsa = arg2;
644 	dsl_pool_t *dp = ohds->ds_dir->dd_pool;
645 	dsl_dataset_t *cds;
646 	uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
647 	uint64_t dsobj;
648 
649 	/* create and open the temporary clone */
650 	dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
651 	    ohds->ds_prev, flags, rbsa->cr, tx);
652 	VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));
653 
654 	/*
655 	 * If we actually created a non-clone, we need to create the
656 	 * objset in our new dataset.
657 	 */
658 	if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
659 		(void) dmu_objset_create_impl(dp->dp_spa,
660 		    cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
661 	}
662 
663 	rbsa->ds = cds;
664 
665 	spa_history_log_internal(LOG_DS_REPLAY_INC_SYNC,
666 	    dp->dp_spa, tx, "dataset = %lld", dsobj);
667 }
668 
669 static boolean_t
670 dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
671 {
672 	int featureflags;
673 
674 	featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
675 
676 	/* Return B_TRUE if stream needs SA_SPILL but pool predates SA */
677 	return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
678 	    (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
679 }
680 
681 /*
682  * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
683  * succeeds; otherwise we will leak the holds on the datasets.
684  */
685 int
686 dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
687     boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc)
688 {
689 	int err = 0;
690 	boolean_t byteswap;
691 	struct recvbeginsyncarg rbsa = { 0 };
692 	uint64_t versioninfo;
693 	int flags;
694 	dsl_dataset_t *ds;
695 
696 	if (drrb->drr_magic == DMU_BACKUP_MAGIC)
697 		byteswap = FALSE;
698 	else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
699 		byteswap = TRUE;
700 	else
701 		return (EINVAL);
702 
703 	rbsa.tofs = tofs;
704 	rbsa.tosnap = tosnap;
705 	rbsa.origin = origin ? origin->os_dsl_dataset : NULL;
706 	rbsa.fromguid = drrb->drr_fromguid;
707 	rbsa.type = drrb->drr_type;
708 	rbsa.tag = FTAG;
709 	rbsa.dsflags = 0;
710 	rbsa.cr = CRED();
711 	versioninfo = drrb->drr_versioninfo;
712 	flags = drrb->drr_flags;
713 
714 	if (byteswap) {
715 		rbsa.type = BSWAP_32(rbsa.type);
716 		rbsa.fromguid = BSWAP_64(rbsa.fromguid);
717 		versioninfo = BSWAP_64(versioninfo);
718 		flags = BSWAP_32(flags);
719 	}
720 
721 	if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM ||
722 	    rbsa.type >= DMU_OST_NUMTYPES ||
723 	    ((flags & DRR_FLAG_CLONE) && origin == NULL))
724 		return (EINVAL);
725 
726 	if (flags & DRR_FLAG_CI_DATA)
727 		rbsa.dsflags = DS_FLAG_CI_DATASET;
728 
729 	bzero(drc, sizeof (dmu_recv_cookie_t));
730 	drc->drc_drrb = drrb;
731 	drc->drc_tosnap = tosnap;
732 	drc->drc_top_ds = top_ds;
733 	drc->drc_force = force;
734 
735 	/*
736 	 * Process the begin in syncing context.
737 	 */
738 
739 	/* open the dataset we are logically receiving into */
740 	err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
741 	if (err == 0) {
742 		if (dmu_recv_verify_features(ds, drrb)) {
743 			dsl_dataset_rele(ds, dmu_recv_tag);
744 			return (ENOTSUP);
745 		}
746 		/* target fs already exists; recv into temp clone */
747 
748 		/* Can't recv a clone into an existing fs */
749 		if (flags & DRR_FLAG_CLONE) {
750 			dsl_dataset_rele(ds, dmu_recv_tag);
751 			return (EINVAL);
752 		}
753 
754 		/* must not have an incremental recv already in progress */
755 		if (!mutex_tryenter(&ds->ds_recvlock)) {
756 			dsl_dataset_rele(ds, dmu_recv_tag);
757 			return (EBUSY);
758 		}
759 
760 		/* tmp clone name is: tofs/%tosnap */
761 		(void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
762 		    "%%%s", tosnap);
763 		rbsa.force = force;
764 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
765 		    recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
766 		if (err) {
767 			mutex_exit(&ds->ds_recvlock);
768 			dsl_dataset_rele(ds, dmu_recv_tag);
769 			return (err);
770 		}
771 		drc->drc_logical_ds = ds;
772 		drc->drc_real_ds = rbsa.ds;
773 	} else if (err == ENOENT) {
774 		/* target fs does not exist; must be a full backup or clone */
775 		char *cp;
776 
777 		/*
778 		 * If it's a non-clone incremental, we are missing the
779 		 * target fs, so fail the recv.
780 		 */
781 		if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
782 			return (ENOENT);
783 
784 		/* Open the parent of tofs */
785 		cp = strrchr(tofs, '/');
786 		*cp = '\0';
787 		err = dsl_dataset_hold(tofs, FTAG, &ds);
788 		*cp = '/';
789 		if (err)
790 			return (err);
791 
792 		if (dmu_recv_verify_features(ds, drrb)) {
793 			dsl_dataset_rele(ds, FTAG);
794 			return (ENOTSUP);
795 		}
796 
797 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
798 		    recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
799 		dsl_dataset_rele(ds, FTAG);
800 		if (err)
801 			return (err);
802 		drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
803 		drc->drc_newfs = B_TRUE;
804 	}
805 
806 	return (err);
807 }
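
/*
 * On success the cookie describes one of two cases: if the target
 * filesystem already exists, the stream is received into a temporary
 * "%<tosnap>" clone (drc_real_ds) while drc_logical_ds points at the
 * existing filesystem; if it does not exist, a brand new dataset is
 * created, drc_logical_ds == drc_real_ds, and drc_newfs is set.
 */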
808 
809 struct restorearg {
810 	int err;
811 	int byteswap;
812 	vnode_t *vp;
813 	char *buf;
814 	uint64_t voff;
815 	int bufsize; /* amount of memory allocated for buf */
816 	zio_cksum_t cksum;
817 	avl_tree_t *guid_to_ds_map;
818 };
819 
820 typedef struct guid_map_entry {
821 	uint64_t	guid;
822 	dsl_dataset_t	*gme_ds;
823 	avl_node_t	avlnode;
824 } guid_map_entry_t;
825 
826 static int
827 guid_compare(const void *arg1, const void *arg2)
828 {
829 	const guid_map_entry_t *gmep1 = arg1;
830 	const guid_map_entry_t *gmep2 = arg2;
831 
832 	if (gmep1->guid < gmep2->guid)
833 		return (-1);
834 	else if (gmep1->guid > gmep2->guid)
835 		return (1);
836 	return (0);
837 }
838 
839 static void
840 free_guid_map_onexit(void *arg)
841 {
842 	avl_tree_t *ca = arg;
843 	void *cookie = NULL;
844 	guid_map_entry_t *gmep;
845 
846 	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
847 		dsl_dataset_rele(gmep->gme_ds, ca);
848 		kmem_free(gmep, sizeof (guid_map_entry_t));
849 	}
850 	avl_destroy(ca);
851 	kmem_free(ca, sizeof (avl_tree_t));
852 }
853 
854 static void *
855 restore_read(struct restorearg *ra, int len)
856 {
857 	void *rv;
858 	int done = 0;
859 
860 	/* some things will require 8-byte alignment, so everything must */
861 	ASSERT3U(len % 8, ==, 0);
862 
863 	while (done < len) {
864 		ssize_t resid;
865 
866 		ra->err = vn_rdwr(UIO_READ, ra->vp,
867 		    (caddr_t)ra->buf + done, len - done,
868 		    ra->voff, UIO_SYSSPACE, FAPPEND,
869 		    RLIM64_INFINITY, CRED(), &resid);
870 
871 		if (resid == len - done)
872 			ra->err = EINVAL;
873 		ra->voff += len - done - resid;
874 		done = len - resid;
875 		if (ra->err)
876 			return (NULL);
877 	}
878 
879 	ASSERT3U(done, ==, len);
880 	rv = ra->buf;
881 	if (ra->byteswap)
882 		fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
883 	else
884 		fletcher_4_incremental_native(rv, len, &ra->cksum);
885 	return (rv);
886 }
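
/*
 * restore_read() returns a pointer into ra->buf, so the data is valid
 * only until the next call; dmu_recv_stream() therefore copies each
 * record header before dispatching it.  Everything read is also folded
 * into ra->cksum, mirroring the checksum the sender accumulated.
 */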
887 
888 static void
889 backup_byteswap(dmu_replay_record_t *drr)
890 {
891 #define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
892 #define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
893 	drr->drr_type = BSWAP_32(drr->drr_type);
894 	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
895 	switch (drr->drr_type) {
896 	case DRR_BEGIN:
897 		DO64(drr_begin.drr_magic);
898 		DO64(drr_begin.drr_versioninfo);
899 		DO64(drr_begin.drr_creation_time);
900 		DO32(drr_begin.drr_type);
901 		DO32(drr_begin.drr_flags);
902 		DO64(drr_begin.drr_toguid);
903 		DO64(drr_begin.drr_fromguid);
904 		break;
905 	case DRR_OBJECT:
906 		DO64(drr_object.drr_object);
907 		/* DO64(drr_object.drr_allocation_txg); */
908 		DO32(drr_object.drr_type);
909 		DO32(drr_object.drr_bonustype);
910 		DO32(drr_object.drr_blksz);
911 		DO32(drr_object.drr_bonuslen);
912 		DO64(drr_object.drr_toguid);
913 		break;
914 	case DRR_FREEOBJECTS:
915 		DO64(drr_freeobjects.drr_firstobj);
916 		DO64(drr_freeobjects.drr_numobjs);
917 		DO64(drr_freeobjects.drr_toguid);
918 		break;
919 	case DRR_WRITE:
920 		DO64(drr_write.drr_object);
921 		DO32(drr_write.drr_type);
922 		DO64(drr_write.drr_offset);
923 		DO64(drr_write.drr_length);
924 		DO64(drr_write.drr_toguid);
925 		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
926 		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
927 		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
928 		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
929 		DO64(drr_write.drr_key.ddk_prop);
930 		break;
931 	case DRR_WRITE_BYREF:
932 		DO64(drr_write_byref.drr_object);
933 		DO64(drr_write_byref.drr_offset);
934 		DO64(drr_write_byref.drr_length);
935 		DO64(drr_write_byref.drr_toguid);
936 		DO64(drr_write_byref.drr_refguid);
937 		DO64(drr_write_byref.drr_refobject);
938 		DO64(drr_write_byref.drr_refoffset);
939 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
940 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
941 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
942 		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
943 		DO64(drr_write_byref.drr_key.ddk_prop);
944 		break;
945 	case DRR_FREE:
946 		DO64(drr_free.drr_object);
947 		DO64(drr_free.drr_offset);
948 		DO64(drr_free.drr_length);
949 		DO64(drr_free.drr_toguid);
950 		break;
951 	case DRR_SPILL:
952 		DO64(drr_spill.drr_object);
953 		DO64(drr_spill.drr_length);
954 		DO64(drr_spill.drr_toguid);
955 		break;
956 	case DRR_END:
957 		DO64(drr_end.drr_checksum.zc_word[0]);
958 		DO64(drr_end.drr_checksum.zc_word[1]);
959 		DO64(drr_end.drr_checksum.zc_word[2]);
960 		DO64(drr_end.drr_checksum.zc_word[3]);
961 		DO64(drr_end.drr_toguid);
962 		break;
963 	}
964 #undef DO64
965 #undef DO32
966 }
967 
968 static int
969 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
970 {
971 	int err;
972 	dmu_tx_t *tx;
973 	void *data = NULL;
974 
975 	if (drro->drr_type == DMU_OT_NONE ||
976 	    drro->drr_type >= DMU_OT_NUMTYPES ||
977 	    drro->drr_bonustype >= DMU_OT_NUMTYPES ||
978 	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
979 	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
980 	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
981 	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
982 	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
983 	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
984 		return (EINVAL);
985 	}
986 
987 	err = dmu_object_info(os, drro->drr_object, NULL);
988 
989 	if (err != 0 && err != ENOENT)
990 		return (EINVAL);
991 
992 	if (drro->drr_bonuslen) {
993 		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
994 		if (ra->err)
995 			return (ra->err);
996 	}
997 
998 	if (err == ENOENT) {
999 		/* currently free, want to be allocated */
1000 		tx = dmu_tx_create(os);
1001 		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1002 		err = dmu_tx_assign(tx, TXG_WAIT);
1003 		if (err) {
1004 			dmu_tx_abort(tx);
1005 			return (err);
1006 		}
1007 		err = dmu_object_claim(os, drro->drr_object,
1008 		    drro->drr_type, drro->drr_blksz,
1009 		    drro->drr_bonustype, drro->drr_bonuslen, tx);
1010 		dmu_tx_commit(tx);
1011 	} else {
1012 		/* currently allocated, want to be allocated */
1013 		err = dmu_object_reclaim(os, drro->drr_object,
1014 		    drro->drr_type, drro->drr_blksz,
1015 		    drro->drr_bonustype, drro->drr_bonuslen);
1016 	}
1017 	if (err) {
1018 		return (EINVAL);
1019 	}
1020 
1021 	tx = dmu_tx_create(os);
1022 	dmu_tx_hold_bonus(tx, drro->drr_object);
1023 	err = dmu_tx_assign(tx, TXG_WAIT);
1024 	if (err) {
1025 		dmu_tx_abort(tx);
1026 		return (err);
1027 	}
1028 
1029 	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1030 	    tx);
1031 	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1032 
1033 	if (data != NULL) {
1034 		dmu_buf_t *db;
1035 
1036 		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1037 		dmu_buf_will_dirty(db, tx);
1038 
1039 		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1040 		bcopy(data, db->db_data, drro->drr_bonuslen);
1041 		if (ra->byteswap) {
1042 			dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data,
1043 			    drro->drr_bonuslen);
1044 		}
1045 		dmu_buf_rele(db, FTAG);
1046 	}
1047 	dmu_tx_commit(tx);
1048 	return (0);
1049 }
1050 
1051 /* ARGSUSED */
1052 static int
1053 restore_freeobjects(struct restorearg *ra, objset_t *os,
1054     struct drr_freeobjects *drrfo)
1055 {
1056 	uint64_t obj;
1057 
1058 	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1059 		return (EINVAL);
1060 
1061 	for (obj = drrfo->drr_firstobj;
1062 	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1063 	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
1064 		int err;
1065 
1066 		if (dmu_object_info(os, obj, NULL) != 0)
1067 			continue;
1068 
1069 		err = dmu_free_object(os, obj);
1070 		if (err)
1071 			return (err);
1072 	}
1073 	return (0);
1074 }
1075 
1076 static int
1077 restore_write(struct restorearg *ra, objset_t *os,
1078     struct drr_write *drrw)
1079 {
1080 	dmu_tx_t *tx;
1081 	void *data;
1082 	int err;
1083 
1084 	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1085 	    drrw->drr_type >= DMU_OT_NUMTYPES)
1086 		return (EINVAL);
1087 
1088 	data = restore_read(ra, drrw->drr_length);
1089 	if (data == NULL)
1090 		return (ra->err);
1091 
1092 	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1093 		return (EINVAL);
1094 
1095 	tx = dmu_tx_create(os);
1096 
1097 	dmu_tx_hold_write(tx, drrw->drr_object,
1098 	    drrw->drr_offset, drrw->drr_length);
1099 	err = dmu_tx_assign(tx, TXG_WAIT);
1100 	if (err) {
1101 		dmu_tx_abort(tx);
1102 		return (err);
1103 	}
1104 	if (ra->byteswap)
1105 		dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length);
1106 	dmu_write(os, drrw->drr_object,
1107 	    drrw->drr_offset, drrw->drr_length, data, tx);
1108 	dmu_tx_commit(tx);
1109 	return (0);
1110 }
1111 
1112 /*
1113  * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
1114  * streams to refer to a copy of the data that is already on the
1115  * system because it came in earlier in the stream.  This function
1116  * finds the earlier copy of the data, and uses that copy instead of
1117  * data from the stream to fulfill this write.
1118  */
1119 static int
1120 restore_write_byref(struct restorearg *ra, objset_t *os,
1121     struct drr_write_byref *drrwbr)
1122 {
1123 	dmu_tx_t *tx;
1124 	int err;
1125 	guid_map_entry_t gmesrch;
1126 	guid_map_entry_t *gmep;
1127 	avl_index_t	where;
1128 	objset_t *ref_os = NULL;
1129 	dmu_buf_t *dbp;
1130 
1131 	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1132 		return (EINVAL);
1133 
1134 	/*
1135 	 * If the GUID of the referenced dataset is different from the
1136 	 * GUID of the target dataset, find the referenced dataset.
1137 	 */
1138 	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1139 		gmesrch.guid = drrwbr->drr_refguid;
1140 		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1141 		    &where)) == NULL) {
1142 			return (EINVAL);
1143 		}
1144 		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1145 			return (EINVAL);
1146 	} else {
1147 		ref_os = os;
1148 	}
1149 
1150 	if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1151 	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
1152 		return (err);
1153 
1154 	tx = dmu_tx_create(os);
1155 
1156 	dmu_tx_hold_write(tx, drrwbr->drr_object,
1157 	    drrwbr->drr_offset, drrwbr->drr_length);
1158 	err = dmu_tx_assign(tx, TXG_WAIT);
1159 	if (err) {
1160 		dmu_tx_abort(tx);
1161 		return (err);
1162 	}
1163 	dmu_write(os, drrwbr->drr_object,
1164 	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1165 	dmu_buf_rele(dbp, FTAG);
1166 	dmu_tx_commit(tx);
1167 	return (0);
1168 }
1169 
1170 static int
1171 restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1172 {
1173 	dmu_tx_t *tx;
1174 	void *data;
1175 	dmu_buf_t *db, *db_spill;
1176 	int err;
1177 
1178 	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1179 	    drrs->drr_length > SPA_MAXBLOCKSIZE)
1180 		return (EINVAL);
1181 
1182 	data = restore_read(ra, drrs->drr_length);
1183 	if (data == NULL)
1184 		return (ra->err);
1185 
1186 	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1187 		return (EINVAL);
1188 
1189 	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1190 	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1191 		dmu_buf_rele(db, FTAG);
1192 		return (err);
1193 	}
1194 
1195 	tx = dmu_tx_create(os);
1196 
1197 	dmu_tx_hold_spill(tx, db->db_object);
1198 
1199 	err = dmu_tx_assign(tx, TXG_WAIT);
1200 	if (err) {
1201 		dmu_buf_rele(db, FTAG);
1202 		dmu_buf_rele(db_spill, FTAG);
1203 		dmu_tx_abort(tx);
1204 		return (err);
1205 	}
1206 	dmu_buf_will_dirty(db_spill, tx);
1207 
1208 	if (db_spill->db_size < drrs->drr_length)
1209 		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1210 		    drrs->drr_length, tx));
1211 	bcopy(data, db_spill->db_data, drrs->drr_length);
1212 
1213 	dmu_buf_rele(db, FTAG);
1214 	dmu_buf_rele(db_spill, FTAG);
1215 
1216 	dmu_tx_commit(tx);
1217 	return (0);
1218 }
1219 
1220 /* ARGSUSED */
1221 static int
1222 restore_free(struct restorearg *ra, objset_t *os,
1223     struct drr_free *drrf)
1224 {
1225 	int err;
1226 
1227 	if (drrf->drr_length != -1ULL &&
1228 	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1229 		return (EINVAL);
1230 
1231 	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1232 		return (EINVAL);
1233 
1234 	err = dmu_free_long_range(os, drrf->drr_object,
1235 	    drrf->drr_offset, drrf->drr_length);
1236 	return (err);
1237 }
1238 
1239 /*
1240  * NB: callers *must* call dmu_recv_end() if this succeeds.
1241  */
1242 int
1243 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
1244     int cleanup_fd, uint64_t *action_handlep)
1245 {
1246 	struct restorearg ra = { 0 };
1247 	dmu_replay_record_t *drr;
1248 	objset_t *os;
1249 	zio_cksum_t pcksum;
1250 	int featureflags;
1251 
1252 	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
1253 		ra.byteswap = TRUE;
1254 
1255 	{
1256 		/* compute checksum of drr_begin record */
1257 		dmu_replay_record_t *drr;
1258 		drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
1259 
1260 		drr->drr_type = DRR_BEGIN;
1261 		drr->drr_u.drr_begin = *drc->drc_drrb;
1262 		if (ra.byteswap) {
1263 			fletcher_4_incremental_byteswap(drr,
1264 			    sizeof (dmu_replay_record_t), &ra.cksum);
1265 		} else {
1266 			fletcher_4_incremental_native(drr,
1267 			    sizeof (dmu_replay_record_t), &ra.cksum);
1268 		}
1269 		kmem_free(drr, sizeof (dmu_replay_record_t));
1270 	}
1271 
1272 	if (ra.byteswap) {
1273 		struct drr_begin *drrb = drc->drc_drrb;
1274 		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
1275 		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
1276 		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
1277 		drrb->drr_type = BSWAP_32(drrb->drr_type);
1278 		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
1279 		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
1280 	}
1281 
1282 	ra.vp = vp;
1283 	ra.voff = *voffp;
1284 	ra.bufsize = 1<<20;
1285 	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1286 
1287 	/* these were verified in dmu_recv_begin */
1288 	ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
1289 	    DMU_SUBSTREAM);
1290 	ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);
1291 
1292 	/*
1293 	 * Open the objset we are modifying.
1294 	 */
1295 	VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0);
1296 
1297 	ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
1298 
1299 	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1300 
1301 	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
1302 	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1303 		minor_t minor;
1304 
1305 		if (cleanup_fd == -1) {
1306 			ra.err = EBADF;
1307 			goto out;
1308 		}
1309 		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1310 		if (ra.err) {
1311 			cleanup_fd = -1;
1312 			goto out;
1313 		}
1314 
1315 		if (*action_handlep == 0) {
1316 			ra.guid_to_ds_map =
1317 			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1318 			avl_create(ra.guid_to_ds_map, guid_compare,
1319 			    sizeof (guid_map_entry_t),
1320 			    offsetof(guid_map_entry_t, avlnode));
1321 			ra.err = zfs_onexit_add_cb(minor,
1322 			    free_guid_map_onexit, ra.guid_to_ds_map,
1323 			    action_handlep);
1324 			if (ra.err)
1325 				goto out;
1326 		} else {
1327 			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1328 			    (void **)&ra.guid_to_ds_map);
1329 			if (ra.err)
1330 				goto out;
1331 		}
1332 
1333 		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1334 	}
1335 
1336 	/*
1337 	 * Read records and process them.
1338 	 */
1339 	pcksum = ra.cksum;
1340 	while (ra.err == 0 &&
1341 	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
1342 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
1343 			ra.err = EINTR;
1344 			goto out;
1345 		}
1346 
1347 		if (ra.byteswap)
1348 			backup_byteswap(drr);
1349 
1350 		switch (drr->drr_type) {
1351 		case DRR_OBJECT:
1352 		{
1353 			/*
1354 			 * We need to make a copy of the record header,
1355 			 * because restore_{object,write} may need to
1356 			 * restore_read(), which will invalidate drr.
1357 			 */
1358 			struct drr_object drro = drr->drr_u.drr_object;
1359 			ra.err = restore_object(&ra, os, &drro);
1360 			break;
1361 		}
1362 		case DRR_FREEOBJECTS:
1363 		{
1364 			struct drr_freeobjects drrfo =
1365 			    drr->drr_u.drr_freeobjects;
1366 			ra.err = restore_freeobjects(&ra, os, &drrfo);
1367 			break;
1368 		}
1369 		case DRR_WRITE:
1370 		{
1371 			struct drr_write drrw = drr->drr_u.drr_write;
1372 			ra.err = restore_write(&ra, os, &drrw);
1373 			break;
1374 		}
1375 		case DRR_WRITE_BYREF:
1376 		{
1377 			struct drr_write_byref drrwbr =
1378 			    drr->drr_u.drr_write_byref;
1379 			ra.err = restore_write_byref(&ra, os, &drrwbr);
1380 			break;
1381 		}
1382 		case DRR_FREE:
1383 		{
1384 			struct drr_free drrf = drr->drr_u.drr_free;
1385 			ra.err = restore_free(&ra, os, &drrf);
1386 			break;
1387 		}
1388 		case DRR_END:
1389 		{
1390 			struct drr_end drre = drr->drr_u.drr_end;
1391 			/*
1392 			 * We compare against the *previous* checksum
1393 			 * value, because the stored checksum is of
1394 			 * everything before the DRR_END record.
1395 			 */
1396 			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1397 				ra.err = ECKSUM;
1398 			goto out;
1399 		}
1400 		case DRR_SPILL:
1401 		{
1402 			struct drr_spill drrs = drr->drr_u.drr_spill;
1403 			ra.err = restore_spill(&ra, os, &drrs);
1404 			break;
1405 		}
1406 		default:
1407 			ra.err = EINVAL;
1408 			goto out;
1409 		}
1410 		pcksum = ra.cksum;
1411 	}
1412 	ASSERT(ra.err != 0);
1413 
1414 out:
1415 	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1416 		zfs_onexit_fd_rele(cleanup_fd);
1417 
1418 	if (ra.err != 0) {
1419 		/*
1420 		 * destroy what we created, so we don't leave it in the
1421 		 * inconsistent restoring state.
1422 		 */
1423 		txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
1424 
1425 		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1426 		    B_FALSE);
1427 		if (drc->drc_real_ds != drc->drc_logical_ds) {
1428 			mutex_exit(&drc->drc_logical_ds->ds_recvlock);
1429 			dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
1430 		}
1431 	}
1432 
1433 	kmem_free(ra.buf, ra.bufsize);
1434 	*voffp = ra.voff;
1435 	return (ra.err);
1436 }
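
/*
 * If dmu_recv_stream() fails, the temporary or newly created dataset has
 * already been destroyed above; if it succeeds, the received data lives
 * only in the still-inconsistent drc_real_ds until the caller completes
 * the receive with dmu_recv_end().
 */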
1437 
1438 struct recvendsyncarg {
1439 	char *tosnap;
1440 	uint64_t creation_time;
1441 	uint64_t toguid;
1442 };
1443 
1444 static int
1445 recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
1446 {
1447 	dsl_dataset_t *ds = arg1;
1448 	struct recvendsyncarg *resa = arg2;
1449 
1450 	return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
1451 }
1452 
1453 static void
1454 recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1455 {
1456 	dsl_dataset_t *ds = arg1;
1457 	struct recvendsyncarg *resa = arg2;
1458 
1459 	dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);
1460 
1461 	/* set snapshot's creation time and guid */
1462 	dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1463 	ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
1464 	ds->ds_prev->ds_phys->ds_guid = resa->toguid;
1465 	ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1466 
1467 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1468 	ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1469 }
1470 
1471 static int
1472 add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
1473 {
1474 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1475 	uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
1476 	dsl_dataset_t *snapds;
1477 	guid_map_entry_t *gmep;
1478 	int err;
1479 
1480 	ASSERT(guid_map != NULL);
1481 
1482 	rw_enter(&dp->dp_config_rwlock, RW_READER);
1483 	err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
1484 	if (err == 0) {
1485 		gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
1486 		gmep->guid = snapds->ds_phys->ds_guid;
1487 		gmep->gme_ds = snapds;
1488 		avl_add(guid_map, gmep);
1489 	}
1490 
1491 	rw_exit(&dp->dp_config_rwlock);
1492 	return (err);
1493 }
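
/*
 * add_ds_to_guidmap() records the snapshot just created by dmu_recv_end()
 * in the dedup GUID map, keyed by its ds_guid, so that later streams
 * received through the same cleanup_fd/action handle can resolve
 * DRR_WRITE_BYREF references to it.  The hold taken here is released by
 * free_guid_map_onexit().
 */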
1494 
1495 static int
1496 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1497 {
1498 	struct recvendsyncarg resa;
1499 	dsl_dataset_t *ds = drc->drc_logical_ds;
1500 	int err;
1501 
1502 	/*
1503 	 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1504 	 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1505 	 * can close it.
1506 	 */
1507 	txg_wait_synced(ds->ds_dir->dd_pool, 0);
1508 
1509 	if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
1510 		err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
1511 		    drc->drc_force);
1512 		if (err)
1513 			goto out;
1514 	} else {
1515 		mutex_exit(&ds->ds_recvlock);
1516 		dsl_dataset_rele(ds, dmu_recv_tag);
1517 		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1518 		    B_FALSE);
1519 		return (EBUSY);
1520 	}
1521 
1522 	resa.creation_time = drc->drc_drrb->drr_creation_time;
1523 	resa.toguid = drc->drc_drrb->drr_toguid;
1524 	resa.tosnap = drc->drc_tosnap;
1525 
1526 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1527 	    recv_end_check, recv_end_sync, ds, &resa, 3);
1528 	if (err) {
1529 		/* swap back */
1530 		(void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
1531 	}
1532 
1533 out:
1534 	mutex_exit(&ds->ds_recvlock);
1535 	if (err == 0 && drc->drc_guid_to_ds_map != NULL)
1536 		(void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1537 	dsl_dataset_disown(ds, dmu_recv_tag);
1538 	(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
1539 	return (err);
1540 }
1541 
1542 static int
1543 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1544 {
1545 	struct recvendsyncarg resa;
1546 	dsl_dataset_t *ds = drc->drc_logical_ds;
1547 	int err;
1548 
1549 	/*
1550 	 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1551 	 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1552 	 * can close it.
1553 	 */
1554 	txg_wait_synced(ds->ds_dir->dd_pool, 0);
1555 
1556 	resa.creation_time = drc->drc_drrb->drr_creation_time;
1557 	resa.toguid = drc->drc_drrb->drr_toguid;
1558 	resa.tosnap = drc->drc_tosnap;
1559 
1560 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1561 	    recv_end_check, recv_end_sync, ds, &resa, 3);
1562 	if (err) {
1563 		/* clean up the fs we just recv'd into */
1564 		(void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
1565 	} else {
1566 		if (drc->drc_guid_to_ds_map != NULL)
1567 			(void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1568 		/* release the hold from dmu_recv_begin */
1569 		dsl_dataset_disown(ds, dmu_recv_tag);
1570 	}
1571 	return (err);
1572 }
1573 
1574 int
1575 dmu_recv_end(dmu_recv_cookie_t *drc)
1576 {
1577 	if (drc->drc_logical_ds != drc->drc_real_ds)
1578 		return (dmu_recv_existing_end(drc));
1579 	else
1580 		return (dmu_recv_new_end(drc));
1581 }
1582