xref: /titanic_41/usr/src/uts/common/fs/zfs/dmu_send.c (revision 8d4e547db823a866b8f73efc0acdc423e2963caf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/dmu.h>
29 #include <sys/dmu_impl.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/dbuf.h>
32 #include <sys/dnode.h>
33 #include <sys/zfs_context.h>
34 #include <sys/dmu_objset.h>
35 #include <sys/dmu_traverse.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dsl_pool.h>
39 #include <sys/dsl_synctask.h>
40 #include <sys/zfs_ioctl.h>
41 #include <sys/zap.h>
42 #include <sys/zio_checksum.h>
43 
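/*
 * Send-side state.  A backup (send) stream is a sequence of
 * dmu_replay_record_t headers, some followed by payload:
 *
 *	BEGIN  { magic, version, creation time, type, toguid, fromguid, toname }
 *	OBJECT | WRITE | FREE | FREEOBJECTS ...   (emitted in traversal order)
 *	END    { fletcher-4 checksum of everything preceding this record }
 *
 * struct backuparg carries the scratch record, the destination vnode,
 * the snapshot's objset, the running checksum, and a sticky error for
 * the dump_* helpers below.
 */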
44 struct backuparg {
45 	dmu_replay_record_t *drr;
46 	vnode_t *vp;
47 	objset_t *os;
48 	zio_cksum_t zc;
49 	int err;
50 };
51 
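/*
 * Fold 'len' bytes into the running fletcher-4 checksum and append them
 * to the stream's vnode.  Every record and payload goes through here, so
 * the END record's checksum covers the entire stream up to that point.
 * Callers must pass a multiple of 8 bytes (see restore_read()).
 */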
52 static int
53 dump_bytes(struct backuparg *ba, void *buf, int len)
54 {
55 	ssize_t resid; /* have to get resid to get detailed errno */
56 	ASSERT3U(len % 8, ==, 0);
57 
58 	fletcher_4_incremental_native(buf, len, &ba->zc);
59 	ba->err = vn_rdwr(UIO_WRITE, ba->vp,
60 	    (caddr_t)buf, len,
61 	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
62 	return (ba->err);
63 }
64 
65 static int
66 dump_free(struct backuparg *ba, uint64_t object, uint64_t offset,
67     uint64_t length)
68 {
69 	/* write a FREE record */
70 	bzero(ba->drr, sizeof (dmu_replay_record_t));
71 	ba->drr->drr_type = DRR_FREE;
72 	ba->drr->drr_u.drr_free.drr_object = object;
73 	ba->drr->drr_u.drr_free.drr_offset = offset;
74 	ba->drr->drr_u.drr_free.drr_length = length;
75 
76 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
77 		return (EINTR);
78 	return (0);
79 }
80 
81 static int
82 dump_data(struct backuparg *ba, dmu_object_type_t type,
83     uint64_t object, uint64_t offset, int blksz, void *data)
84 {
85 	/* write a DATA record */
86 	bzero(ba->drr, sizeof (dmu_replay_record_t));
87 	ba->drr->drr_type = DRR_WRITE;
88 	ba->drr->drr_u.drr_write.drr_object = object;
89 	ba->drr->drr_u.drr_write.drr_type = type;
90 	ba->drr->drr_u.drr_write.drr_offset = offset;
91 	ba->drr->drr_u.drr_write.drr_length = blksz;
92 
93 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
94 		return (EINTR);
95 	if (dump_bytes(ba, data, blksz))
96 		return (EINTR);
97 	return (0);
98 }
99 
100 static int
101 dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs)
102 {
103 	/* write a FREEOBJECTS record */
104 	bzero(ba->drr, sizeof (dmu_replay_record_t));
105 	ba->drr->drr_type = DRR_FREEOBJECTS;
106 	ba->drr->drr_u.drr_freeobjects.drr_firstobj = firstobj;
107 	ba->drr->drr_u.drr_freeobjects.drr_numobjs = numobjs;
108 
109 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
110 		return (EINTR);
111 	return (0);
112 }
113 
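/*
 * Emit the records for one dnode: a FREEOBJECTS record if the dnode is
 * free, otherwise an OBJECT record (followed by the bonus buffer, padded
 * to 8 bytes) plus a FREE record that truncates anything beyond the
 * object's last block on the receiving side.
 */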
114 static int
115 dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp)
116 {
117 	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
118 		return (dump_freeobjects(ba, object, 1));
119 
120 	/* write an OBJECT record */
121 	bzero(ba->drr, sizeof (dmu_replay_record_t));
122 	ba->drr->drr_type = DRR_OBJECT;
123 	ba->drr->drr_u.drr_object.drr_object = object;
124 	ba->drr->drr_u.drr_object.drr_type = dnp->dn_type;
125 	ba->drr->drr_u.drr_object.drr_bonustype = dnp->dn_bonustype;
126 	ba->drr->drr_u.drr_object.drr_blksz =
127 	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
128 	ba->drr->drr_u.drr_object.drr_bonuslen = dnp->dn_bonuslen;
129 	ba->drr->drr_u.drr_object.drr_checksum = dnp->dn_checksum;
130 	ba->drr->drr_u.drr_object.drr_compress = dnp->dn_compress;
131 
132 	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
133 		return (EINTR);
134 
135 	if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)))
136 		return (EINTR);
137 
138 	/* free anything past the end of the file */
139 	if (dump_free(ba, object, (dnp->dn_maxblkid + 1) *
140 	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
141 		return (EINTR);
142 	if (ba->err)
143 		return (EINTR);
144 	return (0);
145 }
146 
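/*
 * BP_SPAN() is the number of bytes of object data covered by one block
 * pointer at the given indirection level: the data block size at level 0,
 * multiplied by the indirect-block fanout
 * (1 << (dn_indblkshift - SPA_BLKPTRSHIFT)) for each level above that.
 * For example, with 128K data blocks and 16K indirect blocks (128
 * blkptrs each), a level-1 blkptr spans 128 * 128K = 16M of file data.
 */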
147 #define	BP_SPAN(dnp, level) \
148 	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
149 	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
150 
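/*
 * Callback for traverse_dsl_dataset(), invoked for each block (holes
 * included) born after the incremental source snapshot:
 *
 *	- a hole in the meta-dnode (object 0) becomes a FREEOBJECTS record
 *	  covering the dnodes that the hole spans;
 *	- a hole in any other object becomes a FREE record;
 *	- a level-0 meta-dnode block is decoded into one dump_dnode() call
 *	  per dnode_phys_t it contains;
 *	- any other level-0 block (except the objset block) becomes a WRITE
 *	  record, read through the ARC if the traversal didn't already
 *	  provide the data;
 *	- indirect blocks and the objset block produce no records.
 */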
151 static int
152 backup_cb(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
153 {
154 	struct backuparg *ba = arg;
155 	uint64_t object = bc->bc_bookmark.zb_object;
156 	int level = bc->bc_bookmark.zb_level;
157 	uint64_t blkid = bc->bc_bookmark.zb_blkid;
158 	blkptr_t *bp = bc->bc_blkptr.blk_birth ? &bc->bc_blkptr : NULL;
159 	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
160 	void *data = bc->bc_data;
161 	int err = 0;
162 
163 	if (issig(JUSTLOOKING) && issig(FORREAL))
164 		return (EINTR);
165 
166 	ASSERT(data || bp == NULL);
167 
168 	if (bp == NULL && object == 0) {
169 		uint64_t span = BP_SPAN(bc->bc_dnode, level);
170 		uint64_t dnobj = (blkid * span) >> DNODE_SHIFT;
171 		err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT);
172 	} else if (bp == NULL) {
173 		uint64_t span = BP_SPAN(bc->bc_dnode, level);
174 		err = dump_free(ba, object, blkid * span, span);
175 	} else if (data && level == 0 && type == DMU_OT_DNODE) {
176 		dnode_phys_t *blk = data;
177 		int i;
178 		int blksz = BP_GET_LSIZE(bp);
179 
180 		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
181 			uint64_t dnobj =
182 			    (blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
183 			err = dump_dnode(ba, dnobj, blk+i);
184 			if (err)
185 				break;
186 		}
187 	} else if (level == 0 &&
188 	    type != DMU_OT_DNODE && type != DMU_OT_OBJSET) {
189 		int blksz = BP_GET_LSIZE(bp);
190 		if (data == NULL) {
191 			uint32_t aflags = ARC_WAIT;
192 			arc_buf_t *abuf;
193 			zbookmark_t zb;
194 
195 			zb.zb_objset = ba->os->os->os_dsl_dataset->ds_object;
196 			zb.zb_object = object;
197 			zb.zb_level = level;
198 			zb.zb_blkid = blkid;
199 			(void) arc_read(NULL, spa, bp,
200 			    dmu_ot[type].ot_byteswap, arc_getbuf_func, &abuf,
201 			    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_MUSTSUCCEED,
202 			    &aflags, &zb);
203 
204 			if (abuf) {
205 				err = dump_data(ba, type, object, blkid * blksz,
206 				    blksz, abuf->b_data);
207 				(void) arc_buf_remove_ref(abuf, &abuf);
208 			}
209 		} else {
210 			err = dump_data(ba, type, object, blkid * blksz,
211 			    blksz, data);
212 		}
213 	}
214 
215 	ASSERT(err == 0 || err == EINTR);
216 	return (err);
217 }
218 
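/*
 * Generate a backup (send) stream for the snapshot 'tosnap', relative to
 * the optional earlier snapshot 'fromsnap' of the same filesystem, and
 * write it to 'vp'.  Writes the BEGIN record, traverses the dataset from
 * fromsnap's creation txg (0 for a full stream) emitting records through
 * backup_cb(), and finishes with an END record carrying the accumulated
 * fletcher-4 checksum so the receiver can verify the stream.
 */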
219 int
220 dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, vnode_t *vp)
221 {
222 	dsl_dataset_t *ds = tosnap->os->os_dsl_dataset;
223 	dsl_dataset_t *fromds = fromsnap ? fromsnap->os->os_dsl_dataset : NULL;
224 	dmu_replay_record_t *drr;
225 	struct backuparg ba;
226 	int err;
227 
228 	/* tosnap must be a snapshot */
229 	if (ds->ds_phys->ds_next_snap_obj == 0)
230 		return (EINVAL);
231 
232 	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
233 	if (fromds && (ds->ds_dir != fromds->ds_dir ||
234 	    fromds->ds_phys->ds_creation_txg >=
235 	    ds->ds_phys->ds_creation_txg))
236 		return (EXDEV);
237 
238 	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
239 	drr->drr_type = DRR_BEGIN;
240 	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
241 	drr->drr_u.drr_begin.drr_version = DMU_BACKUP_VERSION;
242 	drr->drr_u.drr_begin.drr_creation_time =
243 	    ds->ds_phys->ds_creation_time;
244 	drr->drr_u.drr_begin.drr_type = tosnap->os->os_phys->os_type;
245 	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
246 	if (fromds)
247 		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
248 	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
249 
250 	ba.drr = drr;
251 	ba.vp = vp;
252 	ba.os = tosnap;
253 	ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0);
254 
255 	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) {
256 		kmem_free(drr, sizeof (dmu_replay_record_t));
257 		return (ba.err);
258 	}
259 
260 	err = traverse_dsl_dataset(ds,
261 	    fromds ? fromds->ds_phys->ds_creation_txg : 0,
262 	    ADVANCE_PRE | ADVANCE_HOLES | ADVANCE_DATA | ADVANCE_NOLOCK,
263 	    backup_cb, &ba);
264 
265 	if (err) {
266 		if (err == EINTR && ba.err)
267 			err = ba.err;
268 		kmem_free(drr, sizeof (dmu_replay_record_t));
269 		return (err);
270 	}
271 
272 	bzero(drr, sizeof (dmu_replay_record_t));
273 	drr->drr_type = DRR_END;
274 	drr->drr_u.drr_end.drr_checksum = ba.zc;
275 
276 	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) {
277 		kmem_free(drr, sizeof (dmu_replay_record_t));
278 		return (ba.err);
279 	}
280 
281 	kmem_free(drr, sizeof (dmu_replay_record_t));
282 
283 	return (0);
284 }
285 
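/*
 * Receive-side state: the source vnode and current offset, a staging
 * buffer (1MB, allocated in dmu_recvbackup()) that restore_read()
 * refills as records are consumed, the running fletcher-4 checksum, a
 * flag noting whether the stream was written on an opposite-endian
 * host, and a sticky error.
 */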
286 struct restorearg {
287 	int err;
288 	int byteswap;
289 	vnode_t *vp;
290 	char *buf;
291 	uint64_t voff;
292 	int buflen; /* number of valid bytes in buf */
293 	int bufoff; /* next offset to read */
294 	int bufsize; /* amount of memory allocated for buf */
295 	zio_cksum_t zc;
296 };
297 
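/*
 * Sync-task check/sync pair for the BEGIN record of an incremental
 * stream: the target filesystem's most recent snapshot must match
 * drr_fromguid, there must be no changes since that snapshot, and the
 * new snapshot name must not already exist.  The sync function then
 * marks the dataset DS_FLAG_INCONSISTENT while the stream is applied.
 */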
298 /* ARGSUSED */
299 static int
300 replay_incremental_check(void *arg1, void *arg2, dmu_tx_t *tx)
301 {
302 	dsl_dataset_t *ds = arg1;
303 	struct drr_begin *drrb = arg2;
304 	const char *snapname;
305 	int err;
306 	uint64_t val;
307 
308 	/* must already be a snapshot of this fs */
309 	if (ds->ds_phys->ds_prev_snap_obj == 0)
310 		return (ENODEV);
311 
312 	/* most recent snapshot must match fromguid */
313 	if (ds->ds_prev->ds_phys->ds_guid != drrb->drr_fromguid)
314 		return (ENODEV);
315 	/* must not have any changes since most recent snapshot */
316 	if (ds->ds_phys->ds_bp.blk_birth >
317 	    ds->ds_prev->ds_phys->ds_creation_txg)
318 		return (ETXTBSY);
319 
320 	/* new snapshot name must not exist */
321 	snapname = strrchr(drrb->drr_toname, '@');
322 	if (snapname == NULL)
323 		return (EEXIST);
324 
325 	snapname++;
326 	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
327 	    ds->ds_phys->ds_snapnames_zapobj, snapname, 8, 1, &val);
328 	if (err == 0)
329 		return (EEXIST);
330 	if (err != ENOENT)
331 		return (err);
332 
333 	return (0);
334 }
335 
336 /* ARGSUSED */
337 static void
338 replay_incremental_sync(void *arg1, void *arg2, dmu_tx_t *tx)
339 {
340 	dsl_dataset_t *ds = arg1;
341 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
342 	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
343 }
344 
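/*
 * Sync-task check/sync pair for the BEGIN record of a full stream: the
 * check verifies that no child dataset with the target name already
 * exists; the sync creates the new dataset and its objset (of the type
 * recorded in the BEGIN record) and marks it DS_FLAG_INCONSISTENT.
 */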
345 /* ARGSUSED */
346 static int
347 replay_full_check(void *arg1, void *arg2, dmu_tx_t *tx)
348 {
349 	dsl_dir_t *dd = arg1;
350 	struct drr_begin *drrb = arg2;
351 	objset_t *mos = dd->dd_pool->dp_meta_objset;
352 	char *cp;
353 	uint64_t val;
354 	int err;
355 
356 	cp = strchr(drrb->drr_toname, '@');
357 	*cp = '\0';
358 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
359 	    strrchr(drrb->drr_toname, '/') + 1,
360 	    sizeof (uint64_t), 1, &val);
361 	*cp = '@';
362 
363 	if (err != ENOENT)
364 		return (err ? err : EEXIST);
365 
366 	return (0);
367 }
368 
369 static void
370 replay_full_sync(void *arg1, void *arg2, dmu_tx_t *tx)
371 {
372 	dsl_dir_t *dd = arg1;
373 	struct drr_begin *drrb = arg2;
374 	char *cp;
375 	dsl_dataset_t *ds;
376 	uint64_t dsobj;
377 
378 	cp = strchr(drrb->drr_toname, '@');
379 	*cp = '\0';
380 	dsobj = dsl_dataset_create_sync(dd, strrchr(drrb->drr_toname, '/') + 1,
381 	    NULL, tx);
382 	*cp = '@';
383 
384 	VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool, dsobj, NULL,
385 	    DS_MODE_EXCLUSIVE, FTAG, &ds));
386 
387 	(void) dmu_objset_create_impl(dsl_dataset_get_spa(ds),
388 	    ds, &ds->ds_phys->ds_bp, drrb->drr_type, tx);
389 
390 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
391 	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
392 
393 	dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
394 }
395 
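/*
 * Sync-task check/sync pair for the END record: snapshot the newly
 * received filesystem under the name following '@' in drr_toname, then
 * stamp the snapshot with the creation time and guid from the BEGIN
 * record and clear DS_FLAG_INCONSISTENT on both the snapshot and the
 * head dataset.
 */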
396 static int
397 replay_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
398 {
399 	objset_t *os = arg1;
400 	struct drr_begin *drrb = arg2;
401 	char *snapname;
402 
403 	/* XXX verify that drr_toname is in dd */
404 
405 	snapname = strchr(drrb->drr_toname, '@');
406 	if (snapname == NULL)
407 		return (EINVAL);
408 	snapname++;
409 
410 	return (dsl_dataset_snapshot_check(os, snapname, tx));
411 }
412 
413 static void
414 replay_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
415 {
416 	objset_t *os = arg1;
417 	struct drr_begin *drrb = arg2;
418 	char *snapname;
419 	dsl_dataset_t *ds, *hds;
420 
421 	snapname = strchr(drrb->drr_toname, '@') + 1;
422 
423 	dsl_dataset_snapshot_sync(os, snapname, tx);
424 
425 	/* set snapshot's creation time and guid */
426 	hds = os->os->os_dsl_dataset;
427 	VERIFY(0 == dsl_dataset_open_obj(hds->ds_dir->dd_pool,
428 	    hds->ds_phys->ds_prev_snap_obj, NULL,
429 	    DS_MODE_PRIMARY | DS_MODE_READONLY | DS_MODE_INCONSISTENT,
430 	    FTAG, &ds));
431 
432 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
433 	ds->ds_phys->ds_creation_time = drrb->drr_creation_time;
434 	ds->ds_phys->ds_guid = drrb->drr_toguid;
435 	ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
436 
437 	dsl_dataset_close(ds, DS_MODE_PRIMARY, FTAG);
438 
439 	dmu_buf_will_dirty(hds->ds_dbuf, tx);
440 	hds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
441 }
442 
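/*
 * Return a pointer to the next 'len' bytes of the stream (len must be a
 * multiple of 8), refilling the staging buffer from the vnode as needed
 * and folding the returned bytes into the running checksum.  A read
 * that yields no new data is treated as a truncated stream (EINVAL).
 */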
443 static void *
444 restore_read(struct restorearg *ra, int len)
445 {
446 	void *rv;
447 
448 	/* some records require 8-byte alignment, so every read must be a multiple of 8 */
449 	ASSERT3U(len % 8, ==, 0);
450 
451 	while (ra->buflen - ra->bufoff < len) {
452 		ssize_t resid;
453 		int leftover = ra->buflen - ra->bufoff;
454 
455 		(void) memmove(ra->buf, ra->buf + ra->bufoff, leftover);
456 		ra->err = vn_rdwr(UIO_READ, ra->vp,
457 		    (caddr_t)ra->buf + leftover, ra->bufsize - leftover,
458 		    ra->voff, UIO_SYSSPACE, FAPPEND,
459 		    RLIM64_INFINITY, CRED(), &resid);
460 
461 		ra->voff += ra->bufsize - leftover - resid;
462 		ra->buflen = ra->bufsize - resid;
463 		ra->bufoff = 0;
464 		if (resid == ra->bufsize - leftover)
465 			ra->err = EINVAL;
466 		if (ra->err)
467 			return (NULL);
468 		/* Could compute checksum here? */
469 	}
470 
471 	ASSERT3U(ra->bufoff % 8, ==, 0);
472 	ASSERT3U(ra->buflen - ra->bufoff, >=, len);
473 	rv = ra->buf + ra->bufoff;
474 	ra->bufoff += len;
475 	if (ra->byteswap)
476 		fletcher_4_incremental_byteswap(rv, len, &ra->zc);
477 	else
478 		fletcher_4_incremental_native(rv, len, &ra->zc);
479 	return (rv);
480 }
481 
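/*
 * Byteswap a record header in place.  Used when the stream was written
 * on a host of the opposite endianness, which dmu_recvbackup() detects
 * from the byte-swapped BEGIN magic.
 */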
482 static void
483 backup_byteswap(dmu_replay_record_t *drr)
484 {
485 #define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
486 #define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
487 	drr->drr_type = BSWAP_32(drr->drr_type);
488 	switch (drr->drr_type) {
489 	case DRR_BEGIN:
490 		DO64(drr_begin.drr_magic);
491 		DO64(drr_begin.drr_version);
492 		DO64(drr_begin.drr_creation_time);
493 		DO32(drr_begin.drr_type);
494 		DO64(drr_begin.drr_toguid);
495 		DO64(drr_begin.drr_fromguid);
496 		break;
497 	case DRR_OBJECT:
498 		DO64(drr_object.drr_object);
499 		/* DO64(drr_object.drr_allocation_txg); */
500 		DO32(drr_object.drr_type);
501 		DO32(drr_object.drr_bonustype);
502 		DO32(drr_object.drr_blksz);
503 		DO32(drr_object.drr_bonuslen);
504 		break;
505 	case DRR_FREEOBJECTS:
506 		DO64(drr_freeobjects.drr_firstobj);
507 		DO64(drr_freeobjects.drr_numobjs);
508 		break;
509 	case DRR_WRITE:
510 		DO64(drr_write.drr_object);
511 		DO32(drr_write.drr_type);
512 		DO64(drr_write.drr_offset);
513 		DO64(drr_write.drr_length);
514 		break;
515 	case DRR_FREE:
516 		DO64(drr_free.drr_object);
517 		DO64(drr_free.drr_offset);
518 		DO64(drr_free.drr_length);
519 		break;
520 	case DRR_END:
521 		DO64(drr_end.drr_checksum.zc_word[0]);
522 		DO64(drr_end.drr_checksum.zc_word[1]);
523 		DO64(drr_end.drr_checksum.zc_word[2]);
524 		DO64(drr_end.drr_checksum.zc_word[3]);
525 		break;
526 	}
527 #undef DO64
528 #undef DO32
529 }
530 
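/*
 * Apply an OBJECT record: validate it, then either claim the object
 * number (if currently free) or reclaim it with the new block size and
 * bonus length (if already allocated), set its checksum and compression
 * properties, and copy in the bonus buffer, byteswapping it by bonus
 * type if necessary.
 */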
531 static int
532 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
533 {
534 	int err;
535 	dmu_tx_t *tx;
536 
537 	err = dmu_object_info(os, drro->drr_object, NULL);
538 
539 	if (err != 0 && err != ENOENT)
540 		return (EINVAL);
541 
542 	if (drro->drr_type == DMU_OT_NONE ||
543 	    drro->drr_type >= DMU_OT_NUMTYPES ||
544 	    drro->drr_bonustype >= DMU_OT_NUMTYPES ||
545 	    drro->drr_checksum >= ZIO_CHECKSUM_FUNCTIONS ||
546 	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
547 	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
548 	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
549 	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
550 	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
551 		return (EINVAL);
552 	}
553 
554 	tx = dmu_tx_create(os);
555 
556 	if (err == ENOENT) {
557 		/* currently free, want to be allocated */
558 		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
559 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1);
560 		err = dmu_tx_assign(tx, TXG_WAIT);
561 		if (err) {
562 			dmu_tx_abort(tx);
563 			return (err);
564 		}
565 		err = dmu_object_claim(os, drro->drr_object,
566 		    drro->drr_type, drro->drr_blksz,
567 		    drro->drr_bonustype, drro->drr_bonuslen, tx);
568 	} else {
569 		/* currently allocated, want to be allocated */
570 		dmu_tx_hold_bonus(tx, drro->drr_object);
571 		/*
572 		 * We may change blocksize, so need to
573 		 * hold_write
574 		 */
575 		dmu_tx_hold_write(tx, drro->drr_object, 0, 1);
576 		err = dmu_tx_assign(tx, TXG_WAIT);
577 		if (err) {
578 			dmu_tx_abort(tx);
579 			return (err);
580 		}
581 
582 		err = dmu_object_reclaim(os, drro->drr_object,
583 		    drro->drr_type, drro->drr_blksz,
584 		    drro->drr_bonustype, drro->drr_bonuslen, tx);
585 	}
586 	if (err) {
587 		dmu_tx_commit(tx);
588 		return (EINVAL);
589 	}
590 
591 	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksum, tx);
592 	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
593 
594 	if (drro->drr_bonuslen) {
595 		dmu_buf_t *db;
596 		void *data;
597 		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
598 		dmu_buf_will_dirty(db, tx);
599 
600 		ASSERT3U(db->db_size, ==, drro->drr_bonuslen);
601 		data = restore_read(ra, P2ROUNDUP(db->db_size, 8));
602 		if (data == NULL) {
603 			dmu_tx_commit(tx);
604 			return (ra->err);
605 		}
606 		bcopy(data, db->db_data, db->db_size);
607 		if (ra->byteswap) {
608 			dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data,
609 			    drro->drr_bonuslen);
610 		}
611 		dmu_buf_rele(db, FTAG);
612 	}
613 	dmu_tx_commit(tx);
614 	return (0);
615 }
616 
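/*
 * Apply a FREEOBJECTS record: free every currently allocated object in
 * the range, one transaction per object.
 */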
617 /* ARGSUSED */
618 static int
619 restore_freeobjects(struct restorearg *ra, objset_t *os,
620     struct drr_freeobjects *drrfo)
621 {
622 	uint64_t obj;
623 
624 	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
625 		return (EINVAL);
626 
627 	for (obj = drrfo->drr_firstobj;
628 	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
629 	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
630 		dmu_tx_t *tx;
631 		int err;
632 
633 		if (dmu_object_info(os, obj, NULL) != 0)
634 			continue;
635 
636 		tx = dmu_tx_create(os);
637 		dmu_tx_hold_bonus(tx, obj);
638 		err = dmu_tx_assign(tx, TXG_WAIT);
639 		if (err) {
640 			dmu_tx_abort(tx);
641 			return (err);
642 		}
643 		err = dmu_object_free(os, obj, tx);
644 		dmu_tx_commit(tx);
645 		if (err && err != ENOENT)
646 			return (EINVAL);
647 	}
648 	return (0);
649 }
650 
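/*
 * Apply a WRITE record: pull the payload out of the stream, byteswap it
 * by object type if necessary, and dmu_write() it at the recorded
 * offset.  The object must already exist on the receiving side.
 */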
651 static int
652 restore_write(struct restorearg *ra, objset_t *os,
653     struct drr_write *drrw)
654 {
655 	dmu_tx_t *tx;
656 	void *data;
657 	int err;
658 
659 	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
660 	    drrw->drr_type >= DMU_OT_NUMTYPES)
661 		return (EINVAL);
662 
663 	data = restore_read(ra, drrw->drr_length);
664 	if (data == NULL)
665 		return (ra->err);
666 
667 	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
668 		return (EINVAL);
669 
670 	tx = dmu_tx_create(os);
671 
672 	dmu_tx_hold_write(tx, drrw->drr_object,
673 	    drrw->drr_offset, drrw->drr_length);
674 	err = dmu_tx_assign(tx, TXG_WAIT);
675 	if (err) {
676 		dmu_tx_abort(tx);
677 		return (err);
678 	}
679 	if (ra->byteswap)
680 		dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length);
681 	dmu_write(os, drrw->drr_object,
682 	    drrw->drr_offset, drrw->drr_length, data, tx);
683 	dmu_tx_commit(tx);
684 	return (0);
685 }
686 
687 /* ARGSUSED */
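/*
 * Apply a FREE record: punch a hole of drr_length bytes (or to the end
 * of the object, for a length of -1ULL) at drr_offset.
 */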
688 static int
689 restore_free(struct restorearg *ra, objset_t *os,
690     struct drr_free *drrf)
691 {
692 	dmu_tx_t *tx;
693 	int err;
694 
695 	if (drrf->drr_length != -1ULL &&
696 	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
697 		return (EINVAL);
698 
699 	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
700 		return (EINVAL);
701 
702 	tx = dmu_tx_create(os);
703 
704 	dmu_tx_hold_free(tx, drrf->drr_object,
705 	    drrf->drr_offset, drrf->drr_length);
706 	err = dmu_tx_assign(tx, TXG_WAIT);
707 	if (err) {
708 		dmu_tx_abort(tx);
709 		return (err);
710 	}
711 	err = dmu_free_range(os, drrf->drr_object,
712 	    drrf->drr_offset, drrf->drr_length, tx);
713 	dmu_tx_commit(tx);
714 	return (err);
715 }
716 
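/*
 * Receive a backup stream into the filesystem named by 'tosnap' (a
 * snapshot name).  The caller has already read the BEGIN payload into
 * 'drrb'; this routine reconstructs that record to seed the running
 * checksum, validates it, and runs the appropriate begin sync task: for
 * an incremental stream the existing filesystem is rolled back to its
 * most recent snapshot if 'force' is set, checked against drr_fromguid,
 * and marked inconsistent; for a full stream a new filesystem is
 * created.  It then opens the objset and applies records until the END
 * record, whose stored checksum must match the running checksum of
 * everything before it, and finally snapshots the result.  On failure
 * after the begin has been processed, the filesystem is rolled back
 * (incremental) or destroyed (full) so that an inconsistent fs is not
 * left behind.  *sizep is set to the stream offset at which reading
 * stopped.
 */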
717 int
718 dmu_recvbackup(char *tosnap, struct drr_begin *drrb, uint64_t *sizep,
719     boolean_t force, vnode_t *vp, uint64_t voffset)
720 {
721 	struct restorearg ra;
722 	dmu_replay_record_t *drr;
723 	char *cp;
724 	objset_t *os = NULL;
725 	zio_cksum_t pzc;
726 
727 	bzero(&ra, sizeof (ra));
728 	ra.vp = vp;
729 	ra.voff = voffset;
730 	ra.bufsize = 1<<20;
731 	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
732 
733 	if (drrb->drr_magic == DMU_BACKUP_MAGIC) {
734 		ra.byteswap = FALSE;
735 	} else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
736 		ra.byteswap = TRUE;
737 	} else {
738 		ra.err = EINVAL;
739 		goto out;
740 	}
741 
742 	/*
743 	 * NB: this assumes that struct drr_begin is the largest member of
744 	 * dmu_replay_record_t's drr_u, so the reconstructed record doesn't
745 	 * need to be padded with zeros out to the length that was written.
746 	 */
747 	((dmu_replay_record_t *)ra.buf)->drr_type = DRR_BEGIN;
748 	((dmu_replay_record_t *)ra.buf)->drr_pad = 0;
749 	((dmu_replay_record_t *)ra.buf)->drr_u.drr_begin = *drrb;
750 	if (ra.byteswap) {
751 		fletcher_4_incremental_byteswap(ra.buf,
752 		    sizeof (dmu_replay_record_t), &ra.zc);
753 	} else {
754 		fletcher_4_incremental_native(ra.buf,
755 		    sizeof (dmu_replay_record_t), &ra.zc);
756 	}
757 	(void) strcpy(drrb->drr_toname, tosnap); /* for the sync funcs */
758 
759 	if (ra.byteswap) {
760 		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
761 		drrb->drr_version = BSWAP_64(drrb->drr_version);
762 		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
763 		drrb->drr_type = BSWAP_32(drrb->drr_type);
764 		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
765 		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
766 	}
767 
768 	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
769 
770 	if (drrb->drr_version != DMU_BACKUP_VERSION ||
771 	    drrb->drr_type >= DMU_OST_NUMTYPES ||
772 	    strchr(drrb->drr_toname, '@') == NULL) {
773 		ra.err = EINVAL;
774 		goto out;
775 	}
776 
777 	/*
778 	 * Process the begin in syncing context.
779 	 */
780 	if (drrb->drr_fromguid) {
781 		/* incremental backup */
782 		dsl_dataset_t *ds = NULL;
783 
784 		cp = strchr(tosnap, '@');
785 		*cp = '\0';
786 		ra.err = dsl_dataset_open(tosnap, DS_MODE_EXCLUSIVE, FTAG, &ds);
787 		*cp = '@';
788 		if (ra.err)
789 			goto out;
790 
791 		/*
792 		 * Only do the rollback if the most recent snapshot
793 		 * matches the incremental source
794 		 */
795 		if (force) {
796 			if (ds->ds_prev == NULL ||
797 			    ds->ds_prev->ds_phys->ds_guid !=
798 			    drrb->drr_fromguid) {
799 				dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
800 				kmem_free(ra.buf, ra.bufsize);
801 				return (ENODEV);
802 			}
803 			(void) dsl_dataset_rollback(ds);
804 		}
805 		ra.err = dsl_sync_task_do(ds->ds_dir->dd_pool,
806 		    replay_incremental_check, replay_incremental_sync,
807 		    ds, drrb, 1);
808 		dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
809 	} else {
810 		/* full backup */
811 		dsl_dir_t *dd = NULL;
812 		const char *tail;
813 
814 		/* can't restore full backup into topmost fs, for now */
815 		if (strrchr(drrb->drr_toname, '/') == NULL) {
816 			ra.err = EINVAL;
817 			goto out;
818 		}
819 
820 		cp = strchr(tosnap, '@');
821 		*cp = '\0';
822 		ra.err = dsl_dir_open(tosnap, FTAG, &dd, &tail);
823 		*cp = '@';
824 		if (ra.err)
825 			goto out;
826 		if (tail == NULL) {
827 			ra.err = EEXIST;
828 			goto out;
829 		}
830 
831 		ra.err = dsl_sync_task_do(dd->dd_pool, replay_full_check,
832 		    replay_full_sync, dd, drrb, 5);
833 		dsl_dir_close(dd, FTAG);
834 	}
835 	if (ra.err)
836 		goto out;
837 
838 	/*
839 	 * Open the objset we are modifying.
840 	 */
841 
842 	cp = strchr(tosnap, '@');
843 	*cp = '\0';
844 	ra.err = dmu_objset_open(tosnap, DMU_OST_ANY,
845 	    DS_MODE_PRIMARY | DS_MODE_INCONSISTENT, &os);
846 	*cp = '@';
847 	ASSERT3U(ra.err, ==, 0);
848 
849 	/*
850 	 * Read records and process them.
851 	 */
852 	pzc = ra.zc;
853 	while (ra.err == 0 &&
854 	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
855 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
856 			ra.err = EINTR;
857 			goto out;
858 		}
859 
860 		if (ra.byteswap)
861 			backup_byteswap(drr);
862 
863 		switch (drr->drr_type) {
864 		case DRR_OBJECT:
865 		{
866 			/*
867 			 * We need to make a copy of the record header,
868 			 * because restore_{object,write} may need to
869 			 * restore_read(), which will invalidate drr.
870 			 */
871 			struct drr_object drro = drr->drr_u.drr_object;
872 			ra.err = restore_object(&ra, os, &drro);
873 			break;
874 		}
875 		case DRR_FREEOBJECTS:
876 		{
877 			struct drr_freeobjects drrfo =
878 			    drr->drr_u.drr_freeobjects;
879 			ra.err = restore_freeobjects(&ra, os, &drrfo);
880 			break;
881 		}
882 		case DRR_WRITE:
883 		{
884 			struct drr_write drrw = drr->drr_u.drr_write;
885 			ra.err = restore_write(&ra, os, &drrw);
886 			break;
887 		}
888 		case DRR_FREE:
889 		{
890 			struct drr_free drrf = drr->drr_u.drr_free;
891 			ra.err = restore_free(&ra, os, &drrf);
892 			break;
893 		}
894 		case DRR_END:
895 		{
896 			struct drr_end drre = drr->drr_u.drr_end;
897 			/*
898 			 * We compare against the *previous* checksum
899 			 * value, because the stored checksum is of
900 			 * everything before the DRR_END record.
901 			 */
902 			if (drre.drr_checksum.zc_word[0] != 0 &&
903 			    !ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pzc)) {
904 				ra.err = ECKSUM;
905 				goto out;
906 			}
907 
908 			ra.err = dsl_sync_task_do(dmu_objset_ds(os)->
909 			    ds_dir->dd_pool, replay_end_check, replay_end_sync,
910 			    os, drrb, 3);
911 			goto out;
912 		}
913 		default:
914 			ra.err = EINVAL;
915 			goto out;
916 		}
917 		pzc = ra.zc;
918 	}
919 
920 out:
921 	if (os)
922 		dmu_objset_close(os);
923 
924 	/*
925 	 * Make sure we don't rollback/destroy unless we actually
926 	 * processed the begin properly.  'os' will only be set if this
927 	 * is the case.
928 	 */
929 	if (ra.err && os && tosnap && strchr(tosnap, '@')) {
930 		/*
931 		 * rollback or destroy what we created, so we don't
932 		 * leave it in the restoring state.
933 		 */
934 		dsl_dataset_t *ds;
935 		int err;
936 
937 		cp = strchr(tosnap, '@');
938 		*cp = '\0';
939 		err = dsl_dataset_open(tosnap,
940 		    DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT,
941 		    FTAG, &ds);
942 		if (err == 0) {
943 			txg_wait_synced(ds->ds_dir->dd_pool, 0);
944 			if (drrb->drr_fromguid) {
945 				/* incremental: rollback to most recent snap */
946 				(void) dsl_dataset_rollback(ds);
947 				dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
948 			} else {
949 				/* full: destroy whole fs */
950 				dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
951 				(void) dsl_dataset_destroy(tosnap);
952 			}
953 		}
954 		*cp = '@';
955 	}
956 
957 	kmem_free(ra.buf, ra.bufsize);
958 	if (sizep)
959 		*sizep = ra.voff;
960 	return (ra.err);
961 }
962