xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_objset.c (revision 68ac2337c38c8af06edcf32a72e42de36ec72a9d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_prop.h>
33 #include <sys/dsl_pool.h>
34 #include <sys/dsl_synctask.h>
35 #include <sys/dnode.h>
36 #include <sys/dbuf.h>
37 #include <sys/zvol.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/zio_checksum.h>
40 #include <sys/zap.h>
41 #include <sys/zil.h>
42 #include <sys/dmu_impl.h>
43 
44 
45 spa_t *
46 dmu_objset_spa(objset_t *os)
47 {
48 	return (os->os->os_spa);
49 }
50 
51 zilog_t *
52 dmu_objset_zil(objset_t *os)
53 {
54 	return (os->os->os_zil);
55 }
56 
57 dsl_pool_t *
58 dmu_objset_pool(objset_t *os)
59 {
60 	dsl_dataset_t *ds;
61 
62 	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
63 		return (ds->ds_dir->dd_pool);
64 	else
65 		return (spa_get_dsl(os->os->os_spa));
66 }
67 
68 dsl_dataset_t *
69 dmu_objset_ds(objset_t *os)
70 {
71 	return (os->os->os_dsl_dataset);
72 }
73 
74 dmu_objset_type_t
75 dmu_objset_type(objset_t *os)
76 {
77 	return (os->os->os_phys->os_type);
78 }
79 
80 void
81 dmu_objset_name(objset_t *os, char *buf)
82 {
83 	dsl_dataset_name(os->os->os_dsl_dataset, buf);
84 }
85 
86 uint64_t
87 dmu_objset_id(objset_t *os)
88 {
89 	dsl_dataset_t *ds = os->os->os_dsl_dataset;
90 
91 	return (ds ? ds->ds_object : 0);
92 }
93 
94 static void
95 checksum_changed_cb(void *arg, uint64_t newval)
96 {
97 	objset_impl_t *osi = arg;
98 
99 	/*
100 	 * Inheritance should have been done by now.
101 	 */
102 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
103 
104 	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
105 }
106 
107 static void
108 compression_changed_cb(void *arg, uint64_t newval)
109 {
110 	objset_impl_t *osi = arg;
111 
112 	/*
113 	 * Inheritance and range checking should have been done by now.
114 	 */
115 	ASSERT(newval != ZIO_COMPRESS_INHERIT);
116 
117 	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
118 }
119 
/*
 * Byteswap an objset_phys_t in place (for reading an objset written
 * with the opposite endianness).  'size' must be exactly
 * sizeof (objset_phys_t); each field is swapped with its own routine.
 */
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
}
130 
131 int
132 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
133     objset_impl_t **osip)
134 {
135 	objset_impl_t *winner, *osi;
136 	int i, err, checksum;
137 
138 	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
139 	osi->os.os = osi;
140 	osi->os_dsl_dataset = ds;
141 	osi->os_spa = spa;
142 	if (bp)
143 		osi->os_rootbp = *bp;
144 	osi->os_phys = zio_buf_alloc(sizeof (objset_phys_t));
145 	if (!BP_IS_HOLE(&osi->os_rootbp)) {
146 		uint32_t aflags = ARC_WAIT;
147 		zbookmark_t zb;
148 		zb.zb_objset = ds ? ds->ds_object : 0;
149 		zb.zb_object = 0;
150 		zb.zb_level = -1;
151 		zb.zb_blkid = 0;
152 
153 		dprintf_bp(&osi->os_rootbp, "reading %s", "");
154 		err = arc_read(NULL, spa, &osi->os_rootbp,
155 		    dmu_ot[DMU_OT_OBJSET].ot_byteswap,
156 		    arc_bcopy_func, osi->os_phys,
157 		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
158 		if (err) {
159 			zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
160 			kmem_free(osi, sizeof (objset_impl_t));
161 			return (err);
162 		}
163 	} else {
164 		bzero(osi->os_phys, sizeof (objset_phys_t));
165 	}
166 
167 	/*
168 	 * Note: the changed_cb will be called once before the register
169 	 * func returns, thus changing the checksum/compression from the
170 	 * default (fletcher2/off).  Snapshots don't need to know, and
171 	 * registering would complicate clone promotion.
172 	 */
173 	if (ds && ds->ds_phys->ds_num_children == 0) {
174 		err = dsl_prop_register(ds, "checksum",
175 		    checksum_changed_cb, osi);
176 		if (err == 0)
177 			err = dsl_prop_register(ds, "compression",
178 			    compression_changed_cb, osi);
179 		if (err) {
180 			zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
181 			kmem_free(osi, sizeof (objset_impl_t));
182 			return (err);
183 		}
184 	} else if (ds == NULL) {
185 		/* It's the meta-objset. */
186 		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
187 		osi->os_compress = ZIO_COMPRESS_LZJB;
188 	}
189 
190 	osi->os_zil = zil_alloc(&osi->os, &osi->os_phys->os_zil_header);
191 
192 	/*
193 	 * Metadata always gets compressed and checksummed.
194 	 * If the data checksum is multi-bit correctable, and it's not
195 	 * a ZBT-style checksum, then it's suitable for metadata as well.
196 	 * Otherwise, the metadata checksum defaults to fletcher4.
197 	 */
198 	checksum = osi->os_checksum;
199 
200 	if (zio_checksum_table[checksum].ci_correctable &&
201 	    !zio_checksum_table[checksum].ci_zbt)
202 		osi->os_md_checksum = checksum;
203 	else
204 		osi->os_md_checksum = ZIO_CHECKSUM_FLETCHER_4;
205 	osi->os_md_compress = ZIO_COMPRESS_LZJB;
206 
207 	for (i = 0; i < TXG_SIZE; i++) {
208 		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
209 		    offsetof(dnode_t, dn_dirty_link[i]));
210 		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
211 		    offsetof(dnode_t, dn_dirty_link[i]));
212 	}
213 	list_create(&osi->os_dnodes, sizeof (dnode_t),
214 	    offsetof(dnode_t, dn_link));
215 	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
216 	    offsetof(dmu_buf_impl_t, db_link));
217 
218 	mutex_init(&osi->os_lock, NULL, MUTEX_DEFAULT, NULL);
219 	mutex_init(&osi->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
220 
221 	osi->os_meta_dnode = dnode_special_open(osi,
222 	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);
223 
224 	if (ds != NULL) {
225 		winner = dsl_dataset_set_user_ptr(ds, osi, dmu_objset_evict);
226 		if (winner) {
227 			dmu_objset_evict(ds, osi);
228 			osi = winner;
229 		}
230 	}
231 
232 	*osip = osi;
233 	return (0);
234 }
235 
236 /* called from zpl */
237 int
238 dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
239     objset_t **osp)
240 {
241 	dsl_dataset_t *ds;
242 	int err;
243 	objset_t *os;
244 	objset_impl_t *osi;
245 
246 	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
247 	err = dsl_dataset_open(name, mode, os, &ds);
248 	if (err) {
249 		kmem_free(os, sizeof (objset_t));
250 		return (err);
251 	}
252 
253 	osi = dsl_dataset_get_user_ptr(ds);
254 	if (osi == NULL) {
255 		blkptr_t bp;
256 
257 		dsl_dataset_get_blkptr(ds, &bp);
258 		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
259 		    ds, &bp, &osi);
260 		if (err) {
261 			dsl_dataset_close(ds, mode, os);
262 			kmem_free(os, sizeof (objset_t));
263 			return (err);
264 		}
265 	}
266 
267 	os->os = osi;
268 	os->os_mode = mode;
269 
270 	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) {
271 		dmu_objset_close(os);
272 		return (EINVAL);
273 	}
274 	*osp = os;
275 	return (0);
276 }
277 
278 void
279 dmu_objset_close(objset_t *os)
280 {
281 	dsl_dataset_close(os->os->os_dsl_dataset, os->os_mode, os);
282 	kmem_free(os, sizeof (objset_t));
283 }
284 
/*
 * Evict all cached dbufs for this objset.  If 'try' is nonzero, give
 * up and return 1 as soon as a dnode refuses to release its dbufs;
 * otherwise evict everything and return 0.
 *
 * We walk the dnode list under os_lock, but must drop the lock across
 * each dnode_evict_dbufs() call; the holds we take on the current and
 * next dnodes keep them alive while the lock is dropped.
 */
int
dmu_objset_evict_dbufs(objset_t *os, int try)
{
	objset_impl_t *osi = os->os;
	dnode_t *dn;

	mutex_enter(&osi->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&osi->os_dnodes, osi->os_meta_dnode);
	list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode);

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&osi->os_dnodes);
	    dn && refcount_is_zero(&dn->dn_holds);
	    dn = list_next(&osi->os_dnodes, dn))
		continue;
	if (dn)
		dnode_add_ref(dn, FTAG);

	while (dn) {
		dnode_t *next_dn = dn;

		/* Find and hold the next dnode before dropping os_lock. */
		do {
			next_dn = list_next(&osi->os_dnodes, next_dn);
		} while (next_dn && refcount_is_zero(&next_dn->dn_holds));
		if (next_dn)
			dnode_add_ref(next_dn, FTAG);

		mutex_exit(&osi->os_lock);
		if (dnode_evict_dbufs(dn, try)) {
			/* This dnode still has live dbufs; give up. */
			dnode_rele(dn, FTAG);
			if (next_dn)
				dnode_rele(next_dn, FTAG);
			return (1);
		}
		dnode_rele(dn, FTAG);
		mutex_enter(&osi->os_lock);
		dn = next_dn;
	}
	mutex_exit(&osi->os_lock);
	return (0);
}
333 
/*
 * Tear down an objset_impl_t.  Called when the dataset's user pointer
 * is being cleared, or from dmu_objset_open_impl() when we lose the
 * instantiation race.  All dirty state must already be synced out.
 */
void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	objset_t os;
	int i;

	/* Nothing may be dirty or pending free in any txg. */
	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	/* Mirrors the registration condition in dmu_objset_open_impl(). */
	if (ds && ds->ds_phys->ds_num_children == 0) {
		VERIFY(0 == dsl_prop_unregister(ds, "checksum",
		    checksum_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "compression",
		    compression_changed_cb, osi));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	os.os = osi;
	(void) dmu_objset_evict_dbufs(&os, 0);

	/* Only the meta-dnode should remain, and it must have no dbufs. */
	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);

	dnode_special_close(osi->os_meta_dnode);
	zil_free(osi->os_zil);

	zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
	mutex_destroy(&osi->os_lock);
	mutex_destroy(&osi->os_obj_lock);
	kmem_free(osi, sizeof (objset_impl_t));
}
372 
/* called from dsl for meta-objset */
/*
 * Create a brand-new (empty) objset for 'ds' in syncing context:
 * open an empty objset, allocate its meta-dnode, pre-size the
 * meta-dnode's indirection levels, and stamp the objset type.
 */
objset_impl_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, dmu_objset_type_t type,
    dmu_tx_t *tx)
{
	objset_impl_t *osi;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	/* bp == NULL: start from a hole, i.e. an empty objset. */
	VERIFY(0 == dmu_objset_open_impl(spa, ds, NULL, &osi));
	mdn = osi->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	osi->os_phys->os_type = type;

	dsl_dataset_dirty(ds, tx);

	return (osi);
}
424 
/*
 * Arguments for dmu_objset_create_check()/_sync(), passed through
 * dsl_sync_task_do() by dmu_objset_create().
 */
struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, dmu_tx_t *tx);
	void *userarg;			/* opaque argument for userfunc */
	dsl_dataset_t *clone_parent;	/* snapshot to clone, or NULL */
	const char *lastname;		/* last component of the new name */
	dmu_objset_type_t type;		/* objset type to create */
};
432 
433 /* ARGSUSED */
434 static int
435 dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
436 {
437 	dsl_dir_t *dd = arg1;
438 	struct oscarg *oa = arg2;
439 	objset_t *mos = dd->dd_pool->dp_meta_objset;
440 	int err;
441 	uint64_t ddobj;
442 
443 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
444 	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
445 	if (err != ENOENT)
446 		return (err ? err : EEXIST);
447 
448 	if (oa->clone_parent != NULL) {
449 		/*
450 		 * You can't clone across pools.
451 		 */
452 		if (oa->clone_parent->ds_dir->dd_pool != dd->dd_pool)
453 			return (EXDEV);
454 
455 		/*
456 		 * You can only clone snapshots, not the head datasets.
457 		 */
458 		if (oa->clone_parent->ds_phys->ds_num_children == 0)
459 			return (EINVAL);
460 	}
461 	return (0);
462 }
463 
/*
 * Sync-phase of dataset creation: create the new dataset (or clone)
 * and, for a non-clone, construct its empty objset and invoke the
 * caller's initialization callback.
 */
static void
dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	dsl_dataset_t *ds;
	blkptr_t bp;
	uint64_t dsobj;

	ASSERT(dmu_tx_is_syncing(tx));

	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_parent, tx);

	VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool, dsobj, NULL,
	    DS_MODE_STANDARD | DS_MODE_READONLY, FTAG, &ds));
	dsl_dataset_get_blkptr(ds, &bp);
	/* A hole root bp means no data yet, i.e. not a clone. */
	if (BP_IS_HOLE(&bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, tx);
	}
	dsl_dataset_close(ds, DS_MODE_STANDARD | DS_MODE_READONLY, FTAG);
}
493 
494 int
495 dmu_objset_create(const char *name, dmu_objset_type_t type,
496     objset_t *clone_parent,
497     void (*func)(objset_t *os, void *arg, dmu_tx_t *tx), void *arg)
498 {
499 	dsl_dir_t *pdd;
500 	const char *tail;
501 	int err = 0;
502 	struct oscarg oa = { 0 };
503 
504 	ASSERT(strchr(name, '@') == NULL);
505 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
506 	if (err)
507 		return (err);
508 	if (tail == NULL) {
509 		dsl_dir_close(pdd, FTAG);
510 		return (EEXIST);
511 	}
512 
513 	dprintf("name=%s\n", name);
514 
515 	oa.userfunc = func;
516 	oa.userarg = arg;
517 	oa.lastname = tail;
518 	oa.type = type;
519 	if (clone_parent != NULL) {
520 		/*
521 		 * You can't clone to a different type.
522 		 */
523 		if (clone_parent->os->os_phys->os_type != type) {
524 			dsl_dir_close(pdd, FTAG);
525 			return (EINVAL);
526 		}
527 		oa.clone_parent = clone_parent->os->os_dsl_dataset;
528 	}
529 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
530 	    dmu_objset_create_sync, pdd, &oa, 5);
531 	dsl_dir_close(pdd, FTAG);
532 	return (err);
533 }
534 
535 int
536 dmu_objset_destroy(const char *name)
537 {
538 	objset_t *os;
539 	int error;
540 
541 	/*
542 	 * If it looks like we'll be able to destroy it, and there's
543 	 * an unplayed replay log sitting around, destroy the log.
544 	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
545 	 * but the replay log objset is modified in open context.
546 	 */
547 	error = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
548 	if (error == 0) {
549 		zil_destroy(dmu_objset_zil(os), B_FALSE);
550 		dmu_objset_close(os);
551 	}
552 
553 	return (dsl_dataset_destroy(name));
554 }
555 
556 int
557 dmu_objset_rollback(const char *name)
558 {
559 	int err;
560 	objset_t *os;
561 
562 	err = dmu_objset_open(name, DMU_OST_ANY,
563 	    DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT, &os);
564 	if (err == 0) {
565 		err = zil_suspend(dmu_objset_zil(os));
566 		if (err == 0)
567 			zil_resume(dmu_objset_zil(os));
568 		if (err == 0) {
569 			/* XXX uncache everything? */
570 			err = dsl_dataset_rollback(os->os->os_dsl_dataset);
571 		}
572 		dmu_objset_close(os);
573 	}
574 	return (err);
575 }
576 
/* State shared by dmu_objset_snapshot() and its per-filesystem callback. */
struct snaparg {
	dsl_sync_task_group_t *dstg;	/* group of per-fs snapshot tasks */
	char *snapname;			/* snapshot name (text after '@') */
	char failed[MAXPATHLEN];	/* name of the fs that failed, if any */
};
582 
583 static int
584 dmu_objset_snapshot_one(char *name, void *arg)
585 {
586 	struct snaparg *sn = arg;
587 	objset_t *os;
588 	int err;
589 
590 	(void) strcpy(sn->failed, name);
591 
592 	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_STANDARD, &os);
593 	if (err != 0)
594 		return (err);
595 
596 	/*
597 	 * NB: we need to wait for all in-flight changes to get to disk,
598 	 * so that we snapshot those changes.  zil_suspend does this as
599 	 * a side effect.
600 	 */
601 	err = zil_suspend(dmu_objset_zil(os));
602 	if (err == 0) {
603 		dsl_sync_task_create(sn->dstg, dsl_dataset_snapshot_check,
604 		    dsl_dataset_snapshot_sync, os, sn->snapname, 3);
605 	}
606 	return (err);
607 }
608 
/*
 * Snapshot 'fsname@snapname' -- and, if 'recursive', all of fsname's
 * descendant filesystems -- as one sync task group, so all snapshots
 * happen in the same txg.  On error, the name of the offending
 * filesystem is copied back into 'fsname'.
 */
int
dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct snaparg sn = { 0 };
	char *cp;
	spa_t *spa;
	int err;

	(void) strcpy(sn.failed, fsname);

	/* The pool name is fsname up to (not including) the first '/'. */
	cp = strchr(fsname, '/');
	if (cp) {
		*cp = '\0';
		err = spa_open(fsname, &spa, FTAG);
		*cp = '/';
	} else {
		err = spa_open(fsname, &spa, FTAG);
	}
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;

	if (recursive) {
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err)
		goto out;

	err = dsl_sync_task_group_wait(sn.dstg);

	/*
	 * Resume each suspended ZIL and close each objset that
	 * dmu_objset_snapshot_one() opened, recording the name of any
	 * dataset whose snapshot task failed.
	 */
	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		if (dst->dst_err)
			dmu_objset_name(os, sn.failed);
		zil_resume(dmu_objset_zil(os));
		dmu_objset_close(os);
	}
out:
	if (err)
		(void) strcpy(fsname, sn.failed);
	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}
661 
/*
 * Sync every dnode on 'list', one indirection level at a time.  A
 * dnode_sync() return of 0 means the dnode still needs syncing at the
 * next higher level, so it is re-queued; each level's writes are
 * issued under a root zio that we wait on before moving up a level.
 */
static void
dmu_objset_sync_dnodes(objset_impl_t *os, list_t *list, dmu_tx_t *tx)
{
	dnode_t *dn = list_head(list);
	int level, err;

	for (level = 0; dn = list_head(list); level++) {
		zio_t *zio;
		zio = zio_root(os->os_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);

		ASSERT3U(level, <=, DN_MAX_LEVELS);

		while (dn) {
			dnode_t *next = list_next(list, dn);

			list_remove(list, dn);
			if (dnode_sync(dn, level, zio, tx) == 0) {
				/*
				 * This dnode requires syncing at higher
				 * levels; put it back onto the list.
				 */
				if (next)
					list_insert_before(list, next, dn);
				else
					list_insert_tail(list, dn);
			}
			dn = next;
		}

		/* Wait for all of this level's writes before going up. */
		DTRACE_PROBE1(wait__begin, zio_t *, zio);
		err = zio_wait(zio);
		DTRACE_PROBE4(wait__end, zio_t *, zio,
		    uint64_t, tx->tx_txg, objset_impl_t *, os, int, level);

		ASSERT(err == 0);
	}
}
699 
/* ARGSUSED */
/*
 * arc_write() done callback for the objset root block: update the
 * root bp's fill count from the freshly written meta-dnode, stamp the
 * bp's type/level, and account the rewrite (kill old block, birth new
 * one) against the dataset.
 */
static void
killer(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;
	objset_phys_t *osphys = zio->io_data;
	dnode_phys_t *dnp = &osphys->os_meta_dnode;
	int i;

	ASSERT3U(zio->io_error, ==, 0);

	/*
	 * Update rootbp fill count.
	 */
	os->os_rootbp.blk_fill = 1;	/* count the meta-dnode */
	for (i = 0; i < dnp->dn_nblkptr; i++)
		os->os_rootbp.blk_fill += dnp->dn_blkptr[i].blk_fill;

	BP_SET_TYPE(zio->io_bp, DMU_OT_OBJSET);
	BP_SET_LEVEL(zio->io_bp, 0);

	/* Only account if the block actually moved on disk. */
	if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		dsl_dataset_block_kill(os->os_dsl_dataset, &zio->io_bp_orig,
		    os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, zio->io_bp,
		    os->os_synctx);
	}
}
729 
/* called from dsl */
/*
 * Sync this objset out for the given txg: first the freed and dirty
 * dnodes, then the ZIL, then the meta-dnode, and finally the objset
 * root block itself (via arc_write with killer() as done callback).
 */
void
dmu_objset_sync(objset_impl_t *os, dmu_tx_t *tx)
{
	extern taskq_t *dbuf_tq;
	int txgoff;
	list_t *dirty_list;
	int err;
	zbookmark_t zb;
	arc_buf_t *abuf =
	    arc_buf_alloc(os->os_spa, sizeof (objset_phys_t), FTAG,
		ARC_BUFC_METADATA);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(os->os_synctx == NULL);
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	txgoff = tx->tx_txg & TXG_MASK;

	dmu_objset_sync_dnodes(os, &os->os_free_dnodes[txgoff], tx);
	dmu_objset_sync_dnodes(os, &os->os_dirty_dnodes[txgoff], tx);

	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);

	/*
	 * Sync meta-dnode
	 */
	dirty_list = &os->os_dirty_dnodes[txgoff];
	ASSERT(list_head(dirty_list) == NULL);
	list_insert_tail(dirty_list, os->os_meta_dnode);
	dmu_objset_sync_dnodes(os, dirty_list, tx);

	/*
	 * Sync the root block.
	 */
	bcopy(os->os_phys, abuf->b_data, sizeof (objset_phys_t));
	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = 0;
	err = arc_write(NULL, os->os_spa, os->os_md_checksum,
	    os->os_md_compress,
	    dmu_get_replication_level(os->os_spa, &zb, DMU_OT_OBJSET),
	    tx->tx_txg, &os->os_rootbp, abuf, killer, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, ARC_WAIT, &zb);
	ASSERT(err == 0);
	VERIFY(arc_buf_remove_ref(abuf, FTAG) == 1);

	dsl_dataset_set_blkptr(os->os_dsl_dataset, &os->os_rootbp, tx);

	ASSERT3P(os->os_synctx, ==, tx);
	/* Wait for any outstanding dbuf evictions before clearing synctx. */
	taskq_wait(dbuf_tq);
	os->os_synctx = NULL;
}
790 
791 void
792 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
793     uint64_t *usedobjsp, uint64_t *availobjsp)
794 {
795 	dsl_dataset_space(os->os->os_dsl_dataset, refdbytesp, availbytesp,
796 	    usedobjsp, availobjsp);
797 }
798 
799 uint64_t
800 dmu_objset_fsid_guid(objset_t *os)
801 {
802 	return (dsl_dataset_fsid_guid(os->os->os_dsl_dataset));
803 }
804 
805 void
806 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
807 {
808 	stat->dds_type = os->os->os_phys->os_type;
809 	if (os->os->os_dsl_dataset)
810 		dsl_dataset_fast_stat(os->os->os_dsl_dataset, stat);
811 }
812 
813 void
814 dmu_objset_stats(objset_t *os, nvlist_t *nv)
815 {
816 	ASSERT(os->os->os_dsl_dataset ||
817 	    os->os->os_phys->os_type == DMU_OST_META);
818 
819 	if (os->os->os_dsl_dataset != NULL)
820 		dsl_dataset_stats(os->os->os_dsl_dataset, nv);
821 
822 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
823 	    os->os->os_phys->os_type);
824 }
825 
826 int
827 dmu_objset_is_snapshot(objset_t *os)
828 {
829 	if (os->os->os_dsl_dataset != NULL)
830 		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
831 	else
832 		return (B_FALSE);
833 }
834 
835 int
836 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
837     uint64_t *idp, uint64_t *offp)
838 {
839 	dsl_dataset_t *ds = os->os->os_dsl_dataset;
840 	zap_cursor_t cursor;
841 	zap_attribute_t attr;
842 
843 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
844 		return (ENOENT);
845 
846 	zap_cursor_init_serialized(&cursor,
847 	    ds->ds_dir->dd_pool->dp_meta_objset,
848 	    ds->ds_phys->ds_snapnames_zapobj, *offp);
849 
850 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
851 		zap_cursor_fini(&cursor);
852 		return (ENOENT);
853 	}
854 
855 	if (strlen(attr.za_name) + 1 > namelen) {
856 		zap_cursor_fini(&cursor);
857 		return (ENAMETOOLONG);
858 	}
859 
860 	(void) strcpy(name, attr.za_name);
861 	if (idp)
862 		*idp = attr.za_first_integer;
863 	zap_cursor_advance(&cursor);
864 	*offp = zap_cursor_serialize(&cursor);
865 	zap_cursor_fini(&cursor);
866 
867 	return (0);
868 }
869 
870 int
871 dmu_dir_list_next(objset_t *os, int namelen, char *name,
872     uint64_t *idp, uint64_t *offp)
873 {
874 	dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
875 	zap_cursor_t cursor;
876 	zap_attribute_t attr;
877 
878 	/* there is no next dir on a snapshot! */
879 	if (os->os->os_dsl_dataset->ds_object !=
880 	    dd->dd_phys->dd_head_dataset_obj)
881 		return (ENOENT);
882 
883 	zap_cursor_init_serialized(&cursor,
884 	    dd->dd_pool->dp_meta_objset,
885 	    dd->dd_phys->dd_child_dir_zapobj, *offp);
886 
887 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
888 		zap_cursor_fini(&cursor);
889 		return (ENOENT);
890 	}
891 
892 	if (strlen(attr.za_name) + 1 > namelen) {
893 		zap_cursor_fini(&cursor);
894 		return (ENAMETOOLONG);
895 	}
896 
897 	(void) strcpy(name, attr.za_name);
898 	if (idp)
899 		*idp = attr.za_first_integer;
900 	zap_cursor_advance(&cursor);
901 	*offp = zap_cursor_serialize(&cursor);
902 	zap_cursor_fini(&cursor);
903 
904 	return (0);
905 }
906 
907 /*
908  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
909  */
910 int
911 dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
912 {
913 	dsl_dir_t *dd;
914 	objset_t *os;
915 	uint64_t snapobj;
916 	zap_cursor_t zc;
917 	zap_attribute_t attr;
918 	char *child;
919 	int do_self, err;
920 
921 	err = dsl_dir_open(name, FTAG, &dd, NULL);
922 	if (err)
923 		return (err);
924 
925 	/* NB: the $MOS dir doesn't have a head dataset */
926 	do_self = (dd->dd_phys->dd_head_dataset_obj != 0);
927 
928 	/*
929 	 * Iterate over all children.
930 	 */
931 	if (flags & DS_FIND_CHILDREN) {
932 		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset,
933 		    dd->dd_phys->dd_child_dir_zapobj);
934 		    zap_cursor_retrieve(&zc, &attr) == 0;
935 		    (void) zap_cursor_advance(&zc)) {
936 			ASSERT(attr.za_integer_length == sizeof (uint64_t));
937 			ASSERT(attr.za_num_integers == 1);
938 
939 			/*
940 			 * No separating '/' because parent's name ends in /.
941 			 */
942 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
943 			/* XXX could probably just use name here */
944 			dsl_dir_name(dd, child);
945 			(void) strcat(child, "/");
946 			(void) strcat(child, attr.za_name);
947 			err = dmu_objset_find(child, func, arg, flags);
948 			kmem_free(child, MAXPATHLEN);
949 			if (err)
950 				break;
951 		}
952 		zap_cursor_fini(&zc);
953 
954 		if (err) {
955 			dsl_dir_close(dd, FTAG);
956 			return (err);
957 		}
958 	}
959 
960 	/*
961 	 * Iterate over all snapshots.
962 	 */
963 	if ((flags & DS_FIND_SNAPSHOTS) &&
964 	    dmu_objset_open(name, DMU_OST_ANY,
965 	    DS_MODE_STANDARD | DS_MODE_READONLY, &os) == 0) {
966 
967 		snapobj = os->os->os_dsl_dataset->ds_phys->ds_snapnames_zapobj;
968 		dmu_objset_close(os);
969 
970 		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset, snapobj);
971 		    zap_cursor_retrieve(&zc, &attr) == 0;
972 		    (void) zap_cursor_advance(&zc)) {
973 			ASSERT(attr.za_integer_length == sizeof (uint64_t));
974 			ASSERT(attr.za_num_integers == 1);
975 
976 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
977 			/* XXX could probably just use name here */
978 			dsl_dir_name(dd, child);
979 			(void) strcat(child, "@");
980 			(void) strcat(child, attr.za_name);
981 			err = func(child, arg);
982 			kmem_free(child, MAXPATHLEN);
983 			if (err)
984 				break;
985 		}
986 		zap_cursor_fini(&zc);
987 	}
988 
989 	dsl_dir_close(dd, FTAG);
990 
991 	if (err)
992 		return (err);
993 
994 	/*
995 	 * Apply to self if appropriate.
996 	 */
997 	if (do_self)
998 		err = func(name, arg);
999 	return (err);
1000 }
1001