xref: /titanic_50/usr/src/uts/common/fs/zfs/dmu_objset.c (revision e127a3e717f822eb855235fa3bd08235b2cf533d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_prop.h>
33 #include <sys/dsl_pool.h>
34 #include <sys/dsl_synctask.h>
35 #include <sys/dnode.h>
36 #include <sys/dbuf.h>
37 #include <sys/zvol.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/zio_checksum.h>
40 #include <sys/zap.h>
41 #include <sys/zil.h>
42 #include <sys/dmu_impl.h>
43 
44 
45 spa_t *
46 dmu_objset_spa(objset_t *os)
47 {
48 	return (os->os->os_spa);
49 }
50 
51 zilog_t *
52 dmu_objset_zil(objset_t *os)
53 {
54 	return (os->os->os_zil);
55 }
56 
57 dsl_pool_t *
58 dmu_objset_pool(objset_t *os)
59 {
60 	dsl_dataset_t *ds;
61 
62 	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
63 		return (ds->ds_dir->dd_pool);
64 	else
65 		return (spa_get_dsl(os->os->os_spa));
66 }
67 
68 dsl_dataset_t *
69 dmu_objset_ds(objset_t *os)
70 {
71 	return (os->os->os_dsl_dataset);
72 }
73 
74 dmu_objset_type_t
75 dmu_objset_type(objset_t *os)
76 {
77 	return (os->os->os_phys->os_type);
78 }
79 
80 void
81 dmu_objset_name(objset_t *os, char *buf)
82 {
83 	dsl_dataset_name(os->os->os_dsl_dataset, buf);
84 }
85 
86 uint64_t
87 dmu_objset_id(objset_t *os)
88 {
89 	dsl_dataset_t *ds = os->os->os_dsl_dataset;
90 
91 	return (ds ? ds->ds_object : 0);
92 }
93 
94 static void
95 checksum_changed_cb(void *arg, uint64_t newval)
96 {
97 	objset_impl_t *osi = arg;
98 
99 	/*
100 	 * Inheritance should have been done by now.
101 	 */
102 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
103 
104 	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
105 }
106 
107 static void
108 compression_changed_cb(void *arg, uint64_t newval)
109 {
110 	objset_impl_t *osi = arg;
111 
112 	/*
113 	 * Inheritance and range checking should have been done by now.
114 	 */
115 	ASSERT(newval != ZIO_COMPRESS_INHERIT);
116 
117 	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
118 }
119 
/*
 * Byteswap an objset_phys_t in place: its meta-dnode, its embedded
 * zil header (an array of uint64_t's), and its type field.  'size'
 * must be exactly sizeof (objset_phys_t).
 */
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
}
130 
/*
 * Construct (or join) the in-core objset state for dataset 'ds'
 * (NULL for the meta-objset).  Reads the objset_phys_t from 'bp'
 * when it is non-hole, otherwise allocates a zeroed one.  On success
 * *osip points at the (possibly pre-existing, see the race note at
 * the bottom) objset_impl_t and 0 is returned; on error an errno is
 * returned and all partially-constructed state is torn down.
 */
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_impl_t **osip)
{
	objset_impl_t *winner, *osi;
	int i, err, checksum;

	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
	/* The embedded objset_t points back at this impl. */
	osi->os.os = osi;
	osi->os_dsl_dataset = ds;
	osi->os_spa = spa;
	osi->os_rootbp = bp;
	if (!BP_IS_HOLE(osi->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		zb.zb_objset = ds ? ds->ds_object : 0;
		zb.zb_object = 0;
		zb.zb_level = -1;
		zb.zb_blkid = 0;

		dprintf_bp(osi->os_rootbp, "reading %s", "");
		/* Synchronous read of the objset_phys_t into an ARC buffer. */
		err = arc_read(NULL, spa, osi->os_rootbp,
		    dmu_ot[DMU_OT_OBJSET].ot_byteswap,
		    arc_getbuf_func, &osi->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
		osi->os_phys = osi->os_phys_buf->b_data;
		/* Release the buffer so it can be modified for rewrite. */
		arc_release(osi->os_phys_buf, &osi->os_phys_buf);
	} else {
		/* Hole root bp: a brand-new objset; start from zeros. */
		osi->os_phys_buf = arc_buf_alloc(spa, sizeof (objset_phys_t),
		    &osi->os_phys_buf, ARC_BUFC_METADATA);
		osi->os_phys = osi->os_phys_buf->b_data;
		bzero(osi->os_phys, sizeof (objset_phys_t));
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know, and
	 * registering would complicate clone promotion.
	 */
	if (ds && ds->ds_phys->ds_num_children == 0) {
		err = dsl_prop_register(ds, "checksum",
		    checksum_changed_cb, osi);
		if (err == 0)
			err = dsl_prop_register(ds, "compression",
			    compression_changed_cb, osi);
		if (err) {
			/* Unwind the phys buffer and the impl on failure. */
			VERIFY(arc_buf_remove_ref(osi->os_phys_buf,
			    &osi->os_phys_buf) == 1);
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset. */
		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		osi->os_compress = ZIO_COMPRESS_LZJB;
	}

	osi->os_zil = zil_alloc(&osi->os, &osi->os_phys->os_zil_header);

	/*
	 * Metadata always gets compressed and checksummed.
	 * If the data checksum is multi-bit correctable, and it's not
	 * a ZBT-style checksum, then it's suitable for metadata as well.
	 * Otherwise, the metadata checksum defaults to fletcher4.
	 */
	checksum = osi->os_checksum;

	if (zio_checksum_table[checksum].ci_correctable &&
	    !zio_checksum_table[checksum].ci_zbt)
		osi->os_md_checksum = checksum;
	else
		osi->os_md_checksum = ZIO_CHECKSUM_FLETCHER_4;
	osi->os_md_compress = ZIO_COMPRESS_LZJB;

	/* Per-txg dirty/free dnode lists, linked via dn_dirty_link[i]. */
	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&osi->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&osi->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);

	osi->os_meta_dnode = dnode_special_open(osi,
	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);

	if (ds != NULL) {
		/*
		 * Publish this impl as the dataset's user pointer.  If a
		 * concurrent opener won the race, evict our copy and use
		 * the winner's instead.
		 */
		winner = dsl_dataset_set_user_ptr(ds, osi, dmu_objset_evict);
		if (winner) {
			dmu_objset_evict(ds, osi);
			osi = winner;
		}
	}

	*osip = osi;
	return (0);
}
238 
/*
 * Open the objset named 'name'; called from the zpl.  A 'type' of
 * DMU_OST_ANY accepts any objset type, otherwise a mismatch fails
 * with EINVAL.  On success *osp holds a new objset_t that the caller
 * must release with dmu_objset_close().
 */
int
dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
    objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;
	objset_t *os;
	objset_impl_t *osi;

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	err = dsl_dataset_open(name, mode, os, &ds);
	if (err) {
		kmem_free(os, sizeof (objset_t));
		return (err);
	}

	/* Reuse the cached impl if an earlier opener already built it. */
	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &ds->ds_phys->ds_bp, &osi);
		if (err) {
			dsl_dataset_close(ds, mode, os);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	}

	os->os = osi;
	os->os_mode = mode;

	/* dmu_objset_close() also closes the dataset and frees 'os'. */
	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) {
		dmu_objset_close(os);
		return (EINVAL);
	}
	*osp = os;
	return (0);
}
277 
278 void
279 dmu_objset_close(objset_t *os)
280 {
281 	dsl_dataset_close(os->os->os_dsl_dataset, os->os_mode, os);
282 	kmem_free(os, sizeof (objset_t));
283 }
284 
/*
 * Evict the dbufs of every dnode in this objset.  If 'try' is set,
 * eviction is best-effort and the walk aborts (returning 1) as soon
 * as dnode_evict_dbufs() fails; returns 0 when all dbufs were
 * evicted.  The meta-dnode is processed last since every other dnode
 * holds a reference on it.
 */
int
dmu_objset_evict_dbufs(objset_t *os, int try)
{
	objset_impl_t *osi = os->os;
	dnode_t *dn;

	mutex_enter(&osi->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&osi->os_dnodes, osi->os_meta_dnode);
	list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode);

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&osi->os_dnodes);
	    dn && refcount_is_zero(&dn->dn_holds);
	    dn = list_next(&osi->os_dnodes, dn))
		continue;
	if (dn)
		dnode_add_ref(dn, FTAG);

	while (dn) {
		dnode_t *next_dn = dn;

		/* Pin the next held dnode before dropping os_lock. */
		do {
			next_dn = list_next(&osi->os_dnodes, next_dn);
		} while (next_dn && refcount_is_zero(&next_dn->dn_holds));
		if (next_dn)
			dnode_add_ref(next_dn, FTAG);

		/* Drop the lock while evicting; our holds keep both alive. */
		mutex_exit(&osi->os_lock);
		if (dnode_evict_dbufs(dn, try)) {
			/* Eviction failed; release holds and bail out. */
			dnode_rele(dn, FTAG);
			if (next_dn)
				dnode_rele(next_dn, FTAG);
			return (1);
		}
		dnode_rele(dn, FTAG);
		mutex_enter(&osi->os_lock);
		dn = next_dn;
	}
	mutex_exit(&osi->os_lock);
	return (0);
}
333 
/*
 * Tear down the in-core objset state; installed as the dataset's
 * user-pointer evict callback by dmu_objset_open_impl().  Unregisters
 * property callbacks, evicts all dbufs and dnodes, and frees every
 * resource the open path allocated.  All dirty state must already
 * have been synced out.
 */
void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	objset_t os;
	int i;

	/* Nothing may be dirty or freeing in any txg at this point. */
	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	/* Mirror of the registration condition in dmu_objset_open_impl(). */
	if (ds && ds->ds_phys->ds_num_children == 0) {
		VERIFY(0 == dsl_prop_unregister(ds, "checksum",
		    checksum_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "compression",
		    compression_changed_cb, osi));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	os.os = osi;
	(void) dmu_objset_evict_dbufs(&os, 0);

	/* Only the meta-dnode should remain, with no dbufs left. */
	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);

	dnode_special_close(osi->os_meta_dnode);
	zil_free(osi->os_zil);

	VERIFY(arc_buf_remove_ref(osi->os_phys_buf, &osi->os_phys_buf) == 1);
	mutex_destroy(&osi->os_lock);
	mutex_destroy(&osi->os_obj_lock);
	kmem_free(osi, sizeof (objset_impl_t));
}
372 
/*
 * Create a new objset of the given type in syncing context; called
 * from the dsl (with ds == NULL for the meta-objset).  Allocates the
 * meta-dnode and, for non-MOS objsets, pre-sizes its indirection
 * levels so they never need to grow later.
 */
objset_impl_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_impl_t *osi;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	/* bp is a hole here, so this takes the arc_buf_alloc path. */
	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &osi));
	mdn = osi->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	osi->os_phys->os_type = type;

	dsl_dataset_dirty(ds, tx);

	return (osi);
}
424 
/* Arguments threaded through the objset-create sync task. */
struct oscarg {
	/* Optional callback run on the new objset in syncing context. */
	void (*userfunc)(objset_t *os, void *arg, dmu_tx_t *tx);
	void *userarg;			/* opaque argument for userfunc */
	dsl_dataset_t *clone_parent;	/* snapshot to clone, or NULL */
	const char *lastname;		/* last component of the new name */
	dmu_objset_type_t type;		/* DMU_OST_* type to create */
};
432 
/* ARGSUSED */
/*
 * Sync-task check func for objset creation: verify the name is not
 * already taken and, if cloning, that the clone source is a snapshot
 * in the same pool.
 */
static int
dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	int err;
	uint64_t ddobj;

	/* ENOENT is the only acceptable outcome: the name must be free. */
	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (oa->clone_parent != NULL) {
		/*
		 * You can't clone across pools.
		 */
		if (oa->clone_parent->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);

		/*
		 * You can only clone snapshots, not the head datasets.
		 */
		if (oa->clone_parent->ds_phys->ds_num_children == 0)
			return (EINVAL);
	}
	return (0);
}
463 
/*
 * Sync-task func for objset creation: create the dataset, and if it
 * is not a clone (root bp is a hole), build an empty objset of the
 * requested type and invoke the user's creation callback.
 */
static void
dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	dsl_dataset_t *ds;
	blkptr_t *bp;
	uint64_t dsobj;

	ASSERT(dmu_tx_is_syncing(tx));

	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_parent, tx);

	VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool, dsobj, NULL,
	    DS_MODE_STANDARD | DS_MODE_READONLY, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	if (BP_IS_HOLE(bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, bp, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, tx);
	}
	dsl_dataset_close(ds, DS_MODE_STANDARD | DS_MODE_READONLY, FTAG);
}
493 
/*
 * Create a new objset (or clone) named 'name'.  If 'clone_parent' is
 * non-NULL, the new objset is a clone of it and must share its type;
 * otherwise 'func' (if non-NULL) is called in syncing context to
 * initialize the new, empty objset.  Returns 0 or an errno.
 */
int
dmu_objset_create(const char *name, dmu_objset_type_t type,
    objset_t *clone_parent,
    void (*func)(objset_t *os, void *arg, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	/* A NULL tail means the full name already names an existing dir. */
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	dprintf("name=%s\n", name);

	oa.userfunc = func;
	oa.userarg = arg;
	oa.lastname = tail;
	oa.type = type;
	if (clone_parent != NULL) {
		/*
		 * You can't clone to a different type.
		 */
		if (clone_parent->os->os_phys->os_type != type) {
			dsl_dir_close(pdd, FTAG);
			return (EINVAL);
		}
		oa.clone_parent = clone_parent->os->os_dsl_dataset;
	}
	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}
534 
/*
 * Destroy the objset named 'name', first destroying any unplayed
 * intent log so the dataset destroy does not trip over it.
 */
int
dmu_objset_destroy(const char *name)
{
	objset_t *os;
	int error;

	/*
	 * If it looks like we'll be able to destroy it, and there's
	 * an unplayed replay log sitting around, destroy the log.
	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
	 * but the replay log objset is modified in open context.
	 */
	error = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
	if (error == 0) {
		zil_destroy(dmu_objset_zil(os), B_FALSE);
		dmu_objset_close(os);
	}

	/* Proceed even if the open above failed; let destroy report. */
	return (dsl_dataset_destroy(name));
}
555 
/*
 * Roll the objset named 'name' back to its most recent snapshot.
 * The ZIL is suspended/resumed first to flush in-flight log state
 * before the dataset rollback runs.
 */
int
dmu_objset_rollback(const char *name)
{
	int err;
	objset_t *os;

	err = dmu_objset_open(name, DMU_OST_ANY,
	    DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT, &os);
	if (err == 0) {
		err = zil_suspend(dmu_objset_zil(os));
		if (err == 0)
			zil_resume(dmu_objset_zil(os));
		if (err == 0) {
			/* XXX uncache everything? */
			err = dsl_dataset_rollback(os->os->os_dsl_dataset);
		}
		dmu_objset_close(os);
	}
	return (err);
}
576 
/* State shared across dmu_objset_snapshot_one() invocations. */
struct snaparg {
	dsl_sync_task_group_t *dstg;	/* group accumulating snapshot tasks */
	char *snapname;			/* snapshot name (after the '@') */
	char failed[MAXPATHLEN];	/* dataset name that failed, if any */
};
582 
/*
 * dmu_objset_find() callback: queue a snapshot of the objset 'name'
 * onto the sync task group in 'arg'.  On success the objset is left
 * open (and its ZIL suspended) until dmu_objset_snapshot() resumes
 * and closes it after the group completes.
 */
static int
dmu_objset_snapshot_one(char *name, void *arg)
{
	struct snaparg *sn = arg;
	objset_t *os;
	dmu_objset_stats_t stat;
	int err;

	/*
	 * Record the name in case this one fails.  NOTE(review): assumes
	 * 'name' fits in sn->failed[MAXPATHLEN]; callers appear to pass
	 * MAXPATHLEN-sized names — confirm before reuse elsewhere.
	 */
	(void) strcpy(sn->failed, name);

	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_STANDARD, &os);
	if (err != 0)
		return (err);

	/*
	 * If the objset is in an inconsistent state, return busy.
	 */
	dmu_objset_fast_stat(os, &stat);
	if (stat.dds_inconsistent) {
		dmu_objset_close(os);
		return (EBUSY);
	}

	/*
	 * NB: we need to wait for all in-flight changes to get to disk,
	 * so that we snapshot those changes.  zil_suspend does this as
	 * a side effect.
	 */
	err = zil_suspend(dmu_objset_zil(os));
	if (err == 0) {
		dsl_sync_task_create(sn->dstg, dsl_dataset_snapshot_check,
		    dsl_dataset_snapshot_sync, os, sn->snapname, 3);
	} else {
		dmu_objset_close(os);
	}

	return (err);
}
621 
/*
 * Snapshot 'fsname@snapname', optionally recursing over all child
 * filesystems.  All snapshots are taken atomically as one sync task
 * group.  On error, 'fsname' is overwritten with the name of the
 * dataset that failed.
 */
int
dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct snaparg sn = { 0 };
	char *cp;
	spa_t *spa;
	int err;

	(void) strcpy(sn.failed, fsname);

	/* Open the pool using only the leading component of fsname. */
	cp = strchr(fsname, '/');
	if (cp) {
		*cp = '\0';
		err = spa_open(fsname, &spa, FTAG);
		*cp = '/';
	} else {
		err = spa_open(fsname, &spa, FTAG);
	}
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;

	if (recursive) {
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err)
		goto out;

	err = dsl_sync_task_group_wait(sn.dstg);

	/*
	 * Each queued task left its objset open with the ZIL suspended;
	 * resume and close them all, noting any per-task failure.
	 */
	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		if (dst->dst_err)
			dmu_objset_name(os, sn.failed);
		zil_resume(dmu_objset_zil(os));
		dmu_objset_close(os);
	}
out:
	if (err)
		(void) strcpy(fsname, sn.failed);
	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}
674 
/*
 * Sync every dnode on 'list' (a per-txg dirty or free list),
 * removing each from the list as it is synced.
 */
static void
dmu_objset_sync_dnodes(list_t *list, dmu_tx_t *tx)
{
	dnode_t *dn;

	while (dn = list_head(list)) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync()
		 * to accommodate meta-dnode
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);
		dnode_sync(dn, tx);
	}
}
695 
696 /* ARGSUSED */
697 static void
698 ready(zio_t *zio, arc_buf_t *abuf, void *arg)
699 {
700 	objset_impl_t *os = arg;
701 	blkptr_t *bp = os->os_rootbp;
702 	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
703 	int i;
704 
705 	/*
706 	 * Update rootbp fill count.
707 	 */
708 	bp->blk_fill = 1;	/* count the meta-dnode */
709 	for (i = 0; i < dnp->dn_nblkptr; i++)
710 		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
711 }
712 
/* ARGSUSED */
/*
 * arc_write "done" callback for the objset root block: fix up the bp
 * type/level, account block birth/death against the dataset, and
 * release the phys buffer.
 */
static void
killer(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;

	ASSERT3U(zio->io_error, ==, 0);

	BP_SET_TYPE(zio->io_bp, DMU_OT_OBJSET);
	BP_SET_LEVEL(zio->io_bp, 0);

	/*
	 * If the block was reallocated, kill the old copy (only if it
	 * was born in this very txg) and record the new block's birth.
	 */
	if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		if (zio->io_bp_orig.blk_birth == os->os_synctx->tx_txg)
			dsl_dataset_block_kill(os->os_dsl_dataset,
			    &zio->io_bp_orig, NULL, os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, zio->io_bp,
		    os->os_synctx);
	}
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	/*
	 * NOTE(review): presumably balances a hold taken when the
	 * dataset was dirtied — confirm against dsl_dataset_dirty().
	 */
	if (os->os_dsl_dataset)
		dmu_buf_rele(os->os_dsl_dataset->ds_dbuf, os->os_dsl_dataset);
}
737 
/*
 * Sync this objset's dirty state for the current txg; called from the
 * dsl in syncing context.  Issues the root-block write (with ready/
 * killer callbacks), syncs the meta-dnode and all dirty/free dnodes,
 * kicks off their child writes, and frees ZIL blocks up to this txg.
 */
void
dmu_objset_sync(objset_impl_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	zio_t *zio;
	list_t *list;
	dbuf_dirty_record_t *dr;
	int zio_flags;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	/*
	 * Create the root block IO
	 */
	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = 0;
	zio_flags = ZIO_FLAG_MUSTSUCCEED;
	if (dmu_ot[DMU_OT_OBJSET].ot_metadata || zb.zb_level != 0)
		zio_flags |= ZIO_FLAG_METADATA;
	/* Free the previous root block if it was born in an earlier txg. */
	if (BP_IS_OLDER(os->os_rootbp, tx->tx_txg))
		dsl_dataset_block_kill(os->os_dsl_dataset,
		    os->os_rootbp, pio, tx);
	zio = arc_write(pio, os->os_spa, os->os_md_checksum,
	    os->os_md_compress,
	    dmu_get_replication_level(os->os_spa, &zb, DMU_OT_OBJSET),
	    tx->tx_txg, os->os_rootbp, os->os_phys_buf, ready, killer, os,
	    ZIO_PRIORITY_ASYNC_WRITE, zio_flags, &zb);

	/*
	 * Sync meta-dnode - the parent IO for the sync is the root block
	 */
	os->os_meta_dnode->dn_zio = zio;
	dnode_sync(os->os_meta_dnode, tx);

	txgoff = tx->tx_txg & TXG_MASK;

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], tx);

	/* Kick off the writes queued by the meta-dnode's dirty records. */
	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT(dr->dr_dbuf->db_level == 0);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	zio_nowait(zio);
}
798 
799 void
800 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
801     uint64_t *usedobjsp, uint64_t *availobjsp)
802 {
803 	dsl_dataset_space(os->os->os_dsl_dataset, refdbytesp, availbytesp,
804 	    usedobjsp, availobjsp);
805 }
806 
807 uint64_t
808 dmu_objset_fsid_guid(objset_t *os)
809 {
810 	return (dsl_dataset_fsid_guid(os->os->os_dsl_dataset));
811 }
812 
813 void
814 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
815 {
816 	stat->dds_type = os->os->os_phys->os_type;
817 	if (os->os->os_dsl_dataset)
818 		dsl_dataset_fast_stat(os->os->os_dsl_dataset, stat);
819 }
820 
821 void
822 dmu_objset_stats(objset_t *os, nvlist_t *nv)
823 {
824 	ASSERT(os->os->os_dsl_dataset ||
825 	    os->os->os_phys->os_type == DMU_OST_META);
826 
827 	if (os->os->os_dsl_dataset != NULL)
828 		dsl_dataset_stats(os->os->os_dsl_dataset, nv);
829 
830 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
831 	    os->os->os_phys->os_type);
832 }
833 
834 int
835 dmu_objset_is_snapshot(objset_t *os)
836 {
837 	if (os->os->os_dsl_dataset != NULL)
838 		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
839 	else
840 		return (B_FALSE);
841 }
842 
/*
 * Return the next snapshot name of this objset's dataset.  *offp is a
 * serialized zap cursor position (start with 0); it is advanced on
 * success so repeated calls iterate.  Returns ENOENT when exhausted,
 * ENAMETOOLONG if the name won't fit in 'namelen' bytes, else 0 with
 * 'name' (and optionally *idp, the snapshot's object id) filled in.
 */
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	/* Advance and re-serialize so the caller can resume here. */
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
877 
/*
 * Return the next child directory name of this objset's dsl_dir.
 * Same cursor contract as dmu_snapshot_list_next(): *offp is a
 * serialized zap cursor, advanced on success.  Snapshots have no
 * children, so ENOENT is returned for them immediately.
 */
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	/* Advance and re-serialize so the caller can resume here. */
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
914 
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * With DS_FIND_CHILDREN, recurse into child filesystems first; with
 * DS_FIND_SNAPSHOTS, also visit each snapshot ("name@snap").  The
 * dataset itself is visited last (depth-first), except the $MOS dir
 * which has no head dataset.  The first non-zero return from 'func'
 * aborts the walk and is returned.
 */
int
dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	objset_t *os;
	uint64_t snapobj;
	zap_cursor_t zc;
	zap_attribute_t attr;
	char *child;
	int do_self, err;

	err = dsl_dir_open(name, FTAG, &dd, NULL);
	if (err)
		return (err);

	/* NB: the $MOS dir doesn't have a head dataset */
	do_self = (dd->dd_phys->dd_head_dataset_obj != 0);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, &attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr.za_integer_length == sizeof (uint64_t));
			ASSERT(attr.za_num_integers == 1);

			/*
			 * No separating '/' because parent's name ends in /.
			 */
			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			/* XXX could probably just use name here */
			dsl_dir_name(dd, child);
			(void) strcat(child, "/");
			(void) strcat(child, attr.za_name);
			/* Recurse into the child before visiting self. */
			err = dmu_objset_find(child, func, arg, flags);
			kmem_free(child, MAXPATHLEN);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);

		if (err) {
			dsl_dir_close(dd, FTAG);
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if ((flags & DS_FIND_SNAPSHOTS) &&
	    dmu_objset_open(name, DMU_OST_ANY,
	    DS_MODE_STANDARD | DS_MODE_READONLY, &os) == 0) {

		/* Capture the snapnames zap, then drop the open handle. */
		snapobj = os->os->os_dsl_dataset->ds_phys->ds_snapnames_zapobj;
		dmu_objset_close(os);

		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset, snapobj);
		    zap_cursor_retrieve(&zc, &attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr.za_integer_length == sizeof (uint64_t));
			ASSERT(attr.za_num_integers == 1);

			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			/* XXX could probably just use name here */
			dsl_dir_name(dd, child);
			(void) strcat(child, "@");
			(void) strcat(child, attr.za_name);
			err = func(child, arg);
			kmem_free(child, MAXPATHLEN);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);
	}

	dsl_dir_close(dd, FTAG);

	if (err)
		return (err);

	/*
	 * Apply to self if appropriate.
	 */
	if (do_self)
		err = func(name, arg);
	return (err);
}
1009