xref: /illumos-gate/usr/src/lib/libzfs_core/common/libzfs_core.c (revision 458f44a49dc56cd17a39815122214e7a1b4793e3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
24  * Copyright (c) 2013 Steven Hartland. All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  * Copyright 2017 RackTop Systems.
27  */
28 
29 /*
30  * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
31  * It has the following characteristics:
32  *
33  *  - Thread Safe.  libzfs_core is accessible concurrently from multiple
34  *  threads.  This is accomplished primarily by avoiding global data
35  *  (e.g. caching).  Since it's thread-safe, there is no reason for a
36  *  process to have multiple libzfs "instances".  Therefore, we store
37  *  our few pieces of data (e.g. the file descriptor) in global
38  *  variables.  The fd is reference-counted so that the libzfs_core
39  *  library can be "initialized" multiple times (e.g. by different
40  *  consumers within the same process).
41  *
42  *  - Committed Interface.  The libzfs_core interface will be committed,
43  *  therefore consumers can compile against it and be confident that
44  *  their code will continue to work on future releases of this code.
45  *  Currently, the interface is Evolving (not Committed), but we intend
46  *  to commit to it once it is more complete and we determine that it
47  *  meets the needs of all consumers.
48  *
49  *  - Programmatic Error Handling.  libzfs_core communicates errors with
50  *  defined error numbers, and doesn't print anything to stdout/stderr.
51  *
52  *  - Thin Layer.  libzfs_core is a thin layer, marshaling arguments
53  *  to/from the kernel ioctls.  There is generally a 1:1 correspondence
54  *  between libzfs_core functions and ioctls to /dev/zfs.
55  *
56  *  - Clear Atomicity.  Because libzfs_core functions are generally 1:1
57  *  with kernel ioctls, and kernel ioctls are generally atomic, each
58  *  libzfs_core function is atomic.  For example, creating multiple
59  *  snapshots with a single call to lzc_snapshot() is atomic -- it
60  *  can't fail with only some of the requested snapshots created, even
61  *  in the event of power loss or system crash.
62  *
63  *  - Continued libzfs Support.  Some higher-level operations (e.g.
64  *  support for "zfs send -R") are too complicated to fit the scope of
65  *  libzfs_core.  This functionality will continue to live in libzfs.
66  *  Where appropriate, libzfs will use the underlying atomic operations
67  *  of libzfs_core.  For example, libzfs may implement "zfs send -R |
68  *  zfs receive" by using individual "send one snapshot", rename,
69  *  destroy, and "receive one snapshot" operations in libzfs_core.
70  *  /sbin/zfs and /sbin/zpool will link with both libzfs and
71  *  libzfs_core.  Other consumers should aim to use only libzfs_core,
72  *  since that will be the supported, stable interface going forwards.
73  */
74 
75 #include <libzfs_core.h>
76 #include <ctype.h>
77 #include <unistd.h>
78 #include <stdlib.h>
79 #include <string.h>
80 #include <errno.h>
81 #include <fcntl.h>
82 #include <pthread.h>
83 #include <sys/nvpair.h>
84 #include <sys/param.h>
85 #include <sys/types.h>
86 #include <sys/stat.h>
87 #include <sys/zfs_ioctl.h>
88 
/*
 * Process-wide file descriptor for /dev/zfs, shared by every libzfs_core
 * consumer in this process.  g_lock serializes open/close of the fd;
 * g_refcount counts libzfs_core_init() calls so the fd is only closed
 * when the last consumer calls libzfs_core_fini().
 */
static int g_fd = -1;
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_refcount;
92 
93 int
94 libzfs_core_init(void)
95 {
96 	(void) pthread_mutex_lock(&g_lock);
97 	if (g_refcount == 0) {
98 		g_fd = open("/dev/zfs", O_RDWR);
99 		if (g_fd < 0) {
100 			(void) pthread_mutex_unlock(&g_lock);
101 			return (errno);
102 		}
103 	}
104 	g_refcount++;
105 	(void) pthread_mutex_unlock(&g_lock);
106 	return (0);
107 }
108 
109 void
110 libzfs_core_fini(void)
111 {
112 	(void) pthread_mutex_lock(&g_lock);
113 	ASSERT3S(g_refcount, >, 0);
114 
115 	if (g_refcount > 0)
116 		g_refcount--;
117 
118 	if (g_refcount == 0 && g_fd != -1) {
119 		(void) close(g_fd);
120 		g_fd = -1;
121 	}
122 	(void) pthread_mutex_unlock(&g_lock);
123 }
124 
/*
 * Marshal 'source' into a packed nvlist and issue the given ioctl against
 * dataset/pool 'name'.  If 'resultp' is non-NULL, a destination buffer is
 * allocated for the kernel's reply and, when the kernel fills it, the
 * reply is unpacked into *resultp (caller frees it with nvlist_free()).
 *
 * Returns 0 on success or an errno on failure.
 */
static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
    nvlist_t *source, nvlist_t **resultp)
{
	zfs_cmd_t zc = { 0 };
	int error = 0;
	char *packed;
	size_t size;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));

	/* Pack the input nvlist and point the request at it. */
	packed = fnvlist_pack(source, &size);
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;

	if (resultp != NULL) {
		*resultp = NULL;
		if (ioc == ZFS_IOC_CHANNEL_PROGRAM) {
			/*
			 * A channel program's output cannot exceed its
			 * memory limit, so use that as the exact buffer
			 * size; retrying on ENOMEM would be wrong for
			 * channel programs (see the loop below).
			 */
			zc.zc_nvlist_dst_size = fnvlist_lookup_uint64(source,
			    ZCP_ARG_MEMLIMIT);
		} else {
			/*
			 * Initial guess at the reply size; the retry
			 * loop below doubles it on ENOMEM.
			 */
			zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
		}
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);
		if (zc.zc_nvlist_dst == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	while (ioctl(g_fd, ioc, &zc) != 0) {
		/*
		 * If ioctl exited with ENOMEM, we retry the ioctl after
		 * increasing the size of the destination nvlist.
		 *
		 * Channel programs that exit with ENOMEM ran over the
		 * lua memory sandbox; they should not be retried.
		 */
		if (errno == ENOMEM && resultp != NULL &&
		    ioc != ZFS_IOC_CHANNEL_PROGRAM) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			zc.zc_nvlist_dst_size *= 2;
			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
			    malloc(zc.zc_nvlist_dst_size);
			if (zc.zc_nvlist_dst == NULL) {
				error = ENOMEM;
				goto out;
			}
		} else {
			error = errno;
			break;
		}
	}
	/*
	 * NOTE(review): this assumes the kernel sets zc_nvlist_dst_filled
	 * only when a destination buffer was supplied (i.e. resultp is
	 * non-NULL here); otherwise *resultp would dereference NULL --
	 * confirm against the ZFS_IOC handler.
	 */
	if (zc.zc_nvlist_dst_filled) {
		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
		    zc.zc_nvlist_dst_size);
	}

out:
	fnvlist_pack_free(packed, size);
	/* zc_nvlist_dst is 0 when resultp was NULL; free(NULL) is a no-op. */
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}
192 
193 int
194 lzc_create(const char *fsname, enum lzc_dataset_type type, nvlist_t *props)
195 {
196 	int error;
197 	nvlist_t *args = fnvlist_alloc();
198 	fnvlist_add_int32(args, "type", (dmu_objset_type_t)type);
199 	if (props != NULL)
200 		fnvlist_add_nvlist(args, "props", props);
201 	error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
202 	nvlist_free(args);
203 	return (error);
204 }
205 
206 int
207 lzc_clone(const char *fsname, const char *origin,
208     nvlist_t *props)
209 {
210 	int error;
211 	nvlist_t *args = fnvlist_alloc();
212 	fnvlist_add_string(args, "origin", origin);
213 	if (props != NULL)
214 		fnvlist_add_nvlist(args, "props", props);
215 	error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
216 	nvlist_free(args);
217 	return (error);
218 }
219 
220 int
221 lzc_promote(const char *fsname, char *snapnamebuf, int snapnamelen)
222 {
223 	/*
224 	 * The promote ioctl is still legacy, so we need to construct our
225 	 * own zfs_cmd_t rather than using lzc_ioctl().
226 	 */
227 	zfs_cmd_t zc = { 0 };
228 
229 	ASSERT3S(g_refcount, >, 0);
230 	VERIFY3S(g_fd, !=, -1);
231 
232 	(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
233 	if (ioctl(g_fd, ZFS_IOC_PROMOTE, &zc) != 0) {
234 		int error = errno;
235 		if (error == EEXIST && snapnamebuf != NULL)
236 			(void) strlcpy(snapnamebuf, zc.zc_string, snapnamelen);
237 		return (error);
238 	}
239 	return (0);
240 }
241 
242 /*
243  * Creates snapshots.
244  *
245  * The keys in the snaps nvlist are the snapshots to be created.
246  * They must all be in the same pool.
247  *
248  * The props nvlist is properties to set.  Currently only user properties
249  * are supported.  { user:prop_name -> string value }
250  *
251  * The returned results nvlist will have an entry for each snapshot that failed.
252  * The value will be the (int32) error code.
253  *
254  * The return value will be 0 if all snapshots were created, otherwise it will
255  * be the errno of a (unspecified) snapshot that failed.
256  */
257 int
258 lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
259 {
260 	nvpair_t *elem;
261 	nvlist_t *args;
262 	int error;
263 	char pool[ZFS_MAX_DATASET_NAME_LEN];
264 
265 	*errlist = NULL;
266 
267 	/* determine the pool name */
268 	elem = nvlist_next_nvpair(snaps, NULL);
269 	if (elem == NULL)
270 		return (0);
271 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
272 	pool[strcspn(pool, "/@")] = '\0';
273 
274 	args = fnvlist_alloc();
275 	fnvlist_add_nvlist(args, "snaps", snaps);
276 	if (props != NULL)
277 		fnvlist_add_nvlist(args, "props", props);
278 
279 	error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
280 	nvlist_free(args);
281 
282 	return (error);
283 }
284 
285 /*
286  * Destroys snapshots.
287  *
288  * The keys in the snaps nvlist are the snapshots to be destroyed.
289  * They must all be in the same pool.
290  *
291  * Snapshots that do not exist will be silently ignored.
292  *
293  * If 'defer' is not set, and a snapshot has user holds or clones, the
294  * destroy operation will fail and none of the snapshots will be
295  * destroyed.
296  *
297  * If 'defer' is set, and a snapshot has user holds or clones, it will be
298  * marked for deferred destruction, and will be destroyed when the last hold
299  * or clone is removed/destroyed.
300  *
301  * The return value will be 0 if all snapshots were destroyed (or marked for
302  * later destruction if 'defer' is set) or didn't exist to begin with.
303  *
304  * Otherwise the return value will be the errno of a (unspecified) snapshot
305  * that failed, no snapshots will be destroyed, and the errlist will have an
306  * entry for each snapshot that failed.  The value in the errlist will be
307  * the (int32) error code.
308  */
309 int
310 lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
311 {
312 	nvpair_t *elem;
313 	nvlist_t *args;
314 	int error;
315 	char pool[ZFS_MAX_DATASET_NAME_LEN];
316 
317 	/* determine the pool name */
318 	elem = nvlist_next_nvpair(snaps, NULL);
319 	if (elem == NULL)
320 		return (0);
321 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
322 	pool[strcspn(pool, "/@")] = '\0';
323 
324 	args = fnvlist_alloc();
325 	fnvlist_add_nvlist(args, "snaps", snaps);
326 	if (defer)
327 		fnvlist_add_boolean(args, "defer");
328 
329 	error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
330 	nvlist_free(args);
331 
332 	return (error);
333 }
334 
335 int
336 lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
337     uint64_t *usedp)
338 {
339 	nvlist_t *args;
340 	nvlist_t *result;
341 	int err;
342 	char fs[ZFS_MAX_DATASET_NAME_LEN];
343 	char *atp;
344 
345 	/* determine the fs name */
346 	(void) strlcpy(fs, firstsnap, sizeof (fs));
347 	atp = strchr(fs, '@');
348 	if (atp == NULL)
349 		return (EINVAL);
350 	*atp = '\0';
351 
352 	args = fnvlist_alloc();
353 	fnvlist_add_string(args, "firstsnap", firstsnap);
354 
355 	err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
356 	nvlist_free(args);
357 	if (err == 0)
358 		*usedp = fnvlist_lookup_uint64(result, "used");
359 	fnvlist_free(result);
360 
361 	return (err);
362 }
363 
364 boolean_t
365 lzc_exists(const char *dataset)
366 {
367 	/*
368 	 * The objset_stats ioctl is still legacy, so we need to construct our
369 	 * own zfs_cmd_t rather than using lzc_ioctl().
370 	 */
371 	zfs_cmd_t zc = { 0 };
372 
373 	ASSERT3S(g_refcount, >, 0);
374 	VERIFY3S(g_fd, !=, -1);
375 
376 	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
377 	return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
378 }
379 
380 /*
381  * Create "user holds" on snapshots.  If there is a hold on a snapshot,
382  * the snapshot can not be destroyed.  (However, it can be marked for deletion
383  * by lzc_destroy_snaps(defer=B_TRUE).)
384  *
385  * The keys in the nvlist are snapshot names.
386  * The snapshots must all be in the same pool.
387  * The value is the name of the hold (string type).
388  *
389  * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
390  * In this case, when the cleanup_fd is closed (including on process
391  * termination), the holds will be released.  If the system is shut down
392  * uncleanly, the holds will be released when the pool is next opened
393  * or imported.
394  *
395  * Holds for snapshots which don't exist will be skipped and have an entry
396  * added to errlist, but will not cause an overall failure.
397  *
398  * The return value will be 0 if all holds, for snapshots that existed,
399  * were successfully created.
400  *
401  * Otherwise the return value will be the errno of a (unspecified) hold that
402  * failed and no holds will be created.
403  *
404  * In all cases the errlist will have an entry for each hold that failed
405  * (name = snapshot), with its value being the error code (int32).
406  */
407 int
408 lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
409 {
410 	char pool[ZFS_MAX_DATASET_NAME_LEN];
411 	nvlist_t *args;
412 	nvpair_t *elem;
413 	int error;
414 
415 	/* determine the pool name */
416 	elem = nvlist_next_nvpair(holds, NULL);
417 	if (elem == NULL)
418 		return (0);
419 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
420 	pool[strcspn(pool, "/@")] = '\0';
421 
422 	args = fnvlist_alloc();
423 	fnvlist_add_nvlist(args, "holds", holds);
424 	if (cleanup_fd != -1)
425 		fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);
426 
427 	error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
428 	nvlist_free(args);
429 	return (error);
430 }
431 
432 /*
433  * Release "user holds" on snapshots.  If the snapshot has been marked for
434  * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
435  * any clones, and all the user holds are removed, then the snapshot will be
436  * destroyed.
437  *
438  * The keys in the nvlist are snapshot names.
439  * The snapshots must all be in the same pool.
440  * The value is a nvlist whose keys are the holds to remove.
441  *
442  * Holds which failed to release because they didn't exist will have an entry
443  * added to errlist, but will not cause an overall failure.
444  *
445  * The return value will be 0 if the nvl holds was empty or all holds that
446  * existed, were successfully removed.
447  *
448  * Otherwise the return value will be the errno of a (unspecified) hold that
449  * failed to release and no holds will be released.
450  *
451  * In all cases the errlist will have an entry for each hold that failed
452  * to release.
453  */
454 int
455 lzc_release(nvlist_t *holds, nvlist_t **errlist)
456 {
457 	char pool[ZFS_MAX_DATASET_NAME_LEN];
458 	nvpair_t *elem;
459 
460 	/* determine the pool name */
461 	elem = nvlist_next_nvpair(holds, NULL);
462 	if (elem == NULL)
463 		return (0);
464 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
465 	pool[strcspn(pool, "/@")] = '\0';
466 
467 	return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
468 }
469 
470 /*
471  * Retrieve list of user holds on the specified snapshot.
472  *
473  * On success, *holdsp will be set to a nvlist which the caller must free.
474  * The keys are the names of the holds, and the value is the creation time
475  * of the hold (uint64) in seconds since the epoch.
476  */
477 int
478 lzc_get_holds(const char *snapname, nvlist_t **holdsp)
479 {
480 	int error;
481 	nvlist_t *innvl = fnvlist_alloc();
482 	error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
483 	fnvlist_free(innvl);
484 	return (error);
485 }
486 
487 /*
488  * Generate a zfs send stream for the specified snapshot and write it to
489  * the specified file descriptor.
490  *
491  * "snapname" is the full name of the snapshot to send (e.g. "pool/fs@snap")
492  *
493  * If "from" is NULL, a full (non-incremental) stream will be sent.
494  * If "from" is non-NULL, it must be the full name of a snapshot or
495  * bookmark to send an incremental from (e.g. "pool/fs@earlier_snap" or
496  * "pool/fs#earlier_bmark").  If non-NULL, the specified snapshot or
497  * bookmark must represent an earlier point in the history of "snapname").
498  * It can be an earlier snapshot in the same filesystem or zvol as "snapname",
499  * or it can be the origin of "snapname"'s filesystem, or an earlier
500  * snapshot in the origin, etc.
501  *
502  * "fd" is the file descriptor to write the send stream to.
503  *
504  * If "flags" contains LZC_SEND_FLAG_LARGE_BLOCK, the stream is permitted
505  * to contain DRR_WRITE records with drr_length > 128K, and DRR_OBJECT
506  * records with drr_blksz > 128K.
507  *
508  * If "flags" contains LZC_SEND_FLAG_EMBED_DATA, the stream is permitted
509  * to contain DRR_WRITE_EMBEDDED records with drr_etype==BP_EMBEDDED_TYPE_DATA,
510  * which the receiving system must support (as indicated by support
511  * for the "embedded_data" feature).
512  */
int
lzc_send(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags)
{
	/* A plain send is a resumable send with no resume point. */
	return (lzc_send_resume(snapname, from, fd, flags, 0, 0));
}
519 
520 int
521 lzc_send_resume(const char *snapname, const char *from, int fd,
522     enum lzc_send_flags flags, uint64_t resumeobj, uint64_t resumeoff)
523 {
524 	nvlist_t *args;
525 	int err;
526 
527 	args = fnvlist_alloc();
528 	fnvlist_add_int32(args, "fd", fd);
529 	if (from != NULL)
530 		fnvlist_add_string(args, "fromsnap", from);
531 	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
532 		fnvlist_add_boolean(args, "largeblockok");
533 	if (flags & LZC_SEND_FLAG_EMBED_DATA)
534 		fnvlist_add_boolean(args, "embedok");
535 	if (flags & LZC_SEND_FLAG_COMPRESS)
536 		fnvlist_add_boolean(args, "compressok");
537 	if (resumeobj != 0 || resumeoff != 0) {
538 		fnvlist_add_uint64(args, "resume_object", resumeobj);
539 		fnvlist_add_uint64(args, "resume_offset", resumeoff);
540 	}
541 	err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
542 	nvlist_free(args);
543 	return (err);
544 }
545 
546 /*
547  * "from" can be NULL, a snapshot, or a bookmark.
548  *
549  * If from is NULL, a full (non-incremental) stream will be estimated.  This
550  * is calculated very efficiently.
551  *
552  * If from is a snapshot, lzc_send_space uses the deadlists attached to
553  * each snapshot to efficiently estimate the stream size.
554  *
555  * If from is a bookmark, the indirect blocks in the destination snapshot
556  * are traversed, looking for blocks with a birth time since the creation TXG of
557  * the snapshot this bookmark was created from.  This will result in
558  * significantly more I/O and be less efficient than a send space estimation on
559  * an equivalent snapshot.
560  */
561 int
562 lzc_send_space(const char *snapname, const char *from,
563     enum lzc_send_flags flags, uint64_t *spacep)
564 {
565 	nvlist_t *args;
566 	nvlist_t *result;
567 	int err;
568 
569 	args = fnvlist_alloc();
570 	if (from != NULL)
571 		fnvlist_add_string(args, "from", from);
572 	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
573 		fnvlist_add_boolean(args, "largeblockok");
574 	if (flags & LZC_SEND_FLAG_EMBED_DATA)
575 		fnvlist_add_boolean(args, "embedok");
576 	if (flags & LZC_SEND_FLAG_COMPRESS)
577 		fnvlist_add_boolean(args, "compressok");
578 	err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
579 	nvlist_free(args);
580 	if (err == 0)
581 		*spacep = fnvlist_lookup_uint64(result, "space");
582 	nvlist_free(result);
583 	return (err);
584 }
585 
/*
 * Read exactly 'ilen' bytes from 'fd' into 'buf', looping over short
 * reads (e.g. from a pipe or socket).
 *
 * Returns 0 on success, or EIO if the descriptor hits EOF or an error
 * before the full length has been read.
 */
static int
recv_read(int fd, void *buf, int ilen)
{
	char *cp = buf;
	int len = ilen;

	while (len > 0) {
		int rv = read(fd, cp, len);

		/*
		 * Stop on error (rv == -1) or EOF (rv == 0) without
		 * adjusting the cursor.  The previous code added rv to
		 * cp/len before checking it, so a failed read stepped
		 * the pointer one byte before the buffer.
		 */
		if (rv <= 0)
			return (EIO);
		cp += rv;
		len -= rv;
	}
	return (0);
}
604 
/*
 * Common implementation for the lzc_receive*() variants.
 *
 * 'snapname' is the full name of the snapshot to create.  Properties in
 * 'props' are applied as received properties.  'origin', if non-NULL,
 * names the clone origin.  'force' requests rollback/destroy of the
 * target as needed; 'resumable' preserves partial state if the stream
 * terminates prematurely.  If 'begin_record' is NULL, the BEGIN record
 * is read from 'fd'; otherwise the caller-supplied (non-byteswapped)
 * record is used.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
recv_impl(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, boolean_t resumable, int fd,
    const dmu_replay_record_t *begin_record)
{
	/*
	 * The receive ioctl is still legacy, so we need to construct our own
	 * zfs_cmd_t rather than using zfsc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };
	char *atp;
	char *packed = NULL;
	size_t size;
	int error;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	/* zc_name is name of containing filesystem */
	(void) strlcpy(zc.zc_name, snapname, sizeof (zc.zc_name));
	atp = strchr(zc.zc_name, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	/* if the fs does not exist, try its parent. */
	if (!lzc_exists(zc.zc_name)) {
		char *slashp = strrchr(zc.zc_name, '/');
		if (slashp == NULL)
			return (ENOENT);
		*slashp = '\0';

	}

	/* zc_value is full name of the snapshot to create */
	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));

	if (props != NULL) {
		/* zc_nvlist_src is props to set */
		packed = fnvlist_pack(props, &size);
		zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
		zc.zc_nvlist_src_size = size;
	}

	/* zc_string is name of clone origin (if DRR_FLAG_CLONE) */
	if (origin != NULL)
		(void) strlcpy(zc.zc_string, origin, sizeof (zc.zc_string));

	/* zc_begin_record is non-byteswapped BEGIN record */
	if (begin_record == NULL) {
		error = recv_read(fd, &zc.zc_begin_record,
		    sizeof (zc.zc_begin_record));
		if (error != 0)
			goto out;
	} else {
		zc.zc_begin_record = *begin_record;
	}

	/* zc_cookie is fd to read from */
	zc.zc_cookie = fd;

	/* zc guid is force flag */
	zc.zc_guid = force;

	zc.zc_resumable = resumable;

	/* zc_cleanup_fd is unused */
	zc.zc_cleanup_fd = -1;

	error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
	if (error != 0)
		error = errno;

out:
	if (packed != NULL)
		fnvlist_pack_free(packed, size);
	/*
	 * zc_nvlist_dst is never assigned in this function, so this is
	 * free(0), a harmless no-op kept for symmetry with lzc_ioctl().
	 */
	free((void*)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}
684 
685 /*
686  * The simplest receive case: receive from the specified fd, creating the
687  * specified snapshot.  Apply the specified properties as "received" properties
688  * (which can be overridden by locally-set properties).  If the stream is a
689  * clone, its origin snapshot must be specified by 'origin'.  The 'force'
690  * flag will cause the target filesystem to be rolled back or destroyed if
691  * necessary to receive.
692  *
693  * Return 0 on success or an errno on failure.
694  *
695  * Note: this interface does not work on dedup'd streams
696  * (those with DMU_BACKUP_FEATURE_DEDUP).
697  */
int
lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	/* Non-resumable receive; the BEGIN record is read from the stream. */
	return (recv_impl(snapname, props, origin, force, B_FALSE, fd, NULL));
}
704 
705 /*
706  * Like lzc_receive, but if the receive fails due to premature stream
707  * termination, the intermediate state will be preserved on disk.  In this
708  * case, ECKSUM will be returned.  The receive may subsequently be resumed
709  * with a resuming send stream generated by lzc_send_resume().
710  */
int
lzc_receive_resumable(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	/* Same as lzc_receive(), but keeps partial state on interruption. */
	return (recv_impl(snapname, props, origin, force, B_TRUE, fd, NULL));
}
717 
718 /*
719  * Like lzc_receive, but allows the caller to read the begin record and then to
720  * pass it in.  That could be useful if the caller wants to derive, for example,
721  * the snapname or the origin parameters based on the information contained in
722  * the begin record.
723  * The begin record must be in its original form as read from the stream,
724  * in other words, it should not be byteswapped.
725  *
726  * The 'resumable' parameter allows to obtain the same behavior as with
727  * lzc_receive_resumable.
728  */
729 int
730 lzc_receive_with_header(const char *snapname, nvlist_t *props,
731     const char *origin, boolean_t force, boolean_t resumable, int fd,
732     const dmu_replay_record_t *begin_record)
733 {
734 	if (begin_record == NULL)
735 		return (EINVAL);
736 	return (recv_impl(snapname, props, origin, force, resumable, fd,
737 	    begin_record));
738 }
739 
740 /*
741  * Roll back this filesystem or volume to its most recent snapshot.
742  * If snapnamebuf is not NULL, it will be filled in with the name
743  * of the most recent snapshot.
744  * Note that the latest snapshot may change if a new one is concurrently
745  * created or the current one is destroyed.  lzc_rollback_to can be used
746  * to roll back to a specific latest snapshot.
747  *
748  * Return 0 on success or an errno on failure.
749  */
750 int
751 lzc_rollback(const char *fsname, char *snapnamebuf, int snapnamelen)
752 {
753 	nvlist_t *args;
754 	nvlist_t *result;
755 	int err;
756 
757 	args = fnvlist_alloc();
758 	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
759 	nvlist_free(args);
760 	if (err == 0 && snapnamebuf != NULL) {
761 		const char *snapname = fnvlist_lookup_string(result, "target");
762 		(void) strlcpy(snapnamebuf, snapname, snapnamelen);
763 	}
764 	nvlist_free(result);
765 
766 	return (err);
767 }
768 
769 /*
770  * Roll back this filesystem or volume to the specified snapshot,
771  * if possible.
772  *
773  * Return 0 on success or an errno on failure.
774  */
775 int
776 lzc_rollback_to(const char *fsname, const char *snapname)
777 {
778 	nvlist_t *args;
779 	nvlist_t *result;
780 	int err;
781 
782 	args = fnvlist_alloc();
783 	fnvlist_add_string(args, "target", snapname);
784 	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
785 	nvlist_free(args);
786 	nvlist_free(result);
787 	return (err);
788 }
789 
790 /*
791  * Creates bookmarks.
792  *
793  * The bookmarks nvlist maps from name of the bookmark (e.g. "pool/fs#bmark") to
794  * the name of the snapshot (e.g. "pool/fs@snap").  All the bookmarks and
795  * snapshots must be in the same pool.
796  *
797  * The returned results nvlist will have an entry for each bookmark that failed.
798  * The value will be the (int32) error code.
799  *
800  * The return value will be 0 if all bookmarks were created, otherwise it will
801  * be the errno of an (undetermined) bookmark that failed.
802  */
803 int
804 lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
805 {
806 	nvpair_t *elem;
807 	int error;
808 	char pool[ZFS_MAX_DATASET_NAME_LEN];
809 
810 	/* determine the pool name */
811 	elem = nvlist_next_nvpair(bookmarks, NULL);
812 	if (elem == NULL)
813 		return (0);
814 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
815 	pool[strcspn(pool, "/#")] = '\0';
816 
817 	error = lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist);
818 
819 	return (error);
820 }
821 
822 /*
823  * Retrieve bookmarks.
824  *
825  * Retrieve the list of bookmarks for the given file system. The props
826  * parameter is an nvlist of property names (with no values) that will be
827  * returned for each bookmark.
828  *
829  * The following are valid properties on bookmarks, all of which are numbers
830  * (represented as uint64 in the nvlist)
831  *
832  * "guid" - globally unique identifier of the snapshot it refers to
833  * "createtxg" - txg when the snapshot it refers to was created
834  * "creation" - timestamp when the snapshot it refers to was created
835  *
836  * The format of the returned nvlist as follows:
837  * <short name of bookmark> -> {
838  *     <name of property> -> {
839  *         "value" -> uint64
840  *     }
841  *  }
842  */
int
lzc_get_bookmarks(const char *fsname, nvlist_t *props, nvlist_t **bmarks)
{
	/* Thin wrapper; 'props' lists the property names to retrieve. */
	return (lzc_ioctl(ZFS_IOC_GET_BOOKMARKS, fsname, props, bmarks));
}
848 
849 /*
850  * Destroys bookmarks.
851  *
852  * The keys in the bmarks nvlist are the bookmarks to be destroyed.
853  * They must all be in the same pool.  Bookmarks are specified as
854  * <fs>#<bmark>.
855  *
856  * Bookmarks that do not exist will be silently ignored.
857  *
858  * The return value will be 0 if all bookmarks that existed were destroyed.
859  *
860  * Otherwise the return value will be the errno of a (undetermined) bookmark
861  * that failed, no bookmarks will be destroyed, and the errlist will have an
862  * entry for each bookmark that failed.  The value in the errlist will be
863  * the (int32) error code.
864  */
865 int
866 lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
867 {
868 	nvpair_t *elem;
869 	int error;
870 	char pool[ZFS_MAX_DATASET_NAME_LEN];
871 
872 	/* determine the pool name */
873 	elem = nvlist_next_nvpair(bmarks, NULL);
874 	if (elem == NULL)
875 		return (0);
876 	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
877 	pool[strcspn(pool, "/#")] = '\0';
878 
879 	error = lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist);
880 
881 	return (error);
882 }
883 
884 static int
885 lzc_channel_program_impl(const char *pool, const char *program, boolean_t sync,
886     uint64_t instrlimit, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
887 {
888 	int error;
889 	nvlist_t *args;
890 
891 	args = fnvlist_alloc();
892 	fnvlist_add_string(args, ZCP_ARG_PROGRAM, program);
893 	fnvlist_add_nvlist(args, ZCP_ARG_ARGLIST, argnvl);
894 	fnvlist_add_boolean_value(args, ZCP_ARG_SYNC, sync);
895 	fnvlist_add_uint64(args, ZCP_ARG_INSTRLIMIT, instrlimit);
896 	fnvlist_add_uint64(args, ZCP_ARG_MEMLIMIT, memlimit);
897 	error = lzc_ioctl(ZFS_IOC_CHANNEL_PROGRAM, pool, args, outnvl);
898 	fnvlist_free(args);
899 
900 	return (error);
901 }
902 
903 /*
904  * Executes a channel program.
905  *
906  * If this function returns 0 the channel program was successfully loaded and
907  * ran without failing. Note that individual commands the channel program ran
908  * may have failed and the channel program is responsible for reporting such
909  * errors through outnvl if they are important.
910  *
911  * This method may also return:
912  *
913  * EINVAL   The program contains syntax errors, or an invalid memory or time
914  *          limit was given. No part of the channel program was executed.
915  *          If caused by syntax errors, 'outnvl' contains information about the
916  *          errors.
917  *
918  * ECHRNG   The program was executed, but encountered a runtime error, such as
919  *          calling a function with incorrect arguments, invoking the error()
920  *          function directly, failing an assert() command, etc. Some portion
921  *          of the channel program may have executed and committed changes.
922  *          Information about the failure can be found in 'outnvl'.
923  *
924  * ENOMEM   The program fully executed, but the output buffer was not large
925  *          enough to store the returned value. No output is returned through
926  *          'outnvl'.
927  *
928  * ENOSPC   The program was terminated because it exceeded its memory usage
929  *          limit. Some portion of the channel program may have executed and
930  *          committed changes to disk. No output is returned through 'outnvl'.
931  *
932  * ETIME    The program was terminated because it exceeded its Lua instruction
933  *          limit. Some portion of the channel program may have executed and
934  *          committed changes to disk. No output is returned through 'outnvl'.
935  */
int
lzc_channel_program(const char *pool, const char *program, uint64_t instrlimit,
    uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
	/* sync=B_TRUE: run in syncing context (may modify on-disk state). */
	return (lzc_channel_program_impl(pool, program, B_TRUE, instrlimit,
	    memlimit, argnvl, outnvl));
}
943 
944 /*
945  * Executes a read-only channel program.
946  *
947  * A read-only channel program works programmatically the same way as a
948  * normal channel program executed with lzc_channel_program(). The only
949  * difference is it runs exclusively in open-context and therefore can
950  * return faster. The downside to that, is that the program cannot change
951  * on-disk state by calling functions from the zfs.sync submodule.
952  *
953  * The return values of this function (and their meaning) are exactly the
954  * same as the ones described in lzc_channel_program().
955  */
int
lzc_channel_program_nosync(const char *pool, const char *program,
    uint64_t timeout, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
	/* sync=B_FALSE: open context only; cannot call zfs.sync functions. */
	return (lzc_channel_program_impl(pool, program, B_FALSE, timeout,
	    memlimit, argnvl, outnvl));
}
963