xref: /illumos-gate/usr/src/uts/common/fs/zfs/zvol.c (revision 753d2d2e8e7fd0c9bcf736d9bf2f2faf4d6234cc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * ZFS volume emulation driver.
30  *
31  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
32  * Volumes are accessed through the symbolic links named:
33  *
34  * /dev/zvol/dsk/<pool_name>/<dataset_name>
35  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
36  *
37  * These links are created by the ZFS-specific devfsadm link generator.
38  * Volumes are persistent through reboot.  No user command needs to be
39  * run before opening and using a device.
40  */
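
/*
 * Illustrative user-space sketch (not part of this driver): once the
 * devfsadm links above exist, a volume can be accessed like any other
 * raw device.  The pool/volume name "tank/vol0" below is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char buf[512];
 *	int fd = open("/dev/zvol/rdsk/tank/vol0", O_RDONLY);
 *	if (fd != -1) {
 *		(void) pread(fd, buf, sizeof (buf), 0);
 *		(void) close(fd);
 *	}
 */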
41 
42 #include <sys/types.h>
43 #include <sys/param.h>
44 #include <sys/errno.h>
45 #include <sys/aio_req.h>
46 #include <sys/uio.h>
47 #include <sys/buf.h>
48 #include <sys/modctl.h>
49 #include <sys/open.h>
50 #include <sys/kmem.h>
51 #include <sys/conf.h>
52 #include <sys/cmn_err.h>
53 #include <sys/stat.h>
54 #include <sys/zap.h>
55 #include <sys/spa.h>
56 #include <sys/zio.h>
57 #include <sys/dsl_prop.h>
58 #include <sys/dkio.h>
59 #include <sys/efi_partition.h>
60 #include <sys/byteorder.h>
61 #include <sys/pathname.h>
62 #include <sys/ddi.h>
63 #include <sys/sunddi.h>
64 #include <sys/crc32.h>
65 #include <sys/dirent.h>
66 #include <sys/policy.h>
67 #include <sys/fs/zfs.h>
68 #include <sys/zfs_ioctl.h>
69 #include <sys/mkdev.h>
70 #include <sys/zil.h>
71 #include <sys/refcount.h>
72 
73 #include "zfs_namecheck.h"
74 
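/*
 * Every zvol objset contains exactly two objects, both claimed at fixed
 * object numbers by zvol_create_cb(): ZVOL_OBJ holds the volume's data,
 * and ZVOL_ZAP_OBJ is a ZAP holding volume metadata (the code below
 * uses only its 8-byte "size" entry).
 */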
75 #define	ZVOL_OBJ		1ULL
76 #define	ZVOL_ZAP_OBJ		2ULL
77 
78 static void *zvol_state;
79 
80 /*
81  * This lock protects the zvol_state structure from being modified
82  * while it's being used, e.g. an open that comes in before a create
83  * finishes.  It also protects temporary opens of the dataset so that,
84  * e.g., an open doesn't get a spurious EBUSY.
85  */
86 static kmutex_t zvol_state_lock;
87 static uint32_t zvol_minors;
88 
89 /*
90  * The in-core state of each volume.
91  */
92 typedef struct zvol_state {
93 	char		zv_name[MAXPATHLEN]; /* pool/dd name */
94 	uint64_t	zv_volsize;	/* amount of space we advertise */
95 	minor_t		zv_minor;	/* minor number */
96 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
97 	uint8_t		zv_readonly;	/* hard readonly; like write-protect */
98 	objset_t	*zv_objset;	/* objset handle */
99 	uint32_t	zv_mode;	/* DS_MODE_* flags at open time */
100 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
101 	uint32_t	zv_total_opens;	/* total open count */
102 	zilog_t		*zv_zilog;	/* ZIL handle */
103 	uint64_t	zv_txg_assign;	/* txg to assign during ZIL replay */
104 	krwlock_t	zv_dslock;	/* dmu_sync() rwlock */
105 } zvol_state_t;
106 
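/*
 * Refresh the "Size" and "Nblocks" properties on the volume's dev_t so
 * that callers of the DDI property interfaces observe the current
 * volume size.  Called at minor creation and whenever volsize changes.
 */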
107 static void
108 zvol_size_changed(zvol_state_t *zv, dev_t dev)
109 {
110 	dev = makedevice(getmajor(dev), zv->zv_minor);
111 
112 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
113 	    "Size", zv->zv_volsize) == DDI_SUCCESS);
114 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
115 	    "Nblocks", lbtodb(zv->zv_volsize)) == DDI_SUCCESS);
116 }
117 
118 int
119 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
120 {
121 	if (volsize == 0)
122 		return (EINVAL);
123 
124 	if (volsize % blocksize != 0)
125 		return (EINVAL);
126 
127 #ifdef _ILP32
128 	if (volsize - 1 > SPEC_MAXOFFSET_T)
129 		return (EOVERFLOW);
130 #endif
131 	return (0);
132 }
133 
134 int
135 zvol_check_volblocksize(uint64_t volblocksize)
136 {
137 	if (volblocksize < SPA_MINBLOCKSIZE ||
138 	    volblocksize > SPA_MAXBLOCKSIZE ||
139 	    !ISP2(volblocksize))
140 		return (EDOM);
141 
142 	return (0);
143 }
144 
145 static void
146 zvol_readonly_changed_cb(void *arg, uint64_t newval)
147 {
148 	zvol_state_t *zv = arg;
149 
150 	zv->zv_readonly = (uint8_t)newval;
151 }
152 
153 int
154 zvol_get_stats(objset_t *os, zvol_stats_t *zvs)
155 {
156 	int error;
157 	dmu_object_info_t doi;
158 
159 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &zvs->zv_volsize);
160 
161 	if (error)
162 		return (error);
163 
164 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
165 
166 	if (error == 0)
167 		zvs->zv_volblocksize = doi.doi_data_block_size;
168 
169 	return (error);
170 }
171 
172 /*
173  * Find a free minor number.
174  */
175 static minor_t
176 zvol_minor_alloc(void)
177 {
178 	minor_t minor;
179 
180 	ASSERT(MUTEX_HELD(&zvol_state_lock));
181 
182 	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++)
183 		if (ddi_get_soft_state(zvol_state, minor) == NULL)
184 			return (minor);
185 
186 	return (0);
187 }
188 
189 static zvol_state_t *
190 zvol_minor_lookup(const char *name)
191 {
192 	minor_t minor;
193 	zvol_state_t *zv;
194 
195 	ASSERT(MUTEX_HELD(&zvol_state_lock));
196 
197 	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) {
198 		zv = ddi_get_soft_state(zvol_state, minor);
199 		if (zv == NULL)
200 			continue;
201 		if (strcmp(zv->zv_name, name) == 0)
202 			return (zv);
203 	}
204 
205 	return (NULL);
206 }
207 
208 void
209 zvol_create_cb(objset_t *os, void *arg, dmu_tx_t *tx)
210 {
211 	zfs_create_data_t *zc = arg;
212 	int error;
213 	uint64_t volblocksize, volsize;
214 
215 	VERIFY(nvlist_lookup_uint64(zc->zc_props,
216 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
217 	if (nvlist_lookup_uint64(zc->zc_props,
218 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
219 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
220 
221 	/*
222 	 * These properties must be removed from the list so the generic
223 	 * property setting step won't apply to them.
224 	 */
225 	VERIFY(nvlist_remove_all(zc->zc_props,
226 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
227 	(void) nvlist_remove_all(zc->zc_props,
228 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
229 
230 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
231 	    DMU_OT_NONE, 0, tx);
232 	ASSERT(error == 0);
233 
234 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
235 	    DMU_OT_NONE, 0, tx);
236 	ASSERT(error == 0);
237 
238 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
239 	ASSERT(error == 0);
240 }
241 
242 /*
243  * Replay a TX_WRITE ZIL transaction that didn't get committed
244  * after a system failure.
245  */
246 static int
247 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
248 {
249 	objset_t *os = zv->zv_objset;
250 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
251 	uint64_t off = lr->lr_offset;
252 	uint64_t len = lr->lr_length;
253 	dmu_tx_t *tx;
254 	int error;
255 
256 	if (byteswap)
257 		byteswap_uint64_array(lr, sizeof (*lr));
258 
259 	tx = dmu_tx_create(os);
260 	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
261 	error = dmu_tx_assign(tx, zv->zv_txg_assign);
262 	if (error) {
263 		dmu_tx_abort(tx);
264 	} else {
265 		dmu_write(os, ZVOL_OBJ, off, len, data, tx);
266 		dmu_tx_commit(tx);
267 	}
268 
269 	return (error);
270 }
271 
272 /* ARGSUSED */
273 static int
274 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
275 {
276 	return (ENOTSUP);
277 }
278 
279 /*
280  * Callback vectors for replaying records.
281  * Only TX_WRITE is needed for zvol.
282  */
283 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
284 	zvol_replay_err,	/* 0 no such transaction type */
285 	zvol_replay_err,	/* TX_CREATE */
286 	zvol_replay_err,	/* TX_MKDIR */
287 	zvol_replay_err,	/* TX_MKXATTR */
288 	zvol_replay_err,	/* TX_SYMLINK */
289 	zvol_replay_err,	/* TX_REMOVE */
290 	zvol_replay_err,	/* TX_RMDIR */
291 	zvol_replay_err,	/* TX_LINK */
292 	zvol_replay_err,	/* TX_RENAME */
293 	zvol_replay_write,	/* TX_WRITE */
294 	zvol_replay_err,	/* TX_TRUNCATE */
295 	zvol_replay_err,	/* TX_SETATTR */
296 	zvol_replay_err,	/* TX_ACL */
297 };
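
/*
 * zvol_create_minor() hands this vector to zil_replay(), which
 * re-applies any TX_WRITE records left in the intent log after a crash
 * via zvol_replay_write() above.
 */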
298 
299 /*
300  * Create a minor node for the specified volume.
301  */
302 int
303 zvol_create_minor(const char *name, dev_t dev)
304 {
305 	zvol_state_t *zv;
306 	objset_t *os;
307 	uint64_t volsize;
308 	minor_t minor = 0;
309 	struct pathname linkpath;
310 	int ds_mode = DS_MODE_PRIMARY;
311 	vnode_t *vp = NULL;
312 	char *devpath;
313 	size_t devpathlen = strlen(ZVOL_FULL_DEV_DIR) + 1 + strlen(name) + 1;
314 	char chrbuf[30], blkbuf[30];
315 	int error;
316 
317 	mutex_enter(&zvol_state_lock);
318 
319 	if ((zv = zvol_minor_lookup(name)) != NULL) {
320 		mutex_exit(&zvol_state_lock);
321 		return (EEXIST);
322 	}
323 
324 	if (strchr(name, '@') != 0)
325 		ds_mode |= DS_MODE_READONLY;
326 
327 	error = dmu_objset_open(name, DMU_OST_ZVOL, ds_mode, &os);
328 
329 	if (error) {
330 		mutex_exit(&zvol_state_lock);
331 		return (error);
332 	}
333 
334 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
335 
336 	if (error) {
337 		dmu_objset_close(os);
338 		mutex_exit(&zvol_state_lock);
339 		return (error);
340 	}
341 
342 	/*
343 	 * If there's an existing /dev/zvol symlink, try to use the
344 	 * same minor number we used last time.
345 	 */
346 	devpath = kmem_alloc(devpathlen, KM_SLEEP);
347 
348 	(void) sprintf(devpath, "%s/%s", ZVOL_FULL_DEV_DIR, name);
349 
350 	error = lookupname(devpath, UIO_SYSSPACE, NO_FOLLOW, NULL, &vp);
351 
352 	kmem_free(devpath, devpathlen);
353 
354 	if (error == 0 && vp->v_type != VLNK)
355 		error = EINVAL;
356 
357 	if (error == 0) {
358 		pn_alloc(&linkpath);
359 		error = pn_getsymlink(vp, &linkpath, kcred);
360 		if (error == 0) {
361 			char *ms = strstr(linkpath.pn_path, ZVOL_PSEUDO_DEV);
362 			if (ms != NULL) {
363 				ms += strlen(ZVOL_PSEUDO_DEV);
364 				minor = stoi(&ms);
365 			}
366 		}
367 		pn_free(&linkpath);
368 	}
369 
370 	if (vp != NULL)
371 		VN_RELE(vp);
372 
373 	/*
374 	 * If we found a minor but it's already in use, we must pick a new one.
375 	 */
376 	if (minor != 0 && ddi_get_soft_state(zvol_state, minor) != NULL)
377 		minor = 0;
378 
379 	if (minor == 0)
380 		minor = zvol_minor_alloc();
381 
382 	if (minor == 0) {
383 		dmu_objset_close(os);
384 		mutex_exit(&zvol_state_lock);
385 		return (ENXIO);
386 	}
387 
388 	if (ddi_soft_state_zalloc(zvol_state, minor) != DDI_SUCCESS) {
389 		dmu_objset_close(os);
390 		mutex_exit(&zvol_state_lock);
391 		return (EAGAIN);
392 	}
393 
394 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
395 	    (char *)name);
396 
397 	(void) sprintf(chrbuf, "%uc,raw", minor);
398 
399 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
400 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
401 		ddi_soft_state_free(zvol_state, minor);
402 		dmu_objset_close(os);
403 		mutex_exit(&zvol_state_lock);
404 		return (EAGAIN);
405 	}
406 
407 	(void) sprintf(blkbuf, "%uc", minor);
408 
409 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
410 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
411 		ddi_remove_minor_node(zfs_dip, chrbuf);
412 		ddi_soft_state_free(zvol_state, minor);
413 		dmu_objset_close(os);
414 		mutex_exit(&zvol_state_lock);
415 		return (EAGAIN);
416 	}
417 
418 	zv = ddi_get_soft_state(zvol_state, minor);
419 
420 	(void) strcpy(zv->zv_name, name);
421 	zv->zv_min_bs = DEV_BSHIFT;
422 	zv->zv_minor = minor;
423 	zv->zv_volsize = volsize;
424 	zv->zv_objset = os;
425 	zv->zv_mode = ds_mode;
426 	zv->zv_zilog = zil_open(os, NULL);
427 
428 	rw_init(&zv->zv_dslock, NULL, RW_DEFAULT, NULL);
429 
430 	zil_replay(os, zv, &zv->zv_txg_assign, zvol_replay_vector, NULL);
431 
432 	zvol_size_changed(zv, dev);
433 
434 	/* XXX this should handle the possible i/o error */
435 	VERIFY(dsl_prop_register(dmu_objset_ds(zv->zv_objset),
436 	    "readonly", zvol_readonly_changed_cb, zv) == 0);
437 
438 	zvol_minors++;
439 
440 	mutex_exit(&zvol_state_lock);
441 
442 	return (0);
443 }
444 
445 /*
446  * Remove minor node for the specified volume.
447  */
448 int
449 zvol_remove_minor(const char *name)
450 {
451 	zvol_state_t *zv;
452 	char namebuf[30];
453 
454 	mutex_enter(&zvol_state_lock);
455 
456 	if ((zv = zvol_minor_lookup(name)) == NULL) {
457 		mutex_exit(&zvol_state_lock);
458 		return (ENXIO);
459 	}
460 
461 	if (zv->zv_total_opens != 0) {
462 		mutex_exit(&zvol_state_lock);
463 		return (EBUSY);
464 	}
465 
466 	(void) sprintf(namebuf, "%uc,raw", zv->zv_minor);
467 	ddi_remove_minor_node(zfs_dip, namebuf);
468 
469 	(void) sprintf(namebuf, "%uc", zv->zv_minor);
470 	ddi_remove_minor_node(zfs_dip, namebuf);
471 
472 	VERIFY(dsl_prop_unregister(dmu_objset_ds(zv->zv_objset),
473 	    "readonly", zvol_readonly_changed_cb, zv) == 0);
474 
475 	zil_close(zv->zv_zilog);
476 	zv->zv_zilog = NULL;
477 	dmu_objset_close(zv->zv_objset);
478 	zv->zv_objset = NULL;
479 
480 	ddi_soft_state_free(zvol_state, zv->zv_minor);
481 
482 	zvol_minors--;
483 
484 	mutex_exit(&zvol_state_lock);
485 
486 	return (0);
487 }
488 
489 int
490 zvol_set_volsize(const char *name, dev_t dev, uint64_t volsize)
491 {
492 	zvol_state_t *zv;
493 	dmu_tx_t *tx;
494 	int error;
495 	dmu_object_info_t doi;
496 
497 	mutex_enter(&zvol_state_lock);
498 
499 	if ((zv = zvol_minor_lookup(name)) == NULL) {
500 		mutex_exit(&zvol_state_lock);
501 		return (ENXIO);
502 	}
503 
504 	if ((error = dmu_object_info(zv->zv_objset, ZVOL_OBJ, &doi)) != 0 ||
505 	    (error = zvol_check_volsize(volsize,
506 	    doi.doi_data_block_size)) != 0) {
507 		mutex_exit(&zvol_state_lock);
508 		return (error);
509 	}
510 
511 	if (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) {
512 		mutex_exit(&zvol_state_lock);
513 		return (EROFS);
514 	}
515 
516 	tx = dmu_tx_create(zv->zv_objset);
517 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
518 	dmu_tx_hold_free(tx, ZVOL_OBJ, volsize, DMU_OBJECT_END);
519 	error = dmu_tx_assign(tx, TXG_WAIT);
520 	if (error) {
521 		dmu_tx_abort(tx);
522 		mutex_exit(&zvol_state_lock);
523 		return (error);
524 	}
525 
526 	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
527 	    &volsize, tx);
528 	if (error == 0) {
529 		error = dmu_free_range(zv->zv_objset, ZVOL_OBJ, volsize,
530 		    DMU_OBJECT_END, tx);
531 	}
532 
533 	dmu_tx_commit(tx);
534 
535 	if (error == 0) {
536 		zv->zv_volsize = volsize;
537 		zvol_size_changed(zv, dev);
538 	}
539 
540 	mutex_exit(&zvol_state_lock);
541 
542 	return (error);
543 }
544 
545 int
546 zvol_set_volblocksize(const char *name, uint64_t volblocksize)
547 {
548 	zvol_state_t *zv;
549 	dmu_tx_t *tx;
550 	int error;
551 
552 	mutex_enter(&zvol_state_lock);
553 
554 	if ((zv = zvol_minor_lookup(name)) == NULL) {
555 		mutex_exit(&zvol_state_lock);
556 		return (ENXIO);
557 	}
558 
559 	if (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) {
560 		mutex_exit(&zvol_state_lock);
561 		return (EROFS);
562 	}
563 
564 	tx = dmu_tx_create(zv->zv_objset);
565 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
566 	error = dmu_tx_assign(tx, TXG_WAIT);
567 	if (error) {
568 		dmu_tx_abort(tx);
569 	} else {
570 		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
571 		    volblocksize, 0, tx);
572 		if (error == ENOTSUP)
573 			error = EBUSY;
574 		dmu_tx_commit(tx);
575 	}
576 
577 	mutex_exit(&zvol_state_lock);
578 
579 	return (error);
580 }
581 
582 /*ARGSUSED*/
583 int
584 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
585 {
586 	minor_t minor = getminor(*devp);
587 	zvol_state_t *zv;
588 
589 	if (minor == 0)			/* This is the control device */
590 		return (0);
591 
592 	mutex_enter(&zvol_state_lock);
593 
594 	zv = ddi_get_soft_state(zvol_state, minor);
595 	if (zv == NULL) {
596 		mutex_exit(&zvol_state_lock);
597 		return (ENXIO);
598 	}
599 
600 	ASSERT(zv->zv_objset != NULL);
601 
602 	if ((flag & FWRITE) &&
603 	    (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY))) {
604 		mutex_exit(&zvol_state_lock);
605 		return (EROFS);
606 	}
607 
608 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
609 		zv->zv_open_count[otyp]++;
610 		zv->zv_total_opens++;
611 	}
612 
613 	mutex_exit(&zvol_state_lock);
614 
615 	return (0);
616 }
617 
618 /*ARGSUSED*/
619 int
620 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
621 {
622 	minor_t minor = getminor(dev);
623 	zvol_state_t *zv;
624 
625 	if (minor == 0)		/* This is the control device */
626 		return (0);
627 
628 	mutex_enter(&zvol_state_lock);
629 
630 	zv = ddi_get_soft_state(zvol_state, minor);
631 	if (zv == NULL) {
632 		mutex_exit(&zvol_state_lock);
633 		return (ENXIO);
634 	}
635 
636 	/*
637 	 * The next statement is a workaround for the following DDI bug:
638 	 * 6343604 specfs race: multiple "last-close" of the same device
639 	 */
640 	if (zv->zv_total_opens == 0) {
641 		mutex_exit(&zvol_state_lock);
642 		return (0);
643 	}
644 
645 	/*
646 	 * If the open count is zero, this is a spurious close.
647 	 * That indicates a bug in the kernel / DDI framework.
648 	 */
649 	ASSERT(zv->zv_open_count[otyp] != 0);
650 	ASSERT(zv->zv_total_opens != 0);
651 
652 	/*
653 	 * You may get multiple opens, but only one close.
654 	 */
655 	zv->zv_open_count[otyp]--;
656 	zv->zv_total_opens--;
657 
658 	mutex_exit(&zvol_state_lock);
659 
660 	return (0);
661 }
662 
663 /*
664  * Create and return an immediate write ZIL transaction.
665  */
666 itx_t *
667 zvol_immediate_itx(offset_t off, ssize_t len, char *addr)
668 {
669 	itx_t *itx;
670 	lr_write_t *lr;
671 
672 	itx = zil_itx_create(TX_WRITE, sizeof (*lr) + len);
673 	lr = (lr_write_t *)&itx->itx_lr;
674 	lr->lr_foid = ZVOL_OBJ;
675 	lr->lr_offset = off;
676 	lr->lr_length = len;
677 	lr->lr_blkoff = 0;
678 	BP_ZERO(&lr->lr_blkptr);
679 	bcopy(addr, (char *)itx + offsetof(itx_t, itx_lr) +
680 	    sizeof (*lr), len);
681 	itx->itx_wr_state = WR_COPIED;
682 	return (itx);
683 }
684 
685 /*
686  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
687  *
688  * We store data in the log buffers if it's small enough.
689  * Otherwise we flush the data out via dmu_sync().
690  */
691 ssize_t zvol_immediate_write_sz = 65536;
692 
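/*
 * Illustration (assuming a hypothetical 128K volblocksize): a 192K
 * synchronous write at offset 64K spans two blocks.  The first 64K
 * piece fits within zvol_immediate_write_sz, so its data is copied
 * directly into the log record; the trailing 128K piece is pushed out
 * with dmu_sync() and its log record carries the resulting block
 * pointer instead of the data.
 */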
693 int
694 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t len,
695     char *addr)
696 {
697 	dmu_object_info_t doi;
698 	ssize_t nbytes;
699 	itx_t *itx;
700 	lr_write_t *lr;
701 	objset_t *os;
702 	dmu_buf_t *db;
703 	uint64_t txg;
704 	uint64_t boff;
705 	int error;
706 	uint32_t blocksize;
707 
708 	/* handle common case */
709 	if (len <= zvol_immediate_write_sz) {
710 		itx = zvol_immediate_itx(off, len, addr);
711 		(void) zil_itx_assign(zv->zv_zilog, itx, tx);
712 		return (0);
713 	}
714 
715 	txg = dmu_tx_get_txg(tx);
716 	os = zv->zv_objset;
717 
718 	/*
719 	 * We need to dmu_sync() each block in the range.
720 	 * For this we need the blocksize.
721 	 */
722 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
723 	if (error)
724 		return (error);
725 	blocksize = doi.doi_data_block_size;
726 
727 	/*
728 	 * We need to do an immediate write or dmu_sync() for each block
729 	 */
730 	while (len) {
731 		nbytes = MIN(len, blocksize - P2PHASE(off, blocksize));
732 		if (nbytes <= zvol_immediate_write_sz) {
733 			itx = zvol_immediate_itx(off, nbytes, addr);
734 		} else {
735 			boff =  P2ALIGN_TYPED(off, blocksize, uint64_t);
736 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
737 			lr = (lr_write_t *)&itx->itx_lr;
738 			lr->lr_foid = ZVOL_OBJ;
739 			lr->lr_offset = off;
740 			lr->lr_length = nbytes;
741 			lr->lr_blkoff = off - boff;
742 			BP_ZERO(&lr->lr_blkptr);
743 
744 			/* XXX - we should do these IOs in parallel */
745 			VERIFY(0 == dmu_buf_hold(os, ZVOL_OBJ, boff,
746 			    FTAG, &db));
747 			ASSERT(boff == db->db_offset);
748 			error = dmu_sync(NULL, db, &lr->lr_blkptr,
749 			    txg, NULL, NULL);
750 			dmu_buf_rele(db, FTAG);
751 			if (error) {
752 				kmem_free(itx, offsetof(itx_t, itx_lr));
753 				return (error);
754 			}
755 			itx->itx_wr_state = WR_COPIED;
756 		}
757 		(void) zil_itx_assign(zv->zv_zilog, itx, tx);
758 		len -= nbytes;
759 		off += nbytes;
760 	}
761 	return (0);
762 }
763 
764 int
765 zvol_strategy(buf_t *bp)
766 {
767 	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
768 	uint64_t off, volsize;
769 	size_t size, resid;
770 	char *addr;
771 	objset_t *os;
772 	int error = 0;
773 	int sync;
774 	int reading;
775 	int txg_sync_needed = B_FALSE;
776 
777 	if (zv == NULL) {
778 		bioerror(bp, ENXIO);
779 		biodone(bp);
780 		return (0);
781 	}
782 
783 	if (getminor(bp->b_edev) == 0) {
784 		bioerror(bp, EINVAL);
785 		biodone(bp);
786 		return (0);
787 	}
788 
789 	if (zv->zv_readonly && !(bp->b_flags & B_READ)) {
790 		bioerror(bp, EROFS);
791 		biodone(bp);
792 		return (0);
793 	}
794 
795 	off = ldbtob(bp->b_blkno);
796 	volsize = zv->zv_volsize;
797 
798 	os = zv->zv_objset;
799 	ASSERT(os != NULL);
800 	sync = !(bp->b_flags & B_ASYNC) && !(zil_disable);
801 
802 	bp_mapin(bp);
803 	addr = bp->b_un.b_addr;
804 	resid = bp->b_bcount;
805 
806 	/*
807 	 * There must be no buffer changes when doing a dmu_sync() because
808 	 * we can't change the data whilst calculating the checksum.
809 	 * A better approach than a per zvol rwlock would be to lock ranges.
810 	 */
811 	reading = bp->b_flags & B_READ;
812 	if (reading || resid <= zvol_immediate_write_sz)
813 		rw_enter(&zv->zv_dslock, RW_READER);
814 	else
815 		rw_enter(&zv->zv_dslock, RW_WRITER);
816 
817 	while (resid != 0 && off < volsize) {
818 
819 		size = MIN(resid, 1UL << 20);	/* cap at 1MB per tx */
820 
821 		if (size > volsize - off)	/* don't write past the end */
822 			size = volsize - off;
823 
824 		if (reading) {
825 			error = dmu_read(os, ZVOL_OBJ, off, size, addr);
826 		} else {
827 			dmu_tx_t *tx = dmu_tx_create(os);
828 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
829 			error = dmu_tx_assign(tx, TXG_WAIT);
830 			if (error) {
831 				dmu_tx_abort(tx);
832 			} else {
833 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
834 				if (sync) {
835 					/* use the ZIL to commit this write */
836 					if (zvol_log_write(zv, tx, off, size,
837 					    addr) != 0) {
838 						txg_sync_needed = B_TRUE;
839 					}
840 				}
841 				dmu_tx_commit(tx);
842 			}
843 		}
844 		if (error)
845 			break;
846 		off += size;
847 		addr += size;
848 		resid -= size;
849 	}
850 	rw_exit(&zv->zv_dslock);
851 
852 	if ((bp->b_resid = resid) == bp->b_bcount)
853 		bioerror(bp, off > volsize ? EINVAL : error);
854 
855 	biodone(bp);
856 
857 	if (sync) {
858 		if (txg_sync_needed)
859 			txg_wait_synced(dmu_objset_pool(os), 0);
860 		else
861 			zil_commit(zv->zv_zilog, UINT64_MAX, 0);
862 	}
863 
864 	return (0);
865 }
866 
867 /*ARGSUSED*/
868 int
869 zvol_read(dev_t dev, uio_t *uiop, cred_t *cr)
870 {
871 	return (physio(zvol_strategy, NULL, dev, B_READ, minphys, uiop));
872 }
873 
874 /*ARGSUSED*/
875 int
876 zvol_write(dev_t dev, uio_t *uiop, cred_t *cr)
877 {
878 	return (physio(zvol_strategy, NULL, dev, B_WRITE, minphys, uiop));
879 }
880 
881 /*ARGSUSED*/
882 int
883 zvol_aread(dev_t dev, struct aio_req *aio, cred_t *cr)
884 {
885 	return (aphysio(zvol_strategy, anocancel, dev, B_READ, minphys, aio));
886 }
887 
888 /*ARGSUSED*/
889 int
890 zvol_awrite(dev_t dev, struct aio_req *aio, cred_t *cr)
891 {
892 	return (aphysio(zvol_strategy, anocancel, dev, B_WRITE, minphys, aio));
893 }
894 
895 /*
896  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
897  */
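/*
 * Only the bare minimum is provided: DKIOCINFO and DKIOCGMEDIAINFO
 * report a pseudo controller and the volume's size, and DKIOCGETEFI
 * fabricates a one-entry EFI label (a single EFI_RESERVED partition
 * spanning the usable LBAs), since a zvol keeps no real label of its
 * own.
 */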
898 /*ARGSUSED*/
899 int
900 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
901 {
902 	zvol_state_t *zv;
903 	struct dk_cinfo dkc;
904 	struct dk_minfo dkm;
905 	dk_efi_t efi;
906 	efi_gpt_t gpt;
907 	efi_gpe_t gpe;
908 	struct uuid uuid = EFI_RESERVED;
909 	uint32_t crc;
910 	int error = 0;
911 
912 	mutex_enter(&zvol_state_lock);
913 
914 	zv = ddi_get_soft_state(zvol_state, getminor(dev));
915 
916 	if (zv == NULL) {
917 		mutex_exit(&zvol_state_lock);
918 		return (ENXIO);
919 	}
920 
921 	switch (cmd) {
922 
923 	case DKIOCINFO:
924 		bzero(&dkc, sizeof (dkc));
925 		(void) strcpy(dkc.dki_cname, "zvol");
926 		(void) strcpy(dkc.dki_dname, "zvol");
927 		dkc.dki_ctype = DKC_UNKNOWN;
928 		dkc.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
929 		mutex_exit(&zvol_state_lock);
930 		if (ddi_copyout(&dkc, (void *)arg, sizeof (dkc), flag))
931 			error = EFAULT;
932 		return (error);
933 
934 	case DKIOCGMEDIAINFO:
935 		bzero(&dkm, sizeof (dkm));
936 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
937 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
938 		dkm.dki_media_type = DK_UNKNOWN;
939 		mutex_exit(&zvol_state_lock);
940 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
941 			error = EFAULT;
942 		return (error);
943 
944 	case DKIOCGETEFI:
945 		if (ddi_copyin((void *)arg, &efi, sizeof (dk_efi_t), flag)) {
946 			mutex_exit(&zvol_state_lock);
947 			return (EFAULT);
948 		}
949 
950 		bzero(&gpt, sizeof (gpt));
951 		bzero(&gpe, sizeof (gpe));
952 
953 		efi.dki_data = (void *)(uintptr_t)efi.dki_data_64;
954 
955 		if (efi.dki_length < sizeof (gpt) + sizeof (gpe)) {
956 			mutex_exit(&zvol_state_lock);
957 			return (EINVAL);
958 		}
959 
960 		efi.dki_length = sizeof (gpt) + sizeof (gpe);
961 
962 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
963 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
964 		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
965 		gpt.efi_gpt_FirstUsableLBA = LE_64(0ULL);
966 		gpt.efi_gpt_LastUsableLBA =
967 		    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);
968 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
969 		gpt.efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (gpe));
970 
971 		UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
972 		gpe.efi_gpe_StartingLBA = gpt.efi_gpt_FirstUsableLBA;
973 		gpe.efi_gpe_EndingLBA = gpt.efi_gpt_LastUsableLBA;
974 
975 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
976 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
977 
978 		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
979 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
980 
981 		mutex_exit(&zvol_state_lock);
982 		if (ddi_copyout(&gpt, efi.dki_data, sizeof (gpt), flag) ||
983 		    ddi_copyout(&gpe, efi.dki_data + 1, sizeof (gpe), flag))
984 			error = EFAULT;
985 		return (error);
986 
987 	default:
988 		error = ENOTSUP;
989 		break;
990 
991 	}
992 	mutex_exit(&zvol_state_lock);
993 	return (error);
994 }
995 
996 int
997 zvol_busy(void)
998 {
999 	return (zvol_minors != 0);
1000 }
1001 
1002 void
1003 zvol_init(void)
1004 {
1005 	VERIFY(ddi_soft_state_init(&zvol_state, sizeof (zvol_state_t), 1) == 0);
1006 	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
1007 }
1008 
1009 void
1010 zvol_fini(void)
1011 {
1012 	mutex_destroy(&zvol_state_lock);
1013 	ddi_soft_state_fini(&zvol_state);
1014 }
1015