xref: /freebsd/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c (revision 681ce946f33e75c590e97c53076e86dff1fe8f4a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
23  */
24 
25 #include <sys/dataset_kstats.h>
26 #include <sys/dbuf.h>
27 #include <sys/dmu_traverse.h>
28 #include <sys/dsl_dataset.h>
29 #include <sys/dsl_prop.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/zap.h>
32 #include <sys/zfeature.h>
33 #include <sys/zil_impl.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/zio.h>
36 #include <sys/zfs_rlock.h>
37 #include <sys/spa_impl.h>
38 #include <sys/zvol.h>
39 #include <sys/zvol_impl.h>
40 
41 #include <linux/blkdev_compat.h>
42 #include <linux/task_io_accounting_ops.h>
43 
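/*
 * Tunable defaults.  Most of these are exposed as module parameters;
 * see the MODULE_PARM_DESC() entries at the end of this file.
 */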
44 unsigned int zvol_major = ZVOL_MAJOR;
45 unsigned int zvol_request_sync = 0;
46 unsigned int zvol_prefetch_bytes = (128 * 1024);
47 unsigned long zvol_max_discard_blocks = 16384;
48 unsigned int zvol_threads = 32;
49 unsigned int zvol_open_timeout_ms = 1000;
50 
51 struct zvol_state_os {
52 	struct gendisk		*zvo_disk;	/* generic disk */
53 	struct request_queue	*zvo_queue;	/* request queue */
54 	dev_t			zvo_dev;	/* device id */
55 };
56 
57 taskq_t *zvol_taskq;
58 static struct ida zvol_ida;
59 
60 typedef struct zv_request_stack {
61 	zvol_state_t	*zv;
62 	struct bio	*bio;
63 } zv_request_t;
64 
65 typedef struct zv_request_task {
66 	zv_request_t zvr;
67 	taskq_ent_t	ent;
68 } zv_request_task_t;
69 
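/*
 * Wrap a zv_request_t in a dynamically allocated task so it can be
 * dispatched to the zvol taskq; freed with zv_request_task_free().
 */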
70 static zv_request_task_t *
71 zv_request_task_create(zv_request_t zvr)
72 {
73 	zv_request_task_t *task;
74 	task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
75 	taskq_init_ent(&task->ent);
76 	task->zvr = zvr;
77 	return (task);
78 }
79 
80 static void
81 zv_request_task_free(zv_request_task_t *task)
82 {
83 	kmem_free(task, sizeof (*task));
84 }
85 
86 /*
87  * Given a path, return TRUE if path is a ZVOL.
88  */
89 static boolean_t
90 zvol_is_zvol_impl(const char *path)
91 {
92 	dev_t dev = 0;
93 
94 	if (vdev_lookup_bdev(path, &dev) != 0)
95 		return (B_FALSE);
96 
97 	if (MAJOR(dev) == zvol_major)
98 		return (B_TRUE);
99 
100 	return (B_FALSE);
101 }
102 
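/*
 * Service a write bio: commit the ZIL first when the bio requests a
 * flush, then copy the data into the DMU in bounded chunks under a
 * range lock, logging each chunk to the ZIL and committing it when
 * FUA or sync=always semantics require it.
 */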
103 static void
104 zvol_write(zv_request_t *zvr)
105 {
106 	struct bio *bio = zvr->bio;
107 	int error = 0;
108 	zfs_uio_t uio;
109 
110 	zfs_uio_bvec_init(&uio, bio);
111 
112 	zvol_state_t *zv = zvr->zv;
113 	ASSERT3P(zv, !=, NULL);
114 	ASSERT3U(zv->zv_open_count, >, 0);
115 	ASSERT3P(zv->zv_zilog, !=, NULL);
116 
117 	/* A bio marked as FLUSH needs the ZIL committed before the write. */
118 	if (bio_is_flush(bio))
119 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
120 
121 	/* Some requests are just for flush and nothing else. */
122 	if (uio.uio_resid == 0) {
123 		rw_exit(&zv->zv_suspend_lock);
124 		BIO_END_IO(bio, 0);
125 		return;
126 	}
127 
128 	struct request_queue *q = zv->zv_zso->zvo_queue;
129 	struct gendisk *disk = zv->zv_zso->zvo_disk;
130 	ssize_t start_resid = uio.uio_resid;
131 	unsigned long start_time;
132 
133 	boolean_t acct = blk_queue_io_stat(q);
134 	if (acct)
135 		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
136 
137 	boolean_t sync =
138 	    bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
139 
140 	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
141 	    uio.uio_loffset, uio.uio_resid, RL_WRITER);
142 
143 	uint64_t volsize = zv->zv_volsize;
144 	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
145 		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
146 		uint64_t off = uio.uio_loffset;
147 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
148 
149 		if (bytes > volsize - off)	/* don't write past the end */
150 			bytes = volsize - off;
151 
152 		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
153 
154 		/* This will only fail for ENOSPC */
155 		error = dmu_tx_assign(tx, TXG_WAIT);
156 		if (error) {
157 			dmu_tx_abort(tx);
158 			break;
159 		}
160 		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
161 		if (error == 0) {
162 			zvol_log_write(zv, tx, off, bytes, sync);
163 		}
164 		dmu_tx_commit(tx);
165 
166 		if (error)
167 			break;
168 	}
169 	zfs_rangelock_exit(lr);
170 
171 	int64_t nwritten = start_resid - uio.uio_resid;
172 	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
173 	task_io_account_write(nwritten);
174 
175 	if (sync)
176 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
177 
178 	rw_exit(&zv->zv_suspend_lock);
179 
180 	if (acct)
181 		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
182 
183 	BIO_END_IO(bio, -error);
184 }
185 
186 static void
187 zvol_write_task(void *arg)
188 {
189 	zv_request_task_t *task = arg;
190 	zvol_write(&task->zvr);
191 	zv_request_task_free(task);
192 }
193 
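/*
 * Service a discard or secure-erase bio by freeing the covered range
 * of the volume object.  Plain discards are first aligned to volume
 * block boundaries; the truncate is logged to the ZIL and committed
 * when FUA or sync=always semantics require it.
 */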
194 static void
195 zvol_discard(zv_request_t *zvr)
196 {
197 	struct bio *bio = zvr->bio;
198 	zvol_state_t *zv = zvr->zv;
199 	uint64_t start = BIO_BI_SECTOR(bio) << 9;
200 	uint64_t size = BIO_BI_SIZE(bio);
201 	uint64_t end = start + size;
202 	boolean_t sync;
203 	int error = 0;
204 	dmu_tx_t *tx;
205 
206 	ASSERT3P(zv, !=, NULL);
207 	ASSERT3U(zv->zv_open_count, >, 0);
208 	ASSERT3P(zv->zv_zilog, !=, NULL);
209 
210 	struct request_queue *q = zv->zv_zso->zvo_queue;
211 	struct gendisk *disk = zv->zv_zso->zvo_disk;
212 	unsigned long start_time;
213 
214 	boolean_t acct = blk_queue_io_stat(q);
215 	if (acct)
216 		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
217 
218 	sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
219 
220 	if (end > zv->zv_volsize) {
221 		error = SET_ERROR(EIO);
222 		goto unlock;
223 	}
224 
225 	/*
226 	 * Align the request to volume block boundaries when a secure erase is
227 	 * not required.  This will prevent dnode_free_range() from zeroing out
228 	 * the unaligned parts which is slow (read-modify-write) and useless
229 	 * since we are not freeing any space by doing so.
230 	 */
231 	if (!bio_is_secure_erase(bio)) {
232 		start = P2ROUNDUP(start, zv->zv_volblocksize);
233 		end = P2ALIGN(end, zv->zv_volblocksize);
234 		size = end - start;
235 	}
236 
237 	if (start >= end)
238 		goto unlock;
239 
240 	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
241 	    start, size, RL_WRITER);
242 
243 	tx = dmu_tx_create(zv->zv_objset);
244 	dmu_tx_mark_netfree(tx);
245 	error = dmu_tx_assign(tx, TXG_WAIT);
246 	if (error != 0) {
247 		dmu_tx_abort(tx);
248 	} else {
249 		zvol_log_truncate(zv, tx, start, size, B_TRUE);
250 		dmu_tx_commit(tx);
251 		error = dmu_free_long_range(zv->zv_objset,
252 		    ZVOL_OBJ, start, size);
253 	}
254 	zfs_rangelock_exit(lr);
255 
256 	if (error == 0 && sync)
257 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
258 
259 unlock:
260 	rw_exit(&zv->zv_suspend_lock);
261 
262 	if (acct)
263 		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
264 
265 	BIO_END_IO(bio, -error);
266 }
267 
268 static void
269 zvol_discard_task(void *arg)
270 {
271 	zv_request_task_t *task = arg;
272 	zvol_discard(&task->zvr);
273 	zv_request_task_free(task);
274 }
275 
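/*
 * Service a read bio: copy data from the DMU into the bio in bounded
 * chunks under a range lock, mapping checksum errors to EIO.
 */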
276 static void
277 zvol_read(zv_request_t *zvr)
278 {
279 	struct bio *bio = zvr->bio;
280 	int error = 0;
281 	zfs_uio_t uio;
282 
283 	zfs_uio_bvec_init(&uio, bio);
284 
285 	zvol_state_t *zv = zvr->zv;
286 	ASSERT3P(zv, !=, NULL);
287 	ASSERT3U(zv->zv_open_count, >, 0);
288 
289 	struct request_queue *q = zv->zv_zso->zvo_queue;
290 	struct gendisk *disk = zv->zv_zso->zvo_disk;
291 	ssize_t start_resid = uio.uio_resid;
292 	unsigned long start_time;
293 
294 	boolean_t acct = blk_queue_io_stat(q);
295 	if (acct)
296 		start_time = blk_generic_start_io_acct(q, disk, READ, bio);
297 
298 	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
299 	    uio.uio_loffset, uio.uio_resid, RL_READER);
300 
301 	uint64_t volsize = zv->zv_volsize;
302 	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
303 		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
304 
305 		/* don't read past the end */
306 		if (bytes > volsize - uio.uio_loffset)
307 			bytes = volsize - uio.uio_loffset;
308 
309 		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
310 		if (error) {
311 			/* convert checksum errors into IO errors */
312 			if (error == ECKSUM)
313 				error = SET_ERROR(EIO);
314 			break;
315 		}
316 	}
317 	zfs_rangelock_exit(lr);
318 
319 	int64_t nread = start_resid - uio.uio_resid;
320 	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
321 	task_io_account_read(nread);
322 
323 	rw_exit(&zv->zv_suspend_lock);
324 
325 	if (acct)
326 		blk_generic_end_io_acct(q, disk, READ, bio, start_time);
327 
328 	BIO_END_IO(bio, -error);
329 }
330 
331 static void
332 zvol_read_task(void *arg)
333 {
334 	zv_request_task_t *task = arg;
335 	zvol_read(&task->zvr);
336 	zv_request_task_free(task);
337 }
338 
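/*
 * Entry point for bios submitted to a zvol.  Rejects out-of-range
 * accesses, takes zv_suspend_lock (opening the ZIL on the first
 * write), and either handles the request synchronously when
 * zvol_request_sync is set or dispatches it to the zvol taskq.
 */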
339 #ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
340 #ifdef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID
341 static void
342 zvol_submit_bio(struct bio *bio)
343 #else
344 static blk_qc_t
345 zvol_submit_bio(struct bio *bio)
346 #endif
347 #else
348 static MAKE_REQUEST_FN_RET
349 zvol_request(struct request_queue *q, struct bio *bio)
350 #endif
351 {
352 #ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
353 #if defined(HAVE_BIO_BDEV_DISK)
354 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
355 #else
356 	struct request_queue *q = bio->bi_disk->queue;
357 #endif
358 #endif
359 	zvol_state_t *zv = q->queuedata;
360 	fstrans_cookie_t cookie = spl_fstrans_mark();
361 	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
362 	uint64_t size = BIO_BI_SIZE(bio);
363 	int rw = bio_data_dir(bio);
364 
365 	if (bio_has_data(bio) && offset + size > zv->zv_volsize) {
366 		printk(KERN_INFO
367 		    "%s: bad access: offset=%llu, size=%lu\n",
368 		    zv->zv_zso->zvo_disk->disk_name,
369 		    (long long unsigned)offset,
370 		    (long unsigned)size);
371 
372 		BIO_END_IO(bio, -SET_ERROR(EIO));
373 		goto out;
374 	}
375 
376 	zv_request_t zvr = {
377 		.zv = zv,
378 		.bio = bio,
379 	};
380 	zv_request_task_t *task;
381 
382 	if (rw == WRITE) {
383 		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
384 			BIO_END_IO(bio, -SET_ERROR(EROFS));
385 			goto out;
386 		}
387 
388 		/*
389 		 * Prevents the zvol from being suspended and the ZIL from
390 		 * being concurrently opened.  The lock will be released after
391 		 * the i/o completes.
392 		 */
393 		rw_enter(&zv->zv_suspend_lock, RW_READER);
394 
395 		/*
396 		 * Open a ZIL if this is the first time we have written to this
397 		 * zvol. We protect zv->zv_zilog with zv_suspend_lock rather
398 		 * than zv_state_lock so that we don't need to acquire an
399 		 * additional lock in this path.
400 		 */
401 		if (zv->zv_zilog == NULL) {
402 			rw_exit(&zv->zv_suspend_lock);
403 			rw_enter(&zv->zv_suspend_lock, RW_WRITER);
404 			if (zv->zv_zilog == NULL) {
405 				zv->zv_zilog = zil_open(zv->zv_objset,
406 				    zvol_get_data);
407 				zv->zv_flags |= ZVOL_WRITTEN_TO;
408 				/* replay / destroy done in zvol_create_minor */
409 				VERIFY0((zv->zv_zilog->zl_header->zh_flags &
410 				    ZIL_REPLAY_NEEDED));
411 			}
412 			rw_downgrade(&zv->zv_suspend_lock);
413 		}
414 
415 		/*
416 		 * We don't want this thread to be blocked waiting for i/o to
417 		 * complete, so we instead wait from a taskq callback. The
418 		 * i/o may be a ZIL write (via zil_commit()), or a read of an
419 		 * indirect block, or a read of a data block (if this is a
420 		 * partial-block write).  We will indicate that the i/o is
421 		 * complete by calling BIO_END_IO() from the taskq callback.
422 		 *
423 		 * This design allows the calling thread to continue and
424 		 * initiate more concurrent operations by calling
425 		 * zvol_request() again. There are typically only a small
426 		 * number of threads available to call zvol_request() (e.g.
427 		 * one per iSCSI target), so keeping the latency of
428 		 * zvol_request() low is important for performance.
429 		 *
430 		 * The zvol_request_sync module parameter allows this
431 		 * behavior to be altered, for performance evaluation
432 		 * purposes.  If the callback blocks, setting
433 		 * zvol_request_sync=1 will result in much worse performance.
434 		 *
435 		 * We can have up to zvol_threads concurrent i/o's being
436 		 * processed for all zvols on the system.  This is typically
437 		 * a vast improvement over the zvol_request_sync=1 behavior
438 		 * of one i/o at a time per zvol.  However, an even better
439 		 * design would be for zvol_request() to initiate the zio
440 		 * directly, and then be notified by the zio_done callback,
441 		 * which would call BIO_END_IO().  Unfortunately, the DMU/ZIL
442 		 * interfaces lack this functionality (they block waiting for
443 		 * the i/o to complete).
444 		 */
445 		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
446 			if (zvol_request_sync) {
447 				zvol_discard(&zvr);
448 			} else {
449 				task = zv_request_task_create(zvr);
450 				taskq_dispatch_ent(zvol_taskq,
451 				    zvol_discard_task, task, 0, &task->ent);
452 			}
453 		} else {
454 			if (zvol_request_sync) {
455 				zvol_write(&zvr);
456 			} else {
457 				task = zv_request_task_create(zvr);
458 				taskq_dispatch_ent(zvol_taskq,
459 				    zvol_write_task, task, 0, &task->ent);
460 			}
461 		}
462 	} else {
463 		/*
464 		 * The SCST driver, and possibly others, may issue READ I/Os
465 		 * with a length of zero bytes.  These empty I/Os contain no
466 		 * data and require no additional handling.
467 		 */
468 		if (size == 0) {
469 			BIO_END_IO(bio, 0);
470 			goto out;
471 		}
472 
473 		rw_enter(&zv->zv_suspend_lock, RW_READER);
474 
475 		/* See comment in WRITE case above. */
476 		if (zvol_request_sync) {
477 			zvol_read(&zvr);
478 		} else {
479 			task = zv_request_task_create(zvr);
480 			taskq_dispatch_ent(zvol_taskq,
481 			    zvol_read_task, task, 0, &task->ent);
482 		}
483 	}
484 
485 out:
486 	spl_fstrans_unmark(cookie);
487 #if (defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
488 	defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)) && \
489 	!defined(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID)
490 	return (BLK_QC_T_NONE);
491 #endif
492 }
493 
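/*
 * Open the zvol block device.  On first open this may need to take
 * the spa_namespace_lock to avoid a lock inversion with the kernel's
 * bdev lock, and it holds zv_suspend_lock so the zvol cannot be
 * suspended while zvol_first_open() runs.
 */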
494 static int
495 zvol_open(struct block_device *bdev, fmode_t flag)
496 {
497 	zvol_state_t *zv;
498 	int error = 0;
499 	boolean_t drop_suspend = B_TRUE;
500 	boolean_t drop_namespace = B_FALSE;
501 #ifndef HAVE_BLKDEV_GET_ERESTARTSYS
502 	hrtime_t timeout = MSEC2NSEC(zvol_open_timeout_ms);
503 	hrtime_t start = gethrtime();
504 
505 retry:
506 #endif
507 	rw_enter(&zvol_state_lock, RW_READER);
508 	/*
509 	 * Obtain a copy of private_data under the zvol_state_lock to make
510 	 * sure that either the result of zvol free code path setting
511 	 * bdev->bd_disk->private_data to NULL is observed, or zvol_free()
512 	 * is not called on this zv because of the positive zv_open_count.
513 	 */
514 	zv = bdev->bd_disk->private_data;
515 	if (zv == NULL) {
516 		rw_exit(&zvol_state_lock);
517 		return (SET_ERROR(-ENXIO));
518 	}
519 
520 	if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
521 		/*
522 		 * In all other call paths the spa_namespace_lock is taken
523 		 * before the bdev->bd_mutex lock.  However, on open(2)
524 		 * the __blkdev_get() function calls fops->open() with the
525 		 * bdev->bd_mutex lock held.  This can result in a deadlock
526 		 * when zvols from one pool are used as vdevs in another.
527 		 *
528 		 * To prevent a lock inversion deadlock we preemptively
529 		 * take the spa_namespace_lock.  Normally the lock will not
530 		 * be contended and this is safe because spa_open_common()
531 		 * handles the case where the caller already holds the
532 		 * spa_namespace_lock.
533 		 *
534 		 * When the lock cannot be acquired after multiple retries
535 		 * this must be the vdev on zvol deadlock case and we have
536 		 * no choice but to return an error.  For 5.12 and older
537 		 * kernels returning -ERESTARTSYS will result in the
538 		 * bdev->bd_mutex being dropped, then reacquired, and
539 		 * fops->open() being called again.  This process can be
540 		 * repeated safely until both locks are acquired.  For 5.13
541 		 * and newer the -ERESTARTSYS retry logic was removed from
542 		 * the kernel so the only option is to return the error for
543 		 * the caller to handle it.
544 		 */
545 		if (!mutex_tryenter(&spa_namespace_lock)) {
546 			rw_exit(&zvol_state_lock);
547 
548 #ifdef HAVE_BLKDEV_GET_ERESTARTSYS
549 			schedule();
550 			return (SET_ERROR(-ERESTARTSYS));
551 #else
552 			if ((gethrtime() - start) > timeout)
553 				return (SET_ERROR(-ERESTARTSYS));
554 
555 			schedule_timeout(MSEC_TO_TICK(10));
556 			goto retry;
557 #endif
558 		} else {
559 			drop_namespace = B_TRUE;
560 		}
561 	}
562 
563 	mutex_enter(&zv->zv_state_lock);
564 	/*
565 	 * make sure zvol is not suspended during first open
566 	 * (hold zv_suspend_lock) and respect proper lock acquisition
567 	 * ordering - zv_suspend_lock before zv_state_lock
568 	 */
569 	if (zv->zv_open_count == 0) {
570 		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
571 			mutex_exit(&zv->zv_state_lock);
572 			rw_enter(&zv->zv_suspend_lock, RW_READER);
573 			mutex_enter(&zv->zv_state_lock);
574 			/* check to see if zv_suspend_lock is needed */
575 			if (zv->zv_open_count != 0) {
576 				rw_exit(&zv->zv_suspend_lock);
577 				drop_suspend = B_FALSE;
578 			}
579 		}
580 	} else {
581 		drop_suspend = B_FALSE;
582 	}
583 	rw_exit(&zvol_state_lock);
584 
585 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
586 
587 	if (zv->zv_open_count == 0) {
588 		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
589 		error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
590 		if (error)
591 			goto out_mutex;
592 	}
593 
594 	if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
595 		error = -EROFS;
596 		goto out_open_count;
597 	}
598 
599 	zv->zv_open_count++;
600 
601 	mutex_exit(&zv->zv_state_lock);
602 	if (drop_namespace)
603 		mutex_exit(&spa_namespace_lock);
604 	if (drop_suspend)
605 		rw_exit(&zv->zv_suspend_lock);
606 
607 	zfs_check_media_change(bdev);
608 
609 	return (0);
610 
611 out_open_count:
612 	if (zv->zv_open_count == 0)
613 		zvol_last_close(zv);
614 
615 out_mutex:
616 	mutex_exit(&zv->zv_state_lock);
617 	if (drop_namespace)
618 		mutex_exit(&spa_namespace_lock);
619 	if (drop_suspend)
620 		rw_exit(&zv->zv_suspend_lock);
621 
622 	return (SET_ERROR(error));
623 }
624 
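/*
 * Release the zvol block device.  On the last close, zv_suspend_lock
 * is held so zvol_last_close() cannot race with a suspend.
 */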
625 static void
626 zvol_release(struct gendisk *disk, fmode_t mode)
627 {
628 	zvol_state_t *zv;
629 	boolean_t drop_suspend = B_TRUE;
630 
631 	rw_enter(&zvol_state_lock, RW_READER);
632 	zv = disk->private_data;
633 
634 	mutex_enter(&zv->zv_state_lock);
635 	ASSERT3U(zv->zv_open_count, >, 0);
636 	/*
637 	 * make sure zvol is not suspended during last close
638 	 * (hold zv_suspend_lock) and respect proper lock acquisition
639 	 * ordering - zv_suspend_lock before zv_state_lock
640 	 */
641 	if (zv->zv_open_count == 1) {
642 		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
643 			mutex_exit(&zv->zv_state_lock);
644 			rw_enter(&zv->zv_suspend_lock, RW_READER);
645 			mutex_enter(&zv->zv_state_lock);
646 			/* check to see if zv_suspend_lock is needed */
647 			if (zv->zv_open_count != 1) {
648 				rw_exit(&zv->zv_suspend_lock);
649 				drop_suspend = B_FALSE;
650 			}
651 		}
652 	} else {
653 		drop_suspend = B_FALSE;
654 	}
655 	rw_exit(&zvol_state_lock);
656 
657 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
658 
659 	zv->zv_open_count--;
660 	if (zv->zv_open_count == 0) {
661 		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
662 		zvol_last_close(zv);
663 	}
664 
665 	mutex_exit(&zv->zv_state_lock);
666 
667 	if (drop_suspend)
668 		rw_exit(&zv->zv_suspend_lock);
669 }
670 
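/*
 * Handle ioctls on the zvol block device: BLKFLSBUF flushes and
 * invalidates cached data and waits for the current TXG to sync when
 * the volume is writable; BLKZNAME copies the dataset name to user
 * space.
 */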
671 static int
672 zvol_ioctl(struct block_device *bdev, fmode_t mode,
673     unsigned int cmd, unsigned long arg)
674 {
675 	zvol_state_t *zv = bdev->bd_disk->private_data;
676 	int error = 0;
677 
678 	ASSERT3U(zv->zv_open_count, >, 0);
679 
680 	switch (cmd) {
681 	case BLKFLSBUF:
682 		fsync_bdev(bdev);
683 		invalidate_bdev(bdev);
684 		rw_enter(&zv->zv_suspend_lock, RW_READER);
685 
686 		if (!(zv->zv_flags & ZVOL_RDONLY))
687 			txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
688 
689 		rw_exit(&zv->zv_suspend_lock);
690 		break;
691 
692 	case BLKZNAME:
693 		mutex_enter(&zv->zv_state_lock);
694 		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
695 		mutex_exit(&zv->zv_state_lock);
696 		break;
697 
698 	default:
699 		error = -ENOTTY;
700 		break;
701 	}
702 
703 	return (SET_ERROR(error));
704 }
705 
706 #ifdef CONFIG_COMPAT
707 static int
708 zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
709     unsigned cmd, unsigned long arg)
710 {
711 	return (zvol_ioctl(bdev, mode, cmd, arg));
712 }
713 #else
714 #define	zvol_compat_ioctl	NULL
715 #endif
716 
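/*
 * Report and clear any pending DISK_EVENT_MEDIA_CHANGE event for
 * this zvol.
 */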
717 static unsigned int
718 zvol_check_events(struct gendisk *disk, unsigned int clearing)
719 {
720 	unsigned int mask = 0;
721 
722 	rw_enter(&zvol_state_lock, RW_READER);
723 
724 	zvol_state_t *zv = disk->private_data;
725 	if (zv != NULL) {
726 		mutex_enter(&zv->zv_state_lock);
727 		mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
728 		zv->zv_changed = 0;
729 		mutex_exit(&zv->zv_state_lock);
730 	}
731 
732 	rw_exit(&zvol_state_lock);
733 
734 	return (mask);
735 }
736 
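/*
 * Refresh the kernel's notion of the device capacity from the
 * current zv_volsize.
 */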
737 static int
738 zvol_revalidate_disk(struct gendisk *disk)
739 {
740 	rw_enter(&zvol_state_lock, RW_READER);
741 
742 	zvol_state_t *zv = disk->private_data;
743 	if (zv != NULL) {
744 		mutex_enter(&zv->zv_state_lock);
745 		set_capacity(zv->zv_zso->zvo_disk,
746 		    zv->zv_volsize >> SECTOR_BITS);
747 		mutex_exit(&zv->zv_state_lock);
748 	}
749 
750 	rw_exit(&zvol_state_lock);
751 
752 	return (0);
753 }
754 
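/*
 * Platform hook invoked after the volume size changes; asks the
 * kernel to pick up the new capacity using whichever revalidation
 * interface this kernel provides.
 */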
755 static int
756 zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
757 {
758 	struct gendisk *disk = zv->zv_zso->zvo_disk;
759 
760 #if defined(HAVE_REVALIDATE_DISK_SIZE)
761 	revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
762 #elif defined(HAVE_REVALIDATE_DISK)
763 	revalidate_disk(disk);
764 #else
765 	zvol_revalidate_disk(disk);
766 #endif
767 	return (0);
768 }
769 
770 static void
771 zvol_clear_private(zvol_state_t *zv)
772 {
773 	/*
774 	 * Cleared while holding zvol_state_lock as a writer
775 	 * which will prevent zvol_open() from opening it.
776 	 */
777 	zv->zv_zso->zvo_disk->private_data = NULL;
778 }
779 
780 /*
781  * Provide a simple virtual geometry for legacy compatibility.  For devices
782  * smaller than 1 MiB a small head and sector count is used to allow very
783  * tiny devices.  For devices over 1 MiB a standard head and sector count
784  * is used to keep the cylinders count reasonable.
785  */
786 static int
787 zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
788 {
789 	zvol_state_t *zv = bdev->bd_disk->private_data;
790 	sector_t sectors;
791 
792 	ASSERT3U(zv->zv_open_count, >, 0);
793 
794 	sectors = get_capacity(zv->zv_zso->zvo_disk);
795 
796 	if (sectors > 2048) {
797 		geo->heads = 16;
798 		geo->sectors = 63;
799 	} else {
800 		geo->heads = 2;
801 		geo->sectors = 4;
802 	}
803 
804 	geo->start = 0;
805 	geo->cylinders = sectors / (geo->heads * geo->sectors);
806 
807 	return (0);
808 }
809 
810 static struct block_device_operations zvol_ops = {
811 	.open			= zvol_open,
812 	.release		= zvol_release,
813 	.ioctl			= zvol_ioctl,
814 	.compat_ioctl		= zvol_compat_ioctl,
815 	.check_events		= zvol_check_events,
816 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
817 	.revalidate_disk	= zvol_revalidate_disk,
818 #endif
819 	.getgeo			= zvol_getgeo,
820 	.owner			= THIS_MODULE,
821 #ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
822 	.submit_bio		= zvol_submit_bio,
823 #endif
824 };
825 
826 /*
827  * Allocate memory for a new zvol_state_t and setup the required
828  * request queue and generic disk structures for the block device.
829  */
830 static zvol_state_t *
831 zvol_alloc(dev_t dev, const char *name)
832 {
833 	zvol_state_t *zv;
834 	struct zvol_state_os *zso;
835 	uint64_t volmode;
836 
837 	if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
838 		return (NULL);
839 
840 	if (volmode == ZFS_VOLMODE_DEFAULT)
841 		volmode = zvol_volmode;
842 
843 	if (volmode == ZFS_VOLMODE_NONE)
844 		return (NULL);
845 
846 	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
847 	zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
848 	zv->zv_zso = zso;
849 	zv->zv_volmode = volmode;
850 
851 	list_link_init(&zv->zv_next);
852 	mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
853 
854 #ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
855 #ifdef HAVE_BLK_ALLOC_DISK
856 	zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
857 	if (zso->zvo_disk == NULL)
858 		goto out_kmem;
859 
860 	zso->zvo_disk->minors = ZVOL_MINORS;
861 	zso->zvo_queue = zso->zvo_disk->queue;
862 #else
863 	zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
864 	if (zso->zvo_queue == NULL)
865 		goto out_kmem;
866 
867 	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
868 	if (zso->zvo_disk == NULL) {
869 		blk_cleanup_queue(zso->zvo_queue);
870 		goto out_kmem;
871 	}
872 
873 	zso->zvo_disk->queue = zso->zvo_queue;
874 #endif /* HAVE_BLK_ALLOC_DISK */
875 #else
876 	zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
877 	if (zso->zvo_queue == NULL)
878 		goto out_kmem;
879 
880 	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
881 	if (zso->zvo_disk == NULL) {
882 		blk_cleanup_queue(zso->zvo_queue);
883 		goto out_kmem;
884 	}
885 
886 	zso->zvo_disk->queue = zso->zvo_queue;
887 #endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
888 
889 	blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);
890 
891 	/* Limit read-ahead to a single page to prevent over-prefetching. */
892 	blk_queue_set_read_ahead(zso->zvo_queue, 1);
893 
894 	/* Disable write merging in favor of the ZIO pipeline. */
895 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
896 
897 	/* Enable /proc/diskstats */
898 	blk_queue_flag_set(QUEUE_FLAG_IO_STAT, zso->zvo_queue);
899 
900 	zso->zvo_queue->queuedata = zv;
901 	zso->zvo_dev = dev;
902 	zv->zv_open_count = 0;
903 	strlcpy(zv->zv_name, name, MAXNAMELEN);
904 
905 	zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
906 	rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
907 
908 	zso->zvo_disk->major = zvol_major;
909 	zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;
910 
911 	if (volmode == ZFS_VOLMODE_DEV) {
912 		/*
913 		 * ZFS_VOLMODE_DEV disables partitioning on ZVOL devices: set
914 		 * gendisk->minors = 1 as noted in include/linux/genhd.h.
915 		 * It also disables extended partition numbers (GENHD_FL_EXT_DEVT)
916 		 * and suppresses partition scanning (GENHD_FL_NO_PART_SCAN) by
917 		 * setting gendisk->flags accordingly.
918 		 */
919 		zso->zvo_disk->minors = 1;
920 #if defined(GENHD_FL_EXT_DEVT)
921 		zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
922 #endif
923 #if defined(GENHD_FL_NO_PART_SCAN)
924 		zso->zvo_disk->flags |= GENHD_FL_NO_PART_SCAN;
925 #endif
926 	}
927 	zso->zvo_disk->first_minor = (dev & MINORMASK);
928 	zso->zvo_disk->fops = &zvol_ops;
929 	zso->zvo_disk->private_data = zv;
930 	snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
931 	    ZVOL_DEV_NAME, (dev & MINORMASK));
932 
933 	return (zv);
934 
935 out_kmem:
936 	kmem_free(zso, sizeof (struct zvol_state_os));
937 	kmem_free(zv, sizeof (zvol_state_t));
938 	return (NULL);
939 }
940 
941 /*
942  * Cleanup then free a zvol_state_t which was created by zvol_alloc().
943  * At this time, the structure is not opened by anyone, is taken off
944  * the zvol_state_list, and has its private data set to NULL.
945  * The zvol_state_lock is dropped.
946  *
947  * This function may take many milliseconds to complete (e.g. we've seen
948  * it take over 256ms), due to the calls to "blk_cleanup_queue" and
949  * "del_gendisk". Thus, consumers need to be careful to account for this
950  * latency when calling this function.
951  */
952 static void
953 zvol_free(zvol_state_t *zv)
954 {
955 
956 	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
957 	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
958 	ASSERT0(zv->zv_open_count);
959 	ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
960 
961 	rw_destroy(&zv->zv_suspend_lock);
962 	zfs_rangelock_fini(&zv->zv_rangelock);
963 
964 	del_gendisk(zv->zv_zso->zvo_disk);
965 #if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
966 	defined(HAVE_BLK_ALLOC_DISK)
967 	blk_cleanup_disk(zv->zv_zso->zvo_disk);
968 #else
969 	blk_cleanup_queue(zv->zv_zso->zvo_queue);
970 	put_disk(zv->zv_zso->zvo_disk);
971 #endif
972 
973 	ida_simple_remove(&zvol_ida,
974 	    MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
975 
976 	mutex_destroy(&zv->zv_state_lock);
977 	dataset_kstats_destroy(&zv->zv_kstat);
978 
979 	kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
980 	kmem_free(zv, sizeof (zvol_state_t));
981 }
982 
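/*
 * No-op on Linux; present only to satisfy the interface expected by
 * the common zvol code.
 */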
983 void
984 zvol_wait_close(zvol_state_t *zv)
985 {
986 }
987 
988 /*
989  * Create a block device minor node and setup the linkage between it
990  * and the specified volume.  Once this function returns the block
991  * device is live and ready for use.
992  */
993 static int
994 zvol_os_create_minor(const char *name)
995 {
996 	zvol_state_t *zv;
997 	objset_t *os;
998 	dmu_object_info_t *doi;
999 	uint64_t volsize;
1000 	uint64_t len;
1001 	unsigned minor = 0;
1002 	int error = 0;
1003 	int idx;
1004 	uint64_t hash = zvol_name_hash(name);
1005 
1006 	if (zvol_inhibit_dev)
1007 		return (0);
1008 
1009 	idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
1010 	if (idx < 0)
1011 		return (SET_ERROR(-idx));
1012 	minor = idx << ZVOL_MINOR_BITS;
1013 
1014 	zv = zvol_find_by_name_hash(name, hash, RW_NONE);
1015 	if (zv) {
1016 		ASSERT(MUTEX_HELD(&zv->zv_state_lock));
1017 		mutex_exit(&zv->zv_state_lock);
1018 		ida_simple_remove(&zvol_ida, idx);
1019 		return (SET_ERROR(EEXIST));
1020 	}
1021 
1022 	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
1023 
1024 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
1025 	if (error)
1026 		goto out_doi;
1027 
1028 	error = dmu_object_info(os, ZVOL_OBJ, doi);
1029 	if (error)
1030 		goto out_dmu_objset_disown;
1031 
1032 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
1033 	if (error)
1034 		goto out_dmu_objset_disown;
1035 
1036 	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
1037 	if (zv == NULL) {
1038 		error = SET_ERROR(EAGAIN);
1039 		goto out_dmu_objset_disown;
1040 	}
1041 	zv->zv_hash = hash;
1042 
1043 	if (dmu_objset_is_snapshot(os))
1044 		zv->zv_flags |= ZVOL_RDONLY;
1045 
1046 	zv->zv_volblocksize = doi->doi_data_block_size;
1047 	zv->zv_volsize = volsize;
1048 	zv->zv_objset = os;
1049 
1050 	set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);
1051 
1052 	blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
1053 	    (DMU_MAX_ACCESS / 4) >> 9);
1054 	blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
1055 	blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
1056 	blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
1057 	    zv->zv_volblocksize);
1058 	blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
1059 	blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
1060 	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
1061 	blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
1062 	    zv->zv_volblocksize);
1063 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
1064 #ifdef QUEUE_FLAG_NONROT
1065 	blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
1066 #endif
1067 #ifdef QUEUE_FLAG_ADD_RANDOM
1068 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
1069 #endif
1070 	/* This flag was introduced in kernel version 4.12. */
1071 #ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
1072 	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
1073 #endif
1074 
1075 	ASSERT3P(zv->zv_zilog, ==, NULL);
1076 	zv->zv_zilog = zil_open(os, zvol_get_data);
1077 	if (spa_writeable(dmu_objset_spa(os))) {
1078 		if (zil_replay_disable)
1079 			zil_destroy(zv->zv_zilog, B_FALSE);
1080 		else
1081 			zil_replay(os, zv, zvol_replay_vector);
1082 	}
1083 	zil_close(zv->zv_zilog);
1084 	zv->zv_zilog = NULL;
1085 	ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
1086 	dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
1087 
1088 	/*
1089 	 * When udev detects the addition of the device it will immediately
1090 	 * invoke blkid(8) to determine the type of content on the device.
1091 	 * Prefetching the blocks commonly scanned by blkid(8) will speed
1092 	 * up this process.
1093 	 */
1094 	len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
1095 	if (len > 0) {
1096 		dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
1097 		dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
1098 		    ZIO_PRIORITY_SYNC_READ);
1099 	}
1100 
1101 	zv->zv_objset = NULL;
1102 out_dmu_objset_disown:
1103 	dmu_objset_disown(os, B_TRUE, FTAG);
1104 out_doi:
1105 	kmem_free(doi, sizeof (dmu_object_info_t));
1106 
1107 	/*
1108 	 * Keep in mind that once add_disk() is called, the zvol is
1109 	 * announced to the world, and zvol_open()/zvol_release() can
1110 	 * be called at any time. Incidentally, add_disk() itself calls
1111 	 * zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
1112 	 * directly as well.
1113 	 */
1114 	if (error == 0) {
1115 		rw_enter(&zvol_state_lock, RW_WRITER);
1116 		zvol_insert(zv);
1117 		rw_exit(&zvol_state_lock);
1118 		add_disk(zv->zv_zso->zvo_disk);
1119 	} else {
1120 		ida_simple_remove(&zvol_ida, idx);
1121 	}
1122 
1123 	return (error);
1124 }
1125 
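/*
 * Apply a dataset rename to the in-memory zvol state: record the new
 * name and move the entry to the matching hash table bucket.
 */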
1126 static void
1127 zvol_rename_minor(zvol_state_t *zv, const char *newname)
1128 {
1129 	int readonly = get_disk_ro(zv->zv_zso->zvo_disk);
1130 
1131 	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
1132 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
1133 
1134 	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
1135 
1136 	/* move to new hashtable entry  */
1137 	zv->zv_hash = zvol_name_hash(zv->zv_name);
1138 	hlist_del(&zv->zv_hlink);
1139 	hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
1140 
1141 	/*
1142 	 * The block device's read-only state is briefly changed causing
1143 	 * a KOBJ_CHANGE uevent to be issued.  This ensures udev detects
1144 	 * the name change and fixes the symlinks.  This does not change
1145 	 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
1146 	 * changes.  This would normally be done using kobject_uevent() but
1147 	 * that is a GPL-only symbol which is why we need this workaround.
1148 	 */
1149 	set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
1150 	set_disk_ro(zv->zv_zso->zvo_disk, readonly);
1151 }
1152 
1153 static void
1154 zvol_set_disk_ro_impl(zvol_state_t *zv, int flags)
1155 {
1156 
1157 	set_disk_ro(zv->zv_zso->zvo_disk, flags);
1158 }
1159 
1160 static void
1161 zvol_set_capacity_impl(zvol_state_t *zv, uint64_t capacity)
1162 {
1163 
1164 	set_capacity(zv->zv_zso->zvo_disk, capacity);
1165 }
1166 
1167 static const zvol_platform_ops_t zvol_linux_ops = {
1168 	.zv_free = zvol_free,
1169 	.zv_rename_minor = zvol_rename_minor,
1170 	.zv_create_minor = zvol_os_create_minor,
1171 	.zv_update_volsize = zvol_update_volsize,
1172 	.zv_clear_private = zvol_clear_private,
1173 	.zv_is_zvol = zvol_is_zvol_impl,
1174 	.zv_set_disk_ro = zvol_set_disk_ro_impl,
1175 	.zv_set_capacity = zvol_set_capacity_impl,
1176 };
1177 
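/*
 * Linux-specific zvol module initialization: register the block
 * device major, create the taskq used for asynchronous bio handling,
 * and register the platform operations with the common zvol code.
 */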
1178 int
1179 zvol_init(void)
1180 {
1181 	int error;
1182 	int threads = MIN(MAX(zvol_threads, 1), 1024);
1183 
1184 	error = register_blkdev(zvol_major, ZVOL_DRIVER);
1185 	if (error) {
1186 		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
1187 		return (error);
1188 	}
1189 	zvol_taskq = taskq_create(ZVOL_DRIVER, threads, maxclsyspri,
1190 	    threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
1191 	if (zvol_taskq == NULL) {
1192 		unregister_blkdev(zvol_major, ZVOL_DRIVER);
1193 		return (-ENOMEM);
1194 	}
1195 	zvol_init_impl();
1196 	ida_init(&zvol_ida);
1197 	zvol_register_ops(&zvol_linux_ops);
1198 	return (0);
1199 }
1200 
1201 void
1202 zvol_fini(void)
1203 {
1204 	zvol_fini_impl();
1205 	unregister_blkdev(zvol_major, ZVOL_DRIVER);
1206 	taskq_destroy(zvol_taskq);
1207 	ida_destroy(&zvol_ida);
1208 }
1209 
1210 /* BEGIN CSTYLED */
1211 module_param(zvol_inhibit_dev, uint, 0644);
1212 MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
1213 
1214 module_param(zvol_major, uint, 0444);
1215 MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
1216 
1217 module_param(zvol_threads, uint, 0444);
1218 MODULE_PARM_DESC(zvol_threads, "Max number of threads to handle I/O requests");
1219 
1220 module_param(zvol_request_sync, uint, 0644);
1221 MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");
1222 
1223 module_param(zvol_max_discard_blocks, ulong, 0444);
1224 MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
1225 
1226 module_param(zvol_prefetch_bytes, uint, 0644);
1227 MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
1228 
1229 module_param(zvol_volmode, uint, 0644);
1230 MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
1231 /* END CSTYLED */
1232