// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/msdos_fs.h>	/* for SECTOR_* */
#include <linux/bio.h>
#include <linux/blk-mq.h>

/*
 * 6.11 API
 * Setting the flush flags directly is no longer possible; flush flags are set
 * on the queue limits passed to blk_alloc_disk(). In this case we remove this
 * function entirely.
 */
#if !defined(HAVE_BLK_ALLOC_DISK_2ARG) || \
	!defined(HAVE_BLKDEV_QUEUE_LIMITS_FEATURES)
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool on)
{
	if (on) {
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
	}
}
#endif /* !HAVE_BLK_ALLOC_DISK_2ARG || !HAVE_BLKDEV_QUEUE_LIMITS_FEATURES */

/*
 * Detect if a device has a write cache. Used to set the initial value for the
 * vdev nowritecache flag.
 *
 * 4.10: QUEUE_FLAG_WC added. Initialised by the driver, but can be changed
 *       later by the operator. If not set, kernel will return flush requests
 *       immediately without doing anything.
 * 6.6: QUEUE_FLAG_HW_WC added. Initialised by the driver, can't be changed.
 *      Only controls if the operator is allowed to change _WC. Initial version
 *      buggy; aliased to QUEUE_FLAG_FUA, so unusable.
 * 6.6.10, 6.7: QUEUE_FLAG_HW_WC fixed.
 *
 * Older than 4.10 we just assume write cache, and let the normal flush fail
 * detection apply.
 */
static inline boolean_t
zfs_bdev_has_write_cache(struct block_device *bdev)
{
#if defined(QUEUE_FLAG_HW_WC) && QUEUE_FLAG_HW_WC != QUEUE_FLAG_FUA
	return (test_bit(QUEUE_FLAG_HW_WC, &bdev_get_queue(bdev)->queue_flags));
#elif defined(QUEUE_FLAG_WC)
	return (test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags));
#else
	return (B_TRUE);
#endif
}

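/*
 * Set the readahead size for a request queue. On kernels that provide
 * blk_queue_update_readahead()/disk_update_readahead() the kernel manages
 * readahead itself, and this helper compiles down to a no-op.
 */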
static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
#if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
	!defined(HAVE_DISK_UPDATE_READAHEAD)
#if defined(HAVE_BLK_QUEUE_BDI_DYNAMIC)
	q->backing_dev_info->ra_pages = ra_pages;
#elif defined(HAVE_BLK_QUEUE_DISK_BDI)
	q->disk->bdi->ra_pages = ra_pages;
#else
	q->backing_dev_info.ra_pages = ra_pages;
#endif
#endif
}

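/*
 * Accessors for the bio iterator (bio->bi_iter) fields, plus a compat
 * wrapper mapping the four-argument bio_for_each_segment() form onto the
 * three-argument macro.
 */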
#define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
#define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;

static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags, bool dev,
    bool transport, bool driver)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

	if (dev)
		*flags |= REQ_FAILFAST_DEV;
	if (transport)
		*flags |= REQ_FAILFAST_TRANSPORT;
	if (driver)
		*flags |= REQ_FAILFAST_DRIVER;
}

/*
 * Maximum disk label length; it may be undefined for some kernels.
 */
#if !defined(DISK_NAME_LEN)
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

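/*
 * Translate between the kernel's blk_status_t completion codes and the
 * errno values used throughout the ZFS I/O paths.
 */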
static inline int
bi_status_to_errno(blk_status_t status)
{
	switch (status) {
	case BLK_STS_OK:
		return (0);
	case BLK_STS_NOTSUPP:
		return (EOPNOTSUPP);
	case BLK_STS_TIMEOUT:
		return (ETIMEDOUT);
	case BLK_STS_NOSPC:
		return (ENOSPC);
	case BLK_STS_TRANSPORT:
		return (ENOLINK);
	case BLK_STS_TARGET:
		return (EREMOTEIO);
#ifdef HAVE_BLK_STS_RESV_CONFLICT
	case BLK_STS_RESV_CONFLICT:
#else
	case BLK_STS_NEXUS:
#endif
		return (EBADE);
	case BLK_STS_MEDIUM:
		return (ENODATA);
	case BLK_STS_PROTECTION:
		return (EILSEQ);
	case BLK_STS_RESOURCE:
		return (ENOMEM);
	case BLK_STS_AGAIN:
		return (EAGAIN);
	case BLK_STS_IOERR:
		return (EIO);
	default:
		return (EIO);
	}
}

static inline blk_status_t
errno_to_bi_status(int error)
{
	switch (error) {
	case 0:
		return (BLK_STS_OK);
	case EOPNOTSUPP:
		return (BLK_STS_NOTSUPP);
	case ETIMEDOUT:
		return (BLK_STS_TIMEOUT);
	case ENOSPC:
		return (BLK_STS_NOSPC);
	case ENOLINK:
		return (BLK_STS_TRANSPORT);
	case EREMOTEIO:
		return (BLK_STS_TARGET);
	case EBADE:
#ifdef HAVE_BLK_STS_RESV_CONFLICT
		return (BLK_STS_RESV_CONFLICT);
#else
		return (BLK_STS_NEXUS);
#endif
	case ENODATA:
		return (BLK_STS_MEDIUM);
	case EILSEQ:
		return (BLK_STS_PROTECTION);
	case ENOMEM:
		return (BLK_STS_RESOURCE);
	case EAGAIN:
		return (BLK_STS_AGAIN);
	case EIO:
		return (BLK_STS_IOERR);
	default:
		return (BLK_STS_IOERR);
	}
}

/*
 * 5.15 MACRO,
 *   GD_DEAD
 *
 * 2.6.36 - 5.14 MACRO,
 *   GENHD_FL_UP
 *
 * Check the disk status and return B_TRUE if alive,
 * otherwise B_FALSE.
 */
static inline boolean_t
zfs_check_disk_status(struct block_device *bdev)
{
#if defined(GENHD_FL_UP)
	return (!!(bdev->bd_disk->flags & GENHD_FL_UP));
#elif defined(GD_DEAD)
	return (!test_bit(GD_DEAD, &bdev->bd_disk->state));
#else
	/*
	 * This is encountered if neither GENHD_FL_UP nor GD_DEAD is available
	 * in the kernel - likely due to a macro change that needs to be
	 * chased down.
	 */
#error "Unsupported kernel: no usable disk status check"
#endif
}

/*
 * 5.17 API change
 *
 * GENHD_FL_EXT_DEVT flag removed
 * GENHD_FL_NO_PART_SCAN renamed GENHD_FL_NO_PART
 */
#ifndef HAVE_GENHD_FL_EXT_DEVT
#define	GENHD_FL_EXT_DEVT	(0)
#endif
#ifndef HAVE_GENHD_FL_NO_PART
#define	GENHD_FL_NO_PART	(GENHD_FL_NO_PART_SCAN)
#endif

/*
 * 4.1 API,
 * 3.10.0 CentOS 7.x API,
 *   blkdev_reread_part()
 *
 * For older kernels trigger a re-reading of the partition table by calling
 * check_disk_change() which calls flush_disk() to invalidate the device.
 *
 * For newer kernels (as of 5.10), bdev_check_media_change() is used in place
 * of check_disk_change(), with the modification that invalidation is no
 * longer forced.
 */
#ifdef HAVE_CHECK_DISK_CHANGE
#define	zfs_check_media_change(bdev)	check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define	vdev_bdev_reread_part(bdev)	blkdev_reread_part(bdev)
#else
#define	vdev_bdev_reread_part(bdev)	check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
#else
#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
static inline int
zfs_check_media_change(struct block_device *bdev)
{
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	struct gendisk *gd = bdev->bd_disk;
	const struct block_device_operations *bdo = gd->fops;
#endif

	if (!bdev_check_media_change(bdev))
		return (0);

#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	/*
	 * Force revalidation, to mimic the old behavior of
	 * check_disk_change()
	 */
	if (bdo->revalidate_disk)
		bdo->revalidate_disk(gd);
#endif

	return (0);
}
#define	vdev_bdev_reread_part(bdev)	zfs_check_media_change(bdev)
#elif defined(HAVE_DISK_CHECK_MEDIA_CHANGE)
#define	vdev_bdev_reread_part(bdev)	disk_check_media_change(bdev->bd_disk)
#define	zfs_check_media_change(bdev)	disk_check_media_change(bdev->bd_disk)
#else
/*
 * This is encountered if check_disk_change() and bdev_check_media_change()
 * are not available in the kernel - likely due to an API change that needs
 * to be chased down.
 */
#error "Unsupported kernel: no usable disk change check"
#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
#endif /* HAVE_CHECK_DISK_CHANGE */

/*
 * 2.6.27 API change
 * The function was exported for use; prior to this it existed but the
 * symbol was not exported.
 *
 * 5.11 API change
 * Changed to take a dev_t argument which is set on success and return a
 * non-zero error code on failure.
 */
static inline int
vdev_lookup_bdev(const char *path, dev_t *dev)
{
#if defined(HAVE_DEVT_LOOKUP_BDEV)
	return (lookup_bdev(path, dev));
#elif defined(HAVE_1ARG_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#else
#error "Unsupported kernel"
#endif
}

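/*
 * Newer kernels pass blk_mode_t open flags (BLK_OPEN_WRITE) to the block
 * device open/release callbacks; older kernels use fmode_t (FMODE_WRITE).
 * This macro tests for a writable open in either scheme.
 */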
#if defined(HAVE_BLK_MODE_T)
#define	blk_mode_is_open_write(flag)	((flag) & BLK_OPEN_WRITE)
#else
#define	blk_mode_is_open_write(flag)	((flag) & FMODE_WRITE)
#endif

/*
 * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
 */
#if !defined(HAVE_BIO_SET_OP_ATTRS)
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
	bio->bi_opf = rw | flags;
}
#endif

/*
 * bio_set_flush - Set the appropriate flags in a bio to guarantee
 * data are on non-volatile media on completion.
 */
static inline void
bio_set_flush(struct bio *bio)
{
	bio_set_op_attrs(bio, 0, REQ_PREFLUSH | REQ_OP_WRITE);
}

/*
 * 4.8 API,
 *   REQ_OP_FLUSH
 *
 * Checking both the bio op and the flags works in all cases but may have a
 * performance impact for some kernels. It has the advantage of minimizing
 * kernel specific changes in the zvol code.
 */
static inline boolean_t
bio_is_flush(struct bio *bio)
{
	return (bio_op(bio) == REQ_OP_FLUSH || op_is_flush(bio->bi_opf));
}

/*
 * 4.8 API,
 *   REQ_FUA flag moved to bio->bi_opf
 */
static inline boolean_t
bio_is_fua(struct bio *bio)
{
	return (bio->bi_opf & REQ_FUA);
}

/*
 * 4.8 API,
 *   REQ_OP_DISCARD
 *
 * In all cases the normal I/O path is used for discards. The only
 * difference is how the kernel tags individual I/Os as discards.
 */
static inline boolean_t
bio_is_discard(struct bio *bio)
{
	return (bio_op(bio) == REQ_OP_DISCARD);
}

/*
 * 4.8 API,
 *   REQ_OP_SECURE_ERASE
 */
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
}

/*
 * 2.6.33 API change
 * Discard granularity and alignment restrictions may now be set. For
 * older kernels which do not support this it is safe to skip it.
 */
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}

/*
 * 5.19 API,
 *   bdev_max_discard_sectors()
 *
 * 2.6.32 API,
 *   blk_queue_discard()
 */
static inline boolean_t
bdev_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_DISCARD_SECTORS)
	return (bdev_max_discard_sectors(bdev) > 0 &&
	    bdev_discard_granularity(bdev) > 0);
#elif defined(HAVE_BLK_QUEUE_DISCARD)
	return (blk_queue_discard(bdev_get_queue(bdev)) > 0 &&
	    bdev_get_queue(bdev)->limits.discard_granularity > 0);
#else
#error "Unsupported kernel"
#endif
}

/*
 * 5.19 API,
 *   bdev_max_secure_erase_sectors()
 *
 * 4.8 API,
 *   blk_queue_secure_erase()
 */
static inline boolean_t
bdev_secure_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_SECURE_ERASE_SECTORS)
	return (!!bdev_max_secure_erase_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_SECURE_ERASE)
	return (!!blk_queue_secure_erase(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly. Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times. Other kernel callers and
 * user space processes which don't pass this value will get EBUSY. This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER	((void *)0x2401de7)

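/*
 * Start disk I/O accounting for a bio using whichever accounting interface
 * the running kernel provides. Returns a start time (or accounting cookie)
 * to be passed back to blk_generic_end_io_acct() on completion.
 */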
static inline unsigned long
blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio)
{
#if defined(HAVE_BDEV_IO_ACCT_63)
	return (bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
	    jiffies));
#elif defined(HAVE_BDEV_IO_ACCT_OLD)
	return (bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
	    bio_op(bio), jiffies));
#elif defined(HAVE_DISK_IO_ACCT)
	return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
#elif defined(HAVE_BIO_IO_ACCT)
	return (bio_start_io_acct(bio));
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#else
	/* Unsupported */
	return (0);
#endif
}

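/*
 * Complete the disk I/O accounting started by blk_generic_start_io_acct(),
 * again dispatching to whichever interface the running kernel provides.
 */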
static inline void
blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
{
#if defined(HAVE_BDEV_IO_ACCT_63)
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), bio_sectors(bio),
	    start_time);
#elif defined(HAVE_BDEV_IO_ACCT_OLD)
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
#elif defined(HAVE_DISK_IO_ACCT)
	disk_end_io_acct(disk, bio_op(bio), start_time);
#elif defined(HAVE_BIO_IO_ACCT)
	bio_end_io_acct(bio, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_end_io_acct(q, rw, &disk->part0, start_time);
#endif
}

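/*
 * Allocate a request_queue for drivers that still register a make_request
 * function, i.e. kernels without a submit_bio member in
 * block_device_operations.
 */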
#ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static inline struct request_queue *
blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
	return (blk_alloc_queue(make_request, node_id));
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
	return (blk_alloc_queue_rh(make_request, node_id));
#else
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
	if (q != NULL)
		blk_queue_make_request(q, make_request);

	return (q);
#endif
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */

/*
 * All the io_*() helper functions below can operate on a bio, or a rq, but
 * not both. The older submit_bio() codepath will pass a bio, and the
 * newer blk-mq codepath will pass a rq.
 */
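/*
 * For example (a sketch only; the real zvol request handlers live in
 * zvol_os.c), a handler given either a bio or a request might classify
 * the I/O like this:
 *
 *	if (io_is_flush(bio, rq))
 *		... issue a cache flush ...
 *	else if (io_is_discard(bio, rq) || io_is_secure_erase(bio, rq))
 *		... free the range [io_offset(), io_offset() + io_size()) ...
 *	else if (io_data_dir(bio, rq) == WRITE)
 *		... write the data, honoring io_is_fua() ...
 *	else
 *		... read the data ...
 */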
static inline int
io_data_dir(struct bio *bio, struct request *rq)
{
	if (rq != NULL) {
		if (op_is_write(req_op(rq))) {
			return (WRITE);
		} else {
			return (READ);
		}
	}
	return (bio_data_dir(bio));
}

static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_FLUSH);
	return (bio_is_flush(bio));
}

static inline int
io_is_discard(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_DISCARD);
	return (bio_is_discard(bio));
}

static inline int
io_is_secure_erase(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_SECURE_ERASE);
	return (bio_is_secure_erase(bio));
}

static inline int
io_is_fua(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (rq->cmd_flags & REQ_FUA);
	return (bio_is_fua(bio));
}


static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (blk_rq_pos(rq) << 9);
	return (BIO_BI_SECTOR(bio) << 9);
}

static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (blk_rq_bytes(rq));
	return (BIO_BI_SIZE(bio));
}

static inline int
io_has_data(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (bio_has_data(rq->bio));
	return (bio_has_data(bio));
}
#endif /* _ZFS_BLKDEV_H */