xref: /linux/block/blk-lib.c (revision e73c23ff736e1ea371dfa419d7bf8e77ee53044a)
/*
 * Functions related to generic block layer helpers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

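/*
 * Allocate a new bio with room for @nr_pages vecs. If @bio is non-NULL,
 * chain it in front of the new bio and submit it, so a caller building a
 * long chain only ever holds a reference to the current tail.
 */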
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

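/**
 * __blkdev_issue_discard - queue a discard without waiting for completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Build and submit a chain of discard bios for the sectors in question.
 *    The final bio is returned through @biop for the caller to submit,
 *    which allows batching under a plug.
 */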
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (flags & BLKDEV_DISCARD_ZERO)
			return -EOPNOTSUPP;
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		if ((flags & BLKDEV_DISCARD_ZERO) &&
		    !q->limits.discard_zeroes_data)
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

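	/*
	 * Worked example for the alignment trim below (illustrative numbers):
	 * with granularity = 8 and alignment = 2, a mid-range chunk ending at
	 * sector 27 has 27 % 8 == 3 != 2, so end_sect is pulled back to 26,
	 * the previous sector satisfying (end_sect - alignment) % 8 == 0.
	 */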
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question and wait for it
 *    to complete.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO))
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

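/*
 * Illustrative sketch, not part of this file: how a caller might discard
 * an entire device with the helper above. The size computation via
 * i_size_read() is an assumption about the caller's context, not a
 * requirement of blkdev_issue_discard().
 */
#if 0	/* example only */
static int example_discard_whole_bdev(struct block_device *bdev)
{
	/* Device length in 512-byte sectors. */
	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;

	/* Plain discard of the whole range; may sleep, GFP_KERNEL is fine. */
	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}
#endif
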
/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all point
 *    at the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

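/*
 * Illustrative sketch, not part of this file: zeroing a range via WRITE
 * SAME by pointing every bio at the shared zero page. The helper name is
 * hypothetical; the call itself matches the API above.
 */
#if 0	/* example only */
static int example_write_same_zero(struct block_device *bdev,
				   sector_t sector, sector_t nr_sects)
{
	/* One page of zeroes is replicated across the whole range. */
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}
#endif
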
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @discard:	discard flag
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		bool discard)
{
	int ret;
	int bi_size = 0;
	struct bio *bio = *biop;
	unsigned int sz;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (discard) {
		ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
				BLKDEV_DISCARD_ZERO, biop);
		if (ret != -EOPNOTSUPP)
			goto out;
	}

	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
			ZERO_PAGE(0), biop);
	if (ret != -EOPNOTSUPP)
		goto out;

	ret = 0;
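	/*
	 * Neither a zero-guaranteeing discard nor WRITE SAME was available;
	 * fall back to chaining plain zero-filled writes. bio_add_page()
	 * returns the number of bytes added, so a short add means the bio
	 * is full and a fresh one must be chained.
	 */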
	while (nr_sects != 0) {
		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < (sz << 9))
				break;
		}
		cond_resched();
	}

	*biop = bio;
out:
	return ret;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range. If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded. Should
 *  the discard request fail, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function will resort to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them. If the block device supports the WRITE SAME command,
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range. Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	int ret;
	struct bio *bio = NULL;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
			&bio, discard);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
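
/*
 * Illustrative sketch, not part of this file: a hypothetical caller
 * zeroing a range, letting the helper pick the cheapest mechanism
 * (discard with zero guarantee, then WRITE SAME, then plain writes).
 */
#if 0	/* example only */
static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);
}
#endif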