// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

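/*
 * Allocate a new bio with @nr_pages bvec slots.  If @bio is non-NULL, chain
 * it to the new bio and submit it; the returned bio becomes the parent that
 * the caller keeps filling, and completions propagate along the chain.
 */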
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

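/**
 * __blkdev_issue_discard - queue a discard without waiting for completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and chain bios for the sectors in question, but do not wait
 *    for completion.  The last chained bio is returned in *@biop for the
 *    caller to submit.
 */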
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

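	/* Reject ranges that are not aligned to the logical block size. */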
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

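	/*
	 * Cover the range with a chain of bios, each no larger than the
	 * maximum number of sectors a single bio can carry for this queue.
	 */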
	while (nr_sects) {
		unsigned int req_sects = min_t(unsigned int, nr_sects,
				bio_allowed_max_sectors(q));

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
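		/* Discard is advisory: lack of device support is not an error. */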
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
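		/*
		 * WRITE SAME carries a single logical block of payload in one
		 * bvec; the device replicates it across the whole range that
		 * bi_size describes below.
		 */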
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

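/*
 * Generate and chain REQ_OP_WRITE_ZEROES bios for the range without waiting
 * for completion.  BLKDEV_ZERO_NOUNMAP in @flags asks the device to keep the
 * blocks allocated.  Returns -EOPNOTSUPP if the device does not advertise a
 * write-zeroes limit.
 */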
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
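 * E.g. with a 4 KiB PAGE_SIZE, nr_sects = 7 (3.5 KiB) still yields one page.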
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}

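/*
 * Fallback for devices without a zeroing offload: issue regular WRITE bios
 * whose payload is the shared zero page, chained until the range is covered.
 */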
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
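			/* A short bio_add_page() means the bio is full: chain a new one. */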
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue writes to
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

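	/*
	 * Prefer the write-zeroes offload; if it fails at submission time,
	 * fall back to writing zero pages unless BLKDEV_ZERO_NOFALLBACK
	 * forbids that.
	 */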
retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);