xref: /linux/block/blk-lib.c (revision 38f252553300ee1d3346a5273e95fe1dd60ca50a)
/*
 * Functions related to generic helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

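/*
 * next_bio() below submits the bio built so far and returns a fresh bio
 * chained to it, so callers can stream an arbitrarily long sequence of
 * bios and wait only once, on the tail of the chain.  A hedged sketch of
 * the calling convention (the loop condition and fields are hypothetical):
 *
 *	struct bio *bio = NULL;
 *
 *	while (work_left) {
 *		bio = next_bio(bio, WRITE, nr_pages, gfp_mask);
 *		... fill in bi_bdev, bi_iter and the payload ...
 *	}
 *	if (bio)
 *		ret = submit_bio_wait(WRITE, bio);
 */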
static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(rw, bio);
	}

	return new;
}

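/**
 * __blkdev_issue_discard - queue discard bios without waiting for completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @type:	request type bits; REQ_SECURE selects a secure discard
 * @biop:	in/out: tail of the discard bio chain
 *
 * Description:
 *    Build and submit all but the last of the bios needed to discard the
 *    sectors in question.  The final, not yet submitted bio is returned in
 *    @biop for the caller to submit, typically via submit_bio_wait().
 */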
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	int alignment;

	if (!q)
		return -ENXIO;
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
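		/*
		 * Worked example with illustrative numbers (not taken from any
		 * real device): granularity = 8, alignment = 2, sector = 10,
		 * req_sects = 27.  The candidate end_sect is 37, and
		 * 37 % 8 == 5 != 2, so end_sect is pulled back to 34 (the
		 * largest value below 37 that is congruent to 2 mod 8) and
		 * req_sects becomes 34 - 10 = 24.
		 */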
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, type, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (flags & BLKDEV_DISCARD_SECURE)
		type |= REQ_SECURE;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
			&bio);
	if (!ret && bio)
		ret = submit_bio_wait(type, bio);
	blk_finish_plug(&plug);

	/* A device without discard support is treated as a successful no-op. */
	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_discard);
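
/*
 * Hedged usage sketch, not part of the original file: how a hypothetical
 * filesystem might discard a byte range it no longer needs.  The function
 * name and the start/len parameters are illustrative assumptions; only
 * blkdev_issue_discard() above is real.
 */
static int example_discard_range(struct block_device *bdev, u64 start,
				 u64 len)
{
	/* The discard helpers work in units of 512-byte sectors. */
	sector_t sector = start >> 9;
	sector_t nr_sects = len >> 9;

	/* GFP_NOFS keeps the allocation from recursing into the filesystem. */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 0);
}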

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		/*
		 * The payload is a single logical block; the device
		 * replicates it across the whole range.
		 */
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
	}

	if (bio)
		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
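
/*
 * Hedged usage sketch (illustrative, not part of the original file):
 * replicate a single page across a sector range with WRITE SAME.  The
 * function name and the explicit capability check are assumptions; the
 * sketch relies only on bdev_write_same() and the helper above.
 */
static int example_replicate_page(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, struct page *page)
{
	/* Bail out early if the device does not advertise WRITE SAME. */
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       page);
}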

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio = NULL;
	unsigned int sz;

	while (nr_sects != 0) {
		bio = next_bio(bio, WRITE,
				min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			/*
			 * A short add means this bio is full; fall back to
			 * the outer loop to chain a new one.
			 */
			if (ret < (sz << 9))
				break;
		}
	}

	if (bio)
		return submit_bio_wait(WRITE, bio);
	return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded.  If the
 *  discard request fails, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function falls back to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them.  If the block device supports the WRITE SAME command
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range.  Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
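
/*
 * Hedged usage sketch (illustrative only): zero a sector range, letting
 * blkdev_issue_zeroout() pick between discard, WRITE SAME and plain
 * writes as described above.  Passing true for @discard merely permits
 * the discard fast path; it is still skipped unless the device sets
 * discard_zeroes_data.
 */
static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS, true);
}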