xref: /linux/block/blk-lib.c (revision bbd848e0fade51ae51dab86a0683069cef89953f)
/*
 * Functions related to generic helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

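/*
 * Helper used by the loops below: allocate a new bio and, if an earlier
 * bio is passed in, chain it to the new one and submit it.  Callers keep
 * only the returned tail of the chain and either wait on it with
 * submit_bio_wait() or hand it back through a **biop argument.
 */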
static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(rw, bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	int alignment;

	if (!q)
		return -ENXIO;
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
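		/*
		 * Illustrative example (not in the original source): with a
		 * 1MB discard granularity (2048 sectors), alignment 0 and
		 * req_sects capped at UINT_MAX >> 9 = 8388607, end_sect is
		 * rounded down to 8386560, the previous 2048-sector boundary,
		 * so the next chunk of the split starts aligned.
		 */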

		bio = next_bio(bio, type, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (flags & BLKDEV_DISCARD_SECURE)
		type |= REQ_SECURE;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(type, bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
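/*
 * Illustrative usage sketch (not part of the original file): a filesystem
 * discarding a freed range might call, for example,
 *
 *	err = blkdev_issue_discard(bdev, start_sector, nr_sectors,
 *				   GFP_NOFS, 0);
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 *
 * where start_sector and nr_sectors are hypothetical names.  Passing
 * BLKDEV_DISCARD_SECURE as the flags argument requests a secure discard,
 * which fails with -EOPNOTSUPP if the queue does not support it.
 */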

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
	}

	if (bio)
		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
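/*
 * Illustrative usage sketch (not part of the original file): zeroing a
 * range with WRITE SAME by passing the shared zero page, which is
 * essentially what blkdev_issue_zeroout() below does:
 *
 *	if (bdev_write_same(bdev) &&
 *	    blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
 *				    ZERO_PAGE(0)) == 0)
 *		return 0;
 */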

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */

static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio = NULL;
	unsigned int sz;

	while (nr_sects != 0) {
		bio = next_bio(bio, WRITE,
				min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

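		/*
		 * Fill the bio with as many zero pages as fit;
		 * bio_add_page() returns the number of bytes actually
		 * added, so a short return means this bio is full and the
		 * outer loop chains a new one for the remaining sectors.
		 */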
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
	}

	if (bio)
		return submit_bio_wait(WRITE, bio);
	return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded. Should
 *  the discard request fail, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function will resort to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them. If the block device supports the WRITE SAME command
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range. Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
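/*
 * Illustrative usage sketch (not part of the original file): zero a range
 * and let the block layer use discard when the device guarantees that
 * discarded blocks read back as zeroes:
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);
 *
 * With @discard false, the range is always written explicitly, via WRITE
 * SAME of the zero page when supported or regular zero-filled WRITEs.
 */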