// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block layer helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}
	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request is in a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);
		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA:
		 * - If no: set (granularity_aligned_lba - sector_mapped) as the
		 *   bi_size of the first split bio, so that the second bio
		 *   starts at a discard_granularity aligned LBA on the device.
		 * - If yes: use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split in the device driver, the resulting bios are very
		 *   likely to be aligned to the discard_granularity of the
		 *   device's queue.
		 */
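		/*
		 * Worked example (values assumed for illustration): with a
		 * 4 KiB discard_granularity (8 sectors), a request whose
		 * mapped start LBA is 11 rounds up to LBA 16, so the first
		 * bio covers 16 - 11 = 5 sectors and the remainder starts
		 * granularity-aligned.
		 */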
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
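
/*
 * Example (illustrative sketch, not part of this file): the *biop anchor
 * lets a caller chain discards for several ranges into one bio chain and
 * submit them together under a single plug.  The ranges array layout and
 * the function itself are hypothetical.
 */
static int example_discard_ranges(struct block_device *bdev,
		const sector_t (*ranges)[2], int nr_ranges)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int i, ret = 0;

	blk_start_plug(&plug);
	for (i = 0; i < nr_ranges; i++) {
		/* ranges[i][0] is the start sector, ranges[i][1] the length */
		ret = __blkdev_issue_discard(bdev, ranges[i][0], ranges[i][1],
					     GFP_NOFS, 0, &bio);
		if (ret)
			break;
	}
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}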

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
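
/*
 * Example (hypothetical caller): discard an entire device, as mkfs-style
 * tools do via the BLKDISCARD ioctl.  bdev_nr_sectors() is assumed to be
 * available from <linux/blkdev.h>.
 */
static int example_discard_whole_device(struct block_device *bdev)
{
	return blkdev_issue_discard(bdev, 0, bdev_nr_sectors(bdev),
				    GFP_KERNEL, 0);
}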

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
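
/*
 * Example (hypothetical caller): zero a range through the WRITE SAME
 * offload by repeating a single zero page across the device.
 */
static int example_write_same_zero(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}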

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
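
/*
 * Worked example (values assumed): zeroing 1 MiB (2048 sectors) on a
 * device advertising max_write_zeroes_sectors == 512 (256 KiB) yields
 * four chained 512-sector REQ_OP_WRITE_ZEROES bios from the loop above.
 */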

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
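 *
 * Example (with 4 KiB pages, i.e. 8 sectors per page): nr_sects = 7
 * rounds up to 1 page; nr_sects = 2097152 (1 GiB) would need 262144
 * pages and is clamped to BIO_MAX_VECS.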
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

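		/*
		 * bio_add_page() returns the number of bytes added, i.e.
		 * sz on success or 0 when the bio is already full; on a
		 * short add, break out and continue with a fresh bio.
		 */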
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
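
/*
 * Example (illustrative sketch): probe for a zeroing offload only, with
 * BLKDEV_ZERO_NOFALLBACK, so -EOPNOTSUPP tells the caller to zero the
 * range by some other means.  example_clear_range() is hypothetical.
 */
static int example_clear_range(struct block_device *bdev, sector_t sector,
		sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, BLKDEV_ZERO_NOFALLBACK);
	if (ret)
		return ret;	/* -EOPNOTSUPP: no WRITE ZEROES offload */
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}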

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
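
/*
 * Example (hypothetical caller): zero a range while keeping the blocks
 * provisioned, falling back to explicit zero-page writes when the device
 * lacks a WRITE ZEROES offload.
 */
static int example_zero_keep_provisioned(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}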
407