xref: /linux/block/blk-lib.c (revision a3a02a52bcfcbcc4a637d4b68bf1bc391c9fad02)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity if
	 * it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
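
/*
 * Worked example (illustrative, values assumed): with a discard granularity
 * of 1 MiB (2048 sectors), a discard starting at absolute sector 3000 is
 * first limited to round_up(3000, 2048) - 3000 = 1096 sectors so that the
 * next bio starts on a granularity boundary; once aligned, each bio is
 * capped at round_down(UINT_MAX, 1 MiB) bytes worth of sectors.
 */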

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
	struct bio *bio;

	if (!bio_sects)
		return NULL;

	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
	if (!bio)
		return NULL;
	bio->bi_iter.bi_sector = *sector;
	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
	*sector += bio_sects;
	*nr_sects -= bio_sects;
	/*
	 * We can loop for a long time in here if someone does full device
	 * discards (like mkfs).  Be nice and allow us to schedule out to avoid
	 * soft lockups if preemption is disabled.
	 */
	cond_resched();
	return bio;
}
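
/*
 * Illustrative caller sketch (not part of this file): a driver that wants
 * to issue and complete each discard bio itself can keep calling
 * blk_alloc_discard_bio() until the range is consumed.  The end_io handler
 * below is hypothetical.
 *
 *	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
 *			GFP_NOIO))) {
 *		bio->bi_end_io = my_discard_end_io;	(hypothetical)
 *		submit_bio(bio);
 *	}
 */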

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
			gfp_mask)))
		*biop = bio_chain_and_submit(*biop, bio);
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
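
/*
 * Example (illustrative sketch): discard an entire block device, e.g. from a
 * hypothetical mkfs-style kernel caller.  blkdev_issue_discard() sleeps, so
 * GFP_KERNEL is fine here; a device that does not support discard is
 * reported as success (0) because -EOPNOTSUPP is filtered out above.
 *
 *	int err = blkdev_issue_discard(bdev, 0, bdev_nr_sectors(bdev),
 *				       GFP_KERNEL);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 */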

static sector_t bio_write_zeroes_limit(struct block_device *bdev)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;

	return min(bdev_write_zeroes_sectors(bdev),
		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
}

static void __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects,
				bio_write_zeroes_limit(bdev));
		struct bio *bio;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
		*biop = bio_chain_and_submit(*biop, bio);

		nr_sects -= len;
		sector += len;
		cond_resched();
	}
}

static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	blk_start_plug(&plug);
	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	/*
	 * For some devices there is no non-destructive way to verify whether
	 * WRITE ZEROES is actually supported.  These will clear the capability
	 * on an I/O error, in which case we'll turn any error into
	 * "not supported" here.
	 */
	if (ret && !bdev_write_zeroes_sectors(bdev))
		return -EOPNOTSUPP;
	return ret;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
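
/*
 * Worked example (illustrative, assumes 4 KiB pages and BIO_MAX_VECS == 256):
 * nr_sects = 1 rounds up to 1 page, nr_sects = 9 rounds up to 2 pages, and
 * anything at or above 256 * 8 = 2048 sectors is clamped to 256 pages.
 */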

static void __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned int flags)
{
	while (nr_sects) {
		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
		struct bio *bio;

		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		do {
			unsigned int len, added;

			len = min_t(sector_t,
				PAGE_SIZE, nr_sects << SECTOR_SHIFT);
			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
			if (added < len)
				break;
			nr_sects -= added >> SECTOR_SHIFT;
			sector += added >> SECTOR_SHIFT;
		} while (nr_sects);

		*biop = bio_chain_and_submit(*biop, bio);
		cond_resched();
	}
}

static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	if (flags & BLKDEV_ZERO_NOFALLBACK)
		return -EOPNOTSUPP;

	blk_start_plug(&plug);
	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the zeroout against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	if (bdev_read_only(bdev))
		return -EPERM;

	if (bdev_write_zeroes_sectors(bdev)) {
		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, biop, flags);
	} else {
		if (flags & BLKDEV_ZERO_NOFALLBACK)
			return -EOPNOTSUPP;
		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
				biop, flags);
	}
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
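
/*
 * Illustrative caller sketch (not part of this file): chain zeroout bios
 * behind a caller-owned anchor bio and wait for the whole chain, similar to
 * what blkdev_issue_zeroout() below does internally.
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int err;
 *
 *	blk_start_plug(&plug);
 *	err = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				     &bio, BLKDEV_ZERO_NOUNMAP);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */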

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;

	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	if (bdev_write_zeroes_sectors(bdev)) {
		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, flags);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
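
/*
 * Example (illustrative sketch): zero a metadata region, but only when the
 * device can offload it (BLKDEV_ZERO_NOFALLBACK).  start_sector and
 * nr_sectors are hypothetical, logical-block-aligned values.
 *
 *	int err = blkdev_issue_zeroout(bdev, start_sector, nr_sectors,
 *				       GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
 *
 * -EOPNOTSUPP here means the device cannot offload zeroing and the caller
 * has to fall back itself (or drop BLKDEV_ZERO_NOFALLBACK to let this helper
 * write zero pages instead).
 */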

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
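
/*
 * Example (illustrative sketch): securely erase an entire device, e.g. from
 * a hypothetical sanitize ioctl path.  Returns -EOPNOTSUPP when the device
 * does not advertise secure erase support.
 *
 *	int err = blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
 *					    GFP_KERNEL);
 */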