// SPDX-License-Identifier: GPL-2.0
/*
 * Generic helper functions for the block layer
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

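/*
 * Cap the size of a discard bio issued at a given sector.  Illustrative
 * example (made-up numbers): with a 1 MiB discard granularity (2048 sectors)
 * and a starting sector of 2050, the first bio is capped at
 * 4096 - 2050 = 2046 sectors so that the next bio starts granularity
 * aligned; once aligned, bios are capped at
 * round_down(UINT_MAX, granularity) >> SECTOR_SHIFT sectors.
 */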
static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity if
	 * it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}

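/*
 * Allocate one discard bio covering the start of the range, capped by
 * bio_discard_limit().  On success, *sector and *nr_sects are advanced past
 * the sectors the bio covers; NULL is returned once the range is exhausted
 * or if the bio allocation fails.
 */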
struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
	struct bio *bio;

	if (!bio_sects)
		return NULL;

	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
	if (!bio)
		return NULL;
	bio->bi_iter.bi_sector = *sector;
	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
	*sector += bio_sects;
	*nr_sects -= bio_sects;
	/*
	 * We can loop for a long time in here if someone does full device
	 * discards (like mkfs). Be nice and allow us to schedule out to avoid
	 * softlocking if preempt is disabled.
	 */
	cond_resched();
	return bio;
}

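/*
 * Build and submit a chain of discard bios for the whole range without
 * waiting for completion.  Each new bio is chained onto *biop; the caller is
 * expected to submit and wait on the resulting chain, as
 * blkdev_issue_discard() below does with submit_bio_wait().
 */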
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
			gfp_mask)))
		*biop = bio_chain_and_submit(*biop, bio);
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
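
/*
 * Minimal usage sketch (illustrative only, not taken from an in-tree
 * caller): discard the first 1 MiB of a device, i.e. 2048 sectors of
 * 512 bytes:
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL);
 *
 * Note that -EOPNOTSUPP from the submitted chain is folded into 0 above,
 * so a zero return does not guarantee the range was actually discarded.
 */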

static sector_t bio_write_zeroes_limit(struct block_device *bdev)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;

	return min(bdev_write_zeroes_sectors(bdev),
		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
}
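
/*
 * Worked example for bio_write_zeroes_limit() above (illustrative numbers):
 * with 4096-byte logical blocks, bs_mask is 7, so the cap is
 * (UINT_MAX >> SECTOR_SHIFT) & ~7 sectors, i.e. the largest sector count
 * whose byte length still fits in the 32-bit bi_size field while remaining
 * aligned to the logical block size; the device's advertised write-zeroes
 * limit is applied on top of that.
 */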

/*
 * There is no reliable way for the SCSI subsystem to determine whether a
 * device supports a WRITE SAME operation without actually performing a write
 * to media. As a result, write_zeroes is enabled by default and will be
 * disabled if a zeroing operation subsequently fails. This means that this
 * queue limit is likely to change at runtime.
 */
static void __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags, sector_t limit)
{
	while (nr_sects) {
		unsigned int len = min(nr_sects, limit);
		struct bio *bio;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
		*biop = bio_chain_and_submit(*biop, bio);

		nr_sects -= len;
		sector += len;
		cond_resched();
	}
}

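/*
 * Sample the write-zeroes limit once, issue chained REQ_OP_WRITE_ZEROES bios
 * under a plug and wait for the result.  The limit is read a single time and
 * passed down because, as noted above, it can change at runtime (it may even
 * drop to zero while the loop is running).
 */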
static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	blk_start_plug(&plug);
	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
			flags, limit);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	/*
	 * For some devices there is no non-destructive way to verify whether
	 * WRITE ZEROES is actually supported. These will clear the capability
	 * on an I/O error, in which case we'll turn any error into
	 * "not supported" here.
	 */
	if (ret && !bdev_write_zeroes_sectors(bdev))
		return -EOPNOTSUPP;
	return ret;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
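/*
 * For example, assuming 4 KiB pages (8 sectors per page): nr_sects = 4
 * rounds up to 1 page, nr_sects = 24 gives 3 pages, and anything larger
 * than BIO_MAX_VECS * 8 sectors is clamped to BIO_MAX_VECS pages.
 */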
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}

static void __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned int flags)
{
	while (nr_sects) {
		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
		struct bio *bio;

		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		do {
			unsigned int len, added;

			len = min_t(sector_t,
				PAGE_SIZE, nr_sects << SECTOR_SHIFT);
			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
			if (added < len)
				break;
			nr_sects -= added >> SECTOR_SHIFT;
			sector += added >> SECTOR_SHIFT;
		} while (nr_sects);

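		/*
		 * Submit whatever fit into the bio: if bio_add_page() stopped
		 * short the bio is full, and the outer loop allocates a fresh
		 * one for the remaining sectors.
		 */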
		*biop = bio_chain_and_submit(*biop, bio);
		cond_resched();
	}
}

static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	if (flags & BLKDEV_ZERO_NOFALLBACK)
		return -EOPNOTSUPP;

	blk_start_plug(&plug);
	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}

/**
 * __blkdev_issue_zeroout - generate zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);

	if (bdev_read_only(bdev))
		return -EPERM;

	if (limit) {
		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, biop, flags, limit);
	} else {
		if (flags & BLKDEV_ZERO_NOFALLBACK)
			return -EOPNOTSUPP;
		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
				biop, flags);
	}
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
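
/*
 * Minimal usage sketch for __blkdev_issue_zeroout() (illustrative only, not
 * taken from an in-tree caller): zero 256 sectors starting at 'sector' and
 * wait for completion:
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	err = __blkdev_issue_zeroout(bdev, sector, 256, GFP_KERNEL, &bio, 0);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *
 * blkdev_issue_zeroout() below is the synchronous variant for callers that
 * just want to zero a range and wait.
 */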

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;

	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	if (bdev_write_zeroes_sectors(bdev)) {
		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, flags);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);

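/**
 * blkdev_issue_secure_erase - queue a secure erase
 * @bdev: blockdev to issue secure erase for
 * @sector: start sector
 * @nr_sects: number of sectors to secure-erase
 * @gfp: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue REQ_OP_SECURE_ERASE bios for the sectors in question and wait for
 *    completion.  Returns -EOPNOTSUPP if the device does not support secure
 *    erase, -EINVAL if the range is not aligned to the logical block size,
 *    and -EPERM if the device is read-only.
 */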
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);