// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity
	 * if the bio needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the
	 * bio at discard granularity boundaries easier in the driver if
	 * needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
	struct bio *bio;

	if (!bio_sects)
		return NULL;

	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
	if (!bio)
		return NULL;
	bio->bi_iter.bi_sector = *sector;
	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
	*sector += bio_sects;
	*nr_sects -= bio_sects;
	/*
	 * We can loop for a long time in here if someone does full device
	 * discards (like mkfs).  Be nice and allow us to schedule out to
	 * avoid soft lockups if preemption is disabled.
	 */
	cond_resched();
	return bio;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
			gfp_mask)))
		*biop = bio_chain_and_submit(*biop, bio);
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
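/*
 * Example (illustrative only, not part of this file): a caller that wants
 * to batch discards with other I/O can drive __blkdev_issue_discard()
 * under its own plug and wait on the resulting bio chain itself.  The
 * bdev/start/nr values below are hypothetical:
 *
 *	struct bio *bio = NULL;
 *	int ret;
 *
 *	ret = __blkdev_issue_discard(bdev, start, nr, GFP_KERNEL, &bio);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */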
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_sectors doesn't overflow bi_size */
	max_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
		nr_sects -= len;
		sector += len;
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev,
				   __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
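/*
 * Worked example for the sizing above (illustrative, assuming 4 KiB pages
 * and BIO_MAX_VECS == 256): zeroing 8 MiB means nr_sects == 16384 512B
 * sectors, i.e. DIV_ROUND_UP(16384, 8) == 2048 pages, clamped to 256 pages
 * per bio.  Each bio therefore carries at most 1 MiB of ZERO_PAGE payload,
 * and the outer loop issues eight of them for the whole range.
 */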
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the request against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
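/*
 * Example (illustrative, hypothetical caller): zero a range while keeping
 * the blocks provisioned, then wait for the chained bios to complete:
 *
 *	struct bio *bio = NULL;
 *	int ret;
 *
 *	ret = __blkdev_issue_zeroout(bdev, start, nr, GFP_KERNEL, &bio,
 *				     BLKDEV_ZERO_NOUNMAP);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */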
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
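/*
 * Example (illustrative, hypothetical caller): securely erase the first
 * 1 MiB of a device with 512B logical blocks (2048 sectors).  The call
 * blocks until all chained REQ_OP_SECURE_ERASE bios have completed, and
 * returns -EOPNOTSUPP if the device cannot securely erase:
 *
 *	int ret = blkdev_issue_secure_erase(bdev, 0, 2048, GFP_KERNEL);
 */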