// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert BLK_ZONE_COND_XXX
 * into string format. Useful for debugging and tracing zone conditions. For
 * an invalid BLK_ZONE_COND_XXX, the string "UNKNOWN" is returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->disk->seq_zones_wlock)
		return false;

	return blk_rq_is_seq_zoned_write(rq);
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->disk->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->disk->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->disk->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
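/*
 * Example (illustrative sketch only, not part of this file): an I/O
 * scheduler dispatch path would typically take the per-zone write lock
 * before issuing a sequential write and release it on request completion.
 * The example_* helper below is hypothetical and only shows the intended
 * call pattern of the functions above:
 *
 *	static bool example_prep_seq_write(struct request *rq)
 *	{
 *		if (blk_req_needs_zone_write_lock(rq) &&
 *		    !blk_req_zone_write_trylock(rq))
 *			return false;	// zone busy, dispatch later
 *		return true;		// safe to dispatch rq
 *	}
 */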
/**
 * bdev_nr_zones - Get number of zones
 * @bdev: Target device
 *
 * Return the total number of zones of a zoned block device. For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int bdev_nr_zones(struct block_device *bdev)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);

	if (!bdev_is_zoned(bdev))
		return 0;
	return (bdev_nr_sectors(bdev) + zone_sectors - 1) >>
		ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(bdev_nr_zones);

/**
 * blkdev_report_zones - Get zones information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback function called for each reported zone
 * @data: Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
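/*
 * Example (illustrative sketch only, not part of this file): counting the
 * empty zones of a device with a report callback. The example_* names are
 * hypothetical:
 *
 *	static int example_count_empty_cb(struct blk_zone *zone,
 *					  unsigned int idx, void *data)
 *	{
 *		if (zone->cond == BLK_ZONE_COND_EMPTY)
 *			(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	// unsigned int count = 0;
 *	// ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *	//			     example_count_empty_cb, &count);
 */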
static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

static int blkdev_zone_reset_all_emulated(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
				       blk_zone_need_reset_cb, need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
				   GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}

static int blkdev_zone_reset_all(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}
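/*
 * Note on the pattern above (editorial sketch, not part of this file):
 * blk_next_bio() chains the previous bio to the newly allocated one, so a
 * single submit_bio_wait() on the last bio waits for the whole chain. A
 * minimal hypothetical reuse of the same idiom:
 *
 *	struct bio *bio = NULL;
 *
 *	for (i = 0; i < n; i++) {
 *		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
 *		bio->bi_iter.bi_sector = start[i];
 *	}
 *	if (bio) {
 *		err = submit_bio_wait(bio);	// waits for the full chain
 *		bio_put(bio);
 *	}
 */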
/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		     sector_t sector, sector_t nr_sectors)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (!bdev_is_zone_start(bdev, sector))
		return -EINVAL;

	if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev);
		return blkdev_zone_reset_all(bdev);
	}

	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}

static int blkdev_truncate_zone_range(struct block_device *bdev,
		blk_mode_t mode, const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}
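/*
 * Example (illustrative userspace sketch, not part of this file): issuing
 * BLKREPORTZONE against an open zoned device fd. The buffer layout matches
 * what blkdev_report_zones_ioctl() above expects: a struct blk_zone_report
 * header immediately followed by rep.nr_zones struct blk_zone entries.
 *
 *	struct blk_zone_report *rep;
 *
 *	rep = calloc(1, sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	if (ioctl(fd, BLKREPORTZONE, rep) == 0)
 *		// rep->nr_zones now holds the number of reported zones,
 *		// and the zone entries follow the header in memory
 */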
/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct blk_zone_range zrange;
	enum req_op op;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		filemap_invalidate_lock(bdev->bd_inode->i_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);

fail:
	if (cmd == BLKRESETZONE)
		filemap_invalidate_unlock(bdev->bd_inode->i_mapping);

	return ret;
}

void disk_free_zone_bitmaps(struct gendisk *disk)
{
	kfree(disk->conv_zones_bitmap);
	disk->conv_zones_bitmap = NULL;
	kfree(disk->seq_zones_wlock);
	disk->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);
	sector_t zone_sectors = q->limits.chunk_sectors;

	/* Check for bad zones and holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	if (zone->start >= capacity || !zone->len) {
		pr_warn("%s: Invalid zone start %llu, length %llu\n",
			disk->disk_name, zone->start, zone->len);
		return -ENODEV;
	}

	/*
	 * All zones must have the same size, with the exception of an eventual
	 * smaller last zone.
	 */
	if (zone->start + zone->len < capacity) {
		if (zone->len != zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else if (zone->len > zone_sectors) {
		pr_warn("%s: Invalid zoned device with larger last zone size\n",
			disk->disk_name);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}
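/*
 * Note (editorial sketch, not part of this file): the bitmaps built above
 * are what the fast-path helpers consult later. For instance, a sector is
 * considered to belong to a sequential zone when its zone bit is not set
 * in conv_zones_bitmap, roughly:
 *
 *	static inline bool example_zone_is_seq(struct gendisk *disk,
 *					       sector_t sector)
 *	{
 *		if (!disk->conv_zones_bitmap)
 *			return true;	// no conventional zones at all
 *		return !test_bit(disk_zone_no(disk, sector),
 *				 disk->conv_zones_bitmap);
 *	}
 */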
/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 * @update_driver_data: Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to check, (re)allocate and
 * initialize a disk request queue's zone bitmaps. This function should
 * normally be called within the disk ->revalidate method for blk-mq based
 * drivers.
 * Before calling this function, the device driver must already have set the
 * device zone size (chunk_sectors limit) and the max zone append limit.
 * For BIO based drivers, this function cannot be used. BIO based device
 * drivers only need to set disk->nr_zones so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	sector_t zone_sectors = q->limits.chunk_sectors;
	sector_t capacity = get_capacity(disk);
	struct blk_revalidate_zone_args args = { };
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!capacity)
		return -ENODEV;

	/*
	 * Check that the device driver indicated a valid zone size and that
	 * the max zone append limit is set.
	 */
	if (!zone_sectors || !is_power_of_2(zone_sectors)) {
		pr_warn("%s: Invalid non power of two zone size (%llu)\n",
			disk->disk_name, zone_sectors);
		return -ENODEV;
	}

	if (!q->limits.max_zone_append_sectors) {
		pr_warn("%s: Invalid 0 maximum zone append limit\n",
			disk->disk_name);
		return -ENODEV;
	}

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	args.disk = disk;
	args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != capacity) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		disk->nr_zones = args.nr_zones;
		swap(disk->seq_zones_wlock, args.seq_zones_wlock);
		swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		disk_free_zone_bitmaps(disk);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
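/*
 * Example (illustrative sketch only, not part of this file): a blk-mq based
 * zoned driver would typically configure the zone size and the zone append
 * limit before calling blk_revalidate_disk_zones(). The example_* values
 * are hypothetical:
 *
 *	blk_queue_chunk_sectors(q, example_zone_sectors);
 *	blk_queue_max_zone_append_sectors(q, example_append_max);
 *	ret = blk_revalidate_disk_zones(disk, NULL);
 *	if (ret)
 *		// fail the probe or revalidation
 */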