// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert a
 * BLK_ZONE_COND_XXX zone condition into its string representation. Useful
 * for debugging and tracing zone conditions. For an invalid
 * BLK_ZONE_COND_XXX, the string "UNKNOWN" is returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (!rq->q->disk->seq_zones_wlock)
		return false;

	if (bdev_op_is_zoned_write(rq->q->disk->part0, req_op(rq)))
		return blk_rq_zone_is_seq(rq);

	return false;
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->disk->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->disk->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->disk->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
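/*
 * Illustrative sketch, not part of the kernel: how a dispatch path can pair
 * the helpers above so that at most one write per sequential zone is in
 * flight (the pattern implemented by the mq-deadline I/O scheduler). The
 * function name example_try_dispatch() is hypothetical.
 */
static inline struct request *example_try_dispatch(struct request *rq)
{
	/* Passthrough and conventional zone writes need no serialization. */
	if (!blk_req_needs_zone_write_lock(rq))
		return rq;

	/* Dispatch only if the target zone is not already write-locked. */
	if (blk_req_zone_write_trylock(rq))
		return rq;

	/* Zone busy: leave rq for a later dispatch attempt. */
	return NULL;
}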
/**
 * bdev_nr_zones - Get number of zones
 * @bdev:	Target device
 *
 * Return the total number of zones of a zoned block device. For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int bdev_nr_zones(struct block_device *bdev)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);

	if (!bdev_is_zoned(bdev))
		return 0;
	return (bdev_nr_sectors(bdev) + zone_sectors - 1) >>
		ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(bdev_nr_zones);

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
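/*
 * Illustrative sketch, not part of the kernel: a minimal report_zones_cb and
 * its invocation, following the memalloc_noXX note above. The names
 * example_zone_empty_cb() and example_zone_is_empty() are hypothetical.
 */
static inline int example_zone_empty_cb(struct blk_zone *zone,
					unsigned int idx, void *data)
{
	bool *empty = data;

	*empty = zone->cond == BLK_ZONE_COND_EMPTY;
	return 0;
}

static inline int example_zone_is_empty(struct block_device *bdev,
					sector_t sector, bool *empty)
{
	unsigned int noio_flag;
	int ret;

	/* Constrain allocations done while reporting to GFP_NOIO. */
	noio_flag = memalloc_noio_save();
	ret = blkdev_report_zones(bdev, sector, 1, example_zone_empty_cb,
				  empty);
	memalloc_noio_restore(noio_flag);

	if (ret < 0)
		return ret;
	/* Exactly one zone must have been reported on success. */
	return ret == 1 ? 0 : -EIO;
}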
static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
					  gfp_t gfp_mask)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
				       blk_zone_need_reset_cb, need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}

static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone
 *		and must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		     sector_t sector, sector_t nr_sectors, gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (!bdev_is_zone_start(bdev, sector))
		return -EINVAL;

	if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
		return blkdev_zone_reset_all(bdev, gfp_mask);
	}

	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
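/*
 * Illustrative sketch, not part of the kernel: resetting the single zone
 * containing @sector with a zone-aligned call to blkdev_zone_mgmt(). The
 * function name example_reset_one_zone() is hypothetical.
 */
static inline int example_reset_one_zone(struct block_device *bdev,
					 sector_t sector)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);

	/* Zone sizes are a power of two: round down to the zone start. */
	sector &= ~(zone_sectors - 1);

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, sector, zone_sectors,
				GFP_KERNEL);
}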
struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
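/*
 * Illustrative userspace sketch (not kernel code): the BLKREPORTZONE
 * argument is a struct blk_zone_report header immediately followed by an
 * array of rep->nr_zones struct blk_zone entries, e.g.:
 *
 *	struct blk_zone_report *rep =
 *		malloc(sizeof(*rep) + nr_zones * sizeof(struct blk_zone));
 *	struct blk_zone *zones = (struct blk_zone *)(rep + 1);
 *
 *	rep->sector = sector;
 *	rep->nr_zones = nr_zones;
 *	if (!ioctl(fd, BLKREPORTZONE, rep)) {
 *		// rep->nr_zones now holds the number of zones reported,
 *		// and zones[0..rep->nr_zones - 1] are valid.
 *	}
 */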
static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct blk_zone_range zrange;
	enum req_op op;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		filemap_invalidate_lock(bdev->bd_inode->i_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

fail:
	if (cmd == BLKRESETZONE)
		filemap_invalidate_unlock(bdev->bd_inode->i_mapping);

	return ret;
}
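/*
 * Illustrative userspace sketch (not kernel code): resetting a zone-aligned
 * range of zones with BLKRESETZONE on a file descriptor open for writing:
 *
 *	struct blk_zone_range zrange = {
 *		.sector = zone_start_sector,
 *		.nr_sectors = nr_zones * zone_sectors,
 *	};
 *
 *	int ret = ioctl(fd, BLKRESETZONE, &zrange);
 */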
void disk_free_zone_bitmaps(struct gendisk *disk)
{
	kfree(disk->conv_zones_bitmap);
	disk->conv_zones_bitmap = NULL;
	kfree(disk->seq_zones_wlock);
	disk->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	zone_sectors;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of a possibly
	 * smaller last zone.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method for blk-mq based drivers. For BIO based
 * drivers, only disk->nr_zones needs to be updated so that the sysfs exposed
 * value is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		disk->nr_zones = args.nr_zones;
		swap(disk->seq_zones_wlock, args.seq_zones_wlock);
		swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		disk_free_zone_bitmaps(disk);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);

void disk_clear_zone_settings(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	blk_mq_freeze_queue(q);

	disk_free_zone_bitmaps(disk);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	disk->nr_zones = 0;
	disk->max_open_zones = 0;
	disk->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}
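/*
 * Illustrative sketch, not part of the kernel: a blk-mq based zoned driver
 * would typically call blk_revalidate_disk_zones() after (re)reading the
 * device zone configuration. The names example_update_driver_data() and
 * example_driver_revalidate() are hypothetical.
 */
static inline void example_update_driver_data(struct gendisk *disk)
{
	/* Resize driver-private per-zone data here; the queue is frozen. */
}

static inline int example_driver_revalidate(struct gendisk *disk)
{
	return blk_revalidate_disk_zones(disk, example_update_driver_data);
}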