// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 * Copyright (c) 2024, Western Digital Corporation or its affiliates.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/mempool.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-mq-debugfs.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/*
 * Per-zone write plug.
 * @node: hlist_node structure for managing the plug using a hash table.
 * @ref: Zone write plug reference counter. A zone write plug reference is
 *       always at least 1 when the plug is hashed in the disk plug hash table.
 *       The reference is incremented whenever a new BIO needing plugging is
 *       submitted and when a function needs to manipulate a plug. The
 *       reference count is decremented whenever a plugged BIO completes and
 *       when a function that referenced the plug returns. The initial
 *       reference is dropped whenever the zone of the zone write plug is reset,
 *       finished and when the zone becomes full (last write BIO to the zone
 *       completes).
 * @lock: Spinlock to atomically manipulate the plug.
 * @flags: Flags indicating the plug state.
 * @zone_no: The number of the zone the plug is managing.
 * @wp_offset: The zone write pointer location relative to the start of the zone
 *             as a number of 512B sectors.
 * @bio_list: The list of BIOs that are currently plugged.
 * @bio_work: Work struct to handle issuing of plugged BIOs.
 * @rcu_head: RCU head to free zone write plugs with an RCU grace period.
 * @disk: The gendisk the plug belongs to.
 */
struct blk_zone_wplug {
	struct hlist_node	node;
	refcount_t		ref;
	spinlock_t		lock;
	unsigned int		flags;
	unsigned int		zone_no;
	unsigned int		wp_offset;
	struct bio_list		bio_list;
	struct work_struct	bio_work;
	struct rcu_head		rcu_head;
	struct gendisk		*disk;
};

/*
 * Zone write plug flags bits:
 *  - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
 *    that is, that write BIOs are being throttled due to a write BIO already
 *    being executed or the zone write plug bio list is not empty.
 *  - BLK_ZONE_WPLUG_NEED_WP_UPDATE: Indicates that we lost track of a zone
 *    write pointer offset and need to update it.
 *  - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
 *    from the disk hash table and that the initial reference to the zone
 *    write plug set when the plug was first added to the hash table has been
 *    dropped. This flag is set when a zone is reset, finished or becomes full,
 *    to prevent new references to the zone write plug from being taken for
 *    newly incoming BIOs. A zone write plug flagged with this flag will be
 *    freed once all remaining references from BIOs or functions are dropped.
 */
#define BLK_ZONE_WPLUG_PLUGGED		(1U << 0)
#define BLK_ZONE_WPLUG_NEED_WP_UPDATE	(1U << 1)
#define BLK_ZONE_WPLUG_UNHASHED		(1U << 2)

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralize block layer function to convert BLK_ZONE_COND_XXX
 * into string format. Useful for debugging and tracing zone conditions. For
 * invalid BLK_ZONE_COND_XXX it returns string "UNKNOWN".
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
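/*
 * Example (illustrative only, not part of the original file): using
 * blk_zone_cond_str() when logging a zone condition, e.g. from a report
 * zones callback:
 *
 *	pr_debug("zone at %llu is %s\n", zone->start,
 *		 blk_zone_cond_str(zone->cond));
 *
 * blk_zone_cond_str(BLK_ZONE_COND_EMPTY) returns "EMPTY"; any value without
 * a name in zone_cond_name[] returns "UNKNOWN".
 */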
struct disk_report_zones_cb_args {
	struct gendisk	*disk;
	report_zones_cb	user_cb;
	void		*user_data;
};

static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
					   struct blk_zone *zone);

static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx,
				void *data)
{
	struct disk_report_zones_cb_args *args = data;
	struct gendisk *disk = args->disk;

	if (disk->zone_wplugs_hash)
		disk_zone_wplug_sync_wp_offset(disk, zone);

	if (!args->user_cb)
		return 0;

	return args->user_cb(zone, idx, args->user_data);
}

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);
	struct disk_report_zones_cb_args args = {
		.disk = disk,
		.user_cb = cb,
		.user_data = data,
	};

	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones,
					disk_report_zones_cb, &args);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
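/*
 * Example (illustrative only, not part of the original file): counting the
 * zones of a device that are in the EMPTY condition. The callback and the
 * counter below are hypothetical and error handling is minimal:
 *
 *	static int count_empty_zone_cb(struct blk_zone *zone, unsigned int idx,
 *				       void *data)
 *	{
 *		unsigned int *nr_empty = data;
 *
 *		if (zone->cond == BLK_ZONE_COND_EMPTY)
 *			(*nr_empty)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_empty = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_empty_zone_cb, &nr_empty);
 *	if (ret < 0)
 *		return ret;
 */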
static int blkdev_zone_reset_all(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		     sector_t sector, sector_t nr_sectors)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (!bdev_is_zone_start(bdev, sector))
		return -EINVAL;

	if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones, use
	 * REQ_OP_ZONE_RESET_ALL.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity)
		return blkdev_zone_reset_all(bdev);

	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
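/*
 * Example (illustrative only, not part of the original file): resetting a
 * single zone and then finishing it, where 'zone_sector' is assumed to be
 * the start sector of a sequential zone:
 *
 *	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_sector,
 *			       bdev_zone_sectors(bdev));
 *	if (!ret)
 *		ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_FINISH, zone_sector,
 *				       bdev_zone_sectors(bdev));
 */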
struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
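/*
 * Example (illustrative only, not part of the original file): the userspace
 * side of the BLKREPORTZONE ioctl processed above. The caller allocates a
 * struct blk_zone_report immediately followed by an array of struct blk_zone
 * entries, matching the args.zones pointer computed above:
 *
 *	struct blk_zone_report *rep;
 *
 *	rep = malloc(sizeof(*rep) + nr_zones * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = nr_zones;
 *	if (ioctl(fd, BLKREPORTZONE, rep) < 0)
 *		return -errno;
 *	// rep->nr_zones now holds the number of zones actually reported and
 *	// rep->zones[0..rep->nr_zones - 1] the zone descriptors.
 */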
static int blkdev_truncate_zone_range(struct block_device *bdev,
		blk_mode_t mode, const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct blk_zone_range zrange;
	enum req_op op;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		filemap_invalidate_lock(bdev->bd_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);

fail:
	if (cmd == BLKRESETZONE)
		filemap_invalidate_unlock(bdev->bd_mapping);

	return ret;
}

static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
{
	return zone->start + zone->len >= get_capacity(disk);
}

static bool disk_zone_is_full(struct gendisk *disk,
			      unsigned int zno, unsigned int offset_in_zone)
{
	if (zno < disk->nr_zones - 1)
		return offset_in_zone >= disk->zone_capacity;
	return offset_in_zone >= disk->last_zone_capacity;
}

static bool disk_zone_wplug_is_full(struct gendisk *disk,
				    struct blk_zone_wplug *zwplug)
{
	return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset);
}

static bool disk_insert_zone_wplug(struct gendisk *disk,
				   struct blk_zone_wplug *zwplug)
{
	struct blk_zone_wplug *zwplg;
	unsigned long flags;
	unsigned int idx =
		hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits);

	/*
	 * Add the new zone write plug to the hash table, but carefully as we
	 * are racing with other submission contexts, so we may already have a
	 * zone write plug for the same zone.
	 */
	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	hlist_for_each_entry_rcu(zwplg, &disk->zone_wplugs_hash[idx], node) {
		if (zwplg->zone_no == zwplug->zone_no) {
			spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
			return false;
		}
	}
	hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);

	return true;
}

static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
						  sector_t sector)
{
	unsigned int zno = disk_zone_no(disk, sector);
	unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
	struct blk_zone_wplug *zwplug;

	rcu_read_lock();

	hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
		if (zwplug->zone_no == zno &&
		    refcount_inc_not_zero(&zwplug->ref)) {
			rcu_read_unlock();
			return zwplug;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
{
	struct blk_zone_wplug *zwplug =
		container_of(rcu_head, struct blk_zone_wplug, rcu_head);

	mempool_free(zwplug, zwplug->disk->zone_wplugs_pool);
}

static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
{
	if (refcount_dec_and_test(&zwplug->ref)) {
		WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
		WARN_ON_ONCE(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
		WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));

		call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
	}
}

static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
						 struct blk_zone_wplug *zwplug)
{
	lockdep_assert_held(&zwplug->lock);
	/* If the zone write plug was already removed, we are done. */
	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
		return false;

	/* If the zone write plug is still plugged, it cannot be removed. */
	if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)
		return false;

	/*
	 * Completions of BIOs with blk_zone_write_plug_bio_endio() may
	 * happen after handling a request completion with
	 * blk_zone_write_plug_finish_request() (e.g. with split BIOs
	 * that are chained). In such case, disk_zone_wplug_unplug_bio()
	 * should not attempt to remove the zone write plug until all BIO
	 * completions are seen. Check by looking at the zone write plug
	 * reference count, which is 2 when the plug is unused (one reference
	 * taken when the plug was allocated and another reference taken by the
	 * caller context).
	 */
	if (refcount_read(&zwplug->ref) > 2)
		return false;

	/* We can remove zone write plugs for zones that are empty or full. */
	return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug);
}

static void disk_remove_zone_wplug(struct gendisk *disk,
				   struct blk_zone_wplug *zwplug)
{
	unsigned long flags;

	/* If the zone write plug was already removed, we have nothing to do. */
	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
		return;

	/*
	 * Mark the zone write plug as unhashed and drop the extra reference we
	 * took when the plug was inserted in the hash table.
	 */
	zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	hlist_del_init_rcu(&zwplug->node);
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
	disk_put_zone_wplug(zwplug);
}

static void blk_zone_wplug_bio_work(struct work_struct *work);

/*
 * Get a reference on the write plug for the zone containing @sector.
 * If the plug does not exist, it is allocated and hashed.
 * Return a pointer to the zone write plug with the plug spinlock held.
 */
static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
					sector_t sector, gfp_t gfp_mask,
					unsigned long *flags)
{
	unsigned int zno = disk_zone_no(disk, sector);
	struct blk_zone_wplug *zwplug;

again:
	zwplug = disk_get_zone_wplug(disk, sector);
	if (zwplug) {
		/*
		 * Check that a BIO completion or a zone reset or finish
		 * operation has not already removed the zone write plug from
		 * the hash table and dropped its reference count. In such case,
		 * we need to get a new plug so start over from the beginning.
		 */
		spin_lock_irqsave(&zwplug->lock, *flags);
		if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
			spin_unlock_irqrestore(&zwplug->lock, *flags);
			disk_put_zone_wplug(zwplug);
			goto again;
		}
		return zwplug;
	}

	/*
	 * Allocate and initialize a zone write plug with an extra reference
	 * so that it is not freed when the zone write plug becomes idle without
	 * the zone being full.
	 */
	zwplug = mempool_alloc(disk->zone_wplugs_pool, gfp_mask);
	if (!zwplug)
		return NULL;

	INIT_HLIST_NODE(&zwplug->node);
	refcount_set(&zwplug->ref, 2);
	spin_lock_init(&zwplug->lock);
	zwplug->flags = 0;
	zwplug->zone_no = zno;
	zwplug->wp_offset = bdev_offset_from_zone_start(disk->part0, sector);
	bio_list_init(&zwplug->bio_list);
	INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
	zwplug->disk = disk;

	spin_lock_irqsave(&zwplug->lock, *flags);
	/*
	 * Insert the new zone write plug in the hash table. This can fail only
	 * if another context already inserted a plug. Retry from the beginning
	 * in such case.
	 */
	if (!disk_insert_zone_wplug(disk, zwplug)) {
		spin_unlock_irqrestore(&zwplug->lock, *flags);
		mempool_free(zwplug, disk->zone_wplugs_pool);
		goto again;
	}

	return zwplug;
}

static inline void blk_zone_wplug_bio_io_error(struct blk_zone_wplug *zwplug,
					       struct bio *bio)
{
	struct request_queue *q = zwplug->disk->queue;

	bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
	bio_io_error(bio);
	disk_put_zone_wplug(zwplug);
	/* Drop the reference taken by disk_zone_wplug_add_bio(). */
	blk_queue_exit(q);
}

/*
 * Abort (fail) all plugged BIOs of a zone write plug.
 */
static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
{
	struct bio *bio;

	while ((bio = bio_list_pop(&zwplug->bio_list)))
		blk_zone_wplug_bio_io_error(zwplug, bio);
}

/*
 * Set a zone write plug write pointer offset to the specified value.
 * This aborts all plugged BIOs, which is fine as this function is called for
 * a zone reset operation, a zone finish operation or if the zone needs a wp
 * update from a report zone after a write error.
 */
static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
					  struct blk_zone_wplug *zwplug,
					  unsigned int wp_offset)
{
	lockdep_assert_held(&zwplug->lock);

	/* Update the zone write pointer and abort all plugged BIOs. */
	zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE;
	zwplug->wp_offset = wp_offset;
	disk_zone_wplug_abort(zwplug);

	/*
	 * The zone write plug now has no BIO plugged: remove it from the
	 * hash table so that it cannot be seen. The plug will be freed
	 * when the last reference is dropped.
	 */
	if (disk_should_remove_zone_wplug(disk, zwplug))
		disk_remove_zone_wplug(disk, zwplug);
}

static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		return zone->wp - zone->start;
	case BLK_ZONE_COND_FULL:
		return zone->len;
	case BLK_ZONE_COND_EMPTY:
		return 0;
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
	default:
		/*
		 * Conventional, offline and read-only zones do not have a valid
		 * write pointer.
		 */
		return UINT_MAX;
	}
}

static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
					   struct blk_zone *zone)
{
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	zwplug = disk_get_zone_wplug(disk, zone->start);
	if (!zwplug)
		return;

	spin_lock_irqsave(&zwplug->lock, flags);
	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
		disk_zone_wplug_set_wp_offset(disk, zwplug,
					      blk_zone_wp_offset(zone));
	spin_unlock_irqrestore(&zwplug->lock, flags);

	disk_put_zone_wplug(zwplug);
}

static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector)
{
	struct disk_report_zones_cb_args args = {
		.disk = disk,
	};

	return disk->fops->report_zones(disk, sector, 1,
					disk_report_zones_cb, &args);
}

static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
						  unsigned int wp_offset)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	sector_t sector = bio->bi_iter.bi_sector;
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	/* Conventional zones cannot be reset nor finished. */
	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
		bio_io_error(bio);
		return true;
	}

	/*
	 * No-wait reset or finish BIOs do not make much sense as the callers
	 * issue these as blocking operations in most cases. To avoid issues
	 * with the BIO execution potentially failing with BLK_STS_AGAIN, warn
	 * about REQ_NOWAIT being set and ignore that flag.
	 */
	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
		bio->bi_opf &= ~REQ_NOWAIT;

	/*
	 * If we have a zone write plug, set its write pointer offset to 0
	 * (reset case) or to the zone size (finish case). This will abort all
	 * BIOs plugged for the target zone. It is fine as resetting or
	 * finishing zones while writes are still in-flight will result in the
	 * writes failing anyway.
	 */
	zwplug = disk_get_zone_wplug(disk, sector);
	if (zwplug) {
		spin_lock_irqsave(&zwplug->lock, flags);
		disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
		spin_unlock_irqrestore(&zwplug->lock, flags);
		disk_put_zone_wplug(zwplug);
	}

	return false;
}

static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	struct blk_zone_wplug *zwplug;
	unsigned long flags;
	sector_t sector;

	/*
	 * Set the write pointer offset of all zone write plugs to 0. This will
	 * abort all plugged BIOs. It is fine as resetting zones while writes
	 * are still in-flight will result in the writes failing anyway.
	 */
	for (sector = 0; sector < get_capacity(disk);
	     sector += disk->queue->limits.chunk_sectors) {
		zwplug = disk_get_zone_wplug(disk, sector);
		if (zwplug) {
			spin_lock_irqsave(&zwplug->lock, flags);
			disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
			spin_unlock_irqrestore(&zwplug->lock, flags);
			disk_put_zone_wplug(zwplug);
		}
	}

	return false;
}

static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
					      struct blk_zone_wplug *zwplug)
{
	/*
	 * Take a reference on the zone write plug and schedule the submission
	 * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
	 * reference we take here.
	 */
	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
	refcount_inc(&zwplug->ref);
	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
}

static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
					   struct blk_zone_wplug *zwplug,
					   struct bio *bio, unsigned int nr_segs)
{
	bool schedule_bio_work = false;

	/*
	 * Grab an extra reference on the BIO request queue usage counter.
	 * This reference will be reused to submit a request for the BIO for
	 * blk-mq devices and dropped when the BIO is failed and after
	 * it is issued in the case of BIO-based devices.
	 */
	percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter);

	/*
	 * The BIO is being plugged and thus will have to wait for the on-going
	 * write and for all other writes already plugged. So polling makes
	 * no sense.
	 */
	bio_clear_polled(bio);

	/*
	 * REQ_NOWAIT BIOs are always handled using the zone write plug BIO
	 * work, which can block. So clear the REQ_NOWAIT flag and schedule the
	 * work if this is the first BIO we are plugging.
	 */
	if (bio->bi_opf & REQ_NOWAIT) {
		schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
		bio->bi_opf &= ~REQ_NOWAIT;
	}

	/*
	 * Reuse the poll cookie field to store the number of segments when
	 * split to the hardware limits.
	 */
	bio->__bi_nr_segments = nr_segs;

	/*
	 * We always receive BIOs after they are split and ready to be issued.
	 * The block layer passes the parts of a split BIO in order, and the
	 * user must also issue writes sequentially. So simply add the new BIO
	 * at the tail of the list to preserve the sequential write order.
	 */
	bio_list_add(&zwplug->bio_list, bio);

	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;

	if (schedule_bio_work)
		disk_zone_wplug_schedule_bio_work(disk, zwplug);
}

/*
 * Called from bio_attempt_back_merge() when a BIO was merged with a request.
 */
void blk_zone_write_plug_bio_merged(struct bio *bio)
{
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	/*
	 * If the BIO was already plugged, then we were called through
	 * blk_zone_write_plug_init_request() -> blk_attempt_bio_merge().
	 * For this case, we already hold a reference on the zone write plug for
	 * the BIO and blk_zone_write_plug_init_request() will handle the
	 * zone write pointer offset update.
	 */
	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
		return;

	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);

	/*
	 * Get a reference on the zone write plug of the target zone and advance
	 * the zone write pointer offset. Given that this is a merge, we already
	 * have at least one request and one BIO referencing the zone write
	 * plug. So this should not fail.
	 */
	zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk,
				     bio->bi_iter.bi_sector);
	if (WARN_ON_ONCE(!zwplug))
		return;

	spin_lock_irqsave(&zwplug->lock, flags);
	zwplug->wp_offset += bio_sectors(bio);
	spin_unlock_irqrestore(&zwplug->lock, flags);
}

/*
 * Attempt to merge plugged BIOs with a newly prepared request for a BIO that
 * already went through zone write plugging (either a new BIO or one that was
 * unplugged).
 */
void blk_zone_write_plug_init_request(struct request *req)
{
	sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
	struct request_queue *q = req->q;
	struct gendisk *disk = q->disk;
	struct blk_zone_wplug *zwplug =
		disk_get_zone_wplug(disk, blk_rq_pos(req));
	unsigned long flags;
	struct bio *bio;

	if (WARN_ON_ONCE(!zwplug))
		return;

	/*
	 * Indicate that completion of this request needs to be handled with
	 * blk_zone_write_plug_finish_request(), which will drop the reference
	 * on the zone write plug we took above on entry to this function.
	 */
	req->rq_flags |= RQF_ZONE_WRITE_PLUGGING;

	if (blk_queue_nomerges(q))
		return;

	/*
	 * Walk through the list of plugged BIOs to check if they can be merged
	 * into the back of the request.
	 */
	spin_lock_irqsave(&zwplug->lock, flags);
	while (!disk_zone_wplug_is_full(disk, zwplug)) {
		bio = bio_list_peek(&zwplug->bio_list);
		if (!bio)
			break;

		if (bio->bi_iter.bi_sector != req_back_sector ||
		    !blk_rq_merge_ok(req, bio))
			break;

		WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE_ZEROES &&
			     !bio->__bi_nr_segments);

		bio_list_pop(&zwplug->bio_list);
		if (bio_attempt_back_merge(req, bio, bio->__bi_nr_segments) !=
		    BIO_MERGE_OK) {
			bio_list_add_head(&zwplug->bio_list, bio);
			break;
		}

		/* Drop the reference taken by disk_zone_wplug_add_bio(). */
		blk_queue_exit(q);
		zwplug->wp_offset += bio_sectors(bio);

		req_back_sector += bio_sectors(bio);
	}
	spin_unlock_irqrestore(&zwplug->lock, flags);
}

/*
 * Check and prepare a BIO for submission by incrementing the write pointer
 * offset of its zone write plug and changing zone append operations into
 * regular writes when zone append emulation is needed.
 */
static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
				       struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	lockdep_assert_held(&zwplug->lock);

	/*
	 * If we lost track of the zone write pointer due to a write error,
	 * the user must either execute a report zones, reset the zone or finish
	 * the zone to recover a reliable write pointer position. Fail BIOs if
	 * the user did not do that as we cannot handle emulated zone append
	 * otherwise.
	 */
	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
		return false;

	/*
	 * Check that the user is not attempting to write to a full zone.
	 * We know such BIO will fail, and that would potentially overflow our
	 * write pointer offset beyond the end of the zone.
	 */
	if (disk_zone_wplug_is_full(disk, zwplug))
		return false;

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		/*
		 * Use a regular write starting at the current write pointer.
		 * Similarly to native zone append operations, do not allow
		 * merging.
		 */
		bio->bi_opf &= ~REQ_OP_MASK;
		bio->bi_opf |= REQ_OP_WRITE | REQ_NOMERGE;
		bio->bi_iter.bi_sector += zwplug->wp_offset;

		/*
		 * Remember that this BIO is in fact a zone append operation
		 * so that we can restore its operation code on completion.
		 */
		bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
	} else {
		/*
		 * Check for non-sequential writes early as we know that BIOs
		 * with a start sector not aligned to the zone write pointer
		 * will fail.
		 */
		if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
			return false;
	}

	/* Advance the zone write pointer offset. */
	zwplug->wp_offset += bio_sectors(bio);

	return true;
}

static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	sector_t sector = bio->bi_iter.bi_sector;
	struct blk_zone_wplug *zwplug;
	gfp_t gfp_mask = GFP_NOIO;
	unsigned long flags;

	/*
	 * BIOs must be fully contained within a zone so that we use the correct
	 * zone write plug for the entire BIO. For blk-mq devices, the block
	 * layer should already have done any splitting required to ensure this
	 * and this BIO should thus not be straddling zone boundaries. For
	 * BIO-based devices, it is the responsibility of the driver to split
	 * the bio before submitting it.
	 */
	if (WARN_ON_ONCE(bio_straddles_zones(bio))) {
		bio_io_error(bio);
		return true;
	}

	/* Conventional zones do not need write plugging. */
	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
		/* Zone append to conventional zones is not allowed. */
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			bio_io_error(bio);
			return true;
		}
		return false;
	}

	if (bio->bi_opf & REQ_NOWAIT)
		gfp_mask = GFP_NOWAIT;

	zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
	if (!zwplug) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
		return true;
	}

	/* Indicate that this BIO is being handled using zone write plugging. */
	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);

	/*
	 * If the zone is already plugged, add the BIO to the plug BIO list.
	 * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a
	 * BLK_STS_AGAIN failure if we let the BIO execute.
	 * Otherwise, plug and let the BIO execute.
	 */
	if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) ||
	    (bio->bi_opf & REQ_NOWAIT))
		goto plug;

	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
		spin_unlock_irqrestore(&zwplug->lock, flags);
		bio_io_error(bio);
		return true;
	}

	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;

	spin_unlock_irqrestore(&zwplug->lock, flags);

	return false;

plug:
	disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);

	spin_unlock_irqrestore(&zwplug->lock, flags);

	return true;
}

/**
 * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
 * @bio: The BIO being submitted
 * @nr_segs: The number of physical segments of @bio
 *
 * Handle write, write zeroes and zone append operations requiring emulation
 * using zone write plugging.
 *
 * Return true whenever @bio execution needs to be delayed through the zone
 * write plug. Otherwise, return false to let the submission path process
 * @bio normally.
 */
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	struct block_device *bdev = bio->bi_bdev;

	if (!bdev->bd_disk->zone_wplugs_hash)
		return false;

	/*
	 * If the BIO already has the plugging flag set, then it was already
	 * handled through this path and this is a submission from the zone
	 * plug bio submit work.
	 */
	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
		return false;

	/*
	 * We do not need to do anything special for empty flush BIOs, e.g.
	 * BIOs such as issued by blkdev_issue_flush(). This is because it is
	 * the responsibility of the user to first wait for the completion of
	 * write operations for flush to have any effect on the persistence of
	 * the written data.
	 */
	if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
		return false;

	/*
	 * Regular writes and write zeroes need to be handled through the target
	 * zone write plug. This includes writes with REQ_FUA | REQ_PREFLUSH
	 * which may need to go through the flush machinery depending on the
	 * target device capabilities. Plugging such writes is fine as the flush
	 * machinery operates at the request level, below the plug, and
	 * completion of the flush sequence will go through the regular BIO
	 * completion, which will handle zone write plugging.
	 * Zone append operations for devices that requested emulation must
	 * also be plugged so that these BIOs can be changed into regular
	 * write BIOs.
	 * Zone reset, reset all and finish commands need special treatment
	 * to correctly track the write pointer offset of zones. These commands
	 * are not plugged as we do not need serialization with write
	 * operations. It is the responsibility of the user to not issue reset
	 * and finish commands when write operations are in flight.
	 */
	switch (bio_op(bio)) {
	case REQ_OP_ZONE_APPEND:
		if (!bdev_emulates_zone_append(bdev))
			return false;
		fallthrough;
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
		return blk_zone_wplug_handle_write(bio, nr_segs);
	case REQ_OP_ZONE_RESET:
		return blk_zone_wplug_handle_reset_or_finish(bio, 0);
	case REQ_OP_ZONE_FINISH:
		return blk_zone_wplug_handle_reset_or_finish(bio,
						bdev_zone_sectors(bdev));
	case REQ_OP_ZONE_RESET_ALL:
		return blk_zone_wplug_handle_reset_all(bio);
	default:
		return false;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_zone_plug_bio);
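/*
 * Example (illustrative only, not part of the original file): a BIO-based
 * driver using zone write plugging from its ->submit_bio() handler. The
 * driver-specific names below are hypothetical:
 *
 *	static void my_dev_submit_bio(struct bio *bio)
 *	{
 *		// Let zone write plugging delay the BIO if needed. Plugged
 *		// BIOs are resubmitted later from the plug BIO work.
 *		if (blk_zone_plug_bio(bio, 0))
 *			return;
 *
 *		my_dev_issue_bio(bio);
 *	}
 */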
static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
				       struct blk_zone_wplug *zwplug)
{
	unsigned long flags;

	spin_lock_irqsave(&zwplug->lock, flags);

	/* Schedule submission of the next plugged BIO if we have one. */
	if (!bio_list_empty(&zwplug->bio_list)) {
		disk_zone_wplug_schedule_bio_work(disk, zwplug);
		spin_unlock_irqrestore(&zwplug->lock, flags);
		return;
	}

	zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;

	/*
	 * If the zone is full (it was fully written or finished) or empty
	 * (it was reset), remove its zone write plug from the hash table.
	 */
	if (disk_should_remove_zone_wplug(disk, zwplug))
		disk_remove_zone_wplug(disk, zwplug);

	spin_unlock_irqrestore(&zwplug->lock, flags);
}

void blk_zone_write_plug_bio_endio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	struct blk_zone_wplug *zwplug =
		disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
	unsigned long flags;

	if (WARN_ON_ONCE(!zwplug))
		return;

	/* Make sure we do not see this BIO again by clearing the plug flag. */
	bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);

	/*
	 * If this is a regular write emulating a zone append operation,
	 * restore the original operation code.
	 */
	if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
		bio->bi_opf &= ~REQ_OP_MASK;
		bio->bi_opf |= REQ_OP_ZONE_APPEND;
	}

	/*
	 * If the BIO failed, abort all plugged BIOs and mark the plug as
	 * needing a write pointer update.
	 */
	if (bio->bi_status != BLK_STS_OK) {
		spin_lock_irqsave(&zwplug->lock, flags);
		disk_zone_wplug_abort(zwplug);
		zwplug->flags |= BLK_ZONE_WPLUG_NEED_WP_UPDATE;
		spin_unlock_irqrestore(&zwplug->lock, flags);
	}

	/* Drop the reference we took when the BIO was issued. */
	disk_put_zone_wplug(zwplug);

	/*
	 * For BIO-based devices, blk_zone_write_plug_finish_request()
	 * is not called. So we need to schedule execution of the next
	 * plugged BIO here.
	 */
	if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
		disk_zone_wplug_unplug_bio(disk, zwplug);

	/* Drop the reference we took when entering this function. */
	disk_put_zone_wplug(zwplug);
}

void blk_zone_write_plug_finish_request(struct request *req)
{
	struct gendisk *disk = req->q->disk;
	struct blk_zone_wplug *zwplug;

	zwplug = disk_get_zone_wplug(disk, req->__sector);
	if (WARN_ON_ONCE(!zwplug))
		return;

	req->rq_flags &= ~RQF_ZONE_WRITE_PLUGGING;

	/*
	 * Drop the reference we took when the request was initialized in
	 * blk_zone_write_plug_init_request().
	 */
	disk_put_zone_wplug(zwplug);

	disk_zone_wplug_unplug_bio(disk, zwplug);

	/* Drop the reference we took when entering this function. */
	disk_put_zone_wplug(zwplug);
}

static void blk_zone_wplug_bio_work(struct work_struct *work)
{
	struct blk_zone_wplug *zwplug =
		container_of(work, struct blk_zone_wplug, bio_work);
	struct block_device *bdev;
	unsigned long flags;
	struct bio *bio;

	/*
	 * Submit the next plugged BIO. If we do not have any, clear
	 * the plugged flag.
	 */
	spin_lock_irqsave(&zwplug->lock, flags);

again:
	bio = bio_list_pop(&zwplug->bio_list);
	if (!bio) {
		zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
		spin_unlock_irqrestore(&zwplug->lock, flags);
		goto put_zwplug;
	}

	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
		blk_zone_wplug_bio_io_error(zwplug, bio);
		goto again;
	}

	spin_unlock_irqrestore(&zwplug->lock, flags);

	bdev = bio->bi_bdev;
	submit_bio_noacct_nocheck(bio);

	/*
	 * blk-mq devices will reuse the extra reference on the request queue
	 * usage counter we took when the BIO was plugged, but the submission
	 * path for BIO-based devices will not do that. So drop this extra
	 * reference here.
	 */
	if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
		blk_queue_exit(bdev->bd_disk->queue);

put_zwplug:
	/* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */
	disk_put_zone_wplug(zwplug);
}

static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
{
	return 1U << disk->zone_wplugs_hash_bits;
}

void disk_init_zone_resources(struct gendisk *disk)
{
	spin_lock_init(&disk->zone_wplugs_lock);
}
/*
 * For the size of a disk zone write plug hash table, use the size of the
 * zone write plug mempool, which is the maximum of the disk open zones and
 * active zones limits. But do not exceed 4KB (512 hlist head entries), that
 * is, 9 bits. For a disk that has no limits, mempool size defaults to 128.
 */
#define BLK_ZONE_WPLUG_MAX_HASH_BITS		9
#define BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE	128

static int disk_alloc_zone_resources(struct gendisk *disk,
				     unsigned int pool_size)
{
	unsigned int i;

	disk->zone_wplugs_hash_bits =
		min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);

	disk->zone_wplugs_hash =
		kcalloc(disk_zone_wplugs_hash_size(disk),
			sizeof(struct hlist_head), GFP_KERNEL);
	if (!disk->zone_wplugs_hash)
		return -ENOMEM;

	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
		INIT_HLIST_HEAD(&disk->zone_wplugs_hash[i]);

	disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size,
						sizeof(struct blk_zone_wplug));
	if (!disk->zone_wplugs_pool)
		goto free_hash;

	disk->zone_wplugs_wq =
		alloc_workqueue("%s_zwplugs", WQ_MEM_RECLAIM | WQ_HIGHPRI,
				pool_size, disk->disk_name);
	if (!disk->zone_wplugs_wq)
		goto destroy_pool;

	return 0;

destroy_pool:
	mempool_destroy(disk->zone_wplugs_pool);
	disk->zone_wplugs_pool = NULL;
free_hash:
	kfree(disk->zone_wplugs_hash);
	disk->zone_wplugs_hash = NULL;
	disk->zone_wplugs_hash_bits = 0;
	return -ENOMEM;
}
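/*
 * Sizing example (illustrative only, not part of the original file): for a
 * device with no open/active zone limits, pool_size defaults to 128, so
 * zone_wplugs_hash_bits = min(ilog2(128) + 1, 9) = 8, giving a hash table of
 * 1 << 8 = 256 hlist heads (2KB with 8-byte pointers). The 9-bit cap keeps
 * the table at or below 512 heads (4KB) regardless of the zone resource
 * limits the device reports.
 */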
static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
{
	struct blk_zone_wplug *zwplug;
	unsigned int i;

	if (!disk->zone_wplugs_hash)
		return;

	/* Free all the zone write plugs we have. */
	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
		while (!hlist_empty(&disk->zone_wplugs_hash[i])) {
			zwplug = hlist_entry(disk->zone_wplugs_hash[i].first,
					     struct blk_zone_wplug, node);
			refcount_inc(&zwplug->ref);
			disk_remove_zone_wplug(disk, zwplug);
			disk_put_zone_wplug(zwplug);
		}
	}

	kfree(disk->zone_wplugs_hash);
	disk->zone_wplugs_hash = NULL;
	disk->zone_wplugs_hash_bits = 0;
}

static unsigned int disk_set_conv_zones_bitmap(struct gendisk *disk,
					       unsigned long *bitmap)
{
	unsigned int nr_conv_zones = 0;
	unsigned long flags;

	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	if (bitmap)
		nr_conv_zones = bitmap_weight(bitmap, disk->nr_zones);
	bitmap = rcu_replace_pointer(disk->conv_zones_bitmap, bitmap,
				     lockdep_is_held(&disk->zone_wplugs_lock));
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);

	kfree_rcu_mightsleep(bitmap);

	return nr_conv_zones;
}

void disk_free_zone_resources(struct gendisk *disk)
{
	if (!disk->zone_wplugs_pool)
		return;

	if (disk->zone_wplugs_wq) {
		destroy_workqueue(disk->zone_wplugs_wq);
		disk->zone_wplugs_wq = NULL;
	}

	disk_destroy_zone_wplugs_hash_table(disk);

	/*
	 * Wait for the zone write plugs to be RCU-freed before
	 * destroying the mempool.
	 */
	rcu_barrier();

	mempool_destroy(disk->zone_wplugs_pool);
	disk->zone_wplugs_pool = NULL;

	disk_set_conv_zones_bitmap(disk, NULL);
	disk->zone_capacity = 0;
	disk->last_zone_capacity = 0;
	disk->nr_zones = 0;
}
static inline bool disk_need_zone_resources(struct gendisk *disk)
{
	/*
	 * All mq zoned devices need zone resources so that the block layer
	 * can automatically handle write BIO plugging. BIO-based device drivers
	 * (e.g. DM devices) are normally responsible for handling zone write
	 * ordering and do not need zone resources, unless the driver requires
	 * zone append emulation.
	 */
	return queue_is_mq(disk->queue) ||
		queue_emulates_zone_append(disk->queue);
}

static int disk_revalidate_zone_resources(struct gendisk *disk,
					  unsigned int nr_zones)
{
	struct queue_limits *lim = &disk->queue->limits;
	unsigned int pool_size;

	if (!disk_need_zone_resources(disk))
		return 0;

	/*
	 * If the device has no limit on the maximum number of open and active
	 * zones, use BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE.
	 */
	pool_size = max(lim->max_open_zones, lim->max_active_zones);
	if (!pool_size)
		pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones);

	if (!disk->zone_wplugs_hash)
		return disk_alloc_zone_resources(disk, pool_size);

	return 0;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned int	nr_zones;
	unsigned int	zone_capacity;
	unsigned int	last_zone_capacity;
	sector_t	sector;
};

/*
 * Update the disk zone resources information and device queue limits.
 * The disk queue is frozen when this is executed.
 */
static int disk_update_zone_resources(struct gendisk *disk,
				      struct blk_revalidate_zone_args *args)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_seq_zones, nr_conv_zones;
	unsigned int pool_size;
	struct queue_limits lim;

	disk->nr_zones = args->nr_zones;
	disk->zone_capacity = args->zone_capacity;
	disk->last_zone_capacity = args->last_zone_capacity;
	nr_conv_zones =
		disk_set_conv_zones_bitmap(disk, args->conv_zones_bitmap);
	if (nr_conv_zones >= disk->nr_zones) {
		pr_warn("%s: Invalid number of conventional zones %u / %u\n",
			disk->disk_name, nr_conv_zones, disk->nr_zones);
		return -ENODEV;
	}

	lim = queue_limits_start_update(q);

	/*
	 * Some devices can advertise zone resource limits that are larger than
	 * the number of sequential zones of the zoned block device, e.g. a
	 * small ZNS namespace. For such case, assume that the zoned device has
	 * no zone resource limits.
	 */
	nr_seq_zones = disk->nr_zones - nr_conv_zones;
	if (lim.max_open_zones >= nr_seq_zones)
		lim.max_open_zones = 0;
	if (lim.max_active_zones >= nr_seq_zones)
		lim.max_active_zones = 0;

	if (!disk->zone_wplugs_pool)
		goto commit;

	/*
	 * If the device has no limit on the maximum number of open and active
	 * zones, set its max open zone limit to the mempool size to indicate
	 * to the user that there is a potential performance impact due to
	 * dynamic zone write plug allocation when simultaneously writing to
	 * more zones than the size of the mempool.
	 */
	pool_size = max(lim.max_open_zones, lim.max_active_zones);
	if (!pool_size)
		pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_seq_zones);

	mempool_resize(disk->zone_wplugs_pool, pool_size);

	if (!lim.max_open_zones && !lim.max_active_zones) {
		if (pool_size < nr_seq_zones)
			lim.max_open_zones = pool_size;
		else
			lim.max_open_zones = 0;
	}

commit:
	return queue_limits_commit_update_frozen(q, &lim);
}

static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
				    struct blk_revalidate_zone_args *args)
{
	struct gendisk *disk = args->disk;

	if (zone->capacity != zone->len) {
		pr_warn("%s: Invalid conventional zone capacity\n",
			disk->disk_name);
		return -ENODEV;
	}

	if (disk_zone_is_last(disk, zone))
		args->last_zone_capacity = zone->capacity;

	if (!disk_need_zone_resources(disk))
		return 0;

	if (!args->conv_zones_bitmap) {
		args->conv_zones_bitmap =
			bitmap_zalloc(args->nr_zones, GFP_NOIO);
		if (!args->conv_zones_bitmap)
			return -ENOMEM;
	}

	set_bit(idx, args->conv_zones_bitmap);

	return 0;
}

static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
				   struct blk_revalidate_zone_args *args)
{
	struct gendisk *disk = args->disk;
	struct blk_zone_wplug *zwplug;
	unsigned int wp_offset;
	unsigned long flags;

	/*
	 * Remember the capacity of the first sequential zone and check
	 * if it is constant for all zones, ignoring the last zone as it can be
	 * smaller.
	 */
	if (!args->zone_capacity)
		args->zone_capacity = zone->capacity;
	if (disk_zone_is_last(disk, zone)) {
		args->last_zone_capacity = zone->capacity;
	} else if (zone->capacity != args->zone_capacity) {
		pr_warn("%s: Invalid variable zone capacity\n",
			disk->disk_name);
		return -ENODEV;
	}

	/*
	 * We need to track the write pointer of all zones that are not
	 * empty nor full. So make sure we have a zone write plug for
	 * such zone if the device has a zone write plug hash table.
	 */
	if (!disk->zone_wplugs_hash)
		return 0;

	disk_zone_wplug_sync_wp_offset(disk, zone);

	wp_offset = blk_zone_wp_offset(zone);
	if (!wp_offset || wp_offset >= zone->capacity)
		return 0;

	zwplug = disk_get_and_lock_zone_wplug(disk, zone->wp, GFP_NOIO, &flags);
	if (!zwplug)
		return -ENOMEM;
	spin_unlock_irqrestore(&zwplug->lock, flags);
	disk_put_zone_wplug(zwplug);

	return 0;
}
/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	sector_t zone_sectors = disk->queue->limits.chunk_sectors;
	int ret;

	/* Check for bad zones and holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	if (zone->start >= get_capacity(disk) || !zone->len) {
		pr_warn("%s: Invalid zone start %llu, length %llu\n",
			disk->disk_name, zone->start, zone->len);
		return -ENODEV;
	}

	/*
	 * All zones must have the same size, with the exception of an eventual
	 * smaller last zone.
	 */
	if (!disk_zone_is_last(disk, zone)) {
		if (zone->len != zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else if (zone->len > zone_sectors) {
		pr_warn("%s: Invalid zoned device with larger last zone size\n",
			disk->disk_name);
		return -ENODEV;
	}

	if (!zone->capacity || zone->capacity > zone->len) {
		pr_warn("%s: Invalid zone capacity\n",
			disk->disk_name);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		ret = blk_revalidate_conv_zone(zone, idx, args);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
		ret = blk_revalidate_seq_zone(zone, idx, args);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		ret = -ENODEV;
	}

	if (!ret)
		args->sector += zone->len;

	return ret;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone write plugs
 * @disk:	Target disk
 *
 * Helper function for low-level device drivers to check, (re) allocate and
 * initialize resources used for managing zoned disks. This function should
 * normally be called by blk-mq based drivers when a zoned gendisk is probed
 * and when the zone configuration of the gendisk changes (e.g. after a format).
 * Before calling this function, the device driver must already have set the
 * device zone size (chunk_sectors limit) and the max zone append limit.
 * BIO based drivers can also use this function as long as the device queue
 * can be safely frozen.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	sector_t zone_sectors = q->limits.chunk_sectors;
	sector_t capacity = get_capacity(disk);
	struct blk_revalidate_zone_args args = { };
	unsigned int noio_flag;
	int ret = -ENOMEM;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;

	if (!capacity)
		return -ENODEV;

	/*
	 * Check that the device driver indicated a valid zone size and that
	 * the max zone append limit is set.
	 */
	if (!zone_sectors || !is_power_of_2(zone_sectors)) {
		pr_warn("%s: Invalid non power of two zone size (%llu)\n",
			disk->disk_name, zone_sectors);
		return -ENODEV;
	}

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	args.disk = disk;
	args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
	noio_flag = memalloc_noio_save();
	ret = disk_revalidate_zone_resources(disk, args.nr_zones);
	if (ret) {
		memalloc_noio_restore(noio_flag);
		return ret;
	}

	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != capacity) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Set the new disk zone parameters only once the queue is frozen and
	 * all I/Os are completed.
	 */
	if (ret > 0)
		ret = disk_update_zone_resources(disk, &args);
	else
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
	if (ret) {
		blk_mq_freeze_queue(q);
		disk_free_zone_resources(disk);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
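/*
 * Example (illustrative only, not part of the original file): a blk-mq
 * driver revalidating zones from its probe path. As the kernel-doc above
 * states, the zone size (chunk_sectors) and max zone append limits must
 * already be set in the queue limits at this point:
 *
 *	ret = blk_revalidate_disk_zones(disk);
 *	if (ret)
 *		goto out_cleanup_disk;	// hypothetical error label
 */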
/**
 * blk_zone_issue_zeroout - zero-fill a block range in a zone
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill a block range in a zone (@sector must be equal to the zone write
 *  pointer), handling potential errors due to the (initially unknown) lack of
 *  hardware offload (see blkdev_issue_zeroout()).
 */
int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;

	if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
		return -EIO;

	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
				   BLKDEV_ZERO_NOFALLBACK);
	if (ret != -EOPNOTSUPP)
		return ret;

	/*
	 * The failed call to blkdev_issue_zeroout() advanced the zone write
	 * pointer. Undo this using a report zone to update the zone write
	 * pointer to the correct current value.
	 */
	ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector);
	if (ret != 1)
		return ret < 0 ? ret : -EIO;

	/*
	 * Retry without BLKDEV_ZERO_NOFALLBACK to force the fallback to a
	 * regular write with zero-pages.
	 */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0);
}
EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout);
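/*
 * Example (illustrative only, not part of the original file): zero-filling
 * the unwritten remainder of a zone from its write pointer, assuming 'zone'
 * was obtained from a prior report zones:
 *
 *	ret = blk_zone_issue_zeroout(bdev, zone->wp,
 *				     zone->start + zone->capacity - zone->wp,
 *				     GFP_KERNEL);
 */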
#ifdef CONFIG_BLK_DEBUG_FS
static void queue_zone_wplug_show(struct blk_zone_wplug *zwplug,
				  struct seq_file *m)
{
	unsigned int zwp_wp_offset, zwp_flags;
	unsigned int zwp_zone_no, zwp_ref;
	unsigned int zwp_bio_list_size;
	unsigned long flags;

	spin_lock_irqsave(&zwplug->lock, flags);
	zwp_zone_no = zwplug->zone_no;
	zwp_flags = zwplug->flags;
	zwp_ref = refcount_read(&zwplug->ref);
	zwp_wp_offset = zwplug->wp_offset;
	zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
	spin_unlock_irqrestore(&zwplug->lock, flags);

	seq_printf(m, "%u 0x%x %u %u %u\n", zwp_zone_no, zwp_flags, zwp_ref,
		   zwp_wp_offset, zwp_bio_list_size);
}

int queue_zone_wplugs_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct gendisk *disk = q->disk;
	struct blk_zone_wplug *zwplug;
	unsigned int i;

	if (!disk->zone_wplugs_hash)
		return 0;

	rcu_read_lock();
	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
		hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[i],
					 node)
			queue_zone_wplug_show(zwplug, m);
	rcu_read_unlock();

	return 0;
}

#endif