// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

static inline void null_lock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_unlock_irq(&dev->zone_res_lock);
}

static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

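	/*
	 * Initialize the zone array: conventional zones come first, followed
	 * by sequential write required zones. The last sequential zone may be
	 * smaller than the zone size if the device capacity is not a multiple
	 * of the zone size.
	 */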
	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	return 0;
}

int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	disk_set_zoned(nullb->disk, BLK_ZONED_HM);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
	disk_set_max_active_zones(nullb->disk, dev->zone_max_active);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
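/*
 * For example, in a sequential zone whose write pointer is 8 sectors past the
 * zone start, a 2KB read beginning 4 sectors into the zone is fully valid,
 * while a 2KB read beginning 6 sectors in is truncated to the 1KB below the
 * write pointer.
 */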
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}

static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
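/*
 * Illustrative example (hypothetical limits, not defaults): with
 * zone_max_open=2 and zone_max_active=3, writing to an empty zone while two
 * zones are implicitly open and none are closed makes null_check_open() call
 * null_close_imp_open_zone() so that the new zone can be implicitly opened.
 * With two implicitly open zones and one closed zone, the active count is
 * already at the limit and null_check_active() returns
 * BLK_STS_ZONE_ACTIVE_RESOURCE.
 */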
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL ||
	    zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		/* Cannot write to the zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (dev->queue_mode == NULL_Q_MQ)
			cmd->rq->__sector = sector;
		else
			cmd->bio->bi_iter.bi_sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

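/*
 * Explicitly open a zone (REQ_OP_ZONE_OPEN). Opening an empty or closed zone
 * is subject to the open/active zone resource limits; opening a zone that is
 * already implicitly or explicitly open is not.
 */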
static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

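/*
 * Process a zone management operation. REQ_OP_ZONE_RESET_ALL walks all
 * sequential write required zones; the other operations apply to the single
 * zone containing the target sector.
 */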
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone that is already
	 * read-only, restore the normal empty condition. Do the same if the
	 * offline condition is requested for an offline zone. Otherwise, set
	 * the specified zone condition, finishing the zone beforehand to free
	 * up zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = (sector_t)-1;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify the zone containing the sector written to the configfs file, then
 * set the zone condition.
 */
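/*
 * Usage example (the attribute names and device name are assumptions for
 * illustration, not guaranteed by this file): if this store handler backs
 * configfs attributes such as zone_readonly/zone_offline, then
 *
 *   echo 524288 > /sys/kernel/config/nullb/nullb0/zone_readonly
 *
 * would mark the zone containing sector 524288 read-only, and writing the
 * same sector again would return that zone to the empty condition.
 */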
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Can not change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}