// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCSI Zoned Block commands
 *
 * Copyright (C) 2014-2015 SUSE Linux GmbH
 * Written by: Hannes Reinecke <hare@suse.de>
 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
 */

#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include "sd.h"

#define CREATE_TRACE_POINTS
#include "sd_trace.h"

/**
 * sd_zbc_get_zone_wp_offset - Get zone write pointer offset.
 * @zone: Zone for which to return the write pointer offset.
 *
 * Return: offset of the write pointer from the start of the zone.
 */
static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
{
	if (zone->type == ZBC_ZONE_TYPE_CONV)
		return 0;

	switch (zone->cond) {
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		return zone->wp - zone->start;
	case BLK_ZONE_COND_FULL:
		return zone->len;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
	default:
		/*
		 * Offline and read-only zones do not have a valid
		 * write pointer. Use 0 as for an empty zone.
		 */
		return 0;
	}
}

/* Whether or not a SCSI zone descriptor describes a gap zone. */
static bool sd_zbc_is_gap_zone(const u8 buf[64])
{
	return (buf[0] & 0xf) == ZBC_ZONE_TYPE_GAP;
}

/**
 * sd_zbc_parse_report - Parse a SCSI zone descriptor
 * @sdkp: SCSI disk pointer.
 * @buf: SCSI zone descriptor.
 * @idx: Index of the zone relative to the first zone reported by the current
 *	sd_zbc_report_zones() call.
 * @cb: Callback function pointer.
 * @data: Second argument passed to @cb.
 *
 * Return: Value returned by @cb.
 *
 * Convert a SCSI zone descriptor into struct blk_zone format. Additionally,
 * call @cb(blk_zone, @data).
 */
static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
			       unsigned int idx, report_zones_cb cb, void *data)
{
	struct scsi_device *sdp = sdkp->device;
	struct blk_zone zone = { 0 };
	sector_t start_lba, gran;
	int ret;

	if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf)))
		return -EINVAL;

	zone.type = buf[0] & 0x0f;
	zone.cond = (buf[1] >> 4) & 0xf;
	if (buf[1] & 0x01)
		zone.reset = 1;
	if (buf[1] & 0x02)
		zone.non_seq = 1;

	start_lba = get_unaligned_be64(&buf[16]);
	zone.start = logical_to_sectors(sdp, start_lba);
	zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
	zone.len = zone.capacity;
	if (sdkp->zone_starting_lba_gran) {
		gran = logical_to_sectors(sdp, sdkp->zone_starting_lba_gran);
		if (zone.len > gran) {
			sd_printk(KERN_ERR, sdkp,
				  "Invalid zone at LBA %llu with capacity %llu and length %llu; granularity = %llu\n",
				  start_lba,
				  sectors_to_logical(sdp, zone.capacity),
				  sectors_to_logical(sdp, zone.len),
				  sectors_to_logical(sdp, gran));
			return -EINVAL;
		}
		/*
		 * Use the starting LBA granularity instead of the zone length
		 * obtained from the REPORT ZONES command.
		 */
		zone.len = gran;
	}
	if (zone.cond == ZBC_ZONE_COND_FULL)
		zone.wp = zone.start + zone.len;
	else
		zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));

	ret = cb(&zone, idx, data);
	if (ret)
		return ret;

	if (sdkp->rev_wp_offset)
		sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone);

	return 0;
}
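/*
 * For reference, the fields of the 64-byte ZBC zone descriptor that
 * sd_zbc_parse_report() decodes above (a summary of the byte offsets the
 * code reads, not a full restatement of the ZBC/ZAC specifications):
 *
 *	byte 0, bits 3:0	zone type
 *	byte 1, bits 7:4	zone condition
 *	byte 1, bit 1		non_seq (non-sequential write resources active)
 *	byte 1, bit 0		reset recommended
 *	bytes 8..15		zone length/capacity (big-endian LBAs)
 *	bytes 16..23		zone start LBA (big-endian)
 *	bytes 24..31		write pointer LBA (big-endian)
 */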
/**
 * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: vmalloc-ed buffer to use for the reply
 * @buflen: the buffer size
 * @lba: Start LBA of the report
 * @partial: Do partial report
 *
 * For internal use during device validation.
 * Using partial=true can significantly speed up execution of a report zones
 * command because the disk does not have to count all possible report matching
 * zones and will only report the count of zones fitting in the command reply
 * buffer.
 */
static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
				  unsigned int buflen, sector_t lba,
				  bool partial)
{
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout;
	struct scsi_sense_hdr sshdr;
	unsigned char cmd[16];
	unsigned int rep_len;
	int result;

	memset(cmd, 0, 16);
	cmd[0] = ZBC_IN;
	cmd[1] = ZI_REPORT_ZONES;
	put_unaligned_be64(lba, &cmd[2]);
	put_unaligned_be32(buflen, &cmd[10]);
	if (partial)
		cmd[14] = ZBC_REPORT_ZONE_PARTIAL;

	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
				  buf, buflen, &sshdr,
				  timeout, SD_MAX_RETRIES, NULL);
	if (result) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES start lba %llu failed\n", lba);
		sd_print_result(sdkp, "REPORT ZONES", result);
		if (result > 0 && scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EIO;
	}

	rep_len = get_unaligned_be32(&buf[0]);
	if (rep_len < 64) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES report invalid length %u\n",
			  rep_len);
		return -EIO;
	}

	return 0;
}

/**
 * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply.
 * @sdkp: The target disk
 * @nr_zones: Maximum number of zones to report
 * @buflen: Size of the buffer allocated
 *
 * Try to allocate a reply buffer for the number of requested zones.
 * The size of the buffer allocated may be smaller than requested to
 * satisfy the device constraints (max_hw_sectors, max_segments, etc).
 *
 * Return the address of the allocated buffer and update @buflen with
 * the size of the allocated buffer.
 */
static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
					unsigned int nr_zones, size_t *buflen)
{
	struct request_queue *q = sdkp->disk->queue;
	size_t bufsize;
	void *buf;

	/*
	 * Report zone buffer size should be at most 64B times the number of
	 * zones requested plus the 64B reply header, but should be aligned
	 * to SECTOR_SIZE for ATA devices.
	 * Make sure that this size does not exceed the hardware capabilities.
	 * Furthermore, since the report zone command cannot be split, make
	 * sure that the allocated buffer can always be mapped by limiting the
	 * number of pages allocated to the HBA max segments limit.
	 */
	nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
	bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);
	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

	while (bufsize >= SECTOR_SIZE) {
		buf = __vmalloc(bufsize,
				GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
	}

	return NULL;
}
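/*
 * Illustrative sizing example for sd_zbc_alloc_report_buffer() (values are
 * hypothetical, not from any particular device): for nr_zones = 8192 the
 * ideal reply buffer is roundup((8192 + 1) * 64, 512) = 524800 bytes. With
 * queue_max_hw_sectors() = 512 (256 KiB) the size is capped at 262144
 * bytes, i.e. room for the 64B header plus 4095 zone descriptors, and the
 * REPORT ZONES loop in sd_zbc_report_zones() below simply issues additional
 * commands to cover the remaining zones.
 */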
/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
	return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks);
}

/**
 * sd_zbc_report_zones - SCSI .report_zones() callback.
 * @disk: Disk to report zones for.
 * @sector: Start sector.
 * @nr_zones: Maximum number of zones to report.
 * @cb: Callback function called to report zone information.
 * @data: Second argument passed to @cb.
 *
 * Called by the block layer to iterate over zone information. See also the
 * disk->fops->report_zones() calls in block/blk-zoned.c.
 */
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	sector_t lba = sectors_to_logical(sdkp->device, sector);
	unsigned int nr, i;
	unsigned char *buf;
	u64 zone_length, start_lba;
	size_t offset, buflen = 0;
	int zone_idx = 0;
	int ret;

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return -EOPNOTSUPP;

	if (!sdkp->capacity)
		/* Device gone or invalid */
		return -ENODEV;

	buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
	if (!buf)
		return -ENOMEM;

	while (zone_idx < nr_zones && lba < sdkp->capacity) {
		ret = sd_zbc_do_report_zones(sdkp, buf, buflen, lba, true);
		if (ret)
			goto out;

		offset = 0;
		nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
		if (!nr)
			break;

		for (i = 0; i < nr && zone_idx < nr_zones; i++) {
			offset += 64;
			start_lba = get_unaligned_be64(&buf[offset + 16]);
			zone_length = get_unaligned_be64(&buf[offset + 8]);
			if ((zone_idx == 0 &&
			     (lba < start_lba ||
			      lba >= start_lba + zone_length)) ||
			    (zone_idx > 0 && start_lba != lba) ||
			    start_lba + zone_length < start_lba) {
				sd_printk(KERN_ERR, sdkp,
					  "Zone %d at LBA %llu is invalid: %llu + %llu\n",
					  zone_idx, lba, start_lba, zone_length);
				ret = -EINVAL;
				goto out;
			}
			lba = start_lba + zone_length;
			if (sd_zbc_is_gap_zone(&buf[offset])) {
				if (sdkp->zone_starting_lba_gran)
					continue;
				sd_printk(KERN_ERR, sdkp,
					  "Gap zone without constant LBA offsets\n");
				ret = -EINVAL;
				goto out;
			}

			ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
						  cb, data);
			if (ret)
				goto out;

			zone_idx++;
		}
	}

	ret = zone_idx;
out:
	kvfree(buf);
	return ret;
}

static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	sector_t sector = blk_rq_pos(rq);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLK_STS_IOERR;

	if (sdkp->device->changed)
		return BLK_STS_IOERR;

	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
		/* Unaligned request */
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}
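/*
 * The alignment test above relies on the zone size being a power of two,
 * which sd_zbc_check_capacity() below enforces. As a hypothetical example,
 * with 256 MiB zones (524288 sectors of 512B), a request at sector 524288
 * gives 524288 & 524287 == 0 (aligned), while one at sector 524544 gives
 * 524544 & 524287 == 256 and is rejected as unaligned.
 */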
#define SD_ZBC_INVALID_WP_OFST	(~0u)
#define SD_ZBC_UPDATING_WP_OFST	(SD_ZBC_INVALID_WP_OFST - 1)

static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
				      void *data)
{
	struct scsi_disk *sdkp = data;

	lockdep_assert_held(&sdkp->zones_wp_offset_lock);

	sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone);

	return 0;
}

/*
 * An attempt to append a zone triggered an invalid write pointer error.
 * Reread the write pointer of the zone(s) in which the append failed.
 */
static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
	struct scsi_disk *sdkp;
	unsigned long flags;
	sector_t zno;
	int ret;

	sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);

	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
	for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) {
		if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
			continue;

		spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
		ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
					     SD_BUF_SIZE,
					     zno * sdkp->zone_info.zone_blocks,
					     true);
		spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
		if (!ret)
			sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
					    zno, sd_zbc_update_wp_offset_cb,
					    sdkp);
	}
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);

	scsi_device_put(sdkp->device);
}

/**
 * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command.
 * @cmd: the command to setup
 * @lba: the LBA to patch
 * @nr_blocks: the number of LBAs to be written
 *
 * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND.
 * sd_zbc_prepare_zone_append() handles the necessary zone write locking and
 * patching of the lba for an emulated ZONE_APPEND command.
 *
 * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will
 * schedule a REPORT ZONES command and return BLK_STS_DEV_RESOURCE.
 */
blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
					unsigned int nr_blocks)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	unsigned int wp_offset, zno = blk_rq_zone_no(rq);
	unsigned long flags;
	blk_status_t ret;

	ret = sd_zbc_cmnd_checks(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	if (!blk_rq_zone_is_seq(rq))
		return BLK_STS_IOERR;

	/* Unlock of the write lock will happen in sd_zbc_complete() */
	if (!blk_req_zone_write_trylock(rq))
		return BLK_STS_ZONE_RESOURCE;

	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
	wp_offset = sdkp->zones_wp_offset[zno];
	switch (wp_offset) {
	case SD_ZBC_INVALID_WP_OFST:
		/*
		 * We are about to schedule work to update a zone write pointer
		 * offset, which will cause the zone append command to be
		 * requeued. So make sure that the scsi device does not go away
		 * while the work is being processed.
		 */
		if (scsi_device_get(sdkp->device)) {
			ret = BLK_STS_IOERR;
			break;
		}
		sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST;
		schedule_work(&sdkp->zone_wp_offset_work);
		fallthrough;
	case SD_ZBC_UPDATING_WP_OFST:
		ret = BLK_STS_DEV_RESOURCE;
		break;
	default:
		wp_offset = sectors_to_logical(sdkp->device, wp_offset);
		if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) {
			ret = BLK_STS_IOERR;
			break;
		}

		trace_scsi_prepare_zone_append(cmd, *lba, wp_offset);
		*lba += wp_offset;
	}
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
	if (ret)
		blk_req_zone_write_unlock(rq);
	return ret;
}
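/*
 * Sketch of the emulation flow (hypothetical numbers): for a zone append
 * issued at a zone starting at sector 1048576 whose cached write pointer
 * offset is 2048 sectors, sd_zbc_prepare_zone_append() patches the command
 * LBA so the WRITE lands at sector 1050624. On completion,
 * sd_zbc_zone_wp_update() below adds the same offset to rq->__sector so
 * the block layer reports the actual write position back to the issuer,
 * and advances the cached offset by the number of sectors written.
 */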
/**
 * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
 *			can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
 * @cmd: the command to setup
 * @op: Operation to be performed
 * @all: All zones control
 *
 * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
 */
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
					 unsigned char op, bool all)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	sector_t sector = blk_rq_pos(rq);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	sector_t block = sectors_to_logical(sdkp->device, sector);
	blk_status_t ret;

	ret = sd_zbc_cmnd_checks(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_OUT;
	cmd->cmnd[1] = op;
	if (all)
		cmd->cmnd[14] = 0x1;
	else
		put_unaligned_be64(block, &cmd->cmnd[2]);

	rq->timeout = SD_TIMEOUT;
	cmd->sc_data_direction = DMA_NONE;
	cmd->transfersize = 0;
	cmd->allowed = 0;

	return BLK_STS_OK;
}

static bool sd_zbc_need_zone_wp_update(struct request *rq)
{
	switch (req_op(rq)) {
	case REQ_OP_ZONE_APPEND:
	case REQ_OP_ZONE_FINISH:
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
		return true;
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}

/**
 * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 *
 * Called from sd_zbc_complete() to handle the update of the cached zone write
 * pointer value in case an update is needed.
 */
static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
					  unsigned int good_bytes)
{
	int result = cmd->result;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	unsigned int zno = blk_rq_zone_no(rq);
	enum req_op op = req_op(rq);
	unsigned long flags;

	/*
	 * If we got an error for a command that needs updating the write
	 * pointer offset cache, we must mark the zone wp offset entry as
	 * invalid to force an update from disk the next time a zone append
	 * command is issued.
	 */
	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);

	if (result && op != REQ_OP_ZONE_RESET_ALL) {
		if (op == REQ_OP_ZONE_APPEND) {
			/* Force complete completion (no retry) */
			good_bytes = 0;
			scsi_set_resid(cmd, blk_rq_bytes(rq));
		}

		/*
		 * Force an update of the zone write pointer offset on
		 * the next zone append access.
		 */
		if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
			sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST;
		goto unlock_wp_offset;
	}

	switch (op) {
	case REQ_OP_ZONE_APPEND:
		trace_scsi_zone_wp_update(cmd, rq->__sector,
					  sdkp->zones_wp_offset[zno],
					  good_bytes);
		rq->__sector += sdkp->zones_wp_offset[zno];
		fallthrough;
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE:
		if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
			sdkp->zones_wp_offset[zno] +=
						good_bytes >> SECTOR_SHIFT;
		break;
	case REQ_OP_ZONE_RESET:
		sdkp->zones_wp_offset[zno] = 0;
		break;
	case REQ_OP_ZONE_FINISH:
		sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp);
		break;
	case REQ_OP_ZONE_RESET_ALL:
		memset(sdkp->zones_wp_offset, 0,
		       sdkp->zone_info.nr_zones * sizeof(unsigned int));
		break;
	default:
		break;
	}

unlock_wp_offset:
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);

	return good_bytes;
}
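/*
 * Summary of the cached write pointer offset state transitions handled
 * above and in sd_zbc_prepare_zone_append() (a reading aid, not new
 * behavior):
 *
 *	valid offset --(failed zoned write)--> SD_ZBC_INVALID_WP_OFST
 *	SD_ZBC_INVALID_WP_OFST --(next zone append)--> SD_ZBC_UPDATING_WP_OFST
 *	SD_ZBC_UPDATING_WP_OFST --(REPORT ZONES workfn)--> fresh offset
 */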
/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: command sense header
 *
 * Called from sd_done() to handle zone commands errors and updates to the
 * device queue zone write pointer offset cache.
 */
unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
			     struct scsi_sense_hdr *sshdr)
{
	int result = cmd->result;
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (op_is_zone_mgmt(req_op(rq)) &&
	    result &&
	    sshdr->sense_key == ILLEGAL_REQUEST &&
	    sshdr->asc == 0x24) {
		/*
		 * INVALID FIELD IN CDB error: a zone management command was
		 * attempted on a conventional zone. Nothing to worry about,
		 * so be quiet about the error.
		 */
		rq->rq_flags |= RQF_QUIET;
	} else if (sd_zbc_need_zone_wp_update(rq))
		good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		blk_req_zone_write_unlock(rq);

	return good_bytes;
}
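/*
 * Sense data reference for the check above: ILLEGAL_REQUEST is sense key
 * 0x05, and additional sense code 0x24 means INVALID FIELD IN CDB, which
 * ZBC devices return when, for example, a RESET WRITE POINTER targets a
 * conventional zone.
 */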
/**
 * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer where to store the VPD page data
 *
 * Read VPD page B6h, get information and check that reads are unconstrained.
 */
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
					      unsigned char *buf)
{
	u64 zone_starting_lba_gran;

	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Read zoned characteristics VPD page failed\n");
		return -ENODEV;
	}

	if (sdkp->device->type != TYPE_ZBC) {
		/* Host-aware */
		sdkp->urswrz = 1;
		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
		sdkp->zones_max_open = 0;
		return 0;
	}

	/* Host-managed */
	sdkp->urswrz = buf[4] & 1;
	sdkp->zones_optimal_open = 0;
	sdkp->zones_optimal_nonseq = 0;
	sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
	/* Check zone alignment method */
	switch (buf[23] & 0xf) {
	case 0:
	case ZBC_CONSTANT_ZONE_LENGTH:
		/* Use zone length */
		break;
	case ZBC_CONSTANT_ZONE_START_OFFSET:
		zone_starting_lba_gran = get_unaligned_be64(&buf[24]);
		if (zone_starting_lba_gran == 0 ||
		    !is_power_of_2(zone_starting_lba_gran) ||
		    logical_to_sectors(sdkp->device, zone_starting_lba_gran) >
		    UINT_MAX) {
			sd_printk(KERN_ERR, sdkp,
				  "Invalid zone starting LBA granularity %llu\n",
				  zone_starting_lba_gran);
			return -ENODEV;
		}
		sdkp->zone_starting_lba_gran = zone_starting_lba_gran;
		break;
	default:
		sd_printk(KERN_ERR, sdkp, "Invalid zone alignment method\n");
		return -ENODEV;
	}

	/*
	 * Check for unconstrained reads: host-managed devices with
	 * constrained reads (drives that fail reads past the write pointer)
	 * are not supported.
	 */
	if (!sdkp->urswrz) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "constrained reads devices are not supported\n");
		return -ENODEV;
	}

	return 0;
}
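/*
 * Byte offsets of the Zoned Block Device Characteristics VPD page (B6h)
 * fields read above (only the fields this driver uses; see the ZBC/ZAC
 * specifications for the authoritative layout):
 *
 *	byte 4, bit 0		URSWRZ: unrestricted reads in sequential
 *				write required zones
 *	bytes 8..11		optimal number of open zones (host-aware)
 *	bytes 12..15		optimal number of non-sequentially written
 *				zones (host-aware)
 *	bytes 16..19		maximum number of open zones (host-managed)
 *	byte 23, bits 3:0	zone alignment method
 *	bytes 24..31		zone starting LBA granularity
 */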
/**
 * sd_zbc_check_capacity - Check the device capacity
 * @sdkp: Target disk
 * @buf: command buffer
 * @zblocks: zone size in logical blocks
 *
 * Get the device zone size and check that the device capacity as reported
 * by READ CAPACITY matches the max_lba value (plus one) of the report zones
 * command reply for devices with RC_BASIS == 0.
 *
 * Returns 0 upon success or an error code upon failure.
 */
static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
				 u32 *zblocks)
{
	u64 zone_blocks;
	sector_t max_lba;
	unsigned char *rec;
	int ret;

	/* Do a report zone to get max_lba and the size of the first zone */
	ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
	if (ret)
		return ret;

	if (sdkp->rc_basis == 0) {
		/* The max_lba field is the capacity of this device */
		max_lba = get_unaligned_be64(&buf[8]);
		if (sdkp->capacity != max_lba + 1) {
			if (sdkp->first_scan)
				sd_printk(KERN_WARNING, sdkp,
					  "Changing capacity from %llu to max LBA+1 %llu\n",
					  (unsigned long long)sdkp->capacity,
					  (unsigned long long)max_lba + 1);
			sdkp->capacity = max_lba + 1;
		}
	}

	if (sdkp->zone_starting_lba_gran == 0) {
		/* Get the size of the first reported zone */
		rec = buf + 64;
		zone_blocks = get_unaligned_be64(&rec[8]);
		if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
			if (sdkp->first_scan)
				sd_printk(KERN_NOTICE, sdkp,
					  "Zone size too large\n");
			return -EFBIG;
		}
	} else {
		zone_blocks = sdkp->zone_starting_lba_gran;
	}

	if (!is_power_of_2(zone_blocks)) {
		sd_printk(KERN_ERR, sdkp,
			  "Zone size %llu is not a power of two.\n",
			  zone_blocks);
		return -EINVAL;
	}

	*zblocks = zone_blocks;

	return 0;
}

static void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
		return;

	if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks + 1 runt zone\n",
			  sdkp->zone_info.nr_zones - 1,
			  sdkp->zone_info.zone_blocks);
	else
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks\n",
			  sdkp->zone_info.nr_zones,
			  sdkp->zone_info.zone_blocks);
}

static int sd_zbc_init_disk(struct scsi_disk *sdkp)
{
	sdkp->zones_wp_offset = NULL;
	spin_lock_init(&sdkp->zones_wp_offset_lock);
	sdkp->rev_wp_offset = NULL;
	mutex_init(&sdkp->rev_mutex);
	INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
	sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!sdkp->zone_wp_update_buf)
		return -ENOMEM;

	return 0;
}
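/*
 * Runt zone example (hypothetical numbers) for the runt detection in
 * sd_zbc_print_zones() above and the nr_zones computation in
 * sd_zbc_read_zones() below: a capacity of 10000 logical blocks with
 * 4096-block zones gives nr_zones = round_up(10000, 4096) >> ilog2(4096)
 * = 3, i.e. two full zones plus one 1808-block runt zone, detected
 * because 10000 & (4096 - 1) != 0.
 */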
void sd_zbc_free_zone_info(struct scsi_disk *sdkp)
{
	if (!sdkp->zone_wp_update_buf)
		return;

	/* Serialize against revalidate zones */
	mutex_lock(&sdkp->rev_mutex);

	kvfree(sdkp->zones_wp_offset);
	sdkp->zones_wp_offset = NULL;
	kfree(sdkp->zone_wp_update_buf);
	sdkp->zone_wp_update_buf = NULL;

	sdkp->early_zone_info = (struct zoned_disk_info){ };
	sdkp->zone_info = (struct zoned_disk_info){ };

	mutex_unlock(&sdkp->rev_mutex);
}

static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);

	swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
}

/*
 * Call blk_revalidate_disk_zones() if any of the zoned disk properties have
 * changed that make it necessary to call that function. Called by
 * sd_revalidate_disk() after the gendisk capacity has been set.
 */
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
{
	struct gendisk *disk = sdkp->disk;
	struct request_queue *q = disk->queue;
	u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
	unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
	u32 max_append;
	int ret = 0;
	unsigned int flags;

	/*
	 * For all zoned disks, initialize zone append emulation data if not
	 * already done. This is necessary also for host-aware disks used as
	 * regular disks due to the presence of partitions as these partitions
	 * may be deleted and the disk zoned model changed back from
	 * BLK_ZONED_NONE to BLK_ZONED_HA.
	 */
	if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
		ret = sd_zbc_init_disk(sdkp);
		if (ret)
			return ret;
	}

	/*
	 * There is nothing to do for regular disks, including host-aware disks
	 * that have partitions.
	 */
	if (!blk_queue_is_zoned(q))
		return 0;

	/*
	 * Make sure revalidate zones are serialized to ensure exclusive
	 * updates of the scsi disk data.
	 */
	mutex_lock(&sdkp->rev_mutex);

	if (sdkp->zone_info.zone_blocks == zone_blocks &&
	    sdkp->zone_info.nr_zones == nr_zones &&
	    disk->nr_zones == nr_zones)
		goto unlock;

	flags = memalloc_noio_save();
	sdkp->zone_info.zone_blocks = zone_blocks;
	sdkp->zone_info.nr_zones = nr_zones;
	sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
	if (!sdkp->rev_wp_offset) {
		ret = -ENOMEM;
		memalloc_noio_restore(flags);
		goto unlock;
	}

	ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);

	memalloc_noio_restore(flags);
	kvfree(sdkp->rev_wp_offset);
	sdkp->rev_wp_offset = NULL;

	if (ret) {
		sdkp->zone_info = (struct zoned_disk_info){ };
		sdkp->capacity = 0;
		goto unlock;
	}

	max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
			   q->limits.max_segments << (PAGE_SHIFT - 9));
	max_append = min_t(u32, max_append, queue_max_hw_sectors(q));

	blk_queue_max_zone_append_sectors(q, max_append);

	sd_zbc_print_zones(sdkp);

unlock:
	mutex_unlock(&sdkp->rev_mutex);

	return ret;
}
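/*
 * Worked example for the max_append clamping above (hypothetical limits):
 * with zones of 65536 sectors (32 MiB), max_segments = 128 and 4 KiB
 * pages, the segment bound is 128 << (PAGE_SHIFT - 9) = 1024 sectors, so
 * an emulated zone append transfers at most 512 KiB per command even
 * though the zone itself is much larger.
 */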
/**
 * sd_zbc_read_zones - Read zone information and update the request queue
 * @sdkp: SCSI disk pointer.
 * @buf: 512 byte buffer used for storing SCSI command output.
 *
 * Read zone information and update the request queue zone characteristics and
 * also the zoned device information in *sdkp. Called by sd_revalidate_disk()
 * before the gendisk capacity has been set.
 */
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
	struct gendisk *disk = sdkp->disk;
	struct request_queue *q = disk->queue;
	unsigned int nr_zones;
	u32 zone_blocks = 0;
	int ret;

	if (!sd_is_zoned(sdkp)) {
		/*
		 * Device managed or normal SCSI disk, no special handling
		 * required. Nevertheless, free the disk zone information in
		 * case the device type changed.
		 */
		sd_zbc_free_zone_info(sdkp);
		return 0;
	}

	/* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */
	sdkp->device->use_16_for_rw = 1;
	sdkp->device->use_10_for_rw = 0;
	sdkp->device->use_16_for_sync = 1;

	if (!blk_queue_is_zoned(q)) {
		/*
		 * This can happen for a host-aware disk with partitions.
		 * The block device zone model was already cleared by
		 * disk_set_zoned(). Only free the scsi disk zone
		 * information and exit early.
		 */
		sd_zbc_free_zone_info(sdkp);
		return 0;
	}

	/* Check zoned block device characteristics (unconstrained reads) */
	ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
	if (ret)
		goto err;

	/* Check the device capacity reported by report zones */
	ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
	if (ret != 0)
		goto err;

	/* The drive satisfies the kernel restrictions: set it up */
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
	if (sdkp->zones_max_open == U32_MAX)
		disk_set_max_open_zones(disk, 0);
	else
		disk_set_max_open_zones(disk, sdkp->zones_max_open);
	disk_set_max_active_zones(disk, 0);
	nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);

	/*
	 * Per ZBC and ZAC specifications, writes in sequential write required
	 * zones of host-managed devices must be aligned to the device physical
	 * block size.
	 */
	if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);

	sdkp->early_zone_info.nr_zones = nr_zones;
	sdkp->early_zone_info.zone_blocks = zone_blocks;

	return 0;

err:
	sdkp->capacity = 0;

	return ret;
}
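/*
 * Overall revalidation flow, as described by the kernel-doc comments above
 * (a reading aid): sd_revalidate_disk() first calls sd_zbc_read_zones() to
 * validate the device and fill sdkp->early_zone_info, then sets the gendisk
 * capacity, and finally calls sd_zbc_revalidate_zones() to (re)build the
 * write pointer offset cache and set the zone append limits.
 */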