// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_pscsi.c
 *
 * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/module.h>
#include <linux/unaligned.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_alua.h"
#include "target_core_internal.h"
#include "target_core_pscsi.h"

/* Map a generic se_device back to the pscsi_dev_virt that embeds it. */
static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
{
	return container_of(dev, struct pscsi_dev_virt, dev);
}

static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t,
					 const struct io_comp_batch *);

/* pscsi_attach_hba():
 *
 * Allocate the per-HBA private data and record the SCSI Host ID that a
 * later pscsi_pmode_enable_hba()/scsi_host_lookup() can use to locate
 * the struct Scsi_Host.
 */
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct pscsi_hba_virt *phv;

	phv = kzalloc_obj(struct pscsi_hba_virt, GFP_KERNEL);
	if (!phv) {
		pr_err("Unable to allocate struct pscsi_hba_virt\n");
		return -ENOMEM;
	}
	phv->phv_host_id = host_id;
	/* Start in virtual mode; passthrough mode is enabled explicitly. */
	phv->phv_mode = PHV_VIRTUAL_HOST_ID;

	hba->hba_ptr = phv;

	pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		PSCSI_VERSION, TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
	       hba->hba_id);

	return 0;
}

/*
 * Drop the struct Scsi_Host reference taken in HBA passthrough mode (if
 * any) and free the per-HBA private data allocated by pscsi_attach_hba().
 */
static void pscsi_detach_hba(struct se_hba *hba)
{
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	struct Scsi_Host *scsi_host = phv->phv_lld_host;

	if (scsi_host) {
		scsi_host_put(scsi_host);

		pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
			" Generic Target Core\n", hba->hba_id,
			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
			"Unknown");
	} else
		pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
			" from Generic Target Core\n", hba->hba_id);

	kfree(phv);
	hba->hba_ptr = NULL;
}

/*
 * Toggle HBA passthrough (PHV_LLD_SCSI_HOST_NO) mode.
 *
 * mode_flag == 0 releases the held struct Scsi_Host reference and returns
 * to PHV_VIRTUAL_HOST_ID mode (returns 0).  Non-zero mode_flag looks up
 * the Scsi_Host by the original host_id and enables passthrough mode
 * (returns 1 on success, negative errno on failure).
 */
static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	/*
	 * Release the struct Scsi_Host
	 */
	if (!mode_flag) {
		if (!sh)
			return 0;

		phv->phv_lld_host = NULL;
		phv->phv_mode = PHV_VIRTUAL_HOST_ID;

		pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
			" %s\n", hba->hba_id, (sh->hostt->name) ?
			(sh->hostt->name) : "Unknown");

		scsi_host_put(sh);
		return 0;
	}
	/*
	 * Otherwise, locate struct Scsi_Host from the original passed
	 * pSCSI Host ID and enable for phba mode
	 */
	sh = scsi_host_lookup(phv->phv_host_id);
	if (!sh) {
		pr_err("pSCSI: Unable to locate SCSI Host for"
			" phv_host_id: %d\n", phv->phv_host_id);
		return -EINVAL;
	}

	phv->phv_lld_host = sh;
	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;

	pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");

	return 1;
}

/*
 * Issue MODE_SENSE(6) to a TYPE_TAPE device to read the current media
 * blocksize into sdev->sector_size.  Falls back to 1024 whenever the
 * command fails or the device reports a blocksize of zero.
 */
static void pscsi_tape_read_blocksize(struct se_device *dev,
				      struct scsi_device *sdev)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
	int ret;

	buf = kzalloc(12, GFP_KERNEL);
	if (!buf)
		goto out_free;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = MODE_SENSE;
	cdb[4] = 0x0c; /* 12 bytes */

	ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, 12, HZ, 1, NULL);
	if (ret)
		goto out_free;

	/*
	 * If MODE_SENSE still returns zero, set the default value to 1024.
	 */
	/* Blocksize lives in bytes 9-11 of the block descriptor. */
	sdev->sector_size = get_unaligned_be24(&buf[9]);
out_free:
	/* Reached on both success and failure: enforce the 1024 default. */
	if (!sdev->sector_size)
		sdev->sector_size = 1024;

	kfree(buf);
}

/*
 * Copy the cached standard INQUIRY vendor/model/revision strings from the
 * scsi_device into the se_device's t10_wwn.
 */
static void
pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
{
	if (sdev->inquiry_len < INQUIRY_LEN)
		return;
	/*
	 * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
	 */
	BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
	snprintf(wwn->vendor, sizeof(wwn->vendor),
		 "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
	BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
	snprintf(wwn->model, sizeof(wwn->model),
		 "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
	BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
	snprintf(wwn->revision, sizeof(wwn->revision),
		 "%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
}

/*
 * Issue an INQUIRY for VPD page 0x80 (Unit Serial Number) and, on success,
 * store the serial into wwn->unit_serial and flag that the firmware
 * provided it.  Returns 0 on success, -ENOMEM/-EPERM on failure.
 */
static int
pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
	int ret;

	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01; /* Query VPD */
	cdb[2] = 0x80; /* Unit Serial Number */
	put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);

	ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
			       INQUIRY_VPD_SERIAL_LEN, HZ, 1, NULL);
	if (ret)
		goto out_free;

	/* Serial data starts at byte 4 of the VPD page. */
	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);

	wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;

	kfree(buf);
	return 0;

out_free:
	kfree(buf);
	return -EPERM;
}

/*
 * Issue an INQUIRY for VPD page 0x83 (Device Identification) and walk the
 * identification descriptor list, adding each recognized identifier as a
 * t10_vpd entry on wwn->t10_vpd_list.  Failures are silent (best effort).
 *
 * NOTE(review): buf is sized INQUIRY_VPD_SERIAL_LEN while the transfer
 * length is INQUIRY_VPD_DEVICE_IDENTIFIER_LEN -- assumes the two constants
 * are equal; verify against target_core_base.h.
 */
static void
pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
				   struct t10_wwn *wwn)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
	int ident_len, page_len, off = 4, ret;
	struct t10_vpd *vpd;

	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
	if (!buf)
		return;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01; /* Query VPD */
	cdb[2] = 0x83; /* Device Identifier */
	put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);

	ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
			       INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, HZ, 1, NULL);
	if (ret)
		goto out;

	page_len = get_unaligned_be16(&buf[2]);
	while (page_len > 0) {
		/* Grab a pointer to the Identification descriptor */
		page_83 = &buf[off];
		ident_len = page_83[3];
		if (!ident_len) {
			pr_err("page_83[3]: identifier"
					" length zero!\n");
			break;
		}
		pr_debug("T10 VPD Identifier Length: %d\n", ident_len);

		vpd = kzalloc_obj(struct t10_vpd, GFP_KERNEL);
		if (!vpd) {
			pr_err("Unable to allocate memory for"
					" struct t10_vpd\n");
			goto out;
		}
		INIT_LIST_HEAD(&vpd->vpd_list);

		transport_set_vpd_proto_id(vpd, page_83);
		transport_set_vpd_assoc(vpd, page_83);

		/* Skip (and free) descriptors the core does not understand. */
		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
			off += (ident_len + 4);
			page_len -= (ident_len + 4);
			kfree(vpd);
			continue;
		}
		if (transport_set_vpd_ident(vpd, page_83) < 0) {
			off += (ident_len + 4);
			page_len -= (ident_len + 4);
			kfree(vpd);
			continue;
		}

		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
		off += (ident_len + 4);
		page_len -= (ident_len + 4);
	}

out:
	kfree(buf);
}

/*
 * Attach a located struct scsi_device to the se_device: record it in the
 * pSCSI private data, derive the hardware attributes (block size, max
 * sectors, queue depth), and harvest INQUIRY/VPD identity data.
 */
static int pscsi_add_device_to_list(struct se_device *dev,
				    struct scsi_device *sd)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct request_queue *q = sd->request_queue;

	pdv->pdv_sd = sd;

	if (!sd->queue_depth) {
		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;

		pr_err("Set broken SCSI Device %d:%d:%llu"
			" queue_depth to %d\n", sd->channel, sd->id,
			sd->lun, sd->queue_depth);
	}

	dev->dev_attrib.hw_block_size =
		min_not_zero((int)sd->sector_size, 512);
	dev->dev_attrib.hw_max_sectors =
		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
	dev->dev_attrib.hw_queue_depth = sd->queue_depth;

	/*
	 * Setup our standard INQUIRY info into se_dev->t10_wwn
	 */
	pscsi_set_inquiry_info(sd, &dev->t10_wwn);

	/*
	 * Locate VPD WWN Information used for various purposes within
	 * the Storage Engine.
	 */
	if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
		/*
		 * If VPD Unit Serial returned GOOD status, try
		 * VPD Device Identification page (0x83).
		 */
		pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
	}

	/*
	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
	 */
	if (sd->type == TYPE_TAPE) {
		pscsi_tape_read_blocksize(dev, sd);
		dev->dev_attrib.hw_block_size = sd->sector_size;
	}
	return 0;
}

/* Allocate the backend-private se_device wrapper for a new pSCSI device. */
static struct se_device *pscsi_alloc_device(struct se_hba *hba,
					    const char *name)
{
	struct pscsi_dev_virt *pdv;

	pdv = kzalloc_obj(struct pscsi_dev_virt, GFP_KERNEL);
	if (!pdv) {
		pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
		return NULL;
	}

	pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
	return &pdv->dev;
}

/*
 * Called with struct Scsi_Host->host_lock called.
 */
/*
 * Configure a TYPE_DISK/TYPE_ZBC scsi_device: take a device reference,
 * claim exclusive block_device access via the configured udev_path, then
 * register the device with the core.  Releases sh->host_lock on all paths.
 */
static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
	__releases(sh->host_lock)
{
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct Scsi_Host *sh = sd->host;
	struct file *bdev_file;
	int ret;

	if (scsi_device_get(sd)) {
		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
			sh->host_no, sd->channel, sd->id, sd->lun);
		spin_unlock_irq(sh->host_lock);
		return -EIO;
	}
	spin_unlock_irq(sh->host_lock);
	/*
	 * Claim exclusive struct block_device access to struct scsi_device
	 * for TYPE_DISK and TYPE_ZBC using supplied udev_path
	 */
	bdev_file = bdev_file_open_by_path(dev->udev_path,
				BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
	if (IS_ERR(bdev_file)) {
		pr_err("pSCSI: bdev_file_open_by_path() failed\n");
		scsi_device_put(sd);
		return PTR_ERR(bdev_file);
	}
	pdv->pdv_bdev_file = bdev_file;

	ret = pscsi_add_device_to_list(dev, sd);
	if (ret) {
		fput(bdev_file);
		scsi_device_put(sd);
		return ret;
	}

	pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
		phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
		sh->host_no, sd->channel, sd->id, sd->lun);
	return 0;
}

/*
 * Called with struct Scsi_Host->host_lock called.
 */
/*
 * Configure a non-disk scsi_device (tape, changer, etc.): no block_device
 * claim is needed.  Releases sh->host_lock on all paths.
 */
static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
	__releases(sh->host_lock)
{
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct Scsi_Host *sh = sd->host;
	int ret;

	if (scsi_device_get(sd)) {
		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
			sh->host_no, sd->channel, sd->id, sd->lun);
		spin_unlock_irq(sh->host_lock);
		return -EIO;
	}
	spin_unlock_irq(sh->host_lock);

	ret = pscsi_add_device_to_list(dev, sd);
	if (ret) {
		scsi_device_put(sd);
		return ret;
	}
	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
		sd->channel, sd->id, sd->lun);

	return 0;
}

/*
 * Resolve the configured channel/target/LUN to a struct scsi_device on
 * the appropriate Scsi_Host and bring the TCM/pSCSI object online.
 * Handles both PHV_VIRTUAL_HOST_ID (host looked up here, possibly by
 * falling back to legacy HBA passthrough mode) and PHV_LLD_SCSI_HOST_NO
 * (host already held on the HBA).
 */
static int pscsi_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct scsi_device *sd;
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	int legacy_mode_enable = 0;
	int ret;

	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
			" scsi_lun_id= parameters\n");
		return -EINVAL;
	}

	/*
	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
	 */
	if (!sh) {
		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
			pr_err("pSCSI: Unable to locate struct"
				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
			return -ENODEV;
		}
		/*
		 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
		 * reference, we enforce that udev_path has been set
		 */
		if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
			pr_err("pSCSI: udev_path attribute has not"
				" been set before ENABLE=1\n");
			return -EINVAL;
		}
		/*
		 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
		 * use the original TCM hba ID to reference Linux/SCSI Host No
		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
		 */
		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
			if (hba->dev_count) {
				pr_err("pSCSI: Unable to set hba_mode"
					" with active devices\n");
				return -EEXIST;
			}

			if (pscsi_pmode_enable_hba(hba, 1) != 1)
				return -ENODEV;

			legacy_mode_enable = 1;
			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
			sh = phv->phv_lld_host;
		} else {
			sh = scsi_host_lookup(pdv->pdv_host_id);
			if (!sh) {
				pr_err("pSCSI: Unable to locate"
					" pdv_host_id: %d\n", pdv->pdv_host_id);
				return -EINVAL;
			}
			pdv->pdv_lld_host = sh;
		}
	} else {
		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
			pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
				" struct Scsi_Host exists\n");
			return -EEXIST;
		}
	}

	spin_lock_irq(sh->host_lock);
	list_for_each_entry(sd, &sh->__devices, siblings) {
		if ((pdv->pdv_channel_id != sd->channel) ||
		    (pdv->pdv_target_id != sd->id) ||
		    (pdv->pdv_lun_id != sd->lun))
			continue;
		/*
		 * Functions will release the held struct scsi_host->host_lock
		 * before calling pscsi_add_device_to_list() to register
		 * struct scsi_device with target_core_mod.
		 */
		switch (sd->type) {
		case TYPE_DISK:
		case TYPE_ZBC:
			ret = pscsi_create_type_disk(dev, sd);
			break;
		default:
			ret = pscsi_create_type_nondisk(dev, sd);
			break;
		}

		/* host_lock has been dropped by the create helpers here. */
		if (ret) {
			/* Unwind whichever host reference path was taken. */
			if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
				scsi_host_put(sh);
			else if (legacy_mode_enable) {
				pscsi_pmode_enable_hba(hba, 0);
				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
			}
			pdv->pdv_sd = NULL;
			return ret;
		}
		return 0;
	}
	spin_unlock_irq(sh->host_lock);

	pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);

	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
		scsi_host_put(sh);
	else if (legacy_mode_enable) {
		pscsi_pmode_enable_hba(hba, 0);
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
	}

	return -ENODEV;
}

/* RCU callback: free the pscsi_dev_virt once all readers are done. */
static void pscsi_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);

	kfree(pdv);
}

/* Defer the actual free until after an RCU grace period. */
static void pscsi_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
}

/*
 * Undo pscsi_configure_device(): drop the block_device claim (disk/ZBC),
 * the Scsi_Host reference, and the scsi_device reference.
 */
static void pscsi_destroy_device(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	if (sd) {
		/*
		 * Release exclusive pSCSI internal struct block_device claim for
		 * struct scsi_device with TYPE_DISK or TYPE_ZBC
		 * from pscsi_create_type_disk()
		 */
		if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
		    pdv->pdv_bdev_file) {
			fput(pdv->pdv_bdev_file);
			pdv->pdv_bdev_file = NULL;
		}
		/*
		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
		 * to struct Scsi_Host now.
		 */
		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
		    (phv->phv_lld_host != NULL))
			scsi_host_put(phv->phv_lld_host);
		else if (pdv->pdv_lld_host)
			scsi_host_put(pdv->pdv_lld_host);

		scsi_device_put(sd);

		pdv->pdv_sd = NULL;
	}
}

/*
 * Post-process a completed passthrough command before handing status back
 * to the core: force the Write-Protect bit into MODE_SENSE data for
 * read-only LUNs, track tape blocksize changes from MODE_SELECT, and copy
 * sense data (with special tape FM/EOM/ILI handling) on CHECK CONDITION.
 */
static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
			       unsigned char *req_sense, int valid_data)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
	struct scsi_device *sd = pdv->pdv_sd;
	unsigned char *cdb = cmd->priv;

	/*
	 * Special case for REPORT_LUNs which is emulated and not passed on.
	 */
	if (!cdb)
		return;

	/*
	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
	 * forced.
	 */
	if (!cmd->data_length)
		goto after_mode_sense;

	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
	    scsi_status == SAM_STAT_GOOD) {
		bool read_only = target_lun_is_rdonly(cmd);

		if (read_only) {
			unsigned char *buf;

			buf = transport_kmap_data_sg(cmd);
			if (!buf) {
				; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
			} else {
				/* WP bit lives at a different offset in the
				 * 10-byte vs 6-byte mode parameter header. */
				if (cdb[0] == MODE_SENSE_10) {
					if (!(buf[3] & 0x80))
						buf[3] |= 0x80;
				} else {
					if (!(buf[2] & 0x80))
						buf[2] |= 0x80;
				}

				transport_kunmap_data_sg(cmd);
			}
		}
	}
after_mode_sense:

	if (sd->type != TYPE_TAPE || !cmd->data_length)
		goto after_mode_select;

	/*
	 * Hack to correctly obtain the initiator requested blocksize for
	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
	 * struct scsi_device->sector_size will not contain the correct value
	 * by default, so we go ahead and set it so
	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
	 * storage engine.
	 */
	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
	    scsi_status == SAM_STAT_GOOD) {
		unsigned char *buf;
		u16 bdl;
		u32 blocksize;

		buf = sg_virt(&cmd->t_data_sg[0]);
		if (!buf) {
			pr_err("Unable to get buf for scatterlist\n");
			goto after_mode_select;
		}

		if (cdb[0] == MODE_SELECT)
			bdl = buf[3];
		else
			bdl = get_unaligned_be16(&buf[6]);

		if (!bdl)
			goto after_mode_select;

		if (cdb[0] == MODE_SELECT)
			blocksize = get_unaligned_be24(&buf[9]);
		else
			blocksize = get_unaligned_be24(&buf[13]);

		sd->sector_size = blocksize;
	}
after_mode_select:

	if (scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(cmd, req_sense);

		/*
		 * check for TAPE device reads with
		 * FM/EOM/ILI set, so that we can get data
		 * back despite framework assumption that a
		 * check condition means there is no data
		 */
		if (sd->type == TYPE_TAPE && valid_data &&
		    cmd->data_direction == DMA_FROM_DEVICE) {
			/*
			 * is sense data valid, fixed format,
			 * and have FM, EOM, or ILI set?
			 */
			if (req_sense[0] == 0xf0 &&	/* valid, fixed format */
			    req_sense[2] & 0xe0 &&	/* FM, EOM, or ILI */
			    (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
				pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
				cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
			}
		}
	}
}

/* configfs device parameter tokens for pscsi_set_configfs_dev_params(). */
enum {
	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
	Opt_scsi_lun_id, Opt_err
};

static match_table_t tokens = {
	{Opt_scsi_host_id, "scsi_host_id=%d"},
	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
	{Opt_scsi_target_id, "scsi_target_id=%d"},
	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
	{Opt_err, NULL}
};

/*
 * Parse the comma/newline separated "key=value" configfs control string
 * into pdv_host_id/pdv_channel_id/pdv_target_id/pdv_lun_id, setting the
 * corresponding PDF_HAS_* flag for each value seen.
 */
static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
					     const char *page, ssize_t count)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_scsi_host_id:
			/* scsi_host_id= conflicts with HBA passthrough mode. */
			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
				pr_err("PSCSI[%d]: Unable to accept"
					" scsi_host_id while phv_mode =="
					" PHV_LLD_SCSI_HOST_NO\n",
					phv->phv_host_id);
				ret = -EINVAL;
				goto out;
			}
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			pdv->pdv_host_id = arg;
			pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
			break;
		case Opt_scsi_channel_id:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			pdv->pdv_channel_id = arg;
			pr_debug("PSCSI[%d]: Referencing SCSI Channel"
				" ID: %d\n",  phv->phv_host_id,
				pdv->pdv_channel_id);
			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
			break;
		case Opt_scsi_target_id:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			pdv->pdv_target_id = arg;
			pr_debug("PSCSI[%d]: Referencing SCSI Target"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_target_id);
			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
			break;
		case Opt_scsi_lun_id:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			pdv->pdv_lun_id = arg;
			pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
			pdv->pdv_flags |= PDF_HAS_LUN_ID;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

/*
 * Emit a human-readable summary of the configured bus location plus the
 * cached INQUIRY identity of the attached scsi_device (if any).
 */
static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct scsi_device *sd = pdv->pdv_sd;
	unsigned char host_id[16];
	ssize_t bl;

	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
	else
		snprintf(host_id, 16, "PHBA Mode");

	bl = sprintf(b, "SCSI Device Bus Location:"
		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
		host_id);

	if (sd) {
		bl += sprintf(b + bl, "        Vendor: %."
			__stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
		bl += sprintf(b + bl, " Model: %."
			__stringify(INQUIRY_MODEL_LEN) "s", sd->model);
		bl += sprintf(b + bl, " Rev: %."
			__stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
	}
	return bl;
}

/* bio end_io: release bios allocated by pscsi_map_sg(). */
static void pscsi_bi_endio(struct bio *bio)
{
	bio_uninit(bio);
	kfree(bio);
}

/*
 * Map the command's scatterlist into one or more bios and append them to
 * the passthrough request.  A new bio is started whenever the current one
 * runs out of vecs.  On failure, all bios (attached or pending) are freed.
 */
static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		struct request *req)
{
	struct bio *bio = NULL;
	struct page *page;
	struct scatterlist *sg;
	u32 data_len = cmd->data_length, i, len, bytes, off;
	int nr_pages = (cmd->data_length + sgl[0].offset +
			PAGE_SIZE - 1) >> PAGE_SHIFT;
	int nr_vecs = 0, rc;
	int rw = (cmd->data_direction == DMA_TO_DEVICE);

	BUG_ON(!cmd->data_length);

	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);

	for_each_sg(sgl, sg, sgl_nents, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
			page, len, off);

		/*
		 * We only have one page of data in each sg element,
		 * we can not cross a page boundary.
		 */
		if (off + len > PAGE_SIZE)
			goto fail;

		if (len > 0 && data_len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
new_bio:
				nr_vecs = bio_max_segs(nr_pages);
				bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
				if (!bio)
					goto fail;
				bio_init_inline(bio, NULL, nr_vecs,
						rw ? REQ_OP_WRITE : REQ_OP_READ);
				bio->bi_end_io = pscsi_bi_endio;

				pr_debug("PSCSI: Allocated bio: %p,"
					" dir: %s nr_vecs: %d\n", bio,
					(rw) ? "rw" : "r", nr_vecs);
			}

			pr_debug("PSCSI: Calling bio_add_page() i: %d"
				" bio: %p page: %p len: %d off: %d\n", i, bio,
				page, len, off);

			rc = bio_add_page(bio, page, bytes, off);
			pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
				bio_segments(bio), nr_vecs);
			if (rc != bytes) {
				/* Current bio is full: attach it to the
				 * request and start a fresh one. */
				pr_debug("PSCSI: Reached bio->bi_vcnt max:"
					" %d i: %d bio: %p, allocating another"
					" bio\n", bio->bi_vcnt, i, bio);

				rc = blk_rq_append_bio(req, bio);
				if (rc) {
					pr_err("pSCSI: failed to append bio\n");
					goto fail;
				}

				goto new_bio;
			}

			data_len -= bytes;
		}
	}

	if (bio) {
		rc = blk_rq_append_bio(req, bio);
		if (rc) {
			pr_err("pSCSI: failed to append bio\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* Free the not-yet-appended bio, then any bios already on the
	 * request. */
	if (bio) {
		bio_uninit(bio);
		kfree(bio);
	}
	while (req->bio) {
		bio = req->bio;
		req->bio = bio->bi_next;
		bio_uninit(bio);
		kfree(bio);
	}
	req->biotail = NULL;
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

/* Reject BIDI commands, then defer CDB parsing to the passthrough core. */
static sense_reason_t
pscsi_parse_cdb(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_BIDI)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
}

/*
 * Build and dispatch the passthrough block-layer request for a se_cmd:
 * allocate a scsi request, map the data scatterlist, copy in the CDB, set
 * timeout/retries, and submit asynchronously (pscsi_req_done completes it).
 */
static sense_reason_t
pscsi_execute_cmd(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
	struct scsi_cmnd *scmd;
	struct request *req;
	sense_reason_t ret;

	req = scsi_alloc_request(pdv->pdv_sd->request_queue,
			cmd->data_direction == DMA_TO_DEVICE ?
			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (sgl) {
		ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
		if (ret)
			goto fail_put_request;
	}

	req->end_io = pscsi_req_done;
	req->end_io_data = cmd;

	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = scsi_command_size(cmd->t_task_cdb);
	if (scmd->cmd_len > sizeof(scmd->cmnd)) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto fail_put_request;
	}
	memcpy(scmd->cmnd, cmd->t_task_cdb, scmd->cmd_len);

	if (pdv->pdv_sd->type == TYPE_DISK ||
	    pdv->pdv_sd->type == TYPE_ZBC)
		req->timeout = PS_TIMEOUT_DISK;
	else
		req->timeout = PS_TIMEOUT_OTHER;
	scmd->allowed = PS_RETRY;

	/* Stash the CDB so the completion path can inspect the opcode. */
	cmd->priv = scmd->cmnd;

	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);

	return 0;

fail_put_request:
	blk_mq_free_request(req);
	return ret;
}

/* pscsi_get_device_type():
 *
 * Return the SCSI peripheral device type, or TYPE_NO_LUN when no
 * scsi_device is attached yet.
 */
static u32 pscsi_get_device_type(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
	struct scsi_device *sd = pdv->pdv_sd;

	return (sd) ? sd->type : TYPE_NO_LUN;
}

/* Report device capacity in sectors via the claimed block_device (if any). */
static sector_t pscsi_get_blocks(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);

	if (pdv->pdv_bdev_file)
		return bdev_nr_sectors(file_bdev(pdv->pdv_bdev_file));
	return 0;
}

/*
 * Request end_io handler: translate the SCSI result into a target-core
 * completion (delegating sense/tape fixups to pscsi_complete_cmd) and
 * free the request.
 */
static enum rq_end_io_ret pscsi_req_done(struct request *req,
					 blk_status_t status,
					 const struct io_comp_batch *iob)
{
	struct se_cmd *cmd = req->end_io_data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
	enum sam_status scsi_status = scmd->result & 0xff;
	int valid_data = cmd->data_length - scmd->resid_len;
	u8 *cdb = cmd->priv;

	if (scsi_status != SAM_STAT_GOOD) {
		pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
			" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
	}

	pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data);

	switch (host_byte(scmd->result)) {
	case DID_OK:
		target_complete_cmd_with_length(cmd, scsi_status, valid_data);
		break;
	default:
		pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
			" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		break;
	}

	blk_mq_free_request(req);
	return RQ_END_IO_NONE;
}

static const struct target_backend_ops pscsi_ops = {
	.name			= "pscsi",
	.owner			= THIS_MODULE,
	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
				   TRANSPORT_FLAG_PASSTHROUGH_ALUA |
				   TRANSPORT_FLAG_PASSTHROUGH_PGR,
	.attach_hba		= pscsi_attach_hba,
	.detach_hba		= pscsi_detach_hba,
	.pmode_enable_hba	= pscsi_pmode_enable_hba,
	.alloc_device		= pscsi_alloc_device,
	.configure_device	= pscsi_configure_device,
	.destroy_device		= pscsi_destroy_device,
	.free_device		= pscsi_free_device,
	.parse_cdb		= pscsi_parse_cdb,
	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
	.get_device_type	= pscsi_get_device_type,
	.get_blocks		= pscsi_get_blocks,
	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
};

static int __init pscsi_module_init(void)
{
	return transport_backend_register(&pscsi_ops);
}

static void __exit pscsi_module_exit(void)
{
	target_backend_unregister(&pscsi_ops);
}

MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(pscsi_module_init);
module_exit(pscsi_module_exit);