/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	spin_unlock(&lun->lun_tg_pt_gp_lock);
}

sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x05; /* SPC-3 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard.  Response data format values less than 2h are
	 *   obsolete.  Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled on the
	 * device, and the fabric supports VERIFY + PASS.  Also report
	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
	 * to unprotected devices.
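	 * (PROTECT is bit 0 of standard INQUIRY byte 5, hence the |= 0x1
	 * below.)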
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
			buf[5] |= 0x1;
	}

	buf[7] = 0x2; /* CmdQue=1 */

	memcpy(&buf[8], "LIO-ORG ", 8);
	memset(&buf[16], 0x20, 16);
	memcpy(&buf[16], dev->t10_wwn.model,
	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
	buf[4] = 31; /* Set additional length to 31 */

	return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);

/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len;

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
				      unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	int cnt;
	bool next = true;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT
	 * SERIAL NUMBER set via vpd_unit_serial in target_core_configfs.c to
	 * ensure per device uniqueness.
	 */
	for (cnt = 0; *p && cnt < 13; p++) {
		int val = hex_to_bin(*p);

		if (val < 0)
			continue;

		if (next) {
			next = false;
			buf[cnt++] |= val;
		} else {
			next = true;
			buf[cnt] = val << 4;
		}
	}
}

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
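	 * Without one, we skip the NAA descriptor and fall through to the
	 * T10 Vendor Identification descriptor below.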
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
	 */
	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				  &dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	memcpy(&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);

	if (1) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = lun->lun_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
		buf[off++] = (lun->lun_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		spin_lock(&lun->lun_tg_pt_gp_lock);
		tg_pt_gp = lun->lun_tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&lun->lun_tg_pt_gp_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&lun->lun_tg_pt_gp_lock);

		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);

/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	buf[3] = 0x3c;
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
			buf[4] = 0x5;
		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
			buf[4] = 0x4;
	}

	/* logical unit supports type 1 and type 3 protection */
	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
		buf[4] |= (0x3 << 3);
	}

	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (target_check_wce(dev))
		buf[6] = 0x01;

	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}

/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u32 mtl = 0;
	int have_tp = 0, opt, min;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we expect a different page
	 * length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;
	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 *
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
	 * enforcing maximum HW scatter-gather-list entry limit
	 */
	if (cmd->se_tfo->max_data_sg_nents) {
		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
		       dev->dev_attrib.block_size;
	}
	put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}

/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;
	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

	return 0;
}

/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4.  If the DP bit is set to
	 * zero, then the page length shall be set to 0004h.  If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2^(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again.  We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25).
	 * A TPU bit set to zero indicates that the device server does not
	 * support the UNMAP command.
	 */
	if (dev->dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
	 */
	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40 | 0x20;

	return 0;
}

/* Referrals VPD page */
static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x0c;
	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};

/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
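	 * The page codes are returned in ascending order, matching the
	 * layout of the evpd_handlers[] array above.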
	 */
	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;
	int len = 0;

	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
		buf[0] = 0x3f; /* Not connected */
	else
		buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		len = buf[4] + 5;
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			len = get_unaligned_be16(&buf[2]) + 4;
			goto out;
		}
	}

	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd_with_length(cmd, GOOD, len);
	return ret;
}

static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	p[0] = 0x0a;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	/* GLTSD: No implicit save of log parameters */
	p[2] = (1 << 1);
	if (target_sense_desc_format(dev))
		/* D_SENSE: Descriptor format sense data for 64bit sectors */
		p[2] |= (1 << 2);

	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 * Table 368 -- QUEUE ALGORITHM MODIFIER field
	 *   Code      Description
	 *   0h        Restricted reordering
	 *   1h        Unrestricted reordering allowed
	 *   2h to 7h  Reserved
	 *   8h to Fh  Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner.  Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a
	 * command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS.  Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * of the number of commands completed with one of those status codes.
	 */
	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client.  A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
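	 * As with the INQUIRY PROTECT bit, ATO is only reported when the
	 * backend device or session actually has T10-PI enabled (checked
	 * below).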
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
			p[5] |= 0x80;
	}

	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

out:
	return 12;
}

static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;

	p[0] = 0x08;
	p[1] = 0x12;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	if (target_check_wce(dev))
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

out:
	return 20;
}

static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};

static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types.
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void spc_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	*buf++ = 8;
	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
	buf += 4;
	put_unaligned_be32(block_size, buf);
	return 9;
}

static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;	/* LONGLBA */
	buf += 2;
	*buf++ = 16;
	put_unaligned_be64(blocks, buf);
	buf += 12;
	put_unaligned_be32(block_size, buf);

	return 17;
}

static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);
	bool llba = ten ? !!(cdb[1] & 0x10) : false;
	u8 pc = cdb[2] >> 6;
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;
	bool read_only = target_lun_is_rdonly(cmd);

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
		spc_modesense_write_protect(&buf[length], type);

	/*
	 * SBC only allows us to enable FUA and DPO together.
	 * Fortunately DPO is explicitly specified as a hint, so a noop is a
	 * perfectly valid implementation.
	 */
	if (target_check_fua(dev))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
								       blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
								  blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, length);
	return 0;
}

static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;
	bool pf = !!(cdb[1] & 0x10);
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	sense_reason_t ret = 0;
	int i;

	if (!cmd->data_length) {
		target_complete_cmd(cmd, GOOD);
		return 0;
	}

	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	memset(buf, 0, SE_SENSE_BUF);

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
		scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
					ua_asc, ua_ascq);
	else
		scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct scsi_lun slun;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8;
	__be32 len;

	buf = transport_kmap_data_sg(cmd);
	if (cmd->data_length && !buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!sess)
		goto done;

	nacl = sess->se_node_acl;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if (offset >= cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, &slun);
		memcpy(buf + offset, &slun,
		       min(8u, cmd->data_length - offset));
		offset += 8;
	}
	rcu_read_unlock();

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	/*
	 * If no LUNs are accessible, report virtual LUN 0.
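	 * (A REPORT LUNS response always carries at least one entry, so an
	 * empty or missing ACL mapping still yields the LUN 0 entry below.)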
	 */
	if (lun_count == 0) {
		int_to_scsilun(0, &slun);
		if (cmd->data_length > 8)
			memcpy(buf + offset, &slun,
			       min(8u, cmd->data_length - offset));
		lun_count = 1;
	}

	if (buf) {
		len = cpu_to_be32(lun_count * 8);
		memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);

static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = (cdb[7] << 8) + cdb[8];
		break;
	case PERSISTENT_RESERVE_IN:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = (cdb[3] << 8) + cdb[4];

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = (cdb[10] << 24) | (cdb[11] << 16) |
			(cdb[12] << 8) | cdb[13];
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = (cdb[3] << 8) | cdb[4];
		break;
	case WRITE_BUFFER:
		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);