/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"


static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	if (!port)
		return;
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}

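/*
 * Byte layout of the 36-byte standard INQUIRY payload assembled below
 * (offsets per spc4r17 section 6.4.2):
 *
 *   0      PERIPHERAL QUALIFIER / PERIPHERAL DEVICE TYPE (set by the caller)
 *   1      RMB = 1 for TYPE_TAPE
 *   2      VERSION = 0x05 (SPC-3)
 *   3      RESPONSE DATA FORMAT = 2
 *   4      ADDITIONAL LENGTH = 31 (36 bytes total)
 *   5      SCCS/TPGS, filled in by spc_fill_alua_data()
 *   7      CMDQUE = 1
 *   8-15   T10 VENDOR IDENTIFICATION ("LIO-ORG")
 *   16-31  PRODUCT IDENTIFICATION (t10_wwn.model)
 *   32-35  PRODUCT REVISION LEVEL (t10_wwn.revision)
 */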
static sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_dev;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x05; /* SPC-3 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun->lun_sep, buf);

	buf[7] = 0x2; /* CmdQue=1 */

	snprintf(&buf[8], 8, "LIO-ORG");
	snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
	snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
	buf[4] = 31; /* Set additional length to 31 */

	return 0;
}

/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len = 0;

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		u32 unit_serial_len;

		unit_serial_len = strlen(dev->t10_wwn.unit_serial);
		unit_serial_len++; /* For NULL Terminator */

		len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

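/*
 * Note on the nibble packing performed by spc_parse_naa_6h_vendor_specific()
 * below: the buffer passed in points at the NAA byte that already carries
 * the final nibble of the IEEE Company ID, so the first hex digit of the
 * unit serial is OR'd into that byte's low nibble and subsequent digits
 * alternate high/low nibbles of the following bytes.  Non-hex characters
 * (e.g. '-' in a UUID-style serial) are skipped, and at most 25 digits are
 * consumed: 36 bits of VENDOR SPECIFIC IDENTIFIER plus 64 bits of
 * VENDOR SPECIFIC IDENTIFIER EXTENSION.
 */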
static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
					     unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	int cnt;
	bool next = true;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT
	 * SERIAL NUMBER set via vpd_unit_serial in target_core_configfs.c to
	 * ensure per device uniqueness.
	 */
	for (cnt = 0; *p && cnt < 13; p++) {
		int val = hex_to_bin(*p);

		if (val < 0)
			continue;

		if (next) {
			next = false;
			buf[cnt++] |= val;
		} else {
			next = true;
			buf[cnt] = val << 4;
		}
	}
}

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
static sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
	 */
	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				&dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	memcpy(&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);
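	/*
	 * Each designation descriptor emitted below starts with the same
	 * 4-byte header (see spc4r17 section 7.7.3): byte 0 carries the
	 * PROTOCOL IDENTIFIER (upper nibble) and CODE SET (lower nibble),
	 * byte 1 carries PIV, ASSOCIATION and DESIGNATOR TYPE, byte 2 is
	 * reserved, and byte 3 holds the DESIGNATOR LENGTH.  That is why
	 * every descriptor adds "+ 4" to both len and off.
	 */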
	/*
	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
	 */
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = port->sep_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
		buf[off++] = (port->sep_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (!tg_pt_gp_mem)
			goto check_lu_gp;

		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
		scsi_name_len += 10;
		/* Check for 4-byte padding */
		padding = ((-scsi_name_len) & 3);
		if (padding != 0)
			scsi_name_len += padding;
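		/*
		 * Example: a 51-byte fabric WWN gives scsi_name_len = 61
		 * before padding, and ((-61) & 3) == 3 rounds the designator
		 * up to 64 bytes, keeping the length a multiple of four as
		 * required by spc4r17 section 7.7.3.11.
		 */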
		/* Header size + Designation descriptor */
		scsi_name_len += 4;

		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		if (padding)
			scsi_name_len += padding;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);
	}
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}

/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	buf[3] = 0x3c;
	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
		buf[6] = 0x01;
	return 0;
}

/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u32 max_sectors;
	int have_tp = 0;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we expect a different page
	 * length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 */
	max_sectors = min(dev->dev_attrib.fabric_max_sectors,
			  dev->dev_attrib.hw_max_sectors);
	put_unaligned_be32(max_sectors, &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}

/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;
	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

	return 0;
}

/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
	 * zero, then the page length shall be set to 0004h. If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2^(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again.  We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
	 * that the device server does not support the UNMAP command.
	 */
	if (dev->dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
	 */
	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40;

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
};

/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_INQUIRY_BUF];
	sense_reason_t ret;
	int p;

	memset(buf, 0, SE_INQUIRY_BUF);

	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
		buf[0] = 0x3f; /* Not connected */
	else
		buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			goto out;
		}
	}

	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
{
	p[0] = 0x0a;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	p[2] = 2;
	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 *	Table 368 -- QUEUE ALGORITHM MODIFIER field
	 *	Code		Description
	 *	 0h		Restricted reordering
	 *	 1h		Unrestricted reordering allowed
	 *	 2h to 7h	Reserved
	 *	 8h to Fh	Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * of the number of commands completed with one of those status codes.
	 */
	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
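	/*
	 * The remaining bytes follow the Control mode page layout from
	 * spc4r17 section 7.4.6: bytes 8-9 are the BUSY TIMEOUT PERIOD
	 * (set to the maximum, 0xffff) and bytes 10-11 the EXTENDED
	 * SELF-TEST ROUTINE COMPLETION TIME (here 30).
	 */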
	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

out:
	return 12;
}

static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
{
	p[0] = 0x08;
	p[1] = 0x12;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	if (dev->dev_attrib.emulate_write_cache > 0)
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

out:
	return 20;
}

static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_device *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};

static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types..
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void spc_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	*buf++ = 8;
	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
	buf += 4;
	put_unaligned_be32(block_size, buf);
	return 9;
}

static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;		/* LONGLBA */
	buf += 2;
	*buf++ = 16;
	put_unaligned_be64(blocks, buf);
	buf += 12;
	put_unaligned_be32(block_size, buf);

	return 17;
}

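/*
 * MODE SENSE emulation.  The parameter data starts with a mode parameter
 * header: 4 bytes for MODE SENSE(6) (MODE DATA LENGTH, MEDIUM TYPE,
 * DEVICE-SPECIFIC PARAMETER, BLOCK DESCRIPTOR LENGTH) and 8 bytes for
 * MODE SENSE(10) (2-byte MODE DATA LENGTH, MEDIUM TYPE, DEVICE-SPECIFIC
 * PARAMETER, LONGLBA, reserved, 2-byte BLOCK DESCRIPTOR LENGTH), followed
 * by an optional block descriptor and then the requested mode page(s).
 */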
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char *buf, *map_buf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);
	bool llba = ten ? !!(cdb[1] & 0x10) : false;
	u8 pc = cdb[2] >> 6;
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	map_buf = transport_kmap_data_sg(cmd);
	if (!map_buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
	 * know we actually allocated a full page.  Otherwise, if the
	 * data buffer is too small, allocate a temporary buffer so we
	 * don't have to worry about overruns in all our INQUIRY
	 * emulation handling.
	 */
	if (cmd->data_length < SE_MODE_PAGE_BUF &&
	    (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
		buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL);
		if (!buf) {
			transport_kunmap_data_sg(cmd);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	} else {
		buf = map_buf;
	}
	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
	    (cmd->se_deve &&
	     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
		spc_modesense_write_protect(&buf[length], type);

	if ((dev->dev_attrib.emulate_write_cache > 0) &&
	    (dev->dev_attrib.emulate_fua_write > 0))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
								       blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
								  blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			/* Only free the temporary buffer, never the kmapped SGL */
			if (buf != map_buf)
				kfree(buf);
			transport_kunmap_data_sg(cmd);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	transport_kunmap_data_sg(cmd);
	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	if (buf != map_buf) {
		memcpy(map_buf, buf, cmd->data_length);
		kfree(buf);
	}

	transport_kunmap_data_sg(cmd);
	target_complete_cmd(cmd, GOOD);
	return 0;
}

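/*
 * MODE SELECT emulation.  No mode page is actually changeable here: the
 * matching modesense handler regenerates the current (emulated) page into
 * a scratch buffer, and the incoming page is compared against it, so any
 * attempt to change a value is rejected with TCM_INVALID_PARAMETER_LIST.
 */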
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;
	bool pf = !!(cdb[1] & 0x10);
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	int ret = 0;
	int i;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(dev, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];

	memset(buf, 0, SE_SENSE_BUF);

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
		       " supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
		/*
		 * CURRENT ERROR, UNIT ATTENTION
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;

		/*
		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
		 */
		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
		buf[7] = 0x0A;
	} else {
		/*
		 * CURRENT ERROR, NO SENSE
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;

		/*
		 * NO ADDITIONAL SENSE INFORMATION
		 */
		buf[SPC_ASC_KEY_OFFSET] = 0x00;
		buf[7] = 0x0A;
	}

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

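/*
 * REPORT LUNS emulation.  The returned parameter data is an 8-byte header
 * (a 4-byte LUN LIST LENGTH in bytes followed by 4 reserved bytes) and one
 * 8-byte entry per reported LUN, which is why lun_count is multiplied by 8
 * before being written into bytes 0-3.
 */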
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	if (cmd->data_length < 16) {
		pr_warn("REPORT LUNS allocation length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((offset + 8) > cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);

static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

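/*
 * Parse an SPC-level CDB: set *size to the transfer length encoded in the
 * CDB (allocation length or parameter list length, depending on the opcode)
 * and, for emulated opcodes, point cmd->execute_cmd at the matching handler.
 */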
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = (cdb[7] << 8) + cdb[8];
		break;
	case PERSISTENT_RESERVE_IN:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = (cdb[3] << 8) + cdb[4];

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		break;
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		*size = (cdb[10] << 24) | (cdb[11] << 16) |
			(cdb[12] << 8) | cdb[13];
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = (cdb[3] << 8) | cdb[4];
		break;
	case WRITE_BUFFER:
		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);