1 /* 2 * SCSI Block Commands (SBC) parsing and emulation. 3 * 4 * (c) Copyright 2002-2013 Datera, Inc. 5 * 6 * Nicholas A. Bellinger <nab@kernel.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 */ 22 23 #include <linux/kernel.h> 24 #include <linux/module.h> 25 #include <linux/ratelimit.h> 26 #include <linux/crc-t10dif.h> 27 #include <asm/unaligned.h> 28 #include <scsi/scsi_proto.h> 29 #include <scsi/scsi_tcq.h> 30 31 #include <target/target_core_base.h> 32 #include <target/target_core_backend.h> 33 #include <target/target_core_fabric.h> 34 35 #include "target_core_internal.h" 36 #include "target_core_ua.h" 37 #include "target_core_alua.h" 38 39 static sense_reason_t 40 sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); 41 static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); 42 43 static sense_reason_t 44 sbc_emulate_readcapacity(struct se_cmd *cmd) 45 { 46 struct se_device *dev = cmd->se_dev; 47 unsigned char *cdb = cmd->t_task_cdb; 48 unsigned long long blocks_long = dev->transport->get_blocks(dev); 49 unsigned char *rbuf; 50 unsigned char buf[8]; 51 u32 blocks; 52 53 /* 54 * SBC-2 says: 55 * If the PMI bit is set to zero and the LOGICAL BLOCK 56 * ADDRESS field is not set to zero, the device server shall 57 * terminate the command with CHECK CONDITION status with 58 * the sense key set to ILLEGAL REQUEST and the additional 59 * sense code set to INVALID FIELD IN CDB. 60 * 61 * In SBC-3, these fields are obsolete, but some SCSI 62 * compliance tests actually check this, so we might as well 63 * follow SBC-2. 64 */ 65 if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5])) 66 return TCM_INVALID_CDB_FIELD; 67 68 if (blocks_long >= 0x00000000ffffffff) 69 blocks = 0xffffffff; 70 else 71 blocks = (u32)blocks_long; 72 73 buf[0] = (blocks >> 24) & 0xff; 74 buf[1] = (blocks >> 16) & 0xff; 75 buf[2] = (blocks >> 8) & 0xff; 76 buf[3] = blocks & 0xff; 77 buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; 78 buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; 79 buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; 80 buf[7] = dev->dev_attrib.block_size & 0xff; 81 82 rbuf = transport_kmap_data_sg(cmd); 83 if (rbuf) { 84 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 85 transport_kunmap_data_sg(cmd); 86 } 87 88 target_complete_cmd_with_length(cmd, GOOD, 8); 89 return 0; 90 } 91 92 static sense_reason_t 93 sbc_emulate_readcapacity_16(struct se_cmd *cmd) 94 { 95 struct se_device *dev = cmd->se_dev; 96 struct se_session *sess = cmd->se_sess; 97 int pi_prot_type = dev->dev_attrib.pi_prot_type; 98 99 unsigned char *rbuf; 100 unsigned char buf[32]; 101 unsigned long long blocks = dev->transport->get_blocks(dev); 102 103 memset(buf, 0, sizeof(buf)); 104 buf[0] = (blocks >> 56) & 0xff; 105 buf[1] = (blocks >> 48) & 0xff; 106 buf[2] = (blocks >> 40) & 0xff; 107 buf[3] = (blocks >> 32) & 0xff; 108 buf[4] = (blocks >> 24) & 0xff; 109 buf[5] = (blocks >> 16) & 0xff; 110 buf[6] = (blocks >> 8) & 0xff; 111 buf[7] = blocks & 0xff; 112 buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; 113 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 114 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 115 buf[11] = dev->dev_attrib.block_size & 0xff; 116 /* 117 * Set P_TYPE and PROT_EN bits for DIF support 118 */ 119 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 120 /* 121 * Only override a device's pi_prot_type if no T10-PI is 122 * available, and sess_prot_type has been explicitly enabled. 123 */ 124 if (!pi_prot_type) 125 pi_prot_type = sess->sess_prot_type; 126 127 if (pi_prot_type) 128 buf[12] = (pi_prot_type - 1) << 1 | 0x1; 129 } 130 131 if (dev->transport->get_lbppbe) 132 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; 133 134 if (dev->transport->get_alignment_offset_lbas) { 135 u16 lalba = dev->transport->get_alignment_offset_lbas(dev); 136 buf[14] = (lalba >> 8) & 0x3f; 137 buf[15] = lalba & 0xff; 138 } 139 140 /* 141 * Set Thin Provisioning Enable bit following sbc3r22 in section 142 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 143 */ 144 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) { 145 buf[14] |= 0x80; 146 147 /* 148 * LBPRZ signifies that zeroes will be read back from an LBA after 149 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2) 150 */ 151 if (dev->dev_attrib.unmap_zeroes_data) 152 buf[14] |= 0x40; 153 } 154 155 rbuf = transport_kmap_data_sg(cmd); 156 if (rbuf) { 157 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 158 transport_kunmap_data_sg(cmd); 159 } 160 161 target_complete_cmd_with_length(cmd, GOOD, 32); 162 return 0; 163 } 164 165 static sense_reason_t 166 sbc_emulate_startstop(struct se_cmd *cmd) 167 { 168 unsigned char *cdb = cmd->t_task_cdb; 169 170 /* 171 * See sbc3r36 section 5.25 172 * Immediate bit should be set since there is nothing to complete 173 * POWER CONDITION MODIFIER 0h 174 */ 175 if (!(cdb[1] & 1) || cdb[2] || cdb[3]) 176 return TCM_INVALID_CDB_FIELD; 177 178 /* 179 * See sbc3r36 section 5.25 180 * POWER CONDITION 0h START_VALID - process START and LOEJ 181 */ 182 if (cdb[4] >> 4 & 0xf) 183 return TCM_INVALID_CDB_FIELD; 184 185 /* 186 * See sbc3r36 section 5.25 187 * LOEJ 0h - nothing to load or unload 188 * START 1h - we are ready 189 */ 190 if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4)) 191 return TCM_INVALID_CDB_FIELD; 192 193 target_complete_cmd(cmd, SAM_STAT_GOOD); 194 return 0; 195 } 196 197 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) 198 { 199 u32 num_blocks; 200 201 if (cmd->t_task_cdb[0] == WRITE_SAME) 202 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); 203 else if (cmd->t_task_cdb[0] == WRITE_SAME_16) 204 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); 205 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ 206 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); 207 208 /* 209 * Use the explicit range when non zero is supplied, otherwise calculate 210 * the remaining range based on ->get_blocks() - starting LBA. 211 */ 212 if (num_blocks) 213 return num_blocks; 214 215 return cmd->se_dev->transport->get_blocks(cmd->se_dev) - 216 cmd->t_task_lba + 1; 217 } 218 EXPORT_SYMBOL(sbc_get_write_same_sectors); 219 220 static sense_reason_t 221 sbc_execute_write_same_unmap(struct se_cmd *cmd) 222 { 223 struct sbc_ops *ops = cmd->protocol_data; 224 sector_t nolb = sbc_get_write_same_sectors(cmd); 225 sense_reason_t ret; 226 227 if (nolb) { 228 ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb); 229 if (ret) 230 return ret; 231 } 232 233 target_complete_cmd(cmd, GOOD); 234 return 0; 235 } 236 237 static sense_reason_t 238 sbc_emulate_noop(struct se_cmd *cmd) 239 { 240 target_complete_cmd(cmd, GOOD); 241 return 0; 242 } 243 244 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 245 { 246 return cmd->se_dev->dev_attrib.block_size * sectors; 247 } 248 249 static inline u32 transport_get_sectors_6(unsigned char *cdb) 250 { 251 /* 252 * Use 8-bit sector value. SBC-3 says: 253 * 254 * A TRANSFER LENGTH field set to zero specifies that 256 255 * logical blocks shall be written. Any other value 256 * specifies the number of logical blocks that shall be 257 * written. 258 */ 259 return cdb[4] ? : 256; 260 } 261 262 static inline u32 transport_get_sectors_10(unsigned char *cdb) 263 { 264 return (u32)(cdb[7] << 8) + cdb[8]; 265 } 266 267 static inline u32 transport_get_sectors_12(unsigned char *cdb) 268 { 269 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 270 } 271 272 static inline u32 transport_get_sectors_16(unsigned char *cdb) 273 { 274 return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 275 (cdb[12] << 8) + cdb[13]; 276 } 277 278 /* 279 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 280 */ 281 static inline u32 transport_get_sectors_32(unsigned char *cdb) 282 { 283 return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 284 (cdb[30] << 8) + cdb[31]; 285 286 } 287 288 static inline u32 transport_lba_21(unsigned char *cdb) 289 { 290 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 291 } 292 293 static inline u32 transport_lba_32(unsigned char *cdb) 294 { 295 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 296 } 297 298 static inline unsigned long long transport_lba_64(unsigned char *cdb) 299 { 300 unsigned int __v1, __v2; 301 302 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 303 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 304 305 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 306 } 307 308 /* 309 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 310 */ 311 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 312 { 313 unsigned int __v1, __v2; 314 315 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 316 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 317 318 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 319 } 320 321 static sense_reason_t 322 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) 323 { 324 struct se_device *dev = cmd->se_dev; 325 sector_t end_lba = dev->transport->get_blocks(dev) + 1; 326 unsigned int sectors = sbc_get_write_same_sectors(cmd); 327 sense_reason_t ret; 328 329 if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 330 pr_err("WRITE_SAME PBDATA and LBDATA" 331 " bits not supported for Block Discard" 332 " Emulation\n"); 333 return TCM_UNSUPPORTED_SCSI_OPCODE; 334 } 335 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { 336 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", 337 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 338 return TCM_INVALID_CDB_FIELD; 339 } 340 /* 341 * Sanity check for LBA wrap and request past end of device. 342 */ 343 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || 344 ((cmd->t_task_lba + sectors) > end_lba)) { 345 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", 346 (unsigned long long)end_lba, cmd->t_task_lba, sectors); 347 return TCM_ADDRESS_OUT_OF_RANGE; 348 } 349 350 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ 351 if (flags[0] & 0x10) { 352 pr_warn("WRITE SAME with ANCHOR not supported\n"); 353 return TCM_INVALID_CDB_FIELD; 354 } 355 /* 356 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 357 * translated into block discard requests within backend code. 358 */ 359 if (flags[0] & 0x08) { 360 if (!ops->execute_unmap) 361 return TCM_UNSUPPORTED_SCSI_OPCODE; 362 363 if (!dev->dev_attrib.emulate_tpws) { 364 pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device" 365 " has emulate_tpws disabled\n"); 366 return TCM_UNSUPPORTED_SCSI_OPCODE; 367 } 368 cmd->execute_cmd = sbc_execute_write_same_unmap; 369 return 0; 370 } 371 if (!ops->execute_write_same) 372 return TCM_UNSUPPORTED_SCSI_OPCODE; 373 374 ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); 375 if (ret) 376 return ret; 377 378 cmd->execute_cmd = ops->execute_write_same; 379 return 0; 380 } 381 382 static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success, 383 int *post_ret) 384 { 385 unsigned char *buf, *addr; 386 struct scatterlist *sg; 387 unsigned int offset; 388 sense_reason_t ret = TCM_NO_SENSE; 389 int i, count; 390 /* 391 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 392 * 393 * 1) read the specified logical block(s); 394 * 2) transfer logical blocks from the data-out buffer; 395 * 3) XOR the logical blocks transferred from the data-out buffer with 396 * the logical blocks read, storing the resulting XOR data in a buffer; 397 * 4) if the DISABLE WRITE bit is set to zero, then write the logical 398 * blocks transferred from the data-out buffer; and 399 * 5) transfer the resulting XOR data to the data-in buffer. 400 */ 401 buf = kmalloc(cmd->data_length, GFP_KERNEL); 402 if (!buf) { 403 pr_err("Unable to allocate xor_callback buf\n"); 404 return TCM_OUT_OF_RESOURCES; 405 } 406 /* 407 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 408 * into the locally allocated *buf 409 */ 410 sg_copy_to_buffer(cmd->t_data_sg, 411 cmd->t_data_nents, 412 buf, 413 cmd->data_length); 414 415 /* 416 * Now perform the XOR against the BIDI read memory located at 417 * cmd->t_mem_bidi_list 418 */ 419 420 offset = 0; 421 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 422 addr = kmap_atomic(sg_page(sg)); 423 if (!addr) { 424 ret = TCM_OUT_OF_RESOURCES; 425 goto out; 426 } 427 428 for (i = 0; i < sg->length; i++) 429 *(addr + sg->offset + i) ^= *(buf + offset + i); 430 431 offset += sg->length; 432 kunmap_atomic(addr); 433 } 434 435 out: 436 kfree(buf); 437 return ret; 438 } 439 440 static sense_reason_t 441 sbc_execute_rw(struct se_cmd *cmd) 442 { 443 struct sbc_ops *ops = cmd->protocol_data; 444 445 return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, 446 cmd->data_direction); 447 } 448 449 static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, 450 int *post_ret) 451 { 452 struct se_device *dev = cmd->se_dev; 453 454 /* 455 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through 456 * within target_complete_ok_work() if the command was successfully 457 * sent to the backend driver. 458 */ 459 spin_lock_irq(&cmd->t_state_lock); 460 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { 461 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 462 *post_ret = 1; 463 } 464 spin_unlock_irq(&cmd->t_state_lock); 465 466 /* 467 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 468 * before the original READ I/O submission. 469 */ 470 up(&dev->caw_sem); 471 472 return TCM_NO_SENSE; 473 } 474 475 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, 476 int *post_ret) 477 { 478 struct se_device *dev = cmd->se_dev; 479 struct scatterlist *write_sg = NULL, *sg; 480 unsigned char *buf = NULL, *addr; 481 struct sg_mapping_iter m; 482 unsigned int offset = 0, len; 483 unsigned int nlbas = cmd->t_task_nolb; 484 unsigned int block_size = dev->dev_attrib.block_size; 485 unsigned int compare_len = (nlbas * block_size); 486 sense_reason_t ret = TCM_NO_SENSE; 487 int rc, i; 488 489 /* 490 * Handle early failure in transport_generic_request_failure(), 491 * which will not have taken ->caw_sem yet.. 492 */ 493 if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) 494 return TCM_NO_SENSE; 495 /* 496 * Handle special case for zero-length COMPARE_AND_WRITE 497 */ 498 if (!cmd->data_length) 499 goto out; 500 /* 501 * Immediately exit + release dev->caw_sem if command has already 502 * been failed with a non-zero SCSI status. 503 */ 504 if (cmd->scsi_status) { 505 pr_err("compare_and_write_callback: non zero scsi_status:" 506 " 0x%02x\n", cmd->scsi_status); 507 goto out; 508 } 509 510 buf = kzalloc(cmd->data_length, GFP_KERNEL); 511 if (!buf) { 512 pr_err("Unable to allocate compare_and_write buf\n"); 513 ret = TCM_OUT_OF_RESOURCES; 514 goto out; 515 } 516 517 write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, 518 GFP_KERNEL); 519 if (!write_sg) { 520 pr_err("Unable to allocate compare_and_write sg\n"); 521 ret = TCM_OUT_OF_RESOURCES; 522 goto out; 523 } 524 sg_init_table(write_sg, cmd->t_data_nents); 525 /* 526 * Setup verify and write data payloads from total NumberLBAs. 527 */ 528 rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, 529 cmd->data_length); 530 if (!rc) { 531 pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); 532 ret = TCM_OUT_OF_RESOURCES; 533 goto out; 534 } 535 /* 536 * Compare against SCSI READ payload against verify payload 537 */ 538 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { 539 addr = (unsigned char *)kmap_atomic(sg_page(sg)); 540 if (!addr) { 541 ret = TCM_OUT_OF_RESOURCES; 542 goto out; 543 } 544 545 len = min(sg->length, compare_len); 546 547 if (memcmp(addr, buf + offset, len)) { 548 pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n", 549 addr, buf + offset); 550 kunmap_atomic(addr); 551 goto miscompare; 552 } 553 kunmap_atomic(addr); 554 555 offset += len; 556 compare_len -= len; 557 if (!compare_len) 558 break; 559 } 560 561 i = 0; 562 len = cmd->t_task_nolb * block_size; 563 sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); 564 /* 565 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. 566 */ 567 while (len) { 568 sg_miter_next(&m); 569 570 if (block_size < PAGE_SIZE) { 571 sg_set_page(&write_sg[i], m.page, block_size, 572 m.piter.sg->offset + block_size); 573 } else { 574 sg_miter_next(&m); 575 sg_set_page(&write_sg[i], m.page, block_size, 576 m.piter.sg->offset); 577 } 578 len -= block_size; 579 i++; 580 } 581 sg_miter_stop(&m); 582 /* 583 * Save the original SGL + nents values before updating to new 584 * assignments, to be released in transport_free_pages() -> 585 * transport_reset_sgl_orig() 586 */ 587 cmd->t_data_sg_orig = cmd->t_data_sg; 588 cmd->t_data_sg = write_sg; 589 cmd->t_data_nents_orig = cmd->t_data_nents; 590 cmd->t_data_nents = 1; 591 592 cmd->sam_task_attr = TCM_HEAD_TAG; 593 cmd->transport_complete_callback = compare_and_write_post; 594 /* 595 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler 596 * for submitting the adjusted SGL to write instance user-data. 597 */ 598 cmd->execute_cmd = sbc_execute_rw; 599 600 spin_lock_irq(&cmd->t_state_lock); 601 cmd->t_state = TRANSPORT_PROCESSING; 602 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 603 spin_unlock_irq(&cmd->t_state_lock); 604 605 __target_execute_cmd(cmd); 606 607 kfree(buf); 608 return ret; 609 610 miscompare: 611 pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n", 612 dev->transport->name); 613 ret = TCM_MISCOMPARE_VERIFY; 614 out: 615 /* 616 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in 617 * sbc_compare_and_write() before the original READ I/O submission. 618 */ 619 up(&dev->caw_sem); 620 kfree(write_sg); 621 kfree(buf); 622 return ret; 623 } 624 625 static sense_reason_t 626 sbc_compare_and_write(struct se_cmd *cmd) 627 { 628 struct sbc_ops *ops = cmd->protocol_data; 629 struct se_device *dev = cmd->se_dev; 630 sense_reason_t ret; 631 int rc; 632 /* 633 * Submit the READ first for COMPARE_AND_WRITE to perform the 634 * comparision using SGLs at cmd->t_bidi_data_sg.. 635 */ 636 rc = down_interruptible(&dev->caw_sem); 637 if (rc != 0) { 638 cmd->transport_complete_callback = NULL; 639 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 640 } 641 /* 642 * Reset cmd->data_length to individual block_size in order to not 643 * confuse backend drivers that depend on this value matching the 644 * size of the I/O being submitted. 645 */ 646 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; 647 648 ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 649 DMA_FROM_DEVICE); 650 if (ret) { 651 cmd->transport_complete_callback = NULL; 652 up(&dev->caw_sem); 653 return ret; 654 } 655 /* 656 * Unlock of dev->caw_sem to occur in compare_and_write_callback() 657 * upon MISCOMPARE, or in compare_and_write_done() upon completion 658 * of WRITE instance user-data. 659 */ 660 return TCM_NO_SENSE; 661 } 662 663 static int 664 sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type, 665 bool is_write, struct se_cmd *cmd) 666 { 667 if (is_write) { 668 cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP : 669 protect ? TARGET_PROT_DOUT_PASS : 670 TARGET_PROT_DOUT_INSERT; 671 switch (protect) { 672 case 0x0: 673 case 0x3: 674 cmd->prot_checks = 0; 675 break; 676 case 0x1: 677 case 0x5: 678 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 679 if (prot_type == TARGET_DIF_TYPE1_PROT) 680 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 681 break; 682 case 0x2: 683 if (prot_type == TARGET_DIF_TYPE1_PROT) 684 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 685 break; 686 case 0x4: 687 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 688 break; 689 default: 690 pr_err("Unsupported protect field %d\n", protect); 691 return -EINVAL; 692 } 693 } else { 694 cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT : 695 protect ? TARGET_PROT_DIN_PASS : 696 TARGET_PROT_DIN_STRIP; 697 switch (protect) { 698 case 0x0: 699 case 0x1: 700 case 0x5: 701 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 702 if (prot_type == TARGET_DIF_TYPE1_PROT) 703 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 704 break; 705 case 0x2: 706 if (prot_type == TARGET_DIF_TYPE1_PROT) 707 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 708 break; 709 case 0x3: 710 cmd->prot_checks = 0; 711 break; 712 case 0x4: 713 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 714 break; 715 default: 716 pr_err("Unsupported protect field %d\n", protect); 717 return -EINVAL; 718 } 719 } 720 721 return 0; 722 } 723 724 static sense_reason_t 725 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, 726 u32 sectors, bool is_write) 727 { 728 u8 protect = cdb[1] >> 5; 729 int sp_ops = cmd->se_sess->sup_prot_ops; 730 int pi_prot_type = dev->dev_attrib.pi_prot_type; 731 bool fabric_prot = false; 732 733 if (!cmd->t_prot_sg || !cmd->t_prot_nents) { 734 if (unlikely(protect && 735 !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) { 736 pr_err("CDB contains protect bit, but device + fabric does" 737 " not advertise PROTECT=1 feature bit\n"); 738 return TCM_INVALID_CDB_FIELD; 739 } 740 if (cmd->prot_pto) 741 return TCM_NO_SENSE; 742 } 743 744 switch (dev->dev_attrib.pi_prot_type) { 745 case TARGET_DIF_TYPE3_PROT: 746 cmd->reftag_seed = 0xffffffff; 747 break; 748 case TARGET_DIF_TYPE2_PROT: 749 if (protect) 750 return TCM_INVALID_CDB_FIELD; 751 752 cmd->reftag_seed = cmd->t_task_lba; 753 break; 754 case TARGET_DIF_TYPE1_PROT: 755 cmd->reftag_seed = cmd->t_task_lba; 756 break; 757 case TARGET_DIF_TYPE0_PROT: 758 /* 759 * See if the fabric supports T10-PI, and the session has been 760 * configured to allow export PROTECT=1 feature bit with backend 761 * devices that don't support T10-PI. 762 */ 763 fabric_prot = is_write ? 764 !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) : 765 !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT)); 766 767 if (fabric_prot && cmd->se_sess->sess_prot_type) { 768 pi_prot_type = cmd->se_sess->sess_prot_type; 769 break; 770 } 771 if (!protect) 772 return TCM_NO_SENSE; 773 /* Fallthrough */ 774 default: 775 pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " 776 "PROTECT: 0x%02x\n", cdb[0], protect); 777 return TCM_INVALID_CDB_FIELD; 778 } 779 780 if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd)) 781 return TCM_INVALID_CDB_FIELD; 782 783 cmd->prot_type = pi_prot_type; 784 cmd->prot_length = dev->prot_length * sectors; 785 786 /** 787 * In case protection information exists over the wire 788 * we modify command data length to describe pure data. 789 * The actual transfer length is data length + protection 790 * length 791 **/ 792 if (protect) 793 cmd->data_length = sectors * dev->dev_attrib.block_size; 794 795 pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " 796 "prot_op=%d prot_checks=%d\n", 797 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, 798 cmd->prot_op, cmd->prot_checks); 799 800 return TCM_NO_SENSE; 801 } 802 803 static int 804 sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) 805 { 806 if (cdb[1] & 0x10) { 807 /* see explanation in spc_emulate_modesense */ 808 if (!target_check_fua(dev)) { 809 pr_err("Got CDB: 0x%02x with DPO bit set, but device" 810 " does not advertise support for DPO\n", cdb[0]); 811 return -EINVAL; 812 } 813 } 814 if (cdb[1] & 0x8) { 815 if (!target_check_fua(dev)) { 816 pr_err("Got CDB: 0x%02x with FUA bit set, but device" 817 " does not advertise support for FUA write\n", 818 cdb[0]); 819 return -EINVAL; 820 } 821 cmd->se_cmd_flags |= SCF_FUA; 822 } 823 return 0; 824 } 825 826 sense_reason_t 827 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) 828 { 829 struct se_device *dev = cmd->se_dev; 830 unsigned char *cdb = cmd->t_task_cdb; 831 unsigned int size; 832 u32 sectors = 0; 833 sense_reason_t ret; 834 835 cmd->protocol_data = ops; 836 837 switch (cdb[0]) { 838 case READ_6: 839 sectors = transport_get_sectors_6(cdb); 840 cmd->t_task_lba = transport_lba_21(cdb); 841 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 842 cmd->execute_cmd = sbc_execute_rw; 843 break; 844 case READ_10: 845 sectors = transport_get_sectors_10(cdb); 846 cmd->t_task_lba = transport_lba_32(cdb); 847 848 if (sbc_check_dpofua(dev, cmd, cdb)) 849 return TCM_INVALID_CDB_FIELD; 850 851 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 852 if (ret) 853 return ret; 854 855 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 856 cmd->execute_cmd = sbc_execute_rw; 857 break; 858 case READ_12: 859 sectors = transport_get_sectors_12(cdb); 860 cmd->t_task_lba = transport_lba_32(cdb); 861 862 if (sbc_check_dpofua(dev, cmd, cdb)) 863 return TCM_INVALID_CDB_FIELD; 864 865 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 866 if (ret) 867 return ret; 868 869 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 870 cmd->execute_cmd = sbc_execute_rw; 871 break; 872 case READ_16: 873 sectors = transport_get_sectors_16(cdb); 874 cmd->t_task_lba = transport_lba_64(cdb); 875 876 if (sbc_check_dpofua(dev, cmd, cdb)) 877 return TCM_INVALID_CDB_FIELD; 878 879 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 880 if (ret) 881 return ret; 882 883 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 884 cmd->execute_cmd = sbc_execute_rw; 885 break; 886 case WRITE_6: 887 sectors = transport_get_sectors_6(cdb); 888 cmd->t_task_lba = transport_lba_21(cdb); 889 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 890 cmd->execute_cmd = sbc_execute_rw; 891 break; 892 case WRITE_10: 893 case WRITE_VERIFY: 894 sectors = transport_get_sectors_10(cdb); 895 cmd->t_task_lba = transport_lba_32(cdb); 896 897 if (sbc_check_dpofua(dev, cmd, cdb)) 898 return TCM_INVALID_CDB_FIELD; 899 900 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 901 if (ret) 902 return ret; 903 904 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 905 cmd->execute_cmd = sbc_execute_rw; 906 break; 907 case WRITE_12: 908 sectors = transport_get_sectors_12(cdb); 909 cmd->t_task_lba = transport_lba_32(cdb); 910 911 if (sbc_check_dpofua(dev, cmd, cdb)) 912 return TCM_INVALID_CDB_FIELD; 913 914 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 915 if (ret) 916 return ret; 917 918 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 919 cmd->execute_cmd = sbc_execute_rw; 920 break; 921 case WRITE_16: 922 sectors = transport_get_sectors_16(cdb); 923 cmd->t_task_lba = transport_lba_64(cdb); 924 925 if (sbc_check_dpofua(dev, cmd, cdb)) 926 return TCM_INVALID_CDB_FIELD; 927 928 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 929 if (ret) 930 return ret; 931 932 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 933 cmd->execute_cmd = sbc_execute_rw; 934 break; 935 case XDWRITEREAD_10: 936 if (cmd->data_direction != DMA_TO_DEVICE || 937 !(cmd->se_cmd_flags & SCF_BIDI)) 938 return TCM_INVALID_CDB_FIELD; 939 sectors = transport_get_sectors_10(cdb); 940 941 if (sbc_check_dpofua(dev, cmd, cdb)) 942 return TCM_INVALID_CDB_FIELD; 943 944 cmd->t_task_lba = transport_lba_32(cdb); 945 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 946 947 /* 948 * Setup BIDI XOR callback to be run after I/O completion. 949 */ 950 cmd->execute_cmd = sbc_execute_rw; 951 cmd->transport_complete_callback = &xdreadwrite_callback; 952 break; 953 case VARIABLE_LENGTH_CMD: 954 { 955 u16 service_action = get_unaligned_be16(&cdb[8]); 956 switch (service_action) { 957 case XDWRITEREAD_32: 958 sectors = transport_get_sectors_32(cdb); 959 960 if (sbc_check_dpofua(dev, cmd, cdb)) 961 return TCM_INVALID_CDB_FIELD; 962 /* 963 * Use WRITE_32 and READ_32 opcodes for the emulated 964 * XDWRITE_READ_32 logic. 965 */ 966 cmd->t_task_lba = transport_lba_64_ext(cdb); 967 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 968 969 /* 970 * Setup BIDI XOR callback to be run during after I/O 971 * completion. 972 */ 973 cmd->execute_cmd = sbc_execute_rw; 974 cmd->transport_complete_callback = &xdreadwrite_callback; 975 break; 976 case WRITE_SAME_32: 977 sectors = transport_get_sectors_32(cdb); 978 if (!sectors) { 979 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 980 " supported\n"); 981 return TCM_INVALID_CDB_FIELD; 982 } 983 984 size = sbc_get_size(cmd, 1); 985 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 986 987 ret = sbc_setup_write_same(cmd, &cdb[10], ops); 988 if (ret) 989 return ret; 990 break; 991 default: 992 pr_err("VARIABLE_LENGTH_CMD service action" 993 " 0x%04x not supported\n", service_action); 994 return TCM_UNSUPPORTED_SCSI_OPCODE; 995 } 996 break; 997 } 998 case COMPARE_AND_WRITE: 999 sectors = cdb[13]; 1000 /* 1001 * Currently enforce COMPARE_AND_WRITE for a single sector 1002 */ 1003 if (sectors > 1) { 1004 pr_err("COMPARE_AND_WRITE contains NoLB: %u greater" 1005 " than 1\n", sectors); 1006 return TCM_INVALID_CDB_FIELD; 1007 } 1008 if (sbc_check_dpofua(dev, cmd, cdb)) 1009 return TCM_INVALID_CDB_FIELD; 1010 1011 /* 1012 * Double size because we have two buffers, note that 1013 * zero is not an error.. 1014 */ 1015 size = 2 * sbc_get_size(cmd, sectors); 1016 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 1017 cmd->t_task_nolb = sectors; 1018 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; 1019 cmd->execute_cmd = sbc_compare_and_write; 1020 cmd->transport_complete_callback = compare_and_write_callback; 1021 break; 1022 case READ_CAPACITY: 1023 size = READ_CAP_LEN; 1024 cmd->execute_cmd = sbc_emulate_readcapacity; 1025 break; 1026 case SERVICE_ACTION_IN_16: 1027 switch (cmd->t_task_cdb[1] & 0x1f) { 1028 case SAI_READ_CAPACITY_16: 1029 cmd->execute_cmd = sbc_emulate_readcapacity_16; 1030 break; 1031 case SAI_REPORT_REFERRALS: 1032 cmd->execute_cmd = target_emulate_report_referrals; 1033 break; 1034 default: 1035 pr_err("Unsupported SA: 0x%02x\n", 1036 cmd->t_task_cdb[1] & 0x1f); 1037 return TCM_INVALID_CDB_FIELD; 1038 } 1039 size = (cdb[10] << 24) | (cdb[11] << 16) | 1040 (cdb[12] << 8) | cdb[13]; 1041 break; 1042 case SYNCHRONIZE_CACHE: 1043 case SYNCHRONIZE_CACHE_16: 1044 if (cdb[0] == SYNCHRONIZE_CACHE) { 1045 sectors = transport_get_sectors_10(cdb); 1046 cmd->t_task_lba = transport_lba_32(cdb); 1047 } else { 1048 sectors = transport_get_sectors_16(cdb); 1049 cmd->t_task_lba = transport_lba_64(cdb); 1050 } 1051 if (ops->execute_sync_cache) { 1052 cmd->execute_cmd = ops->execute_sync_cache; 1053 goto check_lba; 1054 } 1055 size = 0; 1056 cmd->execute_cmd = sbc_emulate_noop; 1057 break; 1058 case UNMAP: 1059 if (!ops->execute_unmap) 1060 return TCM_UNSUPPORTED_SCSI_OPCODE; 1061 1062 if (!dev->dev_attrib.emulate_tpu) { 1063 pr_err("Got UNMAP, but backend device has" 1064 " emulate_tpu disabled\n"); 1065 return TCM_UNSUPPORTED_SCSI_OPCODE; 1066 } 1067 size = get_unaligned_be16(&cdb[7]); 1068 cmd->execute_cmd = sbc_execute_unmap; 1069 break; 1070 case WRITE_SAME_16: 1071 sectors = transport_get_sectors_16(cdb); 1072 if (!sectors) { 1073 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 1074 return TCM_INVALID_CDB_FIELD; 1075 } 1076 1077 size = sbc_get_size(cmd, 1); 1078 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 1079 1080 ret = sbc_setup_write_same(cmd, &cdb[1], ops); 1081 if (ret) 1082 return ret; 1083 break; 1084 case WRITE_SAME: 1085 sectors = transport_get_sectors_10(cdb); 1086 if (!sectors) { 1087 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 1088 return TCM_INVALID_CDB_FIELD; 1089 } 1090 1091 size = sbc_get_size(cmd, 1); 1092 cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 1093 1094 /* 1095 * Follow sbcr26 with WRITE_SAME (10) and check for the existence 1096 * of byte 1 bit 3 UNMAP instead of original reserved field 1097 */ 1098 ret = sbc_setup_write_same(cmd, &cdb[1], ops); 1099 if (ret) 1100 return ret; 1101 break; 1102 case VERIFY: 1103 size = 0; 1104 sectors = transport_get_sectors_10(cdb); 1105 cmd->t_task_lba = transport_lba_32(cdb); 1106 cmd->execute_cmd = sbc_emulate_noop; 1107 goto check_lba; 1108 case REZERO_UNIT: 1109 case SEEK_6: 1110 case SEEK_10: 1111 /* 1112 * There are still clients out there which use these old SCSI-2 1113 * commands. This mainly happens when running VMs with legacy 1114 * guest systems, connected via SCSI command pass-through to 1115 * iSCSI targets. Make them happy and return status GOOD. 1116 */ 1117 size = 0; 1118 cmd->execute_cmd = sbc_emulate_noop; 1119 break; 1120 case START_STOP: 1121 size = 0; 1122 cmd->execute_cmd = sbc_emulate_startstop; 1123 break; 1124 default: 1125 ret = spc_parse_cdb(cmd, &size); 1126 if (ret) 1127 return ret; 1128 } 1129 1130 /* reject any command that we don't have a handler for */ 1131 if (!cmd->execute_cmd) 1132 return TCM_UNSUPPORTED_SCSI_OPCODE; 1133 1134 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1135 unsigned long long end_lba; 1136 check_lba: 1137 end_lba = dev->transport->get_blocks(dev) + 1; 1138 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || 1139 ((cmd->t_task_lba + sectors) > end_lba)) { 1140 pr_err("cmd exceeds last lba %llu " 1141 "(lba %llu, sectors %u)\n", 1142 end_lba, cmd->t_task_lba, sectors); 1143 return TCM_ADDRESS_OUT_OF_RANGE; 1144 } 1145 1146 if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) 1147 size = sbc_get_size(cmd, sectors); 1148 } 1149 1150 return target_cmd_size_check(cmd, size); 1151 } 1152 EXPORT_SYMBOL(sbc_parse_cdb); 1153 1154 u32 sbc_get_device_type(struct se_device *dev) 1155 { 1156 return TYPE_DISK; 1157 } 1158 EXPORT_SYMBOL(sbc_get_device_type); 1159 1160 static sense_reason_t 1161 sbc_execute_unmap(struct se_cmd *cmd) 1162 { 1163 struct sbc_ops *ops = cmd->protocol_data; 1164 struct se_device *dev = cmd->se_dev; 1165 unsigned char *buf, *ptr = NULL; 1166 sector_t lba; 1167 int size; 1168 u32 range; 1169 sense_reason_t ret = 0; 1170 int dl, bd_dl; 1171 1172 /* We never set ANC_SUP */ 1173 if (cmd->t_task_cdb[1]) 1174 return TCM_INVALID_CDB_FIELD; 1175 1176 if (cmd->data_length == 0) { 1177 target_complete_cmd(cmd, SAM_STAT_GOOD); 1178 return 0; 1179 } 1180 1181 if (cmd->data_length < 8) { 1182 pr_warn("UNMAP parameter list length %u too small\n", 1183 cmd->data_length); 1184 return TCM_PARAMETER_LIST_LENGTH_ERROR; 1185 } 1186 1187 buf = transport_kmap_data_sg(cmd); 1188 if (!buf) 1189 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1190 1191 dl = get_unaligned_be16(&buf[0]); 1192 bd_dl = get_unaligned_be16(&buf[2]); 1193 1194 size = cmd->data_length - 8; 1195 if (bd_dl > size) 1196 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", 1197 cmd->data_length, bd_dl); 1198 else 1199 size = bd_dl; 1200 1201 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { 1202 ret = TCM_INVALID_PARAMETER_LIST; 1203 goto err; 1204 } 1205 1206 /* First UNMAP block descriptor starts at 8 byte offset */ 1207 ptr = &buf[8]; 1208 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" 1209 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 1210 1211 while (size >= 16) { 1212 lba = get_unaligned_be64(&ptr[0]); 1213 range = get_unaligned_be32(&ptr[8]); 1214 pr_debug("UNMAP: Using lba: %llu and range: %u\n", 1215 (unsigned long long)lba, range); 1216 1217 if (range > dev->dev_attrib.max_unmap_lba_count) { 1218 ret = TCM_INVALID_PARAMETER_LIST; 1219 goto err; 1220 } 1221 1222 if (lba + range > dev->transport->get_blocks(dev) + 1) { 1223 ret = TCM_ADDRESS_OUT_OF_RANGE; 1224 goto err; 1225 } 1226 1227 ret = ops->execute_unmap(cmd, lba, range); 1228 if (ret) 1229 goto err; 1230 1231 ptr += 16; 1232 size -= 16; 1233 } 1234 1235 err: 1236 transport_kunmap_data_sg(cmd); 1237 if (!ret) 1238 target_complete_cmd(cmd, GOOD); 1239 return ret; 1240 } 1241 1242 void 1243 sbc_dif_generate(struct se_cmd *cmd) 1244 { 1245 struct se_device *dev = cmd->se_dev; 1246 struct t10_pi_tuple *sdt; 1247 struct scatterlist *dsg = cmd->t_data_sg, *psg; 1248 sector_t sector = cmd->t_task_lba; 1249 void *daddr, *paddr; 1250 int i, j, offset = 0; 1251 unsigned int block_size = dev->dev_attrib.block_size; 1252 1253 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1254 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1255 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1256 1257 for (j = 0; j < psg->length; 1258 j += sizeof(*sdt)) { 1259 __u16 crc; 1260 unsigned int avail; 1261 1262 if (offset >= dsg->length) { 1263 offset -= dsg->length; 1264 kunmap_atomic(daddr - dsg->offset); 1265 dsg = sg_next(dsg); 1266 if (!dsg) { 1267 kunmap_atomic(paddr - psg->offset); 1268 return; 1269 } 1270 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1271 } 1272 1273 sdt = paddr + j; 1274 avail = min(block_size, dsg->length - offset); 1275 crc = crc_t10dif(daddr + offset, avail); 1276 if (avail < block_size) { 1277 kunmap_atomic(daddr - dsg->offset); 1278 dsg = sg_next(dsg); 1279 if (!dsg) { 1280 kunmap_atomic(paddr - psg->offset); 1281 return; 1282 } 1283 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1284 offset = block_size - avail; 1285 crc = crc_t10dif_update(crc, daddr, offset); 1286 } else { 1287 offset += block_size; 1288 } 1289 1290 sdt->guard_tag = cpu_to_be16(crc); 1291 if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) 1292 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); 1293 sdt->app_tag = 0; 1294 1295 pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x" 1296 " app_tag: 0x%04x ref_tag: %u\n", 1297 (cmd->data_direction == DMA_TO_DEVICE) ? 1298 "WRITE" : "READ", (unsigned long long)sector, 1299 sdt->guard_tag, sdt->app_tag, 1300 be32_to_cpu(sdt->ref_tag)); 1301 1302 sector++; 1303 } 1304 1305 kunmap_atomic(daddr - dsg->offset); 1306 kunmap_atomic(paddr - psg->offset); 1307 } 1308 } 1309 1310 static sense_reason_t 1311 sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt, 1312 __u16 crc, sector_t sector, unsigned int ei_lba) 1313 { 1314 __be16 csum; 1315 1316 if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) 1317 goto check_ref; 1318 1319 csum = cpu_to_be16(crc); 1320 1321 if (sdt->guard_tag != csum) { 1322 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" 1323 " csum 0x%04x\n", (unsigned long long)sector, 1324 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); 1325 return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1326 } 1327 1328 check_ref: 1329 if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)) 1330 return 0; 1331 1332 if (cmd->prot_type == TARGET_DIF_TYPE1_PROT && 1333 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 1334 pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" 1335 " sector MSB: 0x%08x\n", (unsigned long long)sector, 1336 be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); 1337 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1338 } 1339 1340 if (cmd->prot_type == TARGET_DIF_TYPE2_PROT && 1341 be32_to_cpu(sdt->ref_tag) != ei_lba) { 1342 pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" 1343 " ei_lba: 0x%08x\n", (unsigned long long)sector, 1344 be32_to_cpu(sdt->ref_tag), ei_lba); 1345 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1346 } 1347 1348 return 0; 1349 } 1350 1351 void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, 1352 struct scatterlist *sg, int sg_off) 1353 { 1354 struct se_device *dev = cmd->se_dev; 1355 struct scatterlist *psg; 1356 void *paddr, *addr; 1357 unsigned int i, len, left; 1358 unsigned int offset = sg_off; 1359 1360 if (!sg) 1361 return; 1362 1363 left = sectors * dev->prot_length; 1364 1365 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1366 unsigned int psg_len, copied = 0; 1367 1368 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1369 psg_len = min(left, psg->length); 1370 while (psg_len) { 1371 len = min(psg_len, sg->length - offset); 1372 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; 1373 1374 if (read) 1375 memcpy(paddr + copied, addr, len); 1376 else 1377 memcpy(addr, paddr + copied, len); 1378 1379 left -= len; 1380 offset += len; 1381 copied += len; 1382 psg_len -= len; 1383 1384 kunmap_atomic(addr - sg->offset - offset); 1385 1386 if (offset >= sg->length) { 1387 sg = sg_next(sg); 1388 offset = 0; 1389 } 1390 } 1391 kunmap_atomic(paddr - psg->offset); 1392 } 1393 } 1394 EXPORT_SYMBOL(sbc_dif_copy_prot); 1395 1396 sense_reason_t 1397 sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1398 unsigned int ei_lba, struct scatterlist *psg, int psg_off) 1399 { 1400 struct se_device *dev = cmd->se_dev; 1401 struct t10_pi_tuple *sdt; 1402 struct scatterlist *dsg = cmd->t_data_sg; 1403 sector_t sector = start; 1404 void *daddr, *paddr; 1405 int i; 1406 sense_reason_t rc; 1407 int dsg_off = 0; 1408 unsigned int block_size = dev->dev_attrib.block_size; 1409 1410 for (; psg && sector < start + sectors; psg = sg_next(psg)) { 1411 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1412 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1413 1414 for (i = psg_off; i < psg->length && 1415 sector < start + sectors; 1416 i += sizeof(*sdt)) { 1417 __u16 crc; 1418 unsigned int avail; 1419 1420 if (dsg_off >= dsg->length) { 1421 dsg_off -= dsg->length; 1422 kunmap_atomic(daddr - dsg->offset); 1423 dsg = sg_next(dsg); 1424 if (!dsg) { 1425 kunmap_atomic(paddr - psg->offset); 1426 return 0; 1427 } 1428 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1429 } 1430 1431 sdt = paddr + i; 1432 1433 pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" 1434 " app_tag: 0x%04x ref_tag: %u\n", 1435 (unsigned long long)sector, sdt->guard_tag, 1436 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1437 1438 if (sdt->app_tag == cpu_to_be16(0xffff)) { 1439 dsg_off += block_size; 1440 goto next; 1441 } 1442 1443 avail = min(block_size, dsg->length - dsg_off); 1444 crc = crc_t10dif(daddr + dsg_off, avail); 1445 if (avail < block_size) { 1446 kunmap_atomic(daddr - dsg->offset); 1447 dsg = sg_next(dsg); 1448 if (!dsg) { 1449 kunmap_atomic(paddr - psg->offset); 1450 return 0; 1451 } 1452 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1453 dsg_off = block_size - avail; 1454 crc = crc_t10dif_update(crc, daddr, dsg_off); 1455 } else { 1456 dsg_off += block_size; 1457 } 1458 1459 rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba); 1460 if (rc) { 1461 kunmap_atomic(daddr - dsg->offset); 1462 kunmap_atomic(paddr - psg->offset); 1463 cmd->bad_sector = sector; 1464 return rc; 1465 } 1466 next: 1467 sector++; 1468 ei_lba++; 1469 } 1470 1471 psg_off = 0; 1472 kunmap_atomic(daddr - dsg->offset); 1473 kunmap_atomic(paddr - psg->offset); 1474 } 1475 1476 return 0; 1477 } 1478 EXPORT_SYMBOL(sbc_dif_verify); 1479