1 /* 2 * SCSI Block Commands (SBC) parsing and emulation. 3 * 4 * (c) Copyright 2002-2013 Datera, Inc. 5 * 6 * Nicholas A. Bellinger <nab@kernel.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 */ 22 23 #include <linux/kernel.h> 24 #include <linux/module.h> 25 #include <linux/ratelimit.h> 26 #include <linux/crc-t10dif.h> 27 #include <asm/unaligned.h> 28 #include <scsi/scsi_proto.h> 29 #include <scsi/scsi_tcq.h> 30 31 #include <target/target_core_base.h> 32 #include <target/target_core_backend.h> 33 #include <target/target_core_fabric.h> 34 35 #include "target_core_internal.h" 36 #include "target_core_ua.h" 37 #include "target_core_alua.h" 38 39 static sense_reason_t 40 sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); 41 static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); 42 43 static sense_reason_t 44 sbc_emulate_readcapacity(struct se_cmd *cmd) 45 { 46 struct se_device *dev = cmd->se_dev; 47 unsigned char *cdb = cmd->t_task_cdb; 48 unsigned long long blocks_long = dev->transport->get_blocks(dev); 49 unsigned char *rbuf; 50 unsigned char buf[8]; 51 u32 blocks; 52 53 /* 54 * SBC-2 says: 55 * If the PMI bit is set to zero and the LOGICAL BLOCK 56 * ADDRESS field is not set to zero, the device server shall 57 * terminate the command with CHECK CONDITION status with 58 * the sense key set to ILLEGAL REQUEST and the additional 59 * sense code set to INVALID FIELD IN CDB. 60 * 61 * In SBC-3, these fields are obsolete, but some SCSI 62 * compliance tests actually check this, so we might as well 63 * follow SBC-2. 64 */ 65 if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5])) 66 return TCM_INVALID_CDB_FIELD; 67 68 if (blocks_long >= 0x00000000ffffffff) 69 blocks = 0xffffffff; 70 else 71 blocks = (u32)blocks_long; 72 73 buf[0] = (blocks >> 24) & 0xff; 74 buf[1] = (blocks >> 16) & 0xff; 75 buf[2] = (blocks >> 8) & 0xff; 76 buf[3] = blocks & 0xff; 77 buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; 78 buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; 79 buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; 80 buf[7] = dev->dev_attrib.block_size & 0xff; 81 82 rbuf = transport_kmap_data_sg(cmd); 83 if (rbuf) { 84 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 85 transport_kunmap_data_sg(cmd); 86 } 87 88 target_complete_cmd_with_length(cmd, GOOD, 8); 89 return 0; 90 } 91 92 static sense_reason_t 93 sbc_emulate_readcapacity_16(struct se_cmd *cmd) 94 { 95 struct se_device *dev = cmd->se_dev; 96 struct se_session *sess = cmd->se_sess; 97 int pi_prot_type = dev->dev_attrib.pi_prot_type; 98 99 unsigned char *rbuf; 100 unsigned char buf[32]; 101 unsigned long long blocks = dev->transport->get_blocks(dev); 102 103 memset(buf, 0, sizeof(buf)); 104 buf[0] = (blocks >> 56) & 0xff; 105 buf[1] = (blocks >> 48) & 0xff; 106 buf[2] = (blocks >> 40) & 0xff; 107 buf[3] = (blocks >> 32) & 0xff; 108 buf[4] = (blocks >> 24) & 0xff; 109 buf[5] = (blocks >> 16) & 0xff; 110 buf[6] = (blocks >> 8) & 0xff; 111 buf[7] = blocks & 0xff; 112 buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; 113 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 114 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 115 buf[11] = dev->dev_attrib.block_size & 0xff; 116 /* 117 * Set P_TYPE and PROT_EN bits for DIF support 118 */ 119 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 120 /* 121 * Only override a device's pi_prot_type if no T10-PI is 122 * available, and sess_prot_type has been explicitly enabled. 123 */ 124 if (!pi_prot_type) 125 pi_prot_type = sess->sess_prot_type; 126 127 if (pi_prot_type) 128 buf[12] = (pi_prot_type - 1) << 1 | 0x1; 129 } 130 131 if (dev->transport->get_lbppbe) 132 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; 133 134 if (dev->transport->get_alignment_offset_lbas) { 135 u16 lalba = dev->transport->get_alignment_offset_lbas(dev); 136 buf[14] = (lalba >> 8) & 0x3f; 137 buf[15] = lalba & 0xff; 138 } 139 140 /* 141 * Set Thin Provisioning Enable bit following sbc3r22 in section 142 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 143 */ 144 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 145 buf[14] |= 0x80; 146 147 rbuf = transport_kmap_data_sg(cmd); 148 if (rbuf) { 149 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 150 transport_kunmap_data_sg(cmd); 151 } 152 153 target_complete_cmd_with_length(cmd, GOOD, 32); 154 return 0; 155 } 156 157 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) 158 { 159 u32 num_blocks; 160 161 if (cmd->t_task_cdb[0] == WRITE_SAME) 162 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); 163 else if (cmd->t_task_cdb[0] == WRITE_SAME_16) 164 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); 165 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ 166 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); 167 168 /* 169 * Use the explicit range when non zero is supplied, otherwise calculate 170 * the remaining range based on ->get_blocks() - starting LBA. 171 */ 172 if (num_blocks) 173 return num_blocks; 174 175 return cmd->se_dev->transport->get_blocks(cmd->se_dev) - 176 cmd->t_task_lba + 1; 177 } 178 EXPORT_SYMBOL(sbc_get_write_same_sectors); 179 180 static sense_reason_t 181 sbc_execute_write_same_unmap(struct se_cmd *cmd) 182 { 183 struct sbc_ops *ops = cmd->protocol_data; 184 sector_t nolb = sbc_get_write_same_sectors(cmd); 185 sense_reason_t ret; 186 187 if (nolb) { 188 ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb); 189 if (ret) 190 return ret; 191 } 192 193 target_complete_cmd(cmd, GOOD); 194 return 0; 195 } 196 197 static sense_reason_t 198 sbc_emulate_noop(struct se_cmd *cmd) 199 { 200 target_complete_cmd(cmd, GOOD); 201 return 0; 202 } 203 204 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 205 { 206 return cmd->se_dev->dev_attrib.block_size * sectors; 207 } 208 209 static inline u32 transport_get_sectors_6(unsigned char *cdb) 210 { 211 /* 212 * Use 8-bit sector value. SBC-3 says: 213 * 214 * A TRANSFER LENGTH field set to zero specifies that 256 215 * logical blocks shall be written. Any other value 216 * specifies the number of logical blocks that shall be 217 * written. 218 */ 219 return cdb[4] ? : 256; 220 } 221 222 static inline u32 transport_get_sectors_10(unsigned char *cdb) 223 { 224 return (u32)(cdb[7] << 8) + cdb[8]; 225 } 226 227 static inline u32 transport_get_sectors_12(unsigned char *cdb) 228 { 229 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 230 } 231 232 static inline u32 transport_get_sectors_16(unsigned char *cdb) 233 { 234 return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 235 (cdb[12] << 8) + cdb[13]; 236 } 237 238 /* 239 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 240 */ 241 static inline u32 transport_get_sectors_32(unsigned char *cdb) 242 { 243 return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 244 (cdb[30] << 8) + cdb[31]; 245 246 } 247 248 static inline u32 transport_lba_21(unsigned char *cdb) 249 { 250 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 251 } 252 253 static inline u32 transport_lba_32(unsigned char *cdb) 254 { 255 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 256 } 257 258 static inline unsigned long long transport_lba_64(unsigned char *cdb) 259 { 260 unsigned int __v1, __v2; 261 262 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 263 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 264 265 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 266 } 267 268 /* 269 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 270 */ 271 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 272 { 273 unsigned int __v1, __v2; 274 275 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 276 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 277 278 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 279 } 280 281 static sense_reason_t 282 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) 283 { 284 struct se_device *dev = cmd->se_dev; 285 sector_t end_lba = dev->transport->get_blocks(dev) + 1; 286 unsigned int sectors = sbc_get_write_same_sectors(cmd); 287 sense_reason_t ret; 288 289 if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 290 pr_err("WRITE_SAME PBDATA and LBDATA" 291 " bits not supported for Block Discard" 292 " Emulation\n"); 293 return TCM_UNSUPPORTED_SCSI_OPCODE; 294 } 295 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { 296 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", 297 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 298 return TCM_INVALID_CDB_FIELD; 299 } 300 /* 301 * Sanity check for LBA wrap and request past end of device. 302 */ 303 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || 304 ((cmd->t_task_lba + sectors) > end_lba)) { 305 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", 306 (unsigned long long)end_lba, cmd->t_task_lba, sectors); 307 return TCM_ADDRESS_OUT_OF_RANGE; 308 } 309 310 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ 311 if (flags[0] & 0x10) { 312 pr_warn("WRITE SAME with ANCHOR not supported\n"); 313 return TCM_INVALID_CDB_FIELD; 314 } 315 /* 316 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 317 * translated into block discard requests within backend code. 318 */ 319 if (flags[0] & 0x08) { 320 if (!ops->execute_unmap) 321 return TCM_UNSUPPORTED_SCSI_OPCODE; 322 323 if (!dev->dev_attrib.emulate_tpws) { 324 pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device" 325 " has emulate_tpws disabled\n"); 326 return TCM_UNSUPPORTED_SCSI_OPCODE; 327 } 328 cmd->execute_cmd = sbc_execute_write_same_unmap; 329 return 0; 330 } 331 if (!ops->execute_write_same) 332 return TCM_UNSUPPORTED_SCSI_OPCODE; 333 334 ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); 335 if (ret) 336 return ret; 337 338 cmd->execute_cmd = ops->execute_write_same; 339 return 0; 340 } 341 342 static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) 343 { 344 unsigned char *buf, *addr; 345 struct scatterlist *sg; 346 unsigned int offset; 347 sense_reason_t ret = TCM_NO_SENSE; 348 int i, count; 349 /* 350 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 351 * 352 * 1) read the specified logical block(s); 353 * 2) transfer logical blocks from the data-out buffer; 354 * 3) XOR the logical blocks transferred from the data-out buffer with 355 * the logical blocks read, storing the resulting XOR data in a buffer; 356 * 4) if the DISABLE WRITE bit is set to zero, then write the logical 357 * blocks transferred from the data-out buffer; and 358 * 5) transfer the resulting XOR data to the data-in buffer. 359 */ 360 buf = kmalloc(cmd->data_length, GFP_KERNEL); 361 if (!buf) { 362 pr_err("Unable to allocate xor_callback buf\n"); 363 return TCM_OUT_OF_RESOURCES; 364 } 365 /* 366 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 367 * into the locally allocated *buf 368 */ 369 sg_copy_to_buffer(cmd->t_data_sg, 370 cmd->t_data_nents, 371 buf, 372 cmd->data_length); 373 374 /* 375 * Now perform the XOR against the BIDI read memory located at 376 * cmd->t_mem_bidi_list 377 */ 378 379 offset = 0; 380 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 381 addr = kmap_atomic(sg_page(sg)); 382 if (!addr) { 383 ret = TCM_OUT_OF_RESOURCES; 384 goto out; 385 } 386 387 for (i = 0; i < sg->length; i++) 388 *(addr + sg->offset + i) ^= *(buf + offset + i); 389 390 offset += sg->length; 391 kunmap_atomic(addr); 392 } 393 394 out: 395 kfree(buf); 396 return ret; 397 } 398 399 static sense_reason_t 400 sbc_execute_rw(struct se_cmd *cmd) 401 { 402 struct sbc_ops *ops = cmd->protocol_data; 403 404 return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, 405 cmd->data_direction); 406 } 407 408 static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) 409 { 410 struct se_device *dev = cmd->se_dev; 411 412 /* 413 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through 414 * within target_complete_ok_work() if the command was successfully 415 * sent to the backend driver. 416 */ 417 spin_lock_irq(&cmd->t_state_lock); 418 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) 419 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 420 spin_unlock_irq(&cmd->t_state_lock); 421 422 /* 423 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 424 * before the original READ I/O submission. 425 */ 426 up(&dev->caw_sem); 427 428 return TCM_NO_SENSE; 429 } 430 431 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) 432 { 433 struct se_device *dev = cmd->se_dev; 434 struct scatterlist *write_sg = NULL, *sg; 435 unsigned char *buf = NULL, *addr; 436 struct sg_mapping_iter m; 437 unsigned int offset = 0, len; 438 unsigned int nlbas = cmd->t_task_nolb; 439 unsigned int block_size = dev->dev_attrib.block_size; 440 unsigned int compare_len = (nlbas * block_size); 441 sense_reason_t ret = TCM_NO_SENSE; 442 int rc, i; 443 444 /* 445 * Handle early failure in transport_generic_request_failure(), 446 * which will not have taken ->caw_sem yet.. 447 */ 448 if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) 449 return TCM_NO_SENSE; 450 /* 451 * Handle special case for zero-length COMPARE_AND_WRITE 452 */ 453 if (!cmd->data_length) 454 goto out; 455 /* 456 * Immediately exit + release dev->caw_sem if command has already 457 * been failed with a non-zero SCSI status. 458 */ 459 if (cmd->scsi_status) { 460 pr_err("compare_and_write_callback: non zero scsi_status:" 461 " 0x%02x\n", cmd->scsi_status); 462 goto out; 463 } 464 465 buf = kzalloc(cmd->data_length, GFP_KERNEL); 466 if (!buf) { 467 pr_err("Unable to allocate compare_and_write buf\n"); 468 ret = TCM_OUT_OF_RESOURCES; 469 goto out; 470 } 471 472 write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, 473 GFP_KERNEL); 474 if (!write_sg) { 475 pr_err("Unable to allocate compare_and_write sg\n"); 476 ret = TCM_OUT_OF_RESOURCES; 477 goto out; 478 } 479 sg_init_table(write_sg, cmd->t_data_nents); 480 /* 481 * Setup verify and write data payloads from total NumberLBAs. 482 */ 483 rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, 484 cmd->data_length); 485 if (!rc) { 486 pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); 487 ret = TCM_OUT_OF_RESOURCES; 488 goto out; 489 } 490 /* 491 * Compare against SCSI READ payload against verify payload 492 */ 493 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { 494 addr = (unsigned char *)kmap_atomic(sg_page(sg)); 495 if (!addr) { 496 ret = TCM_OUT_OF_RESOURCES; 497 goto out; 498 } 499 500 len = min(sg->length, compare_len); 501 502 if (memcmp(addr, buf + offset, len)) { 503 pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n", 504 addr, buf + offset); 505 kunmap_atomic(addr); 506 goto miscompare; 507 } 508 kunmap_atomic(addr); 509 510 offset += len; 511 compare_len -= len; 512 if (!compare_len) 513 break; 514 } 515 516 i = 0; 517 len = cmd->t_task_nolb * block_size; 518 sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); 519 /* 520 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. 521 */ 522 while (len) { 523 sg_miter_next(&m); 524 525 if (block_size < PAGE_SIZE) { 526 sg_set_page(&write_sg[i], m.page, block_size, 527 block_size); 528 } else { 529 sg_miter_next(&m); 530 sg_set_page(&write_sg[i], m.page, block_size, 531 0); 532 } 533 len -= block_size; 534 i++; 535 } 536 sg_miter_stop(&m); 537 /* 538 * Save the original SGL + nents values before updating to new 539 * assignments, to be released in transport_free_pages() -> 540 * transport_reset_sgl_orig() 541 */ 542 cmd->t_data_sg_orig = cmd->t_data_sg; 543 cmd->t_data_sg = write_sg; 544 cmd->t_data_nents_orig = cmd->t_data_nents; 545 cmd->t_data_nents = 1; 546 547 cmd->sam_task_attr = TCM_HEAD_TAG; 548 cmd->transport_complete_callback = compare_and_write_post; 549 /* 550 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler 551 * for submitting the adjusted SGL to write instance user-data. 552 */ 553 cmd->execute_cmd = sbc_execute_rw; 554 555 spin_lock_irq(&cmd->t_state_lock); 556 cmd->t_state = TRANSPORT_PROCESSING; 557 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 558 spin_unlock_irq(&cmd->t_state_lock); 559 560 __target_execute_cmd(cmd); 561 562 kfree(buf); 563 return ret; 564 565 miscompare: 566 pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n", 567 dev->transport->name); 568 ret = TCM_MISCOMPARE_VERIFY; 569 out: 570 /* 571 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in 572 * sbc_compare_and_write() before the original READ I/O submission. 573 */ 574 up(&dev->caw_sem); 575 kfree(write_sg); 576 kfree(buf); 577 return ret; 578 } 579 580 static sense_reason_t 581 sbc_compare_and_write(struct se_cmd *cmd) 582 { 583 struct sbc_ops *ops = cmd->protocol_data; 584 struct se_device *dev = cmd->se_dev; 585 sense_reason_t ret; 586 int rc; 587 /* 588 * Submit the READ first for COMPARE_AND_WRITE to perform the 589 * comparision using SGLs at cmd->t_bidi_data_sg.. 590 */ 591 rc = down_interruptible(&dev->caw_sem); 592 if (rc != 0) { 593 cmd->transport_complete_callback = NULL; 594 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 595 } 596 /* 597 * Reset cmd->data_length to individual block_size in order to not 598 * confuse backend drivers that depend on this value matching the 599 * size of the I/O being submitted. 600 */ 601 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; 602 603 ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 604 DMA_FROM_DEVICE); 605 if (ret) { 606 cmd->transport_complete_callback = NULL; 607 up(&dev->caw_sem); 608 return ret; 609 } 610 /* 611 * Unlock of dev->caw_sem to occur in compare_and_write_callback() 612 * upon MISCOMPARE, or in compare_and_write_done() upon completion 613 * of WRITE instance user-data. 614 */ 615 return TCM_NO_SENSE; 616 } 617 618 static int 619 sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type, 620 bool is_write, struct se_cmd *cmd) 621 { 622 if (is_write) { 623 cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP : 624 protect ? TARGET_PROT_DOUT_PASS : 625 TARGET_PROT_DOUT_INSERT; 626 switch (protect) { 627 case 0x0: 628 case 0x3: 629 cmd->prot_checks = 0; 630 break; 631 case 0x1: 632 case 0x5: 633 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 634 if (prot_type == TARGET_DIF_TYPE1_PROT) 635 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 636 break; 637 case 0x2: 638 if (prot_type == TARGET_DIF_TYPE1_PROT) 639 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 640 break; 641 case 0x4: 642 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 643 break; 644 default: 645 pr_err("Unsupported protect field %d\n", protect); 646 return -EINVAL; 647 } 648 } else { 649 cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT : 650 protect ? TARGET_PROT_DIN_PASS : 651 TARGET_PROT_DIN_STRIP; 652 switch (protect) { 653 case 0x0: 654 case 0x1: 655 case 0x5: 656 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 657 if (prot_type == TARGET_DIF_TYPE1_PROT) 658 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 659 break; 660 case 0x2: 661 if (prot_type == TARGET_DIF_TYPE1_PROT) 662 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 663 break; 664 case 0x3: 665 cmd->prot_checks = 0; 666 break; 667 case 0x4: 668 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 669 break; 670 default: 671 pr_err("Unsupported protect field %d\n", protect); 672 return -EINVAL; 673 } 674 } 675 676 return 0; 677 } 678 679 static sense_reason_t 680 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, 681 u32 sectors, bool is_write) 682 { 683 u8 protect = cdb[1] >> 5; 684 int sp_ops = cmd->se_sess->sup_prot_ops; 685 int pi_prot_type = dev->dev_attrib.pi_prot_type; 686 bool fabric_prot = false; 687 688 if (!cmd->t_prot_sg || !cmd->t_prot_nents) { 689 if (unlikely(protect && 690 !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) { 691 pr_err("CDB contains protect bit, but device + fabric does" 692 " not advertise PROTECT=1 feature bit\n"); 693 return TCM_INVALID_CDB_FIELD; 694 } 695 if (cmd->prot_pto) 696 return TCM_NO_SENSE; 697 } 698 699 switch (dev->dev_attrib.pi_prot_type) { 700 case TARGET_DIF_TYPE3_PROT: 701 cmd->reftag_seed = 0xffffffff; 702 break; 703 case TARGET_DIF_TYPE2_PROT: 704 if (protect) 705 return TCM_INVALID_CDB_FIELD; 706 707 cmd->reftag_seed = cmd->t_task_lba; 708 break; 709 case TARGET_DIF_TYPE1_PROT: 710 cmd->reftag_seed = cmd->t_task_lba; 711 break; 712 case TARGET_DIF_TYPE0_PROT: 713 /* 714 * See if the fabric supports T10-PI, and the session has been 715 * configured to allow export PROTECT=1 feature bit with backend 716 * devices that don't support T10-PI. 717 */ 718 fabric_prot = is_write ? 719 !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) : 720 !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT)); 721 722 if (fabric_prot && cmd->se_sess->sess_prot_type) { 723 pi_prot_type = cmd->se_sess->sess_prot_type; 724 break; 725 } 726 if (!protect) 727 return TCM_NO_SENSE; 728 /* Fallthrough */ 729 default: 730 pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " 731 "PROTECT: 0x%02x\n", cdb[0], protect); 732 return TCM_INVALID_CDB_FIELD; 733 } 734 735 if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd)) 736 return TCM_INVALID_CDB_FIELD; 737 738 cmd->prot_type = pi_prot_type; 739 cmd->prot_length = dev->prot_length * sectors; 740 741 /** 742 * In case protection information exists over the wire 743 * we modify command data length to describe pure data. 744 * The actual transfer length is data length + protection 745 * length 746 **/ 747 if (protect) 748 cmd->data_length = sectors * dev->dev_attrib.block_size; 749 750 pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " 751 "prot_op=%d prot_checks=%d\n", 752 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, 753 cmd->prot_op, cmd->prot_checks); 754 755 return TCM_NO_SENSE; 756 } 757 758 static int 759 sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) 760 { 761 if (cdb[1] & 0x10) { 762 /* see explanation in spc_emulate_modesense */ 763 if (!target_check_fua(dev)) { 764 pr_err("Got CDB: 0x%02x with DPO bit set, but device" 765 " does not advertise support for DPO\n", cdb[0]); 766 return -EINVAL; 767 } 768 } 769 if (cdb[1] & 0x8) { 770 if (!target_check_fua(dev)) { 771 pr_err("Got CDB: 0x%02x with FUA bit set, but device" 772 " does not advertise support for FUA write\n", 773 cdb[0]); 774 return -EINVAL; 775 } 776 cmd->se_cmd_flags |= SCF_FUA; 777 } 778 return 0; 779 } 780 781 sense_reason_t 782 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) 783 { 784 struct se_device *dev = cmd->se_dev; 785 unsigned char *cdb = cmd->t_task_cdb; 786 unsigned int size; 787 u32 sectors = 0; 788 sense_reason_t ret; 789 790 cmd->protocol_data = ops; 791 792 switch (cdb[0]) { 793 case READ_6: 794 sectors = transport_get_sectors_6(cdb); 795 cmd->t_task_lba = transport_lba_21(cdb); 796 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 797 cmd->execute_cmd = sbc_execute_rw; 798 break; 799 case READ_10: 800 sectors = transport_get_sectors_10(cdb); 801 cmd->t_task_lba = transport_lba_32(cdb); 802 803 if (sbc_check_dpofua(dev, cmd, cdb)) 804 return TCM_INVALID_CDB_FIELD; 805 806 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 807 if (ret) 808 return ret; 809 810 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 811 cmd->execute_cmd = sbc_execute_rw; 812 break; 813 case READ_12: 814 sectors = transport_get_sectors_12(cdb); 815 cmd->t_task_lba = transport_lba_32(cdb); 816 817 if (sbc_check_dpofua(dev, cmd, cdb)) 818 return TCM_INVALID_CDB_FIELD; 819 820 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 821 if (ret) 822 return ret; 823 824 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 825 cmd->execute_cmd = sbc_execute_rw; 826 break; 827 case READ_16: 828 sectors = transport_get_sectors_16(cdb); 829 cmd->t_task_lba = transport_lba_64(cdb); 830 831 if (sbc_check_dpofua(dev, cmd, cdb)) 832 return TCM_INVALID_CDB_FIELD; 833 834 ret = sbc_check_prot(dev, cmd, cdb, sectors, false); 835 if (ret) 836 return ret; 837 838 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 839 cmd->execute_cmd = sbc_execute_rw; 840 break; 841 case WRITE_6: 842 sectors = transport_get_sectors_6(cdb); 843 cmd->t_task_lba = transport_lba_21(cdb); 844 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 845 cmd->execute_cmd = sbc_execute_rw; 846 break; 847 case WRITE_10: 848 case WRITE_VERIFY: 849 sectors = transport_get_sectors_10(cdb); 850 cmd->t_task_lba = transport_lba_32(cdb); 851 852 if (sbc_check_dpofua(dev, cmd, cdb)) 853 return TCM_INVALID_CDB_FIELD; 854 855 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 856 if (ret) 857 return ret; 858 859 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 860 cmd->execute_cmd = sbc_execute_rw; 861 break; 862 case WRITE_12: 863 sectors = transport_get_sectors_12(cdb); 864 cmd->t_task_lba = transport_lba_32(cdb); 865 866 if (sbc_check_dpofua(dev, cmd, cdb)) 867 return TCM_INVALID_CDB_FIELD; 868 869 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 870 if (ret) 871 return ret; 872 873 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 874 cmd->execute_cmd = sbc_execute_rw; 875 break; 876 case WRITE_16: 877 sectors = transport_get_sectors_16(cdb); 878 cmd->t_task_lba = transport_lba_64(cdb); 879 880 if (sbc_check_dpofua(dev, cmd, cdb)) 881 return TCM_INVALID_CDB_FIELD; 882 883 ret = sbc_check_prot(dev, cmd, cdb, sectors, true); 884 if (ret) 885 return ret; 886 887 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 888 cmd->execute_cmd = sbc_execute_rw; 889 break; 890 case XDWRITEREAD_10: 891 if (cmd->data_direction != DMA_TO_DEVICE || 892 !(cmd->se_cmd_flags & SCF_BIDI)) 893 return TCM_INVALID_CDB_FIELD; 894 sectors = transport_get_sectors_10(cdb); 895 896 if (sbc_check_dpofua(dev, cmd, cdb)) 897 return TCM_INVALID_CDB_FIELD; 898 899 cmd->t_task_lba = transport_lba_32(cdb); 900 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 901 902 /* 903 * Setup BIDI XOR callback to be run after I/O completion. 904 */ 905 cmd->execute_cmd = sbc_execute_rw; 906 cmd->transport_complete_callback = &xdreadwrite_callback; 907 break; 908 case VARIABLE_LENGTH_CMD: 909 { 910 u16 service_action = get_unaligned_be16(&cdb[8]); 911 switch (service_action) { 912 case XDWRITEREAD_32: 913 sectors = transport_get_sectors_32(cdb); 914 915 if (sbc_check_dpofua(dev, cmd, cdb)) 916 return TCM_INVALID_CDB_FIELD; 917 /* 918 * Use WRITE_32 and READ_32 opcodes for the emulated 919 * XDWRITE_READ_32 logic. 920 */ 921 cmd->t_task_lba = transport_lba_64_ext(cdb); 922 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 923 924 /* 925 * Setup BIDI XOR callback to be run during after I/O 926 * completion. 927 */ 928 cmd->execute_cmd = sbc_execute_rw; 929 cmd->transport_complete_callback = &xdreadwrite_callback; 930 break; 931 case WRITE_SAME_32: 932 sectors = transport_get_sectors_32(cdb); 933 if (!sectors) { 934 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 935 " supported\n"); 936 return TCM_INVALID_CDB_FIELD; 937 } 938 939 size = sbc_get_size(cmd, 1); 940 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 941 942 ret = sbc_setup_write_same(cmd, &cdb[10], ops); 943 if (ret) 944 return ret; 945 break; 946 default: 947 pr_err("VARIABLE_LENGTH_CMD service action" 948 " 0x%04x not supported\n", service_action); 949 return TCM_UNSUPPORTED_SCSI_OPCODE; 950 } 951 break; 952 } 953 case COMPARE_AND_WRITE: 954 sectors = cdb[13]; 955 /* 956 * Currently enforce COMPARE_AND_WRITE for a single sector 957 */ 958 if (sectors > 1) { 959 pr_err("COMPARE_AND_WRITE contains NoLB: %u greater" 960 " than 1\n", sectors); 961 return TCM_INVALID_CDB_FIELD; 962 } 963 /* 964 * Double size because we have two buffers, note that 965 * zero is not an error.. 966 */ 967 size = 2 * sbc_get_size(cmd, sectors); 968 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 969 cmd->t_task_nolb = sectors; 970 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; 971 cmd->execute_cmd = sbc_compare_and_write; 972 cmd->transport_complete_callback = compare_and_write_callback; 973 break; 974 case READ_CAPACITY: 975 size = READ_CAP_LEN; 976 cmd->execute_cmd = sbc_emulate_readcapacity; 977 break; 978 case SERVICE_ACTION_IN_16: 979 switch (cmd->t_task_cdb[1] & 0x1f) { 980 case SAI_READ_CAPACITY_16: 981 cmd->execute_cmd = sbc_emulate_readcapacity_16; 982 break; 983 case SAI_REPORT_REFERRALS: 984 cmd->execute_cmd = target_emulate_report_referrals; 985 break; 986 default: 987 pr_err("Unsupported SA: 0x%02x\n", 988 cmd->t_task_cdb[1] & 0x1f); 989 return TCM_INVALID_CDB_FIELD; 990 } 991 size = (cdb[10] << 24) | (cdb[11] << 16) | 992 (cdb[12] << 8) | cdb[13]; 993 break; 994 case SYNCHRONIZE_CACHE: 995 case SYNCHRONIZE_CACHE_16: 996 if (cdb[0] == SYNCHRONIZE_CACHE) { 997 sectors = transport_get_sectors_10(cdb); 998 cmd->t_task_lba = transport_lba_32(cdb); 999 } else { 1000 sectors = transport_get_sectors_16(cdb); 1001 cmd->t_task_lba = transport_lba_64(cdb); 1002 } 1003 if (ops->execute_sync_cache) { 1004 cmd->execute_cmd = ops->execute_sync_cache; 1005 goto check_lba; 1006 } 1007 size = 0; 1008 cmd->execute_cmd = sbc_emulate_noop; 1009 break; 1010 case UNMAP: 1011 if (!ops->execute_unmap) 1012 return TCM_UNSUPPORTED_SCSI_OPCODE; 1013 1014 if (!dev->dev_attrib.emulate_tpu) { 1015 pr_err("Got UNMAP, but backend device has" 1016 " emulate_tpu disabled\n"); 1017 return TCM_UNSUPPORTED_SCSI_OPCODE; 1018 } 1019 size = get_unaligned_be16(&cdb[7]); 1020 cmd->execute_cmd = sbc_execute_unmap; 1021 break; 1022 case WRITE_SAME_16: 1023 sectors = transport_get_sectors_16(cdb); 1024 if (!sectors) { 1025 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 1026 return TCM_INVALID_CDB_FIELD; 1027 } 1028 1029 size = sbc_get_size(cmd, 1); 1030 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 1031 1032 ret = sbc_setup_write_same(cmd, &cdb[1], ops); 1033 if (ret) 1034 return ret; 1035 break; 1036 case WRITE_SAME: 1037 sectors = transport_get_sectors_10(cdb); 1038 if (!sectors) { 1039 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 1040 return TCM_INVALID_CDB_FIELD; 1041 } 1042 1043 size = sbc_get_size(cmd, 1); 1044 cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 1045 1046 /* 1047 * Follow sbcr26 with WRITE_SAME (10) and check for the existence 1048 * of byte 1 bit 3 UNMAP instead of original reserved field 1049 */ 1050 ret = sbc_setup_write_same(cmd, &cdb[1], ops); 1051 if (ret) 1052 return ret; 1053 break; 1054 case VERIFY: 1055 size = 0; 1056 sectors = transport_get_sectors_10(cdb); 1057 cmd->t_task_lba = transport_lba_32(cdb); 1058 cmd->execute_cmd = sbc_emulate_noop; 1059 goto check_lba; 1060 case REZERO_UNIT: 1061 case SEEK_6: 1062 case SEEK_10: 1063 /* 1064 * There are still clients out there which use these old SCSI-2 1065 * commands. This mainly happens when running VMs with legacy 1066 * guest systems, connected via SCSI command pass-through to 1067 * iSCSI targets. Make them happy and return status GOOD. 1068 */ 1069 size = 0; 1070 cmd->execute_cmd = sbc_emulate_noop; 1071 break; 1072 default: 1073 ret = spc_parse_cdb(cmd, &size); 1074 if (ret) 1075 return ret; 1076 } 1077 1078 /* reject any command that we don't have a handler for */ 1079 if (!cmd->execute_cmd) 1080 return TCM_UNSUPPORTED_SCSI_OPCODE; 1081 1082 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1083 unsigned long long end_lba; 1084 check_lba: 1085 end_lba = dev->transport->get_blocks(dev) + 1; 1086 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || 1087 ((cmd->t_task_lba + sectors) > end_lba)) { 1088 pr_err("cmd exceeds last lba %llu " 1089 "(lba %llu, sectors %u)\n", 1090 end_lba, cmd->t_task_lba, sectors); 1091 return TCM_ADDRESS_OUT_OF_RANGE; 1092 } 1093 1094 if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) 1095 size = sbc_get_size(cmd, sectors); 1096 } 1097 1098 return target_cmd_size_check(cmd, size); 1099 } 1100 EXPORT_SYMBOL(sbc_parse_cdb); 1101 1102 u32 sbc_get_device_type(struct se_device *dev) 1103 { 1104 return TYPE_DISK; 1105 } 1106 EXPORT_SYMBOL(sbc_get_device_type); 1107 1108 static sense_reason_t 1109 sbc_execute_unmap(struct se_cmd *cmd) 1110 { 1111 struct sbc_ops *ops = cmd->protocol_data; 1112 struct se_device *dev = cmd->se_dev; 1113 unsigned char *buf, *ptr = NULL; 1114 sector_t lba; 1115 int size; 1116 u32 range; 1117 sense_reason_t ret = 0; 1118 int dl, bd_dl; 1119 1120 /* We never set ANC_SUP */ 1121 if (cmd->t_task_cdb[1]) 1122 return TCM_INVALID_CDB_FIELD; 1123 1124 if (cmd->data_length == 0) { 1125 target_complete_cmd(cmd, SAM_STAT_GOOD); 1126 return 0; 1127 } 1128 1129 if (cmd->data_length < 8) { 1130 pr_warn("UNMAP parameter list length %u too small\n", 1131 cmd->data_length); 1132 return TCM_PARAMETER_LIST_LENGTH_ERROR; 1133 } 1134 1135 buf = transport_kmap_data_sg(cmd); 1136 if (!buf) 1137 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1138 1139 dl = get_unaligned_be16(&buf[0]); 1140 bd_dl = get_unaligned_be16(&buf[2]); 1141 1142 size = cmd->data_length - 8; 1143 if (bd_dl > size) 1144 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", 1145 cmd->data_length, bd_dl); 1146 else 1147 size = bd_dl; 1148 1149 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { 1150 ret = TCM_INVALID_PARAMETER_LIST; 1151 goto err; 1152 } 1153 1154 /* First UNMAP block descriptor starts at 8 byte offset */ 1155 ptr = &buf[8]; 1156 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" 1157 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 1158 1159 while (size >= 16) { 1160 lba = get_unaligned_be64(&ptr[0]); 1161 range = get_unaligned_be32(&ptr[8]); 1162 pr_debug("UNMAP: Using lba: %llu and range: %u\n", 1163 (unsigned long long)lba, range); 1164 1165 if (range > dev->dev_attrib.max_unmap_lba_count) { 1166 ret = TCM_INVALID_PARAMETER_LIST; 1167 goto err; 1168 } 1169 1170 if (lba + range > dev->transport->get_blocks(dev) + 1) { 1171 ret = TCM_ADDRESS_OUT_OF_RANGE; 1172 goto err; 1173 } 1174 1175 ret = ops->execute_unmap(cmd, lba, range); 1176 if (ret) 1177 goto err; 1178 1179 ptr += 16; 1180 size -= 16; 1181 } 1182 1183 err: 1184 transport_kunmap_data_sg(cmd); 1185 if (!ret) 1186 target_complete_cmd(cmd, GOOD); 1187 return ret; 1188 } 1189 1190 void 1191 sbc_dif_generate(struct se_cmd *cmd) 1192 { 1193 struct se_device *dev = cmd->se_dev; 1194 struct se_dif_v1_tuple *sdt; 1195 struct scatterlist *dsg = cmd->t_data_sg, *psg; 1196 sector_t sector = cmd->t_task_lba; 1197 void *daddr, *paddr; 1198 int i, j, offset = 0; 1199 unsigned int block_size = dev->dev_attrib.block_size; 1200 1201 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1202 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1203 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1204 1205 for (j = 0; j < psg->length; 1206 j += sizeof(struct se_dif_v1_tuple)) { 1207 __u16 crc; 1208 unsigned int avail; 1209 1210 if (offset >= dsg->length) { 1211 offset -= dsg->length; 1212 kunmap_atomic(daddr - dsg->offset); 1213 dsg = sg_next(dsg); 1214 if (!dsg) { 1215 kunmap_atomic(paddr - psg->offset); 1216 return; 1217 } 1218 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1219 } 1220 1221 sdt = paddr + j; 1222 avail = min(block_size, dsg->length - offset); 1223 crc = crc_t10dif(daddr + offset, avail); 1224 if (avail < block_size) { 1225 kunmap_atomic(daddr - dsg->offset); 1226 dsg = sg_next(dsg); 1227 if (!dsg) { 1228 kunmap_atomic(paddr - psg->offset); 1229 return; 1230 } 1231 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1232 offset = block_size - avail; 1233 crc = crc_t10dif_update(crc, daddr, offset); 1234 } else { 1235 offset += block_size; 1236 } 1237 1238 sdt->guard_tag = cpu_to_be16(crc); 1239 if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) 1240 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); 1241 sdt->app_tag = 0; 1242 1243 pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x" 1244 " app_tag: 0x%04x ref_tag: %u\n", 1245 (cmd->data_direction == DMA_TO_DEVICE) ? 1246 "WRITE" : "READ", (unsigned long long)sector, 1247 sdt->guard_tag, sdt->app_tag, 1248 be32_to_cpu(sdt->ref_tag)); 1249 1250 sector++; 1251 } 1252 1253 kunmap_atomic(daddr - dsg->offset); 1254 kunmap_atomic(paddr - psg->offset); 1255 } 1256 } 1257 1258 static sense_reason_t 1259 sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt, 1260 __u16 crc, sector_t sector, unsigned int ei_lba) 1261 { 1262 __be16 csum; 1263 1264 if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) 1265 goto check_ref; 1266 1267 csum = cpu_to_be16(crc); 1268 1269 if (sdt->guard_tag != csum) { 1270 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" 1271 " csum 0x%04x\n", (unsigned long long)sector, 1272 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); 1273 return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1274 } 1275 1276 check_ref: 1277 if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)) 1278 return 0; 1279 1280 if (cmd->prot_type == TARGET_DIF_TYPE1_PROT && 1281 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 1282 pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" 1283 " sector MSB: 0x%08x\n", (unsigned long long)sector, 1284 be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); 1285 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1286 } 1287 1288 if (cmd->prot_type == TARGET_DIF_TYPE2_PROT && 1289 be32_to_cpu(sdt->ref_tag) != ei_lba) { 1290 pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" 1291 " ei_lba: 0x%08x\n", (unsigned long long)sector, 1292 be32_to_cpu(sdt->ref_tag), ei_lba); 1293 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1294 } 1295 1296 return 0; 1297 } 1298 1299 void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, 1300 struct scatterlist *sg, int sg_off) 1301 { 1302 struct se_device *dev = cmd->se_dev; 1303 struct scatterlist *psg; 1304 void *paddr, *addr; 1305 unsigned int i, len, left; 1306 unsigned int offset = sg_off; 1307 1308 if (!sg) 1309 return; 1310 1311 left = sectors * dev->prot_length; 1312 1313 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1314 unsigned int psg_len, copied = 0; 1315 1316 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1317 psg_len = min(left, psg->length); 1318 while (psg_len) { 1319 len = min(psg_len, sg->length - offset); 1320 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; 1321 1322 if (read) 1323 memcpy(paddr + copied, addr, len); 1324 else 1325 memcpy(addr, paddr + copied, len); 1326 1327 left -= len; 1328 offset += len; 1329 copied += len; 1330 psg_len -= len; 1331 1332 kunmap_atomic(addr - sg->offset - offset); 1333 1334 if (offset >= sg->length) { 1335 sg = sg_next(sg); 1336 offset = 0; 1337 } 1338 } 1339 kunmap_atomic(paddr - psg->offset); 1340 } 1341 } 1342 EXPORT_SYMBOL(sbc_dif_copy_prot); 1343 1344 sense_reason_t 1345 sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1346 unsigned int ei_lba, struct scatterlist *psg, int psg_off) 1347 { 1348 struct se_device *dev = cmd->se_dev; 1349 struct se_dif_v1_tuple *sdt; 1350 struct scatterlist *dsg = cmd->t_data_sg; 1351 sector_t sector = start; 1352 void *daddr, *paddr; 1353 int i; 1354 sense_reason_t rc; 1355 int dsg_off = 0; 1356 unsigned int block_size = dev->dev_attrib.block_size; 1357 1358 for (; psg && sector < start + sectors; psg = sg_next(psg)) { 1359 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1360 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1361 1362 for (i = psg_off; i < psg->length && 1363 sector < start + sectors; 1364 i += sizeof(struct se_dif_v1_tuple)) { 1365 __u16 crc; 1366 unsigned int avail; 1367 1368 if (dsg_off >= dsg->length) { 1369 dsg_off -= dsg->length; 1370 kunmap_atomic(daddr - dsg->offset); 1371 dsg = sg_next(dsg); 1372 if (!dsg) { 1373 kunmap_atomic(paddr - psg->offset); 1374 return 0; 1375 } 1376 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1377 } 1378 1379 sdt = paddr + i; 1380 1381 pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" 1382 " app_tag: 0x%04x ref_tag: %u\n", 1383 (unsigned long long)sector, sdt->guard_tag, 1384 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1385 1386 if (sdt->app_tag == cpu_to_be16(0xffff)) { 1387 dsg_off += block_size; 1388 goto next; 1389 } 1390 1391 avail = min(block_size, dsg->length - dsg_off); 1392 crc = crc_t10dif(daddr + dsg_off, avail); 1393 if (avail < block_size) { 1394 kunmap_atomic(daddr - dsg->offset); 1395 dsg = sg_next(dsg); 1396 if (!dsg) { 1397 kunmap_atomic(paddr - psg->offset); 1398 return 0; 1399 } 1400 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1401 dsg_off = block_size - avail; 1402 crc = crc_t10dif_update(crc, daddr, dsg_off); 1403 } else { 1404 dsg_off += block_size; 1405 } 1406 1407 rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba); 1408 if (rc) { 1409 kunmap_atomic(daddr - dsg->offset); 1410 kunmap_atomic(paddr - psg->offset); 1411 cmd->bad_sector = sector; 1412 return rc; 1413 } 1414 next: 1415 sector++; 1416 ei_lba++; 1417 } 1418 1419 psg_off = 0; 1420 kunmap_atomic(daddr - dsg->offset); 1421 kunmap_atomic(paddr - psg->offset); 1422 } 1423 1424 return 0; 1425 } 1426 EXPORT_SYMBOL(sbc_dif_verify); 1427