/*******************************************************************************
 * Filename:  target_core_xcopy.c
 *
 * This file contains support for SPC-4 Extended-Copy offload with generic
 * TCM backends.
 *
 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
 *
 * Author:
 * Nicholas A. Bellinger <nab@daterainc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static struct workqueue_struct *xcopy_wq;
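/*
 * Sketch of the 16-byte NAA IEEE Registered Extended designator produced
 * below (my reading of the byte math; cf. SPC-4 VPD page 0x83):
 *
 *   byte 0:      NAA type 6 in the upper nibble
 *   bytes 0-3:   24-bit IEEE Company ID 0x001405, spread from the lower
 *                nibble of byte 0 through the upper nibble of byte 3
 *   bytes 3-15:  vendor-specific identifier, filled in by
 *                spc_parse_naa_6h_vendor_specific()
 */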
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
	int off = 0;

	buf[off++] = (0x6 << 4);
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
	return 0;
}

static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					bool src)
{
	struct se_device *se_dev;
	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
	int rc;

	if (src)
		dev_wwn = &xop->dst_tid_wwn[0];
	else
		dev_wwn = &xop->src_tid_wwn[0];

	mutex_lock(&g_device_mutex);
	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {

		if (!se_dev->dev_attrib.emulate_3pc)
			continue;

		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);

		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
		if (rc != 0)
			continue;

		if (src) {
			xop->dst_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
				" se_dev\n", xop->dst_dev);
		} else {
			xop->src_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
				" se_dev\n", xop->src_dev);
		}

		rc = target_depend_item(&se_dev->dev_group.cg_item);
		if (rc != 0) {
			pr_err("configfs_depend_item attempt failed:"
				" %d for se_dev: %p\n", rc, se_dev);
			mutex_unlock(&g_device_mutex);
			return rc;
		}

		pr_debug("Called configfs_depend_item for se_dev: %p"
			" se_dev->se_dev_group: %p\n", se_dev,
			&se_dev->dev_group);

		mutex_unlock(&g_device_mutex);
		return 0;
	}
	mutex_unlock(&g_device_mutex);

	pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
	return -EINVAL;
}
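/*
 * Layout of the 32-byte 0xe4 (Identification Descriptor) target
 * descriptor, as consumed by the parsing below:
 *
 *   byte  0:     descriptor type code (0xe4)
 *   bytes 2-3:   RELATIVE INITIATOR PORT IDENTIFIER
 *   byte  4:     code set (low nibble, 0x1 = binary)
 *   byte  5:     association (bits 5:4, 0x0 = LUN) and
 *                designator type (low nibble, 0x3 = NAA)
 *   byte  7:     designator length (16 for NAA IEEE Registered Extended)
 *   bytes 8-23:  the NAA designator itself
 */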
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p, bool src)
{
	unsigned char *desc = p;
	unsigned short ript;
	u8 desig_len;
	/*
	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
	 */
	ript = get_unaligned_be16(&desc[2]);
	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
	/*
	 * Check for supported code set, association, and designator type
	 */
	if ((desc[4] & 0x0f) != 0x1) {
		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x30) != 0x00) {
		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x0f) != 0x3) {
		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
			(desc[5] & 0x0f));
		return -EINVAL;
	}
	/*
	 * Check for matching 16 byte length for NAA IEEE Registered Extended
	 * Assigned designator
	 */
	desig_len = desc[7];
	if (desig_len != 16) {
		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
		return -EINVAL;
	}
	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
	/*
	 * Check for NAA IEEE Registered Extended Assigned header
	 */
	if ((desc[8] & 0xf0) != 0x60) {
		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
			(desc[8] & 0xf0));
		return -EINVAL;
	}

	if (src) {
		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the source designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_SOURCE_RECV_OP;
			xop->src_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
					" received xop\n", xop->src_dev);
		}
	} else {
		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the destination designator matches the local
		 * device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_DEST_RECV_OP;
			xop->dst_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
				" received xop\n", xop->dst_dev);
		}
	}

	return 0;
}

static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned short tdll)
{
	struct se_device *local_dev = se_cmd->se_dev;
	unsigned char *desc = p;
	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
	unsigned short start = 0;
	bool src = true;

	if (offset != 0) {
		pr_err("XCOPY target descriptor list length is not a"
			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
		return -EINVAL;
	}
	if (tdll > 64) {
		pr_err("XCOPY supports a maximum of two src/dest target"
			" descriptors, tdll: %hu too large..\n", tdll);
		return -EINVAL;
	}
	/*
	 * Generate an IEEE Registered Extended designator based upon the
	 * se_device the XCOPY was received upon..
	 */
	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);

	while (start < tdll) {
		/*
		 * Check for the 0xe4 target descriptor type code, and match
		 * its NAA designator against local + remote VPD 0x83 WWNs.
		 */
		switch (desc[0]) {
		case 0xe4:
			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
							&desc[0], src);
			if (rc != 0)
				goto out;
			/*
			 * Assume target descriptors are in source -> destination order..
			 */
			if (src)
				src = false;
			else
				src = true;
			start += XCOPY_TARGET_DESC_LEN;
			desc += XCOPY_TARGET_DESC_LEN;
			ret++;
			break;
		default:
			pr_err("XCOPY unsupported descriptor type code:"
					" 0x%02x\n", desc[0]);
			goto out;
		}
	}

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
	else
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);

	if (rc < 0)
		goto out;

	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->src_dev, &xop->src_tid_wwn[0]);
	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->dst_dev, &xop->dst_tid_wwn[0]);

	return ret;

out:
	return -EINVAL;
}
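/*
 * Layout of the 28-byte 0x02 (block -> block) segment descriptor, with
 * offsets as read by the parsing below (descriptor length is expected to
 * be 0x18):
 *
 *   byte  0:     descriptor type code (0x02)
 *   byte  1:     bit 1 = DC
 *   bytes 2-3:   descriptor length
 *   bytes 4-5:   source target descriptor index (stdi)
 *   bytes 6-7:   destination target descriptor index (dtdi)
 *   bytes 10-11: number of logical blocks (nolb)
 *   bytes 12-19: source LBA
 *   bytes 20-27: destination LBA
 */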
static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p)
{
	unsigned char *desc = p;
	int dc = (desc[1] & 0x02);
	unsigned short desc_len;

	desc_len = get_unaligned_be16(&desc[2]);
	if (desc_len != 0x18) {
		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
				" %hu\n", desc_len);
		return -EINVAL;
	}

	xop->stdi = get_unaligned_be16(&desc[4]);
	xop->dtdi = get_unaligned_be16(&desc[6]);
	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
		desc_len, xop->stdi, xop->dtdi, dc);

	xop->nolb = get_unaligned_be16(&desc[10]);
	xop->src_lba = get_unaligned_be64(&desc[12]);
	xop->dst_lba = get_unaligned_be64(&desc[20]);
	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
		xop->nolb, (unsigned long long)xop->src_lba,
		(unsigned long long)xop->dst_lba);

	if (dc != 0) {
		xop->dbl = (desc[29] & 0xff) << 16;
		xop->dbl |= (desc[30] & 0xff) << 8;
		xop->dbl |= desc[31] & 0xff;

		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
	}
	return 0;
}

static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned int sdll)
{
	unsigned char *desc = p;
	unsigned int start = 0;
	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;

	if (offset != 0) {
		pr_err("XCOPY segment descriptor list length is not a"
			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
		return -EINVAL;
	}

	while (start < sdll) {
		/*
		 * Check segment descriptor type code for block -> block
		 */
		switch (desc[0]) {
		case 0x02:
			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
			if (rc < 0)
				goto out;

			ret++;
			start += XCOPY_SEGMENT_DESC_LEN;
			desc += XCOPY_SEGMENT_DESC_LEN;
			break;
		default:
			pr_err("XCOPY unsupported segment descriptor"
				" type: 0x%02x\n", desc[0]);
			goto out;
		}
	}

	return ret;

out:
	return -EINVAL;
}
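/*
 * Copy offload is driven through a minimal in-kernel "xcopy-pt" pseudo
 * initiator: the static se_port/se_portal_group/se_session/se_node_acl
 * instances below stand in for a real fabric login, so the internal
 * READ/WRITE passthrough commands can reuse the normal se_cmd submission
 * path without a connected initiator.
 */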
/*
 * Start xcopy_pt ops
 */

struct xcopy_pt_cmd {
	bool remote_port;
	struct se_cmd se_cmd;
	struct xcopy_op *xcopy_op;
	struct completion xpt_passthrough_sem;
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};

static struct se_port xcopy_pt_port;
static struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;

static char *xcopy_pt_get_fabric_name(void)
{
	return "xcopy-pt";
}

static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
	struct se_device *remote_dev;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		remote_dev = xop->dst_dev;
	else
		remote_dev = xop->src_dev;

	pr_debug("Calling configfs_undepend_item for"
		  " remote_dev: %p remote_dev->dev_group: %p\n",
		  remote_dev, &remote_dev->dev_group.cg_item);

	target_undepend_item(&remote_dev->dev_group.cg_item);
}

static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	kfree(xpt_cmd);
}

static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	complete(&xpt_cmd->xpt_passthrough_sem);
	return 0;
}

static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
	return 0;
}

static const struct target_core_fabric_ops xcopy_pt_tfo = {
	.get_fabric_name	= xcopy_pt_get_fabric_name,
	.get_task_tag		= xcopy_pt_get_tag,
	.get_cmd_state		= xcopy_pt_get_cmd_state,
	.release_cmd		= xcopy_pt_release_cmd,
	.check_stop_free	= xcopy_pt_check_stop_free,
	.write_pending		= xcopy_pt_write_pending,
	.write_pending_status	= xcopy_pt_write_pending_status,
	.queue_data_in		= xcopy_pt_queue_data_in,
	.queue_status		= xcopy_pt_queue_status,
};

/*
 * End xcopy_pt ops
 */

int target_xcopy_setup_pt(void)
{
	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
	if (!xcopy_wq) {
		pr_err("Unable to allocate xcopy_wq\n");
		return -ENOMEM;
	}

	memset(&xcopy_pt_port, 0, sizeof(struct se_port));
	INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
	INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
	mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);

	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);

	xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;

	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);

	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;

	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

	return 0;
}

void target_xcopy_release_pt(void)
{
	if (xcopy_wq)
		destroy_workqueue(xcopy_wq);
}
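/*
 * Quick reference for the two emulated data movement modes handled below:
 *
 *   X-COPY PUSH: EXTENDED_COPY received on the source device; the READ is
 *                local and the WRITE is directed at the remote destination
 *                via xcopy_pt_port.
 *   X-COPY PULL: EXTENDED_COPY received on the destination device; the
 *                READ is directed at the remote source via xcopy_pt_port
 *                and the WRITE is local.
 */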
static void target_xcopy_setup_pt_port(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	bool remote_port)
{
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
		/*
		 * Honor destination port reservations for X-COPY PUSH emulation,
		 * where the CDB is received on the local source port and blocks
		 * are READ locally to be WRITTEN on the remote destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PUSH\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	} else {
		/*
		 * Honor source port reservations for X-COPY PULL emulation,
		 * where the CDB is received on the local destination port and
		 * blocks are READ from the remote source port to be WRITTEN
		 * on the local destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PULL\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local DST port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	}
}

static void target_xcopy_init_pt_lun(struct se_device *se_dev,
		struct se_cmd *pt_cmd, bool remote_port)
{
	/*
	 * Don't allocate + init a pt_cmd->se_lun if honoring the local port
	 * for reservations.  The pt_cmd->se_lun pointer will be setup from
	 * within target_xcopy_setup_pt_port()
	 */
	if (remote_port) {
		pr_debug("Setup emulated se_dev: %p from se_dev\n",
			pt_cmd->se_dev);
		pt_cmd->se_lun = &se_dev->xcopy_lun;
		pt_cmd->se_dev = se_dev;
	}

	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}

static int target_xcopy_setup_pt_cmd(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	unsigned char *cdb,
	bool remote_port,
	bool alloc_mem)
{
	struct se_cmd *cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;
	int ret = 0, rc;
	/*
	 * Setup LUN+port to honor reservations based upon xop->op_origin for
	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
	 */
	target_xcopy_init_pt_lun(se_dev, cmd, remote_port);

	xpt_cmd->xcopy_op = xop;
	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
	if (sense_rc) {
		ret = -EINVAL;
		goto out;
	}

	if (alloc_mem) {
		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				      cmd->data_length, false);
		if (rc < 0) {
			ret = rc;
			goto out;
		}
		/*
		 * Set this bit so that transport_free_pages() allows the
		 * caller to release SGLs + physical memory allocated by
		 * transport_generic_get_mem()..
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	} else {
		/*
		 * Here the previously allocated SGLs for the internal READ
		 * are mapped zero-copy to the internal WRITE.
		 */
		sense_rc = transport_generic_map_mem_to_cmd(cmd,
					xop->xop_data_sg, xop->xop_data_nents,
					NULL, 0);
		if (sense_rc) {
			ret = -EINVAL;
			goto out;
		}

		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
	}

	return 0;

out:
	return ret;
}

static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;

	sense_rc = transport_generic_new_cmd(se_cmd);
	if (sense_rc)
		return -EINVAL;

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		target_execute_cmd(se_cmd);

	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);

	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
			se_cmd->scsi_status);

	return (se_cmd->scsi_status) ? -EINVAL : 0;
}
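/*
 * Each copy chunk becomes one internally built READ_16 (below) followed by
 * one WRITE_16 CDB, both submitted through the xcopy-pt session.  The READ
 * allocates the SGLs; the WRITE then maps those same SGLs zero-copy via
 * xop->xop_data_sg, so the payload is never duplicated in memory.
 */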
static int target_xcopy_read_source(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *src_dev,
	sector_t src_lba,
	u32 src_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = READ_16;
	put_unaligned_be64(src_lba, &cdb[2]);
	put_unaligned_be32(src_sectors, &cdb[10]);
	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)src_lba, src_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->src_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
				remote_port, true);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	xop->xop_data_sg = se_cmd->t_data_sg;
	xop->xop_data_nents = se_cmd->t_data_nents;
	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
		" memory\n", xop->xop_data_sg, xop->xop_data_nents);

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}
	/*
	 * Clear off the allocated t_data_sg, that has been saved for
	 * zero-copy WRITE submission reuse in struct xcopy_op..
	 */
	se_cmd->t_data_sg = NULL;
	se_cmd->t_data_nents = 0;

	return 0;
}

static int target_xcopy_write_destination(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *dst_dev,
	sector_t dst_lba,
	u32 dst_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = WRITE_16;
	put_unaligned_be64(dst_lba, &cdb[2]);
	put_unaligned_be32(dst_sectors, &cdb[10]);
	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)dst_lba, dst_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->dst_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
				remote_port, false);
	if (rc < 0) {
		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
		/*
		 * If the failure happened before the t_mem_list hand-off in
		 * target_xcopy_setup_pt_cmd(), reset the memory + clear the
		 * flag so that the core releases this memory on error during
		 * the X-COPY WRITE I/O.
		 */
		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		src_cmd->t_data_sg = xop->xop_data_sg;
		src_cmd->t_data_nents = xop->xop_data_nents;

		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	return 0;
}
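/*
 * Example of the chunking performed below, assuming 512-byte blocks and an
 * effective max_nolb of 1024 blocks: a segment with nolb = 2560 is issued
 * as three READ/WRITE round-trips of 1024, 1024, and 512 blocks, with
 * src_lba/dst_lba advancing by cur_nolb after each pair.
 */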
static void target_xcopy_do_work(struct work_struct *work)
{
	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
	unsigned int max_sectors;
	int rc;
	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;

	end_lba = src_lba + nolb;
	/*
	 * Break up XCOPY I/O into hw_max_sectors-sized chunks, based on the
	 * smaller of the src_dev + dst_dev limits, capped at XCOPY_MAX_SECTORS.
	 */
	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
			  dst_dev->dev_attrib.hw_max_sectors);
	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
			nolb, max_nolb, (unsigned long long)end_lba);
	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
			(unsigned long long)src_lba, (unsigned long long)dst_lba);

	while (src_lba < end_lba) {
		cur_nolb = min(nolb, max_nolb);

		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
		if (rc < 0)
			goto out;

		src_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
				(unsigned long long)src_lba);

		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
						dst_lba, cur_nolb);
		if (rc < 0) {
			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
			goto out;
		}

		dst_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
				(unsigned long long)dst_lba);

		copied_nolb += cur_nolb;
		nolb -= cur_nolb;

		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
	}

	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
		(unsigned long long)src_lba, (unsigned long long)dst_lba);
	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
	return;

out:
	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
	ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
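/*
 * Layout of the EXTENDED_COPY (LID1) parameter list header, as consumed by
 * target_do_xcopy() below:
 *
 *   byte  0:     LIST IDENTIFIER
 *   byte  1:     bits 4:3 = LIST ID USAGE
 *   bytes 2-3:   TARGET DESCRIPTOR LIST LENGTH (tdll)
 *   bytes 8-11:  SEGMENT DESCRIPTOR LIST LENGTH (sdll)
 *   bytes 12-15: INLINE DATA LENGTH (must be zero here)
 *   bytes 16+:   target descriptors, then segment descriptors
 */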
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
	struct se_device *dev = se_cmd->se_dev;
	struct xcopy_op *xop = NULL;
	unsigned char *p = NULL, *seg_desc;
	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
	int rc;
	unsigned short tdll;

	if (!dev->dev_attrib.emulate_3pc) {
		pr_err("EXTENDED_COPY operation explicitly disabled\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	sa = se_cmd->t_task_cdb[1] & 0x1f;
	if (sa != 0x00) {
		pr_err("EXTENDED_COPY(LID4) not supported\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
	if (!xop) {
		pr_err("Unable to allocate xcopy_op\n");
		return TCM_OUT_OF_RESOURCES;
	}
	xop->xop_se_cmd = se_cmd;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
		kfree(xop);
		return TCM_OUT_OF_RESOURCES;
	}

	list_id = p[0];
	list_id_usage = (p[1] & 0x18) >> 3;

	/*
	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
	 */
	tdll = get_unaligned_be16(&p[2]);
	sdll = get_unaligned_be32(&p[8]);

	inline_dl = get_unaligned_be32(&p[12]);
	if (inline_dl != 0) {
		pr_err("XCOPY with non zero inline data length\n");
		goto out;
	}

	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
		tdll, sdll, inline_dl);

	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
	if (rc <= 0)
		goto out;

	if (xop->src_dev->dev_attrib.block_size !=
	    xop->dst_dev->dev_attrib.block_size) {
		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
		       " block_size: %u currently unsupported\n",
			xop->src_dev->dev_attrib.block_size,
			xop->dst_dev->dev_attrib.block_size);
		xcopy_pt_undepend_remotedev(xop);
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
				rc * XCOPY_TARGET_DESC_LEN);
	seg_desc = &p[16];
	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);

	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
	if (rc <= 0) {
		xcopy_pt_undepend_remotedev(xop);
		goto out;
	}
	transport_kunmap_data_sg(se_cmd);

	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
				rc * XCOPY_SEGMENT_DESC_LEN);
	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
	queue_work(xcopy_wq, &xop->xop_work);
	return TCM_NO_SENSE;

out:
	if (p)
		transport_kunmap_data_sg(se_cmd);
	kfree(xop);
	return ret;
}
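/*
 * RECEIVE COPY RESULTS with SA 0x03 (OPERATING PARAMETERS) reports the
 * fixed limits of this emulation: the RCR_OP_* constants from
 * target_core_xcopy.h, SNLID=1 (list identifiers are not saved), and the
 * two implemented descriptor type codes 0x02 + 0xe4 filled in below.
 */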
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
	unsigned char *p;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg failed in"
		       " target_rcr_operating_parameters\n");
		return TCM_OUT_OF_RESOURCES;
	}

	if (se_cmd->data_length < 54) {
		pr_err("Receive Copy Results Op Parameters length"
		       " too small: %u\n", se_cmd->data_length);
		transport_kunmap_data_sg(se_cmd);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Set SNLID=1 (Supports no List ID)
	 */
	p[4] = 0x1;
	/*
	 * MAXIMUM TARGET DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
	/*
	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
	/*
	 * MAXIMUM DESCRIPTOR LIST LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
	/*
	 * MAXIMUM SEGMENT LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
	/*
	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
	 */
	put_unaligned_be32(0x0, &p[20]);
	/*
	 * HELD DATA LIMIT
	 */
	put_unaligned_be32(0x0, &p[24]);
	/*
	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
	 */
	put_unaligned_be32(0x0, &p[28]);
	/*
	 * TOTAL CONCURRENT COPIES
	 */
	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
	/*
	 * MAXIMUM CONCURRENT COPIES
	 */
	p[36] = RCR_OP_MAX_CONCURR_COPIES;
	/*
	 * DATA SEGMENT GRANULARITY (log 2)
	 */
	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
	/*
	 * INLINE DATA GRANULARITY (log 2)
	 */
	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
	/*
	 * HELD DATA GRANULARITY (log 2)
	 */
	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
	/*
	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
	 */
	p[43] = 0x2;
	/*
	 * List of implemented descriptor type codes (ordered)
	 */
	p[44] = 0x02; /* Copy Block to Block device */
	p[45] = 0xe4; /* Identification descriptor target descriptor */

	/*
	 * AVAILABLE DATA (n-3)
	 */
	put_unaligned_be32(42, &p[0]);

	transport_kunmap_data_sg(se_cmd);
	target_complete_cmd(se_cmd, GOOD);

	return TCM_NO_SENSE;
}

sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
{
	unsigned char *cdb = &se_cmd->t_task_cdb[0];
	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
	sense_reason_t rc = TCM_NO_SENSE;

	pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);

	if (list_id != 0) {
		pr_err("Receive Copy Results with non zero list identifier"
		       " not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	switch (sa) {
	case RCR_SA_OPERATING_PARAMETERS:
		rc = target_rcr_operating_parameters(se_cmd);
		break;
	case RCR_SA_COPY_STATUS:
	case RCR_SA_RECEIVE_DATA:
	case RCR_SA_FAILED_SEGMENT_DETAILS:
	default:
		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
		return TCM_INVALID_CDB_FIELD;
	}

	return rc;
}