// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "efct_driver.h"
#include "efct_lio.h"

/*
 * lio_wq is used to call into the LIO backend during creation or deletion of
 * sessions. This serializes session management, as we create a
 * single-threaded workqueue.
 */
static struct workqueue_struct *lio_wq;

static int
efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
{
	u8 a[8];

	put_unaligned_be64(wwn, a);
	return snprintf(str, len, "%s%8phC", pre, a);
}

static int
efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
{
	int num;
	u8 b[8];

	if (npiv) {
		num = sscanf(name,
			     "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
			     &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
			     &b[7]);
	} else {
		num = sscanf(name,
			     "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
			     &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
			     &b[7]);
	}

	if (num != 8)
		return -EINVAL;

	*wwp = get_unaligned_be64(b);
	return 0;
}

static int
efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
{
	unsigned int cnt = size;
	int rc;

	*wwpn = *wwnn = 0;
	if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
		cnt--;

	/* validate we have enough characters for WWPN */
	if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
		return -EINVAL;

	rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
	if (rc)
		return rc;

	rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
	if (rc)
		return rc;

	return 0;
}

static ssize_t
efct_lio_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
}

static ssize_t
efct_lio_tpg_enable_store(struct config_item *item, const char *page,
			  size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);
	struct efct *efct;
	struct efc *efc;
	unsigned long op;

	if (!tpg->nport || !tpg->nport->efct) {
		pr_err("%s: Unable to find EFCT device\n", __func__);
		return -EINVAL;
	}

	efct = tpg->nport->efct;
	efc = efct->efcport;

	if (kstrtoul(page, 0, &op) < 0)
		return -EINVAL;

	if (op == 1) {
		int ret;

		tpg->enabled = true;
		efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);

		ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
		if (ret) {
			efct->tgt_efct.lio_nport = NULL;
			efc_log_debug(efct, "cannot bring port online\n");
			return ret;
		}
	} else if (op == 0) {
		efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);

		if (efc->domain && efc->domain->nport)
			efct_scsi_tgt_del_nport(efc, efc->domain->nport);

		tpg->enabled = false;
	} else {
		return -EINVAL;
	}

	return count;
}
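/*
 * Illustrative usage sketch (paths assumed, not taken from this file): once
 * the "efct" fabric is registered with configfs, the store handler above is
 * reached from user space via something like
 *
 *   echo 1 > /sys/kernel/config/target/efct/naa.<wwpn>/tpgt_1/enable
 *
 * where "1" brings the port online and "0" tears the nport down.
 */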
static ssize_t
efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
}

static ssize_t
efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
			       size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);
	struct efct_lio_vport *lio_vport = tpg->vport;
	struct efct *efct;
	struct efc *efc;
	unsigned long op;

	if (kstrtoul(page, 0, &op) < 0)
		return -EINVAL;

	if (!lio_vport) {
		pr_err("Unable to find vport\n");
		return -EINVAL;
	}

	efct = lio_vport->efct;
	efc = efct->efcport;

	if (op == 1) {
		tpg->enabled = true;
		efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);

		if (efc->domain) {
			int ret;

			ret = efc_nport_vport_new(efc->domain,
						  lio_vport->npiv_wwpn,
						  lio_vport->npiv_wwnn,
						  U32_MAX, false, true,
						  NULL, NULL);
			if (ret != 0) {
				efc_log_err(efct, "Failed to create Vport\n");
				return ret;
			}
			return count;
		}

		if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
					    lio_vport->npiv_wwpn, U32_MAX,
					    false, true, NULL, NULL)))
			return -ENOMEM;

	} else if (op == 0) {
		efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);

		tpg->enabled = false;
		/* only physical nport should exist, free lio_nport
		 * allocated in efct_lio_make_nport
		 */
		if (efc->domain) {
			efc_nport_vport_del(efct->efcport, efc->domain,
					    lio_vport->npiv_wwpn,
					    lio_vport->npiv_wwnn);
			return count;
		}
	} else {
		return -EINVAL;
	}
	return count;
}

static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return tpg->nport->wwpn_str;
}

static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return tpg->vport->wwpn_str;
}

static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return tpg->tpgt;
}

static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return tpg->tpgt;
}

static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 1;
}

static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return tpg->tpg_attrib.demo_mode_write_protect;
}

static int
efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return tpg->tpg_attrib.demo_mode_write_protect;
}

static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return tpg->tpg_attrib.prod_mode_write_protect;
}
static int
efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	return tpg->tpg_attrib.prod_mode_write_protect;
}

static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
	return target_put_sess_cmd(se_cmd);
}

static int
efct_lio_abort_tgt_cb(struct efct_io *io,
		      enum efct_scsi_io_status scsi_status,
		      u32 flags, void *arg)
{
	efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
	return 0;
}

static void
efct_lio_aborted_task(struct se_cmd *se_cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);

	if (ocp->rsp_sent)
		return;

	/* command has been aborted, clean up here */
	ocp->aborting = true;
	ocp->err = EFCT_SCSI_STATUS_ABORTED;
	/* terminate the exchange */
	efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
}

static void efct_lio_release_cmd(struct se_cmd *se_cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct *efct = io->efct;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
	efct_scsi_io_complete(io);
	atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
}

static void efct_lio_close_session(struct se_session *se_sess)
{
	struct efc_node *node = se_sess->fabric_sess_ptr;

	pr_debug("se_sess=%p node=%p", se_sess, node);

	if (!node) {
		pr_debug("node is NULL");
		return;
	}

	efc_node_post_shutdown(node, NULL);
}

static int efct_lio_get_cmd_state(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);

	return io->tgt_io.state;
}

static int
efct_lio_sg_map(struct efct_io *io)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *cmd = &ocp->cmd;

	ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
				      cmd->t_data_nents, cmd->data_direction);
	if (ocp->seg_map_cnt == 0)
		return -EFAULT;
	return 0;
}

static void
efct_lio_sg_unmap(struct efct_io *io)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *cmd = &ocp->cmd;

	if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
		return;

	dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
		     ocp->seg_map_cnt, cmd->data_direction);
	ocp->seg_map_cnt = 0;
}
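/*
 * Note: dma_map_sg() returns the number of mapped entries, which the data
 * movers below track as ocp->seg_map_cnt, separately from ocp->seg_cnt
 * (taken from cmd->t_data_nents). Both are reset per command, and the
 * mapping is undone once the final data phase or the response completes.
 */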
static int
efct_lio_status_done(struct efct_io *io,
		     enum efct_scsi_io_status scsi_status,
		     u32 flags, void *arg)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);
	if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
		efct_lio_io_printf(io, "callback completed with error=%d\n",
				   scsi_status);
		ocp->err = scsi_status;
	}
	if (ocp->seg_map_cnt)
		efct_lio_sg_unmap(io);

	efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
			   scsi_status, ocp->err, flags, ocp->ddir);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
	transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	return 0;
}

static int
efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		       u32 flags, void *arg);

static int
efct_lio_write_pending(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct_scsi_sgl *sgl = io->sgl;
	struct scatterlist *sg;
	u32 flags = 0, cnt, curcnt;
	u64 length = 0;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
	efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
			   cmd->transport_state, cmd->se_cmd_flags);

	if (ocp->seg_cnt == 0) {
		ocp->seg_cnt = cmd->t_data_nents;
		ocp->cur_seg = 0;
		if (efct_lio_sg_map(io)) {
			efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
			return -EFAULT;
		}
	}
	curcnt = (ocp->seg_map_cnt - ocp->cur_seg);
	curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated;
	/* find current sg */
	for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
	     sg = sg_next(sg))
		;/* do nothing */

	for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
		sgl[cnt].addr = sg_dma_address(sg);
		sgl[cnt].dif_addr = 0;
		sgl[cnt].len = sg_dma_len(sg);
		length += sgl[cnt].len;
		ocp->cur_seg++;
	}

	if (ocp->cur_seg == ocp->seg_cnt)
		flags = EFCT_SCSI_LAST_DATAPHASE;

	return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
				      efct_lio_datamove_done, NULL);
}
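/*
 * Both efct_lio_write_pending() above and efct_lio_queue_data_in() below
 * move data in chunks of at most io->sgl_allocated scatterlist entries per
 * hardware data phase. efct_lio_datamove_done() re-invokes the appropriate
 * routine until ocp->cur_seg catches up with ocp->seg_cnt, at which point
 * EFCT_SCSI_LAST_DATAPHASE is set on the final transfer.
 */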
static int
efct_lio_queue_data_in(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct_scsi_sgl *sgl = io->sgl;
	struct scatterlist *sg = NULL;
	uint flags = 0, cnt = 0, curcnt = 0;
	u64 length = 0;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);

	if (ocp->seg_cnt == 0) {
		if (cmd->data_length) {
			ocp->seg_cnt = cmd->t_data_nents;
			ocp->cur_seg = 0;
			if (efct_lio_sg_map(io)) {
				efct_lio_io_printf(io,
						   "efct_lio_sg_map failed\n");
				return -EAGAIN;
			}
		} else {
			/* If command length is 0, send the response status */
			struct efct_scsi_cmd_resp rsp;

			memset(&rsp, 0, sizeof(rsp));
			efct_lio_io_printf(io,
					   "cmd : %p length 0, send status\n",
					   cmd);
			return efct_scsi_send_resp(io, 0, &rsp,
						   efct_lio_status_done, NULL);
		}
	}
	curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);

	while (cnt < curcnt) {
		sg = &cmd->t_data_sg[ocp->cur_seg];
		sgl[cnt].addr = sg_dma_address(sg);
		sgl[cnt].dif_addr = 0;
		if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
			sgl[cnt].len = cmd->data_length - ocp->transferred_len;
		else
			sgl[cnt].len = sg_dma_len(sg);

		ocp->transferred_len += sgl[cnt].len;
		length += sgl[cnt].len;
		ocp->cur_seg++;
		cnt++;
		if (ocp->transferred_len == cmd->data_length)
			break;
	}

	if (ocp->transferred_len == cmd->data_length) {
		flags = EFCT_SCSI_LAST_DATAPHASE;
		ocp->seg_cnt = ocp->cur_seg;
	}

	/* If there is residual, disable Auto Good Response */
	if (cmd->residual_count)
		flags |= EFCT_SCSI_NO_AUTO_RESPONSE;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);

	return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
				      efct_lio_datamove_done, NULL);
}

static void
efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		   u32 flags)
{
	struct efct_scsi_cmd_resp rsp;
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *cmd = &io->tgt_io.cmd;
	int rc;

	if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
		ocp->rsp_sent = true;
		efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
		transport_generic_free_cmd(&io->tgt_io.cmd, 0);
		return;
	}

	/* send check condition if an error occurred */
	memset(&rsp, 0, sizeof(rsp));
	rsp.scsi_status = cmd->scsi_status;
	rsp.sense_data = (uint8_t *)io->tgt_io.sense_buffer;
	rsp.sense_data_length = cmd->scsi_sense_length;

	/* Check for residual underrun or overrun */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
		rsp.residual = -cmd->residual_count;
	else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
		rsp.residual = cmd->residual_count;

	rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
	if (rc != 0) {
		efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
		efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
		transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	} else {
		ocp->rsp_sent = true;
	}
}

static int
efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		       u32 flags, void *arg)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);
	if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
		efct_lio_io_printf(io, "callback completed with error=%d\n",
				   scsi_status);
		ocp->err = scsi_status;
	}
	efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);
	if (ocp->seg_map_cnt) {
		if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
		    ocp->cur_seg < ocp->seg_cnt) {
			int rc;

			efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
					   ocp->cur_seg);
			if (ocp->ddir == DMA_TO_DEVICE)
				rc = efct_lio_write_pending(&ocp->cmd);
			else
				rc = efct_lio_queue_data_in(&ocp->cmd);
			if (!rc)
				return 0;

			ocp->err = EFCT_SCSI_STATUS_ERROR;
			efct_lio_io_printf(io, "could not continue command\n");
		}
		efct_lio_sg_unmap(io);
	}

	if (io->tgt_io.aborting) {
		efct_lio_io_printf(io, "IO done aborted\n");
		return 0;
	}

	if (ocp->ddir == DMA_TO_DEVICE) {
		efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
				   io->tgt_io.cmd.transport_state);
		if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
			transport_generic_request_failure(&io->tgt_io.cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
			efct_set_lio_io_state(io,
				EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
		} else {
			efct_set_lio_io_state(io,
				EFCT_LIO_STATE_TGT_EXECUTE_CMD);
			target_execute_cmd(&io->tgt_io.cmd);
		}
	} else {
		efct_lio_send_resp(io, scsi_status, flags);
	}
	return 0;
}

static int
efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		  u32 flags, void *arg)
{
	efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
			      &io->tgt_io.cmd, scsi_status, flags);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
	transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	return 0;
}
static int
efct_lio_null_tmf_done(struct efct_io *tmfio,
		       enum efct_scsi_io_status scsi_status,
		       u32 flags, void *arg)
{
	efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
			      &tmfio->tgt_io.cmd, scsi_status, flags);

	/* free struct efct_io only, no active se_cmd */
	efct_scsi_io_complete(tmfio);
	return 0;
}

static int
efct_lio_queue_status(struct se_cmd *cmd)
{
	struct efct_scsi_cmd_resp rsp;
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	int rc = 0;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
	efct_lio_io_printf(io,
		"status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
		cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
		cmd->scsi_sense_length);

	memset(&rsp, 0, sizeof(rsp));
	rsp.scsi_status = cmd->scsi_status;
	rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
	rsp.sense_data_length = cmd->scsi_sense_length;

	/* Check for residual underrun or overrun; an overrun is flagged to
	 * the HW as a negative residual
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
		rsp.residual = -cmd->residual_count;
	else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
		rsp.residual = cmd->residual_count;

	rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
	if (rc == 0)
		ocp->rsp_sent = true;
	return rc;
}

static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
	struct se_tmr_req *se_tmr = cmd->se_tmr_req;
	u8 rspcode;

	efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
			      cmd, se_tmr->function, se_tmr->response);
	switch (se_tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
		rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
		rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
		break;
	case TMR_FUNCTION_REJECTED:
	default:
		rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
		break;
	}
	efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
}

static struct efct *efct_find_wwpn(u64 wwpn)
{
	struct efct *efct;

	/* Search for the HBA that has this WWPN */
	list_for_each_entry(efct, &efct_devices, list_entry) {
		if (wwpn == efct_get_wwpn(&efct->hw))
			return efct;
	}

	return NULL;
}

static struct se_wwn *
efct_lio_make_nport(struct target_fabric_configfs *tf,
		    struct config_group *group, const char *name)
{
	struct efct_lio_nport *lio_nport;
	struct efct *efct;
	int ret;
	u64 wwpn;

	ret = efct_lio_parse_wwn(name, &wwpn, 0);
	if (ret)
		return ERR_PTR(ret);

	efct = efct_find_wwpn(wwpn);
	if (!efct) {
		pr_err("cannot find EFCT for base wwpn %s\n", name);
		return ERR_PTR(-ENXIO);
	}

	lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
	if (!lio_nport)
		return ERR_PTR(-ENOMEM);

	lio_nport->efct = efct;
	lio_nport->wwpn = wwpn;
	efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
			"naa.", wwpn);
	efct->tgt_efct.lio_nport = lio_nport;

	return &lio_nport->nport_wwn;
}
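/*
 * Name formats accepted by the make_wwn handlers (as implied by the parsers
 * above; shown here only as an illustration):
 *
 *   efct:      "xx:xx:xx:xx:xx:xx:xx:xx"
 *   efct_npiv: "xx:xx:xx:xx:xx:xx:xx:xx@<16-hex-WWPN>:<16-hex-WWNN>"
 *
 * i.e. the NPIV variant carries the physical WWPN before the '@' and the
 * virtual WWPN/WWNN pair, without byte separators, after it.
 */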
static struct se_wwn *
efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
			 struct config_group *group, const char *name)
{
	struct efct_lio_vport *lio_vport;
	struct efct *efct;
	int ret;
	u64 p_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, *pbuf, tmp[128];
	struct efct_lio_vport_list_t *vport_list;
	struct fc_vport *new_fc_vport;
	struct fc_vport_identifiers vport_id;
	unsigned long flags = 0;

	snprintf(tmp, sizeof(tmp), "%s", name);
	pbuf = &tmp[0];

	p = strsep(&pbuf, "@");

	if (!p || !pbuf) {
		pr_err("Unable to find separator '@'\n");
		return ERR_PTR(-EINVAL);
	}

	ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
	if (ret)
		return ERR_PTR(ret);

	ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
				      &npiv_wwnn);
	if (ret)
		return ERR_PTR(ret);

	efct = efct_find_wwpn(p_wwpn);
	if (!efct) {
		pr_err("cannot find EFCT for base wwpn %s\n", name);
		return ERR_PTR(-ENXIO);
	}

	lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
	if (!lio_vport)
		return ERR_PTR(-ENOMEM);

	lio_vport->efct = efct;
	lio_vport->wwpn = p_wwpn;
	lio_vport->npiv_wwpn = npiv_wwpn;
	lio_vport->npiv_wwnn = npiv_wwnn;

	efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
			"naa.", npiv_wwpn);

	vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
	if (!vport_list) {
		kfree(lio_vport);
		return ERR_PTR(-ENOMEM);
	}

	vport_list->lio_vport = lio_vport;

	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
	if (!new_fc_vport) {
		efc_log_err(efct, "fc_vport_create failed\n");
		kfree(lio_vport);
		kfree(vport_list);
		return ERR_PTR(-ENOMEM);
	}

	lio_vport->fc_vport = new_fc_vport;
	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
	INIT_LIST_HEAD(&vport_list->list_entry);
	list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);

	return &lio_vport->vport_wwn;
}
static void
efct_lio_drop_nport(struct se_wwn *wwn)
{
	struct efct_lio_nport *lio_nport =
		container_of(wwn, struct efct_lio_nport, nport_wwn);
	struct efct *efct = lio_nport->efct;

	/* only physical nport should exist, free lio_nport allocated
	 * in efct_lio_make_nport.
	 */
	kfree(efct->tgt_efct.lio_nport);
	efct->tgt_efct.lio_nport = NULL;
}

static void
efct_lio_npiv_drop_nport(struct se_wwn *wwn)
{
	struct efct_lio_vport *lio_vport =
		container_of(wwn, struct efct_lio_vport, vport_wwn);
	struct efct_lio_vport_list_t *vport, *next_vport;
	struct efct *efct = lio_vport->efct;
	unsigned long flags = 0;

	if (lio_vport->fc_vport)
		fc_vport_terminate(lio_vport->fc_vport);

	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);

	list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
				 list_entry) {
		if (vport->lio_vport == lio_vport) {
			list_del(&vport->list_entry);
			kfree(vport->lio_vport);
			kfree(vport);
			break;
		}
	}
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
}

static struct se_portal_group *
efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct efct_lio_nport *lio_nport =
		container_of(wwn, struct efct_lio_nport, nport_wwn);
	struct efct_lio_tpg *tpg;
	struct efct *efct;
	unsigned long n;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	tpg->nport = lio_nport;
	tpg->tpgt = n;
	tpg->enabled = false;

	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;
	tpg->tpg_attrib.session_deletion_wait = 1;

	ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	efct = lio_nport->efct;
	efct->tgt_efct.tpg = tpg;
	efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);

	xa_init(&efct->lookup);
	return &tpg->tpg;
}

static void
efct_lio_drop_tpg(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	struct efct *efct = tpg->nport->efct;

	efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
	tpg->nport->efct->tgt_efct.tpg = NULL;
	core_tpg_deregister(se_tpg);
	xa_destroy(&efct->lookup);
	kfree(tpg);
}
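/*
 * A TPG directory is named "tpgt_<n>"; e.g. (illustrative path)
 *   mkdir /sys/kernel/config/target/efct_npiv/naa.<wwpn>/tpgt_1
 * lands in efct_lio_npiv_make_tpg() below. The NPIV fabric accepts only
 * index 1, while the physical fabric above allows any index up to
 * USHRT_MAX.
 */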
static struct se_portal_group *
efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct efct_lio_vport *lio_vport =
		container_of(wwn, struct efct_lio_vport, vport_wwn);
	struct efct_lio_tpg *tpg;
	struct efct *efct;
	unsigned long n;
	int ret;

	efct = lio_vport->efct;
	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	if (n != 1) {
		efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n);
		return ERR_PTR(-EINVAL);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	tpg->vport = lio_vport;
	tpg->tpgt = n;
	tpg->enabled = false;

	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;
	tpg->tpg_attrib.session_deletion_wait = 1;

	ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);

	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	lio_vport->tpg = tpg;
	efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);

	return &tpg->tpg;
}

static void
efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
		      tpg->tpgt);
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static int
efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	struct efct_lio_nacl *nacl;
	u64 wwnn;

	if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
		return -EINVAL;

	nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
	nacl->nport_wwnn = wwnn;

	efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
	return 0;
}

static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
{
	struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);

	return tpg->tpg_attrib.demo_mode_login_only;
}

static int
efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
{
	struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);

	return tpg->tpg_attrib.demo_mode_login_only;
}

static struct efct_lio_tpg *
efct_get_vport_tpg(struct efc_node *node)
{
	struct efct *efct;
	u64 wwpn = node->nport->wwpn;
	struct efct_lio_vport_list_t *vport, *next;
	struct efct_lio_vport *lio_vport = NULL;
	struct efct_lio_tpg *tpg = NULL;
	unsigned long flags = 0;

	efct = node->efc->base;
	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
	list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
				 list_entry) {
		lio_vport = vport->lio_vport;
		if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
			efc_log_debug(efct, "found tpg on vport\n");
			tpg = lio_vport->tpg;
			break;
		}
	}
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
	return tpg;
}

static void
_efct_tgt_node_free(struct kref *arg)
{
	struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
	struct efc_node *node = tgt_node->node;

	efc_scsi_del_initiator_complete(node->efc, node);
	kfree(tgt_node);
}

static int efct_session_cb(struct se_portal_group *se_tpg,
			   struct se_session *se_sess, void *private)
{
	struct efc_node *node = private;
	struct efct_node *tgt_node;
	struct efct *efct = node->efc->base;

	tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
	if (!tgt_node)
		return -ENOMEM;

	kref_init(&tgt_node->ref);
	tgt_node->release = _efct_tgt_node_free;

	tgt_node->session = se_sess;
	node->tgt_node = tgt_node;
	tgt_node->efct = efct;

	tgt_node->node = node;

	tgt_node->node_fc_id = node->rnode.fc_id;
	tgt_node->port_fc_id = node->nport->fc_id;
	tgt_node->vpi = node->nport->indicator;
	tgt_node->rpi = node->rnode.indicator;

	spin_lock_init(&tgt_node->active_ios_lock);
	INIT_LIST_HEAD(&tgt_node->active_ios);

	return 0;
}
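/*
 * IO watermark scheme (as implemented below and in the session paths): with
 * total_ios hardware IOs, watermark_min/max are fixed at
 * EFCT_WATERMARK_LOW_PCT/HIGH_PCT percent of the pool. Each logged-in
 * initiator lowers the effective high watermark by
 * EFCT_IO_WATERMARK_PER_INITIATOR, clamped so it never drops below
 * watermark_min.
 */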
int efct_scsi_tgt_new_device(struct efct *efct)
{
	u32 total_ios;

	/* Get the max settings */
	efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
	efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);

	/* initialize IO watermark fields */
	atomic_set(&efct->tgt_efct.ios_in_use, 0);
	total_ios = efct->hw.config.n_io;
	efc_log_debug(efct, "total_ios=%d\n", total_ios);
	efct->tgt_efct.watermark_min =
			(total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
	efct->tgt_efct.watermark_max =
			(total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
	atomic_set(&efct->tgt_efct.io_high_watermark,
		   efct->tgt_efct.watermark_max);
	atomic_set(&efct->tgt_efct.watermark_hit, 0);
	atomic_set(&efct->tgt_efct.initiator_count, 0);

	lio_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
					 "efct_lio_worker");
	if (!lio_wq) {
		efc_log_err(efct, "workqueue create failed\n");
		return -EIO;
	}

	spin_lock_init(&efct->tgt_efct.efct_lio_lock);
	INIT_LIST_HEAD(&efct->tgt_efct.vport_list);

	return 0;
}

int efct_scsi_tgt_del_device(struct efct *efct)
{
	flush_workqueue(lio_wq);

	return 0;
}

int
efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
{
	struct efct *efct = nport->efc->base;

	efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
		      efct->tgt_efct.lio_nport->wwpn_str);

	return 0;
}

void
efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
{
	efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
}
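/*
 * Sessions are keyed in efct->lookup by a 64-bit id built from the local
 * port FC_ID in the upper 32 bits and the remote node FC_ID in the lower
 * 32 bits; efct_lio_setup_session() below stores the entry and
 * efct_scsi_del_initiator() erases it.
 */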
static void efct_lio_setup_session(struct work_struct *work)
{
	struct efct_lio_wq_data *wq_data =
		container_of(work, struct efct_lio_wq_data, work);
	struct efct *efct = wq_data->efct;
	struct efc_node *node = wq_data->ptr;
	char wwpn[WWN_NAME_LEN];
	struct efct_lio_tpg *tpg;
	struct efct_node *tgt_node;
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	int watermark;
	int ini_count;
	u64 id;

	/* Check to see if the node belongs to a vport;
	 * if not, use the physical port
	 */
	tpg = efct_get_vport_tpg(node);
	if (tpg) {
		se_tpg = &tpg->tpg;
	} else if (efct->tgt_efct.tpg) {
		tpg = efct->tgt_efct.tpg;
		se_tpg = &tpg->tpg;
	} else {
		efc_log_err(efct, "failed to init session\n");
		kfree(wq_data);
		return;
	}

	/*
	 * Format the FCP Initiator port_name into colon
	 * separated values to match the format used by our explicit
	 * ConfigFS NodeACLs.
	 */
	efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node));

	se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
				       node, efct_session_cb);
	if (IS_ERR(se_sess)) {
		efc_log_err(efct, "failed to setup session\n");
		kfree(wq_data);
		efc_scsi_sess_reg_complete(node, -EIO);
		return;
	}

	tgt_node = node->tgt_node;
	id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;

	efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
		      se_sess, node, id);

	if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
		efc_log_err(efct, "Node lookup store failed\n");

	efc_scsi_sess_reg_complete(node, 0);

	/* update IO watermark: increment initiator count */
	ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
	watermark = efct->tgt_efct.watermark_max -
		    ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
	watermark = (efct->tgt_efct.watermark_min > watermark) ?
			efct->tgt_efct.watermark_min : watermark;
	atomic_set(&efct->tgt_efct.io_high_watermark, watermark);

	kfree(wq_data);
}

int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
{
	struct efct *efct = node->efc->base;
	struct efct_lio_wq_data *wq_data;

	/*
	 * Since LIO only supports initiator validation at thread level,
	 * we are open minded and accept all callers.
	 */
	wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
	if (!wq_data)
		return -ENOMEM;

	wq_data->ptr = node;
	wq_data->efct = efct;
	INIT_WORK(&wq_data->work, efct_lio_setup_session);
	queue_work(lio_wq, &wq_data->work);
	return EFC_SCSI_CALL_ASYNC;
}

static void efct_lio_remove_session(struct work_struct *work)
{
	struct efct_lio_wq_data *wq_data =
		container_of(work, struct efct_lio_wq_data, work);
	struct efct *efct = wq_data->efct;
	struct efc_node *node = wq_data->ptr;
	struct efct_node *tgt_node;
	struct se_session *se_sess;

	tgt_node = node->tgt_node;
	if (!tgt_node) {
		/* base driver has sent back-to-back requests
		 * to unreg session with no intervening
		 * register
		 */
		efc_log_err(efct, "unreg session for NULL session\n");
		efc_scsi_del_initiator_complete(node->efc, node);
		return;
	}

	se_sess = tgt_node->session;
	efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
		      se_sess, node);

	/* first flag all session commands to complete */
	target_stop_session(se_sess);

	/* now wait for session commands to complete */
	target_wait_for_sess_cmds(se_sess);
	target_remove_session(se_sess);
	tgt_node->session = NULL;
	node->tgt_node = NULL;
	kref_put(&tgt_node->ref, tgt_node->release);

	kfree(wq_data);
}
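/*
 * The tgt_node is reference counted; when the final kref_put() runs
 * _efct_tgt_node_free(), it completes the initiator-delete handshake with
 * the libefc layer and frees the node.
 */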
int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
{
	struct efct *efct = node->efc->base;
	struct efct_node *tgt_node = node->tgt_node;
	struct efct_lio_wq_data *wq_data;
	int watermark;
	int ini_count;
	u64 id;

	if (reason == EFCT_SCSI_INITIATOR_MISSING)
		return EFC_SCSI_CALL_COMPLETE;

	if (!tgt_node) {
		efc_log_err(efct, "tgt_node is NULL\n");
		return -EIO;
	}

	wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
	if (!wq_data)
		return -ENOMEM;

	id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
	xa_erase(&efct->lookup, id);

	wq_data->ptr = node;
	wq_data->efct = efct;
	INIT_WORK(&wq_data->work, efct_lio_remove_session);
	queue_work(lio_wq, &wq_data->work);

	/*
	 * update IO watermark: decrement initiator count
	 */
	ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);

	watermark = efct->tgt_efct.watermark_max -
		    ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
	watermark = (efct->tgt_efct.watermark_min > watermark) ?
			efct->tgt_efct.watermark_min : watermark;
	atomic_set(&efct->tgt_efct.io_high_watermark, watermark);

	return EFC_SCSI_CALL_ASYNC;
}

void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
			u32 cdb_len, u32 flags)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *se_cmd = &io->tgt_io.cmd;
	struct efct *efct = io->efct;
	char *ddir;
	struct efct_node *tgt_node;
	struct se_session *se_sess;
	int rc = 0;

	memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
	atomic_add_return(1, &efct->tgt_efct.ios_in_use);

	/* set target timeout */
	io->timeout = efct->target_io_timer_sec;

	if (flags & EFCT_SCSI_CMD_SIMPLE)
		ocp->task_attr = TCM_SIMPLE_TAG;
	else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
		ocp->task_attr = TCM_HEAD_TAG;
	else if (flags & EFCT_SCSI_CMD_ORDERED)
		ocp->task_attr = TCM_ORDERED_TAG;
	else if (flags & EFCT_SCSI_CMD_ACA)
		ocp->task_attr = TCM_ACA_TAG;

	switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
	case EFCT_SCSI_CMD_DIR_IN:
		ddir = "FROM_INITIATOR";
		ocp->ddir = DMA_TO_DEVICE;
		break;
	case EFCT_SCSI_CMD_DIR_OUT:
		ddir = "TO_INITIATOR";
		ocp->ddir = DMA_FROM_DEVICE;
		break;
	case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
		ddir = "BIDIR";
		ocp->ddir = DMA_BIDIRECTIONAL;
		break;
	default:
		ddir = "NONE";
		ocp->ddir = DMA_NONE;
		break;
	}

	ocp->lun = lun;
	efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
			   cdb[0], ddir, io->exp_xfer_len);

	tgt_node = io->node;
	se_sess = tgt_node->session;
	if (!se_sess) {
		efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
			    &ocp->cmd);
		efct_scsi_io_free(io);
		return;
	}

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
	rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
			     ocp->lun, io->exp_xfer_len, ocp->task_attr,
			     ocp->ddir, TARGET_SCF_ACK_KREF);
	if (rc) {
		efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
		efct_scsi_io_free(io);
		return;
	}

	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
			       NULL, 0, GFP_ATOMIC))
		return;

	target_submit(se_cmd);
}
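/*
 * Command submission above uses the split target-core API: target_init_cmd()
 * takes the session command reference, target_submit_prep() maps the CDB
 * (no driver-provided SGLs here, hence the NULL/0 arguments), and
 * target_submit() queues execution. If target_submit_prep() fails, the
 * target core is expected to complete the command itself, so the driver
 * simply returns.
 */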
int
efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
		   struct efct_io *io_to_abort, u32 flags)
{
	unsigned char tmr_func;
	struct efct *efct = tmfio->efct;
	struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
	struct efct_node *tgt_node;
	struct se_session *se_sess;
	int rc;

	memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
	efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
	atomic_add_return(1, &efct->tgt_efct.ios_in_use);
	efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
			      tmfio->display_name, cmd, lun);

	switch (cmd) {
	case EFCT_SCSI_TMF_ABORT_TASK:
		tmr_func = TMR_ABORT_TASK;
		break;
	case EFCT_SCSI_TMF_ABORT_TASK_SET:
		tmr_func = TMR_ABORT_TASK_SET;
		break;
	case EFCT_SCSI_TMF_CLEAR_TASK_SET:
		tmr_func = TMR_CLEAR_TASK_SET;
		break;
	case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
		tmr_func = TMR_LUN_RESET;
		break;
	case EFCT_SCSI_TMF_CLEAR_ACA:
		tmr_func = TMR_CLEAR_ACA;
		break;
	case EFCT_SCSI_TMF_TARGET_RESET:
		tmr_func = TMR_TARGET_WARM_RESET;
		break;
	case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
	case EFCT_SCSI_TMF_QUERY_TASK_SET:
	default:
		goto tmf_fail;
	}

	tmfio->tgt_io.tmf = tmr_func;
	tmfio->tgt_io.lun = lun;
	tmfio->tgt_io.io_to_abort = io_to_abort;

	tgt_node = tmfio->node;

	se_sess = tgt_node->session;
	if (!se_sess)
		return 0;

	rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
			       GFP_ATOMIC, tmfio->init_task_tag,
			       TARGET_SCF_ACK_KREF);

	efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
	if (rc)
		goto tmf_fail;

	return 0;

tmf_fail:
	efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
				NULL, efct_lio_null_tmf_done, NULL);
	return 0;
}

/* Start items for efct_lio_tpg_attrib_cit */

#define DEF_EFCT_TPG_ATTRIB(name)					  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_show(			  \
		struct config_item *item, char *page)			  \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
									  \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		  \
}									  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_store(			  \
		struct config_item *item, const char *page, size_t count) \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		  \
	unsigned long val;						  \
	int ret;							  \
									  \
	ret = kstrtoul(page, 0, &val);					  \
	if (ret < 0) {							  \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	  \
		return ret;						  \
	}								  \
									  \
	if (val != 0 && val != 1) {					  \
		pr_err("Illegal boolean value %lu\n", val);		  \
		return -EINVAL;						  \
	}								  \
									  \
	a->name = val;							  \
									  \
	return count;							  \
}									  \
CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)

DEF_EFCT_TPG_ATTRIB(generate_node_acls);
DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
DEF_EFCT_TPG_ATTRIB(session_deletion_wait);

static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
	&efct_lio_tpg_attrib_attr_generate_node_acls,
	&efct_lio_tpg_attrib_attr_cache_dynamic_acls,
	&efct_lio_tpg_attrib_attr_demo_mode_write_protect,
	&efct_lio_tpg_attrib_attr_prod_mode_write_protect,
	&efct_lio_tpg_attrib_attr_demo_mode_login_only,
	&efct_lio_tpg_attrib_attr_session_deletion_wait,
	NULL,
};
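/*
 * Each DEF_EFCT_TPG_ATTRIB() instance above becomes a boolean file under
 * the TPG's attrib/ directory, e.g. (illustrative path)
 *   echo 0 > .../efct/naa.<wwpn>/tpgt_1/attrib/demo_mode_write_protect
 * Values other than 0 and 1 are rejected with -EINVAL.
 */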
#define DEF_EFCT_NPIV_TPG_ATTRIB(name)					  \
									  \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_show(			  \
		struct config_item *item, char *page)			  \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
									  \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		  \
}									  \
									  \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_store(			  \
		struct config_item *item, const char *page, size_t count) \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		  \
	unsigned long val;						  \
	int ret;							  \
									  \
	ret = kstrtoul(page, 0, &val);					  \
	if (ret < 0) {							  \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	  \
		return ret;						  \
	}								  \
									  \
	if (val != 0 && val != 1) {					  \
		pr_err("Illegal boolean value %lu\n", val);		  \
		return -EINVAL;						  \
	}								  \
									  \
	a->name = val;							  \
									  \
	return count;							  \
}									  \
CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)

DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);

static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
	&efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
	&efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
	&efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
	&efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
	&efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
	&efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
	NULL,
};

CONFIGFS_ATTR(efct_lio_tpg_, enable);
static struct configfs_attribute *efct_lio_tpg_attrs[] = {
	&efct_lio_tpg_attr_enable, NULL };
CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);
static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
	&efct_lio_npiv_tpg_attr_enable, NULL };

static const struct target_core_fabric_ops efct_lio_ops = {
	.module = THIS_MODULE,
	.fabric_name = "efct",
	.node_acl_size = sizeof(struct efct_lio_nacl),
	.max_data_sg_nents = 65535,
	.tpg_get_wwn = efct_lio_get_fabric_wwn,
	.tpg_get_tag = efct_lio_get_tag,
	.fabric_init_nodeacl = efct_lio_init_nodeacl,
	.tpg_check_demo_mode = efct_lio_check_demo_mode,
	.tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
	.check_stop_free = efct_lio_check_stop_free,
	.aborted_task = efct_lio_aborted_task,
	.release_cmd = efct_lio_release_cmd,
	.close_session = efct_lio_close_session,
	.write_pending = efct_lio_write_pending,
	.get_cmd_state = efct_lio_get_cmd_state,
	.queue_data_in = efct_lio_queue_data_in,
	.queue_status = efct_lio_queue_status,
	.queue_tm_rsp = efct_lio_queue_tm_rsp,
	.fabric_make_wwn = efct_lio_make_nport,
	.fabric_drop_wwn = efct_lio_drop_nport,
	.fabric_make_tpg = efct_lio_make_tpg,
	.fabric_drop_tpg = efct_lio_drop_tpg,
	.tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
	.tpg_check_prot_fabric_only = NULL,
	.sess_get_initiator_sid = NULL,
	.tfc_tpg_base_attrs = efct_lio_tpg_attrs,
	.tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs,
	.default_submit_type = TARGET_DIRECT_SUBMIT,
	.direct_submit_supp = 1,
};
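/*
 * The two fabric templates register the "efct" (physical port) and
 * "efct_npiv" (virtual port) fabrics with the target core; once registered
 * they appear as directories of the same names under configfs
 * (typically /sys/kernel/config/target/).
 */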
static const struct target_core_fabric_ops efct_lio_npiv_ops = {
	.module = THIS_MODULE,
	.fabric_name = "efct_npiv",
	.node_acl_size = sizeof(struct efct_lio_nacl),
	.max_data_sg_nents = 65535,
	.tpg_get_wwn = efct_lio_get_npiv_fabric_wwn,
	.tpg_get_tag = efct_lio_get_npiv_tag,
	.fabric_init_nodeacl = efct_lio_init_nodeacl,
	.tpg_check_demo_mode = efct_lio_check_demo_mode,
	.tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
		efct_lio_npiv_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
		efct_lio_npiv_check_prod_write_protect,
	.check_stop_free = efct_lio_check_stop_free,
	.aborted_task = efct_lio_aborted_task,
	.release_cmd = efct_lio_release_cmd,
	.close_session = efct_lio_close_session,
	.write_pending = efct_lio_write_pending,
	.get_cmd_state = efct_lio_get_cmd_state,
	.queue_data_in = efct_lio_queue_data_in,
	.queue_status = efct_lio_queue_status,
	.queue_tm_rsp = efct_lio_queue_tm_rsp,
	.fabric_make_wwn = efct_lio_npiv_make_nport,
	.fabric_drop_wwn = efct_lio_npiv_drop_nport,
	.fabric_make_tpg = efct_lio_npiv_make_tpg,
	.fabric_drop_tpg = efct_lio_npiv_drop_tpg,
	.tpg_check_demo_mode_login_only =
		efct_lio_npiv_check_demo_mode_login_only,
	.tpg_check_prot_fabric_only = NULL,
	.sess_get_initiator_sid = NULL,
	.tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs,
	.tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs,
	.default_submit_type = TARGET_DIRECT_SUBMIT,
	.direct_submit_supp = 1,
};

int efct_scsi_tgt_driver_init(void)
{
	int rc;

	/* Register the top level struct config_item_type with TCM core */
	rc = target_register_template(&efct_lio_ops);
	if (rc < 0) {
		pr_err("target_fabric_configfs_register failed with %d\n", rc);
		return rc;
	}
	rc = target_register_template(&efct_lio_npiv_ops);
	if (rc < 0) {
		pr_err("target_fabric_configfs_register failed with %d\n", rc);
		target_unregister_template(&efct_lio_ops);
		return rc;
	}
	return 0;
}

int efct_scsi_tgt_driver_exit(void)
{
	target_unregister_template(&efct_lio_ops);
	target_unregister_template(&efct_lio_npiv_ops);
	return 0;
}