// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
	refcount_t ref;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	refcount_t ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	refcount_t			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
	}

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
	}

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

static int
fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
{
	struct fcloop_rport *rport = hosthandle;

	*wwnn = rport->lport->localport->node_name;
	*wwpn = rport->lport->localport->port_name;
	return 0;
}

/*
 * Simulate reception of RSCN and converting it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	queue_work(nvmet_wq, &tgt_rscn->work);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	if (!refcount_dec_and_test(&tfcp_req->ref))
		return;

	kfree(tfcp_req);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return refcount_inc_not_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;

/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *	0 if io is not obstructed
 *	1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
"y" : "n", 599 drop_opcode, drop_current_cnt, drop_instance, drop_amount); 600 601 if ((drop_fabric_opcode && 602 (sqe->common.opcode != nvme_fabrics_command || 603 sqe->fabrics.fctype != drop_opcode)) || 604 (!drop_fabric_opcode && sqe->common.opcode != drop_opcode)) 605 return 0; 606 607 if (++drop_current_cnt >= drop_instance) { 608 if (drop_current_cnt >= drop_instance + drop_amount) 609 drop_opcode = -1; 610 return 1; 611 } 612 613 return 0; 614 } 615 616 static void 617 fcloop_fcp_recv_work(struct work_struct *work) 618 { 619 struct fcloop_fcpreq *tfcp_req = 620 container_of(work, struct fcloop_fcpreq, fcp_rcv_work); 621 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; 622 unsigned long flags; 623 int ret = 0; 624 bool aborted = false; 625 626 spin_lock_irqsave(&tfcp_req->reqlock, flags); 627 switch (tfcp_req->inistate) { 628 case INI_IO_START: 629 tfcp_req->inistate = INI_IO_ACTIVE; 630 break; 631 case INI_IO_ABORTED: 632 aborted = true; 633 break; 634 default: 635 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 636 WARN_ON(1); 637 return; 638 } 639 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 640 641 if (unlikely(aborted)) 642 ret = -ECANCELED; 643 else { 644 if (likely(!check_for_drop(tfcp_req))) 645 ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, 646 &tfcp_req->tgt_fcp_req, 647 fcpreq->cmdaddr, fcpreq->cmdlen); 648 else 649 pr_info("%s: dropped command ********\n", __func__); 650 } 651 if (ret) 652 fcloop_call_host_done(fcpreq, tfcp_req, ret); 653 } 654 655 static void 656 fcloop_fcp_abort_recv_work(struct work_struct *work) 657 { 658 struct fcloop_fcpreq *tfcp_req = 659 container_of(work, struct fcloop_fcpreq, abort_rcv_work); 660 struct nvmefc_fcp_req *fcpreq; 661 bool completed = false; 662 unsigned long flags; 663 664 spin_lock_irqsave(&tfcp_req->reqlock, flags); 665 fcpreq = tfcp_req->fcpreq; 666 switch (tfcp_req->inistate) { 667 case INI_IO_ABORTED: 668 break; 669 case INI_IO_COMPLETED: 670 completed = true; 671 break; 672 default: 673 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 674 WARN_ON(1); 675 return; 676 } 677 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 678 679 if (unlikely(completed)) { 680 /* remove reference taken in original abort downcall */ 681 fcloop_tfcp_req_put(tfcp_req); 682 return; 683 } 684 685 if (tfcp_req->tport->targetport) 686 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, 687 &tfcp_req->tgt_fcp_req); 688 689 spin_lock_irqsave(&tfcp_req->reqlock, flags); 690 tfcp_req->fcpreq = NULL; 691 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 692 693 fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); 694 /* call_host_done releases reference for abort downcall */ 695 } 696 697 /* 698 * FCP IO operation done by target completion. 699 * call back up initiator "done" flows. 
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	refcount_set(&tfcp_req->ref, 1);

	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

	return 0;
}

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irqsave(&tfcp_req->reqlock, flags);
		tfcp_req->active = false;
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->active = false;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	unsigned long flags;

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->aborted = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}

static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;
	unsigned long flags;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_lport_put(struct fcloop_lport *lport)
{
	unsigned long flags;

	if (!refcount_dec_and_test(&lport->ref))
		return;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&lport->lport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(lport);
}

static int
fcloop_lport_get(struct fcloop_lport *lport)
{
	return refcount_inc_not_zero(&lport->ref);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	if (!refcount_dec_and_test(&nport->ref))
		return;

	kfree(nport);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return refcount_inc_not_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);

	fcloop_lport_put(lport);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}

#define FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.host_traddr		= fcloop_t2h_host_traddr,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);
		refcount_set(&lport->ref, 1);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	if (!ret)
		wait_for_completion(&lport->unreg_done);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			if (!fcloop_lport_get(tlport))
				break;
			lport = tlport;
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);
	fcloop_lport_put(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	refcount_set(&newnport->ref, 1);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	list_del(&nport->nport_list);

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int opcode;
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return -EBADRQC;

	drop_current_cnt = 0;
	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
	drop_opcode = (opcode & DROP_OPCODE_MASK);
	drop_instance = starting;
	/* the check to drop routine uses instance + count to know when
	 * to end. Thus, if dropping 1 instance, count should be 0.
	 * so subtract 1 from the count.
	 */
	drop_amount = amount - 1;

	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
		"instances\n",
		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
" fabric" : "", 1542 drop_opcode, drop_amount); 1543 1544 return count; 1545 } 1546 1547 1548 static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port); 1549 static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port); 1550 static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port); 1551 static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port); 1552 static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port); 1553 static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port); 1554 static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop); 1555 1556 static struct attribute *fcloop_dev_attrs[] = { 1557 &dev_attr_add_local_port.attr, 1558 &dev_attr_del_local_port.attr, 1559 &dev_attr_add_remote_port.attr, 1560 &dev_attr_del_remote_port.attr, 1561 &dev_attr_add_target_port.attr, 1562 &dev_attr_del_target_port.attr, 1563 &dev_attr_set_cmd_drop.attr, 1564 NULL 1565 }; 1566 1567 static const struct attribute_group fclopp_dev_attrs_group = { 1568 .attrs = fcloop_dev_attrs, 1569 }; 1570 1571 static const struct attribute_group *fcloop_dev_attr_groups[] = { 1572 &fclopp_dev_attrs_group, 1573 NULL, 1574 }; 1575 1576 static const struct class fcloop_class = { 1577 .name = "fcloop", 1578 }; 1579 static struct device *fcloop_device; 1580 1581 1582 static int __init fcloop_init(void) 1583 { 1584 int ret; 1585 1586 ret = class_register(&fcloop_class); 1587 if (ret) { 1588 pr_err("couldn't register class fcloop\n"); 1589 return ret; 1590 } 1591 1592 fcloop_device = device_create_with_groups( 1593 &fcloop_class, NULL, MKDEV(0, 0), NULL, 1594 fcloop_dev_attr_groups, "ctl"); 1595 if (IS_ERR(fcloop_device)) { 1596 pr_err("couldn't create ctl device!\n"); 1597 ret = PTR_ERR(fcloop_device); 1598 goto out_destroy_class; 1599 } 1600 1601 get_device(fcloop_device); 1602 1603 return 0; 1604 1605 out_destroy_class: 1606 class_unregister(&fcloop_class); 1607 return ret; 1608 } 1609 1610 static void __exit fcloop_exit(void) 1611 { 1612 struct fcloop_lport *lport = NULL; 1613 struct fcloop_nport *nport = NULL; 1614 struct fcloop_tport *tport; 1615 struct fcloop_rport *rport; 1616 unsigned long flags; 1617 int ret; 1618 1619 spin_lock_irqsave(&fcloop_lock, flags); 1620 1621 for (;;) { 1622 nport = list_first_entry_or_null(&fcloop_nports, 1623 typeof(*nport), nport_list); 1624 if (!nport) 1625 break; 1626 1627 tport = __unlink_target_port(nport); 1628 rport = __unlink_remote_port(nport); 1629 1630 spin_unlock_irqrestore(&fcloop_lock, flags); 1631 1632 ret = __targetport_unreg(nport, tport); 1633 if (ret) 1634 pr_warn("%s: Failed deleting target port\n", __func__); 1635 1636 ret = __remoteport_unreg(nport, rport); 1637 if (ret) 1638 pr_warn("%s: Failed deleting remote port\n", __func__); 1639 1640 spin_lock_irqsave(&fcloop_lock, flags); 1641 } 1642 1643 for (;;) { 1644 lport = list_first_entry_or_null(&fcloop_lports, 1645 typeof(*lport), lport_list); 1646 if (!lport || !fcloop_lport_get(lport)) 1647 break; 1648 1649 spin_unlock_irqrestore(&fcloop_lock, flags); 1650 1651 ret = __wait_localport_unreg(lport); 1652 if (ret) 1653 pr_warn("%s: Failed deleting local port\n", __func__); 1654 1655 fcloop_lport_put(lport); 1656 1657 spin_lock_irqsave(&fcloop_lock, flags); 1658 } 1659 1660 spin_unlock_irqrestore(&fcloop_lock, flags); 1661 1662 put_device(fcloop_device); 1663 1664 device_destroy(&fcloop_class, MKDEV(0, 0)); 1665 class_unregister(&fcloop_class); 1666 } 1667 1668 module_init(fcloop_init); 1669 module_exit(fcloop_exit); 1670 
MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");