// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

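/*
 * Address options (wwnn/wwpn/lpwwnn/lpwwpn) must be given as "0x"
 * followed by a full-width hex name (NVME_FC_TRADDR_HEXNAMELEN
 * digits), e.g. an illustrative value such as "wwnn=0x10000090fa942092";
 * anything shorter, longer, or missing the "0x" prefix is rejected by
 * fcloop_verify_addr() below.
 */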
static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

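/*
 * Object topology, as one reading of the structures below: a
 * fcloop_lport wraps a registered NVMe-FC local port, and a
 * fcloop_nport pairs at most one remote port (rport, host side) and
 * one target port (tport, target side) under a single
 * node_name/port_name. Both global lists are protected by
 * fcloop_lock.
 */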
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	refcount_t ref;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

/* The port is already being removed, avoid double free */
#define PORT_DELETED		0

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
	unsigned long			flags;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
	unsigned long			flags;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	refcount_t ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

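/*
 * Initiator-side I/O state, tracked in fcloop_fcpreq->inistate:
 * START -> ACTIVE when the receive work runs, then -> COMPLETED from
 * the target done path, or -> ABORTED if the host aborts first; the
 * abort-receive work resolves the ABORTED vs. COMPLETED race.
 */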
enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	refcount_t			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	spinlock_t		inilock;
};

/* SLAB cache for fcloop_lsreq structures */
static struct kmem_cache *lsreq_cache;

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */
		kmem_cache_free(lsreq_cache, tls_req);

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

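/*
 * Host-to-target LS path: the request is handed directly to the
 * target via nvmet_fc_rcv_ls_req(); when no target port is attached,
 * a -ECONNREFUSED completion is instead queued and delivered
 * asynchronously from ls_work.
 */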
static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_lsreq *tls_req;
	int ret = 0;

	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
	if (!tls_req)
		return -ENOMEM;
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (!remoteport) {
		kmem_cache_free(lsreq_cache, tls_req);
		return 0;
	}

	rport = remoteport->private;
	spin_lock(&rport->lock);
	list_add_tail(&tls_req->ls_list, &rport->ls_list);
	spin_unlock(&rport->lock);
	queue_work(nvmet_wq, &rport->ls_work);

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */
		kmem_cache_free(lsreq_cache, tls_req);

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

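/*
 * Target-to-host LS path, mirroring the H2T path above in the
 * reverse direction via nvme_fc_rcv_ls_req(); with no remote port
 * attached, the status is again completed asynchronously from
 * ls_work.
 */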
static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_tport *tport = targetport->private;
	struct fcloop_lsreq *tls_req;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */

	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
	if (!tls_req)
		return -ENOMEM;
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	if (ret)
		kmem_cache_free(lsreq_cache, tls_req);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	if (!targetport) {
		/*
		 * The target port is gone. The target doesn't expect any
		 * response anymore and thus lsreq can't be accessed anymore.
		 *
		 * We end up here from delete association exchange:
		 * nvmet_fc_xmt_disconnect_assoc sends an async request.
		 *
		 * Return success because this is what LLDDs do; silently
		 * drop the response.
		 */
		lsrsp->done(lsrsp);
		kmem_cache_free(lsreq_cache, tls_req);
		return 0;
	}

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	tport = targetport->private;
	spin_lock(&tport->lock);
	list_add_tail(&tls_req->ls_list, &tport->ls_list);
	spin_unlock(&tport->lock);
	queue_work(nvmet_wq, &tport->ls_work);

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

static int
fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
{
	struct fcloop_rport *rport = hosthandle;

	*wwnn = rport->lport->localport->node_name;
	*wwpn = rport->lport->localport->port_name;
	return 0;
}

/*
 * Simulate reception of an RSCN and convert it to an initiator
 * transport call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	queue_work(nvmet_wq, &tgt_rscn->work);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	if (!refcount_dec_and_test(&tfcp_req->ref))
		return;

	kfree(tfcp_req);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return refcount_inc_not_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
		      struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	if (tfcp_req)
		fcloop_tfcp_req_put(tfcp_req);
}

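/*
 * Command-drop debug facility, armed through the set_cmd_drop sysfs
 * attribute (see fcloop_set_cmd_drop() below). An illustrative
 * invocation might be:
 *
 *	echo "0x101:1:1" > /sys/class/fcloop/ctl/set_cmd_drop
 *
 * which, with the fabric bit encoded above the first byte as done
 * here, would drop a single fabrics command with fctype 0x01.
 */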
static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;

/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *  0 if io is not obstructed
 *  1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;
	int ret = 0;
	bool aborted = false;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(aborted)) {
		/* the abort handler will call fcloop_call_host_done */
		return;
	}

	if (unlikely(check_for_drop(tfcp_req))) {
		pr_info("%s: dropped command ********\n", __func__);
		return;
	}

	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				   &tfcp_req->tgt_fcp_req,
				   fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		fcpreq = tfcp_req->fcpreq;
		tfcp_req->fcpreq = NULL;
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		fcloop_tfcp_req_put(tfcp_req);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
				       &tfcp_req->tgt_fcp_req);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	refcount_set(&tfcp_req->ref, 1);

	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

	return 0;
}

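/*
 * Copy payload between the target-side and initiator-side
 * scatterlists: the first loop walks io_sg forward to skip "offset"
 * bytes, the second copies "length" bytes one
 * min-of-both-current-segments chunk at a time, advancing whichever
 * list drained its segment.
 */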
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irqsave(&tfcp_req->reqlock, flags);
		tfcp_req->active = false;
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->active = false;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	unsigned long flags;

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->aborted = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}

static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

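/*
 * Host-side FCP abort: the reference taken under inilock keeps the
 * target-side request alive while the abort work is scheduled;
 * depending on inistate the io is either marked ABORTED (and finished
 * by the abort work) or has already completed, in which case only the
 * reference is dropped.
 */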
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;
	unsigned long flags;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req) {
		if (!fcloop_tfcp_req_get(tfcp_req))
			tfcp_req = NULL;
	}
	spin_unlock(&inireq->inilock);

	if (!tfcp_req) {
		/* abort has already been called */
		goto out_host_done;
	}

	/* break initiator/target relationship for io */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		goto out_host_done;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}

	return;

out_host_done:
	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
}

static void
fcloop_lport_put(struct fcloop_lport *lport)
{
	unsigned long flags;

	if (!refcount_dec_and_test(&lport->ref))
		return;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&lport->lport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(lport);
}

static int
fcloop_lport_get(struct fcloop_lport *lport)
{
	return refcount_inc_not_zero(&lport->ref);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	unsigned long flags;

	if (!refcount_dec_and_test(&nport->ref))
		return;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (nport->lport)
		fcloop_lport_put(nport->lport);

	kfree(nport);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return refcount_inc_not_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	fcloop_lport_put(lport);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;
	bool put_port = false;
	unsigned long flags;

	flush_work(&rport->ls_work);

	spin_lock_irqsave(&fcloop_lock, flags);
	if (!test_and_set_bit(PORT_DELETED, &rport->flags))
		put_port = true;
	rport->nport->rport = NULL;
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (put_port)
		fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;
	bool put_port = false;
	unsigned long flags;

	flush_work(&tport->ls_work);

	spin_lock_irqsave(&fcloop_lock, flags);
	if (!test_and_set_bit(PORT_DELETED, &tport->flags))
		put_port = true;
	tport->nport->tport = NULL;
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (put_port)
		fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.host_traddr		= fcloop_t2h_host_traddr,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

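/*
 * Port creation/deletion is driven by writes to the sysfs attributes
 * registered below. An illustrative sequence (the WWN values are made
 * up) standing up a looped-back host/target pair might be:
 *
 *	echo "wwnn=0x10000090fa942092,wwpn=0x10000090fa942093" > \
 *		/sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x20000090fa942092,wwpn=0x20000090fa942093,lpwwnn=0x10000090fa942092,lpwwpn=0x10000090fa942093" > \
 *		/sys/class/fcloop/ctl/add_remote_port
 *	echo "wwnn=0x20000090fa942092,wwpn=0x20000090fa942093" > \
 *		/sys/class/fcloop/ctl/add_target_port
 */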
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);
		refcount_set(&lport->ref, 1);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}

static int
__localport_unreg(struct fcloop_lport *lport)
{
	return nvme_fc_unregister_localport(lport->localport);
}

static struct fcloop_nport *
__fcloop_nport_lookup(u64 node_name, u64 port_name)
{
	struct fcloop_nport *nport;

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name != node_name ||
		    nport->port_name != port_name)
			continue;

		if (fcloop_nport_get(nport))
			return nport;

		break;
	}

	return NULL;
}

static struct fcloop_nport *
fcloop_nport_lookup(u64 node_name, u64 port_name)
{
	struct fcloop_nport *nport;
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	nport = __fcloop_nport_lookup(node_name, port_name);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	return nport;
}

static struct fcloop_lport *
__fcloop_lport_lookup(u64 node_name, u64 port_name)
{
	struct fcloop_lport *lport;

	list_for_each_entry(lport, &fcloop_lports, lport_list) {
		if (lport->localport->node_name != node_name ||
		    lport->localport->port_name != port_name)
			continue;

		if (fcloop_lport_get(lport))
			return lport;

		break;
	}

	return NULL;
}

static struct fcloop_lport *
fcloop_lport_lookup(u64 node_name, u64 port_name)
{
	struct fcloop_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	lport = __fcloop_lport_lookup(node_name, port_name);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	return lport;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *lport;
	u64 nodename, portname;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	lport = fcloop_lport_lookup(nodename, portname);
	if (!lport)
		return -ENOENT;

	ret = __localport_unreg(lport);
	fcloop_lport_put(lport);

	return ret ? ret : count;
}

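/*
 * Allocate or reuse the nport backing a remote or target port.
 * Pairing rules enforced under fcloop_lock: the new port's name must
 * not clash with an existing local port, a remote port must name an
 * existing local port via lpwwnn/lpwwpn, and an nport can carry at
 * most one rport and one tport.
 */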
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport;
	struct fcloop_lport *lport;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask)
		goto out_free_opts;

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	refcount_set(&newnport->ref, 1);

	spin_lock_irqsave(&fcloop_lock, flags);
	lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn);
	if (lport) {
		/* invalid configuration */
		fcloop_lport_put(lport);
		goto out_free_newnport;
	}

	if (remoteport) {
		lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn);
		if (!lport) {
			/* invalid configuration */
			goto out_free_newnport;
		}
	}

	nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn);
	if (nport) {
		if ((remoteport && nport->rport) ||
		    (!remoteport && nport->tport)) {
			/* invalid configuration */
			goto out_put_nport;
		}

		/* found existing nport, discard the new nport */
		kfree(newnport);
	} else {
		list_add_tail(&newnport->nport_list, &fcloop_nports);
		nport = newnport;
	}

	if (opts->mask & NVMF_OPT_ROLES)
		nport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		nport->port_id = opts->fcaddr;
	if (lport) {
		if (!nport->lport)
			nport->lport = lport;
		else
			fcloop_lport_put(lport);
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return nport;

out_put_nport:
	if (lport)
		fcloop_lport_put(lport);
	fcloop_nport_put(nport);
out_free_newnport:
	spin_unlock_irqrestore(&fcloop_lock, flags);
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return NULL;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
					  &pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	rport->flags = 0;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	lockdep_assert_held(&fcloop_lock);

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	nport = fcloop_nport_lookup(nodename, portname);
	if (!nport)
		return -ENOENT;

	spin_lock_irqsave(&fcloop_lock, flags);
	rport = __unlink_remote_port(nport);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!rport) {
		ret = -ENOENT;
		goto out_nport_put;
	}

	ret = __remoteport_unreg(nport, rport);

out_nport_put:
	fcloop_nport_put(nport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
					   &targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	tport->flags = 0;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	lockdep_assert_held(&fcloop_lock);

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	nport = fcloop_nport_lookup(nodename, portname);
	if (!nport)
		return -ENOENT;

	spin_lock_irqsave(&fcloop_lock, flags);
	tport = __unlink_target_port(nport);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!tport) {
		ret = -ENOENT;
		goto out_nport_put;
	}

	ret = __targetport_unreg(nport, tport);

out_nport_put:
	fcloop_nport_put(nport);

	return ret ? ret : count;
}

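/*
 * Arm the command-drop facility; input is "<opcode>:<starting
 * instance>:<amount>", e.g. an illustrative "0x04:1:1" to drop a
 * single instance of opcode 0x04. Opcodes above 0xFF select fabrics
 * commands by fctype.
 */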
static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int opcode;
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return -EBADRQC;

	drop_current_cnt = 0;
	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
	drop_opcode = (opcode & DROP_OPCODE_MASK);
	drop_instance = starting;
	/* the check to drop routine uses instance + count to know when
	 * to end. Thus, if dropping 1 instance, count should be 0.
	 * so subtract 1 from the count.
	 */
	drop_amount = amount - 1;

	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
		"instances\n",
		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
		drop_opcode, drop_amount);

	return count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	&dev_attr_set_cmd_drop.attr,
	NULL
};

static const struct attribute_group fclopp_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fclopp_dev_attrs_group,
	NULL,
};

static const struct class fcloop_class = {
	.name = "fcloop",
};
static struct device *fcloop_device;

static int __init fcloop_init(void)
{
	int ret;

	lsreq_cache = kmem_cache_create("lsreq_cache",
				sizeof(struct fcloop_lsreq), 0,
				0, NULL);
	if (!lsreq_cache)
		return -ENOMEM;

	ret = class_register(&fcloop_class);
	if (ret) {
		pr_err("couldn't register class fcloop\n");
		goto out_destroy_cache;
	}

	fcloop_device = device_create_with_groups(
				&fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_unregister(&fcloop_class);
out_destroy_cache:
	kmem_cache_destroy(lsreq_cache);
	return ret;
}

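/*
 * Module teardown: sweep both global lists, unregistering each
 * nport's target and remote ports and then the local ports; the
 * final frees happen from the transport delete callbacks once the
 * refcounts drop.
 */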
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport || !fcloop_nport_get(nport))
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		if (tport) {
			ret = __targetport_unreg(nport, tport);
			if (ret)
				pr_warn("%s: Failed deleting target port\n",
					__func__);
		}

		if (rport) {
			ret = __remoteport_unreg(nport, rport);
			if (ret)
				pr_warn("%s: Failed deleting remote port\n",
					__func__);
		}

		fcloop_nport_put(nport);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport || !fcloop_lport_get(lport))
			break;

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		fcloop_lport_put(lport);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(&fcloop_class, MKDEV(0, 0));
	class_unregister(&fcloop_class);
	kmem_cache_destroy(lsreq_cache);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");