1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/module.h> 7 #include <linux/parser.h> 8 #include <uapi/scsi/fc/fc_fs.h> 9 #include <uapi/scsi/fc/fc_els.h> 10 #include <linux/delay.h> 11 #include <linux/overflow.h> 12 13 #include "nvme.h" 14 #include "fabrics.h" 15 #include <linux/nvme-fc-driver.h> 16 #include <linux/nvme-fc.h> 17 #include "fc.h" 18 #include <scsi/scsi_transport_fc.h> 19 20 /* *************************** Data Structures/Defines ****************** */ 21 22 23 enum nvme_fc_queue_flags { 24 NVME_FC_Q_CONNECTED = 0, 25 NVME_FC_Q_LIVE, 26 }; 27 28 #define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */ 29 #define NVME_FC_DEFAULT_RECONNECT_TMO 2 /* delay between reconnects 30 * when connected and a 31 * connection failure. 32 */ 33 34 struct nvme_fc_queue { 35 struct nvme_fc_ctrl *ctrl; 36 struct device *dev; 37 struct blk_mq_hw_ctx *hctx; 38 void *lldd_handle; 39 size_t cmnd_capsule_len; 40 u32 qnum; 41 u32 rqcnt; 42 u32 seqno; 43 44 u64 connection_id; 45 atomic_t csn; 46 47 unsigned long flags; 48 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 49 50 enum nvme_fcop_flags { 51 FCOP_FLAGS_TERMIO = (1 << 0), 52 FCOP_FLAGS_AEN = (1 << 1), 53 }; 54 55 struct nvmefc_ls_req_op { 56 struct nvmefc_ls_req ls_req; 57 58 struct nvme_fc_rport *rport; 59 struct nvme_fc_queue *queue; 60 struct request *rq; 61 u32 flags; 62 63 int ls_error; 64 struct completion ls_done; 65 struct list_head lsreq_list; /* rport->ls_req_list */ 66 bool req_queued; 67 }; 68 69 struct nvmefc_ls_rcv_op { 70 struct nvme_fc_rport *rport; 71 struct nvmefc_ls_rsp *lsrsp; 72 union nvmefc_ls_requests *rqstbuf; 73 union nvmefc_ls_responses *rspbuf; 74 u16 rqstdatalen; 75 bool handled; 76 dma_addr_t rspdma; 77 struct list_head lsrcv_list; /* rport->ls_rcv_list */ 78 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 79 80 enum nvme_fcpop_state { 81 FCPOP_STATE_UNINIT = 0, 82 FCPOP_STATE_IDLE = 1, 83 FCPOP_STATE_ACTIVE = 2, 84 FCPOP_STATE_ABORTED = 3, 85 FCPOP_STATE_COMPLETE = 4, 86 }; 87 88 struct nvme_fc_fcp_op { 89 struct nvme_request nreq; /* 90 * nvme/host/core.c 91 * requires this to be 92 * the 1st element in the 93 * private structure 94 * associated with the 95 * request. 96 */ 97 struct nvmefc_fcp_req fcp_req; 98 99 struct nvme_fc_ctrl *ctrl; 100 struct nvme_fc_queue *queue; 101 struct request *rq; 102 103 atomic_t state; 104 u32 flags; 105 u32 rqno; 106 u32 nents; 107 108 struct nvme_fc_cmd_iu cmd_iu; 109 struct nvme_fc_ersp_iu rsp_iu; 110 }; 111 112 struct nvme_fcp_op_w_sgl { 113 struct nvme_fc_fcp_op op; 114 struct scatterlist sgl[NVME_INLINE_SG_CNT]; 115 uint8_t priv[]; 116 }; 117 118 struct nvme_fc_lport { 119 struct nvme_fc_local_port localport; 120 121 struct ida endp_cnt; 122 struct list_head port_list; /* nvme_fc_port_list */ 123 struct list_head endp_list; 124 struct device *dev; /* physical device for dma */ 125 struct nvme_fc_port_template *ops; 126 struct kref ref; 127 atomic_t act_rport_cnt; 128 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 129 130 struct nvme_fc_rport { 131 struct nvme_fc_remote_port remoteport; 132 133 struct list_head endp_list; /* for lport->endp_list */ 134 struct list_head ctrl_list; 135 struct list_head ls_req_list; 136 struct list_head ls_rcv_list; 137 struct list_head disc_list; 138 struct device *dev; /* physical device for dma */ 139 struct nvme_fc_lport *lport; 140 spinlock_t lock; 141 struct kref ref; 142 atomic_t act_ctrl_cnt; 143 unsigned long dev_loss_end; 144 struct work_struct lsrcv_work; 145 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 146 147 /* fc_ctrl flags values - specified as bit positions */ 148 #define ASSOC_ACTIVE 0 149 #define ASSOC_FAILED 1 150 #define FCCTRL_TERMIO 2 151 152 struct nvme_fc_ctrl { 153 spinlock_t lock; 154 struct nvme_fc_queue *queues; 155 struct device *dev; 156 struct nvme_fc_lport *lport; 157 struct nvme_fc_rport *rport; 158 u32 cnum; 159 160 bool ioq_live; 161 u64 association_id; 162 struct nvmefc_ls_rcv_op *rcv_disconn; 163 164 struct list_head ctrl_list; /* rport->ctrl_list */ 165 166 struct blk_mq_tag_set admin_tag_set; 167 struct blk_mq_tag_set tag_set; 168 169 struct delayed_work connect_work; 170 171 struct kref ref; 172 unsigned long flags; 173 u32 iocnt; 174 wait_queue_head_t ioabort_wait; 175 176 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS]; 177 178 struct nvme_ctrl ctrl; 179 }; 180 181 static inline struct nvme_fc_ctrl * 182 to_fc_ctrl(struct nvme_ctrl *ctrl) 183 { 184 return container_of(ctrl, struct nvme_fc_ctrl, ctrl); 185 } 186 187 static inline struct nvme_fc_lport * 188 localport_to_lport(struct nvme_fc_local_port *portptr) 189 { 190 return container_of(portptr, struct nvme_fc_lport, localport); 191 } 192 193 static inline struct nvme_fc_rport * 194 remoteport_to_rport(struct nvme_fc_remote_port *portptr) 195 { 196 return container_of(portptr, struct nvme_fc_rport, remoteport); 197 } 198 199 static inline struct nvmefc_ls_req_op * 200 ls_req_to_lsop(struct nvmefc_ls_req *lsreq) 201 { 202 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req); 203 } 204 205 static inline struct nvme_fc_fcp_op * 206 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq) 207 { 208 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req); 209 } 210 211 212 213 /* *************************** Globals **************************** */ 214 215 216 static DEFINE_SPINLOCK(nvme_fc_lock); 217 218 static LIST_HEAD(nvme_fc_lport_list); 219 static DEFINE_IDA(nvme_fc_local_port_cnt); 220 static DEFINE_IDA(nvme_fc_ctrl_cnt); 221 222 static struct workqueue_struct *nvme_fc_wq; 223 224 static bool nvme_fc_waiting_to_unload; 225 static DECLARE_COMPLETION(nvme_fc_unload_proceed); 226 227 /* 228 * These items are short-term. They will eventually be moved into 229 * a generic FC class. See comments in module init. 230 */ 231 static struct device *fc_udev_device; 232 233 static void nvme_fc_complete_rq(struct request *rq); 234 235 /* *********************** FC-NVME Port Management ************************ */ 236 237 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *, 238 struct nvme_fc_queue *, unsigned int); 239 240 static void nvme_fc_handle_ls_rqst_work(struct work_struct *work); 241 242 243 static void 244 nvme_fc_free_lport(struct kref *ref) 245 { 246 struct nvme_fc_lport *lport = 247 container_of(ref, struct nvme_fc_lport, ref); 248 unsigned long flags; 249 250 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); 251 WARN_ON(!list_empty(&lport->endp_list)); 252 253 /* remove from transport list */ 254 spin_lock_irqsave(&nvme_fc_lock, flags); 255 list_del(&lport->port_list); 256 if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list)) 257 complete(&nvme_fc_unload_proceed); 258 spin_unlock_irqrestore(&nvme_fc_lock, flags); 259 260 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); 261 ida_destroy(&lport->endp_cnt); 262 263 put_device(lport->dev); 264 265 kfree(lport); 266 } 267 268 static void 269 nvme_fc_lport_put(struct nvme_fc_lport *lport) 270 { 271 kref_put(&lport->ref, nvme_fc_free_lport); 272 } 273 274 static int 275 nvme_fc_lport_get(struct nvme_fc_lport *lport) 276 { 277 return kref_get_unless_zero(&lport->ref); 278 } 279 280 281 static struct nvme_fc_lport * 282 nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo, 283 struct nvme_fc_port_template *ops, 284 struct device *dev) 285 { 286 struct nvme_fc_lport *lport; 287 unsigned long flags; 288 289 spin_lock_irqsave(&nvme_fc_lock, flags); 290 291 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 292 if (lport->localport.node_name != pinfo->node_name || 293 lport->localport.port_name != pinfo->port_name) 294 continue; 295 296 if (lport->dev != dev) { 297 lport = ERR_PTR(-EXDEV); 298 goto out_done; 299 } 300 301 if (lport->localport.port_state != FC_OBJSTATE_DELETED) { 302 lport = ERR_PTR(-EEXIST); 303 goto out_done; 304 } 305 306 if (!nvme_fc_lport_get(lport)) { 307 /* 308 * fails if ref cnt already 0. If so, 309 * act as if lport already deleted 310 */ 311 lport = NULL; 312 goto out_done; 313 } 314 315 /* resume the lport */ 316 317 lport->ops = ops; 318 lport->localport.port_role = pinfo->port_role; 319 lport->localport.port_id = pinfo->port_id; 320 lport->localport.port_state = FC_OBJSTATE_ONLINE; 321 322 spin_unlock_irqrestore(&nvme_fc_lock, flags); 323 324 return lport; 325 } 326 327 lport = NULL; 328 329 out_done: 330 spin_unlock_irqrestore(&nvme_fc_lock, flags); 331 332 return lport; 333 } 334 335 /** 336 * nvme_fc_register_localport - transport entry point called by an 337 * LLDD to register the existence of a NVME 338 * host FC port. 339 * @pinfo: pointer to information about the port to be registered 340 * @template: LLDD entrypoints and operational parameters for the port 341 * @dev: physical hardware device node port corresponds to. Will be 342 * used for DMA mappings 343 * @portptr: pointer to a local port pointer. Upon success, the routine 344 * will allocate a nvme_fc_local_port structure and place its 345 * address in the local port pointer. Upon failure, local port 346 * pointer will be set to 0. 347 * 348 * Returns: 349 * a completion status. Must be 0 upon success; a negative errno 350 * (ex: -ENXIO) upon failure. 351 */ 352 int 353 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, 354 struct nvme_fc_port_template *template, 355 struct device *dev, 356 struct nvme_fc_local_port **portptr) 357 { 358 struct nvme_fc_lport *newrec; 359 unsigned long flags; 360 int ret, idx; 361 362 if (!template->localport_delete || !template->remoteport_delete || 363 !template->ls_req || !template->fcp_io || 364 !template->ls_abort || !template->fcp_abort || 365 !template->max_hw_queues || !template->max_sgl_segments || 366 !template->max_dif_sgl_segments || !template->dma_boundary) { 367 ret = -EINVAL; 368 goto out_reghost_failed; 369 } 370 371 /* 372 * look to see if there is already a localport that had been 373 * deregistered and in the process of waiting for all the 374 * references to fully be removed. If the references haven't 375 * expired, we can simply re-enable the localport. Remoteports 376 * and controller reconnections should resume naturally. 377 */ 378 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev); 379 380 /* found an lport, but something about its state is bad */ 381 if (IS_ERR(newrec)) { 382 ret = PTR_ERR(newrec); 383 goto out_reghost_failed; 384 385 /* found existing lport, which was resumed */ 386 } else if (newrec) { 387 *portptr = &newrec->localport; 388 return 0; 389 } 390 391 /* nothing found - allocate a new localport struct */ 392 393 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), 394 GFP_KERNEL); 395 if (!newrec) { 396 ret = -ENOMEM; 397 goto out_reghost_failed; 398 } 399 400 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL); 401 if (idx < 0) { 402 ret = -ENOSPC; 403 goto out_fail_kfree; 404 } 405 406 if (!get_device(dev) && dev) { 407 ret = -ENODEV; 408 goto out_ida_put; 409 } 410 411 INIT_LIST_HEAD(&newrec->port_list); 412 INIT_LIST_HEAD(&newrec->endp_list); 413 kref_init(&newrec->ref); 414 atomic_set(&newrec->act_rport_cnt, 0); 415 newrec->ops = template; 416 newrec->dev = dev; 417 ida_init(&newrec->endp_cnt); 418 if (template->local_priv_sz) 419 newrec->localport.private = &newrec[1]; 420 else 421 newrec->localport.private = NULL; 422 newrec->localport.node_name = pinfo->node_name; 423 newrec->localport.port_name = pinfo->port_name; 424 newrec->localport.port_role = pinfo->port_role; 425 newrec->localport.port_id = pinfo->port_id; 426 newrec->localport.port_state = FC_OBJSTATE_ONLINE; 427 newrec->localport.port_num = idx; 428 429 spin_lock_irqsave(&nvme_fc_lock, flags); 430 list_add_tail(&newrec->port_list, &nvme_fc_lport_list); 431 spin_unlock_irqrestore(&nvme_fc_lock, flags); 432 433 if (dev) 434 dma_set_seg_boundary(dev, template->dma_boundary); 435 436 *portptr = &newrec->localport; 437 return 0; 438 439 out_ida_put: 440 ida_simple_remove(&nvme_fc_local_port_cnt, idx); 441 out_fail_kfree: 442 kfree(newrec); 443 out_reghost_failed: 444 *portptr = NULL; 445 446 return ret; 447 } 448 EXPORT_SYMBOL_GPL(nvme_fc_register_localport); 449 450 /** 451 * nvme_fc_unregister_localport - transport entry point called by an 452 * LLDD to deregister/remove a previously 453 * registered a NVME host FC port. 454 * @portptr: pointer to the (registered) local port that is to be deregistered. 455 * 456 * Returns: 457 * a completion status. Must be 0 upon success; a negative errno 458 * (ex: -ENXIO) upon failure. 459 */ 460 int 461 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr) 462 { 463 struct nvme_fc_lport *lport = localport_to_lport(portptr); 464 unsigned long flags; 465 466 if (!portptr) 467 return -EINVAL; 468 469 spin_lock_irqsave(&nvme_fc_lock, flags); 470 471 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 472 spin_unlock_irqrestore(&nvme_fc_lock, flags); 473 return -EINVAL; 474 } 475 portptr->port_state = FC_OBJSTATE_DELETED; 476 477 spin_unlock_irqrestore(&nvme_fc_lock, flags); 478 479 if (atomic_read(&lport->act_rport_cnt) == 0) 480 lport->ops->localport_delete(&lport->localport); 481 482 nvme_fc_lport_put(lport); 483 484 return 0; 485 } 486 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport); 487 488 /* 489 * TRADDR strings, per FC-NVME are fixed format: 490 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters 491 * udev event will only differ by prefix of what field is 492 * being specified: 493 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters 494 * 19 + 43 + null_fudge = 64 characters 495 */ 496 #define FCNVME_TRADDR_LENGTH 64 497 498 static void 499 nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport, 500 struct nvme_fc_rport *rport) 501 { 502 char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/ 503 char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/ 504 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL }; 505 506 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) 507 return; 508 509 snprintf(hostaddr, sizeof(hostaddr), 510 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", 511 lport->localport.node_name, lport->localport.port_name); 512 snprintf(tgtaddr, sizeof(tgtaddr), 513 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", 514 rport->remoteport.node_name, rport->remoteport.port_name); 515 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); 516 } 517 518 static void 519 nvme_fc_free_rport(struct kref *ref) 520 { 521 struct nvme_fc_rport *rport = 522 container_of(ref, struct nvme_fc_rport, ref); 523 struct nvme_fc_lport *lport = 524 localport_to_lport(rport->remoteport.localport); 525 unsigned long flags; 526 527 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); 528 WARN_ON(!list_empty(&rport->ctrl_list)); 529 530 /* remove from lport list */ 531 spin_lock_irqsave(&nvme_fc_lock, flags); 532 list_del(&rport->endp_list); 533 spin_unlock_irqrestore(&nvme_fc_lock, flags); 534 535 WARN_ON(!list_empty(&rport->disc_list)); 536 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num); 537 538 kfree(rport); 539 540 nvme_fc_lport_put(lport); 541 } 542 543 static void 544 nvme_fc_rport_put(struct nvme_fc_rport *rport) 545 { 546 kref_put(&rport->ref, nvme_fc_free_rport); 547 } 548 549 static int 550 nvme_fc_rport_get(struct nvme_fc_rport *rport) 551 { 552 return kref_get_unless_zero(&rport->ref); 553 } 554 555 static void 556 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) 557 { 558 switch (ctrl->ctrl.state) { 559 case NVME_CTRL_NEW: 560 case NVME_CTRL_CONNECTING: 561 /* 562 * As all reconnects were suppressed, schedule a 563 * connect. 564 */ 565 dev_info(ctrl->ctrl.device, 566 "NVME-FC{%d}: connectivity re-established. " 567 "Attempting reconnect\n", ctrl->cnum); 568 569 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); 570 break; 571 572 case NVME_CTRL_RESETTING: 573 /* 574 * Controller is already in the process of terminating the 575 * association. No need to do anything further. The reconnect 576 * step will naturally occur after the reset completes. 577 */ 578 break; 579 580 default: 581 /* no action to take - let it delete */ 582 break; 583 } 584 } 585 586 static struct nvme_fc_rport * 587 nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport, 588 struct nvme_fc_port_info *pinfo) 589 { 590 struct nvme_fc_rport *rport; 591 struct nvme_fc_ctrl *ctrl; 592 unsigned long flags; 593 594 spin_lock_irqsave(&nvme_fc_lock, flags); 595 596 list_for_each_entry(rport, &lport->endp_list, endp_list) { 597 if (rport->remoteport.node_name != pinfo->node_name || 598 rport->remoteport.port_name != pinfo->port_name) 599 continue; 600 601 if (!nvme_fc_rport_get(rport)) { 602 rport = ERR_PTR(-ENOLCK); 603 goto out_done; 604 } 605 606 spin_unlock_irqrestore(&nvme_fc_lock, flags); 607 608 spin_lock_irqsave(&rport->lock, flags); 609 610 /* has it been unregistered */ 611 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { 612 /* means lldd called us twice */ 613 spin_unlock_irqrestore(&rport->lock, flags); 614 nvme_fc_rport_put(rport); 615 return ERR_PTR(-ESTALE); 616 } 617 618 rport->remoteport.port_role = pinfo->port_role; 619 rport->remoteport.port_id = pinfo->port_id; 620 rport->remoteport.port_state = FC_OBJSTATE_ONLINE; 621 rport->dev_loss_end = 0; 622 623 /* 624 * kick off a reconnect attempt on all associations to the 625 * remote port. A successful reconnects will resume i/o. 626 */ 627 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) 628 nvme_fc_resume_controller(ctrl); 629 630 spin_unlock_irqrestore(&rport->lock, flags); 631 632 return rport; 633 } 634 635 rport = NULL; 636 637 out_done: 638 spin_unlock_irqrestore(&nvme_fc_lock, flags); 639 640 return rport; 641 } 642 643 static inline void 644 __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport, 645 struct nvme_fc_port_info *pinfo) 646 { 647 if (pinfo->dev_loss_tmo) 648 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; 649 else 650 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; 651 } 652 653 /** 654 * nvme_fc_register_remoteport - transport entry point called by an 655 * LLDD to register the existence of a NVME 656 * subsystem FC port on its fabric. 657 * @localport: pointer to the (registered) local port that the remote 658 * subsystem port is connected to. 659 * @pinfo: pointer to information about the port to be registered 660 * @portptr: pointer to a remote port pointer. Upon success, the routine 661 * will allocate a nvme_fc_remote_port structure and place its 662 * address in the remote port pointer. Upon failure, remote port 663 * pointer will be set to 0. 664 * 665 * Returns: 666 * a completion status. Must be 0 upon success; a negative errno 667 * (ex: -ENXIO) upon failure. 668 */ 669 int 670 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, 671 struct nvme_fc_port_info *pinfo, 672 struct nvme_fc_remote_port **portptr) 673 { 674 struct nvme_fc_lport *lport = localport_to_lport(localport); 675 struct nvme_fc_rport *newrec; 676 unsigned long flags; 677 int ret, idx; 678 679 if (!nvme_fc_lport_get(lport)) { 680 ret = -ESHUTDOWN; 681 goto out_reghost_failed; 682 } 683 684 /* 685 * look to see if there is already a remoteport that is waiting 686 * for a reconnect (within dev_loss_tmo) with the same WWN's. 687 * If so, transition to it and reconnect. 688 */ 689 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo); 690 691 /* found an rport, but something about its state is bad */ 692 if (IS_ERR(newrec)) { 693 ret = PTR_ERR(newrec); 694 goto out_lport_put; 695 696 /* found existing rport, which was resumed */ 697 } else if (newrec) { 698 nvme_fc_lport_put(lport); 699 __nvme_fc_set_dev_loss_tmo(newrec, pinfo); 700 nvme_fc_signal_discovery_scan(lport, newrec); 701 *portptr = &newrec->remoteport; 702 return 0; 703 } 704 705 /* nothing found - allocate a new remoteport struct */ 706 707 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), 708 GFP_KERNEL); 709 if (!newrec) { 710 ret = -ENOMEM; 711 goto out_lport_put; 712 } 713 714 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL); 715 if (idx < 0) { 716 ret = -ENOSPC; 717 goto out_kfree_rport; 718 } 719 720 INIT_LIST_HEAD(&newrec->endp_list); 721 INIT_LIST_HEAD(&newrec->ctrl_list); 722 INIT_LIST_HEAD(&newrec->ls_req_list); 723 INIT_LIST_HEAD(&newrec->disc_list); 724 kref_init(&newrec->ref); 725 atomic_set(&newrec->act_ctrl_cnt, 0); 726 spin_lock_init(&newrec->lock); 727 newrec->remoteport.localport = &lport->localport; 728 INIT_LIST_HEAD(&newrec->ls_rcv_list); 729 newrec->dev = lport->dev; 730 newrec->lport = lport; 731 if (lport->ops->remote_priv_sz) 732 newrec->remoteport.private = &newrec[1]; 733 else 734 newrec->remoteport.private = NULL; 735 newrec->remoteport.port_role = pinfo->port_role; 736 newrec->remoteport.node_name = pinfo->node_name; 737 newrec->remoteport.port_name = pinfo->port_name; 738 newrec->remoteport.port_id = pinfo->port_id; 739 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; 740 newrec->remoteport.port_num = idx; 741 __nvme_fc_set_dev_loss_tmo(newrec, pinfo); 742 INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work); 743 744 spin_lock_irqsave(&nvme_fc_lock, flags); 745 list_add_tail(&newrec->endp_list, &lport->endp_list); 746 spin_unlock_irqrestore(&nvme_fc_lock, flags); 747 748 nvme_fc_signal_discovery_scan(lport, newrec); 749 750 *portptr = &newrec->remoteport; 751 return 0; 752 753 out_kfree_rport: 754 kfree(newrec); 755 out_lport_put: 756 nvme_fc_lport_put(lport); 757 out_reghost_failed: 758 *portptr = NULL; 759 return ret; 760 } 761 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport); 762 763 static int 764 nvme_fc_abort_lsops(struct nvme_fc_rport *rport) 765 { 766 struct nvmefc_ls_req_op *lsop; 767 unsigned long flags; 768 769 restart: 770 spin_lock_irqsave(&rport->lock, flags); 771 772 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { 773 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { 774 lsop->flags |= FCOP_FLAGS_TERMIO; 775 spin_unlock_irqrestore(&rport->lock, flags); 776 rport->lport->ops->ls_abort(&rport->lport->localport, 777 &rport->remoteport, 778 &lsop->ls_req); 779 goto restart; 780 } 781 } 782 spin_unlock_irqrestore(&rport->lock, flags); 783 784 return 0; 785 } 786 787 static void 788 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) 789 { 790 dev_info(ctrl->ctrl.device, 791 "NVME-FC{%d}: controller connectivity lost. Awaiting " 792 "Reconnect", ctrl->cnum); 793 794 switch (ctrl->ctrl.state) { 795 case NVME_CTRL_NEW: 796 case NVME_CTRL_LIVE: 797 /* 798 * Schedule a controller reset. The reset will terminate the 799 * association and schedule the reconnect timer. Reconnects 800 * will be attempted until either the ctlr_loss_tmo 801 * (max_retries * connect_delay) expires or the remoteport's 802 * dev_loss_tmo expires. 803 */ 804 if (nvme_reset_ctrl(&ctrl->ctrl)) { 805 dev_warn(ctrl->ctrl.device, 806 "NVME-FC{%d}: Couldn't schedule reset.\n", 807 ctrl->cnum); 808 nvme_delete_ctrl(&ctrl->ctrl); 809 } 810 break; 811 812 case NVME_CTRL_CONNECTING: 813 /* 814 * The association has already been terminated and the 815 * controller is attempting reconnects. No need to do anything 816 * futher. Reconnects will be attempted until either the 817 * ctlr_loss_tmo (max_retries * connect_delay) expires or the 818 * remoteport's dev_loss_tmo expires. 819 */ 820 break; 821 822 case NVME_CTRL_RESETTING: 823 /* 824 * Controller is already in the process of terminating the 825 * association. No need to do anything further. The reconnect 826 * step will kick in naturally after the association is 827 * terminated. 828 */ 829 break; 830 831 case NVME_CTRL_DELETING: 832 case NVME_CTRL_DELETING_NOIO: 833 default: 834 /* no action to take - let it delete */ 835 break; 836 } 837 } 838 839 /** 840 * nvme_fc_unregister_remoteport - transport entry point called by an 841 * LLDD to deregister/remove a previously 842 * registered a NVME subsystem FC port. 843 * @portptr: pointer to the (registered) remote port that is to be 844 * deregistered. 845 * 846 * Returns: 847 * a completion status. Must be 0 upon success; a negative errno 848 * (ex: -ENXIO) upon failure. 849 */ 850 int 851 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr) 852 { 853 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 854 struct nvme_fc_ctrl *ctrl; 855 unsigned long flags; 856 857 if (!portptr) 858 return -EINVAL; 859 860 spin_lock_irqsave(&rport->lock, flags); 861 862 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 863 spin_unlock_irqrestore(&rport->lock, flags); 864 return -EINVAL; 865 } 866 portptr->port_state = FC_OBJSTATE_DELETED; 867 868 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); 869 870 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 871 /* if dev_loss_tmo==0, dev loss is immediate */ 872 if (!portptr->dev_loss_tmo) { 873 dev_warn(ctrl->ctrl.device, 874 "NVME-FC{%d}: controller connectivity lost.\n", 875 ctrl->cnum); 876 nvme_delete_ctrl(&ctrl->ctrl); 877 } else 878 nvme_fc_ctrl_connectivity_loss(ctrl); 879 } 880 881 spin_unlock_irqrestore(&rport->lock, flags); 882 883 nvme_fc_abort_lsops(rport); 884 885 if (atomic_read(&rport->act_ctrl_cnt) == 0) 886 rport->lport->ops->remoteport_delete(portptr); 887 888 /* 889 * release the reference, which will allow, if all controllers 890 * go away, which should only occur after dev_loss_tmo occurs, 891 * for the rport to be torn down. 892 */ 893 nvme_fc_rport_put(rport); 894 895 return 0; 896 } 897 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport); 898 899 /** 900 * nvme_fc_rescan_remoteport - transport entry point called by an 901 * LLDD to request a nvme device rescan. 902 * @remoteport: pointer to the (registered) remote port that is to be 903 * rescanned. 904 * 905 * Returns: N/A 906 */ 907 void 908 nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport) 909 { 910 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport); 911 912 nvme_fc_signal_discovery_scan(rport->lport, rport); 913 } 914 EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport); 915 916 int 917 nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr, 918 u32 dev_loss_tmo) 919 { 920 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 921 unsigned long flags; 922 923 spin_lock_irqsave(&rport->lock, flags); 924 925 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 926 spin_unlock_irqrestore(&rport->lock, flags); 927 return -EINVAL; 928 } 929 930 /* a dev_loss_tmo of 0 (immediate) is allowed to be set */ 931 rport->remoteport.dev_loss_tmo = dev_loss_tmo; 932 933 spin_unlock_irqrestore(&rport->lock, flags); 934 935 return 0; 936 } 937 EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss); 938 939 940 /* *********************** FC-NVME DMA Handling **************************** */ 941 942 /* 943 * The fcloop device passes in a NULL device pointer. Real LLD's will 944 * pass in a valid device pointer. If NULL is passed to the dma mapping 945 * routines, depending on the platform, it may or may not succeed, and 946 * may crash. 947 * 948 * As such: 949 * Wrapper all the dma routines and check the dev pointer. 950 * 951 * If simple mappings (return just a dma address, we'll noop them, 952 * returning a dma address of 0. 953 * 954 * On more complex mappings (dma_map_sg), a pseudo routine fills 955 * in the scatter list, setting all dma addresses to 0. 956 */ 957 958 static inline dma_addr_t 959 fc_dma_map_single(struct device *dev, void *ptr, size_t size, 960 enum dma_data_direction dir) 961 { 962 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; 963 } 964 965 static inline int 966 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 967 { 968 return dev ? dma_mapping_error(dev, dma_addr) : 0; 969 } 970 971 static inline void 972 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, 973 enum dma_data_direction dir) 974 { 975 if (dev) 976 dma_unmap_single(dev, addr, size, dir); 977 } 978 979 static inline void 980 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, 981 enum dma_data_direction dir) 982 { 983 if (dev) 984 dma_sync_single_for_cpu(dev, addr, size, dir); 985 } 986 987 static inline void 988 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, 989 enum dma_data_direction dir) 990 { 991 if (dev) 992 dma_sync_single_for_device(dev, addr, size, dir); 993 } 994 995 /* pseudo dma_map_sg call */ 996 static int 997 fc_map_sg(struct scatterlist *sg, int nents) 998 { 999 struct scatterlist *s; 1000 int i; 1001 1002 WARN_ON(nents == 0 || sg[0].length == 0); 1003 1004 for_each_sg(sg, s, nents, i) { 1005 s->dma_address = 0L; 1006 #ifdef CONFIG_NEED_SG_DMA_LENGTH 1007 s->dma_length = s->length; 1008 #endif 1009 } 1010 return nents; 1011 } 1012 1013 static inline int 1014 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 1015 enum dma_data_direction dir) 1016 { 1017 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 1018 } 1019 1020 static inline void 1021 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 1022 enum dma_data_direction dir) 1023 { 1024 if (dev) 1025 dma_unmap_sg(dev, sg, nents, dir); 1026 } 1027 1028 /* *********************** FC-NVME LS Handling **************************** */ 1029 1030 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *); 1031 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *); 1032 1033 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); 1034 1035 static void 1036 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop) 1037 { 1038 struct nvme_fc_rport *rport = lsop->rport; 1039 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1040 unsigned long flags; 1041 1042 spin_lock_irqsave(&rport->lock, flags); 1043 1044 if (!lsop->req_queued) { 1045 spin_unlock_irqrestore(&rport->lock, flags); 1046 return; 1047 } 1048 1049 list_del(&lsop->lsreq_list); 1050 1051 lsop->req_queued = false; 1052 1053 spin_unlock_irqrestore(&rport->lock, flags); 1054 1055 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, 1056 (lsreq->rqstlen + lsreq->rsplen), 1057 DMA_BIDIRECTIONAL); 1058 1059 nvme_fc_rport_put(rport); 1060 } 1061 1062 static int 1063 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport, 1064 struct nvmefc_ls_req_op *lsop, 1065 void (*done)(struct nvmefc_ls_req *req, int status)) 1066 { 1067 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1068 unsigned long flags; 1069 int ret = 0; 1070 1071 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 1072 return -ECONNREFUSED; 1073 1074 if (!nvme_fc_rport_get(rport)) 1075 return -ESHUTDOWN; 1076 1077 lsreq->done = done; 1078 lsop->rport = rport; 1079 lsop->req_queued = false; 1080 INIT_LIST_HEAD(&lsop->lsreq_list); 1081 init_completion(&lsop->ls_done); 1082 1083 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, 1084 lsreq->rqstlen + lsreq->rsplen, 1085 DMA_BIDIRECTIONAL); 1086 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { 1087 ret = -EFAULT; 1088 goto out_putrport; 1089 } 1090 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 1091 1092 spin_lock_irqsave(&rport->lock, flags); 1093 1094 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); 1095 1096 lsop->req_queued = true; 1097 1098 spin_unlock_irqrestore(&rport->lock, flags); 1099 1100 ret = rport->lport->ops->ls_req(&rport->lport->localport, 1101 &rport->remoteport, lsreq); 1102 if (ret) 1103 goto out_unlink; 1104 1105 return 0; 1106 1107 out_unlink: 1108 lsop->ls_error = ret; 1109 spin_lock_irqsave(&rport->lock, flags); 1110 lsop->req_queued = false; 1111 list_del(&lsop->lsreq_list); 1112 spin_unlock_irqrestore(&rport->lock, flags); 1113 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, 1114 (lsreq->rqstlen + lsreq->rsplen), 1115 DMA_BIDIRECTIONAL); 1116 out_putrport: 1117 nvme_fc_rport_put(rport); 1118 1119 return ret; 1120 } 1121 1122 static void 1123 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status) 1124 { 1125 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); 1126 1127 lsop->ls_error = status; 1128 complete(&lsop->ls_done); 1129 } 1130 1131 static int 1132 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop) 1133 { 1134 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1135 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; 1136 int ret; 1137 1138 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done); 1139 1140 if (!ret) { 1141 /* 1142 * No timeout/not interruptible as we need the struct 1143 * to exist until the lldd calls us back. Thus mandate 1144 * wait until driver calls back. lldd responsible for 1145 * the timeout action 1146 */ 1147 wait_for_completion(&lsop->ls_done); 1148 1149 __nvme_fc_finish_ls_req(lsop); 1150 1151 ret = lsop->ls_error; 1152 } 1153 1154 if (ret) 1155 return ret; 1156 1157 /* ACC or RJT payload ? */ 1158 if (rjt->w0.ls_cmd == FCNVME_LS_RJT) 1159 return -ENXIO; 1160 1161 return 0; 1162 } 1163 1164 static int 1165 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport, 1166 struct nvmefc_ls_req_op *lsop, 1167 void (*done)(struct nvmefc_ls_req *req, int status)) 1168 { 1169 /* don't wait for completion */ 1170 1171 return __nvme_fc_send_ls_req(rport, lsop, done); 1172 } 1173 1174 static int 1175 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, 1176 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) 1177 { 1178 struct nvmefc_ls_req_op *lsop; 1179 struct nvmefc_ls_req *lsreq; 1180 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst; 1181 struct fcnvme_ls_cr_assoc_acc *assoc_acc; 1182 unsigned long flags; 1183 int ret, fcret = 0; 1184 1185 lsop = kzalloc((sizeof(*lsop) + 1186 sizeof(*assoc_rqst) + sizeof(*assoc_acc) + 1187 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1188 if (!lsop) { 1189 dev_info(ctrl->ctrl.device, 1190 "NVME-FC{%d}: send Create Association failed: ENOMEM\n", 1191 ctrl->cnum); 1192 ret = -ENOMEM; 1193 goto out_no_memory; 1194 } 1195 1196 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1]; 1197 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1]; 1198 lsreq = &lsop->ls_req; 1199 if (ctrl->lport->ops->lsrqst_priv_sz) 1200 lsreq->private = &assoc_acc[1]; 1201 else 1202 lsreq->private = NULL; 1203 1204 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; 1205 assoc_rqst->desc_list_len = 1206 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); 1207 1208 assoc_rqst->assoc_cmd.desc_tag = 1209 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD); 1210 assoc_rqst->assoc_cmd.desc_len = 1211 fcnvme_lsdesc_len( 1212 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); 1213 1214 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); 1215 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); 1216 /* Linux supports only Dynamic controllers */ 1217 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); 1218 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); 1219 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, 1220 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); 1221 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, 1222 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); 1223 1224 lsop->queue = queue; 1225 lsreq->rqstaddr = assoc_rqst; 1226 lsreq->rqstlen = sizeof(*assoc_rqst); 1227 lsreq->rspaddr = assoc_acc; 1228 lsreq->rsplen = sizeof(*assoc_acc); 1229 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; 1230 1231 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); 1232 if (ret) 1233 goto out_free_buffer; 1234 1235 /* process connect LS completion */ 1236 1237 /* validate the ACC response */ 1238 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) 1239 fcret = VERR_LSACC; 1240 else if (assoc_acc->hdr.desc_list_len != 1241 fcnvme_lsdesc_len( 1242 sizeof(struct fcnvme_ls_cr_assoc_acc))) 1243 fcret = VERR_CR_ASSOC_ACC_LEN; 1244 else if (assoc_acc->hdr.rqst.desc_tag != 1245 cpu_to_be32(FCNVME_LSDESC_RQST)) 1246 fcret = VERR_LSDESC_RQST; 1247 else if (assoc_acc->hdr.rqst.desc_len != 1248 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) 1249 fcret = VERR_LSDESC_RQST_LEN; 1250 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) 1251 fcret = VERR_CR_ASSOC; 1252 else if (assoc_acc->associd.desc_tag != 1253 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1254 fcret = VERR_ASSOC_ID; 1255 else if (assoc_acc->associd.desc_len != 1256 fcnvme_lsdesc_len( 1257 sizeof(struct fcnvme_lsdesc_assoc_id))) 1258 fcret = VERR_ASSOC_ID_LEN; 1259 else if (assoc_acc->connectid.desc_tag != 1260 cpu_to_be32(FCNVME_LSDESC_CONN_ID)) 1261 fcret = VERR_CONN_ID; 1262 else if (assoc_acc->connectid.desc_len != 1263 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) 1264 fcret = VERR_CONN_ID_LEN; 1265 1266 if (fcret) { 1267 ret = -EBADF; 1268 dev_err(ctrl->dev, 1269 "q %d Create Association LS failed: %s\n", 1270 queue->qnum, validation_errors[fcret]); 1271 } else { 1272 spin_lock_irqsave(&ctrl->lock, flags); 1273 ctrl->association_id = 1274 be64_to_cpu(assoc_acc->associd.association_id); 1275 queue->connection_id = 1276 be64_to_cpu(assoc_acc->connectid.connection_id); 1277 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); 1278 spin_unlock_irqrestore(&ctrl->lock, flags); 1279 } 1280 1281 out_free_buffer: 1282 kfree(lsop); 1283 out_no_memory: 1284 if (ret) 1285 dev_err(ctrl->dev, 1286 "queue %d connect admin queue failed (%d).\n", 1287 queue->qnum, ret); 1288 return ret; 1289 } 1290 1291 static int 1292 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 1293 u16 qsize, u16 ersp_ratio) 1294 { 1295 struct nvmefc_ls_req_op *lsop; 1296 struct nvmefc_ls_req *lsreq; 1297 struct fcnvme_ls_cr_conn_rqst *conn_rqst; 1298 struct fcnvme_ls_cr_conn_acc *conn_acc; 1299 int ret, fcret = 0; 1300 1301 lsop = kzalloc((sizeof(*lsop) + 1302 sizeof(*conn_rqst) + sizeof(*conn_acc) + 1303 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1304 if (!lsop) { 1305 dev_info(ctrl->ctrl.device, 1306 "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", 1307 ctrl->cnum); 1308 ret = -ENOMEM; 1309 goto out_no_memory; 1310 } 1311 1312 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1]; 1313 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1]; 1314 lsreq = &lsop->ls_req; 1315 if (ctrl->lport->ops->lsrqst_priv_sz) 1316 lsreq->private = (void *)&conn_acc[1]; 1317 else 1318 lsreq->private = NULL; 1319 1320 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; 1321 conn_rqst->desc_list_len = cpu_to_be32( 1322 sizeof(struct fcnvme_lsdesc_assoc_id) + 1323 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); 1324 1325 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1326 conn_rqst->associd.desc_len = 1327 fcnvme_lsdesc_len( 1328 sizeof(struct fcnvme_lsdesc_assoc_id)); 1329 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); 1330 conn_rqst->connect_cmd.desc_tag = 1331 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD); 1332 conn_rqst->connect_cmd.desc_len = 1333 fcnvme_lsdesc_len( 1334 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); 1335 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); 1336 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); 1337 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); 1338 1339 lsop->queue = queue; 1340 lsreq->rqstaddr = conn_rqst; 1341 lsreq->rqstlen = sizeof(*conn_rqst); 1342 lsreq->rspaddr = conn_acc; 1343 lsreq->rsplen = sizeof(*conn_acc); 1344 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; 1345 1346 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); 1347 if (ret) 1348 goto out_free_buffer; 1349 1350 /* process connect LS completion */ 1351 1352 /* validate the ACC response */ 1353 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) 1354 fcret = VERR_LSACC; 1355 else if (conn_acc->hdr.desc_list_len != 1356 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc))) 1357 fcret = VERR_CR_CONN_ACC_LEN; 1358 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) 1359 fcret = VERR_LSDESC_RQST; 1360 else if (conn_acc->hdr.rqst.desc_len != 1361 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) 1362 fcret = VERR_LSDESC_RQST_LEN; 1363 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) 1364 fcret = VERR_CR_CONN; 1365 else if (conn_acc->connectid.desc_tag != 1366 cpu_to_be32(FCNVME_LSDESC_CONN_ID)) 1367 fcret = VERR_CONN_ID; 1368 else if (conn_acc->connectid.desc_len != 1369 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) 1370 fcret = VERR_CONN_ID_LEN; 1371 1372 if (fcret) { 1373 ret = -EBADF; 1374 dev_err(ctrl->dev, 1375 "q %d Create I/O Connection LS failed: %s\n", 1376 queue->qnum, validation_errors[fcret]); 1377 } else { 1378 queue->connection_id = 1379 be64_to_cpu(conn_acc->connectid.connection_id); 1380 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); 1381 } 1382 1383 out_free_buffer: 1384 kfree(lsop); 1385 out_no_memory: 1386 if (ret) 1387 dev_err(ctrl->dev, 1388 "queue %d connect I/O queue failed (%d).\n", 1389 queue->qnum, ret); 1390 return ret; 1391 } 1392 1393 static void 1394 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 1395 { 1396 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); 1397 1398 __nvme_fc_finish_ls_req(lsop); 1399 1400 /* fc-nvme initiator doesn't care about success or failure of cmd */ 1401 1402 kfree(lsop); 1403 } 1404 1405 /* 1406 * This routine sends a FC-NVME LS to disconnect (aka terminate) 1407 * the FC-NVME Association. Terminating the association also 1408 * terminates the FC-NVME connections (per queue, both admin and io 1409 * queues) that are part of the association. E.g. things are torn 1410 * down, and the related FC-NVME Association ID and Connection IDs 1411 * become invalid. 1412 * 1413 * The behavior of the fc-nvme initiator is such that it's 1414 * understanding of the association and connections will implicitly 1415 * be torn down. The action is implicit as it may be due to a loss of 1416 * connectivity with the fc-nvme target, so you may never get a 1417 * response even if you tried. As such, the action of this routine 1418 * is to asynchronously send the LS, ignore any results of the LS, and 1419 * continue on with terminating the association. If the fc-nvme target 1420 * is present and receives the LS, it too can tear down. 1421 */ 1422 static void 1423 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) 1424 { 1425 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 1426 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 1427 struct nvmefc_ls_req_op *lsop; 1428 struct nvmefc_ls_req *lsreq; 1429 int ret; 1430 1431 lsop = kzalloc((sizeof(*lsop) + 1432 sizeof(*discon_rqst) + sizeof(*discon_acc) + 1433 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1434 if (!lsop) { 1435 dev_info(ctrl->ctrl.device, 1436 "NVME-FC{%d}: send Disconnect Association " 1437 "failed: ENOMEM\n", 1438 ctrl->cnum); 1439 return; 1440 } 1441 1442 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 1443 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 1444 lsreq = &lsop->ls_req; 1445 if (ctrl->lport->ops->lsrqst_priv_sz) 1446 lsreq->private = (void *)&discon_acc[1]; 1447 else 1448 lsreq->private = NULL; 1449 1450 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 1451 ctrl->association_id); 1452 1453 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, 1454 nvme_fc_disconnect_assoc_done); 1455 if (ret) 1456 kfree(lsop); 1457 } 1458 1459 static void 1460 nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1461 { 1462 struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; 1463 struct nvme_fc_rport *rport = lsop->rport; 1464 struct nvme_fc_lport *lport = rport->lport; 1465 unsigned long flags; 1466 1467 spin_lock_irqsave(&rport->lock, flags); 1468 list_del(&lsop->lsrcv_list); 1469 spin_unlock_irqrestore(&rport->lock, flags); 1470 1471 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, 1472 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1473 fc_dma_unmap_single(lport->dev, lsop->rspdma, 1474 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1475 1476 kfree(lsop); 1477 1478 nvme_fc_rport_put(rport); 1479 } 1480 1481 static void 1482 nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) 1483 { 1484 struct nvme_fc_rport *rport = lsop->rport; 1485 struct nvme_fc_lport *lport = rport->lport; 1486 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; 1487 int ret; 1488 1489 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, 1490 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1491 1492 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, 1493 lsop->lsrsp); 1494 if (ret) { 1495 dev_warn(lport->dev, 1496 "LLDD rejected LS RSP xmt: LS %d status %d\n", 1497 w0->ls_cmd, ret); 1498 nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); 1499 return; 1500 } 1501 } 1502 1503 static struct nvme_fc_ctrl * 1504 nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, 1505 struct nvmefc_ls_rcv_op *lsop) 1506 { 1507 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1508 &lsop->rqstbuf->rq_dis_assoc; 1509 struct nvme_fc_ctrl *ctrl, *ret = NULL; 1510 struct nvmefc_ls_rcv_op *oldls = NULL; 1511 u64 association_id = be64_to_cpu(rqst->associd.association_id); 1512 unsigned long flags; 1513 1514 spin_lock_irqsave(&rport->lock, flags); 1515 1516 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 1517 if (!nvme_fc_ctrl_get(ctrl)) 1518 continue; 1519 spin_lock(&ctrl->lock); 1520 if (association_id == ctrl->association_id) { 1521 oldls = ctrl->rcv_disconn; 1522 ctrl->rcv_disconn = lsop; 1523 ret = ctrl; 1524 } 1525 spin_unlock(&ctrl->lock); 1526 if (ret) 1527 /* leave the ctrl get reference */ 1528 break; 1529 nvme_fc_ctrl_put(ctrl); 1530 } 1531 1532 spin_unlock_irqrestore(&rport->lock, flags); 1533 1534 /* transmit a response for anything that was pending */ 1535 if (oldls) { 1536 dev_info(rport->lport->dev, 1537 "NVME-FC{%d}: Multiple Disconnect Association " 1538 "LS's received\n", ctrl->cnum); 1539 /* overwrite good response with bogus failure */ 1540 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1541 sizeof(*oldls->rspbuf), 1542 rqst->w0.ls_cmd, 1543 FCNVME_RJT_RC_UNAB, 1544 FCNVME_RJT_EXP_NONE, 0); 1545 nvme_fc_xmt_ls_rsp(oldls); 1546 } 1547 1548 return ret; 1549 } 1550 1551 /* 1552 * returns true to mean LS handled and ls_rsp can be sent 1553 * returns false to defer ls_rsp xmt (will be done as part of 1554 * association termination) 1555 */ 1556 static bool 1557 nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop) 1558 { 1559 struct nvme_fc_rport *rport = lsop->rport; 1560 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1561 &lsop->rqstbuf->rq_dis_assoc; 1562 struct fcnvme_ls_disconnect_assoc_acc *acc = 1563 &lsop->rspbuf->rsp_dis_assoc; 1564 struct nvme_fc_ctrl *ctrl = NULL; 1565 int ret = 0; 1566 1567 memset(acc, 0, sizeof(*acc)); 1568 1569 ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); 1570 if (!ret) { 1571 /* match an active association */ 1572 ctrl = nvme_fc_match_disconn_ls(rport, lsop); 1573 if (!ctrl) 1574 ret = VERR_NO_ASSOC; 1575 } 1576 1577 if (ret) { 1578 dev_info(rport->lport->dev, 1579 "Disconnect LS failed: %s\n", 1580 validation_errors[ret]); 1581 lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1582 sizeof(*acc), rqst->w0.ls_cmd, 1583 (ret == VERR_NO_ASSOC) ? 1584 FCNVME_RJT_RC_INV_ASSOC : 1585 FCNVME_RJT_RC_LOGIC, 1586 FCNVME_RJT_EXP_NONE, 0); 1587 return true; 1588 } 1589 1590 /* format an ACCept response */ 1591 1592 lsop->lsrsp->rsplen = sizeof(*acc); 1593 1594 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1595 fcnvme_lsdesc_len( 1596 sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 1597 FCNVME_LS_DISCONNECT_ASSOC); 1598 1599 /* 1600 * the transmit of the response will occur after the exchanges 1601 * for the association have been ABTS'd by 1602 * nvme_fc_delete_association(). 1603 */ 1604 1605 /* fail the association */ 1606 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); 1607 1608 /* release the reference taken by nvme_fc_match_disconn_ls() */ 1609 nvme_fc_ctrl_put(ctrl); 1610 1611 return false; 1612 } 1613 1614 /* 1615 * Actual Processing routine for received FC-NVME LS Requests from the LLD 1616 * returns true if a response should be sent afterward, false if rsp will 1617 * be sent asynchronously. 1618 */ 1619 static bool 1620 nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop) 1621 { 1622 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; 1623 bool ret = true; 1624 1625 lsop->lsrsp->nvme_fc_private = lsop; 1626 lsop->lsrsp->rspbuf = lsop->rspbuf; 1627 lsop->lsrsp->rspdma = lsop->rspdma; 1628 lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; 1629 /* Be preventative. handlers will later set to valid length */ 1630 lsop->lsrsp->rsplen = 0; 1631 1632 /* 1633 * handlers: 1634 * parse request input, execute the request, and format the 1635 * LS response 1636 */ 1637 switch (w0->ls_cmd) { 1638 case FCNVME_LS_DISCONNECT_ASSOC: 1639 ret = nvme_fc_ls_disconnect_assoc(lsop); 1640 break; 1641 case FCNVME_LS_DISCONNECT_CONN: 1642 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1643 sizeof(*lsop->rspbuf), w0->ls_cmd, 1644 FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0); 1645 break; 1646 case FCNVME_LS_CREATE_ASSOCIATION: 1647 case FCNVME_LS_CREATE_CONNECTION: 1648 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1649 sizeof(*lsop->rspbuf), w0->ls_cmd, 1650 FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); 1651 break; 1652 default: 1653 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1654 sizeof(*lsop->rspbuf), w0->ls_cmd, 1655 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1656 break; 1657 } 1658 1659 return(ret); 1660 } 1661 1662 static void 1663 nvme_fc_handle_ls_rqst_work(struct work_struct *work) 1664 { 1665 struct nvme_fc_rport *rport = 1666 container_of(work, struct nvme_fc_rport, lsrcv_work); 1667 struct fcnvme_ls_rqst_w0 *w0; 1668 struct nvmefc_ls_rcv_op *lsop; 1669 unsigned long flags; 1670 bool sendrsp; 1671 1672 restart: 1673 sendrsp = true; 1674 spin_lock_irqsave(&rport->lock, flags); 1675 list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { 1676 if (lsop->handled) 1677 continue; 1678 1679 lsop->handled = true; 1680 if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { 1681 spin_unlock_irqrestore(&rport->lock, flags); 1682 sendrsp = nvme_fc_handle_ls_rqst(lsop); 1683 } else { 1684 spin_unlock_irqrestore(&rport->lock, flags); 1685 w0 = &lsop->rqstbuf->w0; 1686 lsop->lsrsp->rsplen = nvme_fc_format_rjt( 1687 lsop->rspbuf, 1688 sizeof(*lsop->rspbuf), 1689 w0->ls_cmd, 1690 FCNVME_RJT_RC_UNAB, 1691 FCNVME_RJT_EXP_NONE, 0); 1692 } 1693 if (sendrsp) 1694 nvme_fc_xmt_ls_rsp(lsop); 1695 goto restart; 1696 } 1697 spin_unlock_irqrestore(&rport->lock, flags); 1698 } 1699 1700 /** 1701 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD 1702 * upon the reception of a NVME LS request. 1703 * 1704 * The nvme-fc layer will copy payload to an internal structure for 1705 * processing. As such, upon completion of the routine, the LLDD may 1706 * immediately free/reuse the LS request buffer passed in the call. 1707 * 1708 * If this routine returns error, the LLDD should abort the exchange. 1709 * 1710 * @remoteport: pointer to the (registered) remote port that the LS 1711 * was received from. The remoteport is associated with 1712 * a specific localport. 1713 * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be 1714 * used to reference the exchange corresponding to the LS 1715 * when issuing an ls response. 1716 * @lsreqbuf: pointer to the buffer containing the LS Request 1717 * @lsreqbuf_len: length, in bytes, of the received LS request 1718 */ 1719 int 1720 nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr, 1721 struct nvmefc_ls_rsp *lsrsp, 1722 void *lsreqbuf, u32 lsreqbuf_len) 1723 { 1724 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 1725 struct nvme_fc_lport *lport = rport->lport; 1726 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 1727 struct nvmefc_ls_rcv_op *lsop; 1728 unsigned long flags; 1729 int ret; 1730 1731 nvme_fc_rport_get(rport); 1732 1733 /* validate there's a routine to transmit a response */ 1734 if (!lport->ops->xmt_ls_rsp) { 1735 dev_info(lport->dev, 1736 "RCV %s LS failed: no LLDD xmt_ls_rsp\n", 1737 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1738 nvmefc_ls_names[w0->ls_cmd] : ""); 1739 ret = -EINVAL; 1740 goto out_put; 1741 } 1742 1743 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 1744 dev_info(lport->dev, 1745 "RCV %s LS failed: payload too large\n", 1746 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1747 nvmefc_ls_names[w0->ls_cmd] : ""); 1748 ret = -E2BIG; 1749 goto out_put; 1750 } 1751 1752 lsop = kzalloc(sizeof(*lsop) + 1753 sizeof(union nvmefc_ls_requests) + 1754 sizeof(union nvmefc_ls_responses), 1755 GFP_KERNEL); 1756 if (!lsop) { 1757 dev_info(lport->dev, 1758 "RCV %s LS failed: No memory\n", 1759 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1760 nvmefc_ls_names[w0->ls_cmd] : ""); 1761 ret = -ENOMEM; 1762 goto out_put; 1763 } 1764 lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1]; 1765 lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1]; 1766 1767 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, 1768 sizeof(*lsop->rspbuf), 1769 DMA_TO_DEVICE); 1770 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { 1771 dev_info(lport->dev, 1772 "RCV %s LS failed: DMA mapping failure\n", 1773 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1774 nvmefc_ls_names[w0->ls_cmd] : ""); 1775 ret = -EFAULT; 1776 goto out_free; 1777 } 1778 1779 lsop->rport = rport; 1780 lsop->lsrsp = lsrsp; 1781 1782 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); 1783 lsop->rqstdatalen = lsreqbuf_len; 1784 1785 spin_lock_irqsave(&rport->lock, flags); 1786 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { 1787 spin_unlock_irqrestore(&rport->lock, flags); 1788 ret = -ENOTCONN; 1789 goto out_unmap; 1790 } 1791 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); 1792 spin_unlock_irqrestore(&rport->lock, flags); 1793 1794 schedule_work(&rport->lsrcv_work); 1795 1796 return 0; 1797 1798 out_unmap: 1799 fc_dma_unmap_single(lport->dev, lsop->rspdma, 1800 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1801 out_free: 1802 kfree(lsop); 1803 out_put: 1804 nvme_fc_rport_put(rport); 1805 return ret; 1806 } 1807 EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req); 1808 1809 1810 /* *********************** NVME Ctrl Routines **************************** */ 1811 1812 static void 1813 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, 1814 struct nvme_fc_fcp_op *op) 1815 { 1816 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, 1817 sizeof(op->rsp_iu), DMA_FROM_DEVICE); 1818 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, 1819 sizeof(op->cmd_iu), DMA_TO_DEVICE); 1820 1821 atomic_set(&op->state, FCPOP_STATE_UNINIT); 1822 } 1823 1824 static void 1825 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, 1826 unsigned int hctx_idx) 1827 { 1828 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 1829 1830 return __nvme_fc_exit_request(set->driver_data, op); 1831 } 1832 1833 static int 1834 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) 1835 { 1836 unsigned long flags; 1837 int opstate; 1838 1839 spin_lock_irqsave(&ctrl->lock, flags); 1840 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); 1841 if (opstate != FCPOP_STATE_ACTIVE) 1842 atomic_set(&op->state, opstate); 1843 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { 1844 op->flags |= FCOP_FLAGS_TERMIO; 1845 ctrl->iocnt++; 1846 } 1847 spin_unlock_irqrestore(&ctrl->lock, flags); 1848 1849 if (opstate != FCPOP_STATE_ACTIVE) 1850 return -ECANCELED; 1851 1852 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, 1853 &ctrl->rport->remoteport, 1854 op->queue->lldd_handle, 1855 &op->fcp_req); 1856 1857 return 0; 1858 } 1859 1860 static void 1861 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) 1862 { 1863 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; 1864 int i; 1865 1866 /* ensure we've initialized the ops once */ 1867 if (!(aen_op->flags & FCOP_FLAGS_AEN)) 1868 return; 1869 1870 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) 1871 __nvme_fc_abort_op(ctrl, aen_op); 1872 } 1873 1874 static inline void 1875 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, 1876 struct nvme_fc_fcp_op *op, int opstate) 1877 { 1878 unsigned long flags; 1879 1880 if (opstate == FCPOP_STATE_ABORTED) { 1881 spin_lock_irqsave(&ctrl->lock, flags); 1882 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && 1883 op->flags & FCOP_FLAGS_TERMIO) { 1884 if (!--ctrl->iocnt) 1885 wake_up(&ctrl->ioabort_wait); 1886 } 1887 spin_unlock_irqrestore(&ctrl->lock, flags); 1888 } 1889 } 1890 1891 static void 1892 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) 1893 { 1894 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); 1895 struct request *rq = op->rq; 1896 struct nvmefc_fcp_req *freq = &op->fcp_req; 1897 struct nvme_fc_ctrl *ctrl = op->ctrl; 1898 struct nvme_fc_queue *queue = op->queue; 1899 struct nvme_completion *cqe = &op->rsp_iu.cqe; 1900 struct nvme_command *sqe = &op->cmd_iu.sqe; 1901 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 1902 union nvme_result result; 1903 bool terminate_assoc = true; 1904 int opstate; 1905 1906 /* 1907 * WARNING: 1908 * The current linux implementation of a nvme controller 1909 * allocates a single tag set for all io queues and sizes 1910 * the io queues to fully hold all possible tags. Thus, the 1911 * implementation does not reference or care about the sqhd 1912 * value as it never needs to use the sqhd/sqtail pointers 1913 * for submission pacing. 1914 * 1915 * This affects the FC-NVME implementation in two ways: 1916 * 1) As the value doesn't matter, we don't need to waste 1917 * cycles extracting it from ERSPs and stamping it in the 1918 * cases where the transport fabricates CQEs on successful 1919 * completions. 1920 * 2) The FC-NVME implementation requires that delivery of 1921 * ERSP completions are to go back to the nvme layer in order 1922 * relative to the rsn, such that the sqhd value will always 1923 * be "in order" for the nvme layer. As the nvme layer in 1924 * linux doesn't care about sqhd, there's no need to return 1925 * them in order. 1926 * 1927 * Additionally: 1928 * As the core nvme layer in linux currently does not look at 1929 * every field in the cqe - in cases where the FC transport must 1930 * fabricate a CQE, the following fields will not be set as they 1931 * are not referenced: 1932 * cqe.sqid, cqe.sqhd, cqe.command_id 1933 * 1934 * Failure or error of an individual i/o, in a transport 1935 * detected fashion unrelated to the nvme completion status, 1936 * potentially cause the initiator and target sides to get out 1937 * of sync on SQ head/tail (aka outstanding io count allowed). 1938 * Per FC-NVME spec, failure of an individual command requires 1939 * the connection to be terminated, which in turn requires the 1940 * association to be terminated. 1941 */ 1942 1943 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 1944 1945 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, 1946 sizeof(op->rsp_iu), DMA_FROM_DEVICE); 1947 1948 if (opstate == FCPOP_STATE_ABORTED) 1949 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1950 else if (freq->status) { 1951 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1952 dev_info(ctrl->ctrl.device, 1953 "NVME-FC{%d}: io failed due to lldd error %d\n", 1954 ctrl->cnum, freq->status); 1955 } 1956 1957 /* 1958 * For the linux implementation, if we have an unsuccesful 1959 * status, they blk-mq layer can typically be called with the 1960 * non-zero status and the content of the cqe isn't important. 1961 */ 1962 if (status) 1963 goto done; 1964 1965 /* 1966 * command completed successfully relative to the wire 1967 * protocol. However, validate anything received and 1968 * extract the status and result from the cqe (create it 1969 * where necessary). 1970 */ 1971 1972 switch (freq->rcv_rsplen) { 1973 1974 case 0: 1975 case NVME_FC_SIZEOF_ZEROS_RSP: 1976 /* 1977 * No response payload or 12 bytes of payload (which 1978 * should all be zeros) are considered successful and 1979 * no payload in the CQE by the transport. 1980 */ 1981 if (freq->transferred_length != 1982 be32_to_cpu(op->cmd_iu.data_len)) { 1983 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1984 dev_info(ctrl->ctrl.device, 1985 "NVME-FC{%d}: io failed due to bad transfer " 1986 "length: %d vs expected %d\n", 1987 ctrl->cnum, freq->transferred_length, 1988 be32_to_cpu(op->cmd_iu.data_len)); 1989 goto done; 1990 } 1991 result.u64 = 0; 1992 break; 1993 1994 case sizeof(struct nvme_fc_ersp_iu): 1995 /* 1996 * The ERSP IU contains a full completion with CQE. 1997 * Validate ERSP IU and look at cqe. 1998 */ 1999 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != 2000 (freq->rcv_rsplen / 4) || 2001 be32_to_cpu(op->rsp_iu.xfrd_len) != 2002 freq->transferred_length || 2003 op->rsp_iu.ersp_result || 2004 sqe->common.command_id != cqe->command_id)) { 2005 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 2006 dev_info(ctrl->ctrl.device, 2007 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " 2008 "iu len %d, xfr len %d vs %d, status code " 2009 "%d, cmdid %d vs %d\n", 2010 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), 2011 be32_to_cpu(op->rsp_iu.xfrd_len), 2012 freq->transferred_length, 2013 op->rsp_iu.ersp_result, 2014 sqe->common.command_id, 2015 cqe->command_id); 2016 goto done; 2017 } 2018 result = cqe->result; 2019 status = cqe->status; 2020 break; 2021 2022 default: 2023 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 2024 dev_info(ctrl->ctrl.device, 2025 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " 2026 "len %d\n", 2027 ctrl->cnum, freq->rcv_rsplen); 2028 goto done; 2029 } 2030 2031 terminate_assoc = false; 2032 2033 done: 2034 if (op->flags & FCOP_FLAGS_AEN) { 2035 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); 2036 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2037 atomic_set(&op->state, FCPOP_STATE_IDLE); 2038 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 2039 nvme_fc_ctrl_put(ctrl); 2040 goto check_error; 2041 } 2042 2043 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2044 if (!nvme_try_complete_req(rq, status, result)) 2045 nvme_fc_complete_rq(rq); 2046 2047 check_error: 2048 if (terminate_assoc) 2049 nvme_fc_error_recovery(ctrl, "transport detected io error"); 2050 } 2051 2052 static int 2053 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, 2054 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, 2055 struct request *rq, u32 rqno) 2056 { 2057 struct nvme_fcp_op_w_sgl *op_w_sgl = 2058 container_of(op, typeof(*op_w_sgl), op); 2059 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2060 int ret = 0; 2061 2062 memset(op, 0, sizeof(*op)); 2063 op->fcp_req.cmdaddr = &op->cmd_iu; 2064 op->fcp_req.cmdlen = sizeof(op->cmd_iu); 2065 op->fcp_req.rspaddr = &op->rsp_iu; 2066 op->fcp_req.rsplen = sizeof(op->rsp_iu); 2067 op->fcp_req.done = nvme_fc_fcpio_done; 2068 op->ctrl = ctrl; 2069 op->queue = queue; 2070 op->rq = rq; 2071 op->rqno = rqno; 2072 2073 cmdiu->format_id = NVME_CMD_FORMAT_ID; 2074 cmdiu->fc_id = NVME_CMD_FC_ID; 2075 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); 2076 if (queue->qnum) 2077 cmdiu->rsv_cat = fccmnd_set_cat_css(0, 2078 (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT)); 2079 else 2080 cmdiu->rsv_cat = fccmnd_set_cat_admin(0); 2081 2082 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, 2083 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); 2084 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { 2085 dev_err(ctrl->dev, 2086 "FCP Op failed - cmdiu dma mapping failed.\n"); 2087 ret = -EFAULT; 2088 goto out_on_error; 2089 } 2090 2091 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, 2092 &op->rsp_iu, sizeof(op->rsp_iu), 2093 DMA_FROM_DEVICE); 2094 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { 2095 dev_err(ctrl->dev, 2096 "FCP Op failed - rspiu dma mapping failed.\n"); 2097 ret = -EFAULT; 2098 } 2099 2100 atomic_set(&op->state, FCPOP_STATE_IDLE); 2101 out_on_error: 2102 return ret; 2103 } 2104 2105 static int 2106 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, 2107 unsigned int hctx_idx, unsigned int numa_node) 2108 { 2109 struct nvme_fc_ctrl *ctrl = set->driver_data; 2110 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq); 2111 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; 2112 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; 2113 int res; 2114 2115 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); 2116 if (res) 2117 return res; 2118 op->op.fcp_req.first_sgl = op->sgl; 2119 op->op.fcp_req.private = &op->priv[0]; 2120 nvme_req(rq)->ctrl = &ctrl->ctrl; 2121 return res; 2122 } 2123 2124 static int 2125 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) 2126 { 2127 struct nvme_fc_fcp_op *aen_op; 2128 struct nvme_fc_cmd_iu *cmdiu; 2129 struct nvme_command *sqe; 2130 void *private = NULL; 2131 int i, ret; 2132 2133 aen_op = ctrl->aen_ops; 2134 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 2135 if (ctrl->lport->ops->fcprqst_priv_sz) { 2136 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, 2137 GFP_KERNEL); 2138 if (!private) 2139 return -ENOMEM; 2140 } 2141 2142 cmdiu = &aen_op->cmd_iu; 2143 sqe = &cmdiu->sqe; 2144 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], 2145 aen_op, (struct request *)NULL, 2146 (NVME_AQ_BLK_MQ_DEPTH + i)); 2147 if (ret) { 2148 kfree(private); 2149 return ret; 2150 } 2151 2152 aen_op->flags = FCOP_FLAGS_AEN; 2153 aen_op->fcp_req.private = private; 2154 2155 memset(sqe, 0, sizeof(*sqe)); 2156 sqe->common.opcode = nvme_admin_async_event; 2157 /* Note: core layer may overwrite the sqe.command_id value */ 2158 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; 2159 } 2160 return 0; 2161 } 2162 2163 static void 2164 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) 2165 { 2166 struct nvme_fc_fcp_op *aen_op; 2167 int i; 2168 2169 cancel_work_sync(&ctrl->ctrl.async_event_work); 2170 aen_op = ctrl->aen_ops; 2171 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 2172 __nvme_fc_exit_request(ctrl, aen_op); 2173 2174 kfree(aen_op->fcp_req.private); 2175 aen_op->fcp_req.private = NULL; 2176 } 2177 } 2178 2179 static inline void 2180 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, 2181 unsigned int qidx) 2182 { 2183 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; 2184 2185 hctx->driver_data = queue; 2186 queue->hctx = hctx; 2187 } 2188 2189 static int 2190 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 2191 unsigned int hctx_idx) 2192 { 2193 struct nvme_fc_ctrl *ctrl = data; 2194 2195 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); 2196 2197 return 0; 2198 } 2199 2200 static int 2201 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 2202 unsigned int hctx_idx) 2203 { 2204 struct nvme_fc_ctrl *ctrl = data; 2205 2206 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); 2207 2208 return 0; 2209 } 2210 2211 static void 2212 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) 2213 { 2214 struct nvme_fc_queue *queue; 2215 2216 queue = &ctrl->queues[idx]; 2217 memset(queue, 0, sizeof(*queue)); 2218 queue->ctrl = ctrl; 2219 queue->qnum = idx; 2220 atomic_set(&queue->csn, 0); 2221 queue->dev = ctrl->dev; 2222 2223 if (idx > 0) 2224 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; 2225 else 2226 queue->cmnd_capsule_len = sizeof(struct nvme_command); 2227 2228 /* 2229 * Considered whether we should allocate buffers for all SQEs 2230 * and CQEs and dma map them - mapping their respective entries 2231 * into the request structures (kernel vm addr and dma address) 2232 * thus the driver could use the buffers/mappings directly. 2233 * It only makes sense if the LLDD would use them for its 2234 * messaging api. It's very unlikely most adapter api's would use 2235 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload 2236 * structures were used instead. 2237 */ 2238 } 2239 2240 /* 2241 * This routine terminates a queue at the transport level. 2242 * The transport has already ensured that all outstanding ios on 2243 * the queue have been terminated. 2244 * The transport will send a Disconnect LS request to terminate 2245 * the queue's connection. Termination of the admin queue will also 2246 * terminate the association at the target. 2247 */ 2248 static void 2249 nvme_fc_free_queue(struct nvme_fc_queue *queue) 2250 { 2251 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) 2252 return; 2253 2254 clear_bit(NVME_FC_Q_LIVE, &queue->flags); 2255 /* 2256 * Current implementation never disconnects a single queue. 2257 * It always terminates a whole association. So there is never 2258 * a disconnect(queue) LS sent to the target. 2259 */ 2260 2261 queue->connection_id = 0; 2262 atomic_set(&queue->csn, 0); 2263 } 2264 2265 static void 2266 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, 2267 struct nvme_fc_queue *queue, unsigned int qidx) 2268 { 2269 if (ctrl->lport->ops->delete_queue) 2270 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, 2271 queue->lldd_handle); 2272 queue->lldd_handle = NULL; 2273 } 2274 2275 static void 2276 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) 2277 { 2278 int i; 2279 2280 for (i = 1; i < ctrl->ctrl.queue_count; i++) 2281 nvme_fc_free_queue(&ctrl->queues[i]); 2282 } 2283 2284 static int 2285 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, 2286 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) 2287 { 2288 int ret = 0; 2289 2290 queue->lldd_handle = NULL; 2291 if (ctrl->lport->ops->create_queue) 2292 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, 2293 qidx, qsize, &queue->lldd_handle); 2294 2295 return ret; 2296 } 2297 2298 static void 2299 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) 2300 { 2301 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; 2302 int i; 2303 2304 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) 2305 __nvme_fc_delete_hw_queue(ctrl, queue, i); 2306 } 2307 2308 static int 2309 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 2310 { 2311 struct nvme_fc_queue *queue = &ctrl->queues[1]; 2312 int i, ret; 2313 2314 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { 2315 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); 2316 if (ret) 2317 goto delete_queues; 2318 } 2319 2320 return 0; 2321 2322 delete_queues: 2323 for (; i > 0; i--) 2324 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); 2325 return ret; 2326 } 2327 2328 static int 2329 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 2330 { 2331 int i, ret = 0; 2332 2333 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 2334 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, 2335 (qsize / 5)); 2336 if (ret) 2337 break; 2338 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); 2339 if (ret) 2340 break; 2341 2342 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); 2343 } 2344 2345 return ret; 2346 } 2347 2348 static void 2349 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) 2350 { 2351 int i; 2352 2353 for (i = 1; i < ctrl->ctrl.queue_count; i++) 2354 nvme_fc_init_queue(ctrl, i); 2355 } 2356 2357 static void 2358 nvme_fc_ctrl_free(struct kref *ref) 2359 { 2360 struct nvme_fc_ctrl *ctrl = 2361 container_of(ref, struct nvme_fc_ctrl, ref); 2362 unsigned long flags; 2363 2364 if (ctrl->ctrl.tagset) { 2365 blk_cleanup_queue(ctrl->ctrl.connect_q); 2366 blk_mq_free_tag_set(&ctrl->tag_set); 2367 } 2368 2369 /* remove from rport list */ 2370 spin_lock_irqsave(&ctrl->rport->lock, flags); 2371 list_del(&ctrl->ctrl_list); 2372 spin_unlock_irqrestore(&ctrl->rport->lock, flags); 2373 2374 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 2375 blk_cleanup_queue(ctrl->ctrl.admin_q); 2376 blk_cleanup_queue(ctrl->ctrl.fabrics_q); 2377 blk_mq_free_tag_set(&ctrl->admin_tag_set); 2378 2379 kfree(ctrl->queues); 2380 2381 put_device(ctrl->dev); 2382 nvme_fc_rport_put(ctrl->rport); 2383 2384 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); 2385 if (ctrl->ctrl.opts) 2386 nvmf_free_options(ctrl->ctrl.opts); 2387 kfree(ctrl); 2388 } 2389 2390 static void 2391 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) 2392 { 2393 kref_put(&ctrl->ref, nvme_fc_ctrl_free); 2394 } 2395 2396 static int 2397 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) 2398 { 2399 return kref_get_unless_zero(&ctrl->ref); 2400 } 2401 2402 /* 2403 * All accesses from nvme core layer done - can now free the 2404 * controller. Called after last nvme_put_ctrl() call 2405 */ 2406 static void 2407 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) 2408 { 2409 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2410 2411 WARN_ON(nctrl != &ctrl->ctrl); 2412 2413 nvme_fc_ctrl_put(ctrl); 2414 } 2415 2416 /* 2417 * This routine is used by the transport when it needs to find active 2418 * io on a queue that is to be terminated. The transport uses 2419 * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke 2420 * this routine to kill them on a 1 by 1 basis. 2421 * 2422 * As FC allocates FC exchange for each io, the transport must contact 2423 * the LLDD to terminate the exchange, thus releasing the FC exchange. 2424 * After terminating the exchange the LLDD will call the transport's 2425 * normal io done path for the request, but it will have an aborted 2426 * status. The done path will return the io request back to the block 2427 * layer with an error status. 2428 */ 2429 static bool 2430 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) 2431 { 2432 struct nvme_ctrl *nctrl = data; 2433 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2434 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); 2435 2436 __nvme_fc_abort_op(ctrl, op); 2437 return true; 2438 } 2439 2440 /* 2441 * This routine runs through all outstanding commands on the association 2442 * and aborts them. This routine is typically be called by the 2443 * delete_association routine. It is also called due to an error during 2444 * reconnect. In that scenario, it is most likely a command that initializes 2445 * the controller, including fabric Connect commands on io queues, that 2446 * may have timed out or failed thus the io must be killed for the connect 2447 * thread to see the error. 2448 */ 2449 static void 2450 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) 2451 { 2452 /* 2453 * If io queues are present, stop them and terminate all outstanding 2454 * ios on them. As FC allocates FC exchange for each io, the 2455 * transport must contact the LLDD to terminate the exchange, 2456 * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr() 2457 * to tell us what io's are busy and invoke a transport routine 2458 * to kill them with the LLDD. After terminating the exchange 2459 * the LLDD will call the transport's normal io done path, but it 2460 * will have an aborted status. The done path will return the 2461 * io requests back to the block layer as part of normal completions 2462 * (but with error status). 2463 */ 2464 if (ctrl->ctrl.queue_count > 1) { 2465 nvme_stop_queues(&ctrl->ctrl); 2466 blk_mq_tagset_busy_iter(&ctrl->tag_set, 2467 nvme_fc_terminate_exchange, &ctrl->ctrl); 2468 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); 2469 if (start_queues) 2470 nvme_start_queues(&ctrl->ctrl); 2471 } 2472 2473 /* 2474 * Other transports, which don't have link-level contexts bound 2475 * to sqe's, would try to gracefully shutdown the controller by 2476 * writing the registers for shutdown and polling (call 2477 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially 2478 * just aborted and we will wait on those contexts, and given 2479 * there was no indication of how live the controlelr is on the 2480 * link, don't send more io to create more contexts for the 2481 * shutdown. Let the controller fail via keepalive failure if 2482 * its still present. 2483 */ 2484 2485 /* 2486 * clean up the admin queue. Same thing as above. 2487 */ 2488 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 2489 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 2490 nvme_fc_terminate_exchange, &ctrl->ctrl); 2491 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); 2492 } 2493 2494 static void 2495 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) 2496 { 2497 /* 2498 * if an error (io timeout, etc) while (re)connecting, the remote 2499 * port requested terminating of the association (disconnect_ls) 2500 * or an error (timeout or abort) occurred on an io while creating 2501 * the controller. Abort any ios on the association and let the 2502 * create_association error path resolve things. 2503 */ 2504 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { 2505 __nvme_fc_abort_outstanding_ios(ctrl, true); 2506 set_bit(ASSOC_FAILED, &ctrl->flags); 2507 return; 2508 } 2509 2510 /* Otherwise, only proceed if in LIVE state - e.g. on first error */ 2511 if (ctrl->ctrl.state != NVME_CTRL_LIVE) 2512 return; 2513 2514 dev_warn(ctrl->ctrl.device, 2515 "NVME-FC{%d}: transport association event: %s\n", 2516 ctrl->cnum, errmsg); 2517 dev_warn(ctrl->ctrl.device, 2518 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 2519 2520 nvme_reset_ctrl(&ctrl->ctrl); 2521 } 2522 2523 static enum blk_eh_timer_return 2524 nvme_fc_timeout(struct request *rq, bool reserved) 2525 { 2526 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2527 struct nvme_fc_ctrl *ctrl = op->ctrl; 2528 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2529 struct nvme_command *sqe = &cmdiu->sqe; 2530 2531 /* 2532 * Attempt to abort the offending command. Command completion 2533 * will detect the aborted io and will fail the connection. 2534 */ 2535 dev_info(ctrl->ctrl.device, 2536 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " 2537 "x%08x/x%08x\n", 2538 ctrl->cnum, op->queue->qnum, sqe->common.opcode, 2539 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); 2540 if (__nvme_fc_abort_op(ctrl, op)) 2541 nvme_fc_error_recovery(ctrl, "io timeout abort failed"); 2542 2543 /* 2544 * the io abort has been initiated. Have the reset timer 2545 * restarted and the abort completion will complete the io 2546 * shortly. Avoids a synchronous wait while the abort finishes. 2547 */ 2548 return BLK_EH_RESET_TIMER; 2549 } 2550 2551 static int 2552 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 2553 struct nvme_fc_fcp_op *op) 2554 { 2555 struct nvmefc_fcp_req *freq = &op->fcp_req; 2556 int ret; 2557 2558 freq->sg_cnt = 0; 2559 2560 if (!blk_rq_nr_phys_segments(rq)) 2561 return 0; 2562 2563 freq->sg_table.sgl = freq->first_sgl; 2564 ret = sg_alloc_table_chained(&freq->sg_table, 2565 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, 2566 NVME_INLINE_SG_CNT); 2567 if (ret) 2568 return -ENOMEM; 2569 2570 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 2571 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); 2572 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 2573 op->nents, rq_dma_dir(rq)); 2574 if (unlikely(freq->sg_cnt <= 0)) { 2575 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 2576 freq->sg_cnt = 0; 2577 return -EFAULT; 2578 } 2579 2580 /* 2581 * TODO: blk_integrity_rq(rq) for DIF 2582 */ 2583 return 0; 2584 } 2585 2586 static void 2587 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 2588 struct nvme_fc_fcp_op *op) 2589 { 2590 struct nvmefc_fcp_req *freq = &op->fcp_req; 2591 2592 if (!freq->sg_cnt) 2593 return; 2594 2595 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, 2596 rq_dma_dir(rq)); 2597 2598 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 2599 2600 freq->sg_cnt = 0; 2601 } 2602 2603 /* 2604 * In FC, the queue is a logical thing. At transport connect, the target 2605 * creates its "queue" and returns a handle that is to be given to the 2606 * target whenever it posts something to the corresponding SQ. When an 2607 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the 2608 * command contained within the SQE, an io, and assigns a FC exchange 2609 * to it. The SQE and the associated SQ handle are sent in the initial 2610 * CMD IU sents on the exchange. All transfers relative to the io occur 2611 * as part of the exchange. The CQE is the last thing for the io, 2612 * which is transferred (explicitly or implicitly) with the RSP IU 2613 * sent on the exchange. After the CQE is received, the FC exchange is 2614 * terminaed and the Exchange may be used on a different io. 2615 * 2616 * The transport to LLDD api has the transport making a request for a 2617 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange 2618 * resource and transfers the command. The LLDD will then process all 2619 * steps to complete the io. Upon completion, the transport done routine 2620 * is called. 2621 * 2622 * So - while the operation is outstanding to the LLDD, there is a link 2623 * level FC exchange resource that is also outstanding. This must be 2624 * considered in all cleanup operations. 2625 */ 2626 static blk_status_t 2627 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 2628 struct nvme_fc_fcp_op *op, u32 data_len, 2629 enum nvmefc_fcp_datadir io_dir) 2630 { 2631 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2632 struct nvme_command *sqe = &cmdiu->sqe; 2633 int ret, opstate; 2634 2635 /* 2636 * before attempting to send the io, check to see if we believe 2637 * the target device is present 2638 */ 2639 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 2640 return BLK_STS_RESOURCE; 2641 2642 if (!nvme_fc_ctrl_get(ctrl)) 2643 return BLK_STS_IOERR; 2644 2645 /* format the FC-NVME CMD IU and fcp_req */ 2646 cmdiu->connection_id = cpu_to_be64(queue->connection_id); 2647 cmdiu->data_len = cpu_to_be32(data_len); 2648 switch (io_dir) { 2649 case NVMEFC_FCP_WRITE: 2650 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; 2651 break; 2652 case NVMEFC_FCP_READ: 2653 cmdiu->flags = FCNVME_CMD_FLAGS_READ; 2654 break; 2655 case NVMEFC_FCP_NODATA: 2656 cmdiu->flags = 0; 2657 break; 2658 } 2659 op->fcp_req.payload_length = data_len; 2660 op->fcp_req.io_dir = io_dir; 2661 op->fcp_req.transferred_length = 0; 2662 op->fcp_req.rcv_rsplen = 0; 2663 op->fcp_req.status = NVME_SC_SUCCESS; 2664 op->fcp_req.sqid = cpu_to_le16(queue->qnum); 2665 2666 /* 2667 * validate per fabric rules, set fields mandated by fabric spec 2668 * as well as those by FC-NVME spec. 2669 */ 2670 WARN_ON_ONCE(sqe->common.metadata); 2671 sqe->common.flags |= NVME_CMD_SGL_METABUF; 2672 2673 /* 2674 * format SQE DPTR field per FC-NVME rules: 2675 * type=0x5 Transport SGL Data Block Descriptor 2676 * subtype=0xA Transport-specific value 2677 * address=0 2678 * length=length of the data series 2679 */ 2680 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 2681 NVME_SGL_FMT_TRANSPORT_A; 2682 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); 2683 sqe->rw.dptr.sgl.addr = 0; 2684 2685 if (!(op->flags & FCOP_FLAGS_AEN)) { 2686 ret = nvme_fc_map_data(ctrl, op->rq, op); 2687 if (ret < 0) { 2688 nvme_cleanup_cmd(op->rq); 2689 nvme_fc_ctrl_put(ctrl); 2690 if (ret == -ENOMEM || ret == -EAGAIN) 2691 return BLK_STS_RESOURCE; 2692 return BLK_STS_IOERR; 2693 } 2694 } 2695 2696 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, 2697 sizeof(op->cmd_iu), DMA_TO_DEVICE); 2698 2699 atomic_set(&op->state, FCPOP_STATE_ACTIVE); 2700 2701 if (!(op->flags & FCOP_FLAGS_AEN)) 2702 blk_mq_start_request(op->rq); 2703 2704 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); 2705 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, 2706 &ctrl->rport->remoteport, 2707 queue->lldd_handle, &op->fcp_req); 2708 2709 if (ret) { 2710 /* 2711 * If the lld fails to send the command is there an issue with 2712 * the csn value? If the command that fails is the Connect, 2713 * no - as the connection won't be live. If it is a command 2714 * post-connect, it's possible a gap in csn may be created. 2715 * Does this matter? As Linux initiators don't send fused 2716 * commands, no. The gap would exist, but as there's nothing 2717 * that depends on csn order to be delivered on the target 2718 * side, it shouldn't hurt. It would be difficult for a 2719 * target to even detect the csn gap as it has no idea when the 2720 * cmd with the csn was supposed to arrive. 2721 */ 2722 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 2723 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2724 2725 if (!(op->flags & FCOP_FLAGS_AEN)) { 2726 nvme_fc_unmap_data(ctrl, op->rq, op); 2727 nvme_cleanup_cmd(op->rq); 2728 } 2729 2730 nvme_fc_ctrl_put(ctrl); 2731 2732 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && 2733 ret != -EBUSY) 2734 return BLK_STS_IOERR; 2735 2736 return BLK_STS_RESOURCE; 2737 } 2738 2739 return BLK_STS_OK; 2740 } 2741 2742 static blk_status_t 2743 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, 2744 const struct blk_mq_queue_data *bd) 2745 { 2746 struct nvme_ns *ns = hctx->queue->queuedata; 2747 struct nvme_fc_queue *queue = hctx->driver_data; 2748 struct nvme_fc_ctrl *ctrl = queue->ctrl; 2749 struct request *rq = bd->rq; 2750 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2751 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2752 struct nvme_command *sqe = &cmdiu->sqe; 2753 enum nvmefc_fcp_datadir io_dir; 2754 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); 2755 u32 data_len; 2756 blk_status_t ret; 2757 2758 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || 2759 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 2760 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); 2761 2762 ret = nvme_setup_cmd(ns, rq, sqe); 2763 if (ret) 2764 return ret; 2765 2766 /* 2767 * nvme core doesn't quite treat the rq opaquely. Commands such 2768 * as WRITE ZEROES will return a non-zero rq payload_bytes yet 2769 * there is no actual payload to be transferred. 2770 * To get it right, key data transmission on there being 1 or 2771 * more physical segments in the sg list. If there is no 2772 * physical segments, there is no payload. 2773 */ 2774 if (blk_rq_nr_phys_segments(rq)) { 2775 data_len = blk_rq_payload_bytes(rq); 2776 io_dir = ((rq_data_dir(rq) == WRITE) ? 2777 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); 2778 } else { 2779 data_len = 0; 2780 io_dir = NVMEFC_FCP_NODATA; 2781 } 2782 2783 2784 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); 2785 } 2786 2787 static void 2788 nvme_fc_submit_async_event(struct nvme_ctrl *arg) 2789 { 2790 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); 2791 struct nvme_fc_fcp_op *aen_op; 2792 blk_status_t ret; 2793 2794 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) 2795 return; 2796 2797 aen_op = &ctrl->aen_ops[0]; 2798 2799 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, 2800 NVMEFC_FCP_NODATA); 2801 if (ret) 2802 dev_err(ctrl->ctrl.device, 2803 "failed async event work\n"); 2804 } 2805 2806 static void 2807 nvme_fc_complete_rq(struct request *rq) 2808 { 2809 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2810 struct nvme_fc_ctrl *ctrl = op->ctrl; 2811 2812 atomic_set(&op->state, FCPOP_STATE_IDLE); 2813 op->flags &= ~FCOP_FLAGS_TERMIO; 2814 2815 nvme_fc_unmap_data(ctrl, rq, op); 2816 nvme_complete_rq(rq); 2817 nvme_fc_ctrl_put(ctrl); 2818 } 2819 2820 2821 static const struct blk_mq_ops nvme_fc_mq_ops = { 2822 .queue_rq = nvme_fc_queue_rq, 2823 .complete = nvme_fc_complete_rq, 2824 .init_request = nvme_fc_init_request, 2825 .exit_request = nvme_fc_exit_request, 2826 .init_hctx = nvme_fc_init_hctx, 2827 .timeout = nvme_fc_timeout, 2828 }; 2829 2830 static int 2831 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) 2832 { 2833 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2834 unsigned int nr_io_queues; 2835 int ret; 2836 2837 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 2838 ctrl->lport->ops->max_hw_queues); 2839 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 2840 if (ret) { 2841 dev_info(ctrl->ctrl.device, 2842 "set_queue_count failed: %d\n", ret); 2843 return ret; 2844 } 2845 2846 ctrl->ctrl.queue_count = nr_io_queues + 1; 2847 if (!nr_io_queues) 2848 return 0; 2849 2850 nvme_fc_init_io_queues(ctrl); 2851 2852 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 2853 ctrl->tag_set.ops = &nvme_fc_mq_ops; 2854 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 2855 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 2856 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; 2857 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 2858 ctrl->tag_set.cmd_size = 2859 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 2860 ctrl->lport->ops->fcprqst_priv_sz); 2861 ctrl->tag_set.driver_data = ctrl; 2862 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; 2863 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; 2864 2865 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); 2866 if (ret) 2867 return ret; 2868 2869 ctrl->ctrl.tagset = &ctrl->tag_set; 2870 2871 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); 2872 if (IS_ERR(ctrl->ctrl.connect_q)) { 2873 ret = PTR_ERR(ctrl->ctrl.connect_q); 2874 goto out_free_tag_set; 2875 } 2876 2877 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2878 if (ret) 2879 goto out_cleanup_blk_queue; 2880 2881 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2882 if (ret) 2883 goto out_delete_hw_queues; 2884 2885 ctrl->ioq_live = true; 2886 2887 return 0; 2888 2889 out_delete_hw_queues: 2890 nvme_fc_delete_hw_io_queues(ctrl); 2891 out_cleanup_blk_queue: 2892 blk_cleanup_queue(ctrl->ctrl.connect_q); 2893 out_free_tag_set: 2894 blk_mq_free_tag_set(&ctrl->tag_set); 2895 nvme_fc_free_io_queues(ctrl); 2896 2897 /* force put free routine to ignore io queues */ 2898 ctrl->ctrl.tagset = NULL; 2899 2900 return ret; 2901 } 2902 2903 static int 2904 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) 2905 { 2906 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2907 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; 2908 unsigned int nr_io_queues; 2909 int ret; 2910 2911 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 2912 ctrl->lport->ops->max_hw_queues); 2913 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 2914 if (ret) { 2915 dev_info(ctrl->ctrl.device, 2916 "set_queue_count failed: %d\n", ret); 2917 return ret; 2918 } 2919 2920 if (!nr_io_queues && prior_ioq_cnt) { 2921 dev_info(ctrl->ctrl.device, 2922 "Fail Reconnect: At least 1 io queue " 2923 "required (was %d)\n", prior_ioq_cnt); 2924 return -ENOSPC; 2925 } 2926 2927 ctrl->ctrl.queue_count = nr_io_queues + 1; 2928 /* check for io queues existing */ 2929 if (ctrl->ctrl.queue_count == 1) 2930 return 0; 2931 2932 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2933 if (ret) 2934 goto out_free_io_queues; 2935 2936 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2937 if (ret) 2938 goto out_delete_hw_queues; 2939 2940 if (prior_ioq_cnt != nr_io_queues) { 2941 dev_info(ctrl->ctrl.device, 2942 "reconnect: revising io queue count from %d to %d\n", 2943 prior_ioq_cnt, nr_io_queues); 2944 nvme_wait_freeze(&ctrl->ctrl); 2945 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 2946 nvme_unfreeze(&ctrl->ctrl); 2947 } 2948 2949 return 0; 2950 2951 out_delete_hw_queues: 2952 nvme_fc_delete_hw_io_queues(ctrl); 2953 out_free_io_queues: 2954 nvme_fc_free_io_queues(ctrl); 2955 return ret; 2956 } 2957 2958 static void 2959 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) 2960 { 2961 struct nvme_fc_lport *lport = rport->lport; 2962 2963 atomic_inc(&lport->act_rport_cnt); 2964 } 2965 2966 static void 2967 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) 2968 { 2969 struct nvme_fc_lport *lport = rport->lport; 2970 u32 cnt; 2971 2972 cnt = atomic_dec_return(&lport->act_rport_cnt); 2973 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) 2974 lport->ops->localport_delete(&lport->localport); 2975 } 2976 2977 static int 2978 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) 2979 { 2980 struct nvme_fc_rport *rport = ctrl->rport; 2981 u32 cnt; 2982 2983 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) 2984 return 1; 2985 2986 cnt = atomic_inc_return(&rport->act_ctrl_cnt); 2987 if (cnt == 1) 2988 nvme_fc_rport_active_on_lport(rport); 2989 2990 return 0; 2991 } 2992 2993 static int 2994 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) 2995 { 2996 struct nvme_fc_rport *rport = ctrl->rport; 2997 struct nvme_fc_lport *lport = rport->lport; 2998 u32 cnt; 2999 3000 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ 3001 3002 cnt = atomic_dec_return(&rport->act_ctrl_cnt); 3003 if (cnt == 0) { 3004 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) 3005 lport->ops->remoteport_delete(&rport->remoteport); 3006 nvme_fc_rport_inactive_on_lport(rport); 3007 } 3008 3009 return 0; 3010 } 3011 3012 /* 3013 * This routine restarts the controller on the host side, and 3014 * on the link side, recreates the controller association. 3015 */ 3016 static int 3017 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) 3018 { 3019 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 3020 struct nvmefc_ls_rcv_op *disls = NULL; 3021 unsigned long flags; 3022 int ret; 3023 bool changed; 3024 3025 ++ctrl->ctrl.nr_reconnects; 3026 3027 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 3028 return -ENODEV; 3029 3030 if (nvme_fc_ctlr_active_on_rport(ctrl)) 3031 return -ENOTUNIQ; 3032 3033 dev_info(ctrl->ctrl.device, 3034 "NVME-FC{%d}: create association : host wwpn 0x%016llx " 3035 " rport wwpn 0x%016llx: NQN \"%s\"\n", 3036 ctrl->cnum, ctrl->lport->localport.port_name, 3037 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); 3038 3039 clear_bit(ASSOC_FAILED, &ctrl->flags); 3040 3041 /* 3042 * Create the admin queue 3043 */ 3044 3045 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, 3046 NVME_AQ_DEPTH); 3047 if (ret) 3048 goto out_free_queue; 3049 3050 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], 3051 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); 3052 if (ret) 3053 goto out_delete_hw_queue; 3054 3055 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 3056 if (ret) 3057 goto out_disconnect_admin_queue; 3058 3059 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); 3060 3061 /* 3062 * Check controller capabilities 3063 * 3064 * todo:- add code to check if ctrl attributes changed from 3065 * prior connection values 3066 */ 3067 3068 ret = nvme_enable_ctrl(&ctrl->ctrl); 3069 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3070 goto out_disconnect_admin_queue; 3071 3072 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; 3073 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << 3074 (ilog2(SZ_4K) - 9); 3075 3076 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 3077 3078 ret = nvme_init_identify(&ctrl->ctrl); 3079 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3080 goto out_disconnect_admin_queue; 3081 3082 /* sanity checks */ 3083 3084 /* FC-NVME does not have other data in the capsule */ 3085 if (ctrl->ctrl.icdoff) { 3086 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", 3087 ctrl->ctrl.icdoff); 3088 goto out_disconnect_admin_queue; 3089 } 3090 3091 /* FC-NVME supports normal SGL Data Block Descriptors */ 3092 3093 if (opts->queue_size > ctrl->ctrl.maxcmd) { 3094 /* warn if maxcmd is lower than queue_size */ 3095 dev_warn(ctrl->ctrl.device, 3096 "queue_size %zu > ctrl maxcmd %u, reducing " 3097 "to maxcmd\n", 3098 opts->queue_size, ctrl->ctrl.maxcmd); 3099 opts->queue_size = ctrl->ctrl.maxcmd; 3100 } 3101 3102 if (opts->queue_size > ctrl->ctrl.sqsize + 1) { 3103 /* warn if sqsize is lower than queue_size */ 3104 dev_warn(ctrl->ctrl.device, 3105 "queue_size %zu > ctrl sqsize %u, reducing " 3106 "to sqsize\n", 3107 opts->queue_size, ctrl->ctrl.sqsize + 1); 3108 opts->queue_size = ctrl->ctrl.sqsize + 1; 3109 } 3110 3111 ret = nvme_fc_init_aen_ops(ctrl); 3112 if (ret) 3113 goto out_term_aen_ops; 3114 3115 /* 3116 * Create the io queues 3117 */ 3118 3119 if (ctrl->ctrl.queue_count > 1) { 3120 if (!ctrl->ioq_live) 3121 ret = nvme_fc_create_io_queues(ctrl); 3122 else 3123 ret = nvme_fc_recreate_io_queues(ctrl); 3124 } 3125 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3126 goto out_term_aen_ops; 3127 3128 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 3129 3130 ctrl->ctrl.nr_reconnects = 0; 3131 3132 if (changed) 3133 nvme_start_ctrl(&ctrl->ctrl); 3134 3135 return 0; /* Success */ 3136 3137 out_term_aen_ops: 3138 nvme_fc_term_aen_ops(ctrl); 3139 out_disconnect_admin_queue: 3140 /* send a Disconnect(association) LS to fc-nvme target */ 3141 nvme_fc_xmt_disconnect_assoc(ctrl); 3142 spin_lock_irqsave(&ctrl->lock, flags); 3143 ctrl->association_id = 0; 3144 disls = ctrl->rcv_disconn; 3145 ctrl->rcv_disconn = NULL; 3146 spin_unlock_irqrestore(&ctrl->lock, flags); 3147 if (disls) 3148 nvme_fc_xmt_ls_rsp(disls); 3149 out_delete_hw_queue: 3150 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 3151 out_free_queue: 3152 nvme_fc_free_queue(&ctrl->queues[0]); 3153 clear_bit(ASSOC_ACTIVE, &ctrl->flags); 3154 nvme_fc_ctlr_inactive_on_rport(ctrl); 3155 3156 return ret; 3157 } 3158 3159 3160 /* 3161 * This routine stops operation of the controller on the host side. 3162 * On the host os stack side: Admin and IO queues are stopped, 3163 * outstanding ios on them terminated via FC ABTS. 3164 * On the link side: the association is terminated. 3165 */ 3166 static void 3167 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) 3168 { 3169 struct nvmefc_ls_rcv_op *disls = NULL; 3170 unsigned long flags; 3171 3172 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) 3173 return; 3174 3175 spin_lock_irqsave(&ctrl->lock, flags); 3176 set_bit(FCCTRL_TERMIO, &ctrl->flags); 3177 ctrl->iocnt = 0; 3178 spin_unlock_irqrestore(&ctrl->lock, flags); 3179 3180 __nvme_fc_abort_outstanding_ios(ctrl, false); 3181 3182 /* kill the aens as they are a separate path */ 3183 nvme_fc_abort_aen_ops(ctrl); 3184 3185 /* wait for all io that had to be aborted */ 3186 spin_lock_irq(&ctrl->lock); 3187 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); 3188 clear_bit(FCCTRL_TERMIO, &ctrl->flags); 3189 spin_unlock_irq(&ctrl->lock); 3190 3191 nvme_fc_term_aen_ops(ctrl); 3192 3193 /* 3194 * send a Disconnect(association) LS to fc-nvme target 3195 * Note: could have been sent at top of process, but 3196 * cleaner on link traffic if after the aborts complete. 3197 * Note: if association doesn't exist, association_id will be 0 3198 */ 3199 if (ctrl->association_id) 3200 nvme_fc_xmt_disconnect_assoc(ctrl); 3201 3202 spin_lock_irqsave(&ctrl->lock, flags); 3203 ctrl->association_id = 0; 3204 disls = ctrl->rcv_disconn; 3205 ctrl->rcv_disconn = NULL; 3206 spin_unlock_irqrestore(&ctrl->lock, flags); 3207 if (disls) 3208 /* 3209 * if a Disconnect Request was waiting for a response, send 3210 * now that all ABTS's have been issued (and are complete). 3211 */ 3212 nvme_fc_xmt_ls_rsp(disls); 3213 3214 if (ctrl->ctrl.tagset) { 3215 nvme_fc_delete_hw_io_queues(ctrl); 3216 nvme_fc_free_io_queues(ctrl); 3217 } 3218 3219 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 3220 nvme_fc_free_queue(&ctrl->queues[0]); 3221 3222 /* re-enable the admin_q so anything new can fast fail */ 3223 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 3224 3225 /* resume the io queues so that things will fast fail */ 3226 nvme_start_queues(&ctrl->ctrl); 3227 3228 nvme_fc_ctlr_inactive_on_rport(ctrl); 3229 } 3230 3231 static void 3232 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) 3233 { 3234 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 3235 3236 cancel_delayed_work_sync(&ctrl->connect_work); 3237 /* 3238 * kill the association on the link side. this will block 3239 * waiting for io to terminate 3240 */ 3241 nvme_fc_delete_association(ctrl); 3242 } 3243 3244 static void 3245 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) 3246 { 3247 struct nvme_fc_rport *rport = ctrl->rport; 3248 struct nvme_fc_remote_port *portptr = &rport->remoteport; 3249 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; 3250 bool recon = true; 3251 3252 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) 3253 return; 3254 3255 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3256 dev_info(ctrl->ctrl.device, 3257 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", 3258 ctrl->cnum, status); 3259 else if (time_after_eq(jiffies, rport->dev_loss_end)) 3260 recon = false; 3261 3262 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { 3263 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3264 dev_info(ctrl->ctrl.device, 3265 "NVME-FC{%d}: Reconnect attempt in %ld " 3266 "seconds\n", 3267 ctrl->cnum, recon_delay / HZ); 3268 else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) 3269 recon_delay = rport->dev_loss_end - jiffies; 3270 3271 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); 3272 } else { 3273 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3274 dev_warn(ctrl->ctrl.device, 3275 "NVME-FC{%d}: Max reconnect attempts (%d) " 3276 "reached.\n", 3277 ctrl->cnum, ctrl->ctrl.nr_reconnects); 3278 else 3279 dev_warn(ctrl->ctrl.device, 3280 "NVME-FC{%d}: dev_loss_tmo (%d) expired " 3281 "while waiting for remoteport connectivity.\n", 3282 ctrl->cnum, min_t(int, portptr->dev_loss_tmo, 3283 (ctrl->ctrl.opts->max_reconnects * 3284 ctrl->ctrl.opts->reconnect_delay))); 3285 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); 3286 } 3287 } 3288 3289 static void 3290 nvme_fc_reset_ctrl_work(struct work_struct *work) 3291 { 3292 struct nvme_fc_ctrl *ctrl = 3293 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); 3294 3295 nvme_stop_ctrl(&ctrl->ctrl); 3296 3297 /* will block will waiting for io to terminate */ 3298 nvme_fc_delete_association(ctrl); 3299 3300 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) 3301 dev_err(ctrl->ctrl.device, 3302 "NVME-FC{%d}: error_recovery: Couldn't change state " 3303 "to CONNECTING\n", ctrl->cnum); 3304 3305 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { 3306 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 3307 dev_err(ctrl->ctrl.device, 3308 "NVME-FC{%d}: failed to schedule connect " 3309 "after reset\n", ctrl->cnum); 3310 } else { 3311 flush_delayed_work(&ctrl->connect_work); 3312 } 3313 } else { 3314 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); 3315 } 3316 } 3317 3318 3319 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 3320 .name = "fc", 3321 .module = THIS_MODULE, 3322 .flags = NVME_F_FABRICS, 3323 .reg_read32 = nvmf_reg_read32, 3324 .reg_read64 = nvmf_reg_read64, 3325 .reg_write32 = nvmf_reg_write32, 3326 .free_ctrl = nvme_fc_nvme_ctrl_freed, 3327 .submit_async_event = nvme_fc_submit_async_event, 3328 .delete_ctrl = nvme_fc_delete_ctrl, 3329 .get_address = nvmf_get_address, 3330 }; 3331 3332 static void 3333 nvme_fc_connect_ctrl_work(struct work_struct *work) 3334 { 3335 int ret; 3336 3337 struct nvme_fc_ctrl *ctrl = 3338 container_of(to_delayed_work(work), 3339 struct nvme_fc_ctrl, connect_work); 3340 3341 ret = nvme_fc_create_association(ctrl); 3342 if (ret) 3343 nvme_fc_reconnect_or_delete(ctrl, ret); 3344 else 3345 dev_info(ctrl->ctrl.device, 3346 "NVME-FC{%d}: controller connect complete\n", 3347 ctrl->cnum); 3348 } 3349 3350 3351 static const struct blk_mq_ops nvme_fc_admin_mq_ops = { 3352 .queue_rq = nvme_fc_queue_rq, 3353 .complete = nvme_fc_complete_rq, 3354 .init_request = nvme_fc_init_request, 3355 .exit_request = nvme_fc_exit_request, 3356 .init_hctx = nvme_fc_init_admin_hctx, 3357 .timeout = nvme_fc_timeout, 3358 }; 3359 3360 3361 /* 3362 * Fails a controller request if it matches an existing controller 3363 * (association) with the same tuple: 3364 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> 3365 * 3366 * The ports don't need to be compared as they are intrinsically 3367 * already matched by the port pointers supplied. 3368 */ 3369 static bool 3370 nvme_fc_existing_controller(struct nvme_fc_rport *rport, 3371 struct nvmf_ctrl_options *opts) 3372 { 3373 struct nvme_fc_ctrl *ctrl; 3374 unsigned long flags; 3375 bool found = false; 3376 3377 spin_lock_irqsave(&rport->lock, flags); 3378 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 3379 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); 3380 if (found) 3381 break; 3382 } 3383 spin_unlock_irqrestore(&rport->lock, flags); 3384 3385 return found; 3386 } 3387 3388 static struct nvme_ctrl * 3389 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, 3390 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) 3391 { 3392 struct nvme_fc_ctrl *ctrl; 3393 unsigned long flags; 3394 int ret, idx, ctrl_loss_tmo; 3395 3396 if (!(rport->remoteport.port_role & 3397 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { 3398 ret = -EBADR; 3399 goto out_fail; 3400 } 3401 3402 if (!opts->duplicate_connect && 3403 nvme_fc_existing_controller(rport, opts)) { 3404 ret = -EALREADY; 3405 goto out_fail; 3406 } 3407 3408 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 3409 if (!ctrl) { 3410 ret = -ENOMEM; 3411 goto out_fail; 3412 } 3413 3414 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); 3415 if (idx < 0) { 3416 ret = -ENOSPC; 3417 goto out_free_ctrl; 3418 } 3419 3420 /* 3421 * if ctrl_loss_tmo is being enforced and the default reconnect delay 3422 * is being used, change to a shorter reconnect delay for FC. 3423 */ 3424 if (opts->max_reconnects != -1 && 3425 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && 3426 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { 3427 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; 3428 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; 3429 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, 3430 opts->reconnect_delay); 3431 } 3432 3433 ctrl->ctrl.opts = opts; 3434 ctrl->ctrl.nr_reconnects = 0; 3435 if (lport->dev) 3436 ctrl->ctrl.numa_node = dev_to_node(lport->dev); 3437 else 3438 ctrl->ctrl.numa_node = NUMA_NO_NODE; 3439 INIT_LIST_HEAD(&ctrl->ctrl_list); 3440 ctrl->lport = lport; 3441 ctrl->rport = rport; 3442 ctrl->dev = lport->dev; 3443 ctrl->cnum = idx; 3444 ctrl->ioq_live = false; 3445 init_waitqueue_head(&ctrl->ioabort_wait); 3446 3447 get_device(ctrl->dev); 3448 kref_init(&ctrl->ref); 3449 3450 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); 3451 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 3452 spin_lock_init(&ctrl->lock); 3453 3454 /* io queue count */ 3455 ctrl->ctrl.queue_count = min_t(unsigned int, 3456 opts->nr_io_queues, 3457 lport->ops->max_hw_queues); 3458 ctrl->ctrl.queue_count++; /* +1 for admin queue */ 3459 3460 ctrl->ctrl.sqsize = opts->queue_size - 1; 3461 ctrl->ctrl.kato = opts->kato; 3462 ctrl->ctrl.cntlid = 0xffff; 3463 3464 ret = -ENOMEM; 3465 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, 3466 sizeof(struct nvme_fc_queue), GFP_KERNEL); 3467 if (!ctrl->queues) 3468 goto out_free_ida; 3469 3470 nvme_fc_init_queue(ctrl, 0); 3471 3472 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); 3473 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; 3474 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 3475 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ 3476 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; 3477 ctrl->admin_tag_set.cmd_size = 3478 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 3479 ctrl->lport->ops->fcprqst_priv_sz); 3480 ctrl->admin_tag_set.driver_data = ctrl; 3481 ctrl->admin_tag_set.nr_hw_queues = 1; 3482 ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT; 3483 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; 3484 3485 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); 3486 if (ret) 3487 goto out_free_queues; 3488 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; 3489 3490 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); 3491 if (IS_ERR(ctrl->ctrl.fabrics_q)) { 3492 ret = PTR_ERR(ctrl->ctrl.fabrics_q); 3493 goto out_free_admin_tag_set; 3494 } 3495 3496 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 3497 if (IS_ERR(ctrl->ctrl.admin_q)) { 3498 ret = PTR_ERR(ctrl->ctrl.admin_q); 3499 goto out_cleanup_fabrics_q; 3500 } 3501 3502 /* 3503 * Would have been nice to init io queues tag set as well. 3504 * However, we require interaction from the controller 3505 * for max io queue count before we can do so. 3506 * Defer this to the connect path. 3507 */ 3508 3509 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); 3510 if (ret) 3511 goto out_cleanup_admin_q; 3512 3513 /* at this point, teardown path changes to ref counting on nvme ctrl */ 3514 3515 spin_lock_irqsave(&rport->lock, flags); 3516 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); 3517 spin_unlock_irqrestore(&rport->lock, flags); 3518 3519 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || 3520 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 3521 dev_err(ctrl->ctrl.device, 3522 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); 3523 goto fail_ctrl; 3524 } 3525 3526 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 3527 dev_err(ctrl->ctrl.device, 3528 "NVME-FC{%d}: failed to schedule initial connect\n", 3529 ctrl->cnum); 3530 goto fail_ctrl; 3531 } 3532 3533 flush_delayed_work(&ctrl->connect_work); 3534 3535 dev_info(ctrl->ctrl.device, 3536 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", 3537 ctrl->cnum, ctrl->ctrl.opts->subsysnqn); 3538 3539 return &ctrl->ctrl; 3540 3541 fail_ctrl: 3542 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); 3543 cancel_work_sync(&ctrl->ctrl.reset_work); 3544 cancel_delayed_work_sync(&ctrl->connect_work); 3545 3546 ctrl->ctrl.opts = NULL; 3547 3548 /* initiate nvme ctrl ref counting teardown */ 3549 nvme_uninit_ctrl(&ctrl->ctrl); 3550 3551 /* Remove core ctrl ref. */ 3552 nvme_put_ctrl(&ctrl->ctrl); 3553 3554 /* as we're past the point where we transition to the ref 3555 * counting teardown path, if we return a bad pointer here, 3556 * the calling routine, thinking it's prior to the 3557 * transition, will do an rport put. Since the teardown 3558 * path also does a rport put, we do an extra get here to 3559 * so proper order/teardown happens. 3560 */ 3561 nvme_fc_rport_get(rport); 3562 3563 return ERR_PTR(-EIO); 3564 3565 out_cleanup_admin_q: 3566 blk_cleanup_queue(ctrl->ctrl.admin_q); 3567 out_cleanup_fabrics_q: 3568 blk_cleanup_queue(ctrl->ctrl.fabrics_q); 3569 out_free_admin_tag_set: 3570 blk_mq_free_tag_set(&ctrl->admin_tag_set); 3571 out_free_queues: 3572 kfree(ctrl->queues); 3573 out_free_ida: 3574 put_device(ctrl->dev); 3575 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); 3576 out_free_ctrl: 3577 kfree(ctrl); 3578 out_fail: 3579 /* exit via here doesn't follow ctlr ref points */ 3580 return ERR_PTR(ret); 3581 } 3582 3583 3584 struct nvmet_fc_traddr { 3585 u64 nn; 3586 u64 pn; 3587 }; 3588 3589 static int 3590 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 3591 { 3592 u64 token64; 3593 3594 if (match_u64(sstr, &token64)) 3595 return -EINVAL; 3596 *val = token64; 3597 3598 return 0; 3599 } 3600 3601 /* 3602 * This routine validates and extracts the WWN's from the TRADDR string. 3603 * As kernel parsers need the 0x to determine number base, universally 3604 * build string to parse with 0x prefix before parsing name strings. 3605 */ 3606 static int 3607 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 3608 { 3609 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 3610 substring_t wwn = { name, &name[sizeof(name)-1] }; 3611 int nnoffset, pnoffset; 3612 3613 /* validate if string is one of the 2 allowed formats */ 3614 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 3615 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 3616 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 3617 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 3618 nnoffset = NVME_FC_TRADDR_OXNNLEN; 3619 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 3620 NVME_FC_TRADDR_OXNNLEN; 3621 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 3622 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 3623 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 3624 "pn-", NVME_FC_TRADDR_NNLEN))) { 3625 nnoffset = NVME_FC_TRADDR_NNLEN; 3626 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 3627 } else 3628 goto out_einval; 3629 3630 name[0] = '0'; 3631 name[1] = 'x'; 3632 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 3633 3634 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 3635 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 3636 goto out_einval; 3637 3638 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 3639 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 3640 goto out_einval; 3641 3642 return 0; 3643 3644 out_einval: 3645 pr_warn("%s: bad traddr string\n", __func__); 3646 return -EINVAL; 3647 } 3648 3649 static struct nvme_ctrl * 3650 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) 3651 { 3652 struct nvme_fc_lport *lport; 3653 struct nvme_fc_rport *rport; 3654 struct nvme_ctrl *ctrl; 3655 struct nvmet_fc_traddr laddr = { 0L, 0L }; 3656 struct nvmet_fc_traddr raddr = { 0L, 0L }; 3657 unsigned long flags; 3658 int ret; 3659 3660 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); 3661 if (ret || !raddr.nn || !raddr.pn) 3662 return ERR_PTR(-EINVAL); 3663 3664 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); 3665 if (ret || !laddr.nn || !laddr.pn) 3666 return ERR_PTR(-EINVAL); 3667 3668 /* find the host and remote ports to connect together */ 3669 spin_lock_irqsave(&nvme_fc_lock, flags); 3670 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3671 if (lport->localport.node_name != laddr.nn || 3672 lport->localport.port_name != laddr.pn || 3673 lport->localport.port_state != FC_OBJSTATE_ONLINE) 3674 continue; 3675 3676 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3677 if (rport->remoteport.node_name != raddr.nn || 3678 rport->remoteport.port_name != raddr.pn || 3679 rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 3680 continue; 3681 3682 /* if fail to get reference fall through. Will error */ 3683 if (!nvme_fc_rport_get(rport)) 3684 break; 3685 3686 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3687 3688 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); 3689 if (IS_ERR(ctrl)) 3690 nvme_fc_rport_put(rport); 3691 return ctrl; 3692 } 3693 } 3694 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3695 3696 pr_warn("%s: %s - %s combination not found\n", 3697 __func__, opts->traddr, opts->host_traddr); 3698 return ERR_PTR(-ENOENT); 3699 } 3700 3701 3702 static struct nvmf_transport_ops nvme_fc_transport = { 3703 .name = "fc", 3704 .module = THIS_MODULE, 3705 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, 3706 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, 3707 .create_ctrl = nvme_fc_create_ctrl, 3708 }; 3709 3710 /* Arbitrary successive failures max. With lots of subsystems could be high */ 3711 #define DISCOVERY_MAX_FAIL 20 3712 3713 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev, 3714 struct device_attribute *attr, const char *buf, size_t count) 3715 { 3716 unsigned long flags; 3717 LIST_HEAD(local_disc_list); 3718 struct nvme_fc_lport *lport; 3719 struct nvme_fc_rport *rport; 3720 int failcnt = 0; 3721 3722 spin_lock_irqsave(&nvme_fc_lock, flags); 3723 restart: 3724 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3725 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3726 if (!nvme_fc_lport_get(lport)) 3727 continue; 3728 if (!nvme_fc_rport_get(rport)) { 3729 /* 3730 * This is a temporary condition. Upon restart 3731 * this rport will be gone from the list. 3732 * 3733 * Revert the lport put and retry. Anything 3734 * added to the list already will be skipped (as 3735 * they are no longer list_empty). Loops should 3736 * resume at rports that were not yet seen. 3737 */ 3738 nvme_fc_lport_put(lport); 3739 3740 if (failcnt++ < DISCOVERY_MAX_FAIL) 3741 goto restart; 3742 3743 pr_err("nvme_discovery: too many reference " 3744 "failures\n"); 3745 goto process_local_list; 3746 } 3747 if (list_empty(&rport->disc_list)) 3748 list_add_tail(&rport->disc_list, 3749 &local_disc_list); 3750 } 3751 } 3752 3753 process_local_list: 3754 while (!list_empty(&local_disc_list)) { 3755 rport = list_first_entry(&local_disc_list, 3756 struct nvme_fc_rport, disc_list); 3757 list_del_init(&rport->disc_list); 3758 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3759 3760 lport = rport->lport; 3761 /* signal discovery. Won't hurt if it repeats */ 3762 nvme_fc_signal_discovery_scan(lport, rport); 3763 nvme_fc_rport_put(rport); 3764 nvme_fc_lport_put(lport); 3765 3766 spin_lock_irqsave(&nvme_fc_lock, flags); 3767 } 3768 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3769 3770 return count; 3771 } 3772 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store); 3773 3774 static struct attribute *nvme_fc_attrs[] = { 3775 &dev_attr_nvme_discovery.attr, 3776 NULL 3777 }; 3778 3779 static struct attribute_group nvme_fc_attr_group = { 3780 .attrs = nvme_fc_attrs, 3781 }; 3782 3783 static const struct attribute_group *nvme_fc_attr_groups[] = { 3784 &nvme_fc_attr_group, 3785 NULL 3786 }; 3787 3788 static struct class fc_class = { 3789 .name = "fc", 3790 .dev_groups = nvme_fc_attr_groups, 3791 .owner = THIS_MODULE, 3792 }; 3793 3794 static int __init nvme_fc_init_module(void) 3795 { 3796 int ret; 3797 3798 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); 3799 if (!nvme_fc_wq) 3800 return -ENOMEM; 3801 3802 /* 3803 * NOTE: 3804 * It is expected that in the future the kernel will combine 3805 * the FC-isms that are currently under scsi and now being 3806 * added to by NVME into a new standalone FC class. The SCSI 3807 * and NVME protocols and their devices would be under this 3808 * new FC class. 3809 * 3810 * As we need something to post FC-specific udev events to, 3811 * specifically for nvme probe events, start by creating the 3812 * new device class. When the new standalone FC class is 3813 * put in place, this code will move to a more generic 3814 * location for the class. 3815 */ 3816 ret = class_register(&fc_class); 3817 if (ret) { 3818 pr_err("couldn't register class fc\n"); 3819 goto out_destroy_wq; 3820 } 3821 3822 /* 3823 * Create a device for the FC-centric udev events 3824 */ 3825 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL, 3826 "fc_udev_device"); 3827 if (IS_ERR(fc_udev_device)) { 3828 pr_err("couldn't create fc_udev device!\n"); 3829 ret = PTR_ERR(fc_udev_device); 3830 goto out_destroy_class; 3831 } 3832 3833 ret = nvmf_register_transport(&nvme_fc_transport); 3834 if (ret) 3835 goto out_destroy_device; 3836 3837 return 0; 3838 3839 out_destroy_device: 3840 device_destroy(&fc_class, MKDEV(0, 0)); 3841 out_destroy_class: 3842 class_unregister(&fc_class); 3843 out_destroy_wq: 3844 destroy_workqueue(nvme_fc_wq); 3845 3846 return ret; 3847 } 3848 3849 static void 3850 nvme_fc_delete_controllers(struct nvme_fc_rport *rport) 3851 { 3852 struct nvme_fc_ctrl *ctrl; 3853 3854 spin_lock(&rport->lock); 3855 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 3856 dev_warn(ctrl->ctrl.device, 3857 "NVME-FC{%d}: transport unloading: deleting ctrl\n", 3858 ctrl->cnum); 3859 nvme_delete_ctrl(&ctrl->ctrl); 3860 } 3861 spin_unlock(&rport->lock); 3862 } 3863 3864 static void 3865 nvme_fc_cleanup_for_unload(void) 3866 { 3867 struct nvme_fc_lport *lport; 3868 struct nvme_fc_rport *rport; 3869 3870 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3871 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3872 nvme_fc_delete_controllers(rport); 3873 } 3874 } 3875 } 3876 3877 static void __exit nvme_fc_exit_module(void) 3878 { 3879 unsigned long flags; 3880 bool need_cleanup = false; 3881 3882 spin_lock_irqsave(&nvme_fc_lock, flags); 3883 nvme_fc_waiting_to_unload = true; 3884 if (!list_empty(&nvme_fc_lport_list)) { 3885 need_cleanup = true; 3886 nvme_fc_cleanup_for_unload(); 3887 } 3888 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3889 if (need_cleanup) { 3890 pr_info("%s: waiting for ctlr deletes\n", __func__); 3891 wait_for_completion(&nvme_fc_unload_proceed); 3892 pr_info("%s: ctrl deletes complete\n", __func__); 3893 } 3894 3895 nvmf_unregister_transport(&nvme_fc_transport); 3896 3897 ida_destroy(&nvme_fc_local_port_cnt); 3898 ida_destroy(&nvme_fc_ctrl_cnt); 3899 3900 device_destroy(&fc_class, MKDEV(0, 0)); 3901 class_unregister(&fc_class); 3902 destroy_workqueue(nvme_fc_wq); 3903 } 3904 3905 module_init(nvme_fc_init_module); 3906 module_exit(nvme_fc_exit_module); 3907 3908 MODULE_LICENSE("GPL v2"); 3909