Lines Matching +full:dev +full:- +full:ctrl
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/blk-cgroup.h>
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
35 struct nvme_fc_ctrl *ctrl; member
36 struct device *dev; member
65 struct list_head lsreq_list; /* rport->ls_req_list */
77 struct list_head lsrcv_list; /* rport->ls_rcv_list */
99 struct nvme_fc_ctrl *ctrl; member
124 struct device *dev; /* physical device for dma */ member
133 struct list_head endp_list; /* for lport->endp_list */
138 struct device *dev; /* physical device for dma */ member
147 /* fc_ctrl flags values - specified as bit positions */
155 struct device *dev; member
164 struct list_head ctrl_list; /* rport->ctrl_list */
179 struct nvme_ctrl ctrl; member
183 to_fc_ctrl(struct nvme_ctrl *ctrl) in to_fc_ctrl() argument
185 return container_of(ctrl, struct nvme_fc_ctrl, ctrl); in to_fc_ctrl()
224 * These items are short-term. They will eventually be moved into
231 /* *********************** FC-NVME Port Management ************************ */
246 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); in nvme_fc_free_lport()
247 WARN_ON(!list_empty(&lport->endp_list)); in nvme_fc_free_lport()
251 list_del(&lport->port_list); in nvme_fc_free_lport()
254 ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num); in nvme_fc_free_lport()
255 ida_destroy(&lport->endp_cnt); in nvme_fc_free_lport()
257 put_device(lport->dev); in nvme_fc_free_lport()
265 kref_put(&lport->ref, nvme_fc_free_lport); in nvme_fc_lport_put()
271 return kref_get_unless_zero(&lport->ref); in nvme_fc_lport_get()
278 struct device *dev) in nvme_fc_attach_to_unreg_lport() argument
286 if (lport->localport.node_name != pinfo->node_name || in nvme_fc_attach_to_unreg_lport()
287 lport->localport.port_name != pinfo->port_name) in nvme_fc_attach_to_unreg_lport()
290 if (lport->dev != dev) { in nvme_fc_attach_to_unreg_lport()
291 lport = ERR_PTR(-EXDEV); in nvme_fc_attach_to_unreg_lport()
295 if (lport->localport.port_state != FC_OBJSTATE_DELETED) { in nvme_fc_attach_to_unreg_lport()
296 lport = ERR_PTR(-EEXIST); in nvme_fc_attach_to_unreg_lport()
311 lport->ops = ops; in nvme_fc_attach_to_unreg_lport()
312 lport->localport.port_role = pinfo->port_role; in nvme_fc_attach_to_unreg_lport()
313 lport->localport.port_id = pinfo->port_id; in nvme_fc_attach_to_unreg_lport()
314 lport->localport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_attach_to_unreg_lport()
330 * nvme_fc_register_localport - transport entry point called by an
335 * @dev: physical hardware device node port corresponds to. Will be
344 * (ex: -ENXIO) upon failure.
349 struct device *dev, in nvme_fc_register_localport() argument
356 if (!template->localport_delete || !template->remoteport_delete || in nvme_fc_register_localport()
357 !template->ls_req || !template->fcp_io || in nvme_fc_register_localport()
358 !template->ls_abort || !template->fcp_abort || in nvme_fc_register_localport()
359 !template->max_hw_queues || !template->max_sgl_segments || in nvme_fc_register_localport()
360 !template->max_dif_sgl_segments || !template->dma_boundary) { in nvme_fc_register_localport()
361 ret = -EINVAL; in nvme_fc_register_localport()
369 * expired, we can simply re-enable the localport. Remoteports in nvme_fc_register_localport()
372 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev); in nvme_fc_register_localport()
381 *portptr = &newrec->localport; in nvme_fc_register_localport()
385 /* nothing found - allocate a new localport struct */ in nvme_fc_register_localport()
387 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), in nvme_fc_register_localport()
390 ret = -ENOMEM; in nvme_fc_register_localport()
396 ret = -ENOSPC; in nvme_fc_register_localport()
400 if (!get_device(dev) && dev) { in nvme_fc_register_localport()
401 ret = -ENODEV; in nvme_fc_register_localport()
405 INIT_LIST_HEAD(&newrec->port_list); in nvme_fc_register_localport()
406 INIT_LIST_HEAD(&newrec->endp_list); in nvme_fc_register_localport()
407 kref_init(&newrec->ref); in nvme_fc_register_localport()
408 atomic_set(&newrec->act_rport_cnt, 0); in nvme_fc_register_localport()
409 newrec->ops = template; in nvme_fc_register_localport()
410 newrec->dev = dev; in nvme_fc_register_localport()
411 ida_init(&newrec->endp_cnt); in nvme_fc_register_localport()
412 if (template->local_priv_sz) in nvme_fc_register_localport()
413 newrec->localport.private = &newrec[1]; in nvme_fc_register_localport()
415 newrec->localport.private = NULL; in nvme_fc_register_localport()
416 newrec->localport.node_name = pinfo->node_name; in nvme_fc_register_localport()
417 newrec->localport.port_name = pinfo->port_name; in nvme_fc_register_localport()
418 newrec->localport.port_role = pinfo->port_role; in nvme_fc_register_localport()
419 newrec->localport.port_id = pinfo->port_id; in nvme_fc_register_localport()
420 newrec->localport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_register_localport()
421 newrec->localport.port_num = idx; in nvme_fc_register_localport()
424 list_add_tail(&newrec->port_list, &nvme_fc_lport_list); in nvme_fc_register_localport()
427 if (dev) in nvme_fc_register_localport()
428 dma_set_seg_boundary(dev, template->dma_boundary); in nvme_fc_register_localport()
430 *portptr = &newrec->localport; in nvme_fc_register_localport()
445 * nvme_fc_unregister_localport - transport entry point called by an
452 * (ex: -ENXIO) upon failure.
461 return -EINVAL; in nvme_fc_unregister_localport()
465 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_unregister_localport()
467 return -EINVAL; in nvme_fc_unregister_localport()
469 portptr->port_state = FC_OBJSTATE_DELETED; in nvme_fc_unregister_localport()
473 if (atomic_read(&lport->act_rport_cnt) == 0) in nvme_fc_unregister_localport()
474 lport->ops->localport_delete(&lport->localport); in nvme_fc_unregister_localport()
483 * TRADDR strings, per FC-NVME are fixed format:
484 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
487 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
500 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) in nvme_fc_signal_discovery_scan()
504 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", in nvme_fc_signal_discovery_scan()
505 lport->localport.node_name, lport->localport.port_name); in nvme_fc_signal_discovery_scan()
507 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", in nvme_fc_signal_discovery_scan()
508 rport->remoteport.node_name, rport->remoteport.port_name); in nvme_fc_signal_discovery_scan()
509 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); in nvme_fc_signal_discovery_scan()
518 localport_to_lport(rport->remoteport.localport); in nvme_fc_free_rport()
521 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); in nvme_fc_free_rport()
522 WARN_ON(!list_empty(&rport->ctrl_list)); in nvme_fc_free_rport()
526 list_del(&rport->endp_list); in nvme_fc_free_rport()
529 WARN_ON(!list_empty(&rport->disc_list)); in nvme_fc_free_rport()
530 ida_free(&lport->endp_cnt, rport->remoteport.port_num); in nvme_fc_free_rport()
540 kref_put(&rport->ref, nvme_fc_free_rport); in nvme_fc_rport_put()
546 return kref_get_unless_zero(&rport->ref); in nvme_fc_rport_get()
550 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) in nvme_fc_resume_controller() argument
552 switch (nvme_ctrl_state(&ctrl->ctrl)) { in nvme_fc_resume_controller()
559 dev_info(ctrl->ctrl.device, in nvme_fc_resume_controller()
560 "NVME-FC{%d}: connectivity re-established. " in nvme_fc_resume_controller()
561 "Attempting reconnect\n", ctrl->cnum); in nvme_fc_resume_controller()
563 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); in nvme_fc_resume_controller()
575 /* no action to take - let it delete */ in nvme_fc_resume_controller()
585 struct nvme_fc_ctrl *ctrl; in nvme_fc_attach_to_suspended_rport() local
590 list_for_each_entry(rport, &lport->endp_list, endp_list) { in nvme_fc_attach_to_suspended_rport()
591 if (rport->remoteport.node_name != pinfo->node_name || in nvme_fc_attach_to_suspended_rport()
592 rport->remoteport.port_name != pinfo->port_name) in nvme_fc_attach_to_suspended_rport()
596 rport = ERR_PTR(-ENOLCK); in nvme_fc_attach_to_suspended_rport()
602 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
605 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { in nvme_fc_attach_to_suspended_rport()
607 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
609 return ERR_PTR(-ESTALE); in nvme_fc_attach_to_suspended_rport()
612 rport->remoteport.port_role = pinfo->port_role; in nvme_fc_attach_to_suspended_rport()
613 rport->remoteport.port_id = pinfo->port_id; in nvme_fc_attach_to_suspended_rport()
614 rport->remoteport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_attach_to_suspended_rport()
615 rport->dev_loss_end = 0; in nvme_fc_attach_to_suspended_rport()
621 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) in nvme_fc_attach_to_suspended_rport()
622 nvme_fc_resume_controller(ctrl); in nvme_fc_attach_to_suspended_rport()
624 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
641 if (pinfo->dev_loss_tmo) in __nvme_fc_set_dev_loss_tmo()
642 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; in __nvme_fc_set_dev_loss_tmo()
644 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; in __nvme_fc_set_dev_loss_tmo()
648 * nvme_fc_register_remoteport - transport entry point called by an
661 * (ex: -ENXIO) upon failure.
674 ret = -ESHUTDOWN; in nvme_fc_register_remoteport()
695 *portptr = &newrec->remoteport; in nvme_fc_register_remoteport()
699 /* nothing found - allocate a new remoteport struct */ in nvme_fc_register_remoteport()
701 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), in nvme_fc_register_remoteport()
704 ret = -ENOMEM; in nvme_fc_register_remoteport()
708 idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL); in nvme_fc_register_remoteport()
710 ret = -ENOSPC; in nvme_fc_register_remoteport()
714 INIT_LIST_HEAD(&newrec->endp_list); in nvme_fc_register_remoteport()
715 INIT_LIST_HEAD(&newrec->ctrl_list); in nvme_fc_register_remoteport()
716 INIT_LIST_HEAD(&newrec->ls_req_list); in nvme_fc_register_remoteport()
717 INIT_LIST_HEAD(&newrec->disc_list); in nvme_fc_register_remoteport()
718 kref_init(&newrec->ref); in nvme_fc_register_remoteport()
719 atomic_set(&newrec->act_ctrl_cnt, 0); in nvme_fc_register_remoteport()
720 spin_lock_init(&newrec->lock); in nvme_fc_register_remoteport()
721 newrec->remoteport.localport = &lport->localport; in nvme_fc_register_remoteport()
722 INIT_LIST_HEAD(&newrec->ls_rcv_list); in nvme_fc_register_remoteport()
723 newrec->dev = lport->dev; in nvme_fc_register_remoteport()
724 newrec->lport = lport; in nvme_fc_register_remoteport()
725 if (lport->ops->remote_priv_sz) in nvme_fc_register_remoteport()
726 newrec->remoteport.private = &newrec[1]; in nvme_fc_register_remoteport()
728 newrec->remoteport.private = NULL; in nvme_fc_register_remoteport()
729 newrec->remoteport.port_role = pinfo->port_role; in nvme_fc_register_remoteport()
730 newrec->remoteport.node_name = pinfo->node_name; in nvme_fc_register_remoteport()
731 newrec->remoteport.port_name = pinfo->port_name; in nvme_fc_register_remoteport()
732 newrec->remoteport.port_id = pinfo->port_id; in nvme_fc_register_remoteport()
733 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_register_remoteport()
734 newrec->remoteport.port_num = idx; in nvme_fc_register_remoteport()
736 INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work); in nvme_fc_register_remoteport()
739 list_add_tail(&newrec->endp_list, &lport->endp_list); in nvme_fc_register_remoteport()
744 *portptr = &newrec->remoteport; in nvme_fc_register_remoteport()
764 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_abort_lsops()
766 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { in nvme_fc_abort_lsops()
767 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { in nvme_fc_abort_lsops()
768 lsop->flags |= FCOP_FLAGS_TERMIO; in nvme_fc_abort_lsops()
769 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_abort_lsops()
770 rport->lport->ops->ls_abort(&rport->lport->localport, in nvme_fc_abort_lsops()
771 &rport->remoteport, in nvme_fc_abort_lsops()
772 &lsop->ls_req); in nvme_fc_abort_lsops()
776 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_abort_lsops()
782 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_connectivity_loss() argument
784 dev_info(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
785 "NVME-FC{%d}: controller connectivity lost. Awaiting " in nvme_fc_ctrl_connectivity_loss()
786 "Reconnect", ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
788 set_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_ctrl_connectivity_loss()
789 nvme_reset_ctrl(&ctrl->ctrl); in nvme_fc_ctrl_connectivity_loss()
793 * nvme_fc_unregister_remoteport - transport entry point called by an
801 * (ex: -ENXIO) upon failure.
807 struct nvme_fc_ctrl *ctrl; in nvme_fc_unregister_remoteport() local
811 return -EINVAL; in nvme_fc_unregister_remoteport()
813 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_unregister_remoteport()
815 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_unregister_remoteport()
816 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_unregister_remoteport()
817 return -EINVAL; in nvme_fc_unregister_remoteport()
819 portptr->port_state = FC_OBJSTATE_DELETED; in nvme_fc_unregister_remoteport()
821 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); in nvme_fc_unregister_remoteport()
823 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_unregister_remoteport()
824 /* if dev_loss_tmo==0, dev loss is immediate */ in nvme_fc_unregister_remoteport()
825 if (!portptr->dev_loss_tmo) { in nvme_fc_unregister_remoteport()
826 dev_warn(ctrl->ctrl.device, in nvme_fc_unregister_remoteport()
827 "NVME-FC{%d}: controller connectivity lost.\n", in nvme_fc_unregister_remoteport()
828 ctrl->cnum); in nvme_fc_unregister_remoteport()
829 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_unregister_remoteport()
831 nvme_fc_ctrl_connectivity_loss(ctrl); in nvme_fc_unregister_remoteport()
834 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_unregister_remoteport()
838 if (atomic_read(&rport->act_ctrl_cnt) == 0) in nvme_fc_unregister_remoteport()
839 rport->lport->ops->remoteport_delete(portptr); in nvme_fc_unregister_remoteport()
853 * nvme_fc_rescan_remoteport - transport entry point called by an
865 nvme_fc_signal_discovery_scan(rport->lport, rport); in nvme_fc_rescan_remoteport()
876 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
878 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_set_remoteport_devloss()
879 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
880 return -EINVAL; in nvme_fc_set_remoteport_devloss()
884 rport->remoteport.dev_loss_tmo = dev_loss_tmo; in nvme_fc_set_remoteport_devloss()
886 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
893 /* *********************** FC-NVME DMA Handling **************************** */
902 * Wrapper all the dma routines and check the dev pointer.
912 fc_dma_map_single(struct device *dev, void *ptr, size_t size, in fc_dma_map_single() argument
915 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; in fc_dma_map_single()
919 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) in fc_dma_mapping_error() argument
921 return dev ? dma_mapping_error(dev, dma_addr) : 0; in fc_dma_mapping_error()
925 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, in fc_dma_unmap_single() argument
928 if (dev) in fc_dma_unmap_single()
929 dma_unmap_single(dev, addr, size, dir); in fc_dma_unmap_single()
933 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, in fc_dma_sync_single_for_cpu() argument
936 if (dev) in fc_dma_sync_single_for_cpu()
937 dma_sync_single_for_cpu(dev, addr, size, dir); in fc_dma_sync_single_for_cpu()
941 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, in fc_dma_sync_single_for_device() argument
944 if (dev) in fc_dma_sync_single_for_device()
945 dma_sync_single_for_device(dev, addr, size, dir); in fc_dma_sync_single_for_device()
958 s->dma_address = 0L; in fc_map_sg()
960 s->dma_length = s->length; in fc_map_sg()
967 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, in fc_dma_map_sg() argument
970 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); in fc_dma_map_sg()
974 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, in fc_dma_unmap_sg() argument
977 if (dev) in fc_dma_unmap_sg()
978 dma_unmap_sg(dev, sg, nents, dir); in fc_dma_unmap_sg()
981 /* *********************** FC-NVME LS Handling **************************** */
986 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
991 struct nvme_fc_rport *rport = lsop->rport; in __nvme_fc_finish_ls_req()
992 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvme_fc_finish_ls_req()
995 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_finish_ls_req()
997 if (!lsop->req_queued) { in __nvme_fc_finish_ls_req()
998 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_finish_ls_req()
1002 list_del(&lsop->lsreq_list); in __nvme_fc_finish_ls_req()
1004 lsop->req_queued = false; in __nvme_fc_finish_ls_req()
1006 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_finish_ls_req()
1008 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, in __nvme_fc_finish_ls_req()
1009 (lsreq->rqstlen + lsreq->rsplen), in __nvme_fc_finish_ls_req()
1020 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvme_fc_send_ls_req()
1024 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in __nvme_fc_send_ls_req()
1025 return -ECONNREFUSED; in __nvme_fc_send_ls_req()
1028 return -ESHUTDOWN; in __nvme_fc_send_ls_req()
1030 lsreq->done = done; in __nvme_fc_send_ls_req()
1031 lsop->rport = rport; in __nvme_fc_send_ls_req()
1032 lsop->req_queued = false; in __nvme_fc_send_ls_req()
1033 INIT_LIST_HEAD(&lsop->lsreq_list); in __nvme_fc_send_ls_req()
1034 init_completion(&lsop->ls_done); in __nvme_fc_send_ls_req()
1036 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, in __nvme_fc_send_ls_req()
1037 lsreq->rqstlen + lsreq->rsplen, in __nvme_fc_send_ls_req()
1039 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { in __nvme_fc_send_ls_req()
1040 ret = -EFAULT; in __nvme_fc_send_ls_req()
1043 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; in __nvme_fc_send_ls_req()
1045 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_send_ls_req()
1047 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); in __nvme_fc_send_ls_req()
1049 lsop->req_queued = true; in __nvme_fc_send_ls_req()
1051 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_send_ls_req()
1053 ret = rport->lport->ops->ls_req(&rport->lport->localport, in __nvme_fc_send_ls_req()
1054 &rport->remoteport, lsreq); in __nvme_fc_send_ls_req()
1061 lsop->ls_error = ret; in __nvme_fc_send_ls_req()
1062 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_send_ls_req()
1063 lsop->req_queued = false; in __nvme_fc_send_ls_req()
1064 list_del(&lsop->lsreq_list); in __nvme_fc_send_ls_req()
1065 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_send_ls_req()
1066 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, in __nvme_fc_send_ls_req()
1067 (lsreq->rqstlen + lsreq->rsplen), in __nvme_fc_send_ls_req()
1080 lsop->ls_error = status; in nvme_fc_send_ls_req_done()
1081 complete(&lsop->ls_done); in nvme_fc_send_ls_req_done()
1087 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in nvme_fc_send_ls_req()
1088 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; in nvme_fc_send_ls_req()
1100 wait_for_completion(&lsop->ls_done); in nvme_fc_send_ls_req()
1104 ret = lsop->ls_error; in nvme_fc_send_ls_req()
1111 if (rjt->w0.ls_cmd == FCNVME_LS_RJT) in nvme_fc_send_ls_req()
1112 return -ENXIO; in nvme_fc_send_ls_req()
1128 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, in nvme_fc_connect_admin_queue() argument
1140 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_admin_queue()
1142 dev_info(ctrl->ctrl.device, in nvme_fc_connect_admin_queue()
1143 "NVME-FC{%d}: send Create Association failed: ENOMEM\n", in nvme_fc_connect_admin_queue()
1144 ctrl->cnum); in nvme_fc_connect_admin_queue()
1145 ret = -ENOMEM; in nvme_fc_connect_admin_queue()
1151 lsreq = &lsop->ls_req; in nvme_fc_connect_admin_queue()
1152 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_admin_queue()
1153 lsreq->private = &assoc_acc[1]; in nvme_fc_connect_admin_queue()
1155 lsreq->private = NULL; in nvme_fc_connect_admin_queue()
1157 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; in nvme_fc_connect_admin_queue()
1158 assoc_rqst->desc_list_len = in nvme_fc_connect_admin_queue()
1161 assoc_rqst->assoc_cmd.desc_tag = in nvme_fc_connect_admin_queue()
1163 assoc_rqst->assoc_cmd.desc_len = in nvme_fc_connect_admin_queue()
1167 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); in nvme_fc_connect_admin_queue()
1168 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); in nvme_fc_connect_admin_queue()
1170 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); in nvme_fc_connect_admin_queue()
1171 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); in nvme_fc_connect_admin_queue()
1172 strscpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, in nvme_fc_connect_admin_queue()
1173 sizeof(assoc_rqst->assoc_cmd.hostnqn)); in nvme_fc_connect_admin_queue()
1174 strscpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, in nvme_fc_connect_admin_queue()
1175 sizeof(assoc_rqst->assoc_cmd.subnqn)); in nvme_fc_connect_admin_queue()
1177 lsop->queue = queue; in nvme_fc_connect_admin_queue()
1178 lsreq->rqstaddr = assoc_rqst; in nvme_fc_connect_admin_queue()
1179 lsreq->rqstlen = sizeof(*assoc_rqst); in nvme_fc_connect_admin_queue()
1180 lsreq->rspaddr = assoc_acc; in nvme_fc_connect_admin_queue()
1181 lsreq->rsplen = sizeof(*assoc_acc); in nvme_fc_connect_admin_queue()
1182 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; in nvme_fc_connect_admin_queue()
1184 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_admin_queue()
1191 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) in nvme_fc_connect_admin_queue()
1193 else if (assoc_acc->hdr.desc_list_len != in nvme_fc_connect_admin_queue()
1197 else if (assoc_acc->hdr.rqst.desc_tag != in nvme_fc_connect_admin_queue()
1200 else if (assoc_acc->hdr.rqst.desc_len != in nvme_fc_connect_admin_queue()
1203 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) in nvme_fc_connect_admin_queue()
1205 else if (assoc_acc->associd.desc_tag != in nvme_fc_connect_admin_queue()
1208 else if (assoc_acc->associd.desc_len != in nvme_fc_connect_admin_queue()
1212 else if (assoc_acc->connectid.desc_tag != in nvme_fc_connect_admin_queue()
1215 else if (assoc_acc->connectid.desc_len != in nvme_fc_connect_admin_queue()
1220 ret = -EBADF; in nvme_fc_connect_admin_queue()
1221 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1223 queue->qnum, validation_errors[fcret]); in nvme_fc_connect_admin_queue()
1225 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1226 ctrl->association_id = in nvme_fc_connect_admin_queue()
1227 be64_to_cpu(assoc_acc->associd.association_id); in nvme_fc_connect_admin_queue()
1228 queue->connection_id = in nvme_fc_connect_admin_queue()
1229 be64_to_cpu(assoc_acc->connectid.connection_id); in nvme_fc_connect_admin_queue()
1230 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); in nvme_fc_connect_admin_queue()
1231 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1238 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1240 queue->qnum, ret); in nvme_fc_connect_admin_queue()
1245 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, in nvme_fc_connect_queue() argument
1256 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_queue()
1258 dev_info(ctrl->ctrl.device, in nvme_fc_connect_queue()
1259 "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", in nvme_fc_connect_queue()
1260 ctrl->cnum); in nvme_fc_connect_queue()
1261 ret = -ENOMEM; in nvme_fc_connect_queue()
1267 lsreq = &lsop->ls_req; in nvme_fc_connect_queue()
1268 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_queue()
1269 lsreq->private = (void *)&conn_acc[1]; in nvme_fc_connect_queue()
1271 lsreq->private = NULL; in nvme_fc_connect_queue()
1273 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; in nvme_fc_connect_queue()
1274 conn_rqst->desc_list_len = cpu_to_be32( in nvme_fc_connect_queue()
1278 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); in nvme_fc_connect_queue()
1279 conn_rqst->associd.desc_len = in nvme_fc_connect_queue()
1282 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); in nvme_fc_connect_queue()
1283 conn_rqst->connect_cmd.desc_tag = in nvme_fc_connect_queue()
1285 conn_rqst->connect_cmd.desc_len = in nvme_fc_connect_queue()
1288 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); in nvme_fc_connect_queue()
1289 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); in nvme_fc_connect_queue()
1290 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); in nvme_fc_connect_queue()
1292 lsop->queue = queue; in nvme_fc_connect_queue()
1293 lsreq->rqstaddr = conn_rqst; in nvme_fc_connect_queue()
1294 lsreq->rqstlen = sizeof(*conn_rqst); in nvme_fc_connect_queue()
1295 lsreq->rspaddr = conn_acc; in nvme_fc_connect_queue()
1296 lsreq->rsplen = sizeof(*conn_acc); in nvme_fc_connect_queue()
1297 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; in nvme_fc_connect_queue()
1299 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_queue()
1306 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) in nvme_fc_connect_queue()
1308 else if (conn_acc->hdr.desc_list_len != in nvme_fc_connect_queue()
1311 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) in nvme_fc_connect_queue()
1313 else if (conn_acc->hdr.rqst.desc_len != in nvme_fc_connect_queue()
1316 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) in nvme_fc_connect_queue()
1318 else if (conn_acc->connectid.desc_tag != in nvme_fc_connect_queue()
1321 else if (conn_acc->connectid.desc_len != in nvme_fc_connect_queue()
1326 ret = -EBADF; in nvme_fc_connect_queue()
1327 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1329 queue->qnum, validation_errors[fcret]); in nvme_fc_connect_queue()
1331 queue->connection_id = in nvme_fc_connect_queue()
1332 be64_to_cpu(conn_acc->connectid.connection_id); in nvme_fc_connect_queue()
1333 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); in nvme_fc_connect_queue()
1340 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1342 queue->qnum, ret); in nvme_fc_connect_queue()
1353 /* fc-nvme initiator doesn't care about success or failure of cmd */ in nvme_fc_disconnect_assoc_done()
1359 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1360 * the FC-NVME Association. Terminating the association also
1361 * terminates the FC-NVME connections (per queue, both admin and io
1363 * down, and the related FC-NVME Association ID and Connection IDs
1366 * The behavior of the fc-nvme initiator is such that it's
1369 * connectivity with the fc-nvme target, so you may never get a
1372 * continue on with terminating the association. If the fc-nvme target
1376 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) in nvme_fc_xmt_disconnect_assoc() argument
1386 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_xmt_disconnect_assoc()
1388 dev_info(ctrl->ctrl.device, in nvme_fc_xmt_disconnect_assoc()
1389 "NVME-FC{%d}: send Disconnect Association " in nvme_fc_xmt_disconnect_assoc()
1391 ctrl->cnum); in nvme_fc_xmt_disconnect_assoc()
1397 lsreq = &lsop->ls_req; in nvme_fc_xmt_disconnect_assoc()
1398 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_xmt_disconnect_assoc()
1399 lsreq->private = (void *)&discon_acc[1]; in nvme_fc_xmt_disconnect_assoc()
1401 lsreq->private = NULL; in nvme_fc_xmt_disconnect_assoc()
1404 ctrl->association_id); in nvme_fc_xmt_disconnect_assoc()
1406 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, in nvme_fc_xmt_disconnect_assoc()
1415 struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; in nvme_fc_xmt_ls_rsp_done()
1416 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_xmt_ls_rsp_done()
1417 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_xmt_ls_rsp_done()
1420 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_xmt_ls_rsp_done()
1421 list_del(&lsop->lsrcv_list); in nvme_fc_xmt_ls_rsp_done()
1422 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_xmt_ls_rsp_done()
1424 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp_done()
1425 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp_done()
1426 fc_dma_unmap_single(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp_done()
1427 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp_done()
1429 kfree(lsop->rspbuf); in nvme_fc_xmt_ls_rsp_done()
1430 kfree(lsop->rqstbuf); in nvme_fc_xmt_ls_rsp_done()
1439 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_xmt_ls_rsp()
1440 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_xmt_ls_rsp()
1441 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; in nvme_fc_xmt_ls_rsp()
1444 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp()
1445 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp()
1447 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, in nvme_fc_xmt_ls_rsp()
1448 lsop->lsrsp); in nvme_fc_xmt_ls_rsp()
1450 dev_warn(lport->dev, in nvme_fc_xmt_ls_rsp()
1452 w0->ls_cmd, ret); in nvme_fc_xmt_ls_rsp()
1453 nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); in nvme_fc_xmt_ls_rsp()
1463 &lsop->rqstbuf->rq_dis_assoc; in nvme_fc_match_disconn_ls()
1464 struct nvme_fc_ctrl *ctrl, *ret = NULL; in nvme_fc_match_disconn_ls() local
1466 u64 association_id = be64_to_cpu(rqst->associd.association_id); in nvme_fc_match_disconn_ls()
1469 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_match_disconn_ls()
1471 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_match_disconn_ls()
1472 if (!nvme_fc_ctrl_get(ctrl)) in nvme_fc_match_disconn_ls()
1474 spin_lock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1475 if (association_id == ctrl->association_id) { in nvme_fc_match_disconn_ls()
1476 oldls = ctrl->rcv_disconn; in nvme_fc_match_disconn_ls()
1477 ctrl->rcv_disconn = lsop; in nvme_fc_match_disconn_ls()
1478 ret = ctrl; in nvme_fc_match_disconn_ls()
1480 spin_unlock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1482 /* leave the ctrl get reference */ in nvme_fc_match_disconn_ls()
1484 nvme_fc_ctrl_put(ctrl); in nvme_fc_match_disconn_ls()
1487 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_match_disconn_ls()
1491 dev_info(rport->lport->dev, in nvme_fc_match_disconn_ls()
1492 "NVME-FC{%d}: Multiple Disconnect Association " in nvme_fc_match_disconn_ls()
1493 "LS's received\n", ctrl->cnum); in nvme_fc_match_disconn_ls()
1495 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, in nvme_fc_match_disconn_ls()
1496 sizeof(*oldls->rspbuf), in nvme_fc_match_disconn_ls()
1497 rqst->w0.ls_cmd, in nvme_fc_match_disconn_ls()
1514 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_ls_disconnect_assoc()
1516 &lsop->rqstbuf->rq_dis_assoc; in nvme_fc_ls_disconnect_assoc()
1518 &lsop->rspbuf->rsp_dis_assoc; in nvme_fc_ls_disconnect_assoc()
1519 struct nvme_fc_ctrl *ctrl = NULL; in nvme_fc_ls_disconnect_assoc() local
1524 ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); in nvme_fc_ls_disconnect_assoc()
1527 ctrl = nvme_fc_match_disconn_ls(rport, lsop); in nvme_fc_ls_disconnect_assoc()
1528 if (!ctrl) in nvme_fc_ls_disconnect_assoc()
1533 dev_info(rport->lport->dev, in nvme_fc_ls_disconnect_assoc()
1536 lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvme_fc_ls_disconnect_assoc()
1537 sizeof(*acc), rqst->w0.ls_cmd, in nvme_fc_ls_disconnect_assoc()
1547 lsop->lsrsp->rsplen = sizeof(*acc); in nvme_fc_ls_disconnect_assoc()
1561 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); in nvme_fc_ls_disconnect_assoc()
1564 nvme_fc_ctrl_put(ctrl); in nvme_fc_ls_disconnect_assoc()
1570 * Actual Processing routine for received FC-NVME LS Requests from the LLD
1577 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; in nvme_fc_handle_ls_rqst()
1580 lsop->lsrsp->nvme_fc_private = lsop; in nvme_fc_handle_ls_rqst()
1581 lsop->lsrsp->rspbuf = lsop->rspbuf; in nvme_fc_handle_ls_rqst()
1582 lsop->lsrsp->rspdma = lsop->rspdma; in nvme_fc_handle_ls_rqst()
1583 lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; in nvme_fc_handle_ls_rqst()
1585 lsop->lsrsp->rsplen = 0; in nvme_fc_handle_ls_rqst()
1592 switch (w0->ls_cmd) { in nvme_fc_handle_ls_rqst()
1597 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1598 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1603 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1604 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1608 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1609 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1629 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1630 list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { in nvme_fc_handle_ls_rqst_work()
1631 if (lsop->handled) in nvme_fc_handle_ls_rqst_work()
1634 lsop->handled = true; in nvme_fc_handle_ls_rqst_work()
1635 if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_handle_ls_rqst_work()
1636 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1639 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1640 w0 = &lsop->rqstbuf->w0; in nvme_fc_handle_ls_rqst_work()
1641 lsop->lsrsp->rsplen = nvme_fc_format_rjt( in nvme_fc_handle_ls_rqst_work()
1642 lsop->rspbuf, in nvme_fc_handle_ls_rqst_work()
1643 sizeof(*lsop->rspbuf), in nvme_fc_handle_ls_rqst_work()
1644 w0->ls_cmd, in nvme_fc_handle_ls_rqst_work()
1652 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1659 dev_info(lport->dev, "RCV %s LS failed: No memory\n", in nvme_fc_rcv_ls_req_err_msg()
1660 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req_err_msg()
1661 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req_err_msg()
1665 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
1668 * The nvme-fc layer will copy payload to an internal structure for
1689 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_rcv_ls_req()
1698 if (!lport->ops->xmt_ls_rsp) { in nvme_fc_rcv_ls_req()
1699 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1701 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1702 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1703 ret = -EINVAL; in nvme_fc_rcv_ls_req()
1708 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1710 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1711 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1712 ret = -E2BIG; in nvme_fc_rcv_ls_req()
1719 ret = -ENOMEM; in nvme_fc_rcv_ls_req()
1723 lsop->rqstbuf = kzalloc(sizeof(*lsop->rqstbuf), GFP_KERNEL); in nvme_fc_rcv_ls_req()
1724 lsop->rspbuf = kzalloc(sizeof(*lsop->rspbuf), GFP_KERNEL); in nvme_fc_rcv_ls_req()
1725 if (!lsop->rqstbuf || !lsop->rspbuf) { in nvme_fc_rcv_ls_req()
1727 ret = -ENOMEM; in nvme_fc_rcv_ls_req()
1731 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, in nvme_fc_rcv_ls_req()
1732 sizeof(*lsop->rspbuf), in nvme_fc_rcv_ls_req()
1734 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { in nvme_fc_rcv_ls_req()
1735 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1737 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1738 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1739 ret = -EFAULT; in nvme_fc_rcv_ls_req()
1743 lsop->rport = rport; in nvme_fc_rcv_ls_req()
1744 lsop->lsrsp = lsrsp; in nvme_fc_rcv_ls_req()
1746 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); in nvme_fc_rcv_ls_req()
1747 lsop->rqstdatalen = lsreqbuf_len; in nvme_fc_rcv_ls_req()
1749 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1750 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_rcv_ls_req()
1751 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1752 ret = -ENOTCONN; in nvme_fc_rcv_ls_req()
1755 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); in nvme_fc_rcv_ls_req()
1756 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1758 schedule_work(&rport->lsrcv_work); in nvme_fc_rcv_ls_req()
1763 fc_dma_unmap_single(lport->dev, lsop->rspdma, in nvme_fc_rcv_ls_req()
1764 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_rcv_ls_req()
1766 kfree(lsop->rspbuf); in nvme_fc_rcv_ls_req()
1767 kfree(lsop->rqstbuf); in nvme_fc_rcv_ls_req()
1776 /* *********************** NVME Ctrl Routines **************************** */
1779 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, in __nvme_fc_exit_request() argument
1782 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, in __nvme_fc_exit_request()
1783 sizeof(op->rsp_iu), DMA_FROM_DEVICE); in __nvme_fc_exit_request()
1784 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, in __nvme_fc_exit_request()
1785 sizeof(op->cmd_iu), DMA_TO_DEVICE); in __nvme_fc_exit_request()
1787 atomic_set(&op->state, FCPOP_STATE_UNINIT); in __nvme_fc_exit_request()
1796 return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op); in nvme_fc_exit_request()
1800 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) in __nvme_fc_abort_op() argument
1805 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_abort_op()
1806 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); in __nvme_fc_abort_op()
1808 atomic_set(&op->state, opstate); in __nvme_fc_abort_op()
1809 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { in __nvme_fc_abort_op()
1810 op->flags |= FCOP_FLAGS_TERMIO; in __nvme_fc_abort_op()
1811 ctrl->iocnt++; in __nvme_fc_abort_op()
1813 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_abort_op()
1816 return -ECANCELED; in __nvme_fc_abort_op()
1818 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, in __nvme_fc_abort_op()
1819 &ctrl->rport->remoteport, in __nvme_fc_abort_op()
1820 op->queue->lldd_handle, in __nvme_fc_abort_op()
1821 &op->fcp_req); in __nvme_fc_abort_op()
1827 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_abort_aen_ops() argument
1829 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; in nvme_fc_abort_aen_ops()
1833 if (!(aen_op->flags & FCOP_FLAGS_AEN)) in nvme_fc_abort_aen_ops()
1837 __nvme_fc_abort_op(ctrl, aen_op); in nvme_fc_abort_aen_ops()
1841 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, in __nvme_fc_fcpop_chk_teardowns() argument
1847 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
1848 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && in __nvme_fc_fcpop_chk_teardowns()
1849 op->flags & FCOP_FLAGS_TERMIO) { in __nvme_fc_fcpop_chk_teardowns()
1850 if (!--ctrl->iocnt) in __nvme_fc_fcpop_chk_teardowns()
1851 wake_up(&ctrl->ioabort_wait); in __nvme_fc_fcpop_chk_teardowns()
1853 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
1860 struct nvme_fc_ctrl *ctrl = in nvme_fc_ctrl_ioerr_work() local
1863 nvme_fc_error_recovery(ctrl, "transport detected io error"); in nvme_fc_ctrl_ioerr_work()
1867 * nvme_fc_io_getuuid - Routine called to get the appid field
1876 struct request *rq = op->rq; in nvme_fc_io_getuuid()
1878 if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio) in nvme_fc_io_getuuid()
1880 return blkcg_get_fc_appid(rq->bio); in nvme_fc_io_getuuid()
1888 struct request *rq = op->rq; in nvme_fc_fcpio_done()
1889 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_fcpio_done()
1890 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_fcpio_done() local
1891 struct nvme_fc_queue *queue = op->queue; in nvme_fc_fcpio_done()
1892 struct nvme_completion *cqe = &op->rsp_iu.cqe; in nvme_fc_fcpio_done()
1893 struct nvme_command *sqe = &op->cmd_iu.sqe; in nvme_fc_fcpio_done()
1908 * This affects the FC-NVME implementation in two ways: in nvme_fc_fcpio_done()
1913 * 2) The FC-NVME implementation requires that delivery of in nvme_fc_fcpio_done()
1922 * every field in the cqe - in cases where the FC transport must in nvme_fc_fcpio_done()
1931 * Per FC-NVME spec, failure of an individual command requires in nvme_fc_fcpio_done()
1936 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); in nvme_fc_fcpio_done()
1938 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, in nvme_fc_fcpio_done()
1939 sizeof(op->rsp_iu), DMA_FROM_DEVICE); in nvme_fc_fcpio_done()
1943 else if (freq->status) { in nvme_fc_fcpio_done()
1945 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1946 "NVME-FC{%d}: io failed due to lldd error %d\n", in nvme_fc_fcpio_done()
1947 ctrl->cnum, freq->status); in nvme_fc_fcpio_done()
1952 * status, they blk-mq layer can typically be called with the in nvme_fc_fcpio_done()
1953 * non-zero status and the content of the cqe isn't important. in nvme_fc_fcpio_done()
1965 switch (freq->rcv_rsplen) { in nvme_fc_fcpio_done()
1974 if (freq->transferred_length != in nvme_fc_fcpio_done()
1975 be32_to_cpu(op->cmd_iu.data_len)) { in nvme_fc_fcpio_done()
1977 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1978 "NVME-FC{%d}: io failed due to bad transfer " in nvme_fc_fcpio_done()
1980 ctrl->cnum, freq->transferred_length, in nvme_fc_fcpio_done()
1981 be32_to_cpu(op->cmd_iu.data_len)); in nvme_fc_fcpio_done()
1992 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != in nvme_fc_fcpio_done()
1993 (freq->rcv_rsplen / 4) || in nvme_fc_fcpio_done()
1994 be32_to_cpu(op->rsp_iu.xfrd_len) != in nvme_fc_fcpio_done()
1995 freq->transferred_length || in nvme_fc_fcpio_done()
1996 op->rsp_iu.ersp_result || in nvme_fc_fcpio_done()
1997 sqe->common.command_id != cqe->command_id)) { in nvme_fc_fcpio_done()
1999 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2000 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " in nvme_fc_fcpio_done()
2003 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), in nvme_fc_fcpio_done()
2004 be32_to_cpu(op->rsp_iu.xfrd_len), in nvme_fc_fcpio_done()
2005 freq->transferred_length, in nvme_fc_fcpio_done()
2006 op->rsp_iu.ersp_result, in nvme_fc_fcpio_done()
2007 sqe->common.command_id, in nvme_fc_fcpio_done()
2008 cqe->command_id); in nvme_fc_fcpio_done()
2011 result = cqe->result; in nvme_fc_fcpio_done()
2012 status = cqe->status; in nvme_fc_fcpio_done()
2017 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2018 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " in nvme_fc_fcpio_done()
2020 ctrl->cnum, freq->rcv_rsplen); in nvme_fc_fcpio_done()
2027 if (op->flags & FCOP_FLAGS_AEN) { in nvme_fc_fcpio_done()
2028 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); in nvme_fc_fcpio_done()
2029 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_fcpio_done()
2030 atomic_set(&op->state, FCPOP_STATE_IDLE); in nvme_fc_fcpio_done()
2031 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ in nvme_fc_fcpio_done()
2032 nvme_fc_ctrl_put(ctrl); in nvme_fc_fcpio_done()
2036 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_fcpio_done()
2042 nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING) in nvme_fc_fcpio_done()
2043 queue_work(nvme_reset_wq, &ctrl->ioerr_work); in nvme_fc_fcpio_done()
2047 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, in __nvme_fc_init_request() argument
2053 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in __nvme_fc_init_request()
2057 op->fcp_req.cmdaddr = &op->cmd_iu; in __nvme_fc_init_request()
2058 op->fcp_req.cmdlen = sizeof(op->cmd_iu); in __nvme_fc_init_request()
2059 op->fcp_req.rspaddr = &op->rsp_iu; in __nvme_fc_init_request()
2060 op->fcp_req.rsplen = sizeof(op->rsp_iu); in __nvme_fc_init_request()
2061 op->fcp_req.done = nvme_fc_fcpio_done; in __nvme_fc_init_request()
2062 op->ctrl = ctrl; in __nvme_fc_init_request()
2063 op->queue = queue; in __nvme_fc_init_request()
2064 op->rq = rq; in __nvme_fc_init_request()
2065 op->rqno = rqno; in __nvme_fc_init_request()
2067 cmdiu->format_id = NVME_CMD_FORMAT_ID; in __nvme_fc_init_request()
2068 cmdiu->fc_id = NVME_CMD_FC_ID; in __nvme_fc_init_request()
2069 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); in __nvme_fc_init_request()
2070 if (queue->qnum) in __nvme_fc_init_request()
2071 cmdiu->rsv_cat = fccmnd_set_cat_css(0, in __nvme_fc_init_request()
2074 cmdiu->rsv_cat = fccmnd_set_cat_admin(0); in __nvme_fc_init_request()
2076 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2077 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); in __nvme_fc_init_request()
2078 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { in __nvme_fc_init_request()
2079 dev_err(ctrl->dev, in __nvme_fc_init_request()
2080 "FCP Op failed - cmdiu dma mapping failed.\n"); in __nvme_fc_init_request()
2081 ret = -EFAULT; in __nvme_fc_init_request()
2085 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2086 &op->rsp_iu, sizeof(op->rsp_iu), in __nvme_fc_init_request()
2088 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { in __nvme_fc_init_request()
2089 dev_err(ctrl->dev, in __nvme_fc_init_request()
2090 "FCP Op failed - rspiu dma mapping failed.\n"); in __nvme_fc_init_request()
2091 ret = -EFAULT; in __nvme_fc_init_request()
2094 atomic_set(&op->state, FCPOP_STATE_IDLE); in __nvme_fc_init_request()
2103 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); in nvme_fc_init_request() local
2105 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_fc_init_request()
2106 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; in nvme_fc_init_request()
2109 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); in nvme_fc_init_request()
2112 op->op.fcp_req.first_sgl = op->sgl; in nvme_fc_init_request()
2113 op->op.fcp_req.private = &op->priv[0]; in nvme_fc_init_request()
2114 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_fc_init_request()
2115 nvme_req(rq)->cmd = &op->op.cmd_iu.sqe; in nvme_fc_init_request()
2120 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_init_aen_ops() argument
2128 aen_op = ctrl->aen_ops; in nvme_fc_init_aen_ops()
2130 if (ctrl->lport->ops->fcprqst_priv_sz) { in nvme_fc_init_aen_ops()
2131 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, in nvme_fc_init_aen_ops()
2134 return -ENOMEM; in nvme_fc_init_aen_ops()
2137 cmdiu = &aen_op->cmd_iu; in nvme_fc_init_aen_ops()
2138 sqe = &cmdiu->sqe; in nvme_fc_init_aen_ops()
2139 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], in nvme_fc_init_aen_ops()
2147 aen_op->flags = FCOP_FLAGS_AEN; in nvme_fc_init_aen_ops()
2148 aen_op->fcp_req.private = private; in nvme_fc_init_aen_ops()
2151 sqe->common.opcode = nvme_admin_async_event; in nvme_fc_init_aen_ops()
2153 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; in nvme_fc_init_aen_ops()
2159 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_term_aen_ops() argument
2164 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_fc_term_aen_ops()
2165 aen_op = ctrl->aen_ops; in nvme_fc_term_aen_ops()
2167 __nvme_fc_exit_request(ctrl, aen_op); in nvme_fc_term_aen_ops()
2169 kfree(aen_op->fcp_req.private); in nvme_fc_term_aen_ops()
2170 aen_op->fcp_req.private = NULL; in nvme_fc_term_aen_ops()
2177 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data); in __nvme_fc_init_hctx() local
2178 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; in __nvme_fc_init_hctx()
2180 hctx->driver_data = queue; in __nvme_fc_init_hctx()
2181 queue->hctx = hctx; in __nvme_fc_init_hctx()
2199 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) in nvme_fc_init_queue() argument
2203 queue = &ctrl->queues[idx]; in nvme_fc_init_queue()
2205 queue->ctrl = ctrl; in nvme_fc_init_queue()
2206 queue->qnum = idx; in nvme_fc_init_queue()
2207 atomic_set(&queue->csn, 0); in nvme_fc_init_queue()
2208 queue->dev = ctrl->dev; in nvme_fc_init_queue()
2211 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_fc_init_queue()
2213 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_fc_init_queue()
2217 * and CQEs and dma map them - mapping their respective entries in nvme_fc_init_queue()
2222 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload in nvme_fc_init_queue()
2238 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) in nvme_fc_free_queue()
2241 clear_bit(NVME_FC_Q_LIVE, &queue->flags); in nvme_fc_free_queue()
2248 queue->connection_id = 0; in nvme_fc_free_queue()
2249 atomic_set(&queue->csn, 0); in nvme_fc_free_queue()
2253 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, in __nvme_fc_delete_hw_queue() argument
2256 if (ctrl->lport->ops->delete_queue) in __nvme_fc_delete_hw_queue()
2257 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, in __nvme_fc_delete_hw_queue()
2258 queue->lldd_handle); in __nvme_fc_delete_hw_queue()
2259 queue->lldd_handle = NULL; in __nvme_fc_delete_hw_queue()
2263 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_free_io_queues() argument
2267 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_free_io_queues()
2268 nvme_fc_free_queue(&ctrl->queues[i]); in nvme_fc_free_io_queues()
2272 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, in __nvme_fc_create_hw_queue() argument
2277 queue->lldd_handle = NULL; in __nvme_fc_create_hw_queue()
2278 if (ctrl->lport->ops->create_queue) in __nvme_fc_create_hw_queue()
2279 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, in __nvme_fc_create_hw_queue()
2280 qidx, qsize, &queue->lldd_handle); in __nvme_fc_create_hw_queue()
2286 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_delete_hw_io_queues() argument
2288 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; in nvme_fc_delete_hw_io_queues()
2291 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) in nvme_fc_delete_hw_io_queues()
2292 __nvme_fc_delete_hw_queue(ctrl, queue, i); in nvme_fc_delete_hw_io_queues()
2296 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) in nvme_fc_create_hw_io_queues() argument
2298 struct nvme_fc_queue *queue = &ctrl->queues[1]; in nvme_fc_create_hw_io_queues()
2301 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { in nvme_fc_create_hw_io_queues()
2302 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); in nvme_fc_create_hw_io_queues()
2310 for (; i > 0; i--) in nvme_fc_create_hw_io_queues()
2311 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); in nvme_fc_create_hw_io_queues()
2316 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) in nvme_fc_connect_io_queues() argument
2320 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_fc_connect_io_queues()
2321 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, in nvme_fc_connect_io_queues()
2325 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); in nvme_fc_connect_io_queues()
2329 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); in nvme_fc_connect_io_queues()
2336 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_init_io_queues() argument
2340 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_init_io_queues()
2341 nvme_fc_init_queue(ctrl, i); in nvme_fc_init_io_queues()
2347 struct nvme_fc_ctrl *ctrl = in nvme_fc_ctrl_free() local
2351 if (ctrl->ctrl.tagset) in nvme_fc_ctrl_free()
2352 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_fc_ctrl_free()
2355 spin_lock_irqsave(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2356 list_del(&ctrl->ctrl_list); in nvme_fc_ctrl_free()
2357 spin_unlock_irqrestore(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2359 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_fc_ctrl_free()
2360 nvme_remove_admin_tag_set(&ctrl->ctrl); in nvme_fc_ctrl_free()
2362 kfree(ctrl->queues); in nvme_fc_ctrl_free()
2364 put_device(ctrl->dev); in nvme_fc_ctrl_free()
2365 nvme_fc_rport_put(ctrl->rport); in nvme_fc_ctrl_free()
2367 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_ctrl_free()
2368 if (ctrl->ctrl.opts) in nvme_fc_ctrl_free()
2369 nvmf_free_options(ctrl->ctrl.opts); in nvme_fc_ctrl_free()
2370 kfree(ctrl); in nvme_fc_ctrl_free()
2374 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_put() argument
2376 kref_put(&ctrl->ref, nvme_fc_ctrl_free); in nvme_fc_ctrl_put()
2380 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_get() argument
2382 return kref_get_unless_zero(&ctrl->ref); in nvme_fc_ctrl_get()
2386 * All accesses from nvme core layer done - can now free the
2392 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_free_ctrl() local
2394 WARN_ON(nctrl != &ctrl->ctrl); in nvme_fc_free_ctrl()
2396 nvme_fc_ctrl_put(ctrl); in nvme_fc_free_ctrl()
2415 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_terminate_exchange() local
2418 op->nreq.flags |= NVME_REQ_CANCELLED; in nvme_fc_terminate_exchange()
2419 __nvme_fc_abort_op(ctrl, op); in nvme_fc_terminate_exchange()
2433 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) in __nvme_fc_abort_outstanding_ios() argument
2441 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2442 for (q = 1; q < ctrl->ctrl.queue_count; q++) in __nvme_fc_abort_outstanding_ios()
2443 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags); in __nvme_fc_abort_outstanding_ios()
2445 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); in __nvme_fc_abort_outstanding_ios()
2459 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2460 nvme_quiesce_io_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2461 nvme_sync_io_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2462 blk_mq_tagset_busy_iter(&ctrl->tag_set, in __nvme_fc_abort_outstanding_ios()
2463 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2464 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); in __nvme_fc_abort_outstanding_ios()
2466 nvme_unquiesce_io_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2470 * Other transports, which don't have link-level contexts bound in __nvme_fc_abort_outstanding_ios()
2484 nvme_quiesce_admin_queue(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2485 blk_sync_queue(ctrl->ctrl.admin_q); in __nvme_fc_abort_outstanding_ios()
2486 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, in __nvme_fc_abort_outstanding_ios()
2487 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2488 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); in __nvme_fc_abort_outstanding_ios()
2490 nvme_unquiesce_admin_queue(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2494 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) in nvme_fc_error_recovery() argument
2496 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); in nvme_fc_error_recovery()
2506 __nvme_fc_abort_outstanding_ios(ctrl, true); in nvme_fc_error_recovery()
2507 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2508 "NVME-FC{%d}: transport error during (re)connect\n", in nvme_fc_error_recovery()
2509 ctrl->cnum); in nvme_fc_error_recovery()
2513 /* Otherwise, only proceed if in LIVE state - e.g. on first error */ in nvme_fc_error_recovery()
2517 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2518 "NVME-FC{%d}: transport association event: %s\n", in nvme_fc_error_recovery()
2519 ctrl->cnum, errmsg); in nvme_fc_error_recovery()
2520 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2521 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); in nvme_fc_error_recovery()
2523 nvme_reset_ctrl(&ctrl->ctrl); in nvme_fc_error_recovery()
2529 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_timeout() local
2530 u16 qnum = op->queue->qnum; in nvme_fc_timeout()
2531 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in nvme_fc_timeout()
2532 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_timeout()
2538 dev_info(ctrl->ctrl.device, in nvme_fc_timeout()
2539 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: " in nvme_fc_timeout()
2541 ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype, in nvme_fc_timeout()
2543 sqe->common.cdw10, sqe->common.cdw11); in nvme_fc_timeout()
2544 if (__nvme_fc_abort_op(ctrl, op)) in nvme_fc_timeout()
2545 nvme_fc_error_recovery(ctrl, "io timeout abort failed"); in nvme_fc_timeout()
2556 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, in nvme_fc_map_data() argument
2559 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_map_data()
2562 freq->sg_cnt = 0; in nvme_fc_map_data()
2567 freq->sg_table.sgl = freq->first_sgl; in nvme_fc_map_data()
2568 ret = sg_alloc_table_chained(&freq->sg_table, in nvme_fc_map_data()
2569 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, in nvme_fc_map_data()
2572 return -ENOMEM; in nvme_fc_map_data()
2574 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); in nvme_fc_map_data()
2575 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); in nvme_fc_map_data()
2576 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, in nvme_fc_map_data()
2577 op->nents, rq_dma_dir(rq)); in nvme_fc_map_data()
2578 if (unlikely(freq->sg_cnt <= 0)) { in nvme_fc_map_data()
2579 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); in nvme_fc_map_data()
2580 freq->sg_cnt = 0; in nvme_fc_map_data()
2581 return -EFAULT; in nvme_fc_map_data()
2591 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, in nvme_fc_unmap_data() argument
2594 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_unmap_data()
2596 if (!freq->sg_cnt) in nvme_fc_unmap_data()
2599 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, in nvme_fc_unmap_data()
2602 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); in nvme_fc_unmap_data()
2604 freq->sg_cnt = 0; in nvme_fc_unmap_data()
2626 * So - while the operation is outstanding to the LLDD, there is a link
2631 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, in nvme_fc_start_fcp_op() argument
2635 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in nvme_fc_start_fcp_op()
2636 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_start_fcp_op()
2643 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_start_fcp_op()
2646 if (!nvme_fc_ctrl_get(ctrl)) in nvme_fc_start_fcp_op()
2649 /* format the FC-NVME CMD IU and fcp_req */ in nvme_fc_start_fcp_op()
2650 cmdiu->connection_id = cpu_to_be64(queue->connection_id); in nvme_fc_start_fcp_op()
2651 cmdiu->data_len = cpu_to_be32(data_len); in nvme_fc_start_fcp_op()
2654 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; in nvme_fc_start_fcp_op()
2657 cmdiu->flags = FCNVME_CMD_FLAGS_READ; in nvme_fc_start_fcp_op()
2660 cmdiu->flags = 0; in nvme_fc_start_fcp_op()
2663 op->fcp_req.payload_length = data_len; in nvme_fc_start_fcp_op()
2664 op->fcp_req.io_dir = io_dir; in nvme_fc_start_fcp_op()
2665 op->fcp_req.transferred_length = 0; in nvme_fc_start_fcp_op()
2666 op->fcp_req.rcv_rsplen = 0; in nvme_fc_start_fcp_op()
2667 op->fcp_req.status = NVME_SC_SUCCESS; in nvme_fc_start_fcp_op()
2668 op->fcp_req.sqid = cpu_to_le16(queue->qnum); in nvme_fc_start_fcp_op()
2672 * as well as those by FC-NVME spec. in nvme_fc_start_fcp_op()
2674 WARN_ON_ONCE(sqe->common.metadata); in nvme_fc_start_fcp_op()
2675 sqe->common.flags |= NVME_CMD_SGL_METABUF; in nvme_fc_start_fcp_op()
2678 * format SQE DPTR field per FC-NVME rules: in nvme_fc_start_fcp_op()
2680 * subtype=0xA Transport-specific value in nvme_fc_start_fcp_op()
2684 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_fc_start_fcp_op()
2686 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); in nvme_fc_start_fcp_op()
2687 sqe->rw.dptr.sgl.addr = 0; in nvme_fc_start_fcp_op()
2689 if (!(op->flags & FCOP_FLAGS_AEN)) { in nvme_fc_start_fcp_op()
2690 ret = nvme_fc_map_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2692 nvme_cleanup_cmd(op->rq); in nvme_fc_start_fcp_op()
2693 nvme_fc_ctrl_put(ctrl); in nvme_fc_start_fcp_op()
2694 if (ret == -ENOMEM || ret == -EAGAIN) in nvme_fc_start_fcp_op()
2700 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, in nvme_fc_start_fcp_op()
2701 sizeof(op->cmd_iu), DMA_TO_DEVICE); in nvme_fc_start_fcp_op()
2703 atomic_set(&op->state, FCPOP_STATE_ACTIVE); in nvme_fc_start_fcp_op()
2705 if (!(op->flags & FCOP_FLAGS_AEN)) in nvme_fc_start_fcp_op()
2706 nvme_start_request(op->rq); in nvme_fc_start_fcp_op()
2708 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); in nvme_fc_start_fcp_op()
2709 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, in nvme_fc_start_fcp_op()
2710 &ctrl->rport->remoteport, in nvme_fc_start_fcp_op()
2711 queue->lldd_handle, &op->fcp_req); in nvme_fc_start_fcp_op()
2717 * no - as the connection won't be live. If it is a command in nvme_fc_start_fcp_op()
2718 * post-connect, it's possible a gap in csn may be created. in nvme_fc_start_fcp_op()
2726 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); in nvme_fc_start_fcp_op()
2727 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_start_fcp_op()
2729 if (!(op->flags & FCOP_FLAGS_AEN)) { in nvme_fc_start_fcp_op()
2730 nvme_fc_unmap_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2731 nvme_cleanup_cmd(op->rq); in nvme_fc_start_fcp_op()
2734 nvme_fc_ctrl_put(ctrl); in nvme_fc_start_fcp_op()
2736 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && in nvme_fc_start_fcp_op()
2737 ret != -EBUSY) in nvme_fc_start_fcp_op()
2750 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_fc_queue_rq()
2751 struct nvme_fc_queue *queue = hctx->driver_data; in nvme_fc_queue_rq()
2752 struct nvme_fc_ctrl *ctrl = queue->ctrl; in nvme_fc_queue_rq() local
2753 struct request *rq = bd->rq; in nvme_fc_queue_rq()
2756 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); in nvme_fc_queue_rq()
2760 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || in nvme_fc_queue_rq()
2761 !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_fc_queue_rq()
2762 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_fc_queue_rq()
2770 * as WRITE ZEROES will return a non-zero rq payload_bytes yet in nvme_fc_queue_rq()
2786 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); in nvme_fc_queue_rq()
2792 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); in nvme_fc_submit_async_event() local
2796 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) in nvme_fc_submit_async_event()
2799 aen_op = &ctrl->aen_ops[0]; in nvme_fc_submit_async_event()
2801 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, in nvme_fc_submit_async_event()
2804 dev_err(ctrl->ctrl.device, in nvme_fc_submit_async_event()
2812 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_complete_rq() local
2814 atomic_set(&op->state, FCPOP_STATE_IDLE); in nvme_fc_complete_rq()
2815 op->flags &= ~FCOP_FLAGS_TERMIO; in nvme_fc_complete_rq()
2817 nvme_fc_unmap_data(ctrl, rq, op); in nvme_fc_complete_rq()
2819 nvme_fc_ctrl_put(ctrl); in nvme_fc_complete_rq()
2824 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); in nvme_fc_map_queues() local
2827 for (i = 0; i < set->nr_maps; i++) { in nvme_fc_map_queues()
2828 struct blk_mq_queue_map *map = &set->map[i]; in nvme_fc_map_queues()
2830 if (!map->nr_queues) { in nvme_fc_map_queues()
2836 if (ctrl->lport->ops->map_queues) in nvme_fc_map_queues()
2837 ctrl->lport->ops->map_queues(&ctrl->lport->localport, in nvme_fc_map_queues()
2855 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_create_io_queues() argument
2857 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_io_queues()
2861 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), in nvme_fc_create_io_queues()
2862 ctrl->lport->ops->max_hw_queues); in nvme_fc_create_io_queues()
2863 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_create_io_queues()
2865 dev_info(ctrl->ctrl.device, in nvme_fc_create_io_queues()
2870 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_create_io_queues()
2874 nvme_fc_init_io_queues(ctrl); in nvme_fc_create_io_queues()
2876 ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set, in nvme_fc_create_io_queues()
2879 ctrl->lport->ops->fcprqst_priv_sz)); in nvme_fc_create_io_queues()
2883 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2887 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2891 ctrl->ioq_live = true; in nvme_fc_create_io_queues()
2896 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_create_io_queues()
2898 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_fc_create_io_queues()
2899 nvme_fc_free_io_queues(ctrl); in nvme_fc_create_io_queues()
2902 ctrl->ctrl.tagset = NULL; in nvme_fc_create_io_queues()
2908 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_recreate_io_queues() argument
2910 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_recreate_io_queues()
2911 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; in nvme_fc_recreate_io_queues()
2915 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), in nvme_fc_recreate_io_queues()
2916 ctrl->lport->ops->max_hw_queues); in nvme_fc_recreate_io_queues()
2917 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_recreate_io_queues()
2919 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2925 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2928 return -ENOSPC; in nvme_fc_recreate_io_queues()
2931 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_recreate_io_queues()
2933 if (ctrl->ctrl.queue_count == 1) in nvme_fc_recreate_io_queues()
2937 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2940 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); in nvme_fc_recreate_io_queues()
2943 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
2947 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
2954 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_recreate_io_queues()
2956 nvme_fc_free_io_queues(ctrl); in nvme_fc_recreate_io_queues()
2963 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_rport_active_on_lport()
2965 atomic_inc(&lport->act_rport_cnt); in nvme_fc_rport_active_on_lport()
2971 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_rport_inactive_on_lport()
2974 cnt = atomic_dec_return(&lport->act_rport_cnt); in nvme_fc_rport_inactive_on_lport()
2975 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) in nvme_fc_rport_inactive_on_lport()
2976 lport->ops->localport_delete(&lport->localport); in nvme_fc_rport_inactive_on_lport()
2980 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctlr_active_on_rport() argument
2982 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_ctlr_active_on_rport()
2985 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) in nvme_fc_ctlr_active_on_rport()
2988 cnt = atomic_inc_return(&rport->act_ctrl_cnt); in nvme_fc_ctlr_active_on_rport()
2996 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctlr_inactive_on_rport() argument
2998 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_ctlr_inactive_on_rport()
2999 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_ctlr_inactive_on_rport()
3002 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ in nvme_fc_ctlr_inactive_on_rport()
3004 cnt = atomic_dec_return(&rport->act_ctrl_cnt); in nvme_fc_ctlr_inactive_on_rport()
3006 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) in nvme_fc_ctlr_inactive_on_rport()
3007 lport->ops->remoteport_delete(&rport->remoteport); in nvme_fc_ctlr_inactive_on_rport()
3019 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) in nvme_fc_create_association() argument
3021 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_association()
3026 ++ctrl->ctrl.nr_reconnects; in nvme_fc_create_association()
3028 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_create_association()
3029 return -ENODEV; in nvme_fc_create_association()
3031 if (nvme_fc_ctlr_active_on_rport(ctrl)) in nvme_fc_create_association()
3032 return -ENOTUNIQ; in nvme_fc_create_association()
3034 dev_info(ctrl->ctrl.device, in nvme_fc_create_association()
3035 "NVME-FC{%d}: create association : host wwpn 0x%016llx " in nvme_fc_create_association()
3037 ctrl->cnum, ctrl->lport->localport.port_name, in nvme_fc_create_association()
3038 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); in nvme_fc_create_association()
3040 clear_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_create_association()
3046 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, in nvme_fc_create_association()
3051 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], in nvme_fc_create_association()
3056 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_fc_create_association()
3060 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); in nvme_fc_create_association()
3065 * todo:- add code to check if ctrl attributes changed from in nvme_fc_create_association()
3069 ret = nvme_enable_ctrl(&ctrl->ctrl); in nvme_fc_create_association()
3070 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3071 ret = -EIO; in nvme_fc_create_association()
3075 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; in nvme_fc_create_association()
3076 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << in nvme_fc_create_association()
3077 (ilog2(SZ_4K) - 9); in nvme_fc_create_association()
3079 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_fc_create_association()
3081 ret = nvme_init_ctrl_finish(&ctrl->ctrl, false); in nvme_fc_create_association()
3084 if (test_bit(ASSOC_FAILED, &ctrl->flags)) { in nvme_fc_create_association()
3085 ret = -EIO; in nvme_fc_create_association()
3090 /* FC-NVME does not have other data in the capsule */ in nvme_fc_create_association()
3091 if (ctrl->ctrl.icdoff) { in nvme_fc_create_association()
3092 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", in nvme_fc_create_association()
3093 ctrl->ctrl.icdoff); in nvme_fc_create_association()
3098 /* FC-NVME supports normal SGL Data Block Descriptors */ in nvme_fc_create_association()
3099 if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { in nvme_fc_create_association()
3100 dev_err(ctrl->ctrl.device, in nvme_fc_create_association()
3106 if (opts->queue_size > ctrl->ctrl.maxcmd) { in nvme_fc_create_association()
3108 dev_warn(ctrl->ctrl.device, in nvme_fc_create_association()
3109 "queue_size %zu > ctrl maxcmd %u, reducing " in nvme_fc_create_association()
3111 opts->queue_size, ctrl->ctrl.maxcmd); in nvme_fc_create_association()
3112 opts->queue_size = ctrl->ctrl.maxcmd; in nvme_fc_create_association()
3113 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_fc_create_association()
3116 ret = nvme_fc_init_aen_ops(ctrl); in nvme_fc_create_association()
3124 if (ctrl->ctrl.queue_count > 1) { in nvme_fc_create_association()
3125 if (!ctrl->ioq_live) in nvme_fc_create_association()
3126 ret = nvme_fc_create_io_queues(ctrl); in nvme_fc_create_association()
3128 ret = nvme_fc_recreate_io_queues(ctrl); in nvme_fc_create_association()
3130 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3131 ret = -EIO; in nvme_fc_create_association()
3135 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) { in nvme_fc_create_association()
3136 ret = -EIO; in nvme_fc_create_association()
3140 ctrl->ctrl.nr_reconnects = 0; in nvme_fc_create_association()
3141 nvme_start_ctrl(&ctrl->ctrl); in nvme_fc_create_association()
3146 nvme_fc_term_aen_ops(ctrl); in nvme_fc_create_association()
3148 nvme_stop_keep_alive(&ctrl->ctrl); in nvme_fc_create_association()
3150 dev_warn(ctrl->ctrl.device, in nvme_fc_create_association()
3151 "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n", in nvme_fc_create_association()
3152 ctrl->cnum, ctrl->association_id, ret); in nvme_fc_create_association()
3153 /* send a Disconnect(association) LS to fc-nvme target */ in nvme_fc_create_association()
3154 nvme_fc_xmt_disconnect_assoc(ctrl); in nvme_fc_create_association()
3155 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_create_association()
3156 ctrl->association_id = 0; in nvme_fc_create_association()
3157 disls = ctrl->rcv_disconn; in nvme_fc_create_association()
3158 ctrl->rcv_disconn = NULL; in nvme_fc_create_association()
3159 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_create_association()
3163 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); in nvme_fc_create_association()
3165 nvme_fc_free_queue(&ctrl->queues[0]); in nvme_fc_create_association()
3166 clear_bit(ASSOC_ACTIVE, &ctrl->flags); in nvme_fc_create_association()
3167 nvme_fc_ctlr_inactive_on_rport(ctrl); in nvme_fc_create_association()
3180 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) in nvme_fc_delete_association() argument
3185 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) in nvme_fc_delete_association()
3188 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_delete_association()
3189 set_bit(FCCTRL_TERMIO, &ctrl->flags); in nvme_fc_delete_association()
3190 ctrl->iocnt = 0; in nvme_fc_delete_association()
3191 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_delete_association()
3193 __nvme_fc_abort_outstanding_ios(ctrl, false); in nvme_fc_delete_association()
3196 nvme_fc_abort_aen_ops(ctrl); in nvme_fc_delete_association()
3199 spin_lock_irq(&ctrl->lock); in nvme_fc_delete_association()
3200 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); in nvme_fc_delete_association()
3201 clear_bit(FCCTRL_TERMIO, &ctrl->flags); in nvme_fc_delete_association()
3202 spin_unlock_irq(&ctrl->lock); in nvme_fc_delete_association()
3204 nvme_fc_term_aen_ops(ctrl); in nvme_fc_delete_association()
3207 * send a Disconnect(association) LS to fc-nvme target in nvme_fc_delete_association()
3212 if (ctrl->association_id) in nvme_fc_delete_association()
3213 nvme_fc_xmt_disconnect_assoc(ctrl); in nvme_fc_delete_association()
3215 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_delete_association()
3216 ctrl->association_id = 0; in nvme_fc_delete_association()
3217 disls = ctrl->rcv_disconn; in nvme_fc_delete_association()
3218 ctrl->rcv_disconn = NULL; in nvme_fc_delete_association()
3219 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_delete_association()
3227 if (ctrl->ctrl.tagset) { in nvme_fc_delete_association()
3228 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_delete_association()
3229 nvme_fc_free_io_queues(ctrl); in nvme_fc_delete_association()
3232 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); in nvme_fc_delete_association()
3233 nvme_fc_free_queue(&ctrl->queues[0]); in nvme_fc_delete_association()
3235 /* re-enable the admin_q so anything new can fast fail */ in nvme_fc_delete_association()
3236 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_fc_delete_association()
3239 nvme_unquiesce_io_queues(&ctrl->ctrl); in nvme_fc_delete_association()
3241 nvme_fc_ctlr_inactive_on_rport(ctrl); in nvme_fc_delete_association()
3247 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_delete_ctrl() local
3249 cancel_work_sync(&ctrl->ioerr_work); in nvme_fc_delete_ctrl()
3250 cancel_delayed_work_sync(&ctrl->connect_work); in nvme_fc_delete_ctrl()
3255 nvme_fc_delete_association(ctrl); in nvme_fc_delete_ctrl()
3259 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) in nvme_fc_reconnect_or_delete() argument
3261 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_reconnect_or_delete()
3262 struct nvme_fc_remote_port *portptr = &rport->remoteport; in nvme_fc_reconnect_or_delete()
3263 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; in nvme_fc_reconnect_or_delete()
3266 if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING) in nvme_fc_reconnect_or_delete()
3269 if (portptr->port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_reconnect_or_delete()
3270 dev_info(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3271 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", in nvme_fc_reconnect_or_delete()
3272 ctrl->cnum, status); in nvme_fc_reconnect_or_delete()
3273 } else if (time_after_eq(jiffies, rport->dev_loss_end)) in nvme_fc_reconnect_or_delete()
3276 if (recon && nvmf_should_reconnect(&ctrl->ctrl, status)) { in nvme_fc_reconnect_or_delete()
3277 if (portptr->port_state == FC_OBJSTATE_ONLINE) in nvme_fc_reconnect_or_delete()
3278 dev_info(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3279 "NVME-FC{%d}: Reconnect attempt in %ld " in nvme_fc_reconnect_or_delete()
3281 ctrl->cnum, recon_delay / HZ); in nvme_fc_reconnect_or_delete()
3282 else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) in nvme_fc_reconnect_or_delete()
3283 recon_delay = rport->dev_loss_end - jiffies; in nvme_fc_reconnect_or_delete()
3285 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); in nvme_fc_reconnect_or_delete()
3287 if (portptr->port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_reconnect_or_delete()
3289 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3290 "NVME-FC{%d}: reconnect failure\n", in nvme_fc_reconnect_or_delete()
3291 ctrl->cnum); in nvme_fc_reconnect_or_delete()
3293 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3294 "NVME-FC{%d}: Max reconnect attempts " in nvme_fc_reconnect_or_delete()
3296 ctrl->cnum, ctrl->ctrl.nr_reconnects); in nvme_fc_reconnect_or_delete()
3298 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3299 "NVME-FC{%d}: dev_loss_tmo (%d) expired " in nvme_fc_reconnect_or_delete()
3301 ctrl->cnum, min_t(int, portptr->dev_loss_tmo, in nvme_fc_reconnect_or_delete()
3302 (ctrl->ctrl.opts->max_reconnects * in nvme_fc_reconnect_or_delete()
3303 ctrl->ctrl.opts->reconnect_delay))); in nvme_fc_reconnect_or_delete()
3304 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); in nvme_fc_reconnect_or_delete()
3311 struct nvme_fc_ctrl *ctrl = in nvme_fc_reset_ctrl_work() local
3312 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); in nvme_fc_reset_ctrl_work()
3314 nvme_stop_ctrl(&ctrl->ctrl); in nvme_fc_reset_ctrl_work()
3317 nvme_fc_delete_association(ctrl); in nvme_fc_reset_ctrl_work()
3319 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) in nvme_fc_reset_ctrl_work()
3320 dev_err(ctrl->ctrl.device, in nvme_fc_reset_ctrl_work()
3321 "NVME-FC{%d}: error_recovery: Couldn't change state " in nvme_fc_reset_ctrl_work()
3322 "to CONNECTING\n", ctrl->cnum); in nvme_fc_reset_ctrl_work()
3324 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_reset_ctrl_work()
3325 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { in nvme_fc_reset_ctrl_work()
3326 dev_err(ctrl->ctrl.device, in nvme_fc_reset_ctrl_work()
3327 "NVME-FC{%d}: failed to schedule connect " in nvme_fc_reset_ctrl_work()
3328 "after reset\n", ctrl->cnum); in nvme_fc_reset_ctrl_work()
3330 flush_delayed_work(&ctrl->connect_work); in nvme_fc_reset_ctrl_work()
3333 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); in nvme_fc_reset_ctrl_work()
3357 struct nvme_fc_ctrl *ctrl = in nvme_fc_connect_ctrl_work() local
3361 ret = nvme_fc_create_association(ctrl); in nvme_fc_connect_ctrl_work()
3363 nvme_fc_reconnect_or_delete(ctrl, ret); in nvme_fc_connect_ctrl_work()
3365 dev_info(ctrl->ctrl.device, in nvme_fc_connect_ctrl_work()
3366 "NVME-FC{%d}: controller connect complete\n", in nvme_fc_connect_ctrl_work()
3367 ctrl->cnum); in nvme_fc_connect_ctrl_work()
3393 struct nvme_fc_ctrl *ctrl; in nvme_fc_existing_controller() local
3397 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_existing_controller()
3398 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_existing_controller()
3399 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); in nvme_fc_existing_controller()
3403 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_existing_controller()
3409 nvme_fc_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, in nvme_fc_alloc_ctrl() argument
3412 struct nvme_fc_ctrl *ctrl; in nvme_fc_alloc_ctrl() local
3415 if (!(rport->remoteport.port_role & in nvme_fc_alloc_ctrl()
3417 ret = -EBADR; in nvme_fc_alloc_ctrl()
3421 if (!opts->duplicate_connect && in nvme_fc_alloc_ctrl()
3423 ret = -EALREADY; in nvme_fc_alloc_ctrl()
3427 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_fc_alloc_ctrl()
3428 if (!ctrl) { in nvme_fc_alloc_ctrl()
3429 ret = -ENOMEM; in nvme_fc_alloc_ctrl()
3435 ret = -ENOSPC; in nvme_fc_alloc_ctrl()
3443 if (opts->max_reconnects != -1 && in nvme_fc_alloc_ctrl()
3444 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && in nvme_fc_alloc_ctrl()
3445 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { in nvme_fc_alloc_ctrl()
3446 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; in nvme_fc_alloc_ctrl()
3447 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; in nvme_fc_alloc_ctrl()
3448 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, in nvme_fc_alloc_ctrl()
3449 opts->reconnect_delay); in nvme_fc_alloc_ctrl()
3452 ctrl->ctrl.opts = opts; in nvme_fc_alloc_ctrl()
3453 ctrl->ctrl.nr_reconnects = 0; in nvme_fc_alloc_ctrl()
3454 INIT_LIST_HEAD(&ctrl->ctrl_list); in nvme_fc_alloc_ctrl()
3455 ctrl->lport = lport; in nvme_fc_alloc_ctrl()
3456 ctrl->rport = rport; in nvme_fc_alloc_ctrl()
3457 ctrl->dev = lport->dev; in nvme_fc_alloc_ctrl()
3458 ctrl->cnum = idx; in nvme_fc_alloc_ctrl()
3459 ctrl->ioq_live = false; in nvme_fc_alloc_ctrl()
3460 init_waitqueue_head(&ctrl->ioabort_wait); in nvme_fc_alloc_ctrl()
3462 get_device(ctrl->dev); in nvme_fc_alloc_ctrl()
3463 kref_init(&ctrl->ref); in nvme_fc_alloc_ctrl()
3465 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); in nvme_fc_alloc_ctrl()
3466 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); in nvme_fc_alloc_ctrl()
3467 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); in nvme_fc_alloc_ctrl()
3468 spin_lock_init(&ctrl->lock); in nvme_fc_alloc_ctrl()
3471 ctrl->ctrl.queue_count = min_t(unsigned int, in nvme_fc_alloc_ctrl()
3472 opts->nr_io_queues, in nvme_fc_alloc_ctrl()
3473 lport->ops->max_hw_queues); in nvme_fc_alloc_ctrl()
3474 ctrl->ctrl.queue_count++; /* +1 for admin queue */ in nvme_fc_alloc_ctrl()
3476 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_fc_alloc_ctrl()
3477 ctrl->ctrl.kato = opts->kato; in nvme_fc_alloc_ctrl()
3478 ctrl->ctrl.cntlid = 0xffff; in nvme_fc_alloc_ctrl()
3480 ret = -ENOMEM; in nvme_fc_alloc_ctrl()
3481 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, in nvme_fc_alloc_ctrl()
3483 if (!ctrl->queues) in nvme_fc_alloc_ctrl()
3486 nvme_fc_init_queue(ctrl, 0); in nvme_fc_alloc_ctrl()
3495 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); in nvme_fc_alloc_ctrl()
3498 if (lport->dev) in nvme_fc_alloc_ctrl()
3499 ctrl->ctrl.numa_node = dev_to_node(lport->dev); in nvme_fc_alloc_ctrl()
3501 return ctrl; in nvme_fc_alloc_ctrl()
3504 kfree(ctrl->queues); in nvme_fc_alloc_ctrl()
3506 put_device(ctrl->dev); in nvme_fc_alloc_ctrl()
3507 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_alloc_ctrl()
3509 kfree(ctrl); in nvme_fc_alloc_ctrl()
3516 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, in nvme_fc_init_ctrl() argument
3519 struct nvme_fc_ctrl *ctrl; in nvme_fc_init_ctrl() local
3523 ctrl = nvme_fc_alloc_ctrl(dev, opts, lport, rport); in nvme_fc_init_ctrl()
3524 if (IS_ERR(ctrl)) in nvme_fc_init_ctrl()
3525 return ERR_CAST(ctrl); in nvme_fc_init_ctrl()
3527 ret = nvme_add_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3531 ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, in nvme_fc_init_ctrl()
3534 ctrl->lport->ops->fcprqst_priv_sz)); in nvme_fc_init_ctrl()
3538 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_init_ctrl()
3539 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); in nvme_fc_init_ctrl()
3540 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_init_ctrl()
3542 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_fc_init_ctrl()
3543 dev_err(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3544 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); in nvme_fc_init_ctrl()
3548 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { in nvme_fc_init_ctrl()
3549 dev_err(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3550 "NVME-FC{%d}: failed to schedule initial connect\n", in nvme_fc_init_ctrl()
3551 ctrl->cnum); in nvme_fc_init_ctrl()
3555 flush_delayed_work(&ctrl->connect_work); in nvme_fc_init_ctrl()
3557 dev_info(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3558 "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n", in nvme_fc_init_ctrl()
3559 ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn); in nvme_fc_init_ctrl()
3561 return &ctrl->ctrl; in nvme_fc_init_ctrl()
3564 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); in nvme_fc_init_ctrl()
3565 cancel_work_sync(&ctrl->ioerr_work); in nvme_fc_init_ctrl()
3566 cancel_work_sync(&ctrl->ctrl.reset_work); in nvme_fc_init_ctrl()
3567 cancel_delayed_work_sync(&ctrl->connect_work); in nvme_fc_init_ctrl()
3569 ctrl->ctrl.opts = NULL; in nvme_fc_init_ctrl()
3571 /* initiate nvme ctrl ref counting teardown */ in nvme_fc_init_ctrl()
3572 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3575 /* Remove core ctrl ref. */ in nvme_fc_init_ctrl()
3576 nvme_put_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3587 return ERR_PTR(-EIO); in nvme_fc_init_ctrl()
3601 return -EINVAL; in __nvme_fc_parse_u64()
3616 substring_t wwn = { name, &name[sizeof(name)-1] }; in nvme_fc_parse_traddr()
3621 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && in nvme_fc_parse_traddr()
3623 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { in nvme_fc_parse_traddr()
3628 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && in nvme_fc_parse_traddr()
3630 "pn-", NVME_FC_TRADDR_NNLEN))) { in nvme_fc_parse_traddr()
3641 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) in nvme_fc_parse_traddr()
3645 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) in nvme_fc_parse_traddr()
3652 return -EINVAL; in nvme_fc_parse_traddr()
3656 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) in nvme_fc_create_ctrl() argument
3660 struct nvme_ctrl *ctrl; in nvme_fc_create_ctrl() local
3666 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); in nvme_fc_create_ctrl()
3668 return ERR_PTR(-EINVAL); in nvme_fc_create_ctrl()
3670 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); in nvme_fc_create_ctrl()
3672 return ERR_PTR(-EINVAL); in nvme_fc_create_ctrl()
3677 if (lport->localport.node_name != laddr.nn || in nvme_fc_create_ctrl()
3678 lport->localport.port_name != laddr.pn || in nvme_fc_create_ctrl()
3679 lport->localport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_create_ctrl()
3682 list_for_each_entry(rport, &lport->endp_list, endp_list) { in nvme_fc_create_ctrl()
3683 if (rport->remoteport.node_name != raddr.nn || in nvme_fc_create_ctrl()
3684 rport->remoteport.port_name != raddr.pn || in nvme_fc_create_ctrl()
3685 rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_create_ctrl()
3694 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); in nvme_fc_create_ctrl()
3695 if (IS_ERR(ctrl)) in nvme_fc_create_ctrl()
3697 return ctrl; in nvme_fc_create_ctrl()
3702 pr_warn("%s: %s - %s combination not found\n", in nvme_fc_create_ctrl()
3703 __func__, opts->traddr, opts->host_traddr); in nvme_fc_create_ctrl()
3704 return ERR_PTR(-ENOENT); in nvme_fc_create_ctrl()
3719 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev, in nvme_fc_nvme_discovery_store() argument
3731 list_for_each_entry(rport, &lport->endp_list, endp_list) { in nvme_fc_nvme_discovery_store()
3753 if (list_empty(&rport->disc_list)) in nvme_fc_nvme_discovery_store()
3754 list_add_tail(&rport->disc_list, in nvme_fc_nvme_discovery_store()
3763 list_del_init(&rport->disc_list); in nvme_fc_nvme_discovery_store()
3766 lport = rport->lport; in nvme_fc_nvme_discovery_store()
3798 return -EINVAL; in fc_parse_cgrpid()
3800 return -EINVAL; in fc_parse_cgrpid()
3807 static ssize_t fc_appid_store(struct device *dev, in fc_appid_store() argument
3817 if (buf[count-1] == '\n') in fc_appid_store()
3818 count--; in fc_appid_store()
3821 return -EINVAL; in fc_appid_store()
3825 return -EINVAL; in fc_appid_store()
3826 appid_len = count - cgrpid_len - 1; in fc_appid_store()
3828 return -EINVAL; in fc_appid_store()
3869 * the FC-isms that are currently under scsi and now being in nvme_fc_init_module()
3874 * As we need something to post FC-specific udev events to, in nvme_fc_init_module()
3887 * Create a device for the FC-centric udev events in nvme_fc_init_module()
3914 struct nvme_fc_ctrl *ctrl; in nvme_fc_delete_controllers() local
3916 spin_lock(&rport->lock); in nvme_fc_delete_controllers()
3917 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_delete_controllers()
3918 dev_warn(ctrl->ctrl.device, in nvme_fc_delete_controllers()
3919 "NVME-FC{%d}: transport unloading: deleting ctrl\n", in nvme_fc_delete_controllers()
3920 ctrl->cnum); in nvme_fc_delete_controllers()
3921 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_delete_controllers()
3923 spin_unlock(&rport->lock); in nvme_fc_delete_controllers()
3934 list_for_each_entry(rport, &lport->endp_list, endp_list) in nvme_fc_exit_module()