Lines Matching +full:supports +full:- +full:cqe

1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/blk-mq.h>
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
55 struct list_head lsreq_list; /* tgtport->ls_req_list */
60 /* desired maximum for a single sequence - if sg list allows it */
93 struct list_head fcp_list; /* tgtport->fcp_list */
187 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
192 return (assoc->association_id | qid); in nvmet_fc_makeconnid()
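/*
 * Worked example (illustrative, not original source): association IDs
 * are generated with their low BYTES_FOR_QID_SHIFT bits clear (see the
 * get_random_bytes()/BYTES_FOR_QID handling further down), so OR-ing in
 * the 16-bit queue id yields a unique connection id per queue, and the
 * qid can be recovered with the mask above:
 *
 *	association_id                        = 0x0123456789ab0000
 *	nvmet_fc_makeconnid(assoc, 3)         = 0x0123456789ab0003
 *	connection_id & NVMET_FC_QUEUEID_MASK = 0x3
 */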
253 /* *********************** FC-NVME DMA Handling **************************** */
318 s->dma_address = 0L; in fc_map_sg()
320 s->dma_length = s->length; in fc_map_sg()
342 /* ********************** FC-NVME LS XMT Handling ************************* */
348 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; in __nvmet_fc_finish_ls_req()
349 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvmet_fc_finish_ls_req()
352 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
354 if (!lsop->req_queued) { in __nvmet_fc_finish_ls_req()
355 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
359 list_del(&lsop->lsreq_list); in __nvmet_fc_finish_ls_req()
361 lsop->req_queued = false; in __nvmet_fc_finish_ls_req()
363 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
365 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_finish_ls_req()
366 (lsreq->rqstlen + lsreq->rsplen), in __nvmet_fc_finish_ls_req()
370 queue_work(nvmet_wq, &tgtport->put_work); in __nvmet_fc_finish_ls_req()
378 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvmet_fc_send_ls_req()
382 if (!tgtport->ops->ls_req) in __nvmet_fc_send_ls_req()
383 return -EOPNOTSUPP; in __nvmet_fc_send_ls_req()
386 return -ESHUTDOWN; in __nvmet_fc_send_ls_req()
388 lsreq->done = done; in __nvmet_fc_send_ls_req()
389 lsop->req_queued = false; in __nvmet_fc_send_ls_req()
390 INIT_LIST_HEAD(&lsop->lsreq_list); in __nvmet_fc_send_ls_req()
392 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, in __nvmet_fc_send_ls_req()
393 lsreq->rqstlen + lsreq->rsplen, in __nvmet_fc_send_ls_req()
395 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { in __nvmet_fc_send_ls_req()
396 ret = -EFAULT; in __nvmet_fc_send_ls_req()
399 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; in __nvmet_fc_send_ls_req()
401 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
403 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); in __nvmet_fc_send_ls_req()
405 lsop->req_queued = true; in __nvmet_fc_send_ls_req()
407 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
409 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, in __nvmet_fc_send_ls_req()
417 lsop->ls_error = ret; in __nvmet_fc_send_ls_req()
418 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
419 lsop->req_queued = false; in __nvmet_fc_send_ls_req()
420 list_del(&lsop->lsreq_list); in __nvmet_fc_send_ls_req()
421 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
422 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_send_ls_req()
423 (lsreq->rqstlen + lsreq->rsplen), in __nvmet_fc_send_ls_req()
449 /* fc-nvme target doesn't care about success or failure of cmd */ in nvmet_fc_disconnect_assoc_done()
455 * This routine sends a FC-NVME LS to disconnect (aka terminate)
456 * the FC-NVME Association. Terminating the association also
457 * terminates the FC-NVME connections (per queue, both admin and io
459 * down, and the related FC-NVME Association ID and Connection IDs
462 * The behavior of the fc-nvme target is such that it's
465 * connectivity with the fc-nvme host, so the target may never get a
468 * continue on with terminating the association. If the fc-nvme host
474 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_xmt_disconnect_assoc()
486 if (!tgtport->ops->ls_req || assoc->hostport->invalid) in nvmet_fc_xmt_disconnect_assoc()
491 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvmet_fc_xmt_disconnect_assoc()
493 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
495 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_xmt_disconnect_assoc()
501 lsreq = &lsop->ls_req; in nvmet_fc_xmt_disconnect_assoc()
502 if (tgtport->ops->lsrqst_priv_sz) in nvmet_fc_xmt_disconnect_assoc()
503 lsreq->private = (void *)&discon_acc[1]; in nvmet_fc_xmt_disconnect_assoc()
505 lsreq->private = NULL; in nvmet_fc_xmt_disconnect_assoc()
507 lsop->tgtport = tgtport; in nvmet_fc_xmt_disconnect_assoc()
508 lsop->hosthandle = assoc->hostport->hosthandle; in nvmet_fc_xmt_disconnect_assoc()
511 assoc->association_id); in nvmet_fc_xmt_disconnect_assoc()
516 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
518 tgtport->fc_target_port.port_num, assoc->a_id, ret); in nvmet_fc_xmt_disconnect_assoc()
524 /* *********************** FC-NVME Port Management ************************ */
536 return -ENOMEM; in nvmet_fc_alloc_ls_iodlist()
538 tgtport->iod = iod; in nvmet_fc_alloc_ls_iodlist()
541 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); in nvmet_fc_alloc_ls_iodlist()
542 iod->tgtport = tgtport; in nvmet_fc_alloc_ls_iodlist()
543 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
545 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + in nvmet_fc_alloc_ls_iodlist()
548 if (!iod->rqstbuf) in nvmet_fc_alloc_ls_iodlist()
551 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; in nvmet_fc_alloc_ls_iodlist()
553 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, in nvmet_fc_alloc_ls_iodlist()
554 sizeof(*iod->rspbuf), in nvmet_fc_alloc_ls_iodlist()
556 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) in nvmet_fc_alloc_ls_iodlist()
563 kfree(iod->rqstbuf); in nvmet_fc_alloc_ls_iodlist()
564 list_del(&iod->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
565 for (iod--, i--; i >= 0; iod--, i--) { in nvmet_fc_alloc_ls_iodlist()
566 fc_dma_unmap_single(tgtport->dev, iod->rspdma, in nvmet_fc_alloc_ls_iodlist()
567 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_alloc_ls_iodlist()
568 kfree(iod->rqstbuf); in nvmet_fc_alloc_ls_iodlist()
569 list_del(&iod->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
574 return -EFAULT; in nvmet_fc_alloc_ls_iodlist()
580 struct nvmet_fc_ls_iod *iod = tgtport->iod; in nvmet_fc_free_ls_iodlist()
584 fc_dma_unmap_single(tgtport->dev, in nvmet_fc_free_ls_iodlist()
585 iod->rspdma, sizeof(*iod->rspbuf), in nvmet_fc_free_ls_iodlist()
587 kfree(iod->rqstbuf); in nvmet_fc_free_ls_iodlist()
588 list_del(&iod->ls_rcv_list); in nvmet_fc_free_ls_iodlist()
590 kfree(tgtport->iod); in nvmet_fc_free_ls_iodlist()
599 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
600 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, in nvmet_fc_alloc_ls_iod()
603 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); in nvmet_fc_alloc_ls_iod()
604 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
615 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
616 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_free_ls_iod()
617 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
624 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_prep_fcp_iodlist()
627 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_prep_fcp_iodlist()
628 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); in nvmet_fc_prep_fcp_iodlist()
629 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
630 fod->queue = queue; in nvmet_fc_prep_fcp_iodlist()
631 fod->active = false; in nvmet_fc_prep_fcp_iodlist()
632 fod->abort = false; in nvmet_fc_prep_fcp_iodlist()
633 fod->aborted = false; in nvmet_fc_prep_fcp_iodlist()
634 fod->fcpreq = NULL; in nvmet_fc_prep_fcp_iodlist()
635 list_add_tail(&fod->fcp_list, &queue->fod_list); in nvmet_fc_prep_fcp_iodlist()
636 spin_lock_init(&fod->flock); in nvmet_fc_prep_fcp_iodlist()
638 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
639 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_iodlist()
640 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
641 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
642 for (fod--, i--; i >= 0; fod--, i--) { in nvmet_fc_prep_fcp_iodlist()
643 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
644 sizeof(fod->rspiubuf), in nvmet_fc_prep_fcp_iodlist()
646 fod->rspdma = 0L; in nvmet_fc_prep_fcp_iodlist()
647 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
659 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_destroy_fcp_iodlist()
662 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_destroy_fcp_iodlist()
663 if (fod->rspdma) in nvmet_fc_destroy_fcp_iodlist()
664 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
665 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_destroy_fcp_iodlist()
674 lockdep_assert_held(&queue->qlock); in nvmet_fc_alloc_fcp_iod()
676 fod = list_first_entry_or_null(&queue->fod_list, in nvmet_fc_alloc_fcp_iod()
679 list_del(&fod->fcp_list); in nvmet_fc_alloc_fcp_iod()
680 fod->active = true; in nvmet_fc_alloc_fcp_iod()
696 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_queue_fcp_req()
702 fcpreq->hwqid = queue->qid ? in nvmet_fc_queue_fcp_req()
703 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; in nvmet_fc_queue_fcp_req()
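/*
 * Worked example (illustrative): with tgtport->ops->max_hw_queues == 4,
 * the admin queue (qid 0) always maps to hwqid 0 and the I/O queues are
 * spread round-robin over the LLDD's hardware queues:
 *
 *	qid:    1  2  3  4  5  6  7  8
 *	hwqid:  0  1  2  3  0  1  2  3
 */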
715 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
723 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_free_fcp_iod()
724 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod()
728 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
729 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_free_fcp_iod()
731 fcpreq->nvmet_fc_private = NULL; in nvmet_fc_free_fcp_iod()
733 fod->active = false; in nvmet_fc_free_fcp_iod()
734 fod->abort = false; in nvmet_fc_free_fcp_iod()
735 fod->aborted = false; in nvmet_fc_free_fcp_iod()
736 fod->writedataactive = false; in nvmet_fc_free_fcp_iod()
737 fod->fcpreq = NULL; in nvmet_fc_free_fcp_iod()
739 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
744 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
745 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, in nvmet_fc_free_fcp_iod()
748 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); in nvmet_fc_free_fcp_iod()
749 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
753 /* Re-use the fod for the next pending cmd that was deferred */ in nvmet_fc_free_fcp_iod()
754 list_del(&deferfcp->req_list); in nvmet_fc_free_fcp_iod()
756 fcpreq = deferfcp->fcp_req; in nvmet_fc_free_fcp_iod()
759 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); in nvmet_fc_free_fcp_iod()
761 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
764 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); in nvmet_fc_free_fcp_iod()
767 fcpreq->rspaddr = NULL; in nvmet_fc_free_fcp_iod()
768 fcpreq->rsplen = 0; in nvmet_fc_free_fcp_iod()
769 fcpreq->nvmet_fc_private = fod; in nvmet_fc_free_fcp_iod()
770 fod->fcpreq = fcpreq; in nvmet_fc_free_fcp_iod()
771 fod->active = true; in nvmet_fc_free_fcp_iod()
774 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
781 queue_work(queue->work_q, &fod->defer_work); in nvmet_fc_free_fcp_iod()
798 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, in nvmet_fc_alloc_target_queue()
799 assoc->tgtport->fc_target_port.port_num, in nvmet_fc_alloc_target_queue()
800 assoc->a_id, qid); in nvmet_fc_alloc_target_queue()
801 if (!queue->work_q) in nvmet_fc_alloc_target_queue()
804 queue->qid = qid; in nvmet_fc_alloc_target_queue()
805 queue->sqsize = sqsize; in nvmet_fc_alloc_target_queue()
806 queue->assoc = assoc; in nvmet_fc_alloc_target_queue()
807 INIT_LIST_HEAD(&queue->fod_list); in nvmet_fc_alloc_target_queue()
808 INIT_LIST_HEAD(&queue->avail_defer_list); in nvmet_fc_alloc_target_queue()
809 INIT_LIST_HEAD(&queue->pending_cmd_list); in nvmet_fc_alloc_target_queue()
810 atomic_set(&queue->connected, 0); in nvmet_fc_alloc_target_queue()
811 atomic_set(&queue->sqtail, 0); in nvmet_fc_alloc_target_queue()
812 atomic_set(&queue->rsn, 1); in nvmet_fc_alloc_target_queue()
813 atomic_set(&queue->zrspcnt, 0); in nvmet_fc_alloc_target_queue()
814 spin_lock_init(&queue->qlock); in nvmet_fc_alloc_target_queue()
815 kref_init(&queue->ref); in nvmet_fc_alloc_target_queue()
817 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
819 nvmet_cq_init(&queue->nvme_cq); in nvmet_fc_alloc_target_queue()
820 ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); in nvmet_fc_alloc_target_queue()
824 WARN_ON(assoc->queues[qid]); in nvmet_fc_alloc_target_queue()
825 assoc->queues[qid] = queue; in nvmet_fc_alloc_target_queue()
830 nvmet_cq_put(&queue->nvme_cq); in nvmet_fc_alloc_target_queue()
831 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
832 destroy_workqueue(queue->work_q); in nvmet_fc_alloc_target_queue()
845 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); in nvmet_fc_tgt_queue_free()
847 destroy_workqueue(queue->work_q); in nvmet_fc_tgt_queue_free()
855 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); in nvmet_fc_tgt_q_put()
861 return kref_get_unless_zero(&queue->ref); in nvmet_fc_tgt_q_get()
868 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; in nvmet_fc_delete_target_queue()
869 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_delete_target_queue()
875 disconnect = atomic_xchg(&queue->connected, 0); in nvmet_fc_delete_target_queue()
881 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
883 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_delete_target_queue()
884 if (fod->active) { in nvmet_fc_delete_target_queue()
885 spin_lock(&fod->flock); in nvmet_fc_delete_target_queue()
886 fod->abort = true; in nvmet_fc_delete_target_queue()
892 if (fod->writedataactive) { in nvmet_fc_delete_target_queue()
893 fod->aborted = true; in nvmet_fc_delete_target_queue()
894 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
895 tgtport->ops->fcp_abort( in nvmet_fc_delete_target_queue()
896 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
898 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
903 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, in nvmet_fc_delete_target_queue()
905 list_del(&deferfcp->req_list); in nvmet_fc_delete_target_queue()
910 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, in nvmet_fc_delete_target_queue()
915 list_del(&deferfcp->req_list); in nvmet_fc_delete_target_queue()
916 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
918 tgtport->ops->defer_rcv(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
919 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
921 tgtport->ops->fcp_abort(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
922 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
924 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
925 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
932 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
934 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
936 flush_workqueue(queue->work_q); in nvmet_fc_delete_target_queue()
938 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_fc_delete_target_queue()
939 nvmet_cq_put(&queue->nvme_cq); in nvmet_fc_delete_target_queue()
957 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_queue()
958 if (association_id == assoc->association_id) { in nvmet_fc_find_target_queue()
959 queue = assoc->queues[qid]; in nvmet_fc_find_target_queue()
961 (!atomic_read(&queue->connected) || in nvmet_fc_find_target_queue()
977 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; in nvmet_fc_hostport_free()
980 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_hostport_free()
981 list_del(&hostport->host_list); in nvmet_fc_hostport_free()
982 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_hostport_free()
983 if (tgtport->ops->host_release && hostport->invalid) in nvmet_fc_hostport_free()
984 tgtport->ops->host_release(hostport->hosthandle); in nvmet_fc_hostport_free()
992 kref_put(&hostport->ref, nvmet_fc_hostport_free); in nvmet_fc_hostport_put()
998 return kref_get_unless_zero(&hostport->ref); in nvmet_fc_hostport_get()
1006 lockdep_assert_held(&tgtport->lock); in nvmet_fc_match_hostport()
1008 list_for_each_entry(host, &tgtport->host_list, host_list) { in nvmet_fc_match_hostport()
1009 if (host->hosthandle == hosthandle && !host->invalid) { in nvmet_fc_match_hostport()
1032 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1034 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1041 return ERR_PTR(-ENOMEM); in nvmet_fc_alloc_hostport()
1043 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1051 newhost->tgtport = tgtport; in nvmet_fc_alloc_hostport()
1052 newhost->hosthandle = hosthandle; in nvmet_fc_alloc_hostport()
1053 INIT_LIST_HEAD(&newhost->host_list); in nvmet_fc_alloc_hostport()
1054 kref_init(&newhost->ref); in nvmet_fc_alloc_hostport()
1056 list_add_tail(&newhost->host_list, &tgtport->host_list); in nvmet_fc_alloc_hostport()
1058 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1068 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_assoc_work()
1078 nvmet_fc_tgtport_get(assoc->tgtport); in nvmet_fc_schedule_delete_assoc()
1079 if (!queue_work(nvmet_wq, &assoc->del_work)) in nvmet_fc_schedule_delete_assoc()
1080 nvmet_fc_tgtport_put(assoc->tgtport); in nvmet_fc_schedule_delete_assoc()
1090 list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { in nvmet_fc_assoc_exists()
1091 if (association_id == a->association_id) { in nvmet_fc_assoc_exists()
1110 if (!tgtport->pe) in nvmet_fc_alloc_target_assoc()
1117 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); in nvmet_fc_alloc_target_assoc()
1121 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); in nvmet_fc_alloc_target_assoc()
1122 if (IS_ERR(assoc->hostport)) in nvmet_fc_alloc_target_assoc()
1125 assoc->tgtport = tgtport; in nvmet_fc_alloc_target_assoc()
1127 assoc->a_id = idx; in nvmet_fc_alloc_target_assoc()
1128 INIT_LIST_HEAD(&assoc->a_list); in nvmet_fc_alloc_target_assoc()
1129 kref_init(&assoc->ref); in nvmet_fc_alloc_target_assoc()
1130 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); in nvmet_fc_alloc_target_assoc()
1131 atomic_set(&assoc->terminating, 0); in nvmet_fc_alloc_target_assoc()
1135 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); in nvmet_fc_alloc_target_assoc()
1138 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1140 assoc->association_id = ran; in nvmet_fc_alloc_target_assoc()
1141 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); in nvmet_fc_alloc_target_assoc()
1144 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1150 ida_free(&tgtport->assoc_cnt, idx); in nvmet_fc_alloc_target_assoc()
1161 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_target_assoc_free()
1166 for (i = NVMET_NR_QUEUES; i >= 0; i--) { in nvmet_fc_target_assoc_free()
1167 if (assoc->queues[i]) in nvmet_fc_target_assoc_free()
1168 nvmet_fc_delete_target_queue(assoc->queues[i]); in nvmet_fc_target_assoc_free()
1174 nvmet_fc_hostport_put(assoc->hostport); in nvmet_fc_target_assoc_free()
1175 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1176 oldls = assoc->rcv_disconn; in nvmet_fc_target_assoc_free()
1177 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1181 ida_free(&tgtport->assoc_cnt, assoc->a_id); in nvmet_fc_target_assoc_free()
1182 dev_info(tgtport->dev, in nvmet_fc_target_assoc_free()
1184 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_target_assoc_free()
1191 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); in nvmet_fc_tgt_a_put()
1197 return kref_get_unless_zero(&assoc->ref); in nvmet_fc_tgt_a_get()
1203 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_target_assoc()
1207 terminating = atomic_xchg(&assoc->terminating, 1); in nvmet_fc_delete_target_assoc()
1213 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1214 list_del_rcu(&assoc->a_list); in nvmet_fc_delete_target_assoc()
1215 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1219 /* ensure all in-flight I/Os have been processed */ in nvmet_fc_delete_target_assoc()
1220 for (i = NVMET_NR_QUEUES; i >= 0; i--) { in nvmet_fc_delete_target_assoc()
1221 if (assoc->queues[i]) in nvmet_fc_delete_target_assoc()
1222 flush_workqueue(assoc->queues[i]->work_q); in nvmet_fc_delete_target_assoc()
1225 dev_info(tgtport->dev, in nvmet_fc_delete_target_assoc()
1227 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_delete_target_assoc()
1240 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_assoc()
1241 if (association_id == assoc->association_id) { in nvmet_fc_find_target_assoc()
1261 pe->tgtport = tgtport; in nvmet_fc_portentry_bind()
1262 tgtport->pe = pe; in nvmet_fc_portentry_bind()
1264 pe->port = port; in nvmet_fc_portentry_bind()
1265 port->priv = pe; in nvmet_fc_portentry_bind()
1267 pe->node_name = tgtport->fc_target_port.node_name; in nvmet_fc_portentry_bind()
1268 pe->port_name = tgtport->fc_target_port.port_name; in nvmet_fc_portentry_bind()
1269 INIT_LIST_HEAD(&pe->pe_list); in nvmet_fc_portentry_bind()
1271 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); in nvmet_fc_portentry_bind()
1280 if (pe->tgtport) { in nvmet_fc_portentry_unbind()
1281 nvmet_fc_tgtport_put(pe->tgtport); in nvmet_fc_portentry_unbind()
1282 pe->tgtport->pe = NULL; in nvmet_fc_portentry_unbind()
1284 list_del(&pe->pe_list); in nvmet_fc_portentry_unbind()
1291 * re-registration can resume operation.
1300 pe = tgtport->pe; in nvmet_fc_portentry_unbind_tgt()
1302 nvmet_fc_tgtport_put(pe->tgtport); in nvmet_fc_portentry_unbind_tgt()
1303 pe->tgtport = NULL; in nvmet_fc_portentry_unbind_tgt()
1305 tgtport->pe = NULL; in nvmet_fc_portentry_unbind_tgt()
1325 if (tgtport->fc_target_port.node_name == pe->node_name && in nvmet_fc_portentry_rebind_tgt()
1326 tgtport->fc_target_port.port_name == pe->port_name) { in nvmet_fc_portentry_rebind_tgt()
1330 WARN_ON(pe->tgtport); in nvmet_fc_portentry_rebind_tgt()
1331 tgtport->pe = pe; in nvmet_fc_portentry_rebind_tgt()
1332 pe->tgtport = tgtport; in nvmet_fc_portentry_rebind_tgt()
1340 * nvmet_fc_register_targetport - transport entry point called by an
1354 * (ex: -ENXIO) upon failure.
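/*
 * Illustrative LLDD-side sketch (not part of this file; the lldd_*
 * names below are hypothetical): the driver fills a nvmet_fc_port_info
 * with its WWNN/WWPN and fabric-assigned port id, supplies a template
 * implementing at least the mandatory handlers and sizing fields
 * checked just below, and registers:
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = lldd_wwnn,
 *		.port_name = lldd_wwpn,
 *		.port_id   = lldd_d_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   &lldd_dev, &targetport);
 *
 * The returned targetport is what the LLDD later passes to
 * nvmet_fc_rcv_ls_req()/nvmet_fc_rcv_fcp_req() and, at teardown, to
 * nvmet_fc_unregister_targetport().
 */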
1366 if (!template->xmt_ls_rsp || !template->fcp_op || in nvmet_fc_register_targetport()
1367 !template->fcp_abort || in nvmet_fc_register_targetport()
1368 !template->fcp_req_release || !template->targetport_delete || in nvmet_fc_register_targetport()
1369 !template->max_hw_queues || !template->max_sgl_segments || in nvmet_fc_register_targetport()
1370 !template->max_dif_sgl_segments || !template->dma_boundary) { in nvmet_fc_register_targetport()
1371 ret = -EINVAL; in nvmet_fc_register_targetport()
1375 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), in nvmet_fc_register_targetport()
1378 ret = -ENOMEM; in nvmet_fc_register_targetport()
1384 ret = -ENOSPC; in nvmet_fc_register_targetport()
1389 ret = -ENODEV; in nvmet_fc_register_targetport()
1393 newrec->fc_target_port.node_name = pinfo->node_name; in nvmet_fc_register_targetport()
1394 newrec->fc_target_port.port_name = pinfo->port_name; in nvmet_fc_register_targetport()
1395 if (template->target_priv_sz) in nvmet_fc_register_targetport()
1396 newrec->fc_target_port.private = &newrec[1]; in nvmet_fc_register_targetport()
1398 newrec->fc_target_port.private = NULL; in nvmet_fc_register_targetport()
1399 newrec->fc_target_port.port_id = pinfo->port_id; in nvmet_fc_register_targetport()
1400 newrec->fc_target_port.port_num = idx; in nvmet_fc_register_targetport()
1401 INIT_LIST_HEAD(&newrec->tgt_list); in nvmet_fc_register_targetport()
1402 newrec->dev = dev; in nvmet_fc_register_targetport()
1403 newrec->ops = template; in nvmet_fc_register_targetport()
1404 spin_lock_init(&newrec->lock); in nvmet_fc_register_targetport()
1405 INIT_LIST_HEAD(&newrec->ls_rcv_list); in nvmet_fc_register_targetport()
1406 INIT_LIST_HEAD(&newrec->ls_req_list); in nvmet_fc_register_targetport()
1407 INIT_LIST_HEAD(&newrec->ls_busylist); in nvmet_fc_register_targetport()
1408 INIT_LIST_HEAD(&newrec->assoc_list); in nvmet_fc_register_targetport()
1409 INIT_LIST_HEAD(&newrec->host_list); in nvmet_fc_register_targetport()
1410 kref_init(&newrec->ref); in nvmet_fc_register_targetport()
1411 ida_init(&newrec->assoc_cnt); in nvmet_fc_register_targetport()
1412 newrec->max_sg_cnt = template->max_sgl_segments; in nvmet_fc_register_targetport()
1413 INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); in nvmet_fc_register_targetport()
1417 ret = -ENOMEM; in nvmet_fc_register_targetport()
1424 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); in nvmet_fc_register_targetport()
1427 *portptr = &newrec->fc_target_port; in nvmet_fc_register_targetport()
1448 struct device *dev = tgtport->dev; in nvmet_fc_free_tgtport()
1453 tgtport->ops->targetport_delete(&tgtport->fc_target_port); in nvmet_fc_free_tgtport()
1456 tgtport->fc_target_port.port_num); in nvmet_fc_free_tgtport()
1458 ida_destroy(&tgtport->assoc_cnt); in nvmet_fc_free_tgtport()
1468 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); in nvmet_fc_tgtport_put()
1474 return kref_get_unless_zero(&tgtport->ref); in nvmet_fc_tgtport_get()
1483 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in __nvmet_fc_free_assocs()
1493 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1496 * The nvmet-fc layer ensures that any references to the hosthandle
1506 * retries by the nvmet-fc transport. The nvmet-fc transport may
1508 * NVME associations. The nvmet-fc transport will call the
1509 * ops->host_release() callback to notify the LLDD that all references
1530 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1532 &tgtport->assoc_list, a_list) { in nvmet_fc_invalidate_host()
1533 if (assoc->hostport->hosthandle != hosthandle) in nvmet_fc_invalidate_host()
1537 assoc->hostport->invalid = 1; in nvmet_fc_invalidate_host()
1542 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1544 /* if there's nothing to wait for - call the callback */ in nvmet_fc_invalidate_host()
1545 if (noassoc && tgtport->ops->host_release) in nvmet_fc_invalidate_host()
1546 tgtport->ops->host_release(hosthandle); in nvmet_fc_invalidate_host()
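/*
 * Illustrative LLDD-side sketch (hypothetical names): when the driver
 * decides it has lost connectivity with a remote NVMe host, it
 * invalidates the matching hosthandle so the transport tears down that
 * host's associations and, once all references are dropped, invokes the
 * optional host_release() callback:
 *
 *	nvmet_fc_invalidate_host(targetport, rport_handle);
 */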
1571 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_delete_ctrl()
1572 queue = assoc->queues[0]; in nvmet_fc_delete_ctrl()
1573 if (queue && queue->nvme_sq.ctrl == ctrl) { in nvmet_fc_delete_ctrl()
1602 iod = tgtport->iod; in nvmet_fc_free_pending_reqs()
1604 cancel_work(&iod->work); in nvmet_fc_free_pending_reqs()
1611 while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list, in nvmet_fc_free_pending_reqs()
1613 list_del(&lsop->lsreq_list); in nvmet_fc_free_pending_reqs()
1615 if (!lsop->req_queued) in nvmet_fc_free_pending_reqs()
1618 lsreq = &lsop->ls_req; in nvmet_fc_free_pending_reqs()
1619 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in nvmet_fc_free_pending_reqs()
1620 (lsreq->rqstlen + lsreq->rsplen), in nvmet_fc_free_pending_reqs()
1628 * nvmet_fc_unregister_targetport - transport entry point called by an
1636 * (ex: -ENXIO) upon failure.
1645 list_del(&tgtport->tgt_list); in nvmet_fc_unregister_targetport()
1663 /* ********************** FC-NVME LS RCV Handling ************************* */
1670 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; in nvmet_fc_ls_create_association()
1671 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; in nvmet_fc_ls_create_association()
1678 * FC-NVME spec changes. There are initiators sending different in nvmet_fc_ls_create_association()
1685 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) in nvmet_fc_ls_create_association()
1687 else if (be32_to_cpu(rqst->desc_list_len) < in nvmet_fc_ls_create_association()
1690 else if (rqst->assoc_cmd.desc_tag != in nvmet_fc_ls_create_association()
1693 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < in nvmet_fc_ls_create_association()
1696 else if (!rqst->assoc_cmd.ersp_ratio || in nvmet_fc_ls_create_association()
1697 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= in nvmet_fc_ls_create_association()
1698 be16_to_cpu(rqst->assoc_cmd.sqsize))) in nvmet_fc_ls_create_association()
1703 iod->assoc = nvmet_fc_alloc_target_assoc( in nvmet_fc_ls_create_association()
1704 tgtport, iod->hosthandle); in nvmet_fc_ls_create_association()
1705 if (!iod->assoc) in nvmet_fc_ls_create_association()
1708 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, in nvmet_fc_ls_create_association()
1709 be16_to_cpu(rqst->assoc_cmd.sqsize)); in nvmet_fc_ls_create_association()
1712 nvmet_fc_tgt_a_put(iod->assoc); in nvmet_fc_ls_create_association()
1718 dev_err(tgtport->dev, in nvmet_fc_ls_create_association()
1721 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_create_association()
1722 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_create_association()
1728 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); in nvmet_fc_ls_create_association()
1729 atomic_set(&queue->connected, 1); in nvmet_fc_ls_create_association()
1730 queue->sqhd = 0; /* best place to init value */ in nvmet_fc_ls_create_association()
1732 dev_info(tgtport->dev, in nvmet_fc_ls_create_association()
1734 tgtport->fc_target_port.port_num, iod->assoc->a_id); in nvmet_fc_ls_create_association()
1738 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_create_association()
1744 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); in nvmet_fc_ls_create_association()
1745 acc->associd.desc_len = in nvmet_fc_ls_create_association()
1748 acc->associd.association_id = in nvmet_fc_ls_create_association()
1749 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); in nvmet_fc_ls_create_association()
1750 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); in nvmet_fc_ls_create_association()
1751 acc->connectid.desc_len = in nvmet_fc_ls_create_association()
1754 acc->connectid.connection_id = acc->associd.association_id; in nvmet_fc_ls_create_association()
1761 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; in nvmet_fc_ls_create_connection()
1762 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; in nvmet_fc_ls_create_connection()
1768 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) in nvmet_fc_ls_create_connection()
1770 else if (rqst->desc_list_len != in nvmet_fc_ls_create_connection()
1774 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) in nvmet_fc_ls_create_connection()
1776 else if (rqst->associd.desc_len != in nvmet_fc_ls_create_connection()
1780 else if (rqst->connect_cmd.desc_tag != in nvmet_fc_ls_create_connection()
1783 else if (rqst->connect_cmd.desc_len != in nvmet_fc_ls_create_connection()
1787 else if (!rqst->connect_cmd.ersp_ratio || in nvmet_fc_ls_create_connection()
1788 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= in nvmet_fc_ls_create_connection()
1789 be16_to_cpu(rqst->connect_cmd.sqsize))) in nvmet_fc_ls_create_connection()
1794 iod->assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_create_connection()
1795 be64_to_cpu(rqst->associd.association_id)); in nvmet_fc_ls_create_connection()
1796 if (!iod->assoc) in nvmet_fc_ls_create_connection()
1799 queue = nvmet_fc_alloc_target_queue(iod->assoc, in nvmet_fc_ls_create_connection()
1800 be16_to_cpu(rqst->connect_cmd.qid), in nvmet_fc_ls_create_connection()
1801 be16_to_cpu(rqst->connect_cmd.sqsize)); in nvmet_fc_ls_create_connection()
1806 nvmet_fc_tgt_a_put(iod->assoc); in nvmet_fc_ls_create_connection()
1811 dev_err(tgtport->dev, in nvmet_fc_ls_create_connection()
1814 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_create_connection()
1815 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_create_connection()
1823 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); in nvmet_fc_ls_create_connection()
1824 atomic_set(&queue->connected, 1); in nvmet_fc_ls_create_connection()
1825 queue->sqhd = 0; /* best place to init value */ in nvmet_fc_ls_create_connection()
1829 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_create_connection()
1834 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); in nvmet_fc_ls_create_connection()
1835 acc->connectid.desc_len = in nvmet_fc_ls_create_connection()
1838 acc->connectid.connection_id = in nvmet_fc_ls_create_connection()
1839 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, in nvmet_fc_ls_create_connection()
1840 be16_to_cpu(rqst->connect_cmd.qid))); in nvmet_fc_ls_create_connection()
1852 &iod->rqstbuf->rq_dis_assoc; in nvmet_fc_ls_disconnect()
1854 &iod->rspbuf->rsp_dis_assoc; in nvmet_fc_ls_disconnect()
1862 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); in nvmet_fc_ls_disconnect()
1864 /* match an active association - takes an assoc ref if !NULL */ in nvmet_fc_ls_disconnect()
1866 be64_to_cpu(rqst->associd.association_id)); in nvmet_fc_ls_disconnect()
1867 iod->assoc = assoc; in nvmet_fc_ls_disconnect()
1873 dev_err(tgtport->dev, in nvmet_fc_ls_disconnect()
1876 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_disconnect()
1877 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_disconnect()
1887 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_disconnect()
1903 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1904 oldls = assoc->rcv_disconn; in nvmet_fc_ls_disconnect()
1905 assoc->rcv_disconn = iod; in nvmet_fc_ls_disconnect()
1906 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1909 dev_info(tgtport->dev, in nvmet_fc_ls_disconnect()
1912 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_ls_disconnect()
1914 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, in nvmet_fc_ls_disconnect()
1915 sizeof(*iod->rspbuf), in nvmet_fc_ls_disconnect()
1917 rqst->w0.ls_cmd, in nvmet_fc_ls_disconnect()
1940 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; in nvmet_fc_xmt_ls_rsp_done()
1941 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_xmt_ls_rsp_done()
1943 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp_done()
1944 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_xmt_ls_rsp_done()
1955 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp()
1956 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_xmt_ls_rsp()
1958 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1960 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1964 * Actual processing routine for received FC-NVME LS Requests from the LLD
1970 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; in nvmet_fc_handle_ls_rqst()
1973 iod->lsrsp->nvme_fc_private = iod; in nvmet_fc_handle_ls_rqst()
1974 iod->lsrsp->rspbuf = iod->rspbuf; in nvmet_fc_handle_ls_rqst()
1975 iod->lsrsp->rspdma = iod->rspdma; in nvmet_fc_handle_ls_rqst()
1976 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; in nvmet_fc_handle_ls_rqst()
1978 iod->lsrsp->rsplen = 0; in nvmet_fc_handle_ls_rqst()
1980 iod->assoc = NULL; in nvmet_fc_handle_ls_rqst()
1987 switch (w0->ls_cmd) { in nvmet_fc_handle_ls_rqst()
2001 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, in nvmet_fc_handle_ls_rqst()
2002 sizeof(*iod->rspbuf), w0->ls_cmd, in nvmet_fc_handle_ls_rqst()
2011 * Actual processing routine for received FC-NVME LS Requests from the LLD
2018 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_handle_ls_rqst_work()
2025 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
2028 * The nvmet-fc layer will copy payload to an internal structure for
2053 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2055 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2056 nvmefc_ls_names[w0->ls_cmd] : "", in nvmet_fc_rcv_ls_req()
2058 return -E2BIG; in nvmet_fc_rcv_ls_req()
2062 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2064 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2065 nvmefc_ls_names[w0->ls_cmd] : ""); in nvmet_fc_rcv_ls_req()
2066 return -ESHUTDOWN; in nvmet_fc_rcv_ls_req()
2071 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2073 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2074 nvmefc_ls_names[w0->ls_cmd] : ""); in nvmet_fc_rcv_ls_req()
2076 return -ENOENT; in nvmet_fc_rcv_ls_req()
2079 iod->lsrsp = lsrsp; in nvmet_fc_rcv_ls_req()
2080 iod->fcpreq = NULL; in nvmet_fc_rcv_ls_req()
2081 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); in nvmet_fc_rcv_ls_req()
2082 iod->rqstdatalen = lsreqbuf_len; in nvmet_fc_rcv_ls_req()
2083 iod->hosthandle = hosthandle; in nvmet_fc_rcv_ls_req()
2085 queue_work(nvmet_wq, &iod->work); in nvmet_fc_rcv_ls_req()
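/*
 * Illustrative LLDD-side sketch (hypothetical names): on receiving an
 * FC-NVME LS exchange the driver hands the payload to the transport and
 * keeps the nvmefc_ls_rsp until its done() callback runs:
 *
 *	ret = nvmet_fc_rcv_ls_req(targetport, rport_handle,
 *				  &lsxchg->ls_rsp, lsxchg->rqstbuf,
 *				  lsxchg->rqstlen);
 *	if (ret)
 *		lldd_reject_ls(lsxchg);
 *
 * where lldd_reject_ls() is a hypothetical LLDD cleanup path taken for
 * the -E2BIG/-ESHUTDOWN/-ENOENT returns seen above. The request payload
 * is copied into an internal iod before the work item is queued, so the
 * LLDD's receive buffer may be reused as soon as the call returns.
 */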
2104 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); in nvmet_fc_alloc_tgt_pgs()
2108 fod->data_sg = sg; in nvmet_fc_alloc_tgt_pgs()
2109 fod->data_sg_cnt = nent; in nvmet_fc_alloc_tgt_pgs()
2110 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2111 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_alloc_tgt_pgs()
2114 fod->next_sg = fod->data_sg; in nvmet_fc_alloc_tgt_pgs()
2125 if (!fod->data_sg || !fod->data_sg_cnt) in nvmet_fc_free_tgt_pgs()
2128 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2129 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_free_tgt_pgs()
2131 sgl_free(fod->data_sg); in nvmet_fc_free_tgt_pgs()
2132 fod->data_sg = NULL; in nvmet_fc_free_tgt_pgs()
2133 fod->data_sg_cnt = 0; in nvmet_fc_free_tgt_pgs()
2143 sqtail = atomic_read(&q->sqtail) % q->sqsize; in queue_90percent_full()
2145 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); in queue_90percent_full()
2146 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); in queue_90percent_full()
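/*
 * Worked example (illustrative): with q->sqsize == 32, sqhd == 5 and a
 * raw sqtail of 34: sqtail % sqsize == 2, so used = 2 + 32 - 5 = 29 and
 * 29 * 10 = 290 >= (32 - 1) * 9 = 279, i.e. the queue is treated as
 * over 90% full and the caller below will force an ersp.
 */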
2157 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; in nvmet_fc_prep_fcp_rsp()
2158 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in nvmet_fc_prep_fcp_rsp()
2159 struct nvme_completion *cqe = &ersp->cqe; in nvmet_fc_prep_fcp_rsp()
2160 u32 *cqewd = (u32 *)cqe; in nvmet_fc_prep_fcp_rsp()
2164 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) in nvmet_fc_prep_fcp_rsp()
2165 xfr_length = fod->req.transfer_len; in nvmet_fc_prep_fcp_rsp()
2167 xfr_length = fod->offset; in nvmet_fc_prep_fcp_rsp()
2171 * Note: to send a 0's response, the NVME-FC host transport will in nvmet_fc_prep_fcp_rsp()
2172 * recreate the CQE. The host transport knows: sq id, SQHD (last in nvmet_fc_prep_fcp_rsp()
2174 * zero-filled CQE with those known fields filled in. Transport in nvmet_fc_prep_fcp_rsp()
2175 * must send an ersp for any condition where the cqe won't match in nvmet_fc_prep_fcp_rsp()
2178 * Here are the FC-NVME mandated cases where we must send an ersp: in nvmet_fc_prep_fcp_rsp()
2180 * force fabric commands to send ersp's (not in FC-NVME but good in nvmet_fc_prep_fcp_rsp()
2182 * normal cmds: any time status is non-zero, or status is zero in nvmet_fc_prep_fcp_rsp()
2183 * but words 0 or 1 are non-zero. in nvmet_fc_prep_fcp_rsp()
2188 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); in nvmet_fc_prep_fcp_rsp()
2189 if (!(rspcnt % fod->queue->ersp_ratio) || in nvmet_fc_prep_fcp_rsp()
2191 xfr_length != fod->req.transfer_len || in nvmet_fc_prep_fcp_rsp()
2192 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || in nvmet_fc_prep_fcp_rsp()
2193 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || in nvmet_fc_prep_fcp_rsp()
2194 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) in nvmet_fc_prep_fcp_rsp()
2197 /* re-set the fields */ in nvmet_fc_prep_fcp_rsp()
2198 fod->fcpreq->rspaddr = ersp; in nvmet_fc_prep_fcp_rsp()
2199 fod->fcpreq->rspdma = fod->rspdma; in nvmet_fc_prep_fcp_rsp()
2203 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; in nvmet_fc_prep_fcp_rsp()
2205 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); in nvmet_fc_prep_fcp_rsp()
2206 rsn = atomic_inc_return(&fod->queue->rsn); in nvmet_fc_prep_fcp_rsp()
2207 ersp->rsn = cpu_to_be32(rsn); in nvmet_fc_prep_fcp_rsp()
2208 ersp->xfrd_len = cpu_to_be32(xfr_length); in nvmet_fc_prep_fcp_rsp()
2209 fod->fcpreq->rsplen = sizeof(*ersp); in nvmet_fc_prep_fcp_rsp()
2212 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2213 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_rsp()
2222 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_abort_op()
2231 /* no need to take lock - lock was taken earlier to get here */ in nvmet_fc_abort_op()
2232 if (!fod->aborted) in nvmet_fc_abort_op()
2233 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); in nvmet_fc_abort_op()
2235 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_abort_op()
2244 fod->fcpreq->op = NVMET_FCOP_RSP; in nvmet_fc_xmt_fcp_rsp()
2245 fod->fcpreq->timeout = 0; in nvmet_fc_xmt_fcp_rsp()
2249 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2258 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_transfer_fcp_data()
2259 struct scatterlist *sg = fod->next_sg; in nvmet_fc_transfer_fcp_data()
2261 u32 remaininglen = fod->req.transfer_len - fod->offset; in nvmet_fc_transfer_fcp_data()
2265 fcpreq->op = op; in nvmet_fc_transfer_fcp_data()
2266 fcpreq->offset = fod->offset; in nvmet_fc_transfer_fcp_data()
2267 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; in nvmet_fc_transfer_fcp_data()
2278 fcpreq->sg = sg; in nvmet_fc_transfer_fcp_data()
2279 fcpreq->sg_cnt = 0; in nvmet_fc_transfer_fcp_data()
2281 fcpreq->sg_cnt < tgtport->max_sg_cnt && in nvmet_fc_transfer_fcp_data()
2283 fcpreq->sg_cnt++; in nvmet_fc_transfer_fcp_data()
2287 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { in nvmet_fc_transfer_fcp_data()
2288 fcpreq->sg_cnt++; in nvmet_fc_transfer_fcp_data()
2293 fod->next_sg = sg; in nvmet_fc_transfer_fcp_data()
2295 fod->next_sg = NULL; in nvmet_fc_transfer_fcp_data()
2297 fcpreq->transfer_length = tlen; in nvmet_fc_transfer_fcp_data()
2298 fcpreq->transferred_length = 0; in nvmet_fc_transfer_fcp_data()
2299 fcpreq->fcp_error = 0; in nvmet_fc_transfer_fcp_data()
2300 fcpreq->rsplen = 0; in nvmet_fc_transfer_fcp_data()
2303 * If the last READDATA request: check if LLDD supports in nvmet_fc_transfer_fcp_data()
2307 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && in nvmet_fc_transfer_fcp_data()
2308 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { in nvmet_fc_transfer_fcp_data()
2309 fcpreq->op = NVMET_FCOP_READDATA_RSP; in nvmet_fc_transfer_fcp_data()
2313 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2320 fod->abort = true; in nvmet_fc_transfer_fcp_data()
2323 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2324 fod->writedataactive = false; in nvmet_fc_transfer_fcp_data()
2325 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2326 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_transfer_fcp_data()
2328 fcpreq->fcp_error = ret; in nvmet_fc_transfer_fcp_data()
2329 fcpreq->transferred_length = 0; in nvmet_fc_transfer_fcp_data()
2330 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2338 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in __nvmet_fc_fod_op_abort()
2339 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort()
2343 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { in __nvmet_fc_fod_op_abort()
2344 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in __nvmet_fc_fod_op_abort()
2361 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_fod_op_done()
2362 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done()
2366 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2367 abort = fod->abort; in nvmet_fc_fod_op_done()
2368 fod->writedataactive = false; in nvmet_fc_fod_op_done()
2369 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2371 switch (fcpreq->op) { in nvmet_fc_fod_op_done()
2376 if (fcpreq->fcp_error || in nvmet_fc_fod_op_done()
2377 fcpreq->transferred_length != fcpreq->transfer_length) { in nvmet_fc_fod_op_done()
2378 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2379 fod->abort = true; in nvmet_fc_fod_op_done()
2380 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2382 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_fod_op_done()
2386 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2387 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2388 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2389 fod->writedataactive = true; in nvmet_fc_fod_op_done()
2390 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2399 fod->req.execute(&fod->req); in nvmet_fc_fod_op_done()
2406 if (fcpreq->fcp_error || in nvmet_fc_fod_op_done()
2407 fcpreq->transferred_length != fcpreq->transfer_length) { in nvmet_fc_fod_op_done()
2414 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { in nvmet_fc_fod_op_done()
2417 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2421 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2422 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2441 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2452 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_xmt_fcp_op_done()
2464 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in __nvmet_fc_fcp_nvme_cmd_done()
2465 struct nvme_completion *cqe = &fod->rspiubuf.cqe; in __nvmet_fc_fcp_nvme_cmd_done()
2469 spin_lock_irqsave(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2470 abort = fod->abort; in __nvmet_fc_fcp_nvme_cmd_done()
2471 spin_unlock_irqrestore(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2473 /* if we have a CQE, snoop the last sq_head value */ in __nvmet_fc_fcp_nvme_cmd_done()
2475 fod->queue->sqhd = cqe->sq_head; in __nvmet_fc_fcp_nvme_cmd_done()
2484 /* fudge up a failed CQE status for our transport error */ in __nvmet_fc_fcp_nvme_cmd_done()
2485 memset(cqe, 0, sizeof(*cqe)); in __nvmet_fc_fcp_nvme_cmd_done()
2486 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ in __nvmet_fc_fcp_nvme_cmd_done()
2487 cqe->sq_id = cpu_to_le16(fod->queue->qid); in __nvmet_fc_fcp_nvme_cmd_done()
2488 cqe->command_id = sqe->command_id; in __nvmet_fc_fcp_nvme_cmd_done()
2489 cqe->status = cpu_to_le16(status); in __nvmet_fc_fcp_nvme_cmd_done()
2493 * try to push the data even if the SQE status is non-zero. in __nvmet_fc_fcp_nvme_cmd_done()
2497 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { in __nvmet_fc_fcp_nvme_cmd_done()
2504 /* writes & no data - fall thru */ in __nvmet_fc_fcp_nvme_cmd_done()
2518 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done()
2525 * Actual processing routine for received FC-NVME I/O Requests from the LLD
2531 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; in nvmet_fc_handle_fcp_rqst()
2532 u32 xfrlen = be32_to_cpu(cmdiu->data_len); in nvmet_fc_handle_fcp_rqst()
2544 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; in nvmet_fc_handle_fcp_rqst()
2546 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { in nvmet_fc_handle_fcp_rqst()
2547 fod->io_dir = NVMET_FCP_WRITE; in nvmet_fc_handle_fcp_rqst()
2548 if (!nvme_is_write(&cmdiu->sqe)) in nvmet_fc_handle_fcp_rqst()
2550 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { in nvmet_fc_handle_fcp_rqst()
2551 fod->io_dir = NVMET_FCP_READ; in nvmet_fc_handle_fcp_rqst()
2552 if (nvme_is_write(&cmdiu->sqe)) in nvmet_fc_handle_fcp_rqst()
2555 fod->io_dir = NVMET_FCP_NODATA; in nvmet_fc_handle_fcp_rqst()
2560 fod->req.cmd = &fod->cmdiubuf.sqe; in nvmet_fc_handle_fcp_rqst()
2561 fod->req.cqe = &fod->rspiubuf.cqe; in nvmet_fc_handle_fcp_rqst()
2562 if (!tgtport->pe) in nvmet_fc_handle_fcp_rqst()
2564 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2567 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); in nvmet_fc_handle_fcp_rqst()
2569 fod->data_sg = NULL; in nvmet_fc_handle_fcp_rqst()
2570 fod->data_sg_cnt = 0; in nvmet_fc_handle_fcp_rqst()
2572 ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq, in nvmet_fc_handle_fcp_rqst()
2580 fod->req.transfer_len = xfrlen; in nvmet_fc_handle_fcp_rqst()
2583 atomic_inc(&fod->queue->sqtail); in nvmet_fc_handle_fcp_rqst()
2585 if (fod->req.transfer_len) { in nvmet_fc_handle_fcp_rqst()
2588 nvmet_req_complete(&fod->req, ret); in nvmet_fc_handle_fcp_rqst()
2592 fod->req.sg = fod->data_sg; in nvmet_fc_handle_fcp_rqst()
2593 fod->req.sg_cnt = fod->data_sg_cnt; in nvmet_fc_handle_fcp_rqst()
2594 fod->offset = 0; in nvmet_fc_handle_fcp_rqst()
2596 if (fod->io_dir == NVMET_FCP_WRITE) { in nvmet_fc_handle_fcp_rqst()
2608 fod->req.execute(&fod->req); in nvmet_fc_handle_fcp_rqst()
2616 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2619 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2632 * asynchronously received - it's possible for a command to be received
2639 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2645 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2651 * transport will return a non-zero status indicating the error.
2652 * In all cases other than -EOVERFLOW, the transport has not accepted the
2676 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || in nvmet_fc_rcv_fcp_req()
2677 (cmdiu->fc_id != NVME_CMD_FC_ID) || in nvmet_fc_rcv_fcp_req()
2678 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) in nvmet_fc_rcv_fcp_req()
2679 return -EIO; in nvmet_fc_rcv_fcp_req()
2682 be64_to_cpu(cmdiu->connection_id)); in nvmet_fc_rcv_fcp_req()
2684 return -ENOTCONN; in nvmet_fc_rcv_fcp_req()
2693 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2697 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2699 fcpreq->nvmet_fc_private = fod; in nvmet_fc_rcv_fcp_req()
2700 fod->fcpreq = fcpreq; in nvmet_fc_rcv_fcp_req()
2702 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); in nvmet_fc_rcv_fcp_req()
2709 if (!tgtport->ops->defer_rcv) { in nvmet_fc_rcv_fcp_req()
2710 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2713 return -ENOENT; in nvmet_fc_rcv_fcp_req()
2716 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, in nvmet_fc_rcv_fcp_req()
2719 /* Just re-use one that was previously allocated */ in nvmet_fc_rcv_fcp_req()
2720 list_del(&deferfcp->req_list); in nvmet_fc_rcv_fcp_req()
2722 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2729 return -ENOMEM; in nvmet_fc_rcv_fcp_req()
2731 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2735 fcpreq->rspaddr = cmdiubuf; in nvmet_fc_rcv_fcp_req()
2736 fcpreq->rsplen = cmdiubuf_len; in nvmet_fc_rcv_fcp_req()
2737 deferfcp->fcp_req = fcpreq; in nvmet_fc_rcv_fcp_req()
2740 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); in nvmet_fc_rcv_fcp_req()
2744 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2746 return -EOVERFLOW; in nvmet_fc_rcv_fcp_req()
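/*
 * Illustrative LLDD-side sketch (hypothetical names): the driver feeds
 * every received FCP CMD IU to the transport and keys its handling off
 * the return code, in particular the -EOVERFLOW deferral described in
 * the function comment above:
 *
 *	ret = nvmet_fc_rcv_fcp_req(targetport, &exch->tgt_fcp_req,
 *				   exch->cmd_iu, exch->cmd_iu_len);
 *	if (!ret) {
 *		transport owns the command now
 *	} else if (ret == -EOVERFLOW) {
 *		the queue was temporarily out of fcp iods; the transport
 *		kept pointers to cmdiubuf and will call defer_rcv() once
 *		it has copied the CMD IU, so the LLDD must keep the
 *		buffer and the tgt_fcp_req valid until then
 *	} else {
 *		command was not accepted; the LLDD fails the exchange
 *	}
 */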
2751 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2757 * (template_ops->fcp_req_release() has not been called).
2777 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_rcv_fcp_abort()
2781 if (!fod || fod->fcpreq != fcpreq) in nvmet_fc_rcv_fcp_abort()
2785 queue = fod->queue; in nvmet_fc_rcv_fcp_abort()
2787 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_abort()
2788 if (fod->active) { in nvmet_fc_rcv_fcp_abort()
2794 spin_lock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2795 fod->abort = true; in nvmet_fc_rcv_fcp_abort()
2796 fod->aborted = true; in nvmet_fc_rcv_fcp_abort()
2797 spin_unlock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2799 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_abort()
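/*
 * Illustrative LLDD-side sketch (hypothetical names): aborting an
 * outstanding FCP job is a single call; per the comment above, the job
 * remains outstanding until the transport calls fcp_req_release():
 *
 *	nvmet_fc_rcv_fcp_abort(targetport, &exch->tgt_fcp_req);
 */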
2815 return -EINVAL; in __nvme_fc_parse_u64()
2830 substring_t wwn = { name, &name[sizeof(name)-1] }; in nvme_fc_parse_traddr()
2835 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && in nvme_fc_parse_traddr()
2837 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { in nvme_fc_parse_traddr()
2842 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && in nvme_fc_parse_traddr()
2844 "pn-", NVME_FC_TRADDR_NNLEN))) { in nvme_fc_parse_traddr()
2855 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) in nvme_fc_parse_traddr()
2859 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) in nvme_fc_parse_traddr()
2866 return -EINVAL; in nvme_fc_parse_traddr()
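/*
 * Illustrative examples: both the fully qualified and the shorthand
 * transport address forms are accepted, e.g.
 *
 *	"nn-0x10000090fa942779:pn-0x20000090fa942779"
 *	"nn-10000090fa942779:pn-20000090fa942779"
 *
 * and parse into traddr->nn / traddr->pn; anything else fails with
 * -EINVAL. nvmet_fc_add_port() below uses the parsed WWNN/WWPN to match
 * a configured nvmet port to a registered targetport.
 */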
2879 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || in nvmet_fc_add_port()
2880 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) in nvmet_fc_add_port()
2881 return -EINVAL; in nvmet_fc_add_port()
2885 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, in nvmet_fc_add_port()
2886 sizeof(port->disc_addr.traddr)); in nvmet_fc_add_port()
2892 return -ENOMEM; in nvmet_fc_add_port()
2894 ret = -ENXIO; in nvmet_fc_add_port()
2897 if ((tgtport->fc_target_port.node_name == traddr.nn) && in nvmet_fc_add_port()
2898 (tgtport->fc_target_port.port_name == traddr.pn)) { in nvmet_fc_add_port()
2903 if (!tgtport->pe) { in nvmet_fc_add_port()
2907 ret = -EALREADY; in nvmet_fc_add_port()
2924 struct nvmet_fc_port_entry *pe = port->priv; in nvmet_fc_remove_port()
2929 if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) in nvmet_fc_remove_port()
2930 tgtport = pe->tgtport; in nvmet_fc_remove_port()
2947 struct nvmet_fc_port_entry *pe = port->priv; in nvmet_fc_discovery_chg()
2952 if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) in nvmet_fc_discovery_chg()
2953 tgtport = pe->tgtport; in nvmet_fc_discovery_chg()
2959 if (tgtport && tgtport->ops->discovery_event) in nvmet_fc_discovery_chg()
2960 tgtport->ops->discovery_event(&tgtport->fc_target_port); in nvmet_fc_discovery_chg()
2969 struct nvmet_sq *sq = ctrl->sqs[0]; in nvmet_fc_host_traddr()
2972 struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; in nvmet_fc_host_traddr()
2973 struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL; in nvmet_fc_host_traddr()
2978 return -ENODEV; in nvmet_fc_host_traddr()
2980 ret = -ENODEV; in nvmet_fc_host_traddr()
2984 if (tgtport->ops->host_traddr) { in nvmet_fc_host_traddr()
2985 ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); in nvmet_fc_host_traddr()
2988 ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); in nvmet_fc_host_traddr()
3019 /* sanity check - all targetports should be removed */ in nvmet_fc_exit_module()