Lines Matching +full:ctrl +full:- +full:b
1 // SPDX-License-Identifier: GPL-2.0
7 * REDS Institute, HEIG-VD, HES-SO, Switzerland
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
31 * allow up to 128 page-sized segments. For the maximum allowed,
36 (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
84 struct nvmet_pci_epf_ctrl *ctrl; member
129 struct nvmet_pci_epf_ctrl *ctrl; member
209 struct nvmet_pci_epf_ctrl ctrl; member
226 static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_bar_read32() argument
229 __le32 *bar_reg = ctrl->bar + off; in nvmet_pci_epf_bar_read32()
234 static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_bar_write32() argument
237 __le32 *bar_reg = ctrl->bar + off; in nvmet_pci_epf_bar_write32()
242 static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_bar_read64() argument
245 return (u64)nvmet_pci_epf_bar_read32(ctrl, off) | in nvmet_pci_epf_bar_read64()
246 ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32); in nvmet_pci_epf_bar_read64()
249 static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_bar_write64() argument
252 nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF); in nvmet_pci_epf_bar_write64()
253 nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF); in nvmet_pci_epf_bar_write64()
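The two helpers above emulate 64-bit register access with a pair of 32-bit BAR accesses (low dword at the offset, high dword at offset + 4). A minimal standalone sketch of that split/recombine arithmetic, assuming nothing about the driver beyond what is shown here:

/* Hedged sketch (plain userspace C, not driver code): low/high dword split. */
#include <assert.h>
#include <stdint.h>

static uint32_t fake_bar[2];	/* fake_bar[0] = offset 0, fake_bar[1] = offset 4 */

static void bar_write64(uint64_t val)
{
	fake_bar[0] = val & 0xFFFFFFFF;		/* low 32 bits at offset 0 */
	fake_bar[1] = (val >> 32) & 0xFFFFFFFF;	/* high 32 bits at offset 4 */
}

static uint64_t bar_read64(void)
{
	return (uint64_t)fake_bar[0] | ((uint64_t)fake_bar[1] << 32);
}

int main(void)
{
	bar_write64(0x123456789abcdef0ULL);
	assert(bar_read64() == 0x123456789abcdef0ULL);
	return 0;
}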
259 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_mem_map()
261 return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_mem_map()
268 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_mem_unmap()
270 pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map); in nvmet_pci_epf_mem_unmap()
286 return chan->device->dev == filter->dev && in nvmet_pci_epf_dma_filter()
287 (filter->dma_mask & caps.directions); in nvmet_pci_epf_dma_filter()
292 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_init_dma()
293 struct device *dev = &epf->dev; in nvmet_pci_epf_init_dma()
298 mutex_init(&nvme_epf->dma_rx_lock); in nvmet_pci_epf_init_dma()
299 mutex_init(&nvme_epf->dma_tx_lock); in nvmet_pci_epf_init_dma()
304 filter.dev = epf->epc->dev.parent; in nvmet_pci_epf_init_dma()
311 nvme_epf->dma_rx_chan = chan; in nvmet_pci_epf_init_dma()
318 nvme_epf->dma_tx_chan = chan; in nvmet_pci_epf_init_dma()
320 nvme_epf->dma_enabled = true; in nvmet_pci_epf_init_dma()
322 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", in nvmet_pci_epf_init_dma()
326 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", in nvmet_pci_epf_init_dma()
333 dma_release_channel(nvme_epf->dma_rx_chan); in nvmet_pci_epf_init_dma()
334 nvme_epf->dma_rx_chan = NULL; in nvmet_pci_epf_init_dma()
337 mutex_destroy(&nvme_epf->dma_rx_lock); in nvmet_pci_epf_init_dma()
338 mutex_destroy(&nvme_epf->dma_tx_lock); in nvmet_pci_epf_init_dma()
339 nvme_epf->dma_enabled = false; in nvmet_pci_epf_init_dma()
341 dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n"); in nvmet_pci_epf_init_dma()
346 if (!nvme_epf->dma_enabled) in nvmet_pci_epf_deinit_dma()
349 dma_release_channel(nvme_epf->dma_tx_chan); in nvmet_pci_epf_deinit_dma()
350 nvme_epf->dma_tx_chan = NULL; in nvmet_pci_epf_deinit_dma()
351 dma_release_channel(nvme_epf->dma_rx_chan); in nvmet_pci_epf_deinit_dma()
352 nvme_epf->dma_rx_chan = NULL; in nvmet_pci_epf_deinit_dma()
353 mutex_destroy(&nvme_epf->dma_rx_lock); in nvmet_pci_epf_deinit_dma()
354 mutex_destroy(&nvme_epf->dma_tx_lock); in nvmet_pci_epf_deinit_dma()
355 nvme_epf->dma_enabled = false; in nvmet_pci_epf_deinit_dma()
361 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_dma_transfer()
364 struct device *dev = &epf->dev; in nvmet_pci_epf_dma_transfer()
374 lock = &nvme_epf->dma_rx_lock; in nvmet_pci_epf_dma_transfer()
375 chan = nvme_epf->dma_rx_chan; in nvmet_pci_epf_dma_transfer()
377 sconf.src_addr = seg->pci_addr; in nvmet_pci_epf_dma_transfer()
380 lock = &nvme_epf->dma_tx_lock; in nvmet_pci_epf_dma_transfer()
381 chan = nvme_epf->dma_tx_chan; in nvmet_pci_epf_dma_transfer()
383 sconf.dst_addr = seg->pci_addr; in nvmet_pci_epf_dma_transfer()
386 return -EINVAL; in nvmet_pci_epf_dma_transfer()
392 dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir); in nvmet_pci_epf_dma_transfer()
403 desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length, in nvmet_pci_epf_dma_transfer()
407 ret = -EIO; in nvmet_pci_epf_dma_transfer()
420 ret = -EIO; in nvmet_pci_epf_dma_transfer()
426 dma_unmap_single(dma_dev, dma_addr, seg->length, dir); in nvmet_pci_epf_dma_transfer()
437 u64 pci_addr = seg->pci_addr; in nvmet_pci_epf_mmio_transfer()
438 u32 length = seg->length; in nvmet_pci_epf_mmio_transfer()
439 void *buf = seg->buf; in nvmet_pci_epf_mmio_transfer()
441 int ret = -EINVAL; in nvmet_pci_epf_mmio_transfer()
447 mutex_lock(&nvme_epf->mmio_lock); in nvmet_pci_epf_mmio_transfer()
462 ret = -EINVAL; in nvmet_pci_epf_mmio_transfer()
468 length -= map.pci_size; in nvmet_pci_epf_mmio_transfer()
474 mutex_unlock(&nvme_epf->mmio_lock); in nvmet_pci_epf_mmio_transfer()
482 if (nvme_epf->dma_enabled) in nvmet_pci_epf_transfer_seg()
488 static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_transfer() argument
498 return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir); in nvmet_pci_epf_transfer()
501 static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_alloc_irq_vectors() argument
503 ctrl->irq_vectors = kcalloc(ctrl->nr_queues, in nvmet_pci_epf_alloc_irq_vectors()
506 if (!ctrl->irq_vectors) in nvmet_pci_epf_alloc_irq_vectors()
507 return -ENOMEM; in nvmet_pci_epf_alloc_irq_vectors()
509 mutex_init(&ctrl->irq_lock); in nvmet_pci_epf_alloc_irq_vectors()
514 static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_free_irq_vectors() argument
516 if (ctrl->irq_vectors) { in nvmet_pci_epf_free_irq_vectors()
517 mutex_destroy(&ctrl->irq_lock); in nvmet_pci_epf_free_irq_vectors()
518 kfree(ctrl->irq_vectors); in nvmet_pci_epf_free_irq_vectors()
519 ctrl->irq_vectors = NULL; in nvmet_pci_epf_free_irq_vectors()
524 nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) in nvmet_pci_epf_find_irq_vector() argument
529 lockdep_assert_held(&ctrl->irq_lock); in nvmet_pci_epf_find_irq_vector()
531 for (i = 0; i < ctrl->nr_queues; i++) { in nvmet_pci_epf_find_irq_vector()
532 iv = &ctrl->irq_vectors[i]; in nvmet_pci_epf_find_irq_vector()
533 if (iv->ref && iv->vector == vector) in nvmet_pci_epf_find_irq_vector()
541 nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) in nvmet_pci_epf_add_irq_vector() argument
546 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_add_irq_vector()
548 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); in nvmet_pci_epf_add_irq_vector()
550 iv->ref++; in nvmet_pci_epf_add_irq_vector()
554 for (i = 0; i < ctrl->nr_queues; i++) { in nvmet_pci_epf_add_irq_vector()
555 iv = &ctrl->irq_vectors[i]; in nvmet_pci_epf_add_irq_vector()
556 if (!iv->ref) in nvmet_pci_epf_add_irq_vector()
563 iv->ref = 1; in nvmet_pci_epf_add_irq_vector()
564 iv->vector = vector; in nvmet_pci_epf_add_irq_vector()
565 iv->nr_irqs = 0; in nvmet_pci_epf_add_irq_vector()
568 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_add_irq_vector()
573 static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_remove_irq_vector() argument
578 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_remove_irq_vector()
580 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); in nvmet_pci_epf_remove_irq_vector()
582 iv->ref--; in nvmet_pci_epf_remove_irq_vector()
583 if (!iv->ref) { in nvmet_pci_epf_remove_irq_vector()
584 iv->vector = 0; in nvmet_pci_epf_remove_irq_vector()
585 iv->nr_irqs = 0; in nvmet_pci_epf_remove_irq_vector()
589 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_remove_irq_vector()
592 static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_should_raise_irq() argument
595 struct nvmet_pci_epf_irq_vector *iv = cq->iv; in nvmet_pci_epf_should_raise_irq()
599 if (!cq->qid) in nvmet_pci_epf_should_raise_irq()
602 if (iv->cd) in nvmet_pci_epf_should_raise_irq()
606 ret = iv->nr_irqs > 0; in nvmet_pci_epf_should_raise_irq()
608 iv->nr_irqs++; in nvmet_pci_epf_should_raise_irq()
609 ret = iv->nr_irqs >= ctrl->irq_vector_threshold; in nvmet_pci_epf_should_raise_irq()
612 iv->nr_irqs = 0; in nvmet_pci_epf_should_raise_irq()
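The lines above implement per-vector interrupt coalescing: the admin queue always raises, a vector with coalescing disabled always raises, and otherwise completions are counted until a threshold is reached. A hedged standalone sketch of that decision (hypothetical names, not the driver's function):

/* Hedged sketch: count completions, signal only at the threshold. */
#include <stdbool.h>
#include <stdio.h>

struct irq_vec {
	bool cd;		/* CD bit: coalescing disabled for this vector */
	unsigned int nr_irqs;	/* completions since the last interrupt */
};

static bool should_raise(struct irq_vec *iv, unsigned int threshold, bool force)
{
	bool raise;

	if (iv->cd)
		return true;		/* coalescing disabled: signal every completion */
	if (force) {
		raise = iv->nr_irqs > 0;
	} else {
		iv->nr_irqs++;
		raise = iv->nr_irqs >= threshold;
	}
	if (raise)
		iv->nr_irqs = 0;	/* reset the pending-completion count */
	return raise;
}

int main(void)
{
	struct irq_vec iv = { .cd = false, .nr_irqs = 0 };

	for (int i = 1; i <= 8; i++)	/* with threshold 4: raise on 4 and 8 */
		printf("completion %d -> %s\n", i,
		       should_raise(&iv, 4, false) ? "raise" : "hold");
	return 0;
}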
617 static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_raise_irq() argument
620 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf; in nvmet_pci_epf_raise_irq()
621 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_raise_irq()
624 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) || in nvmet_pci_epf_raise_irq()
625 !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) in nvmet_pci_epf_raise_irq()
628 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_raise_irq()
630 if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force)) in nvmet_pci_epf_raise_irq()
633 switch (nvme_epf->irq_type) { in nvmet_pci_epf_raise_irq()
637 * If we fail to raise an MSI or MSI-X interrupt, it is likely in nvmet_pci_epf_raise_irq()
642 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_raise_irq()
643 nvme_epf->irq_type, cq->vector + 1); in nvmet_pci_epf_raise_irq()
644 if (!ret || !nvme_epf->epc_features->intx_capable) in nvmet_pci_epf_raise_irq()
648 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_raise_irq()
653 ret = -EINVAL; in nvmet_pci_epf_raise_irq()
658 dev_err_ratelimited(ctrl->dev, in nvmet_pci_epf_raise_irq()
660 cq->qid, ret); in nvmet_pci_epf_raise_irq()
663 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_raise_irq()
668 return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode); in nvmet_pci_epf_iod_name()
676 struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl; in nvmet_pci_epf_alloc_iod() local
679 iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL); in nvmet_pci_epf_alloc_iod()
684 iod->req.cmd = &iod->cmd; in nvmet_pci_epf_alloc_iod()
685 iod->req.cqe = &iod->cqe; in nvmet_pci_epf_alloc_iod()
686 iod->req.port = ctrl->port; in nvmet_pci_epf_alloc_iod()
687 iod->ctrl = ctrl; in nvmet_pci_epf_alloc_iod()
688 iod->sq = sq; in nvmet_pci_epf_alloc_iod()
689 iod->cq = &ctrl->cq[sq->qid]; in nvmet_pci_epf_alloc_iod()
690 INIT_LIST_HEAD(&iod->link); in nvmet_pci_epf_alloc_iod()
691 iod->dma_dir = DMA_NONE; in nvmet_pci_epf_alloc_iod()
692 INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work); in nvmet_pci_epf_alloc_iod()
693 init_completion(&iod->done); in nvmet_pci_epf_alloc_iod()
705 int nr_segs = iod->nr_data_segs + nsegs; in nvmet_pci_epf_alloc_iod_data_segs()
707 segs = krealloc(iod->data_segs, in nvmet_pci_epf_alloc_iod_data_segs()
711 return -ENOMEM; in nvmet_pci_epf_alloc_iod_data_segs()
713 iod->nr_data_segs = nr_segs; in nvmet_pci_epf_alloc_iod_data_segs()
714 iod->data_segs = segs; in nvmet_pci_epf_alloc_iod_data_segs()
723 if (iod->data_segs) { in nvmet_pci_epf_free_iod()
724 for (i = 0; i < iod->nr_data_segs; i++) in nvmet_pci_epf_free_iod()
725 kfree(iod->data_segs[i].buf); in nvmet_pci_epf_free_iod()
726 if (iod->data_segs != &iod->data_seg) in nvmet_pci_epf_free_iod()
727 kfree(iod->data_segs); in nvmet_pci_epf_free_iod()
729 if (iod->data_sgt.nents > 1) in nvmet_pci_epf_free_iod()
730 sg_free_table(&iod->data_sgt); in nvmet_pci_epf_free_iod()
731 mempool_free(iod, &iod->ctrl->iod_pool); in nvmet_pci_epf_free_iod()
736 struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf; in nvmet_pci_epf_transfer_iod_data()
737 struct nvmet_pci_epf_segment *seg = &iod->data_segs[0]; in nvmet_pci_epf_transfer_iod_data()
741 for (i = 0; i < iod->nr_data_segs; i++, seg++) { in nvmet_pci_epf_transfer_iod_data()
742 ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir); in nvmet_pci_epf_transfer_iod_data()
744 iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR; in nvmet_pci_epf_transfer_iod_data()
752 static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_prp_ofst() argument
755 return prp & ctrl->mps_mask; in nvmet_pci_epf_prp_ofst()
758 static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_prp_size() argument
761 return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp); in nvmet_pci_epf_prp_size()
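The two helpers above compute a PRP entry's offset within a memory page and the number of bytes it can address up to the page boundary. A hedged worked example of the same arithmetic, assuming a 4096-byte CC.MPS page size:

/* Hedged sketch: PRP offset/size math for MPS = 4096 (not driver code). */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define MPS_SHIFT	12U
#define MPS		(1UL << MPS_SHIFT)	/* 4096 */
#define MPS_MASK	(MPS - 1)

static uint64_t prp_ofst(uint64_t prp)
{
	return prp & MPS_MASK;		/* offset within the memory page */
}

static size_t prp_size(uint64_t prp)
{
	return MPS - prp_ofst(prp);	/* bytes left until the page end */
}

int main(void)
{
	/* A PRP entry pointing 0x200 bytes into a page. */
	assert(prp_ofst(0x10000200ULL) == 0x200);
	assert(prp_size(0x10000200ULL) == 4096 - 0x200);
	/* A page-aligned PRP entry covers a full page. */
	assert(prp_size(0x10001000ULL) == 4096);
	return 0;
}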
767 static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp, in nvmet_pci_epf_get_prp_list() argument
770 size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift; in nvmet_pci_epf_get_prp_list()
781 length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3); in nvmet_pci_epf_get_prp_list()
782 ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE); in nvmet_pci_epf_get_prp_list()
789 static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_iod_parse_prp_list() argument
792 struct nvme_command *cmd = &iod->cmd; in nvmet_pci_epf_iod_parse_prp_list()
795 size_t transfer_len = iod->data_len; in nvmet_pci_epf_iod_parse_prp_list()
801 prps = kzalloc(ctrl->mps, GFP_KERNEL); in nvmet_pci_epf_iod_parse_prp_list()
811 prp = le64_to_cpu(cmd->common.dptr.prp1); in nvmet_pci_epf_iod_parse_prp_list()
815 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp); in nvmet_pci_epf_iod_parse_prp_list()
816 nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift; in nvmet_pci_epf_iod_parse_prp_list()
823 seg = &iod->data_segs[0]; in nvmet_pci_epf_iod_parse_prp_list()
824 seg->pci_addr = prp; in nvmet_pci_epf_iod_parse_prp_list()
825 seg->length = nvmet_pci_epf_prp_size(ctrl, prp); in nvmet_pci_epf_iod_parse_prp_list()
827 size = seg->length; in nvmet_pci_epf_iod_parse_prp_list()
835 prp = le64_to_cpu(cmd->common.dptr.prp2); in nvmet_pci_epf_iod_parse_prp_list()
840 xfer_len = transfer_len - size; in nvmet_pci_epf_iod_parse_prp_list()
843 nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp, in nvmet_pci_epf_iod_parse_prp_list()
858 if (xfer_len > ctrl->mps && i == nr_prps - 1) { in nvmet_pci_epf_iod_parse_prp_list()
865 if (nvmet_pci_epf_prp_ofst(ctrl, prp)) in nvmet_pci_epf_iod_parse_prp_list()
871 if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs)) in nvmet_pci_epf_iod_parse_prp_list()
875 seg->pci_addr = prp; in nvmet_pci_epf_iod_parse_prp_list()
876 seg->length = 0; in nvmet_pci_epf_iod_parse_prp_list()
880 prp_size = min_t(size_t, ctrl->mps, xfer_len); in nvmet_pci_epf_iod_parse_prp_list()
881 seg->length += prp_size; in nvmet_pci_epf_iod_parse_prp_list()
888 iod->nr_data_segs = nr_segs; in nvmet_pci_epf_iod_parse_prp_list()
892 dev_err(ctrl->dev, in nvmet_pci_epf_iod_parse_prp_list()
893 "PRPs transfer length mismatch: got %zu B, need %zu B\n", in nvmet_pci_epf_iod_parse_prp_list()
903 dev_err(ctrl->dev, "PRPs list invalid offset\n"); in nvmet_pci_epf_iod_parse_prp_list()
904 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_list()
908 dev_err(ctrl->dev, "PRPs list invalid field\n"); in nvmet_pci_epf_iod_parse_prp_list()
909 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_list()
913 dev_err(ctrl->dev, "PRPs list internal error\n"); in nvmet_pci_epf_iod_parse_prp_list()
914 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_list()
918 return -EINVAL; in nvmet_pci_epf_iod_parse_prp_list()
921 static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_iod_parse_prp_simple() argument
924 struct nvme_command *cmd = &iod->cmd; in nvmet_pci_epf_iod_parse_prp_simple()
925 size_t transfer_len = iod->data_len; in nvmet_pci_epf_iod_parse_prp_simple()
930 prp1 = le64_to_cpu(cmd->common.dptr.prp1); in nvmet_pci_epf_iod_parse_prp_simple()
931 prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1); in nvmet_pci_epf_iod_parse_prp_simple()
935 prp2 = le64_to_cpu(cmd->common.dptr.prp2); in nvmet_pci_epf_iod_parse_prp_simple()
937 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_simple()
938 return -EINVAL; in nvmet_pci_epf_iod_parse_prp_simple()
940 if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) { in nvmet_pci_epf_iod_parse_prp_simple()
941 iod->status = in nvmet_pci_epf_iod_parse_prp_simple()
943 return -EINVAL; in nvmet_pci_epf_iod_parse_prp_simple()
950 iod->nr_data_segs = 1; in nvmet_pci_epf_iod_parse_prp_simple()
951 iod->data_segs = &iod->data_seg; in nvmet_pci_epf_iod_parse_prp_simple()
952 iod->data_segs[0].pci_addr = prp1; in nvmet_pci_epf_iod_parse_prp_simple()
953 iod->data_segs[0].length = transfer_len; in nvmet_pci_epf_iod_parse_prp_simple()
959 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_simple()
963 iod->data_segs[0].pci_addr = prp1; in nvmet_pci_epf_iod_parse_prp_simple()
964 iod->data_segs[0].length = prp1_size; in nvmet_pci_epf_iod_parse_prp_simple()
965 iod->data_segs[1].pci_addr = prp2; in nvmet_pci_epf_iod_parse_prp_simple()
966 iod->data_segs[1].length = transfer_len - prp1_size; in nvmet_pci_epf_iod_parse_prp_simple()
973 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; in nvmet_pci_epf_iod_parse_prps() local
974 u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1); in nvmet_pci_epf_iod_parse_prps()
978 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1); in nvmet_pci_epf_iod_parse_prps()
980 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prps()
981 return -EINVAL; in nvmet_pci_epf_iod_parse_prps()
984 if (iod->data_len + ofst <= ctrl->mps * 2) in nvmet_pci_epf_iod_parse_prps()
985 return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod); in nvmet_pci_epf_iod_parse_prps()
987 return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod); in nvmet_pci_epf_iod_parse_prps()
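The dispatch above follows the NVMe PRP rules: if the transfer, given PRP1's page offset, fits in at most two pages, PRP1 and PRP2 are used directly as data pointers; otherwise PRP2 points to a PRP list. A hedged sketch of that rule, assuming a 4096-byte page size:

/* Hedged sketch: simple vs. list PRP dispatch (illustrative only). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MPS	4096UL

static const char *prp_mode(uint64_t prp1, size_t data_len)
{
	size_t ofst = prp1 & (MPS - 1);

	if (ofst & 0x3)
		return "invalid PRP offset (not dword aligned)";
	if (data_len + ofst <= 2 * MPS)
		return "simple: PRP1 (+ PRP2) used as data pointers";
	return "list: PRP2 points to a PRP list";
}

int main(void)
{
	printf("%s\n", prp_mode(0x10000000ULL, 4096));	/* one page -> simple */
	printf("%s\n", prp_mode(0x10000200ULL, 8192));	/* offset pushes it past two pages -> list */
	printf("%s\n", prp_mode(0x10000000ULL, 65536));	/* 16 pages -> list */
	return 0;
}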
995 nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_get_sgl_segment() argument
999 u32 length = le32_to_cpu(desc->length); in nvmet_pci_epf_get_sgl_segment()
1007 ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length, in nvmet_pci_epf_get_sgl_segment()
1016 if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) || in nvmet_pci_epf_get_sgl_segment()
1017 sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { in nvmet_pci_epf_get_sgl_segment()
1023 *desc = sgls[nr_descs - 1]; in nvmet_pci_epf_get_sgl_segment()
1024 nr_descs--; in nvmet_pci_epf_get_sgl_segment()
1027 desc->length = 0; in nvmet_pci_epf_get_sgl_segment()
1035 static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_iod_parse_sgl_segments() argument
1038 struct nvme_command *cmd = &iod->cmd; in nvmet_pci_epf_iod_parse_sgl_segments()
1039 struct nvme_sgl_desc seg = cmd->common.dptr.sgl; in nvmet_pci_epf_iod_parse_sgl_segments()
1050 iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_sgl_segments()
1051 return -EIO; in nvmet_pci_epf_iod_parse_sgl_segments()
1055 sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls); in nvmet_pci_epf_iod_parse_sgl_segments()
1057 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_sgl_segments()
1058 return -EIO; in nvmet_pci_epf_iod_parse_sgl_segments()
1064 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_sgl_segments()
1074 iod->status = NVME_SC_SGL_INVALID_TYPE | in nvmet_pci_epf_iod_parse_sgl_segments()
1078 iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr); in nvmet_pci_epf_iod_parse_sgl_segments()
1079 iod->data_segs[n].length = le32_to_cpu(sgls[i].length); in nvmet_pci_epf_iod_parse_sgl_segments()
1087 if (iod->status != NVME_SC_SUCCESS) { in nvmet_pci_epf_iod_parse_sgl_segments()
1089 return -EIO; in nvmet_pci_epf_iod_parse_sgl_segments()
1097 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; in nvmet_pci_epf_iod_parse_sgls() local
1098 struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl; in nvmet_pci_epf_iod_parse_sgls()
1100 if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) { in nvmet_pci_epf_iod_parse_sgls()
1102 iod->nr_data_segs = 1; in nvmet_pci_epf_iod_parse_sgls()
1103 iod->data_segs = &iod->data_seg; in nvmet_pci_epf_iod_parse_sgls()
1104 iod->data_seg.pci_addr = le64_to_cpu(sgl->addr); in nvmet_pci_epf_iod_parse_sgls()
1105 iod->data_seg.length = le32_to_cpu(sgl->length); in nvmet_pci_epf_iod_parse_sgls()
1109 return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod); in nvmet_pci_epf_iod_parse_sgls()
1114 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; in nvmet_pci_epf_alloc_iod_data_buf() local
1115 struct nvmet_req *req = &iod->req; in nvmet_pci_epf_alloc_iod_data_buf()
1120 if (iod->data_len > ctrl->mdts) { in nvmet_pci_epf_alloc_iod_data_buf()
1121 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; in nvmet_pci_epf_alloc_iod_data_buf()
1122 return -EINVAL; in nvmet_pci_epf_alloc_iod_data_buf()
1129 if (iod->cmd.common.flags & NVME_CMD_SGL_ALL) in nvmet_pci_epf_alloc_iod_data_buf()
1137 if (iod->nr_data_segs == 1) { in nvmet_pci_epf_alloc_iod_data_buf()
1138 sg_init_table(&iod->data_sgl, 1); in nvmet_pci_epf_alloc_iod_data_buf()
1139 iod->data_sgt.sgl = &iod->data_sgl; in nvmet_pci_epf_alloc_iod_data_buf()
1140 iod->data_sgt.nents = 1; in nvmet_pci_epf_alloc_iod_data_buf()
1141 iod->data_sgt.orig_nents = 1; in nvmet_pci_epf_alloc_iod_data_buf()
1143 ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs, in nvmet_pci_epf_alloc_iod_data_buf()
1149 for_each_sgtable_sg(&iod->data_sgt, sg, i) { in nvmet_pci_epf_alloc_iod_data_buf()
1150 seg = &iod->data_segs[i]; in nvmet_pci_epf_alloc_iod_data_buf()
1151 seg->buf = kmalloc(seg->length, GFP_KERNEL); in nvmet_pci_epf_alloc_iod_data_buf()
1152 if (!seg->buf) in nvmet_pci_epf_alloc_iod_data_buf()
1154 sg_set_buf(sg, seg->buf, seg->length); in nvmet_pci_epf_alloc_iod_data_buf()
1157 req->transfer_len = iod->data_len; in nvmet_pci_epf_alloc_iod_data_buf()
1158 req->sg = iod->data_sgt.sgl; in nvmet_pci_epf_alloc_iod_data_buf()
1159 req->sg_cnt = iod->data_sgt.nents; in nvmet_pci_epf_alloc_iod_data_buf()
1164 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_alloc_iod_data_buf()
1165 return -ENOMEM; in nvmet_pci_epf_alloc_iod_data_buf()
1170 struct nvmet_pci_epf_queue *cq = iod->cq; in nvmet_pci_epf_complete_iod()
1174 iod->status = le16_to_cpu(iod->cqe.status) >> 1; in nvmet_pci_epf_complete_iod()
1175 if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event) in nvmet_pci_epf_complete_iod()
1176 dev_err(iod->ctrl->dev, in nvmet_pci_epf_complete_iod()
1178 iod->sq->qid, nvmet_pci_epf_iod_name(iod), in nvmet_pci_epf_complete_iod()
1179 iod->cmd.common.opcode, iod->status); in nvmet_pci_epf_complete_iod()
1185 spin_lock_irqsave(&cq->lock, flags); in nvmet_pci_epf_complete_iod()
1186 list_add_tail(&iod->link, &cq->list); in nvmet_pci_epf_complete_iod()
1187 queue_delayed_work(system_highpri_wq, &cq->work, 0); in nvmet_pci_epf_complete_iod()
1188 spin_unlock_irqrestore(&cq->lock, flags); in nvmet_pci_epf_complete_iod()
1196 spin_lock_irqsave(&queue->lock, flags); in nvmet_pci_epf_drain_queue()
1197 while (!list_empty(&queue->list)) { in nvmet_pci_epf_drain_queue()
1198 iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod, in nvmet_pci_epf_drain_queue()
1200 list_del_init(&iod->link); in nvmet_pci_epf_drain_queue()
1203 spin_unlock_irqrestore(&queue->lock, flags); in nvmet_pci_epf_drain_queue()
1209 list_add_tail(&port->entry, &nvmet_pci_epf_ports); in nvmet_pci_epf_add_port()
1217 list_del_init(&port->entry); in nvmet_pci_epf_remove_port()
1222 nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid) in nvmet_pci_epf_find_port() argument
1228 if (p->disc_addr.portid == portid) { in nvmet_pci_epf_find_port()
1243 iod->status = le16_to_cpu(req->cqe->status) >> 1; in nvmet_pci_epf_queue_response()
1249 if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) { in nvmet_pci_epf_queue_response()
1254 complete(&iod->done); in nvmet_pci_epf_queue_response()
1259 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_get_mdts() local
1260 int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12; in nvmet_pci_epf_get_mdts()
1262 return ilog2(ctrl->mdts) - page_shift; in nvmet_pci_epf_get_mdts()
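The MDTS value returned above is encoded the way the NVMe spec expects: as a power of two in units of the minimum memory page size (CAP.MPSMIN). A hedged worked example of that encoding, assuming a 512 KB limit and 4 KB pages:

/* Hedged sketch: MDTS field encoding (illustrative numbers). */
#include <assert.h>
#include <stddef.h>

static unsigned int ilog2_sz(size_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	size_t mdts_bytes = 512 * 1024;	/* configured max transfer size */
	unsigned int page_shift = 12;	/* CAP.MPSMIN = 0 -> 4 KB pages */

	/* 2^7 * 4 KB = 512 KB, so the reported MDTS field is 7. */
	assert(ilog2_sz(mdts_bytes) - page_shift == 7);
	return 0;
}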
1268 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_create_cq() local
1269 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; in nvmet_pci_epf_create_cq()
1273 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) in nvmet_pci_epf_create_cq()
1279 cq->pci_addr = pci_addr; in nvmet_pci_epf_create_cq()
1280 cq->qid = cqid; in nvmet_pci_epf_create_cq()
1281 cq->depth = qsize + 1; in nvmet_pci_epf_create_cq()
1282 cq->vector = vector; in nvmet_pci_epf_create_cq()
1283 cq->head = 0; in nvmet_pci_epf_create_cq()
1284 cq->tail = 0; in nvmet_pci_epf_create_cq()
1285 cq->phase = 1; in nvmet_pci_epf_create_cq()
1286 cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32)); in nvmet_pci_epf_create_cq()
1287 nvmet_pci_epf_bar_write32(ctrl, cq->db, 0); in nvmet_pci_epf_create_cq()
1290 cq->qes = sizeof(struct nvme_completion); in nvmet_pci_epf_create_cq()
1292 cq->qes = ctrl->io_cqes; in nvmet_pci_epf_create_cq()
1293 cq->pci_size = cq->qes * cq->depth; in nvmet_pci_epf_create_cq()
1296 cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector); in nvmet_pci_epf_create_cq()
1297 if (!cq->iv) in nvmet_pci_epf_create_cq()
1299 set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); in nvmet_pci_epf_create_cq()
1302 status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth); in nvmet_pci_epf_create_cq()
1310 ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size, in nvmet_pci_epf_create_cq()
1311 &cq->pci_map); in nvmet_pci_epf_create_cq()
1313 dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n", in nvmet_pci_epf_create_cq()
1314 cq->qid, ret); in nvmet_pci_epf_create_cq()
1318 if (cq->pci_map.pci_size < cq->pci_size) { in nvmet_pci_epf_create_cq()
1319 dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n", in nvmet_pci_epf_create_cq()
1320 cq->qid); in nvmet_pci_epf_create_cq()
1324 set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); in nvmet_pci_epf_create_cq()
1326 if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) in nvmet_pci_epf_create_cq()
1327 dev_dbg(ctrl->dev, in nvmet_pci_epf_create_cq()
1328 "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", in nvmet_pci_epf_create_cq()
1329 cqid, qsize, cq->qes, cq->vector); in nvmet_pci_epf_create_cq()
1331 dev_dbg(ctrl->dev, in nvmet_pci_epf_create_cq()
1332 "CQ[%u]: %u entries of %zu B, IRQ disabled\n", in nvmet_pci_epf_create_cq()
1333 cqid, qsize, cq->qes); in nvmet_pci_epf_create_cq()
1338 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); in nvmet_pci_epf_create_cq()
1342 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) in nvmet_pci_epf_create_cq()
1343 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); in nvmet_pci_epf_create_cq()
1349 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_delete_cq() local
1350 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; in nvmet_pci_epf_delete_cq()
1352 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) in nvmet_pci_epf_delete_cq()
1355 cancel_delayed_work_sync(&cq->work); in nvmet_pci_epf_delete_cq()
1357 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) in nvmet_pci_epf_delete_cq()
1358 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); in nvmet_pci_epf_delete_cq()
1359 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); in nvmet_pci_epf_delete_cq()
1360 nvmet_cq_put(&cq->nvme_cq); in nvmet_pci_epf_delete_cq()
1368 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_create_sq() local
1369 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; in nvmet_pci_epf_create_sq()
1370 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; in nvmet_pci_epf_create_sq()
1373 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) in nvmet_pci_epf_create_sq()
1379 sq->pci_addr = pci_addr; in nvmet_pci_epf_create_sq()
1380 sq->qid = sqid; in nvmet_pci_epf_create_sq()
1381 sq->depth = qsize + 1; in nvmet_pci_epf_create_sq()
1382 sq->head = 0; in nvmet_pci_epf_create_sq()
1383 sq->tail = 0; in nvmet_pci_epf_create_sq()
1384 sq->phase = 0; in nvmet_pci_epf_create_sq()
1385 sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32)); in nvmet_pci_epf_create_sq()
1386 nvmet_pci_epf_bar_write32(ctrl, sq->db, 0); in nvmet_pci_epf_create_sq()
1388 sq->qes = 1UL << NVME_ADM_SQES; in nvmet_pci_epf_create_sq()
1390 sq->qes = ctrl->io_sqes; in nvmet_pci_epf_create_sq()
1391 sq->pci_size = sq->qes * sq->depth; in nvmet_pci_epf_create_sq()
1393 status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid, in nvmet_pci_epf_create_sq()
1394 sq->depth); in nvmet_pci_epf_create_sq()
1398 sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND, in nvmet_pci_epf_create_sq()
1399 min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid); in nvmet_pci_epf_create_sq()
1400 if (!sq->iod_wq) { in nvmet_pci_epf_create_sq()
1401 dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid); in nvmet_pci_epf_create_sq()
1406 set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags); in nvmet_pci_epf_create_sq()
1408 dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n", in nvmet_pci_epf_create_sq()
1409 sqid, qsize, sq->qes); in nvmet_pci_epf_create_sq()
1414 nvmet_sq_destroy(&sq->nvme_sq); in nvmet_pci_epf_create_sq()
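The doorbell offsets written above follow the standard NVMe layout: submission queue tail doorbells sit at even slots and completion queue head doorbells at odd slots after the doorbell base, with a 4-byte stride when CAP.DSTRD is 0. A hedged standalone sketch of that offset computation:

/* Hedged sketch: NVMe doorbell register offsets (4-byte stride). */
#include <assert.h>
#include <stdint.h>

#define REG_DBS		0x1000U
#define DB_STRIDE	4U	/* CAP.DSTRD = 0 -> 4-byte stride */

static uint32_t sq_db(uint16_t qid)
{
	return REG_DBS + (2 * qid) * DB_STRIDE;		/* SQ tail doorbell */
}

static uint32_t cq_db(uint16_t qid)
{
	return REG_DBS + (2 * qid + 1) * DB_STRIDE;	/* CQ head doorbell */
}

int main(void)
{
	assert(sq_db(0) == 0x1000);	/* admin SQ tail */
	assert(cq_db(0) == 0x1004);	/* admin CQ head */
	assert(sq_db(1) == 0x1008);	/* I/O SQ 1 tail */
	assert(cq_db(1) == 0x100c);	/* I/O CQ 1 head */
	return 0;
}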
1420 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_delete_sq() local
1421 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; in nvmet_pci_epf_delete_sq()
1423 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) in nvmet_pci_epf_delete_sq()
1426 destroy_workqueue(sq->iod_wq); in nvmet_pci_epf_delete_sq()
1427 sq->iod_wq = NULL; in nvmet_pci_epf_delete_sq()
1431 if (sq->nvme_sq.ctrl) in nvmet_pci_epf_delete_sq()
1432 nvmet_sq_destroy(&sq->nvme_sq); in nvmet_pci_epf_delete_sq()
1440 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_get_feat() local
1450 if (!ctrl->sq_ab) in nvmet_pci_epf_get_feat()
1451 arb->ab = 0x7; in nvmet_pci_epf_get_feat()
1453 arb->ab = ilog2(ctrl->sq_ab); in nvmet_pci_epf_get_feat()
1458 irqc->thr = ctrl->irq_vector_threshold; in nvmet_pci_epf_get_feat()
1459 irqc->time = 0; in nvmet_pci_epf_get_feat()
1464 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_get_feat()
1465 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); in nvmet_pci_epf_get_feat()
1467 irqcfg->cd = iv->cd; in nvmet_pci_epf_get_feat()
1472 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_get_feat()
1483 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_set_feat() local
1493 if (arb->ab == 0x7) in nvmet_pci_epf_set_feat()
1494 ctrl->sq_ab = 0; in nvmet_pci_epf_set_feat()
1496 ctrl->sq_ab = 1 << arb->ab; in nvmet_pci_epf_set_feat()
1505 ctrl->irq_vector_threshold = irqc->thr + 1; in nvmet_pci_epf_set_feat()
1510 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_set_feat()
1511 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); in nvmet_pci_epf_set_feat()
1513 iv->cd = irqcfg->cd; in nvmet_pci_epf_set_feat()
1518 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_set_feat()
1543 static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_init_queue() argument
1549 queue = &ctrl->sq[qid]; in nvmet_pci_epf_init_queue()
1551 queue = &ctrl->cq[qid]; in nvmet_pci_epf_init_queue()
1552 INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work); in nvmet_pci_epf_init_queue()
1554 queue->ctrl = ctrl; in nvmet_pci_epf_init_queue()
1555 queue->qid = qid; in nvmet_pci_epf_init_queue()
1556 spin_lock_init(&queue->lock); in nvmet_pci_epf_init_queue()
1557 INIT_LIST_HEAD(&queue->list); in nvmet_pci_epf_init_queue()
1560 static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_alloc_queues() argument
1564 ctrl->sq = kcalloc(ctrl->nr_queues, in nvmet_pci_epf_alloc_queues()
1566 if (!ctrl->sq) in nvmet_pci_epf_alloc_queues()
1567 return -ENOMEM; in nvmet_pci_epf_alloc_queues()
1569 ctrl->cq = kcalloc(ctrl->nr_queues, in nvmet_pci_epf_alloc_queues()
1571 if (!ctrl->cq) { in nvmet_pci_epf_alloc_queues()
1572 kfree(ctrl->sq); in nvmet_pci_epf_alloc_queues()
1573 ctrl->sq = NULL; in nvmet_pci_epf_alloc_queues()
1574 return -ENOMEM; in nvmet_pci_epf_alloc_queues()
1577 for (qid = 0; qid < ctrl->nr_queues; qid++) { in nvmet_pci_epf_alloc_queues()
1578 nvmet_pci_epf_init_queue(ctrl, qid, true); in nvmet_pci_epf_alloc_queues()
1579 nvmet_pci_epf_init_queue(ctrl, qid, false); in nvmet_pci_epf_alloc_queues()
1585 static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_free_queues() argument
1587 kfree(ctrl->sq); in nvmet_pci_epf_free_queues()
1588 ctrl->sq = NULL; in nvmet_pci_epf_free_queues()
1589 kfree(ctrl->cq); in nvmet_pci_epf_free_queues()
1590 ctrl->cq = NULL; in nvmet_pci_epf_free_queues()
1597 struct nvmet_req *req = &iod->req; in nvmet_pci_epf_exec_iod_work()
1600 if (!iod->ctrl->link_up) { in nvmet_pci_epf_exec_iod_work()
1605 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) { in nvmet_pci_epf_exec_iod_work()
1606 iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; in nvmet_pci_epf_exec_iod_work()
1615 if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops)) in nvmet_pci_epf_exec_iod_work()
1618 iod->data_len = nvmet_req_transfer_len(req); in nvmet_pci_epf_exec_iod_work()
1619 if (iod->data_len) { in nvmet_pci_epf_exec_iod_work()
1622 * PCI root-complex host. in nvmet_pci_epf_exec_iod_work()
1624 if (nvme_is_write(&iod->cmd)) in nvmet_pci_epf_exec_iod_work()
1625 iod->dma_dir = DMA_FROM_DEVICE; in nvmet_pci_epf_exec_iod_work()
1627 iod->dma_dir = DMA_TO_DEVICE; in nvmet_pci_epf_exec_iod_work()
1634 if (!ret && iod->dma_dir == DMA_FROM_DEVICE) in nvmet_pci_epf_exec_iod_work()
1642 req->execute(req); in nvmet_pci_epf_exec_iod_work()
1649 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) in nvmet_pci_epf_exec_iod_work()
1652 wait_for_completion(&iod->done); in nvmet_pci_epf_exec_iod_work()
1654 if (iod->status != NVME_SC_SUCCESS) in nvmet_pci_epf_exec_iod_work()
1657 WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE); in nvmet_pci_epf_exec_iod_work()
1664 static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_process_sq() argument
1669 u16 head = sq->head; in nvmet_pci_epf_process_sq()
1671 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); in nvmet_pci_epf_process_sq()
1672 while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) { in nvmet_pci_epf_process_sq()
1678 ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd, in nvmet_pci_epf_process_sq()
1679 sq->pci_addr + head * sq->qes, in nvmet_pci_epf_process_sq()
1680 sq->qes, DMA_FROM_DEVICE); in nvmet_pci_epf_process_sq()
1687 dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n", in nvmet_pci_epf_process_sq()
1688 sq->qid, head, sq->tail, in nvmet_pci_epf_process_sq()
1692 if (head == sq->depth) in nvmet_pci_epf_process_sq()
1694 WRITE_ONCE(sq->head, head); in nvmet_pci_epf_process_sq()
1697 queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work); in nvmet_pci_epf_process_sq()
1699 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); in nvmet_pci_epf_process_sq()
1707 struct nvmet_pci_epf_ctrl *ctrl = in nvmet_pci_epf_poll_sqs_work() local
1714 while (ctrl->link_up && ctrl->enabled) { in nvmet_pci_epf_poll_sqs_work()
1716 /* Do round-robin arbitration. */ in nvmet_pci_epf_poll_sqs_work()
1717 for (i = 0; i < ctrl->nr_queues; i++) { in nvmet_pci_epf_poll_sqs_work()
1718 sq = &ctrl->sq[i]; in nvmet_pci_epf_poll_sqs_work()
1719 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) in nvmet_pci_epf_poll_sqs_work()
1721 if (nvmet_pci_epf_process_sq(ctrl, sq)) in nvmet_pci_epf_poll_sqs_work()
1752 schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL); in nvmet_pci_epf_poll_sqs_work()
1759 struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl; in nvmet_pci_epf_cq_work() local
1765 while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) { in nvmet_pci_epf_cq_work()
1768 cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db); in nvmet_pci_epf_cq_work()
1769 if (cq->head == cq->tail + 1) { in nvmet_pci_epf_cq_work()
1770 ret = -EAGAIN; in nvmet_pci_epf_cq_work()
1774 spin_lock_irqsave(&cq->lock, flags); in nvmet_pci_epf_cq_work()
1775 iod = list_first_entry_or_null(&cq->list, in nvmet_pci_epf_cq_work()
1778 list_del_init(&iod->link); in nvmet_pci_epf_cq_work()
1779 spin_unlock_irqrestore(&cq->lock, flags); in nvmet_pci_epf_cq_work()
1786 * executed (req->execute() called), the CQE is already in nvmet_pci_epf_cq_work()
1791 cqe = &iod->cqe; in nvmet_pci_epf_cq_work()
1792 cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head)); in nvmet_pci_epf_cq_work()
1793 cqe->sq_id = cpu_to_le16(iod->sq->qid); in nvmet_pci_epf_cq_work()
1794 cqe->command_id = iod->cmd.common.command_id; in nvmet_pci_epf_cq_work()
1795 cqe->status = cpu_to_le16((iod->status << 1) | cq->phase); in nvmet_pci_epf_cq_work()
1797 dev_dbg(ctrl->dev, in nvmet_pci_epf_cq_work()
1799 cq->qid, nvmet_pci_epf_iod_name(iod), iod->status, in nvmet_pci_epf_cq_work()
1800 le64_to_cpu(cqe->result.u64), cq->head, cq->tail, in nvmet_pci_epf_cq_work()
1801 cq->phase); in nvmet_pci_epf_cq_work()
1803 memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes, in nvmet_pci_epf_cq_work()
1804 cqe, cq->qes); in nvmet_pci_epf_cq_work()
1806 cq->tail++; in nvmet_pci_epf_cq_work()
1807 if (cq->tail >= cq->depth) { in nvmet_pci_epf_cq_work()
1808 cq->tail = 0; in nvmet_pci_epf_cq_work()
1809 cq->phase ^= 1; in nvmet_pci_epf_cq_work()
1815 nvmet_pci_epf_raise_irq(ctrl, cq, false); in nvmet_pci_epf_cq_work()
1825 nvmet_pci_epf_raise_irq(ctrl, cq, true); in nvmet_pci_epf_cq_work()
1828 queue_delayed_work(system_highpri_wq, &cq->work, in nvmet_pci_epf_cq_work()
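The tail and phase handling above is the standard NVMe completion queue convention: the phase tag starts at 1 and is flipped each time the tail wraps, which is how the host distinguishes newly posted entries from stale ones. A hedged sketch of that bookkeeping, detached from the driver:

/* Hedged sketch: CQ tail advance and phase-tag flip on wrap. */
#include <stdint.h>
#include <stdio.h>

struct cq_state {
	uint16_t tail;
	uint16_t depth;
	uint8_t phase;
};

static void post_cqe(struct cq_state *cq)
{
	printf("CQE at slot %u, phase %u\n", cq->tail, cq->phase);
	cq->tail++;
	if (cq->tail >= cq->depth) {	/* wrap: restart at slot 0, flip phase */
		cq->tail = 0;
		cq->phase ^= 1;
	}
}

int main(void)
{
	struct cq_state cq = { .tail = 0, .depth = 4, .phase = 1 };

	for (int i = 0; i < 6; i++)	/* slots 0..3 at phase 1, then 0..1 at phase 0 */
		post_cqe(&cq);
	return 0;
}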
1832 static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_clear_ctrl_config() argument
1834 struct nvmet_ctrl *tctrl = ctrl->tctrl; in nvmet_pci_epf_clear_ctrl_config()
1837 tctrl->csts = 0; in nvmet_pci_epf_clear_ctrl_config()
1838 ctrl->csts = 0; in nvmet_pci_epf_clear_ctrl_config()
1839 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); in nvmet_pci_epf_clear_ctrl_config()
1842 tctrl->cc = 0; in nvmet_pci_epf_clear_ctrl_config()
1843 ctrl->cc = 0; in nvmet_pci_epf_clear_ctrl_config()
1844 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); in nvmet_pci_epf_clear_ctrl_config()
1847 static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_enable_ctrl() argument
1853 if (ctrl->enabled) in nvmet_pci_epf_enable_ctrl()
1856 dev_info(ctrl->dev, "Enabling controller\n"); in nvmet_pci_epf_enable_ctrl()
1858 ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12; in nvmet_pci_epf_enable_ctrl()
1859 ctrl->mps = 1UL << ctrl->mps_shift; in nvmet_pci_epf_enable_ctrl()
1860 ctrl->mps_mask = ctrl->mps - 1; in nvmet_pci_epf_enable_ctrl()
1862 ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc); in nvmet_pci_epf_enable_ctrl()
1863 if (ctrl->io_sqes < sizeof(struct nvme_command)) { in nvmet_pci_epf_enable_ctrl()
1864 dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n", in nvmet_pci_epf_enable_ctrl()
1865 ctrl->io_sqes, sizeof(struct nvme_command)); in nvmet_pci_epf_enable_ctrl()
1869 ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc); in nvmet_pci_epf_enable_ctrl()
1870 if (ctrl->io_cqes < sizeof(struct nvme_completion)) { in nvmet_pci_epf_enable_ctrl()
1871 dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n", in nvmet_pci_epf_enable_ctrl()
1872 ctrl->io_cqes, sizeof(struct nvme_completion)); in nvmet_pci_epf_enable_ctrl()
1877 aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA); in nvmet_pci_epf_enable_ctrl()
1878 asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ); in nvmet_pci_epf_enable_ctrl()
1879 acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ); in nvmet_pci_epf_enable_ctrl()
1883 status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0, in nvmet_pci_epf_enable_ctrl()
1887 dev_err(ctrl->dev, "Failed to create admin completion queue\n"); in nvmet_pci_epf_enable_ctrl()
1893 status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0, in nvmet_pci_epf_enable_ctrl()
1896 dev_err(ctrl->dev, "Failed to create admin submission queue\n"); in nvmet_pci_epf_enable_ctrl()
1897 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); in nvmet_pci_epf_enable_ctrl()
1901 ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB; in nvmet_pci_epf_enable_ctrl()
1902 ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD; in nvmet_pci_epf_enable_ctrl()
1903 ctrl->enabled = true; in nvmet_pci_epf_enable_ctrl()
1904 ctrl->csts = NVME_CSTS_RDY; in nvmet_pci_epf_enable_ctrl()
1907 schedule_delayed_work(&ctrl->poll_sqs, 0); in nvmet_pci_epf_enable_ctrl()
1912 nvmet_pci_epf_clear_ctrl_config(ctrl); in nvmet_pci_epf_enable_ctrl()
1913 return -EINVAL; in nvmet_pci_epf_enable_ctrl()
1916 static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_disable_ctrl() argument
1921 if (!ctrl->enabled) in nvmet_pci_epf_disable_ctrl()
1924 dev_info(ctrl->dev, "%s controller\n", in nvmet_pci_epf_disable_ctrl()
1927 ctrl->enabled = false; in nvmet_pci_epf_disable_ctrl()
1928 cancel_delayed_work_sync(&ctrl->poll_sqs); in nvmet_pci_epf_disable_ctrl()
1931 for (qid = 1; qid < ctrl->nr_queues; qid++) in nvmet_pci_epf_disable_ctrl()
1932 nvmet_pci_epf_delete_sq(ctrl->tctrl, qid); in nvmet_pci_epf_disable_ctrl()
1934 for (qid = 1; qid < ctrl->nr_queues; qid++) in nvmet_pci_epf_disable_ctrl()
1935 nvmet_pci_epf_delete_cq(ctrl->tctrl, qid); in nvmet_pci_epf_disable_ctrl()
1938 nvmet_pci_epf_delete_sq(ctrl->tctrl, 0); in nvmet_pci_epf_disable_ctrl()
1939 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); in nvmet_pci_epf_disable_ctrl()
1941 ctrl->csts &= ~NVME_CSTS_RDY; in nvmet_pci_epf_disable_ctrl()
1943 ctrl->csts |= NVME_CSTS_SHST_CMPLT; in nvmet_pci_epf_disable_ctrl()
1944 ctrl->cc &= ~NVME_CC_ENABLE; in nvmet_pci_epf_disable_ctrl()
1945 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); in nvmet_pci_epf_disable_ctrl()
1951 struct nvmet_pci_epf_ctrl *ctrl = in nvmet_pci_epf_poll_cc_work() local
1956 if (!ctrl->tctrl) in nvmet_pci_epf_poll_cc_work()
1959 old_cc = ctrl->cc; in nvmet_pci_epf_poll_cc_work()
1960 new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC); in nvmet_pci_epf_poll_cc_work()
1964 ctrl->cc = new_cc; in nvmet_pci_epf_poll_cc_work()
1967 ret = nvmet_pci_epf_enable_ctrl(ctrl); in nvmet_pci_epf_poll_cc_work()
1973 nvmet_pci_epf_disable_ctrl(ctrl, false); in nvmet_pci_epf_poll_cc_work()
1976 nvmet_pci_epf_disable_ctrl(ctrl, true); in nvmet_pci_epf_poll_cc_work()
1979 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; in nvmet_pci_epf_poll_cc_work()
1981 nvmet_update_cc(ctrl->tctrl, ctrl->cc); in nvmet_pci_epf_poll_cc_work()
1982 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); in nvmet_pci_epf_poll_cc_work()
1985 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); in nvmet_pci_epf_poll_cc_work()
1988 static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_init_bar() argument
1990 struct nvmet_ctrl *tctrl = ctrl->tctrl; in nvmet_pci_epf_init_bar()
1992 ctrl->bar = ctrl->nvme_epf->reg_bar; in nvmet_pci_epf_init_bar()
1995 ctrl->cap = tctrl->cap; in nvmet_pci_epf_init_bar()
1998 ctrl->cap |= 0x1ULL << 16; in nvmet_pci_epf_init_bar()
2000 /* Set Doorbell stride to 4B (DSTRD). */ in nvmet_pci_epf_init_bar()

2001 ctrl->cap &= ~GENMASK_ULL(35, 32); in nvmet_pci_epf_init_bar()
2004 ctrl->cap &= ~(0x1ULL << 36); in nvmet_pci_epf_init_bar()
2007 ctrl->cap &= ~(0x1ULL << 45); in nvmet_pci_epf_init_bar()
2010 ctrl->cap &= ~(0x1ULL << 56); in nvmet_pci_epf_init_bar()
2013 ctrl->cap &= ~(0x1ULL << 57); in nvmet_pci_epf_init_bar()
2015 nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap); in nvmet_pci_epf_init_bar()
2016 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver); in nvmet_pci_epf_init_bar()
2018 nvmet_pci_epf_clear_ctrl_config(ctrl); in nvmet_pci_epf_init_bar()
2024 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_create_ctrl() local
2030 memset(ctrl, 0, sizeof(*ctrl)); in nvmet_pci_epf_create_ctrl()
2031 ctrl->dev = &nvme_epf->epf->dev; in nvmet_pci_epf_create_ctrl()
2032 mutex_init(&ctrl->irq_lock); in nvmet_pci_epf_create_ctrl()
2033 ctrl->nvme_epf = nvme_epf; in nvmet_pci_epf_create_ctrl()
2034 ctrl->mdts = nvme_epf->mdts_kb * SZ_1K; in nvmet_pci_epf_create_ctrl()
2035 INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work); in nvmet_pci_epf_create_ctrl()
2036 INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work); in nvmet_pci_epf_create_ctrl()
2038 ret = mempool_init_kmalloc_pool(&ctrl->iod_pool, in nvmet_pci_epf_create_ctrl()
2042 dev_err(ctrl->dev, "Failed to initialize IOD mempool\n"); in nvmet_pci_epf_create_ctrl()
2046 ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid); in nvmet_pci_epf_create_ctrl()
2047 if (!ctrl->port) { in nvmet_pci_epf_create_ctrl()
2048 dev_err(ctrl->dev, "Port not found\n"); in nvmet_pci_epf_create_ctrl()
2049 ret = -EINVAL; in nvmet_pci_epf_create_ctrl()
2056 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); in nvmet_pci_epf_create_ctrl()
2057 args.port = ctrl->port; in nvmet_pci_epf_create_ctrl()
2058 args.subsysnqn = nvme_epf->subsysnqn; in nvmet_pci_epf_create_ctrl()
2064 ctrl->tctrl = nvmet_alloc_ctrl(&args); in nvmet_pci_epf_create_ctrl()
2065 if (!ctrl->tctrl) { in nvmet_pci_epf_create_ctrl()
2066 dev_err(ctrl->dev, "Failed to create target controller\n"); in nvmet_pci_epf_create_ctrl()
2067 ret = -ENOMEM; in nvmet_pci_epf_create_ctrl()
2070 ctrl->tctrl->drvdata = ctrl; in nvmet_pci_epf_create_ctrl()
2073 if (ctrl->tctrl->pi_support) { in nvmet_pci_epf_create_ctrl()
2074 dev_err(ctrl->dev, in nvmet_pci_epf_create_ctrl()
2076 ret = -ENOTSUPP; in nvmet_pci_epf_create_ctrl()
2081 ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues); in nvmet_pci_epf_create_ctrl()
2082 ret = nvmet_pci_epf_alloc_queues(ctrl); in nvmet_pci_epf_create_ctrl()
2090 ret = nvmet_pci_epf_alloc_irq_vectors(ctrl); in nvmet_pci_epf_create_ctrl()
2094 dev_info(ctrl->dev, in nvmet_pci_epf_create_ctrl()
2095 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n", in nvmet_pci_epf_create_ctrl()
2096 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1, in nvmet_pci_epf_create_ctrl()
2097 ctrl->mdts); in nvmet_pci_epf_create_ctrl()
2100 nvmet_pci_epf_init_bar(ctrl); in nvmet_pci_epf_create_ctrl()
2105 nvmet_pci_epf_free_queues(ctrl); in nvmet_pci_epf_create_ctrl()
2107 nvmet_ctrl_put(ctrl->tctrl); in nvmet_pci_epf_create_ctrl()
2108 ctrl->tctrl = NULL; in nvmet_pci_epf_create_ctrl()
2110 mempool_exit(&ctrl->iod_pool); in nvmet_pci_epf_create_ctrl()
2114 static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_start_ctrl() argument
2117 dev_info(ctrl->dev, "PCI link up\n"); in nvmet_pci_epf_start_ctrl()
2118 ctrl->link_up = true; in nvmet_pci_epf_start_ctrl()
2120 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); in nvmet_pci_epf_start_ctrl()
2123 static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_stop_ctrl() argument
2125 dev_info(ctrl->dev, "PCI link down\n"); in nvmet_pci_epf_stop_ctrl()
2126 ctrl->link_up = false; in nvmet_pci_epf_stop_ctrl()
2128 cancel_delayed_work_sync(&ctrl->poll_cc); in nvmet_pci_epf_stop_ctrl()
2130 nvmet_pci_epf_disable_ctrl(ctrl, false); in nvmet_pci_epf_stop_ctrl()
2131 nvmet_pci_epf_clear_ctrl_config(ctrl); in nvmet_pci_epf_stop_ctrl()
2134 static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_destroy_ctrl() argument
2136 if (!ctrl->tctrl) in nvmet_pci_epf_destroy_ctrl()
2139 dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n", in nvmet_pci_epf_destroy_ctrl()
2140 ctrl->tctrl->subsys->subsysnqn); in nvmet_pci_epf_destroy_ctrl()
2142 nvmet_pci_epf_stop_ctrl(ctrl); in nvmet_pci_epf_destroy_ctrl()
2144 nvmet_pci_epf_free_queues(ctrl); in nvmet_pci_epf_destroy_ctrl()
2145 nvmet_pci_epf_free_irq_vectors(ctrl); in nvmet_pci_epf_destroy_ctrl()
2147 nvmet_ctrl_put(ctrl->tctrl); in nvmet_pci_epf_destroy_ctrl()
2148 ctrl->tctrl = NULL; in nvmet_pci_epf_destroy_ctrl()
2150 mempool_exit(&ctrl->iod_pool); in nvmet_pci_epf_destroy_ctrl()
2155 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_configure_bar()
2156 const struct pci_epc_features *epc_features = nvme_epf->epc_features; in nvmet_pci_epf_configure_bar()
2165 dev_err(&epf->dev, "BAR 0 is not free\n"); in nvmet_pci_epf_configure_bar()
2166 return -ENODEV; in nvmet_pci_epf_configure_bar()
2172 * is required to be 64-bit. Thus, for interoperability, always set the in nvmet_pci_epf_configure_bar()
2173 * type to 64-bit. In the rare case that the PCI EPC does not support in nvmet_pci_epf_configure_bar()
2174 * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail, in nvmet_pci_epf_configure_bar()
2177 epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; in nvmet_pci_epf_configure_bar()
2181 * enough space for the doorbells, followed by the MSI-X table in nvmet_pci_epf_configure_bar()
2187 if (epc_features->msix_capable) { in nvmet_pci_epf_configure_bar()
2190 msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; in nvmet_pci_epf_configure_bar()
2191 nvme_epf->msix_table_offset = reg_size; in nvmet_pci_epf_configure_bar()
2192 pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); in nvmet_pci_epf_configure_bar()
2197 if (epc_features->bar[BAR_0].type == BAR_FIXED) { in nvmet_pci_epf_configure_bar()
2198 if (reg_size > epc_features->bar[BAR_0].fixed_size) { in nvmet_pci_epf_configure_bar()
2199 dev_err(&epf->dev, in nvmet_pci_epf_configure_bar()
2200 "BAR 0 size %llu B too small, need %zu B\n", in nvmet_pci_epf_configure_bar()
2201 epc_features->bar[BAR_0].fixed_size, in nvmet_pci_epf_configure_bar()
2203 return -ENOMEM; in nvmet_pci_epf_configure_bar()
2205 reg_bar_size = epc_features->bar[BAR_0].fixed_size; in nvmet_pci_epf_configure_bar()
2207 reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096)); in nvmet_pci_epf_configure_bar()
2210 nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0, in nvmet_pci_epf_configure_bar()
2212 if (!nvme_epf->reg_bar) { in nvmet_pci_epf_configure_bar()
2213 dev_err(&epf->dev, "Failed to allocate BAR 0\n"); in nvmet_pci_epf_configure_bar()
2214 return -ENOMEM; in nvmet_pci_epf_configure_bar()
2216 memset(nvme_epf->reg_bar, 0, reg_bar_size); in nvmet_pci_epf_configure_bar()
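The sizing logic above lays BAR 0 out as the fixed NVMe registers plus one doorbell pair per queue, then (when MSI-X is used) the vector table and pending-bit array, rounded up to the EPC's alignment. A hedged sketch of that arithmetic; the constants and layout here are assumptions for illustration, not taken from a specific endpoint controller:

/* Hedged sketch: approximate BAR 0 size computation (illustrative). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((size_t)(a) - 1))
#define REG_DBS			0x1000U	/* doorbells start after the fixed registers */
#define MSIX_ENTRY_SIZE		16U	/* one MSI-X table entry */

static size_t bar0_size(unsigned int nr_queues, unsigned int msix_vectors,
			size_t align)
{
	size_t size = REG_DBS + nr_queues * 2 * sizeof(uint32_t); /* regs + doorbells */

	size = ALIGN_UP(size, 8);
	if (msix_vectors) {
		size += MSIX_ENTRY_SIZE * msix_vectors;		/* MSI-X table */
		size += ALIGN_UP((msix_vectors + 7) / 8, 8);	/* pending bit array */
	}
	return ALIGN_UP(size, align);
}

int main(void)
{
	printf("BAR 0: %zu bytes\n", bar0_size(32, 32, 4096));
	return 0;
}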
2223 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_free_bar()
2225 if (!nvme_epf->reg_bar) in nvmet_pci_epf_free_bar()
2228 pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE); in nvmet_pci_epf_free_bar()
2229 nvme_epf->reg_bar = NULL; in nvmet_pci_epf_free_bar()
2234 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_clear_bar()
2236 pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_clear_bar()
2237 &epf->bar[BAR_0]); in nvmet_pci_epf_clear_bar()
2242 const struct pci_epc_features *epc_features = nvme_epf->epc_features; in nvmet_pci_epf_init_irq()
2243 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_init_irq()
2246 /* Enable MSI-X if supported, otherwise, use MSI. */ in nvmet_pci_epf_init_irq()
2247 if (epc_features->msix_capable && epf->msix_interrupts) { in nvmet_pci_epf_init_irq()
2248 ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_init_irq()
2249 epf->msix_interrupts, BAR_0, in nvmet_pci_epf_init_irq()
2250 nvme_epf->msix_table_offset); in nvmet_pci_epf_init_irq()
2252 dev_err(&epf->dev, "Failed to configure MSI-X\n"); in nvmet_pci_epf_init_irq()
2256 nvme_epf->nr_vectors = epf->msix_interrupts; in nvmet_pci_epf_init_irq()
2257 nvme_epf->irq_type = PCI_IRQ_MSIX; in nvmet_pci_epf_init_irq()
2262 if (epc_features->msi_capable && epf->msi_interrupts) { in nvmet_pci_epf_init_irq()
2263 ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_init_irq()
2264 epf->msi_interrupts); in nvmet_pci_epf_init_irq()
2266 dev_err(&epf->dev, "Failed to configure MSI\n"); in nvmet_pci_epf_init_irq()
2270 nvme_epf->nr_vectors = epf->msi_interrupts; in nvmet_pci_epf_init_irq()
2271 nvme_epf->irq_type = PCI_IRQ_MSI; in nvmet_pci_epf_init_irq()
2276 /* MSI and MSI-X are not supported: fall back to INTx. */ in nvmet_pci_epf_init_irq()
2277 nvme_epf->nr_vectors = 1; in nvmet_pci_epf_init_irq()
2278 nvme_epf->irq_type = PCI_IRQ_INTX; in nvmet_pci_epf_init_irq()
2286 const struct pci_epc_features *epc_features = nvme_epf->epc_features; in nvmet_pci_epf_epc_init()
2287 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_epc_init() local
2292 if (epf->vfunc_no > 0) { in nvmet_pci_epf_epc_init()
2293 dev_err(&epf->dev, "Virtual functions are not supported\n"); in nvmet_pci_epf_epc_init()
2294 return -EINVAL; in nvmet_pci_epf_epc_init()
2301 if (epc_features->msix_capable && epf->msix_interrupts) { in nvmet_pci_epf_epc_init()
2302 dev_info(&epf->dev, in nvmet_pci_epf_epc_init()
2303 "PCI endpoint controller supports MSI-X, %u vectors\n", in nvmet_pci_epf_epc_init()
2304 epf->msix_interrupts); in nvmet_pci_epf_epc_init()
2305 max_nr_queues = min(max_nr_queues, epf->msix_interrupts); in nvmet_pci_epf_epc_init()
2306 } else if (epc_features->msi_capable && epf->msi_interrupts) { in nvmet_pci_epf_epc_init()
2307 dev_info(&epf->dev, in nvmet_pci_epf_epc_init()
2309 epf->msi_interrupts); in nvmet_pci_epf_epc_init()
2310 max_nr_queues = min(max_nr_queues, epf->msi_interrupts); in nvmet_pci_epf_epc_init()
2314 dev_err(&epf->dev, "Invalid maximum number of queues %u\n", in nvmet_pci_epf_epc_init()
2316 return -EINVAL; in nvmet_pci_epf_epc_init()
2322 dev_err(&epf->dev, in nvmet_pci_epf_epc_init()
2329 epf->header->vendorid = ctrl->tctrl->subsys->vendor_id; in nvmet_pci_epf_epc_init()
2330 epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id; in nvmet_pci_epf_epc_init()
2331 ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_epc_init()
2332 epf->header); in nvmet_pci_epf_epc_init()
2334 dev_err(&epf->dev, in nvmet_pci_epf_epc_init()
2339 ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_epc_init()
2340 &epf->bar[BAR_0]); in nvmet_pci_epf_epc_init()
2342 dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret); in nvmet_pci_epf_epc_init()
2354 if (!epc_features->linkup_notifier) in nvmet_pci_epf_epc_init()
2355 nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl); in nvmet_pci_epf_epc_init()
2362 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); in nvmet_pci_epf_epc_init()
2369 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_epc_deinit() local
2371 nvmet_pci_epf_destroy_ctrl(ctrl); in nvmet_pci_epf_epc_deinit()
2380 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_link_up() local
2382 nvmet_pci_epf_start_ctrl(ctrl); in nvmet_pci_epf_link_up()
2390 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_link_down() local
2392 nvmet_pci_epf_stop_ctrl(ctrl); in nvmet_pci_epf_link_down()
2408 struct pci_epc *epc = epf->epc; in nvmet_pci_epf_bind()
2412 return -EINVAL; in nvmet_pci_epf_bind()
2414 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); in nvmet_pci_epf_bind()
2416 dev_err(&epf->dev, "epc_features not implemented\n"); in nvmet_pci_epf_bind()
2417 return -EOPNOTSUPP; in nvmet_pci_epf_bind()
2419 nvme_epf->epc_features = epc_features; in nvmet_pci_epf_bind()
2433 struct pci_epc *epc = epf->epc; in nvmet_pci_epf_unbind()
2435 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); in nvmet_pci_epf_unbind()
2437 if (epc->init_complete) { in nvmet_pci_epf_unbind()
2450 .subclass_code = 0x08, /* Non-Volatile Memory controller */
2460 nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL); in nvmet_pci_epf_probe()
2462 return -ENOMEM; in nvmet_pci_epf_probe()
2464 ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock); in nvmet_pci_epf_probe()
2468 nvme_epf->epf = epf; in nvmet_pci_epf_probe()
2469 nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB; in nvmet_pci_epf_probe()
2471 epf->event_ops = &nvmet_pci_epf_event_ops; in nvmet_pci_epf_probe()
2472 epf->header = &nvme_epf_pci_header; in nvmet_pci_epf_probe()
2486 return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid)); in nvmet_pci_epf_portid_show()
2497 if (nvme_epf->ctrl.tctrl) in nvmet_pci_epf_portid_store()
2498 return -EBUSY; in nvmet_pci_epf_portid_store()
2501 return -EINVAL; in nvmet_pci_epf_portid_store()
2504 return -EINVAL; in nvmet_pci_epf_portid_store()
2506 nvme_epf->portid = cpu_to_le16(portid); in nvmet_pci_epf_portid_store()
2519 return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn); in nvmet_pci_epf_subsysnqn_show()
2529 if (nvme_epf->ctrl.tctrl) in nvmet_pci_epf_subsysnqn_store()
2530 return -EBUSY; in nvmet_pci_epf_subsysnqn_store()
2533 return -EINVAL; in nvmet_pci_epf_subsysnqn_store()
2535 strscpy(nvme_epf->subsysnqn, page, len); in nvmet_pci_epf_subsysnqn_store()
2547 return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb); in nvmet_pci_epf_mdts_kb_show()
2558 if (nvme_epf->ctrl.tctrl) in nvmet_pci_epf_mdts_kb_store()
2559 return -EBUSY; in nvmet_pci_epf_mdts_kb_store()
2570 return -EINVAL; in nvmet_pci_epf_mdts_kb_store()
2572 nvme_epf->mdts_kb = mdts_kb; in nvmet_pci_epf_mdts_kb_store()
2596 config_group_init_type_name(&nvme_epf->group, "nvme", in nvmet_pci_epf_add_cfs()
2599 return &nvme_epf->group; in nvmet_pci_epf_add_cfs()