1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVMe PCI Endpoint Function target driver. 4 * 5 * Copyright (c) 2024, Western Digital Corporation or its affiliates. 6 * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com> 7 * REDS Institute, HEIG-VD, HES-SO, Switzerland 8 */ 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/delay.h> 12 #include <linux/dmaengine.h> 13 #include <linux/io.h> 14 #include <linux/mempool.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/nvme.h> 18 #include <linux/pci_ids.h> 19 #include <linux/pci-epc.h> 20 #include <linux/pci-epf.h> 21 #include <linux/pci_regs.h> 22 #include <linux/slab.h> 23 24 #include "nvmet.h" 25 26 static LIST_HEAD(nvmet_pci_epf_ports); 27 static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex); 28 29 /* 30 * Default and maximum allowed data transfer size. For the default, 31 * allow up to 128 page-sized segments. For the maximum allowed, 32 * use 4 times the default (which is completely arbitrary). 33 */ 34 #define NVMET_PCI_EPF_MAX_SEGS 128 35 #define NVMET_PCI_EPF_MDTS_KB \ 36 (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10)) 37 #define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4) 38 39 /* 40 * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an 41 * interrupt vector to the host. This default 8 is completely arbitrary and can 42 * be changed by the host with a nvme_set_features command. 43 */ 44 #define NVMET_PCI_EPF_IV_THRESHOLD 8 45 46 /* 47 * BAR CC register and SQ polling intervals. 48 */ 49 #define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10) 50 #define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5) 51 #define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000) 52 53 /* 54 * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ. 55 */ 56 #define NVMET_PCI_EPF_SQ_AB 8 57 58 /* 59 * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ 60 * is full, in which case we retry the CQ processing after this interval. 61 */ 62 #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1) 63 64 enum nvmet_pci_epf_queue_flags { 65 NVMET_PCI_EPF_Q_LIVE = 0, /* The queue is live */ 66 NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */ 67 }; 68 69 /* 70 * IRQ vector descriptor. 71 */ 72 struct nvmet_pci_epf_irq_vector { 73 unsigned int vector; 74 unsigned int ref; 75 bool cd; 76 int nr_irqs; 77 }; 78 79 struct nvmet_pci_epf_queue { 80 union { 81 struct nvmet_sq nvme_sq; 82 struct nvmet_cq nvme_cq; 83 }; 84 struct nvmet_pci_epf_ctrl *ctrl; 85 unsigned long flags; 86 87 u64 pci_addr; 88 size_t pci_size; 89 struct pci_epc_map pci_map; 90 91 u16 qid; 92 u16 depth; 93 u16 vector; 94 u16 head; 95 u16 tail; 96 u16 phase; 97 u32 db; 98 99 size_t qes; 100 101 struct nvmet_pci_epf_irq_vector *iv; 102 struct workqueue_struct *iod_wq; 103 struct delayed_work work; 104 spinlock_t lock; 105 struct list_head list; 106 }; 107 108 /* 109 * PCI Root Complex (RC) address data segment for mapping an admin or 110 * I/O command buffer @buf of @length bytes to the PCI address @pci_addr. 111 */ 112 struct nvmet_pci_epf_segment { 113 void *buf; 114 u64 pci_addr; 115 u32 length; 116 }; 117 118 /* 119 * Command descriptors. 
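 * An IOD (I/O descriptor) tracks a single host command from the time it is
 * fetched from a submission queue until its completion entry is posted: it
 * holds the NVMe command, the target request, the PCI address segments for
 * the data buffer and the resulting CQE.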
120 */ 121 struct nvmet_pci_epf_iod { 122 struct list_head link; 123 124 struct nvmet_req req; 125 struct nvme_command cmd; 126 struct nvme_completion cqe; 127 unsigned int status; 128 129 struct nvmet_pci_epf_ctrl *ctrl; 130 131 struct nvmet_pci_epf_queue *sq; 132 struct nvmet_pci_epf_queue *cq; 133 134 /* Data transfer size and direction for the command. */ 135 size_t data_len; 136 enum dma_data_direction dma_dir; 137 138 /* 139 * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we 140 * use only @data_seg. Otherwise, the array of segments @data_segs is 141 * allocated to manage multiple PCI address data segments. @data_sgl and 142 * @data_sgt are used to setup the command request for execution by the 143 * target core. 144 */ 145 unsigned int nr_data_segs; 146 struct nvmet_pci_epf_segment data_seg; 147 struct nvmet_pci_epf_segment *data_segs; 148 struct scatterlist data_sgl; 149 struct sg_table data_sgt; 150 151 struct work_struct work; 152 struct completion done; 153 }; 154 155 /* 156 * PCI target controller private data. 157 */ 158 struct nvmet_pci_epf_ctrl { 159 struct nvmet_pci_epf *nvme_epf; 160 struct nvmet_port *port; 161 struct nvmet_ctrl *tctrl; 162 struct device *dev; 163 164 unsigned int nr_queues; 165 struct nvmet_pci_epf_queue *sq; 166 struct nvmet_pci_epf_queue *cq; 167 unsigned int sq_ab; 168 169 mempool_t iod_pool; 170 void *bar; 171 u64 cap; 172 u32 cc; 173 u32 csts; 174 175 size_t io_sqes; 176 size_t io_cqes; 177 178 size_t mps_shift; 179 size_t mps; 180 size_t mps_mask; 181 182 unsigned int mdts; 183 184 struct delayed_work poll_cc; 185 struct delayed_work poll_sqs; 186 187 struct mutex irq_lock; 188 struct nvmet_pci_epf_irq_vector *irq_vectors; 189 unsigned int irq_vector_threshold; 190 191 bool link_up; 192 bool enabled; 193 }; 194 195 /* 196 * PCI EPF driver private data. 197 */ 198 struct nvmet_pci_epf { 199 struct pci_epf *epf; 200 201 const struct pci_epc_features *epc_features; 202 203 void *reg_bar; 204 size_t msix_table_offset; 205 206 unsigned int irq_type; 207 unsigned int nr_vectors; 208 209 struct nvmet_pci_epf_ctrl ctrl; 210 211 bool dma_enabled; 212 struct dma_chan *dma_tx_chan; 213 struct mutex dma_tx_lock; 214 struct dma_chan *dma_rx_chan; 215 struct mutex dma_rx_lock; 216 217 struct mutex mmio_lock; 218 219 /* PCI endpoint function configfs attributes. 
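	 * These are set through the endpoint function configfs directory and
	 * are used when creating the controller: portid selects the nvmet
	 * port, subsysnqn the subsystem, and mdts_kb the advertised MDTS.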
*/ 220 struct config_group group; 221 __le16 portid; 222 char subsysnqn[NVMF_NQN_SIZE]; 223 unsigned int mdts_kb; 224 }; 225 226 static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl, 227 u32 off) 228 { 229 __le32 *bar_reg = ctrl->bar + off; 230 231 return le32_to_cpu(READ_ONCE(*bar_reg)); 232 } 233 234 static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl, 235 u32 off, u32 val) 236 { 237 __le32 *bar_reg = ctrl->bar + off; 238 239 WRITE_ONCE(*bar_reg, cpu_to_le32(val)); 240 } 241 242 static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl, 243 u32 off) 244 { 245 return (u64)nvmet_pci_epf_bar_read32(ctrl, off) | 246 ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32); 247 } 248 249 static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl, 250 u32 off, u64 val) 251 { 252 nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF); 253 nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF); 254 } 255 256 static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf, 257 u64 pci_addr, size_t size, struct pci_epc_map *map) 258 { 259 struct pci_epf *epf = nvme_epf->epf; 260 261 return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, 262 pci_addr, size, map); 263 } 264 265 static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf, 266 struct pci_epc_map *map) 267 { 268 struct pci_epf *epf = nvme_epf->epf; 269 270 pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map); 271 } 272 273 struct nvmet_pci_epf_dma_filter { 274 struct device *dev; 275 u32 dma_mask; 276 }; 277 278 static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg) 279 { 280 struct nvmet_pci_epf_dma_filter *filter = arg; 281 struct dma_slave_caps caps; 282 283 memset(&caps, 0, sizeof(caps)); 284 dma_get_slave_caps(chan, &caps); 285 286 return chan->device->dev == filter->dev && 287 (filter->dma_mask & caps.directions); 288 } 289 290 static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf) 291 { 292 struct pci_epf *epf = nvme_epf->epf; 293 struct device *dev = &epf->dev; 294 struct nvmet_pci_epf_dma_filter filter; 295 struct dma_chan *chan; 296 dma_cap_mask_t mask; 297 298 mutex_init(&nvme_epf->dma_rx_lock); 299 mutex_init(&nvme_epf->dma_tx_lock); 300 301 dma_cap_zero(mask); 302 dma_cap_set(DMA_SLAVE, mask); 303 304 filter.dev = epf->epc->dev.parent; 305 filter.dma_mask = BIT(DMA_DEV_TO_MEM); 306 307 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter); 308 if (!chan) 309 goto out_dma_no_rx; 310 311 nvme_epf->dma_rx_chan = chan; 312 313 filter.dma_mask = BIT(DMA_MEM_TO_DEV); 314 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter); 315 if (!chan) 316 goto out_dma_no_tx; 317 318 nvme_epf->dma_tx_chan = chan; 319 320 nvme_epf->dma_enabled = true; 321 322 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", 323 dma_chan_name(chan), 324 dma_get_max_seg_size(dmaengine_get_dma_device(chan))); 325 326 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", 327 dma_chan_name(chan), 328 dma_get_max_seg_size(dmaengine_get_dma_device(chan))); 329 330 return; 331 332 out_dma_no_tx: 333 dma_release_channel(nvme_epf->dma_rx_chan); 334 nvme_epf->dma_rx_chan = NULL; 335 336 out_dma_no_rx: 337 mutex_destroy(&nvme_epf->dma_rx_lock); 338 mutex_destroy(&nvme_epf->dma_tx_lock); 339 nvme_epf->dma_enabled = false; 340 341 dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n"); 342 } 343 344 static void nvmet_pci_epf_deinit_dma(struct 
nvmet_pci_epf *nvme_epf) 345 { 346 if (!nvme_epf->dma_enabled) 347 return; 348 349 dma_release_channel(nvme_epf->dma_tx_chan); 350 nvme_epf->dma_tx_chan = NULL; 351 dma_release_channel(nvme_epf->dma_rx_chan); 352 nvme_epf->dma_rx_chan = NULL; 353 mutex_destroy(&nvme_epf->dma_rx_lock); 354 mutex_destroy(&nvme_epf->dma_tx_lock); 355 nvme_epf->dma_enabled = false; 356 } 357 358 static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf, 359 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) 360 { 361 struct pci_epf *epf = nvme_epf->epf; 362 struct dma_async_tx_descriptor *desc; 363 struct dma_slave_config sconf = {}; 364 struct device *dev = &epf->dev; 365 struct device *dma_dev; 366 struct dma_chan *chan; 367 dma_cookie_t cookie; 368 dma_addr_t dma_addr; 369 struct mutex *lock; 370 int ret; 371 372 switch (dir) { 373 case DMA_FROM_DEVICE: 374 lock = &nvme_epf->dma_rx_lock; 375 chan = nvme_epf->dma_rx_chan; 376 sconf.direction = DMA_DEV_TO_MEM; 377 sconf.src_addr = seg->pci_addr; 378 break; 379 case DMA_TO_DEVICE: 380 lock = &nvme_epf->dma_tx_lock; 381 chan = nvme_epf->dma_tx_chan; 382 sconf.direction = DMA_MEM_TO_DEV; 383 sconf.dst_addr = seg->pci_addr; 384 break; 385 default: 386 return -EINVAL; 387 } 388 389 mutex_lock(lock); 390 391 dma_dev = dmaengine_get_dma_device(chan); 392 dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir); 393 ret = dma_mapping_error(dma_dev, dma_addr); 394 if (ret) 395 goto unlock; 396 397 ret = dmaengine_slave_config(chan, &sconf); 398 if (ret) { 399 dev_err(dev, "Failed to configure DMA channel\n"); 400 goto unmap; 401 } 402 403 desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length, 404 sconf.direction, DMA_CTRL_ACK); 405 if (!desc) { 406 dev_err(dev, "Failed to prepare DMA\n"); 407 ret = -EIO; 408 goto unmap; 409 } 410 411 cookie = dmaengine_submit(desc); 412 ret = dma_submit_error(cookie); 413 if (ret) { 414 dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret); 415 goto unmap; 416 } 417 418 if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) { 419 dev_err(dev, "DMA transfer failed\n"); 420 ret = -EIO; 421 } 422 423 dmaengine_terminate_sync(chan); 424 425 unmap: 426 dma_unmap_single(dma_dev, dma_addr, seg->length, dir); 427 428 unlock: 429 mutex_unlock(lock); 430 431 return ret; 432 } 433 434 static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf, 435 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) 436 { 437 u64 pci_addr = seg->pci_addr; 438 u32 length = seg->length; 439 void *buf = seg->buf; 440 struct pci_epc_map map; 441 int ret = -EINVAL; 442 443 /* 444 * Note: MMIO transfers do not need serialization but this is a 445 * simple way to avoid using too many mapping windows. 
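	 * Note also that pci_epc_mem_map() may map less than the requested
	 * size, which is why the loop below advances by map.pci_size until
	 * the whole segment has been transferred.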
446 */ 447 mutex_lock(&nvme_epf->mmio_lock); 448 449 while (length) { 450 ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map); 451 if (ret) 452 break; 453 454 switch (dir) { 455 case DMA_FROM_DEVICE: 456 memcpy_fromio(buf, map.virt_addr, map.pci_size); 457 break; 458 case DMA_TO_DEVICE: 459 memcpy_toio(map.virt_addr, buf, map.pci_size); 460 break; 461 default: 462 ret = -EINVAL; 463 goto unlock; 464 } 465 466 pci_addr += map.pci_size; 467 buf += map.pci_size; 468 length -= map.pci_size; 469 470 nvmet_pci_epf_mem_unmap(nvme_epf, &map); 471 } 472 473 unlock: 474 mutex_unlock(&nvme_epf->mmio_lock); 475 476 return ret; 477 } 478 479 static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf, 480 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) 481 { 482 if (nvme_epf->dma_enabled) 483 return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir); 484 485 return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir); 486 } 487 488 static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl, 489 void *buf, u64 pci_addr, u32 length, 490 enum dma_data_direction dir) 491 { 492 struct nvmet_pci_epf_segment seg = { 493 .buf = buf, 494 .pci_addr = pci_addr, 495 .length = length, 496 }; 497 498 return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir); 499 } 500 501 static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) 502 { 503 ctrl->irq_vectors = kcalloc(ctrl->nr_queues, 504 sizeof(struct nvmet_pci_epf_irq_vector), 505 GFP_KERNEL); 506 if (!ctrl->irq_vectors) 507 return -ENOMEM; 508 509 mutex_init(&ctrl->irq_lock); 510 511 return 0; 512 } 513 514 static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) 515 { 516 if (ctrl->irq_vectors) { 517 mutex_destroy(&ctrl->irq_lock); 518 kfree(ctrl->irq_vectors); 519 ctrl->irq_vectors = NULL; 520 } 521 } 522 523 static struct nvmet_pci_epf_irq_vector * 524 nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) 525 { 526 struct nvmet_pci_epf_irq_vector *iv; 527 int i; 528 529 lockdep_assert_held(&ctrl->irq_lock); 530 531 for (i = 0; i < ctrl->nr_queues; i++) { 532 iv = &ctrl->irq_vectors[i]; 533 if (iv->ref && iv->vector == vector) 534 return iv; 535 } 536 537 return NULL; 538 } 539 540 static struct nvmet_pci_epf_irq_vector * 541 nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) 542 { 543 struct nvmet_pci_epf_irq_vector *iv; 544 int i; 545 546 mutex_lock(&ctrl->irq_lock); 547 548 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); 549 if (iv) { 550 iv->ref++; 551 goto unlock; 552 } 553 554 for (i = 0; i < ctrl->nr_queues; i++) { 555 iv = &ctrl->irq_vectors[i]; 556 if (!iv->ref) 557 break; 558 } 559 560 if (WARN_ON_ONCE(!iv)) 561 goto unlock; 562 563 iv->ref = 1; 564 iv->vector = vector; 565 iv->nr_irqs = 0; 566 567 unlock: 568 mutex_unlock(&ctrl->irq_lock); 569 570 return iv; 571 } 572 573 static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, 574 u16 vector) 575 { 576 struct nvmet_pci_epf_irq_vector *iv; 577 578 mutex_lock(&ctrl->irq_lock); 579 580 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); 581 if (iv) { 582 iv->ref--; 583 if (!iv->ref) { 584 iv->vector = 0; 585 iv->nr_irqs = 0; 586 } 587 } 588 589 mutex_unlock(&ctrl->irq_lock); 590 } 591 592 static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, 593 struct nvmet_pci_epf_queue *cq, bool force) 594 { 595 struct nvmet_pci_epf_irq_vector *iv = cq->iv; 596 bool ret; 597 598 /* IRQ coalescing for the admin queue is not allowed. 
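	 * Per the NVMe base specification, the interrupt coalescing feature
	 * does not apply to the admin completion queue, so always signal it.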
*/ 599 if (!cq->qid) 600 return true; 601 602 if (iv->cd) 603 return true; 604 605 if (force) { 606 ret = iv->nr_irqs > 0; 607 } else { 608 iv->nr_irqs++; 609 ret = iv->nr_irqs >= ctrl->irq_vector_threshold; 610 } 611 if (ret) 612 iv->nr_irqs = 0; 613 614 return ret; 615 } 616 617 static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, 618 struct nvmet_pci_epf_queue *cq, bool force) 619 { 620 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf; 621 struct pci_epf *epf = nvme_epf->epf; 622 int ret = 0; 623 624 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) || 625 !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) 626 return; 627 628 mutex_lock(&ctrl->irq_lock); 629 630 if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force)) 631 goto unlock; 632 633 switch (nvme_epf->irq_type) { 634 case PCI_IRQ_MSIX: 635 case PCI_IRQ_MSI: 636 /* 637 * If we fail to raise an MSI or MSI-X interrupt, it is likely 638 * because the host is using legacy INTX IRQs (e.g. BIOS, 639 * grub), but we can fallback to the INTX type only if the 640 * endpoint controller supports this type. 641 */ 642 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, 643 nvme_epf->irq_type, cq->vector + 1); 644 if (!ret || !nvme_epf->epc_features->intx_capable) 645 break; 646 fallthrough; 647 case PCI_IRQ_INTX: 648 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, 649 PCI_IRQ_INTX, 0); 650 break; 651 default: 652 WARN_ON_ONCE(1); 653 ret = -EINVAL; 654 break; 655 } 656 657 if (ret) 658 dev_err_ratelimited(ctrl->dev, 659 "CQ[%u]: Failed to raise IRQ (err=%d)\n", 660 cq->qid, ret); 661 662 unlock: 663 mutex_unlock(&ctrl->irq_lock); 664 } 665 666 static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod) 667 { 668 return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode); 669 } 670 671 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work); 672 673 static struct nvmet_pci_epf_iod * 674 nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq) 675 { 676 struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl; 677 struct nvmet_pci_epf_iod *iod; 678 679 iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL); 680 if (unlikely(!iod)) 681 return NULL; 682 683 memset(iod, 0, sizeof(*iod)); 684 iod->req.cmd = &iod->cmd; 685 iod->req.cqe = &iod->cqe; 686 iod->req.port = ctrl->port; 687 iod->ctrl = ctrl; 688 iod->sq = sq; 689 iod->cq = &ctrl->cq[sq->qid]; 690 INIT_LIST_HEAD(&iod->link); 691 iod->dma_dir = DMA_NONE; 692 INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work); 693 init_completion(&iod->done); 694 695 return iod; 696 } 697 698 /* 699 * Allocate or grow a command table of PCI segments. 
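 * This is used both for the initial allocation when parsing PRPs and to
 * extend the table while walking chained SGL segments; krealloc() preserves
 * the entries already filled in.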
700 */ 701 static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod, 702 int nsegs) 703 { 704 struct nvmet_pci_epf_segment *segs; 705 int nr_segs = iod->nr_data_segs + nsegs; 706 707 segs = krealloc(iod->data_segs, 708 nr_segs * sizeof(struct nvmet_pci_epf_segment), 709 GFP_KERNEL | __GFP_ZERO); 710 if (!segs) 711 return -ENOMEM; 712 713 iod->nr_data_segs = nr_segs; 714 iod->data_segs = segs; 715 716 return 0; 717 } 718 719 static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod) 720 { 721 int i; 722 723 if (iod->data_segs) { 724 for (i = 0; i < iod->nr_data_segs; i++) 725 kfree(iod->data_segs[i].buf); 726 if (iod->data_segs != &iod->data_seg) 727 kfree(iod->data_segs); 728 } 729 if (iod->data_sgt.nents > 1) 730 sg_free_table(&iod->data_sgt); 731 mempool_free(iod, &iod->ctrl->iod_pool); 732 } 733 734 static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod) 735 { 736 struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf; 737 struct nvmet_pci_epf_segment *seg = &iod->data_segs[0]; 738 int i, ret; 739 740 /* Split the data transfer according to the PCI segments. */ 741 for (i = 0; i < iod->nr_data_segs; i++, seg++) { 742 ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir); 743 if (ret) { 744 iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR; 745 return ret; 746 } 747 } 748 749 return 0; 750 } 751 752 static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl, 753 u64 prp) 754 { 755 return prp & ctrl->mps_mask; 756 } 757 758 static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl, 759 u64 prp) 760 { 761 return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp); 762 } 763 764 /* 765 * Transfer a PRP list from the host and return the number of prps. 766 */ 767 static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp, 768 size_t xfer_len, __le64 *prps) 769 { 770 size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift; 771 u32 length; 772 int ret; 773 774 /* 775 * Compute the number of PRPs required for the number of bytes to 776 * transfer (xfer_len). If this number overflows the memory page size 777 * with the PRP list pointer specified, only return the space available 778 * in the memory page, the last PRP in there will be a PRP list pointer 779 * to the remaining PRPs. 780 */ 781 length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3); 782 ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE); 783 if (ret) 784 return ret; 785 786 return length >> 3; 787 } 788 789 static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl, 790 struct nvmet_pci_epf_iod *iod) 791 { 792 struct nvme_command *cmd = &iod->cmd; 793 struct nvmet_pci_epf_segment *seg; 794 size_t size = 0, ofst, prp_size, xfer_len; 795 size_t transfer_len = iod->data_len; 796 int nr_segs, nr_prps = 0; 797 u64 pci_addr, prp; 798 int i = 0, ret; 799 __le64 *prps; 800 801 prps = kzalloc(ctrl->mps, GFP_KERNEL); 802 if (!prps) 803 goto err_internal; 804 805 /* 806 * Allocate PCI segments for the command: this considers the worst case 807 * scenario where all prps are discontiguous, so get as many segments 808 * as we can have prps. In practice, most of the time, we will have 809 * far less PCI segments than prps. 
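	 * For example, with a 4 KB memory page size, a 32 KB transfer with a
	 * non-zero prp1 offset needs at most 9 PRPs and thus at most 9 PCI
	 * segments.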
810 */ 811 prp = le64_to_cpu(cmd->common.dptr.prp1); 812 if (!prp) 813 goto err_invalid_field; 814 815 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp); 816 nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift; 817 818 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs); 819 if (ret) 820 goto err_internal; 821 822 /* Set the first segment using prp1. */ 823 seg = &iod->data_segs[0]; 824 seg->pci_addr = prp; 825 seg->length = nvmet_pci_epf_prp_size(ctrl, prp); 826 827 size = seg->length; 828 pci_addr = prp + size; 829 nr_segs = 1; 830 831 /* 832 * Now build the PCI address segments using the PRP lists, starting 833 * from prp2. 834 */ 835 prp = le64_to_cpu(cmd->common.dptr.prp2); 836 if (!prp) 837 goto err_invalid_field; 838 839 while (size < transfer_len) { 840 xfer_len = transfer_len - size; 841 842 if (!nr_prps) { 843 nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp, 844 xfer_len, prps); 845 if (nr_prps < 0) 846 goto err_internal; 847 848 i = 0; 849 ofst = 0; 850 } 851 852 /* Current entry */ 853 prp = le64_to_cpu(prps[i]); 854 if (!prp) 855 goto err_invalid_field; 856 857 /* Did we reach the last PRP entry of the list? */ 858 if (xfer_len > ctrl->mps && i == nr_prps - 1) { 859 /* We need more PRPs: PRP is a list pointer. */ 860 nr_prps = 0; 861 continue; 862 } 863 864 /* Only the first PRP is allowed to have an offset. */ 865 if (nvmet_pci_epf_prp_ofst(ctrl, prp)) 866 goto err_invalid_offset; 867 868 if (prp != pci_addr) { 869 /* Discontiguous prp: new segment. */ 870 nr_segs++; 871 if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs)) 872 goto err_internal; 873 874 seg++; 875 seg->pci_addr = prp; 876 seg->length = 0; 877 pci_addr = prp; 878 } 879 880 prp_size = min_t(size_t, ctrl->mps, xfer_len); 881 seg->length += prp_size; 882 pci_addr += prp_size; 883 size += prp_size; 884 885 i++; 886 } 887 888 iod->nr_data_segs = nr_segs; 889 ret = 0; 890 891 if (size != transfer_len) { 892 dev_err(ctrl->dev, 893 "PRPs transfer length mismatch: got %zu B, need %zu B\n", 894 size, transfer_len); 895 goto err_internal; 896 } 897 898 kfree(prps); 899 900 return 0; 901 902 err_invalid_offset: 903 dev_err(ctrl->dev, "PRPs list invalid offset\n"); 904 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; 905 goto err; 906 907 err_invalid_field: 908 dev_err(ctrl->dev, "PRPs list invalid field\n"); 909 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 910 goto err; 911 912 err_internal: 913 dev_err(ctrl->dev, "PRPs list internal error\n"); 914 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 915 916 err: 917 kfree(prps); 918 return -EINVAL; 919 } 920 921 static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl, 922 struct nvmet_pci_epf_iod *iod) 923 { 924 struct nvme_command *cmd = &iod->cmd; 925 size_t transfer_len = iod->data_len; 926 int ret, nr_segs = 1; 927 u64 prp1, prp2 = 0; 928 size_t prp1_size; 929 930 prp1 = le64_to_cpu(cmd->common.dptr.prp1); 931 prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1); 932 933 /* For commands crossing a page boundary, we should have prp2. 
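	 * In this simple case the transfer spans at most two memory pages, so
	 * prp2 points directly to the second page and is never a PRP list
	 * pointer.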
*/ 934 if (transfer_len > prp1_size) { 935 prp2 = le64_to_cpu(cmd->common.dptr.prp2); 936 if (!prp2) { 937 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 938 return -EINVAL; 939 } 940 if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) { 941 iod->status = 942 NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; 943 return -EINVAL; 944 } 945 if (prp2 != prp1 + prp1_size) 946 nr_segs = 2; 947 } 948 949 if (nr_segs == 1) { 950 iod->nr_data_segs = 1; 951 iod->data_segs = &iod->data_seg; 952 iod->data_segs[0].pci_addr = prp1; 953 iod->data_segs[0].length = transfer_len; 954 return 0; 955 } 956 957 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs); 958 if (ret) { 959 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 960 return ret; 961 } 962 963 iod->data_segs[0].pci_addr = prp1; 964 iod->data_segs[0].length = prp1_size; 965 iod->data_segs[1].pci_addr = prp2; 966 iod->data_segs[1].length = transfer_len - prp1_size; 967 968 return 0; 969 } 970 971 static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod) 972 { 973 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; 974 u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1); 975 size_t ofst; 976 977 /* Get the PCI address segments for the command using its PRPs. */ 978 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1); 979 if (ofst & 0x3) { 980 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; 981 return -EINVAL; 982 } 983 984 if (iod->data_len + ofst <= ctrl->mps * 2) 985 return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod); 986 987 return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod); 988 } 989 990 /* 991 * Transfer an SGL segment from the host and return the number of data 992 * descriptors and the next segment descriptor, if any. 993 */ 994 static struct nvme_sgl_desc * 995 nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl, 996 struct nvme_sgl_desc *desc, unsigned int *nr_sgls) 997 { 998 struct nvme_sgl_desc *sgls; 999 u32 length = le32_to_cpu(desc->length); 1000 int nr_descs, ret; 1001 void *buf; 1002 1003 buf = kmalloc(length, GFP_KERNEL); 1004 if (!buf) 1005 return NULL; 1006 1007 ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length, 1008 DMA_FROM_DEVICE); 1009 if (ret) { 1010 kfree(buf); 1011 return NULL; 1012 } 1013 1014 sgls = buf; 1015 nr_descs = length / sizeof(struct nvme_sgl_desc); 1016 if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) || 1017 sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { 1018 /* 1019 * We have another SGL segment following this one: do not count 1020 * it as a regular data SGL descriptor and return it to the 1021 * caller. 1022 */ 1023 *desc = sgls[nr_descs - 1]; 1024 nr_descs--; 1025 } else { 1026 /* We do not have another SGL segment after this one. */ 1027 desc->length = 0; 1028 } 1029 1030 *nr_sgls = nr_descs; 1031 1032 return sgls; 1033 } 1034 1035 static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl, 1036 struct nvmet_pci_epf_iod *iod) 1037 { 1038 struct nvme_command *cmd = &iod->cmd; 1039 struct nvme_sgl_desc seg = cmd->common.dptr.sgl; 1040 struct nvme_sgl_desc *sgls = NULL; 1041 int n = 0, i, nr_sgls; 1042 int ret; 1043 1044 /* 1045 * We do not support inline data nor keyed SGLs, so we should be seeing 1046 * only segment descriptors. 
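	 * Any other descriptor type found while parsing the segments is
	 * rejected with an SGL invalid type status.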
1047 */ 1048 if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) && 1049 seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { 1050 iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; 1051 return -EIO; 1052 } 1053 1054 while (seg.length) { 1055 sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls); 1056 if (!sgls) { 1057 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1058 return -EIO; 1059 } 1060 1061 /* Grow the PCI segment table as needed. */ 1062 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls); 1063 if (ret) { 1064 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1065 goto out; 1066 } 1067 1068 /* 1069 * Parse the SGL descriptors to build the PCI segment table, 1070 * checking the descriptor type as we go. 1071 */ 1072 for (i = 0; i < nr_sgls; i++) { 1073 if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) { 1074 iod->status = NVME_SC_SGL_INVALID_TYPE | 1075 NVME_STATUS_DNR; 1076 goto out; 1077 } 1078 iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr); 1079 iod->data_segs[n].length = le32_to_cpu(sgls[i].length); 1080 n++; 1081 } 1082 1083 kfree(sgls); 1084 } 1085 1086 out: 1087 if (iod->status != NVME_SC_SUCCESS) { 1088 kfree(sgls); 1089 return -EIO; 1090 } 1091 1092 return 0; 1093 } 1094 1095 static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod) 1096 { 1097 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; 1098 struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl; 1099 1100 if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) { 1101 /* Single data descriptor case. */ 1102 iod->nr_data_segs = 1; 1103 iod->data_segs = &iod->data_seg; 1104 iod->data_seg.pci_addr = le64_to_cpu(sgl->addr); 1105 iod->data_seg.length = le32_to_cpu(sgl->length); 1106 return 0; 1107 } 1108 1109 return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod); 1110 } 1111 1112 static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod) 1113 { 1114 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; 1115 struct nvmet_req *req = &iod->req; 1116 struct nvmet_pci_epf_segment *seg; 1117 struct scatterlist *sg; 1118 int ret, i; 1119 1120 if (iod->data_len > ctrl->mdts) { 1121 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1122 return -EINVAL; 1123 } 1124 1125 /* 1126 * Get the PCI address segments for the command data buffer using either 1127 * its SGLs or PRPs. 1128 */ 1129 if (iod->cmd.common.flags & NVME_CMD_SGL_ALL) 1130 ret = nvmet_pci_epf_iod_parse_sgls(iod); 1131 else 1132 ret = nvmet_pci_epf_iod_parse_prps(iod); 1133 if (ret) 1134 return ret; 1135 1136 /* Get a command buffer using SGLs matching the PCI segments. 
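	 * The resulting scatterlist is handed over to the target core through
	 * req->sg and req->sg_cnt for the execution of the command.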
 */
	if (iod->nr_data_segs == 1) {
		sg_init_table(&iod->data_sgl, 1);
		iod->data_sgt.sgl = &iod->data_sgl;
		iod->data_sgt.nents = 1;
		iod->data_sgt.orig_nents = 1;
	} else {
		ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
				     GFP_KERNEL);
		if (ret)
			goto err_nomem;
	}

	for_each_sgtable_sg(&iod->data_sgt, sg, i) {
		seg = &iod->data_segs[i];
		seg->buf = kmalloc(seg->length, GFP_KERNEL);
		if (!seg->buf)
			goto err_nomem;
		sg_set_buf(sg, seg->buf, seg->length);
	}

	req->transfer_len = iod->data_len;
	req->sg = iod->data_sgt.sgl;
	req->sg_cnt = iod->data_sgt.nents;

	return 0;

err_nomem:
	iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
	return -ENOMEM;
}

static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)
{
	struct nvmet_pci_epf_queue *cq = iod->cq;
	unsigned long flags;

	/* Print an error message for failed commands, except AENs. */
	iod->status = le16_to_cpu(iod->cqe.status) >> 1;
	if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
		dev_err(iod->ctrl->dev,
			"CQ[%d]: Command %s (0x%x) status 0x%0x\n",
			iod->sq->qid, nvmet_pci_epf_iod_name(iod),
			iod->cmd.common.opcode, iod->status);

	/*
	 * Add the command to the list of completed commands and schedule the
	 * CQ work.
	 */
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&iod->link, &cq->list);
	queue_delayed_work(system_highpri_wq, &cq->work, 0);
	spin_unlock_irqrestore(&cq->lock, flags);
}

static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue)
{
	struct nvmet_pci_epf_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	while (!list_empty(&queue->list)) {
		iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod,
				       link);
		list_del_init(&iod->link);
		nvmet_pci_epf_free_iod(iod);
	}
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int nvmet_pci_epf_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvmet_pci_epf_ports_mutex);
	list_add_tail(&port->entry, &nvmet_pci_epf_ports);
	mutex_unlock(&nvmet_pci_epf_ports_mutex);
	return 0;
}

static void nvmet_pci_epf_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvmet_pci_epf_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvmet_pci_epf_ports_mutex);
}

static struct nvmet_port *
nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid)
{
	struct nvmet_port *p, *port = NULL;

	mutex_lock(&nvmet_pci_epf_ports_mutex);
	list_for_each_entry(p, &nvmet_pci_epf_ports, entry) {
		if (p->disc_addr.portid == portid) {
			port = p;
			break;
		}
	}
	mutex_unlock(&nvmet_pci_epf_ports_mutex);

	return port;
}

static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
{
	struct nvmet_pci_epf_iod *iod =
		container_of(req, struct nvmet_pci_epf_iod, req);

	iod->status = le16_to_cpu(req->cqe->status) >> 1;

	/*
	 * If we have no data to transfer to the host, complete the command
	 * immediately. Otherwise, signal nvmet_pci_epf_exec_iod_work(), which
	 * waits on iod->done and will transfer the data (if the command
	 * succeeded) and then complete the command.
	 */
	if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
		nvmet_pci_epf_complete_iod(iod);
		return;
	}

	complete(&iod->done);
}

static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl)
{
	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
	int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12;

	return ilog2(ctrl->mdts) - page_shift;
}

static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
		u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector)
{
	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
	struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
	u16 status;
	int ret;

	if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;

	if (!(flags & NVME_QUEUE_PHYS_CONTIG))
		return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;

	cq->pci_addr = pci_addr;
	cq->qid = cqid;
	cq->depth = qsize + 1;
	cq->vector = vector;
	cq->head = 0;
	cq->tail = 0;
	cq->phase = 1;
	cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32));
	nvmet_pci_epf_bar_write32(ctrl, cq->db, 0);

	if (!cqid)
		cq->qes = sizeof(struct nvme_completion);
	else
		cq->qes = ctrl->io_cqes;
	cq->pci_size = cq->qes * cq->depth;

	if (flags & NVME_CQ_IRQ_ENABLED) {
		cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
		if (!cq->iv)
			return NVME_SC_INTERNAL | NVME_STATUS_DNR;
		set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
	}

	status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
	if (status != NVME_SC_SUCCESS)
		goto err;

	/*
	 * Map the CQ PCI address space. Since PCI endpoint controllers may
	 * return a partial mapping, check that the mapping is large enough.
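	 * The mapping is kept for the entire life of the CQ so that completion
	 * entries can be posted with memcpy_toio() without remapping the queue
	 * for every completion.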
1309 */ 1310 ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size, 1311 &cq->pci_map); 1312 if (ret) { 1313 dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n", 1314 cq->qid, ret); 1315 goto err_internal; 1316 } 1317 1318 if (cq->pci_map.pci_size < cq->pci_size) { 1319 dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n", 1320 cq->qid); 1321 goto err_unmap_queue; 1322 } 1323 1324 set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); 1325 1326 if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) 1327 dev_dbg(ctrl->dev, 1328 "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", 1329 cqid, qsize, cq->qes, cq->vector); 1330 else 1331 dev_dbg(ctrl->dev, 1332 "CQ[%u]: %u entries of %zu B, IRQ disabled\n", 1333 cqid, qsize, cq->qes); 1334 1335 return NVME_SC_SUCCESS; 1336 1337 err_unmap_queue: 1338 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); 1339 err_internal: 1340 status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1341 err: 1342 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) 1343 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); 1344 return status; 1345 } 1346 1347 static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid) 1348 { 1349 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1350 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; 1351 1352 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) 1353 return NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1354 1355 cancel_delayed_work_sync(&cq->work); 1356 nvmet_pci_epf_drain_queue(cq); 1357 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) 1358 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); 1359 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); 1360 nvmet_cq_put(&cq->nvme_cq); 1361 1362 return NVME_SC_SUCCESS; 1363 } 1364 1365 static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, 1366 u16 sqid, u16 cqid, u16 flags, u16 qsize, u64 pci_addr) 1367 { 1368 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1369 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; 1370 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; 1371 u16 status; 1372 1373 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) 1374 return NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1375 1376 if (!(flags & NVME_QUEUE_PHYS_CONTIG)) 1377 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR; 1378 1379 sq->pci_addr = pci_addr; 1380 sq->qid = sqid; 1381 sq->depth = qsize + 1; 1382 sq->head = 0; 1383 sq->tail = 0; 1384 sq->phase = 0; 1385 sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32)); 1386 nvmet_pci_epf_bar_write32(ctrl, sq->db, 0); 1387 if (!sqid) 1388 sq->qes = 1UL << NVME_ADM_SQES; 1389 else 1390 sq->qes = ctrl->io_sqes; 1391 sq->pci_size = sq->qes * sq->depth; 1392 1393 status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid, 1394 sq->depth); 1395 if (status != NVME_SC_SUCCESS) 1396 return status; 1397 1398 sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND, 1399 min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid); 1400 if (!sq->iod_wq) { 1401 dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid); 1402 status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1403 goto out_destroy_sq; 1404 } 1405 1406 set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags); 1407 1408 dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n", 1409 sqid, qsize, sq->qes); 1410 1411 return NVME_SC_SUCCESS; 1412 1413 out_destroy_sq: 1414 nvmet_sq_destroy(&sq->nvme_sq); 1415 return status; 1416 } 1417 1418 static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid) 1419 { 1420 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1421 struct nvmet_pci_epf_queue 
*sq = &ctrl->sq[sqid]; 1422 1423 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) 1424 return NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1425 1426 destroy_workqueue(sq->iod_wq); 1427 sq->iod_wq = NULL; 1428 1429 nvmet_pci_epf_drain_queue(sq); 1430 1431 if (sq->nvme_sq.ctrl) 1432 nvmet_sq_destroy(&sq->nvme_sq); 1433 1434 return NVME_SC_SUCCESS; 1435 } 1436 1437 static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl, 1438 u8 feat, void *data) 1439 { 1440 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1441 struct nvmet_feat_arbitration *arb; 1442 struct nvmet_feat_irq_coalesce *irqc; 1443 struct nvmet_feat_irq_config *irqcfg; 1444 struct nvmet_pci_epf_irq_vector *iv; 1445 u16 status; 1446 1447 switch (feat) { 1448 case NVME_FEAT_ARBITRATION: 1449 arb = data; 1450 if (!ctrl->sq_ab) 1451 arb->ab = 0x7; 1452 else 1453 arb->ab = ilog2(ctrl->sq_ab); 1454 return NVME_SC_SUCCESS; 1455 1456 case NVME_FEAT_IRQ_COALESCE: 1457 irqc = data; 1458 irqc->thr = ctrl->irq_vector_threshold; 1459 irqc->time = 0; 1460 return NVME_SC_SUCCESS; 1461 1462 case NVME_FEAT_IRQ_CONFIG: 1463 irqcfg = data; 1464 mutex_lock(&ctrl->irq_lock); 1465 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); 1466 if (iv) { 1467 irqcfg->cd = iv->cd; 1468 status = NVME_SC_SUCCESS; 1469 } else { 1470 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1471 } 1472 mutex_unlock(&ctrl->irq_lock); 1473 return status; 1474 1475 default: 1476 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1477 } 1478 } 1479 1480 static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl, 1481 u8 feat, void *data) 1482 { 1483 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1484 struct nvmet_feat_arbitration *arb; 1485 struct nvmet_feat_irq_coalesce *irqc; 1486 struct nvmet_feat_irq_config *irqcfg; 1487 struct nvmet_pci_epf_irq_vector *iv; 1488 u16 status; 1489 1490 switch (feat) { 1491 case NVME_FEAT_ARBITRATION: 1492 arb = data; 1493 if (arb->ab == 0x7) 1494 ctrl->sq_ab = 0; 1495 else 1496 ctrl->sq_ab = 1 << arb->ab; 1497 return NVME_SC_SUCCESS; 1498 1499 case NVME_FEAT_IRQ_COALESCE: 1500 /* 1501 * Since we do not implement precise IRQ coalescing timing, 1502 * ignore the time field. 
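		 * Only the aggregation threshold is honored: it is kept in
		 * ctrl->irq_vector_threshold and checked when posting
		 * completion entries.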
1503 */ 1504 irqc = data; 1505 ctrl->irq_vector_threshold = irqc->thr + 1; 1506 return NVME_SC_SUCCESS; 1507 1508 case NVME_FEAT_IRQ_CONFIG: 1509 irqcfg = data; 1510 mutex_lock(&ctrl->irq_lock); 1511 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); 1512 if (iv) { 1513 iv->cd = irqcfg->cd; 1514 status = NVME_SC_SUCCESS; 1515 } else { 1516 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1517 } 1518 mutex_unlock(&ctrl->irq_lock); 1519 return status; 1520 1521 default: 1522 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1523 } 1524 } 1525 1526 static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = { 1527 .owner = THIS_MODULE, 1528 .type = NVMF_TRTYPE_PCI, 1529 .add_port = nvmet_pci_epf_add_port, 1530 .remove_port = nvmet_pci_epf_remove_port, 1531 .queue_response = nvmet_pci_epf_queue_response, 1532 .get_mdts = nvmet_pci_epf_get_mdts, 1533 .create_cq = nvmet_pci_epf_create_cq, 1534 .delete_cq = nvmet_pci_epf_delete_cq, 1535 .create_sq = nvmet_pci_epf_create_sq, 1536 .delete_sq = nvmet_pci_epf_delete_sq, 1537 .get_feature = nvmet_pci_epf_get_feat, 1538 .set_feature = nvmet_pci_epf_set_feat, 1539 }; 1540 1541 static void nvmet_pci_epf_cq_work(struct work_struct *work); 1542 1543 static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl, 1544 unsigned int qid, bool sq) 1545 { 1546 struct nvmet_pci_epf_queue *queue; 1547 1548 if (sq) { 1549 queue = &ctrl->sq[qid]; 1550 } else { 1551 queue = &ctrl->cq[qid]; 1552 INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work); 1553 } 1554 queue->ctrl = ctrl; 1555 queue->qid = qid; 1556 spin_lock_init(&queue->lock); 1557 INIT_LIST_HEAD(&queue->list); 1558 } 1559 1560 static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl) 1561 { 1562 unsigned int qid; 1563 1564 ctrl->sq = kcalloc(ctrl->nr_queues, 1565 sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL); 1566 if (!ctrl->sq) 1567 return -ENOMEM; 1568 1569 ctrl->cq = kcalloc(ctrl->nr_queues, 1570 sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL); 1571 if (!ctrl->cq) { 1572 kfree(ctrl->sq); 1573 ctrl->sq = NULL; 1574 return -ENOMEM; 1575 } 1576 1577 for (qid = 0; qid < ctrl->nr_queues; qid++) { 1578 nvmet_pci_epf_init_queue(ctrl, qid, true); 1579 nvmet_pci_epf_init_queue(ctrl, qid, false); 1580 } 1581 1582 return 0; 1583 } 1584 1585 static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl) 1586 { 1587 kfree(ctrl->sq); 1588 ctrl->sq = NULL; 1589 kfree(ctrl->cq); 1590 ctrl->cq = NULL; 1591 } 1592 1593 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work) 1594 { 1595 struct nvmet_pci_epf_iod *iod = 1596 container_of(work, struct nvmet_pci_epf_iod, work); 1597 struct nvmet_req *req = &iod->req; 1598 int ret; 1599 1600 if (!iod->ctrl->link_up) { 1601 nvmet_pci_epf_free_iod(iod); 1602 return; 1603 } 1604 1605 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) { 1606 iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1607 goto complete; 1608 } 1609 1610 /* 1611 * If nvmet_req_init() fails (e.g., unsupported opcode) it will call 1612 * __nvmet_req_complete() internally which will call 1613 * nvmet_pci_epf_queue_response() and will complete the command directly. 1614 */ 1615 if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops)) 1616 return; 1617 1618 iod->data_len = nvmet_req_transfer_len(req); 1619 if (iod->data_len) { 1620 /* 1621 * Get the data DMA transfer direction. Here "device" means the 1622 * PCI root-complex host. 
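		 * So for a host write command the data is transferred from the
		 * host into our local buffers (DMA_FROM_DEVICE), and for a
		 * host read command it is transferred back to the host
		 * (DMA_TO_DEVICE).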
		 */
		if (nvme_is_write(&iod->cmd))
			iod->dma_dir = DMA_FROM_DEVICE;
		else
			iod->dma_dir = DMA_TO_DEVICE;

		/*
		 * Setup the command data buffer and get the command data from
		 * the host if needed.
		 */
		ret = nvmet_pci_epf_alloc_iod_data_buf(iod);
		if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
			ret = nvmet_pci_epf_transfer_iod_data(iod);
		if (ret) {
			nvmet_req_uninit(req);
			goto complete;
		}
	}

	req->execute(req);

	/*
	 * If we do not have data to transfer after the command execution
	 * finishes, nvmet_pci_epf_queue_response() will complete the command
	 * directly. No need to wait for the completion in this case.
	 */
	if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE)
		return;

	wait_for_completion(&iod->done);

	/* Transfer the data to the host only if the command succeeded. */
	if (iod->status == NVME_SC_SUCCESS) {
		WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
		nvmet_pci_epf_transfer_iod_data(iod);
	}

complete:
	nvmet_pci_epf_complete_iod(iod);
}

static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
		struct nvmet_pci_epf_queue *sq)
{
	struct nvmet_pci_epf_iod *iod;
	int ret, n = 0;
	u16 head = sq->head;

	sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
	while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
		iod = nvmet_pci_epf_alloc_iod(sq);
		if (!iod)
			break;

		/* Get the NVMe command submitted by the host. */
		ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
					     sq->pci_addr + head * sq->qes,
					     sq->qes, DMA_FROM_DEVICE);
		if (ret) {
			/* Not much we can do... */
			nvmet_pci_epf_free_iod(iod);
			break;
		}

		dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
			sq->qid, head, sq->tail,
			nvmet_pci_epf_iod_name(iod));

		head++;
		if (head == sq->depth)
			head = 0;
		WRITE_ONCE(sq->head, head);
		n++;

		queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);

		sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
	}

	return n;
}

static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
{
	struct nvmet_pci_epf_ctrl *ctrl =
		container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
	struct nvmet_pci_epf_queue *sq;
	unsigned long limit = jiffies;
	unsigned long last = 0;
	int i, nr_sqs;

	while (ctrl->link_up && ctrl->enabled) {
		nr_sqs = 0;
		/* Do round-robin arbitration. */
		for (i = 0; i < ctrl->nr_queues; i++) {
			sq = &ctrl->sq[i];
			if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
				continue;
			if (nvmet_pci_epf_process_sq(ctrl, sq))
				nr_sqs++;
		}

		/*
		 * If we have been running for a while, reschedule to let other
		 * tasks run and to avoid RCU stalls.
		 */
		if (time_is_before_jiffies(limit + secs_to_jiffies(1))) {
			cond_resched();
			limit = jiffies;
			continue;
		}

		if (nr_sqs) {
			last = jiffies;
			continue;
		}

		/*
		 * If we have not received any command on any queue for more
		 * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and
		 * reschedule. This avoids "burning" a CPU when the controller
		 * is idle for a long time.
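		 * Polling does not stop: the work re-arms itself below with a
		 * NVMET_PCI_EPF_SQ_POLL_INTERVAL delay, so new commands are
		 * still picked up, only with a slightly higher latency.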
1745 */ 1746 if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE)) 1747 break; 1748 1749 cpu_relax(); 1750 } 1751 1752 schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL); 1753 } 1754 1755 static void nvmet_pci_epf_cq_work(struct work_struct *work) 1756 { 1757 struct nvmet_pci_epf_queue *cq = 1758 container_of(work, struct nvmet_pci_epf_queue, work.work); 1759 struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl; 1760 struct nvme_completion *cqe; 1761 struct nvmet_pci_epf_iod *iod; 1762 unsigned long flags; 1763 int ret = 0, n = 0; 1764 1765 while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) { 1766 1767 /* Check that the CQ is not full. */ 1768 cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db); 1769 if (cq->head == cq->tail + 1) { 1770 ret = -EAGAIN; 1771 break; 1772 } 1773 1774 spin_lock_irqsave(&cq->lock, flags); 1775 iod = list_first_entry_or_null(&cq->list, 1776 struct nvmet_pci_epf_iod, link); 1777 if (iod) 1778 list_del_init(&iod->link); 1779 spin_unlock_irqrestore(&cq->lock, flags); 1780 1781 if (!iod) 1782 break; 1783 1784 /* 1785 * Post the IOD completion entry. If the IOD request was 1786 * executed (req->execute() called), the CQE is already 1787 * initialized. However, the IOD may have been failed before 1788 * that, leaving the CQE not properly initialized. So always 1789 * initialize it here. 1790 */ 1791 cqe = &iod->cqe; 1792 cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head)); 1793 cqe->sq_id = cpu_to_le16(iod->sq->qid); 1794 cqe->command_id = iod->cmd.common.command_id; 1795 cqe->status = cpu_to_le16((iod->status << 1) | cq->phase); 1796 1797 dev_dbg(ctrl->dev, 1798 "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n", 1799 cq->qid, nvmet_pci_epf_iod_name(iod), iod->status, 1800 le64_to_cpu(cqe->result.u64), cq->head, cq->tail, 1801 cq->phase); 1802 1803 memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes, 1804 cqe, cq->qes); 1805 1806 cq->tail++; 1807 if (cq->tail >= cq->depth) { 1808 cq->tail = 0; 1809 cq->phase ^= 1; 1810 } 1811 1812 nvmet_pci_epf_free_iod(iod); 1813 1814 /* Signal the host. */ 1815 nvmet_pci_epf_raise_irq(ctrl, cq, false); 1816 n++; 1817 } 1818 1819 /* 1820 * We do not support precise IRQ coalescing time (100ns units as per 1821 * NVMe specifications). So if we have posted completion entries without 1822 * reaching the interrupt coalescing threshold, raise an interrupt. 1823 */ 1824 if (n) 1825 nvmet_pci_epf_raise_irq(ctrl, cq, true); 1826 1827 if (ret < 0) 1828 queue_delayed_work(system_highpri_wq, &cq->work, 1829 NVMET_PCI_EPF_CQ_RETRY_INTERVAL); 1830 } 1831 1832 static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl) 1833 { 1834 struct nvmet_ctrl *tctrl = ctrl->tctrl; 1835 1836 /* Initialize controller status. */ 1837 tctrl->csts = 0; 1838 ctrl->csts = 0; 1839 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); 1840 1841 /* Initialize controller configuration and start polling. 
*/ 1842 tctrl->cc = 0; 1843 ctrl->cc = 0; 1844 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); 1845 } 1846 1847 static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) 1848 { 1849 u64 pci_addr, asq, acq; 1850 u32 aqa; 1851 u16 status, qsize; 1852 1853 if (ctrl->enabled) 1854 return 0; 1855 1856 dev_info(ctrl->dev, "Enabling controller\n"); 1857 1858 ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12; 1859 ctrl->mps = 1UL << ctrl->mps_shift; 1860 ctrl->mps_mask = ctrl->mps - 1; 1861 1862 ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc); 1863 if (ctrl->io_sqes < sizeof(struct nvme_command)) { 1864 dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n", 1865 ctrl->io_sqes, sizeof(struct nvme_command)); 1866 goto err; 1867 } 1868 1869 ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc); 1870 if (ctrl->io_cqes < sizeof(struct nvme_completion)) { 1871 dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n", 1872 ctrl->io_cqes, sizeof(struct nvme_completion)); 1873 goto err; 1874 } 1875 1876 /* Create the admin queue. */ 1877 aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA); 1878 asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ); 1879 acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ); 1880 1881 qsize = (aqa & 0x0fff0000) >> 16; 1882 pci_addr = acq & GENMASK_ULL(63, 12); 1883 status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0, 1884 NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG, 1885 qsize, pci_addr, 0); 1886 if (status != NVME_SC_SUCCESS) { 1887 dev_err(ctrl->dev, "Failed to create admin completion queue\n"); 1888 goto err; 1889 } 1890 1891 qsize = aqa & 0x00000fff; 1892 pci_addr = asq & GENMASK_ULL(63, 12); 1893 status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0, 1894 NVME_QUEUE_PHYS_CONTIG, qsize, pci_addr); 1895 if (status != NVME_SC_SUCCESS) { 1896 dev_err(ctrl->dev, "Failed to create admin submission queue\n"); 1897 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); 1898 goto err; 1899 } 1900 1901 ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB; 1902 ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD; 1903 ctrl->enabled = true; 1904 ctrl->csts = NVME_CSTS_RDY; 1905 1906 /* Start polling the controller SQs. */ 1907 schedule_delayed_work(&ctrl->poll_sqs, 0); 1908 1909 return 0; 1910 1911 err: 1912 nvmet_pci_epf_clear_ctrl_config(ctrl); 1913 return -EINVAL; 1914 } 1915 1916 static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl, 1917 bool shutdown) 1918 { 1919 int qid; 1920 1921 if (!ctrl->enabled) 1922 return; 1923 1924 dev_info(ctrl->dev, "%s controller\n", 1925 shutdown ? "Shutting down" : "Disabling"); 1926 1927 ctrl->enabled = false; 1928 cancel_delayed_work_sync(&ctrl->poll_sqs); 1929 1930 /* Delete all I/O queues first. */ 1931 for (qid = 1; qid < ctrl->nr_queues; qid++) 1932 nvmet_pci_epf_delete_sq(ctrl->tctrl, qid); 1933 1934 for (qid = 1; qid < ctrl->nr_queues; qid++) 1935 nvmet_pci_epf_delete_cq(ctrl->tctrl, qid); 1936 1937 /* Delete the admin queue last. 
*/ 1938 nvmet_pci_epf_delete_sq(ctrl->tctrl, 0); 1939 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); 1940 1941 ctrl->csts &= ~NVME_CSTS_RDY; 1942 if (shutdown) { 1943 ctrl->csts |= NVME_CSTS_SHST_CMPLT; 1944 ctrl->cc &= ~NVME_CC_ENABLE; 1945 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); 1946 } 1947 } 1948 1949 static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) 1950 { 1951 struct nvmet_pci_epf_ctrl *ctrl = 1952 container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work); 1953 u32 old_cc, new_cc; 1954 int ret; 1955 1956 if (!ctrl->tctrl) 1957 return; 1958 1959 old_cc = ctrl->cc; 1960 new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC); 1961 if (new_cc == old_cc) 1962 goto reschedule_work; 1963 1964 ctrl->cc = new_cc; 1965 1966 if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) { 1967 ret = nvmet_pci_epf_enable_ctrl(ctrl); 1968 if (ret) 1969 goto reschedule_work; 1970 } 1971 1972 if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) 1973 nvmet_pci_epf_disable_ctrl(ctrl, false); 1974 1975 if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) 1976 nvmet_pci_epf_disable_ctrl(ctrl, true); 1977 1978 if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc)) 1979 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; 1980 1981 nvmet_update_cc(ctrl->tctrl, ctrl->cc); 1982 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); 1983 1984 reschedule_work: 1985 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); 1986 } 1987 1988 static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl) 1989 { 1990 struct nvmet_ctrl *tctrl = ctrl->tctrl; 1991 1992 ctrl->bar = ctrl->nvme_epf->reg_bar; 1993 1994 /* Copy the target controller capabilities as a base. */ 1995 ctrl->cap = tctrl->cap; 1996 1997 /* Contiguous Queues Required (CQR). */ 1998 ctrl->cap |= 0x1ULL << 16; 1999 2000 /* Set Doorbell stride to 4B (DSTRB). */ 2001 ctrl->cap &= ~GENMASK_ULL(35, 32); 2002 2003 /* Clear NVM Subsystem Reset Supported (NSSRS). */ 2004 ctrl->cap &= ~(0x1ULL << 36); 2005 2006 /* Clear Boot Partition Support (BPS). */ 2007 ctrl->cap &= ~(0x1ULL << 45); 2008 2009 /* Clear Persistent Memory Region Supported (PMRS). */ 2010 ctrl->cap &= ~(0x1ULL << 56); 2011 2012 /* Clear Controller Memory Buffer Supported (CMBS). 
*/ 2013 ctrl->cap &= ~(0x1ULL << 57); 2014 2015 nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap); 2016 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver); 2017 2018 nvmet_pci_epf_clear_ctrl_config(ctrl); 2019 } 2020 2021 static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf, 2022 unsigned int max_nr_queues) 2023 { 2024 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; 2025 struct nvmet_alloc_ctrl_args args = {}; 2026 char hostnqn[NVMF_NQN_SIZE]; 2027 uuid_t id; 2028 int ret; 2029 2030 memset(ctrl, 0, sizeof(*ctrl)); 2031 ctrl->dev = &nvme_epf->epf->dev; 2032 mutex_init(&ctrl->irq_lock); 2033 ctrl->nvme_epf = nvme_epf; 2034 ctrl->mdts = nvme_epf->mdts_kb * SZ_1K; 2035 INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work); 2036 INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work); 2037 2038 ret = mempool_init_kmalloc_pool(&ctrl->iod_pool, 2039 max_nr_queues * NVMET_MAX_QUEUE_SIZE, 2040 sizeof(struct nvmet_pci_epf_iod)); 2041 if (ret) { 2042 dev_err(ctrl->dev, "Failed to initialize IOD mempool\n"); 2043 return ret; 2044 } 2045 2046 ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid); 2047 if (!ctrl->port) { 2048 dev_err(ctrl->dev, "Port not found\n"); 2049 ret = -EINVAL; 2050 goto out_mempool_exit; 2051 } 2052 2053 /* Create the target controller. */ 2054 uuid_gen(&id); 2055 snprintf(hostnqn, NVMF_NQN_SIZE, 2056 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); 2057 args.port = ctrl->port; 2058 args.subsysnqn = nvme_epf->subsysnqn; 2059 memset(&id, 0, sizeof(uuid_t)); 2060 args.hostid = &id; 2061 args.hostnqn = hostnqn; 2062 args.ops = &nvmet_pci_epf_fabrics_ops; 2063 2064 ctrl->tctrl = nvmet_alloc_ctrl(&args); 2065 if (!ctrl->tctrl) { 2066 dev_err(ctrl->dev, "Failed to create target controller\n"); 2067 ret = -ENOMEM; 2068 goto out_mempool_exit; 2069 } 2070 ctrl->tctrl->drvdata = ctrl; 2071 2072 /* We do not support protection information for now. */ 2073 if (ctrl->tctrl->pi_support) { 2074 dev_err(ctrl->dev, 2075 "Protection information (PI) is not supported\n"); 2076 ret = -ENOTSUPP; 2077 goto out_put_ctrl; 2078 } 2079 2080 /* Allocate our queues, up to the maximum number. */ 2081 ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues); 2082 ret = nvmet_pci_epf_alloc_queues(ctrl); 2083 if (ret) 2084 goto out_put_ctrl; 2085 2086 /* 2087 * Allocate the IRQ vectors descriptors. We cannot have more than the 2088 * maximum number of queues. 2089 */ 2090 ret = nvmet_pci_epf_alloc_irq_vectors(ctrl); 2091 if (ret) 2092 goto out_free_queues; 2093 2094 dev_info(ctrl->dev, 2095 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n", 2096 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1, 2097 ctrl->mdts); 2098 2099 /* Initialize BAR 0 using the target controller CAP. 
static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
				     unsigned int max_nr_queues)
{
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
	struct nvmet_alloc_ctrl_args args = {};
	char hostnqn[NVMF_NQN_SIZE];
	uuid_t id;
	int ret;

	memset(ctrl, 0, sizeof(*ctrl));
	ctrl->dev = &nvme_epf->epf->dev;
	mutex_init(&ctrl->irq_lock);
	ctrl->nvme_epf = nvme_epf;
	ctrl->mdts = nvme_epf->mdts_kb * SZ_1K;
	INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work);
	INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work);

	ret = mempool_init_kmalloc_pool(&ctrl->iod_pool,
					max_nr_queues * NVMET_MAX_QUEUE_SIZE,
					sizeof(struct nvmet_pci_epf_iod));
	if (ret) {
		dev_err(ctrl->dev, "Failed to initialize IOD mempool\n");
		return ret;
	}

	ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid);
	if (!ctrl->port) {
		dev_err(ctrl->dev, "Port not found\n");
		ret = -EINVAL;
		goto out_mempool_exit;
	}

	/* Create the target controller. */
	uuid_gen(&id);
	snprintf(hostnqn, NVMF_NQN_SIZE,
		 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
	args.port = ctrl->port;
	args.subsysnqn = nvme_epf->subsysnqn;
	memset(&id, 0, sizeof(uuid_t));
	args.hostid = &id;
	args.hostnqn = hostnqn;
	args.ops = &nvmet_pci_epf_fabrics_ops;

	ctrl->tctrl = nvmet_alloc_ctrl(&args);
	if (!ctrl->tctrl) {
		dev_err(ctrl->dev, "Failed to create target controller\n");
		ret = -ENOMEM;
		goto out_mempool_exit;
	}
	ctrl->tctrl->drvdata = ctrl;

	/* We do not support protection information for now. */
	if (ctrl->tctrl->pi_support) {
		dev_err(ctrl->dev,
			"Protection information (PI) is not supported\n");
		ret = -ENOTSUPP;
		goto out_put_ctrl;
	}

	/* Allocate our queues, up to the maximum number. */
	ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
	ret = nvmet_pci_epf_alloc_queues(ctrl);
	if (ret)
		goto out_put_ctrl;

	/*
	 * Allocate the IRQ vector descriptors. We cannot have more than the
	 * maximum number of queues.
	 */
	ret = nvmet_pci_epf_alloc_irq_vectors(ctrl);
	if (ret)
		goto out_free_queues;

	dev_info(ctrl->dev,
		 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n",
		 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1,
		 ctrl->mdts);

	/* Initialize BAR 0 using the target controller CAP. */
	nvmet_pci_epf_init_bar(ctrl);

	return 0;

out_free_queues:
	nvmet_pci_epf_free_queues(ctrl);
out_put_ctrl:
	nvmet_ctrl_put(ctrl->tctrl);
	ctrl->tctrl = NULL;
out_mempool_exit:
	mempool_exit(&ctrl->iod_pool);
	return ret;
}

static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	dev_info(ctrl->dev, "PCI link up\n");
	ctrl->link_up = true;

	schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}

static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	dev_info(ctrl->dev, "PCI link down\n");
	ctrl->link_up = false;

	cancel_delayed_work_sync(&ctrl->poll_cc);

	nvmet_pci_epf_disable_ctrl(ctrl, false);
	nvmet_pci_epf_clear_ctrl_config(ctrl);
}

static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	if (!ctrl->tctrl)
		return;

	dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
		 ctrl->tctrl->subsys->subsysnqn);

	nvmet_pci_epf_stop_ctrl(ctrl);

	nvmet_pci_epf_free_queues(ctrl);
	nvmet_pci_epf_free_irq_vectors(ctrl);

	nvmet_ctrl_put(ctrl->tctrl);
	ctrl->tctrl = NULL;

	mempool_exit(&ctrl->iod_pool);
}
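
/*
 * Layout of the register BAR (BAR 0) set up below: the NVMe controller
 * registers and the SQ/CQ doorbell pairs come first, optionally followed by
 * the MSI-X table and PBA when the endpoint controller is MSI-X capable.
 * The resulting size is rounded up to the BAR fixed size if there is one,
 * or aligned to the controller alignment (at least 4K) otherwise.
 */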
static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;
	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
	size_t reg_size, reg_bar_size;
	size_t msix_table_size = 0;

	/*
	 * The first free BAR will be our register BAR and, per the NVMe
	 * specification, it must be BAR 0.
	 */
	if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
		dev_err(&epf->dev, "BAR 0 is not free\n");
		return -ENODEV;
	}

	/*
	 * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims
	 * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type
	 * is required to be 64-bit. Thus, for interoperability, always set the
	 * type to 64-bit. In the rare case that the PCI EPC does not support
	 * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail,
	 * and we will return the failure to the user.
	 */
	epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

	/*
	 * Calculate the size of the register BAR: the NVMe registers first,
	 * with enough space for the doorbells, followed by the MSI-X table
	 * if supported.
	 */
	reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
	reg_size = ALIGN(reg_size, 8);

	if (epc_features->msix_capable) {
		size_t pba_size;

		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		nvme_epf->msix_table_offset = reg_size;
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);

		reg_size += msix_table_size + pba_size;
	}

	if (epc_features->bar[BAR_0].type == BAR_FIXED) {
		if (reg_size > epc_features->bar[BAR_0].fixed_size) {
			dev_err(&epf->dev,
				"BAR 0 size %llu B too small, need %zu B\n",
				epc_features->bar[BAR_0].fixed_size,
				reg_size);
			return -ENOMEM;
		}
		reg_bar_size = epc_features->bar[BAR_0].fixed_size;
	} else {
		reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
	}

	nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
						epc_features, PRIMARY_INTERFACE);
	if (!nvme_epf->reg_bar) {
		dev_err(&epf->dev, "Failed to allocate BAR 0\n");
		return -ENOMEM;
	}
	memset(nvme_epf->reg_bar, 0, reg_bar_size);

	return 0;
}

static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;

	if (!nvme_epf->reg_bar)
		return;

	pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
	nvme_epf->reg_bar = NULL;
}

static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;

	pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
			  &epf->bar[BAR_0]);
}

static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
{
	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
	struct pci_epf *epf = nvme_epf->epf;
	int ret;

	/* Enable MSI-X if supported, otherwise use MSI. */
	if (epc_features->msix_capable && epf->msix_interrupts) {
		ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts, BAR_0,
				       nvme_epf->msix_table_offset);
		if (ret) {
			dev_err(&epf->dev, "Failed to configure MSI-X\n");
			return ret;
		}

		nvme_epf->nr_vectors = epf->msix_interrupts;
		nvme_epf->irq_type = PCI_IRQ_MSIX;

		return 0;
	}

	if (epc_features->msi_capable && epf->msi_interrupts) {
		ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(&epf->dev, "Failed to configure MSI\n");
			return ret;
		}

		nvme_epf->nr_vectors = epf->msi_interrupts;
		nvme_epf->irq_type = PCI_IRQ_MSI;

		return 0;
	}

	/* MSI and MSI-X are not supported: fall back to INTx. */
	nvme_epf->nr_vectors = 1;
	nvme_epf->irq_type = PCI_IRQ_INTX;

	return 0;
}
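
/*
 * Called when the endpoint controller is initialized: create the target
 * controller, write the PCI configuration space header, set up BAR 0 and the
 * IRQs, and, if no link-up notification is available, start the controller
 * (CC register polling) immediately.
 */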
static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
	unsigned int max_nr_queues = NVMET_NR_QUEUES;
	int ret;

	/* For now, do not support virtual functions. */
	if (epf->vfunc_no > 0) {
		dev_err(&epf->dev, "Virtual functions are not supported\n");
		return -EINVAL;
	}

	/*
	 * Cap the maximum number of queues the controller can support to the
	 * number of IRQ vectors we can use.
	 */
	if (epc_features->msix_capable && epf->msix_interrupts) {
		dev_info(&epf->dev,
			 "PCI endpoint controller supports MSI-X, %u vectors\n",
			 epf->msix_interrupts);
		max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
	} else if (epc_features->msi_capable && epf->msi_interrupts) {
		dev_info(&epf->dev,
			 "PCI endpoint controller supports MSI, %u vectors\n",
			 epf->msi_interrupts);
		max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
	}

	if (max_nr_queues < 2) {
		dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
			max_nr_queues);
		return -EINVAL;
	}

	/* Create the target controller. */
	ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues);
	if (ret) {
		dev_err(&epf->dev,
			"Failed to create NVMe PCI target controller (err=%d)\n",
			ret);
		return ret;
	}

	/* Set device ID, class, etc. */
	epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
	epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
	ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret) {
		dev_err(&epf->dev,
			"Failed to write configuration header (err=%d)\n", ret);
		goto out_destroy_ctrl;
	}

	ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
			      &epf->bar[BAR_0]);
	if (ret) {
		dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret);
		goto out_destroy_ctrl;
	}

	/*
	 * Enable interrupts and start polling the controller BAR if we do not
	 * have a link up notifier.
	 */
	ret = nvmet_pci_epf_init_irq(nvme_epf);
	if (ret)
		goto out_clear_bar;

	if (!epc_features->linkup_notifier)
		nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);

	return 0;

out_clear_bar:
	nvmet_pci_epf_clear_bar(nvme_epf);
out_destroy_ctrl:
	nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
	return ret;
}

static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;

	nvmet_pci_epf_destroy_ctrl(ctrl);

	nvmet_pci_epf_deinit_dma(nvme_epf);
	nvmet_pci_epf_clear_bar(nvme_epf);
}

static int nvmet_pci_epf_link_up(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;

	nvmet_pci_epf_start_ctrl(ctrl);

	return 0;
}

static int nvmet_pci_epf_link_down(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;

	nvmet_pci_epf_stop_ctrl(ctrl);

	return 0;
}

static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = {
	.epc_init = nvmet_pci_epf_epc_init,
	.epc_deinit = nvmet_pci_epf_epc_deinit,
	.link_up = nvmet_pci_epf_link_up,
	.link_down = nvmet_pci_epf_link_down,
};
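
/*
 * On bind, only software resources are set up: the endpoint controller
 * features are queried, the register BAR space is allocated and the DMA
 * channels are initialized. The PCI side (configuration header, BAR 0 and
 * IRQs) is programmed later, from nvmet_pci_epf_epc_init().
 */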
static int nvmet_pci_epf_bind(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	int ret;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}
	nvme_epf->epc_features = epc_features;

	ret = nvmet_pci_epf_configure_bar(nvme_epf);
	if (ret)
		return ret;

	nvmet_pci_epf_init_dma(nvme_epf);

	return 0;
}

static void nvmet_pci_epf_unbind(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);

	if (epc->init_complete) {
		nvmet_pci_epf_deinit_dma(nvme_epf);
		nvmet_pci_epf_clear_bar(nvme_epf);
	}

	nvmet_pci_epf_free_bar(nvme_epf);
}

static struct pci_epf_header nvme_epf_pci_header = {
	.vendorid = PCI_ANY_ID,
	.deviceid = PCI_ANY_ID,
	.progif_code = 0x02, /* NVM Express */
	.baseclass_code = PCI_BASE_CLASS_STORAGE,
	.subclass_code = 0x08, /* Non-Volatile Memory controller */
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static int nvmet_pci_epf_probe(struct pci_epf *epf,
			       const struct pci_epf_device_id *id)
{
	struct nvmet_pci_epf *nvme_epf;
	int ret;

	nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
	if (!nvme_epf)
		return -ENOMEM;

	ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
	if (ret)
		return ret;

	nvme_epf->epf = epf;
	nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;

	epf->event_ops = &nvmet_pci_epf_event_ops;
	epf->header = &nvme_epf_pci_header;
	epf_set_drvdata(epf, nvme_epf);

	return 0;
}

#define to_nvme_epf(epf_group)	\
	container_of(epf_group, struct nvmet_pci_epf, group)

static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
}

static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
					  const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
	u16 portid;

	/* Do not allow setting this when the function is already started. */
	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	if (!len)
		return -EINVAL;

	if (kstrtou16(page, 0, &portid))
		return -EINVAL;

	nvme_epf->portid = cpu_to_le16(portid);

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, portid);

static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
					    char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
}

static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
					     const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	/* Do not allow setting this when the function is already started. */
	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	if (!len)
		return -EINVAL;

	strscpy(nvme_epf->subsysnqn, page, len);

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);
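
/*
 * The mdts_kb attribute is expressed in KB: writing 0 restores the default
 * (NVMET_PCI_EPF_MDTS_KB), larger values are capped to
 * NVMET_PCI_EPF_MAX_MDTS_KB, and the resulting value must be a power of two.
 */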
static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
}

static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
					   const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
	unsigned long mdts_kb;
	int ret;

	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	ret = kstrtoul(page, 0, &mdts_kb);
	if (ret)
		return ret;
	if (!mdts_kb)
		mdts_kb = NVMET_PCI_EPF_MDTS_KB;
	else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
		mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;

	if (!is_power_of_2(mdts_kb))
		return -EINVAL;

	nvme_epf->mdts_kb = mdts_kb;

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);

static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
	&nvmet_pci_epf_attr_portid,
	&nvmet_pci_epf_attr_subsysnqn,
	&nvmet_pci_epf_attr_mdts_kb,
	NULL,
};

static const struct config_item_type nvmet_pci_epf_group_type = {
	.ct_attrs = nvmet_pci_epf_attrs,
	.ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
						  struct config_group *group)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);

	config_group_init_type_name(&nvme_epf->group, "nvme",
				    &nvmet_pci_epf_group_type);

	return &nvme_epf->group;
}

static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
	{ .name = "nvmet_pci_epf" },
	{},
};

static struct pci_epf_ops nvmet_pci_epf_ops = {
	.bind = nvmet_pci_epf_bind,
	.unbind = nvmet_pci_epf_unbind,
	.add_cfs = nvmet_pci_epf_add_cfs,
};

static struct pci_epf_driver nvmet_pci_epf_driver = {
	.driver.name = "nvmet_pci_epf",
	.probe = nvmet_pci_epf_probe,
	.id_table = nvmet_pci_epf_ids,
	.ops = &nvmet_pci_epf_ops,
	.owner = THIS_MODULE,
};

static int __init nvmet_pci_epf_init_module(void)
{
	int ret;

	ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
	if (ret) {
		pci_epf_unregister_driver(&nvmet_pci_epf_driver);
		return ret;
	}

	return 0;
}

static void __exit nvmet_pci_epf_cleanup_module(void)
{
	nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
	pci_epf_unregister_driver(&nvmet_pci_epf_driver);
}

module_init(nvmet_pci_epf_init_module);
module_exit(nvmet_pci_epf_cleanup_module);

MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
MODULE_LICENSE("GPL");