1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVMe PCI Endpoint Function target driver. 4 * 5 * Copyright (c) 2024, Western Digital Corporation or its affiliates. 6 * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com> 7 * REDS Institute, HEIG-VD, HES-SO, Switzerland 8 */ 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/delay.h> 12 #include <linux/dmaengine.h> 13 #include <linux/io.h> 14 #include <linux/mempool.h> 15 #include <linux/module.h> 16 #include <linux/mutex.h> 17 #include <linux/nvme.h> 18 #include <linux/pci_ids.h> 19 #include <linux/pci-epc.h> 20 #include <linux/pci-epf.h> 21 #include <linux/pci_regs.h> 22 #include <linux/slab.h> 23 24 #include "nvmet.h" 25 26 static LIST_HEAD(nvmet_pci_epf_ports); 27 static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex); 28 29 /* 30 * Default and maximum allowed data transfer size. For the default, 31 * allow up to 128 page-sized segments. For the maximum allowed, 32 * use 4 times the default (which is completely arbitrary). 33 */ 34 #define NVMET_PCI_EPF_MAX_SEGS 128 35 #define NVMET_PCI_EPF_MDTS_KB \ 36 (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10)) 37 #define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4) 38 39 /* 40 * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an 41 * interrupt vector to the host. This default 8 is completely arbitrary and can 42 * be changed by the host with a nvme_set_features command. 43 */ 44 #define NVMET_PCI_EPF_IV_THRESHOLD 8 45 46 /* 47 * BAR CC register and SQ polling intervals. 48 */ 49 #define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10) 50 #define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5) 51 #define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000) 52 53 /* 54 * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ. 55 */ 56 #define NVMET_PCI_EPF_SQ_AB 8 57 58 /* 59 * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ 60 * is full, in which case we retry the CQ processing after this interval. 61 */ 62 #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1) 63 64 enum nvmet_pci_epf_queue_flags { 65 NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */ 66 NVMET_PCI_EPF_Q_LIVE, /* The queue is live */ 67 NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */ 68 }; 69 70 /* 71 * IRQ vector descriptor. 72 */ 73 struct nvmet_pci_epf_irq_vector { 74 unsigned int vector; 75 unsigned int ref; 76 bool cd; 77 int nr_irqs; 78 }; 79 80 struct nvmet_pci_epf_queue { 81 union { 82 struct nvmet_sq nvme_sq; 83 struct nvmet_cq nvme_cq; 84 }; 85 struct nvmet_pci_epf_ctrl *ctrl; 86 unsigned long flags; 87 88 u64 pci_addr; 89 size_t pci_size; 90 struct pci_epc_map pci_map; 91 92 u16 qid; 93 u16 depth; 94 u16 vector; 95 u16 head; 96 u16 tail; 97 u16 phase; 98 u32 db; 99 100 size_t qes; 101 102 struct nvmet_pci_epf_irq_vector *iv; 103 struct workqueue_struct *iod_wq; 104 struct delayed_work work; 105 spinlock_t lock; 106 struct list_head list; 107 }; 108 109 /* 110 * PCI Root Complex (RC) address data segment for mapping an admin or 111 * I/O command buffer @buf of @length bytes to the PCI address @pci_addr. 112 */ 113 struct nvmet_pci_epf_segment { 114 void *buf; 115 u64 pci_addr; 116 u32 length; 117 }; 118 119 /* 120 * Command descriptors. 
121 */ 122 struct nvmet_pci_epf_iod { 123 struct list_head link; 124 125 struct nvmet_req req; 126 struct nvme_command cmd; 127 struct nvme_completion cqe; 128 unsigned int status; 129 130 struct nvmet_pci_epf_ctrl *ctrl; 131 132 struct nvmet_pci_epf_queue *sq; 133 struct nvmet_pci_epf_queue *cq; 134 135 /* Data transfer size and direction for the command. */ 136 size_t data_len; 137 enum dma_data_direction dma_dir; 138 139 /* 140 * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we 141 * use only @data_seg. Otherwise, the array of segments @data_segs is 142 * allocated to manage multiple PCI address data segments. @data_sgl and 143 * @data_sgt are used to setup the command request for execution by the 144 * target core. 145 */ 146 unsigned int nr_data_segs; 147 struct nvmet_pci_epf_segment data_seg; 148 struct nvmet_pci_epf_segment *data_segs; 149 struct scatterlist data_sgl; 150 struct sg_table data_sgt; 151 152 struct work_struct work; 153 struct completion done; 154 }; 155 156 /* 157 * PCI target controller private data. 158 */ 159 struct nvmet_pci_epf_ctrl { 160 struct nvmet_pci_epf *nvme_epf; 161 struct nvmet_port *port; 162 struct nvmet_ctrl *tctrl; 163 struct device *dev; 164 165 unsigned int nr_queues; 166 struct nvmet_pci_epf_queue *sq; 167 struct nvmet_pci_epf_queue *cq; 168 unsigned int sq_ab; 169 170 mempool_t iod_pool; 171 void *bar; 172 u64 cap; 173 u32 cc; 174 u32 csts; 175 176 size_t io_sqes; 177 size_t io_cqes; 178 179 size_t mps_shift; 180 size_t mps; 181 size_t mps_mask; 182 183 unsigned int mdts; 184 185 struct delayed_work poll_cc; 186 struct delayed_work poll_sqs; 187 188 struct mutex irq_lock; 189 struct nvmet_pci_epf_irq_vector *irq_vectors; 190 unsigned int irq_vector_threshold; 191 192 bool link_up; 193 bool enabled; 194 }; 195 196 /* 197 * PCI EPF driver private data. 198 */ 199 struct nvmet_pci_epf { 200 struct pci_epf *epf; 201 202 const struct pci_epc_features *epc_features; 203 204 void *reg_bar; 205 size_t msix_table_offset; 206 207 unsigned int irq_type; 208 unsigned int nr_vectors; 209 210 struct nvmet_pci_epf_ctrl ctrl; 211 212 bool dma_enabled; 213 struct dma_chan *dma_tx_chan; 214 struct mutex dma_tx_lock; 215 struct dma_chan *dma_rx_chan; 216 struct mutex dma_rx_lock; 217 218 struct mutex mmio_lock; 219 220 /* PCI endpoint function configfs attributes. 
*/ 221 struct config_group group; 222 __le16 portid; 223 char subsysnqn[NVMF_NQN_SIZE]; 224 unsigned int mdts_kb; 225 }; 226 227 static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl, 228 u32 off) 229 { 230 __le32 *bar_reg = ctrl->bar + off; 231 232 return le32_to_cpu(READ_ONCE(*bar_reg)); 233 } 234 235 static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl, 236 u32 off, u32 val) 237 { 238 __le32 *bar_reg = ctrl->bar + off; 239 240 WRITE_ONCE(*bar_reg, cpu_to_le32(val)); 241 } 242 243 static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl, 244 u32 off) 245 { 246 return (u64)nvmet_pci_epf_bar_read32(ctrl, off) | 247 ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32); 248 } 249 250 static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl, 251 u32 off, u64 val) 252 { 253 nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF); 254 nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF); 255 } 256 257 static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf, 258 u64 pci_addr, size_t size, struct pci_epc_map *map) 259 { 260 struct pci_epf *epf = nvme_epf->epf; 261 262 return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, 263 pci_addr, size, map); 264 } 265 266 static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf, 267 struct pci_epc_map *map) 268 { 269 struct pci_epf *epf = nvme_epf->epf; 270 271 pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map); 272 } 273 274 struct nvmet_pci_epf_dma_filter { 275 struct device *dev; 276 u32 dma_mask; 277 }; 278 279 static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg) 280 { 281 struct nvmet_pci_epf_dma_filter *filter = arg; 282 struct dma_slave_caps caps; 283 284 memset(&caps, 0, sizeof(caps)); 285 dma_get_slave_caps(chan, &caps); 286 287 return chan->device->dev == filter->dev && 288 (filter->dma_mask & caps.directions); 289 } 290 291 static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf) 292 { 293 struct pci_epf *epf = nvme_epf->epf; 294 struct device *dev = &epf->dev; 295 struct nvmet_pci_epf_dma_filter filter; 296 struct dma_chan *chan; 297 dma_cap_mask_t mask; 298 299 mutex_init(&nvme_epf->dma_rx_lock); 300 mutex_init(&nvme_epf->dma_tx_lock); 301 302 dma_cap_zero(mask); 303 dma_cap_set(DMA_SLAVE, mask); 304 305 filter.dev = epf->epc->dev.parent; 306 filter.dma_mask = BIT(DMA_DEV_TO_MEM); 307 308 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter); 309 if (!chan) 310 goto out_dma_no_rx; 311 312 nvme_epf->dma_rx_chan = chan; 313 314 filter.dma_mask = BIT(DMA_MEM_TO_DEV); 315 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter); 316 if (!chan) 317 goto out_dma_no_tx; 318 319 nvme_epf->dma_tx_chan = chan; 320 321 nvme_epf->dma_enabled = true; 322 323 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", 324 dma_chan_name(chan), 325 dma_get_max_seg_size(dmaengine_get_dma_device(chan))); 326 327 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", 328 dma_chan_name(chan), 329 dma_get_max_seg_size(dmaengine_get_dma_device(chan))); 330 331 return; 332 333 out_dma_no_tx: 334 dma_release_channel(nvme_epf->dma_rx_chan); 335 nvme_epf->dma_rx_chan = NULL; 336 337 out_dma_no_rx: 338 mutex_destroy(&nvme_epf->dma_rx_lock); 339 mutex_destroy(&nvme_epf->dma_tx_lock); 340 nvme_epf->dma_enabled = false; 341 342 dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n"); 343 } 344 345 static void nvmet_pci_epf_deinit_dma(struct 
nvmet_pci_epf *nvme_epf) 346 { 347 if (!nvme_epf->dma_enabled) 348 return; 349 350 dma_release_channel(nvme_epf->dma_tx_chan); 351 nvme_epf->dma_tx_chan = NULL; 352 dma_release_channel(nvme_epf->dma_rx_chan); 353 nvme_epf->dma_rx_chan = NULL; 354 mutex_destroy(&nvme_epf->dma_rx_lock); 355 mutex_destroy(&nvme_epf->dma_tx_lock); 356 nvme_epf->dma_enabled = false; 357 } 358 359 static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf, 360 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) 361 { 362 struct pci_epf *epf = nvme_epf->epf; 363 struct dma_async_tx_descriptor *desc; 364 struct dma_slave_config sconf = {}; 365 struct device *dev = &epf->dev; 366 struct device *dma_dev; 367 struct dma_chan *chan; 368 dma_cookie_t cookie; 369 dma_addr_t dma_addr; 370 struct mutex *lock; 371 int ret; 372 373 switch (dir) { 374 case DMA_FROM_DEVICE: 375 lock = &nvme_epf->dma_rx_lock; 376 chan = nvme_epf->dma_rx_chan; 377 sconf.direction = DMA_DEV_TO_MEM; 378 sconf.src_addr = seg->pci_addr; 379 break; 380 case DMA_TO_DEVICE: 381 lock = &nvme_epf->dma_tx_lock; 382 chan = nvme_epf->dma_tx_chan; 383 sconf.direction = DMA_MEM_TO_DEV; 384 sconf.dst_addr = seg->pci_addr; 385 break; 386 default: 387 return -EINVAL; 388 } 389 390 mutex_lock(lock); 391 392 dma_dev = dmaengine_get_dma_device(chan); 393 dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir); 394 ret = dma_mapping_error(dma_dev, dma_addr); 395 if (ret) 396 goto unlock; 397 398 ret = dmaengine_slave_config(chan, &sconf); 399 if (ret) { 400 dev_err(dev, "Failed to configure DMA channel\n"); 401 goto unmap; 402 } 403 404 desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length, 405 sconf.direction, DMA_CTRL_ACK); 406 if (!desc) { 407 dev_err(dev, "Failed to prepare DMA\n"); 408 ret = -EIO; 409 goto unmap; 410 } 411 412 cookie = dmaengine_submit(desc); 413 ret = dma_submit_error(cookie); 414 if (ret) { 415 dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret); 416 goto unmap; 417 } 418 419 if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) { 420 dev_err(dev, "DMA transfer failed\n"); 421 ret = -EIO; 422 } 423 424 dmaengine_terminate_sync(chan); 425 426 unmap: 427 dma_unmap_single(dma_dev, dma_addr, seg->length, dir); 428 429 unlock: 430 mutex_unlock(lock); 431 432 return ret; 433 } 434 435 static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf, 436 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) 437 { 438 u64 pci_addr = seg->pci_addr; 439 u32 length = seg->length; 440 void *buf = seg->buf; 441 struct pci_epc_map map; 442 int ret = -EINVAL; 443 444 /* 445 * Note: MMIO transfers do not need serialization but this is a 446 * simple way to avoid using too many mapping windows. 
447 */ 448 mutex_lock(&nvme_epf->mmio_lock); 449 450 while (length) { 451 ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map); 452 if (ret) 453 break; 454 455 switch (dir) { 456 case DMA_FROM_DEVICE: 457 memcpy_fromio(buf, map.virt_addr, map.pci_size); 458 break; 459 case DMA_TO_DEVICE: 460 memcpy_toio(map.virt_addr, buf, map.pci_size); 461 break; 462 default: 463 ret = -EINVAL; 464 goto unlock; 465 } 466 467 pci_addr += map.pci_size; 468 buf += map.pci_size; 469 length -= map.pci_size; 470 471 nvmet_pci_epf_mem_unmap(nvme_epf, &map); 472 } 473 474 unlock: 475 mutex_unlock(&nvme_epf->mmio_lock); 476 477 return ret; 478 } 479 480 static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf, 481 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) 482 { 483 if (nvme_epf->dma_enabled) 484 return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir); 485 486 return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir); 487 } 488 489 static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl, 490 void *buf, u64 pci_addr, u32 length, 491 enum dma_data_direction dir) 492 { 493 struct nvmet_pci_epf_segment seg = { 494 .buf = buf, 495 .pci_addr = pci_addr, 496 .length = length, 497 }; 498 499 return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir); 500 } 501 502 static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) 503 { 504 ctrl->irq_vectors = kcalloc(ctrl->nr_queues, 505 sizeof(struct nvmet_pci_epf_irq_vector), 506 GFP_KERNEL); 507 if (!ctrl->irq_vectors) 508 return -ENOMEM; 509 510 mutex_init(&ctrl->irq_lock); 511 512 return 0; 513 } 514 515 static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) 516 { 517 if (ctrl->irq_vectors) { 518 mutex_destroy(&ctrl->irq_lock); 519 kfree(ctrl->irq_vectors); 520 ctrl->irq_vectors = NULL; 521 } 522 } 523 524 static struct nvmet_pci_epf_irq_vector * 525 nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) 526 { 527 struct nvmet_pci_epf_irq_vector *iv; 528 int i; 529 530 lockdep_assert_held(&ctrl->irq_lock); 531 532 for (i = 0; i < ctrl->nr_queues; i++) { 533 iv = &ctrl->irq_vectors[i]; 534 if (iv->ref && iv->vector == vector) 535 return iv; 536 } 537 538 return NULL; 539 } 540 541 static struct nvmet_pci_epf_irq_vector * 542 nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) 543 { 544 struct nvmet_pci_epf_irq_vector *iv; 545 int i; 546 547 mutex_lock(&ctrl->irq_lock); 548 549 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); 550 if (iv) { 551 iv->ref++; 552 goto unlock; 553 } 554 555 for (i = 0; i < ctrl->nr_queues; i++) { 556 iv = &ctrl->irq_vectors[i]; 557 if (!iv->ref) 558 break; 559 } 560 561 if (WARN_ON_ONCE(!iv)) 562 goto unlock; 563 564 iv->ref = 1; 565 iv->vector = vector; 566 iv->nr_irqs = 0; 567 568 unlock: 569 mutex_unlock(&ctrl->irq_lock); 570 571 return iv; 572 } 573 574 static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, 575 u16 vector) 576 { 577 struct nvmet_pci_epf_irq_vector *iv; 578 579 mutex_lock(&ctrl->irq_lock); 580 581 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); 582 if (iv) { 583 iv->ref--; 584 if (!iv->ref) { 585 iv->vector = 0; 586 iv->nr_irqs = 0; 587 } 588 } 589 590 mutex_unlock(&ctrl->irq_lock); 591 } 592 593 static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, 594 struct nvmet_pci_epf_queue *cq, bool force) 595 { 596 struct nvmet_pci_epf_irq_vector *iv = cq->iv; 597 bool ret; 598 599 if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) 600 return 
false; 601 602 /* IRQ coalescing for the admin queue is not allowed. */ 603 if (!cq->qid) 604 return true; 605 606 if (iv->cd) 607 return true; 608 609 if (force) { 610 ret = iv->nr_irqs > 0; 611 } else { 612 iv->nr_irqs++; 613 ret = iv->nr_irqs >= ctrl->irq_vector_threshold; 614 } 615 if (ret) 616 iv->nr_irqs = 0; 617 618 return ret; 619 } 620 621 static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, 622 struct nvmet_pci_epf_queue *cq, bool force) 623 { 624 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf; 625 struct pci_epf *epf = nvme_epf->epf; 626 int ret = 0; 627 628 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) 629 return; 630 631 mutex_lock(&ctrl->irq_lock); 632 633 if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force)) 634 goto unlock; 635 636 switch (nvme_epf->irq_type) { 637 case PCI_IRQ_MSIX: 638 case PCI_IRQ_MSI: 639 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, 640 nvme_epf->irq_type, cq->vector + 1); 641 if (!ret) 642 break; 643 /* 644 * If we got an error, it is likely because the host is using 645 * legacy IRQs (e.g. BIOS, grub). 646 */ 647 fallthrough; 648 case PCI_IRQ_INTX: 649 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, 650 PCI_IRQ_INTX, 0); 651 break; 652 default: 653 WARN_ON_ONCE(1); 654 ret = -EINVAL; 655 break; 656 } 657 658 if (ret) 659 dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret); 660 661 unlock: 662 mutex_unlock(&ctrl->irq_lock); 663 } 664 665 static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod) 666 { 667 return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode); 668 } 669 670 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work); 671 672 static struct nvmet_pci_epf_iod * 673 nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq) 674 { 675 struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl; 676 struct nvmet_pci_epf_iod *iod; 677 678 iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL); 679 if (unlikely(!iod)) 680 return NULL; 681 682 memset(iod, 0, sizeof(*iod)); 683 iod->req.cmd = &iod->cmd; 684 iod->req.cqe = &iod->cqe; 685 iod->req.port = ctrl->port; 686 iod->ctrl = ctrl; 687 iod->sq = sq; 688 iod->cq = &ctrl->cq[sq->qid]; 689 INIT_LIST_HEAD(&iod->link); 690 iod->dma_dir = DMA_NONE; 691 INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work); 692 init_completion(&iod->done); 693 694 return iod; 695 } 696 697 /* 698 * Allocate or grow a command table of PCI segments. 699 */ 700 static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod, 701 int nsegs) 702 { 703 struct nvmet_pci_epf_segment *segs; 704 int nr_segs = iod->nr_data_segs + nsegs; 705 706 segs = krealloc(iod->data_segs, 707 nr_segs * sizeof(struct nvmet_pci_epf_segment), 708 GFP_KERNEL | __GFP_ZERO); 709 if (!segs) 710 return -ENOMEM; 711 712 iod->nr_data_segs = nr_segs; 713 iod->data_segs = segs; 714 715 return 0; 716 } 717 718 static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod) 719 { 720 int i; 721 722 if (iod->data_segs) { 723 for (i = 0; i < iod->nr_data_segs; i++) 724 kfree(iod->data_segs[i].buf); 725 if (iod->data_segs != &iod->data_seg) 726 kfree(iod->data_segs); 727 } 728 if (iod->data_sgt.nents > 1) 729 sg_free_table(&iod->data_sgt); 730 mempool_free(iod, &iod->ctrl->iod_pool); 731 } 732 733 static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod) 734 { 735 struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf; 736 struct nvmet_pci_epf_segment *seg = &iod->data_segs[0]; 737 int i, ret; 738 739 /* Split the data transfer according to the PCI segments. 
*/ 740 for (i = 0; i < iod->nr_data_segs; i++, seg++) { 741 ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir); 742 if (ret) { 743 iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR; 744 return ret; 745 } 746 } 747 748 return 0; 749 } 750 751 static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl, 752 u64 prp) 753 { 754 return prp & ctrl->mps_mask; 755 } 756 757 static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl, 758 u64 prp) 759 { 760 return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp); 761 } 762 763 /* 764 * Transfer a PRP list from the host and return the number of prps. 765 */ 766 static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp, 767 size_t xfer_len, __le64 *prps) 768 { 769 size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift; 770 u32 length; 771 int ret; 772 773 /* 774 * Compute the number of PRPs required for the number of bytes to 775 * transfer (xfer_len). If this number overflows the memory page size 776 * with the PRP list pointer specified, only return the space available 777 * in the memory page, the last PRP in there will be a PRP list pointer 778 * to the remaining PRPs. 779 */ 780 length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3); 781 ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE); 782 if (ret) 783 return ret; 784 785 return length >> 3; 786 } 787 788 static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl, 789 struct nvmet_pci_epf_iod *iod) 790 { 791 struct nvme_command *cmd = &iod->cmd; 792 struct nvmet_pci_epf_segment *seg; 793 size_t size = 0, ofst, prp_size, xfer_len; 794 size_t transfer_len = iod->data_len; 795 int nr_segs, nr_prps = 0; 796 u64 pci_addr, prp; 797 int i = 0, ret; 798 __le64 *prps; 799 800 prps = kzalloc(ctrl->mps, GFP_KERNEL); 801 if (!prps) 802 goto err_internal; 803 804 /* 805 * Allocate PCI segments for the command: this considers the worst case 806 * scenario where all prps are discontiguous, so get as many segments 807 * as we can have prps. In practice, most of the time, we will have 808 * far less PCI segments than prps. 809 */ 810 prp = le64_to_cpu(cmd->common.dptr.prp1); 811 if (!prp) 812 goto err_invalid_field; 813 814 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp); 815 nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift; 816 817 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs); 818 if (ret) 819 goto err_internal; 820 821 /* Set the first segment using prp1. */ 822 seg = &iod->data_segs[0]; 823 seg->pci_addr = prp; 824 seg->length = nvmet_pci_epf_prp_size(ctrl, prp); 825 826 size = seg->length; 827 pci_addr = prp + size; 828 nr_segs = 1; 829 830 /* 831 * Now build the PCI address segments using the PRP lists, starting 832 * from prp2. 833 */ 834 prp = le64_to_cpu(cmd->common.dptr.prp2); 835 if (!prp) 836 goto err_invalid_field; 837 838 while (size < transfer_len) { 839 xfer_len = transfer_len - size; 840 841 if (!nr_prps) { 842 nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp, 843 xfer_len, prps); 844 if (nr_prps < 0) 845 goto err_internal; 846 847 i = 0; 848 ofst = 0; 849 } 850 851 /* Current entry */ 852 prp = le64_to_cpu(prps[i]); 853 if (!prp) 854 goto err_invalid_field; 855 856 /* Did we reach the last PRP entry of the list? */ 857 if (xfer_len > ctrl->mps && i == nr_prps - 1) { 858 /* We need more PRPs: PRP is a list pointer. */ 859 nr_prps = 0; 860 continue; 861 } 862 863 /* Only the first PRP is allowed to have an offset. 
*/ 864 if (nvmet_pci_epf_prp_ofst(ctrl, prp)) 865 goto err_invalid_offset; 866 867 if (prp != pci_addr) { 868 /* Discontiguous prp: new segment. */ 869 nr_segs++; 870 if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs)) 871 goto err_internal; 872 873 seg++; 874 seg->pci_addr = prp; 875 seg->length = 0; 876 pci_addr = prp; 877 } 878 879 prp_size = min_t(size_t, ctrl->mps, xfer_len); 880 seg->length += prp_size; 881 pci_addr += prp_size; 882 size += prp_size; 883 884 i++; 885 } 886 887 iod->nr_data_segs = nr_segs; 888 ret = 0; 889 890 if (size != transfer_len) { 891 dev_err(ctrl->dev, 892 "PRPs transfer length mismatch: got %zu B, need %zu B\n", 893 size, transfer_len); 894 goto err_internal; 895 } 896 897 kfree(prps); 898 899 return 0; 900 901 err_invalid_offset: 902 dev_err(ctrl->dev, "PRPs list invalid offset\n"); 903 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; 904 goto err; 905 906 err_invalid_field: 907 dev_err(ctrl->dev, "PRPs list invalid field\n"); 908 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 909 goto err; 910 911 err_internal: 912 dev_err(ctrl->dev, "PRPs list internal error\n"); 913 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 914 915 err: 916 kfree(prps); 917 return -EINVAL; 918 } 919 920 static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl, 921 struct nvmet_pci_epf_iod *iod) 922 { 923 struct nvme_command *cmd = &iod->cmd; 924 size_t transfer_len = iod->data_len; 925 int ret, nr_segs = 1; 926 u64 prp1, prp2 = 0; 927 size_t prp1_size; 928 929 prp1 = le64_to_cpu(cmd->common.dptr.prp1); 930 prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1); 931 932 /* For commands crossing a page boundary, we should have prp2. */ 933 if (transfer_len > prp1_size) { 934 prp2 = le64_to_cpu(cmd->common.dptr.prp2); 935 if (!prp2) { 936 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 937 return -EINVAL; 938 } 939 if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) { 940 iod->status = 941 NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; 942 return -EINVAL; 943 } 944 if (prp2 != prp1 + prp1_size) 945 nr_segs = 2; 946 } 947 948 if (nr_segs == 1) { 949 iod->nr_data_segs = 1; 950 iod->data_segs = &iod->data_seg; 951 iod->data_segs[0].pci_addr = prp1; 952 iod->data_segs[0].length = transfer_len; 953 return 0; 954 } 955 956 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs); 957 if (ret) { 958 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 959 return ret; 960 } 961 962 iod->data_segs[0].pci_addr = prp1; 963 iod->data_segs[0].length = prp1_size; 964 iod->data_segs[1].pci_addr = prp2; 965 iod->data_segs[1].length = transfer_len - prp1_size; 966 967 return 0; 968 } 969 970 static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod) 971 { 972 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; 973 u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1); 974 size_t ofst; 975 976 /* Get the PCI address segments for the command using its PRPs. */ 977 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1); 978 if (ofst & 0x3) { 979 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; 980 return -EINVAL; 981 } 982 983 if (iod->data_len + ofst <= ctrl->mps * 2) 984 return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod); 985 986 return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod); 987 } 988 989 /* 990 * Transfer an SGL segment from the host and return the number of data 991 * descriptors and the next segment descriptor, if any. 
992 */ 993 static struct nvme_sgl_desc * 994 nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl, 995 struct nvme_sgl_desc *desc, unsigned int *nr_sgls) 996 { 997 struct nvme_sgl_desc *sgls; 998 u32 length = le32_to_cpu(desc->length); 999 int nr_descs, ret; 1000 void *buf; 1001 1002 buf = kmalloc(length, GFP_KERNEL); 1003 if (!buf) 1004 return NULL; 1005 1006 ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length, 1007 DMA_FROM_DEVICE); 1008 if (ret) { 1009 kfree(buf); 1010 return NULL; 1011 } 1012 1013 sgls = buf; 1014 nr_descs = length / sizeof(struct nvme_sgl_desc); 1015 if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) || 1016 sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { 1017 /* 1018 * We have another SGL segment following this one: do not count 1019 * it as a regular data SGL descriptor and return it to the 1020 * caller. 1021 */ 1022 *desc = sgls[nr_descs - 1]; 1023 nr_descs--; 1024 } else { 1025 /* We do not have another SGL segment after this one. */ 1026 desc->length = 0; 1027 } 1028 1029 *nr_sgls = nr_descs; 1030 1031 return sgls; 1032 } 1033 1034 static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl, 1035 struct nvmet_pci_epf_iod *iod) 1036 { 1037 struct nvme_command *cmd = &iod->cmd; 1038 struct nvme_sgl_desc seg = cmd->common.dptr.sgl; 1039 struct nvme_sgl_desc *sgls = NULL; 1040 int n = 0, i, nr_sgls; 1041 int ret; 1042 1043 /* 1044 * We do not support inline data nor keyed SGLs, so we should be seeing 1045 * only segment descriptors. 1046 */ 1047 if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) && 1048 seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { 1049 iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; 1050 return -EIO; 1051 } 1052 1053 while (seg.length) { 1054 sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls); 1055 if (!sgls) { 1056 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1057 return -EIO; 1058 } 1059 1060 /* Grow the PCI segment table as needed. */ 1061 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls); 1062 if (ret) { 1063 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1064 goto out; 1065 } 1066 1067 /* 1068 * Parse the SGL descriptors to build the PCI segment table, 1069 * checking the descriptor type as we go. 1070 */ 1071 for (i = 0; i < nr_sgls; i++) { 1072 if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) { 1073 iod->status = NVME_SC_SGL_INVALID_TYPE | 1074 NVME_STATUS_DNR; 1075 goto out; 1076 } 1077 iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr); 1078 iod->data_segs[n].length = le32_to_cpu(sgls[i].length); 1079 n++; 1080 } 1081 1082 kfree(sgls); 1083 } 1084 1085 out: 1086 if (iod->status != NVME_SC_SUCCESS) { 1087 kfree(sgls); 1088 return -EIO; 1089 } 1090 1091 return 0; 1092 } 1093 1094 static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod) 1095 { 1096 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; 1097 struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl; 1098 1099 if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) { 1100 /* Single data descriptor case. 
*/ 1101 iod->nr_data_segs = 1; 1102 iod->data_segs = &iod->data_seg; 1103 iod->data_seg.pci_addr = le64_to_cpu(sgl->addr); 1104 iod->data_seg.length = le32_to_cpu(sgl->length); 1105 return 0; 1106 } 1107 1108 return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod); 1109 } 1110 1111 static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod) 1112 { 1113 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; 1114 struct nvmet_req *req = &iod->req; 1115 struct nvmet_pci_epf_segment *seg; 1116 struct scatterlist *sg; 1117 int ret, i; 1118 1119 if (iod->data_len > ctrl->mdts) { 1120 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1121 return -EINVAL; 1122 } 1123 1124 /* 1125 * Get the PCI address segments for the command data buffer using either 1126 * its SGLs or PRPs. 1127 */ 1128 if (iod->cmd.common.flags & NVME_CMD_SGL_ALL) 1129 ret = nvmet_pci_epf_iod_parse_sgls(iod); 1130 else 1131 ret = nvmet_pci_epf_iod_parse_prps(iod); 1132 if (ret) 1133 return ret; 1134 1135 /* Get a command buffer using SGLs matching the PCI segments. */ 1136 if (iod->nr_data_segs == 1) { 1137 sg_init_table(&iod->data_sgl, 1); 1138 iod->data_sgt.sgl = &iod->data_sgl; 1139 iod->data_sgt.nents = 1; 1140 iod->data_sgt.orig_nents = 1; 1141 } else { 1142 ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs, 1143 GFP_KERNEL); 1144 if (ret) 1145 goto err_nomem; 1146 } 1147 1148 for_each_sgtable_sg(&iod->data_sgt, sg, i) { 1149 seg = &iod->data_segs[i]; 1150 seg->buf = kmalloc(seg->length, GFP_KERNEL); 1151 if (!seg->buf) 1152 goto err_nomem; 1153 sg_set_buf(sg, seg->buf, seg->length); 1154 } 1155 1156 req->transfer_len = iod->data_len; 1157 req->sg = iod->data_sgt.sgl; 1158 req->sg_cnt = iod->data_sgt.nents; 1159 1160 return 0; 1161 1162 err_nomem: 1163 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1164 return -ENOMEM; 1165 } 1166 1167 static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod) 1168 { 1169 struct nvmet_pci_epf_queue *cq = iod->cq; 1170 unsigned long flags; 1171 1172 /* Print an error message for failed commands, except AENs. */ 1173 iod->status = le16_to_cpu(iod->cqe.status) >> 1; 1174 if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event) 1175 dev_err(iod->ctrl->dev, 1176 "CQ[%d]: Command %s (0x%x) status 0x%0x\n", 1177 iod->sq->qid, nvmet_pci_epf_iod_name(iod), 1178 iod->cmd.common.opcode, iod->status); 1179 1180 /* 1181 * Add the command to the list of completed commands and schedule the 1182 * CQ work. 
1183 */ 1184 spin_lock_irqsave(&cq->lock, flags); 1185 list_add_tail(&iod->link, &cq->list); 1186 queue_delayed_work(system_highpri_wq, &cq->work, 0); 1187 spin_unlock_irqrestore(&cq->lock, flags); 1188 } 1189 1190 static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue) 1191 { 1192 struct nvmet_pci_epf_iod *iod; 1193 unsigned long flags; 1194 1195 spin_lock_irqsave(&queue->lock, flags); 1196 while (!list_empty(&queue->list)) { 1197 iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod, 1198 link); 1199 list_del_init(&iod->link); 1200 nvmet_pci_epf_free_iod(iod); 1201 } 1202 spin_unlock_irqrestore(&queue->lock, flags); 1203 } 1204 1205 static int nvmet_pci_epf_add_port(struct nvmet_port *port) 1206 { 1207 mutex_lock(&nvmet_pci_epf_ports_mutex); 1208 list_add_tail(&port->entry, &nvmet_pci_epf_ports); 1209 mutex_unlock(&nvmet_pci_epf_ports_mutex); 1210 return 0; 1211 } 1212 1213 static void nvmet_pci_epf_remove_port(struct nvmet_port *port) 1214 { 1215 mutex_lock(&nvmet_pci_epf_ports_mutex); 1216 list_del_init(&port->entry); 1217 mutex_unlock(&nvmet_pci_epf_ports_mutex); 1218 } 1219 1220 static struct nvmet_port * 1221 nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid) 1222 { 1223 struct nvmet_port *p, *port = NULL; 1224 1225 mutex_lock(&nvmet_pci_epf_ports_mutex); 1226 list_for_each_entry(p, &nvmet_pci_epf_ports, entry) { 1227 if (p->disc_addr.portid == portid) { 1228 port = p; 1229 break; 1230 } 1231 } 1232 mutex_unlock(&nvmet_pci_epf_ports_mutex); 1233 1234 return port; 1235 } 1236 1237 static void nvmet_pci_epf_queue_response(struct nvmet_req *req) 1238 { 1239 struct nvmet_pci_epf_iod *iod = 1240 container_of(req, struct nvmet_pci_epf_iod, req); 1241 1242 iod->status = le16_to_cpu(req->cqe->status) >> 1; 1243 1244 /* If we have no data to transfer, directly complete the command. 
*/ 1245 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) { 1246 nvmet_pci_epf_complete_iod(iod); 1247 return; 1248 } 1249 1250 complete(&iod->done); 1251 } 1252 1253 static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl) 1254 { 1255 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1256 int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12; 1257 1258 return ilog2(ctrl->mdts) - page_shift; 1259 } 1260 1261 static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl, 1262 u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector) 1263 { 1264 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1265 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; 1266 u16 status; 1267 int ret; 1268 1269 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) 1270 return NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1271 1272 if (!(flags & NVME_QUEUE_PHYS_CONTIG)) 1273 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR; 1274 1275 cq->pci_addr = pci_addr; 1276 cq->qid = cqid; 1277 cq->depth = qsize + 1; 1278 cq->vector = vector; 1279 cq->head = 0; 1280 cq->tail = 0; 1281 cq->phase = 1; 1282 cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32)); 1283 nvmet_pci_epf_bar_write32(ctrl, cq->db, 0); 1284 1285 if (!cqid) 1286 cq->qes = sizeof(struct nvme_completion); 1287 else 1288 cq->qes = ctrl->io_cqes; 1289 cq->pci_size = cq->qes * cq->depth; 1290 1291 if (flags & NVME_CQ_IRQ_ENABLED) { 1292 cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector); 1293 if (!cq->iv) 1294 return NVME_SC_INTERNAL | NVME_STATUS_DNR; 1295 set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); 1296 } 1297 1298 status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth); 1299 if (status != NVME_SC_SUCCESS) 1300 goto err; 1301 1302 /* 1303 * Map the CQ PCI address space and since PCI endpoint controllers may 1304 * return a partial mapping, check that the mapping is large enough. 
1305 */ 1306 ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size, 1307 &cq->pci_map); 1308 if (ret) { 1309 dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n", 1310 cq->qid, ret); 1311 goto err_internal; 1312 } 1313 1314 if (cq->pci_map.pci_size < cq->pci_size) { 1315 dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n", 1316 cq->qid); 1317 goto err_unmap_queue; 1318 } 1319 1320 set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); 1321 1322 dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", 1323 cqid, qsize, cq->qes, cq->vector); 1324 1325 return NVME_SC_SUCCESS; 1326 1327 err_unmap_queue: 1328 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); 1329 err_internal: 1330 status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1331 err: 1332 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) 1333 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); 1334 return status; 1335 } 1336 1337 static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid) 1338 { 1339 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1340 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; 1341 1342 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) 1343 return NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1344 1345 cancel_delayed_work_sync(&cq->work); 1346 nvmet_pci_epf_drain_queue(cq); 1347 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); 1348 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); 1349 1350 return NVME_SC_SUCCESS; 1351 } 1352 1353 static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, 1354 u16 sqid, u16 flags, u16 qsize, u64 pci_addr) 1355 { 1356 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1357 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; 1358 u16 status; 1359 1360 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) 1361 return NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1362 1363 if (!(flags & NVME_QUEUE_PHYS_CONTIG)) 1364 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR; 1365 1366 sq->pci_addr = pci_addr; 1367 sq->qid = sqid; 1368 sq->depth = qsize + 1; 1369 sq->head = 0; 1370 sq->tail = 0; 1371 sq->phase = 0; 1372 sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32)); 1373 nvmet_pci_epf_bar_write32(ctrl, sq->db, 0); 1374 if (!sqid) 1375 sq->qes = 1UL << NVME_ADM_SQES; 1376 else 1377 sq->qes = ctrl->io_sqes; 1378 sq->pci_size = sq->qes * sq->depth; 1379 1380 status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth); 1381 if (status != NVME_SC_SUCCESS) 1382 return status; 1383 1384 sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND, 1385 min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid); 1386 if (!sq->iod_wq) { 1387 dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid); 1388 status = NVME_SC_INTERNAL | NVME_STATUS_DNR; 1389 goto out_destroy_sq; 1390 } 1391 1392 set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags); 1393 1394 dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n", 1395 sqid, qsize, sq->qes); 1396 1397 return NVME_SC_SUCCESS; 1398 1399 out_destroy_sq: 1400 nvmet_sq_destroy(&sq->nvme_sq); 1401 return status; 1402 } 1403 1404 static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid) 1405 { 1406 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1407 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; 1408 1409 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) 1410 return NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1411 1412 destroy_workqueue(sq->iod_wq); 1413 sq->iod_wq = NULL; 1414 1415 nvmet_pci_epf_drain_queue(sq); 1416 1417 if (sq->nvme_sq.ctrl) 1418 nvmet_sq_destroy(&sq->nvme_sq); 1419 1420 return NVME_SC_SUCCESS; 1421 } 1422 1423 
static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl, 1424 u8 feat, void *data) 1425 { 1426 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1427 struct nvmet_feat_arbitration *arb; 1428 struct nvmet_feat_irq_coalesce *irqc; 1429 struct nvmet_feat_irq_config *irqcfg; 1430 struct nvmet_pci_epf_irq_vector *iv; 1431 u16 status; 1432 1433 switch (feat) { 1434 case NVME_FEAT_ARBITRATION: 1435 arb = data; 1436 if (!ctrl->sq_ab) 1437 arb->ab = 0x7; 1438 else 1439 arb->ab = ilog2(ctrl->sq_ab); 1440 return NVME_SC_SUCCESS; 1441 1442 case NVME_FEAT_IRQ_COALESCE: 1443 irqc = data; 1444 irqc->thr = ctrl->irq_vector_threshold; 1445 irqc->time = 0; 1446 return NVME_SC_SUCCESS; 1447 1448 case NVME_FEAT_IRQ_CONFIG: 1449 irqcfg = data; 1450 mutex_lock(&ctrl->irq_lock); 1451 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); 1452 if (iv) { 1453 irqcfg->cd = iv->cd; 1454 status = NVME_SC_SUCCESS; 1455 } else { 1456 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1457 } 1458 mutex_unlock(&ctrl->irq_lock); 1459 return status; 1460 1461 default: 1462 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1463 } 1464 } 1465 1466 static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl, 1467 u8 feat, void *data) 1468 { 1469 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; 1470 struct nvmet_feat_arbitration *arb; 1471 struct nvmet_feat_irq_coalesce *irqc; 1472 struct nvmet_feat_irq_config *irqcfg; 1473 struct nvmet_pci_epf_irq_vector *iv; 1474 u16 status; 1475 1476 switch (feat) { 1477 case NVME_FEAT_ARBITRATION: 1478 arb = data; 1479 if (arb->ab == 0x7) 1480 ctrl->sq_ab = 0; 1481 else 1482 ctrl->sq_ab = 1 << arb->ab; 1483 return NVME_SC_SUCCESS; 1484 1485 case NVME_FEAT_IRQ_COALESCE: 1486 /* 1487 * Since we do not implement precise IRQ coalescing timing, 1488 * ignore the time field. 
1489 */ 1490 irqc = data; 1491 ctrl->irq_vector_threshold = irqc->thr + 1; 1492 return NVME_SC_SUCCESS; 1493 1494 case NVME_FEAT_IRQ_CONFIG: 1495 irqcfg = data; 1496 mutex_lock(&ctrl->irq_lock); 1497 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); 1498 if (iv) { 1499 iv->cd = irqcfg->cd; 1500 status = NVME_SC_SUCCESS; 1501 } else { 1502 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1503 } 1504 mutex_unlock(&ctrl->irq_lock); 1505 return status; 1506 1507 default: 1508 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 1509 } 1510 } 1511 1512 static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = { 1513 .owner = THIS_MODULE, 1514 .type = NVMF_TRTYPE_PCI, 1515 .add_port = nvmet_pci_epf_add_port, 1516 .remove_port = nvmet_pci_epf_remove_port, 1517 .queue_response = nvmet_pci_epf_queue_response, 1518 .get_mdts = nvmet_pci_epf_get_mdts, 1519 .create_cq = nvmet_pci_epf_create_cq, 1520 .delete_cq = nvmet_pci_epf_delete_cq, 1521 .create_sq = nvmet_pci_epf_create_sq, 1522 .delete_sq = nvmet_pci_epf_delete_sq, 1523 .get_feature = nvmet_pci_epf_get_feat, 1524 .set_feature = nvmet_pci_epf_set_feat, 1525 }; 1526 1527 static void nvmet_pci_epf_cq_work(struct work_struct *work); 1528 1529 static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl, 1530 unsigned int qid, bool sq) 1531 { 1532 struct nvmet_pci_epf_queue *queue; 1533 1534 if (sq) { 1535 queue = &ctrl->sq[qid]; 1536 set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags); 1537 } else { 1538 queue = &ctrl->cq[qid]; 1539 INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work); 1540 } 1541 queue->ctrl = ctrl; 1542 queue->qid = qid; 1543 spin_lock_init(&queue->lock); 1544 INIT_LIST_HEAD(&queue->list); 1545 } 1546 1547 static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl) 1548 { 1549 unsigned int qid; 1550 1551 ctrl->sq = kcalloc(ctrl->nr_queues, 1552 sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL); 1553 if (!ctrl->sq) 1554 return -ENOMEM; 1555 1556 ctrl->cq = kcalloc(ctrl->nr_queues, 1557 sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL); 1558 if (!ctrl->cq) { 1559 kfree(ctrl->sq); 1560 ctrl->sq = NULL; 1561 return -ENOMEM; 1562 } 1563 1564 for (qid = 0; qid < ctrl->nr_queues; qid++) { 1565 nvmet_pci_epf_init_queue(ctrl, qid, true); 1566 nvmet_pci_epf_init_queue(ctrl, qid, false); 1567 } 1568 1569 return 0; 1570 } 1571 1572 static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl) 1573 { 1574 kfree(ctrl->sq); 1575 ctrl->sq = NULL; 1576 kfree(ctrl->cq); 1577 ctrl->cq = NULL; 1578 } 1579 1580 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work) 1581 { 1582 struct nvmet_pci_epf_iod *iod = 1583 container_of(work, struct nvmet_pci_epf_iod, work); 1584 struct nvmet_req *req = &iod->req; 1585 int ret; 1586 1587 if (!iod->ctrl->link_up) { 1588 nvmet_pci_epf_free_iod(iod); 1589 return; 1590 } 1591 1592 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) { 1593 iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; 1594 goto complete; 1595 } 1596 1597 if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq, 1598 &nvmet_pci_epf_fabrics_ops)) 1599 goto complete; 1600 1601 iod->data_len = nvmet_req_transfer_len(req); 1602 if (iod->data_len) { 1603 /* 1604 * Get the data DMA transfer direction. Here "device" means the 1605 * PCI root-complex host. 1606 */ 1607 if (nvme_is_write(&iod->cmd)) 1608 iod->dma_dir = DMA_FROM_DEVICE; 1609 else 1610 iod->dma_dir = DMA_TO_DEVICE; 1611 1612 /* 1613 * Setup the command data buffer and get the command data from 1614 * the host if needed. 
1615 */ 1616 ret = nvmet_pci_epf_alloc_iod_data_buf(iod); 1617 if (!ret && iod->dma_dir == DMA_FROM_DEVICE) 1618 ret = nvmet_pci_epf_transfer_iod_data(iod); 1619 if (ret) { 1620 nvmet_req_uninit(req); 1621 goto complete; 1622 } 1623 } 1624 1625 req->execute(req); 1626 1627 /* 1628 * If we do not have data to transfer after the command execution 1629 * finishes, nvmet_pci_epf_queue_response() will complete the command 1630 * directly. No need to wait for the completion in this case. 1631 */ 1632 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) 1633 return; 1634 1635 wait_for_completion(&iod->done); 1636 1637 if (iod->status == NVME_SC_SUCCESS) { 1638 WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE); 1639 nvmet_pci_epf_transfer_iod_data(iod); 1640 } 1641 1642 complete: 1643 nvmet_pci_epf_complete_iod(iod); 1644 } 1645 1646 static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl, 1647 struct nvmet_pci_epf_queue *sq) 1648 { 1649 struct nvmet_pci_epf_iod *iod; 1650 int ret, n = 0; 1651 u16 head = sq->head; 1652 1653 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); 1654 while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) { 1655 iod = nvmet_pci_epf_alloc_iod(sq); 1656 if (!iod) 1657 break; 1658 1659 /* Get the NVMe command submitted by the host. */ 1660 ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd, 1661 sq->pci_addr + head * sq->qes, 1662 sq->qes, DMA_FROM_DEVICE); 1663 if (ret) { 1664 /* Not much we can do... */ 1665 nvmet_pci_epf_free_iod(iod); 1666 break; 1667 } 1668 1669 dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n", 1670 sq->qid, head, sq->tail, 1671 nvmet_pci_epf_iod_name(iod)); 1672 1673 head++; 1674 if (head == sq->depth) 1675 head = 0; 1676 WRITE_ONCE(sq->head, head); 1677 n++; 1678 1679 queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work); 1680 1681 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); 1682 } 1683 1684 return n; 1685 } 1686 1687 static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work) 1688 { 1689 struct nvmet_pci_epf_ctrl *ctrl = 1690 container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work); 1691 struct nvmet_pci_epf_queue *sq; 1692 unsigned long limit = jiffies; 1693 unsigned long last = 0; 1694 int i, nr_sqs; 1695 1696 while (ctrl->link_up && ctrl->enabled) { 1697 nr_sqs = 0; 1698 /* Do round-robin arbitration. */ 1699 for (i = 0; i < ctrl->nr_queues; i++) { 1700 sq = &ctrl->sq[i]; 1701 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) 1702 continue; 1703 if (nvmet_pci_epf_process_sq(ctrl, sq)) 1704 nr_sqs++; 1705 } 1706 1707 /* 1708 * If we have been running for a while, reschedule to let other 1709 * tasks run and to avoid RCU stalls. 1710 */ 1711 if (time_is_before_jiffies(limit + secs_to_jiffies(1))) { 1712 cond_resched(); 1713 limit = jiffies; 1714 continue; 1715 } 1716 1717 if (nr_sqs) { 1718 last = jiffies; 1719 continue; 1720 } 1721 1722 /* 1723 * If we have not received any command on any queue for more 1724 * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and 1725 * reschedule. This avoids "burning" a CPU when the controller 1726 * is idle for a long time. 
1727 */ 1728 if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE)) 1729 break; 1730 1731 cpu_relax(); 1732 } 1733 1734 schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL); 1735 } 1736 1737 static void nvmet_pci_epf_cq_work(struct work_struct *work) 1738 { 1739 struct nvmet_pci_epf_queue *cq = 1740 container_of(work, struct nvmet_pci_epf_queue, work.work); 1741 struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl; 1742 struct nvme_completion *cqe; 1743 struct nvmet_pci_epf_iod *iod; 1744 unsigned long flags; 1745 int ret = 0, n = 0; 1746 1747 while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) { 1748 1749 /* Check that the CQ is not full. */ 1750 cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db); 1751 if (cq->head == cq->tail + 1) { 1752 ret = -EAGAIN; 1753 break; 1754 } 1755 1756 spin_lock_irqsave(&cq->lock, flags); 1757 iod = list_first_entry_or_null(&cq->list, 1758 struct nvmet_pci_epf_iod, link); 1759 if (iod) 1760 list_del_init(&iod->link); 1761 spin_unlock_irqrestore(&cq->lock, flags); 1762 1763 if (!iod) 1764 break; 1765 1766 /* 1767 * Post the IOD completion entry. If the IOD request was 1768 * executed (req->execute() called), the CQE is already 1769 * initialized. However, the IOD may have been failed before 1770 * that, leaving the CQE not properly initialized. So always 1771 * initialize it here. 1772 */ 1773 cqe = &iod->cqe; 1774 cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head)); 1775 cqe->sq_id = cpu_to_le16(iod->sq->qid); 1776 cqe->command_id = iod->cmd.common.command_id; 1777 cqe->status = cpu_to_le16((iod->status << 1) | cq->phase); 1778 1779 dev_dbg(ctrl->dev, 1780 "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n", 1781 cq->qid, nvmet_pci_epf_iod_name(iod), iod->status, 1782 le64_to_cpu(cqe->result.u64), cq->head, cq->tail, 1783 cq->phase); 1784 1785 memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes, 1786 cqe, cq->qes); 1787 1788 cq->tail++; 1789 if (cq->tail >= cq->depth) { 1790 cq->tail = 0; 1791 cq->phase ^= 1; 1792 } 1793 1794 nvmet_pci_epf_free_iod(iod); 1795 1796 /* Signal the host. */ 1797 nvmet_pci_epf_raise_irq(ctrl, cq, false); 1798 n++; 1799 } 1800 1801 /* 1802 * We do not support precise IRQ coalescing time (100ns units as per 1803 * NVMe specifications). So if we have posted completion entries without 1804 * reaching the interrupt coalescing threshold, raise an interrupt. 1805 */ 1806 if (n) 1807 nvmet_pci_epf_raise_irq(ctrl, cq, true); 1808 1809 if (ret < 0) 1810 queue_delayed_work(system_highpri_wq, &cq->work, 1811 NVMET_PCI_EPF_CQ_RETRY_INTERVAL); 1812 } 1813 1814 static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl) 1815 { 1816 struct nvmet_ctrl *tctrl = ctrl->tctrl; 1817 1818 /* Initialize controller status. */ 1819 tctrl->csts = 0; 1820 ctrl->csts = 0; 1821 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); 1822 1823 /* Initialize controller configuration and start polling. 
*/ 1824 tctrl->cc = 0; 1825 ctrl->cc = 0; 1826 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); 1827 } 1828 1829 static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) 1830 { 1831 u64 pci_addr, asq, acq; 1832 u32 aqa; 1833 u16 status, qsize; 1834 1835 if (ctrl->enabled) 1836 return 0; 1837 1838 dev_info(ctrl->dev, "Enabling controller\n"); 1839 1840 ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12; 1841 ctrl->mps = 1UL << ctrl->mps_shift; 1842 ctrl->mps_mask = ctrl->mps - 1; 1843 1844 ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc); 1845 if (ctrl->io_sqes < sizeof(struct nvme_command)) { 1846 dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n", 1847 ctrl->io_sqes, sizeof(struct nvme_command)); 1848 goto err; 1849 } 1850 1851 ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc); 1852 if (ctrl->io_cqes < sizeof(struct nvme_completion)) { 1853 dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n", 1854 ctrl->io_sqes, sizeof(struct nvme_completion)); 1855 goto err; 1856 } 1857 1858 /* Create the admin queue. */ 1859 aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA); 1860 asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ); 1861 acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ); 1862 1863 qsize = (aqa & 0x0fff0000) >> 16; 1864 pci_addr = acq & GENMASK_ULL(63, 12); 1865 status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0, 1866 NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG, 1867 qsize, pci_addr, 0); 1868 if (status != NVME_SC_SUCCESS) { 1869 dev_err(ctrl->dev, "Failed to create admin completion queue\n"); 1870 goto err; 1871 } 1872 1873 qsize = aqa & 0x00000fff; 1874 pci_addr = asq & GENMASK_ULL(63, 12); 1875 status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG, 1876 qsize, pci_addr); 1877 if (status != NVME_SC_SUCCESS) { 1878 dev_err(ctrl->dev, "Failed to create admin submission queue\n"); 1879 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); 1880 goto err; 1881 } 1882 1883 ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB; 1884 ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD; 1885 ctrl->enabled = true; 1886 ctrl->csts = NVME_CSTS_RDY; 1887 1888 /* Start polling the controller SQs. */ 1889 schedule_delayed_work(&ctrl->poll_sqs, 0); 1890 1891 return 0; 1892 1893 err: 1894 nvmet_pci_epf_clear_ctrl_config(ctrl); 1895 return -EINVAL; 1896 } 1897 1898 static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl, 1899 bool shutdown) 1900 { 1901 int qid; 1902 1903 if (!ctrl->enabled) 1904 return; 1905 1906 dev_info(ctrl->dev, "%s controller\n", 1907 shutdown ? "Shutting down" : "Disabling"); 1908 1909 ctrl->enabled = false; 1910 cancel_delayed_work_sync(&ctrl->poll_sqs); 1911 1912 /* Delete all I/O queues first. */ 1913 for (qid = 1; qid < ctrl->nr_queues; qid++) 1914 nvmet_pci_epf_delete_sq(ctrl->tctrl, qid); 1915 1916 for (qid = 1; qid < ctrl->nr_queues; qid++) 1917 nvmet_pci_epf_delete_cq(ctrl->tctrl, qid); 1918 1919 /* Delete the admin queue last. 
*/ 1920 nvmet_pci_epf_delete_sq(ctrl->tctrl, 0); 1921 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); 1922 1923 ctrl->csts &= ~NVME_CSTS_RDY; 1924 if (shutdown) { 1925 ctrl->csts |= NVME_CSTS_SHST_CMPLT; 1926 ctrl->cc &= ~NVME_CC_ENABLE; 1927 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); 1928 } 1929 } 1930 1931 static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) 1932 { 1933 struct nvmet_pci_epf_ctrl *ctrl = 1934 container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work); 1935 u32 old_cc, new_cc; 1936 int ret; 1937 1938 if (!ctrl->tctrl) 1939 return; 1940 1941 old_cc = ctrl->cc; 1942 new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC); 1943 if (new_cc == old_cc) 1944 goto reschedule_work; 1945 1946 ctrl->cc = new_cc; 1947 1948 if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) { 1949 ret = nvmet_pci_epf_enable_ctrl(ctrl); 1950 if (ret) 1951 goto reschedule_work; 1952 } 1953 1954 if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) 1955 nvmet_pci_epf_disable_ctrl(ctrl, false); 1956 1957 if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) 1958 nvmet_pci_epf_disable_ctrl(ctrl, true); 1959 1960 if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc)) 1961 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; 1962 1963 nvmet_update_cc(ctrl->tctrl, ctrl->cc); 1964 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); 1965 1966 reschedule_work: 1967 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); 1968 } 1969 1970 static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl) 1971 { 1972 struct nvmet_ctrl *tctrl = ctrl->tctrl; 1973 1974 ctrl->bar = ctrl->nvme_epf->reg_bar; 1975 1976 /* Copy the target controller capabilities as a base. */ 1977 ctrl->cap = tctrl->cap; 1978 1979 /* Contiguous Queues Required (CQR). */ 1980 ctrl->cap |= 0x1ULL << 16; 1981 1982 /* Set Doorbell stride to 4B (DSTRB). */ 1983 ctrl->cap &= ~GENMASK_ULL(35, 32); 1984 1985 /* Clear NVM Subsystem Reset Supported (NSSRS). */ 1986 ctrl->cap &= ~(0x1ULL << 36); 1987 1988 /* Clear Boot Partition Support (BPS). */ 1989 ctrl->cap &= ~(0x1ULL << 45); 1990 1991 /* Clear Persistent Memory Region Supported (PMRS). */ 1992 ctrl->cap &= ~(0x1ULL << 56); 1993 1994 /* Clear Controller Memory Buffer Supported (CMBS). 
*/ 1995 ctrl->cap &= ~(0x1ULL << 57); 1996 1997 nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap); 1998 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver); 1999 2000 nvmet_pci_epf_clear_ctrl_config(ctrl); 2001 } 2002 2003 static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf, 2004 unsigned int max_nr_queues) 2005 { 2006 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; 2007 struct nvmet_alloc_ctrl_args args = {}; 2008 char hostnqn[NVMF_NQN_SIZE]; 2009 uuid_t id; 2010 int ret; 2011 2012 memset(ctrl, 0, sizeof(*ctrl)); 2013 ctrl->dev = &nvme_epf->epf->dev; 2014 mutex_init(&ctrl->irq_lock); 2015 ctrl->nvme_epf = nvme_epf; 2016 ctrl->mdts = nvme_epf->mdts_kb * SZ_1K; 2017 INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work); 2018 INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work); 2019 2020 ret = mempool_init_kmalloc_pool(&ctrl->iod_pool, 2021 max_nr_queues * NVMET_MAX_QUEUE_SIZE, 2022 sizeof(struct nvmet_pci_epf_iod)); 2023 if (ret) { 2024 dev_err(ctrl->dev, "Failed to initialize IOD mempool\n"); 2025 return ret; 2026 } 2027 2028 ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid); 2029 if (!ctrl->port) { 2030 dev_err(ctrl->dev, "Port not found\n"); 2031 ret = -EINVAL; 2032 goto out_mempool_exit; 2033 } 2034 2035 /* Create the target controller. */ 2036 uuid_gen(&id); 2037 snprintf(hostnqn, NVMF_NQN_SIZE, 2038 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); 2039 args.port = ctrl->port; 2040 args.subsysnqn = nvme_epf->subsysnqn; 2041 memset(&id, 0, sizeof(uuid_t)); 2042 args.hostid = &id; 2043 args.hostnqn = hostnqn; 2044 args.ops = &nvmet_pci_epf_fabrics_ops; 2045 2046 ctrl->tctrl = nvmet_alloc_ctrl(&args); 2047 if (!ctrl->tctrl) { 2048 dev_err(ctrl->dev, "Failed to create target controller\n"); 2049 ret = -ENOMEM; 2050 goto out_mempool_exit; 2051 } 2052 ctrl->tctrl->drvdata = ctrl; 2053 2054 /* We do not support protection information for now. */ 2055 if (ctrl->tctrl->pi_support) { 2056 dev_err(ctrl->dev, 2057 "Protection information (PI) is not supported\n"); 2058 ret = -ENOTSUPP; 2059 goto out_put_ctrl; 2060 } 2061 2062 /* Allocate our queues, up to the maximum number. */ 2063 ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues); 2064 ret = nvmet_pci_epf_alloc_queues(ctrl); 2065 if (ret) 2066 goto out_put_ctrl; 2067 2068 /* 2069 * Allocate the IRQ vectors descriptors. We cannot have more than the 2070 * maximum number of queues. 2071 */ 2072 ret = nvmet_pci_epf_alloc_irq_vectors(ctrl); 2073 if (ret) 2074 goto out_free_queues; 2075 2076 dev_info(ctrl->dev, 2077 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n", 2078 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1, 2079 ctrl->mdts); 2080 2081 /* Initialize BAR 0 using the target controller CAP. 
static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	dev_info(ctrl->dev, "PCI link up\n");
	ctrl->link_up = true;

	schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}

static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	dev_info(ctrl->dev, "PCI link down\n");
	ctrl->link_up = false;

	cancel_delayed_work_sync(&ctrl->poll_cc);

	nvmet_pci_epf_disable_ctrl(ctrl, false);
	nvmet_pci_epf_clear_ctrl_config(ctrl);
}

static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	if (!ctrl->tctrl)
		return;

	dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
		 ctrl->tctrl->subsys->subsysnqn);

	nvmet_pci_epf_stop_ctrl(ctrl);

	nvmet_pci_epf_free_queues(ctrl);
	nvmet_pci_epf_free_irq_vectors(ctrl);

	nvmet_ctrl_put(ctrl->tctrl);
	ctrl->tctrl = NULL;

	mempool_exit(&ctrl->iod_pool);
}
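/*
 * Allocate the register BAR (BAR 0): size it to hold the NVMe registers and
 * the doorbells for all queues, followed by the MSI-X table and PBA when
 * MSI-X is supported by the endpoint controller.
 */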
static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;
	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
	size_t reg_size, reg_bar_size;
	size_t msix_table_size = 0;

	/*
	 * The first free BAR will be our register BAR and per NVMe
	 * specifications, it must be BAR 0.
	 */
	if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
		dev_err(&epf->dev, "BAR 0 is not free\n");
		return -ENODEV;
	}

	/*
	 * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims
	 * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type
	 * is required to be 64-bit. Thus, for interoperability, always set the
	 * type to 64-bit. In the rare case that the PCI EPC does not support
	 * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail,
	 * and we will return failure back to the user.
	 */
	epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

	/*
	 * Calculate the size of the register BAR: NVMe registers first with
	 * enough space for the doorbells, followed by the MSI-X table
	 * if supported.
	 */
	reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
	reg_size = ALIGN(reg_size, 8);

	if (epc_features->msix_capable) {
		size_t pba_size;

		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		nvme_epf->msix_table_offset = reg_size;
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);

		reg_size += msix_table_size + pba_size;
	}

	if (epc_features->bar[BAR_0].type == BAR_FIXED) {
		if (reg_size > epc_features->bar[BAR_0].fixed_size) {
			dev_err(&epf->dev,
				"BAR 0 size %llu B too small, need %zu B\n",
				epc_features->bar[BAR_0].fixed_size,
				reg_size);
			return -ENOMEM;
		}
		reg_bar_size = epc_features->bar[BAR_0].fixed_size;
	} else {
		reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
	}

	nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
						epc_features, PRIMARY_INTERFACE);
	if (!nvme_epf->reg_bar) {
		dev_err(&epf->dev, "Failed to allocate BAR 0\n");
		return -ENOMEM;
	}
	memset(nvme_epf->reg_bar, 0, reg_bar_size);

	return 0;
}

static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;

	if (!nvme_epf->reg_bar)
		return;

	pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
	nvme_epf->reg_bar = NULL;
}

static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;

	pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
			  &epf->bar[BAR_0]);
}

static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
{
	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
	struct pci_epf *epf = nvme_epf->epf;
	int ret;

	/* Enable MSI-X if supported, otherwise, use MSI. */
	if (epc_features->msix_capable && epf->msix_interrupts) {
		ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts, BAR_0,
				       nvme_epf->msix_table_offset);
		if (ret) {
			dev_err(&epf->dev, "Failed to configure MSI-X\n");
			return ret;
		}

		nvme_epf->nr_vectors = epf->msix_interrupts;
		nvme_epf->irq_type = PCI_IRQ_MSIX;

		return 0;
	}

	if (epc_features->msi_capable && epf->msi_interrupts) {
		ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(&epf->dev, "Failed to configure MSI\n");
			return ret;
		}

		nvme_epf->nr_vectors = epf->msi_interrupts;
		nvme_epf->irq_type = PCI_IRQ_MSI;

		return 0;
	}

	/* MSI and MSI-X are not supported: fall back to INTx. */
	nvme_epf->nr_vectors = 1;
	nvme_epf->irq_type = PCI_IRQ_INTX;

	return 0;
}
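/*
 * Endpoint controller initialization: cap the number of queues to the number
 * of available IRQ vectors, create the target controller, write the PCI
 * configuration header, set up BAR 0 and interrupts, and start the controller
 * right away if the endpoint controller has no link up notifier.
 */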
2282 */ 2283 if (epc_features->msix_capable && epf->msix_interrupts) { 2284 dev_info(&epf->dev, 2285 "PCI endpoint controller supports MSI-X, %u vectors\n", 2286 epf->msix_interrupts); 2287 max_nr_queues = min(max_nr_queues, epf->msix_interrupts); 2288 } else if (epc_features->msi_capable && epf->msi_interrupts) { 2289 dev_info(&epf->dev, 2290 "PCI endpoint controller supports MSI, %u vectors\n", 2291 epf->msi_interrupts); 2292 max_nr_queues = min(max_nr_queues, epf->msi_interrupts); 2293 } 2294 2295 if (max_nr_queues < 2) { 2296 dev_err(&epf->dev, "Invalid maximum number of queues %u\n", 2297 max_nr_queues); 2298 return -EINVAL; 2299 } 2300 2301 /* Create the target controller. */ 2302 ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues); 2303 if (ret) { 2304 dev_err(&epf->dev, 2305 "Failed to create NVMe PCI target controller (err=%d)\n", 2306 ret); 2307 return ret; 2308 } 2309 2310 /* Set device ID, class, etc. */ 2311 epf->header->vendorid = ctrl->tctrl->subsys->vendor_id; 2312 epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id; 2313 ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no, 2314 epf->header); 2315 if (ret) { 2316 dev_err(&epf->dev, 2317 "Failed to write configuration header (err=%d)\n", ret); 2318 goto out_destroy_ctrl; 2319 } 2320 2321 ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, 2322 &epf->bar[BAR_0]); 2323 if (ret) { 2324 dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret); 2325 goto out_destroy_ctrl; 2326 } 2327 2328 /* 2329 * Enable interrupts and start polling the controller BAR if we do not 2330 * have a link up notifier. 2331 */ 2332 ret = nvmet_pci_epf_init_irq(nvme_epf); 2333 if (ret) 2334 goto out_clear_bar; 2335 2336 if (!epc_features->linkup_notifier) 2337 nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl); 2338 2339 return 0; 2340 2341 out_clear_bar: 2342 nvmet_pci_epf_clear_bar(nvme_epf); 2343 out_destroy_ctrl: 2344 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); 2345 return ret; 2346 } 2347 2348 static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf) 2349 { 2350 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); 2351 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; 2352 2353 nvmet_pci_epf_destroy_ctrl(ctrl); 2354 2355 nvmet_pci_epf_deinit_dma(nvme_epf); 2356 nvmet_pci_epf_clear_bar(nvme_epf); 2357 } 2358 2359 static int nvmet_pci_epf_link_up(struct pci_epf *epf) 2360 { 2361 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); 2362 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; 2363 2364 nvmet_pci_epf_start_ctrl(ctrl); 2365 2366 return 0; 2367 } 2368 2369 static int nvmet_pci_epf_link_down(struct pci_epf *epf) 2370 { 2371 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); 2372 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; 2373 2374 nvmet_pci_epf_stop_ctrl(ctrl); 2375 2376 return 0; 2377 } 2378 2379 static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = { 2380 .epc_init = nvmet_pci_epf_epc_init, 2381 .epc_deinit = nvmet_pci_epf_epc_deinit, 2382 .link_up = nvmet_pci_epf_link_up, 2383 .link_down = nvmet_pci_epf_link_down, 2384 }; 2385 2386 static int nvmet_pci_epf_bind(struct pci_epf *epf) 2387 { 2388 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); 2389 const struct pci_epc_features *epc_features; 2390 struct pci_epc *epc = epf->epc; 2391 int ret; 2392 2393 if (WARN_ON_ONCE(!epc)) 2394 return -EINVAL; 2395 2396 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); 2397 if (!epc_features) { 2398 dev_err(&epf->dev, "epc_features not implemented\n"); 2399 return 
static int nvmet_pci_epf_bind(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	int ret;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}
	nvme_epf->epc_features = epc_features;

	ret = nvmet_pci_epf_configure_bar(nvme_epf);
	if (ret)
		return ret;

	nvmet_pci_epf_init_dma(nvme_epf);

	return 0;
}

static void nvmet_pci_epf_unbind(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);

	if (epc->init_complete) {
		nvmet_pci_epf_deinit_dma(nvme_epf);
		nvmet_pci_epf_clear_bar(nvme_epf);
	}

	nvmet_pci_epf_free_bar(nvme_epf);
}

static struct pci_epf_header nvme_epf_pci_header = {
	.vendorid = PCI_ANY_ID,
	.deviceid = PCI_ANY_ID,
	.progif_code = 0x02, /* NVM Express */
	.baseclass_code = PCI_BASE_CLASS_STORAGE,
	.subclass_code = 0x08, /* Non-Volatile Memory controller */
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static int nvmet_pci_epf_probe(struct pci_epf *epf,
			       const struct pci_epf_device_id *id)
{
	struct nvmet_pci_epf *nvme_epf;
	int ret;

	nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
	if (!nvme_epf)
		return -ENOMEM;

	ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
	if (ret)
		return ret;

	nvme_epf->epf = epf;
	nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;

	epf->event_ops = &nvmet_pci_epf_event_ops;
	epf->header = &nvme_epf_pci_header;
	epf_set_drvdata(epf, nvme_epf);

	return 0;
}

#define to_nvme_epf(epf_group)	\
	container_of(epf_group, struct nvmet_pci_epf, group)

static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
}

static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
					  const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
	u16 portid;

	/* Do not allow setting this when the function is already started. */
	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	if (!len)
		return -EINVAL;

	if (kstrtou16(page, 0, &portid))
		return -EINVAL;

	nvme_epf->portid = cpu_to_le16(portid);

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, portid);

static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
					    char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
}

static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
					     const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	/* Do not allow setting this when the function is already started. */
	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	if (!len)
		return -EINVAL;

	strscpy(nvme_epf->subsysnqn, page, len);

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);
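/*
 * mdts_kb configfs attribute: maximum data transfer size in KiB. A value of 0
 * selects the default (NVMET_PCI_EPF_MDTS_KB), values larger than
 * NVMET_PCI_EPF_MAX_MDTS_KB are clamped, and the value must be a power of two.
 */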
static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
}

static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
					   const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
	unsigned long mdts_kb;
	int ret;

	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	ret = kstrtoul(page, 0, &mdts_kb);
	if (ret)
		return ret;
	if (!mdts_kb)
		mdts_kb = NVMET_PCI_EPF_MDTS_KB;
	else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
		mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;

	if (!is_power_of_2(mdts_kb))
		return -EINVAL;

	nvme_epf->mdts_kb = mdts_kb;

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);

static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
	&nvmet_pci_epf_attr_portid,
	&nvmet_pci_epf_attr_subsysnqn,
	&nvmet_pci_epf_attr_mdts_kb,
	NULL,
};

static const struct config_item_type nvmet_pci_epf_group_type = {
	.ct_attrs = nvmet_pci_epf_attrs,
	.ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
						  struct config_group *group)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);

	config_group_init_type_name(&nvme_epf->group, "nvme",
				    &nvmet_pci_epf_group_type);

	return &nvme_epf->group;
}

static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
	{ .name = "nvmet_pci_epf" },
	{},
};

static struct pci_epf_ops nvmet_pci_epf_ops = {
	.bind = nvmet_pci_epf_bind,
	.unbind = nvmet_pci_epf_unbind,
	.add_cfs = nvmet_pci_epf_add_cfs,
};

static struct pci_epf_driver nvmet_pci_epf_driver = {
	.driver.name = "nvmet_pci_epf",
	.probe = nvmet_pci_epf_probe,
	.id_table = nvmet_pci_epf_ids,
	.ops = &nvmet_pci_epf_ops,
	.owner = THIS_MODULE,
};

static int __init nvmet_pci_epf_init_module(void)
{
	int ret;

	ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
	if (ret) {
		pci_epf_unregister_driver(&nvmet_pci_epf_driver);
		return ret;
	}

	return 0;
}

static void __exit nvmet_pci_epf_cleanup_module(void)
{
	nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
	pci_epf_unregister_driver(&nvmet_pci_epf_driver);
}

module_init(nvmet_pci_epf_init_module);
module_exit(nvmet_pci_epf_cleanup_module);

MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
MODULE_LICENSE("GPL");