1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe PCI Endpoint Function target driver.
4 *
5 * Copyright (c) 2024, Western Digital Corporation or its affiliates.
6 * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com>
7 * REDS Institute, HEIG-VD, HES-SO, Switzerland
8 */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/delay.h>
12 #include <linux/dmaengine.h>
13 #include <linux/io.h>
14 #include <linux/mempool.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/nvme.h>
18 #include <linux/pci_ids.h>
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
21 #include <linux/pci_regs.h>
22 #include <linux/slab.h>
23
24 #include "nvmet.h"
25
26 static LIST_HEAD(nvmet_pci_epf_ports);
27 static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
28
29 /*
30 * Default and maximum allowed data transfer size. For the default,
31 * allow up to 128 page-sized segments. For the maximum allowed,
32 * use 4 times the default (which is completely arbitrary).
33 */
34 #define NVMET_PCI_EPF_MAX_SEGS 128
35 #define NVMET_PCI_EPF_MDTS_KB \
36 (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
37 #define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4)
38
/*
 * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an
 * interrupt vector to the host. This default of 8 is completely arbitrary and
 * can be changed by the host with an nvme_set_features command.
 */
44 #define NVMET_PCI_EPF_IV_THRESHOLD 8
45
46 /*
47 * BAR CC register and SQ polling intervals.
48 */
49 #define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10)
50 #define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5)
51 #define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000)
52
53 /*
54 * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ.
55 */
56 #define NVMET_PCI_EPF_SQ_AB 8
57
58 /*
59 * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ
60 * is full, in which case we retry the CQ processing after this interval.
61 */
62 #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1)
63
64 enum nvmet_pci_epf_queue_flags {
65 NVMET_PCI_EPF_Q_LIVE = 0, /* The queue is live */
66 NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */
67 };
68
69 /*
70 * IRQ vector descriptor.
71 */
72 struct nvmet_pci_epf_irq_vector {
73 unsigned int vector;
74 unsigned int ref;
75 bool cd;
76 int nr_irqs;
77 };
78
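/*
 * PCI queue descriptor, used for both submission and completion queues
 * (hence the union of nvmet_sq and nvmet_cq below).
 */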
79 struct nvmet_pci_epf_queue {
80 union {
81 struct nvmet_sq nvme_sq;
82 struct nvmet_cq nvme_cq;
83 };
84 struct nvmet_pci_epf_ctrl *ctrl;
85 unsigned long flags;
86
87 u64 pci_addr;
88 size_t pci_size;
89 struct pci_epc_map pci_map;
90
91 u16 qid;
92 u16 depth;
93 u16 vector;
94 u16 head;
95 u16 tail;
96 u16 phase;
97 u32 db;
98
99 size_t qes;
100
101 struct nvmet_pci_epf_irq_vector *iv;
102 struct workqueue_struct *iod_wq;
103 struct delayed_work work;
104 spinlock_t lock;
105 struct list_head list;
106 };
107
108 /*
109 * PCI Root Complex (RC) address data segment for mapping an admin or
110 * I/O command buffer @buf of @length bytes to the PCI address @pci_addr.
111 */
112 struct nvmet_pci_epf_segment {
113 void *buf;
114 u64 pci_addr;
115 u32 length;
116 };
117
118 /*
119 * Command descriptors.
120 */
121 struct nvmet_pci_epf_iod {
122 struct list_head link;
123
124 struct nvmet_req req;
125 struct nvme_command cmd;
126 struct nvme_completion cqe;
127 unsigned int status;
128
129 struct nvmet_pci_epf_ctrl *ctrl;
130
131 struct nvmet_pci_epf_queue *sq;
132 struct nvmet_pci_epf_queue *cq;
133
134 /* Data transfer size and direction for the command. */
135 size_t data_len;
136 enum dma_data_direction dma_dir;
137
138 /*
139 * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we
140 * use only @data_seg. Otherwise, the array of segments @data_segs is
141 * allocated to manage multiple PCI address data segments. @data_sgl and
142 * @data_sgt are used to setup the command request for execution by the
143 * target core.
144 */
145 unsigned int nr_data_segs;
146 struct nvmet_pci_epf_segment data_seg;
147 struct nvmet_pci_epf_segment *data_segs;
148 struct scatterlist data_sgl;
149 struct sg_table data_sgt;
150
151 struct work_struct work;
152 struct completion done;
153 };
154
155 /*
156 * PCI target controller private data.
157 */
158 struct nvmet_pci_epf_ctrl {
159 struct nvmet_pci_epf *nvme_epf;
160 struct nvmet_port *port;
161 struct nvmet_ctrl *tctrl;
162 struct device *dev;
163
164 unsigned int nr_queues;
165 struct nvmet_pci_epf_queue *sq;
166 struct nvmet_pci_epf_queue *cq;
167 unsigned int sq_ab;
168
169 mempool_t iod_pool;
170 void *bar;
171 u64 cap;
172 u32 cc;
173 u32 csts;
174
175 size_t io_sqes;
176 size_t io_cqes;
177
178 size_t mps_shift;
179 size_t mps;
180 size_t mps_mask;
181
182 unsigned int mdts;
183
184 struct delayed_work poll_cc;
185 struct delayed_work poll_sqs;
186
187 struct mutex irq_lock;
188 struct nvmet_pci_epf_irq_vector *irq_vectors;
189 unsigned int irq_vector_threshold;
190
191 bool link_up;
192 bool enabled;
193 };
194
195 /*
196 * PCI EPF driver private data.
197 */
198 struct nvmet_pci_epf {
199 struct pci_epf *epf;
200
201 const struct pci_epc_features *epc_features;
202
203 void *reg_bar;
204 size_t msix_table_offset;
205
206 unsigned int irq_type;
207 unsigned int nr_vectors;
208
209 struct nvmet_pci_epf_ctrl ctrl;
210
211 bool dma_enabled;
212 struct dma_chan *dma_tx_chan;
213 struct mutex dma_tx_lock;
214 struct dma_chan *dma_rx_chan;
215 struct mutex dma_rx_lock;
216
217 struct mutex mmio_lock;
218
219 /* PCI endpoint function configfs attributes. */
220 struct config_group group;
221 __le16 portid;
222 char subsysnqn[NVMF_NQN_SIZE];
223 unsigned int mdts_kb;
224 };
225
static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl,
		u32 off)
228 {
229 __le32 *bar_reg = ctrl->bar + off;
230
231 return le32_to_cpu(READ_ONCE(*bar_reg));
232 }
233
static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl,
		u32 off, u32 val)
236 {
237 __le32 *bar_reg = ctrl->bar + off;
238
239 WRITE_ONCE(*bar_reg, cpu_to_le32(val));
240 }
241
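/*
 * 64-bit BAR registers (e.g. ASQ and ACQ) are accessed as two 32-bit halves,
 * low dword first.
 */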
static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl,
		u32 off)
244 {
245 return (u64)nvmet_pci_epf_bar_read32(ctrl, off) |
246 ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32);
247 }
248
static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl,
		u32 off, u64 val)
251 {
252 nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF);
253 nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF);
254 }
255
static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf,
		u64 pci_addr, size_t size, struct pci_epc_map *map)
258 {
259 struct pci_epf *epf = nvme_epf->epf;
260
261 return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
262 pci_addr, size, map);
263 }
264
static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf,
		struct pci_epc_map *map)
267 {
268 struct pci_epf *epf = nvme_epf->epf;
269
270 pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map);
271 }
272
273 struct nvmet_pci_epf_dma_filter {
274 struct device *dev;
275 u32 dma_mask;
276 };
277
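/*
 * DMA channel filter: only accept channels provided by the endpoint
 * controller parent device that support the requested transfer direction.
 */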
static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg)
279 {
280 struct nvmet_pci_epf_dma_filter *filter = arg;
281 struct dma_slave_caps caps;
282
283 memset(&caps, 0, sizeof(caps));
284 dma_get_slave_caps(chan, &caps);
285
286 return chan->device->dev == filter->dev &&
287 (filter->dma_mask & caps.directions);
288 }
289
static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
291 {
292 struct pci_epf *epf = nvme_epf->epf;
293 struct device *dev = &epf->dev;
294 struct nvmet_pci_epf_dma_filter filter;
295 struct dma_chan *chan;
296 dma_cap_mask_t mask;
297
298 mutex_init(&nvme_epf->dma_rx_lock);
299 mutex_init(&nvme_epf->dma_tx_lock);
300
301 dma_cap_zero(mask);
302 dma_cap_set(DMA_SLAVE, mask);
303
304 filter.dev = epf->epc->dev.parent;
305 filter.dma_mask = BIT(DMA_DEV_TO_MEM);
306
307 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
308 if (!chan)
309 goto out_dma_no_rx;
310
311 nvme_epf->dma_rx_chan = chan;
312
313 filter.dma_mask = BIT(DMA_MEM_TO_DEV);
314 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
315 if (!chan)
316 goto out_dma_no_tx;
317
318 nvme_epf->dma_tx_chan = chan;
319
320 nvme_epf->dma_enabled = true;
321
322 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
323 dma_chan_name(nvme_epf->dma_rx_chan),
324 dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
325 dma_rx_chan)));
326
327 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
328 dma_chan_name(nvme_epf->dma_tx_chan),
329 dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
330 dma_tx_chan)));
331
332 return;
333
334 out_dma_no_tx:
335 dma_release_channel(nvme_epf->dma_rx_chan);
336 nvme_epf->dma_rx_chan = NULL;
337
338 out_dma_no_rx:
339 mutex_destroy(&nvme_epf->dma_rx_lock);
340 mutex_destroy(&nvme_epf->dma_tx_lock);
341 nvme_epf->dma_enabled = false;
342
343 dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n");
344 }
345
static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf)
347 {
348 if (!nvme_epf->dma_enabled)
349 return;
350
351 dma_release_channel(nvme_epf->dma_tx_chan);
352 nvme_epf->dma_tx_chan = NULL;
353 dma_release_channel(nvme_epf->dma_rx_chan);
354 nvme_epf->dma_rx_chan = NULL;
355 mutex_destroy(&nvme_epf->dma_rx_lock);
356 mutex_destroy(&nvme_epf->dma_tx_lock);
357 nvme_epf->dma_enabled = false;
358 }
359
static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf,
		struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
362 {
363 struct pci_epf *epf = nvme_epf->epf;
364 struct dma_async_tx_descriptor *desc;
365 struct dma_slave_config sconf = {};
366 struct device *dev = &epf->dev;
367 struct device *dma_dev;
368 struct dma_chan *chan;
369 dma_cookie_t cookie;
370 dma_addr_t dma_addr;
371 struct mutex *lock;
372 int ret;
373
374 switch (dir) {
375 case DMA_FROM_DEVICE:
376 lock = &nvme_epf->dma_rx_lock;
377 chan = nvme_epf->dma_rx_chan;
378 sconf.direction = DMA_DEV_TO_MEM;
379 sconf.src_addr = seg->pci_addr;
380 break;
381 case DMA_TO_DEVICE:
382 lock = &nvme_epf->dma_tx_lock;
383 chan = nvme_epf->dma_tx_chan;
384 sconf.direction = DMA_MEM_TO_DEV;
385 sconf.dst_addr = seg->pci_addr;
386 break;
387 default:
388 return -EINVAL;
389 }
390
391 mutex_lock(lock);
392
393 dma_dev = dmaengine_get_dma_device(chan);
394 dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir);
395 ret = dma_mapping_error(dma_dev, dma_addr);
396 if (ret)
397 goto unlock;
398
399 ret = dmaengine_slave_config(chan, &sconf);
400 if (ret) {
401 dev_err(dev, "Failed to configure DMA channel\n");
402 goto unmap;
403 }
404
405 desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length,
406 sconf.direction, DMA_CTRL_ACK);
407 if (!desc) {
408 dev_err(dev, "Failed to prepare DMA\n");
409 ret = -EIO;
410 goto unmap;
411 }
412
413 cookie = dmaengine_submit(desc);
414 ret = dma_submit_error(cookie);
415 if (ret) {
416 dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret);
417 goto unmap;
418 }
419
420 if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
421 dev_err(dev, "DMA transfer failed\n");
422 ret = -EIO;
423 }
424
425 dmaengine_terminate_sync(chan);
426
427 unmap:
428 dma_unmap_single(dma_dev, dma_addr, seg->length, dir);
429
430 unlock:
431 mutex_unlock(lock);
432
433 return ret;
434 }
435
static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf,
		struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
438 {
439 u64 pci_addr = seg->pci_addr;
440 u32 length = seg->length;
441 void *buf = seg->buf;
442 struct pci_epc_map map;
443 int ret = -EINVAL;
444
445 /*
446 * Note: MMIO transfers do not need serialization but this is a
447 * simple way to avoid using too many mapping windows.
448 */
449 mutex_lock(&nvme_epf->mmio_lock);
450
451 while (length) {
452 ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map);
453 if (ret)
454 break;
455
456 switch (dir) {
457 case DMA_FROM_DEVICE:
458 memcpy_fromio(buf, map.virt_addr, map.pci_size);
459 break;
460 case DMA_TO_DEVICE:
461 memcpy_toio(map.virt_addr, buf, map.pci_size);
462 break;
463 default:
464 ret = -EINVAL;
465 goto unlock;
466 }
467
468 pci_addr += map.pci_size;
469 buf += map.pci_size;
470 length -= map.pci_size;
471
472 nvmet_pci_epf_mem_unmap(nvme_epf, &map);
473 }
474
475 unlock:
476 mutex_unlock(&nvme_epf->mmio_lock);
477
478 return ret;
479 }
480
static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf,
		struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
483 {
484 if (nvme_epf->dma_enabled)
485 return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir);
486
487 return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir);
488 }
489
static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl,
		void *buf, u64 pci_addr, u32 length,
		enum dma_data_direction dir)
493 {
494 struct nvmet_pci_epf_segment seg = {
495 .buf = buf,
496 .pci_addr = pci_addr,
497 .length = length,
498 };
499
500 return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir);
501 }
502
static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
504 {
505 ctrl->irq_vectors = kcalloc(ctrl->nr_queues,
506 sizeof(struct nvmet_pci_epf_irq_vector),
507 GFP_KERNEL);
508 if (!ctrl->irq_vectors)
509 return -ENOMEM;
510
511 mutex_init(&ctrl->irq_lock);
512
513 return 0;
514 }
515
static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
517 {
518 if (ctrl->irq_vectors) {
519 mutex_destroy(&ctrl->irq_lock);
520 kfree(ctrl->irq_vectors);
521 ctrl->irq_vectors = NULL;
522 }
523 }
524
525 static struct nvmet_pci_epf_irq_vector *
nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
527 {
528 struct nvmet_pci_epf_irq_vector *iv;
529 int i;
530
531 lockdep_assert_held(&ctrl->irq_lock);
532
533 for (i = 0; i < ctrl->nr_queues; i++) {
534 iv = &ctrl->irq_vectors[i];
535 if (iv->ref && iv->vector == vector)
536 return iv;
537 }
538
539 return NULL;
540 }
541
542 static struct nvmet_pci_epf_irq_vector *
nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
544 {
545 struct nvmet_pci_epf_irq_vector *iv;
546 int i;
547
548 mutex_lock(&ctrl->irq_lock);
549
550 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
551 if (iv) {
552 iv->ref++;
553 goto unlock;
554 }
555
556 for (i = 0; i < ctrl->nr_queues; i++) {
557 iv = &ctrl->irq_vectors[i];
558 if (!iv->ref)
559 break;
560 }
561
562 if (WARN_ON_ONCE(!iv))
563 goto unlock;
564
565 iv->ref = 1;
566 iv->vector = vector;
567 iv->nr_irqs = 0;
568
569 unlock:
570 mutex_unlock(&ctrl->irq_lock);
571
572 return iv;
573 }
574
static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl,
		u16 vector)
577 {
578 struct nvmet_pci_epf_irq_vector *iv;
579
580 mutex_lock(&ctrl->irq_lock);
581
582 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
583 if (iv) {
584 iv->ref--;
585 if (!iv->ref) {
586 iv->vector = 0;
587 iv->nr_irqs = 0;
588 }
589 }
590
591 mutex_unlock(&ctrl->irq_lock);
592 }
593
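/*
 * Per-vector IRQ coalescing: raise an interrupt only once the number of
 * pending CQEs for the vector reaches the threshold, unless coalescing is
 * disabled for the vector (cd set) or the CQ is the admin CQ.
 */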
static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
		struct nvmet_pci_epf_queue *cq, bool force)
596 {
597 struct nvmet_pci_epf_irq_vector *iv = cq->iv;
598 bool ret;
599
600 /* IRQ coalescing for the admin queue is not allowed. */
601 if (!cq->qid)
602 return true;
603
604 if (iv->cd)
605 return true;
606
607 if (force) {
608 ret = iv->nr_irqs > 0;
609 } else {
610 iv->nr_irqs++;
611 ret = iv->nr_irqs >= ctrl->irq_vector_threshold;
612 }
613 if (ret)
614 iv->nr_irqs = 0;
615
616 return ret;
617 }
618
static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
		struct nvmet_pci_epf_queue *cq, bool force)
621 {
622 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
623 struct pci_epf *epf = nvme_epf->epf;
624 int ret = 0;
625
626 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) ||
627 !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
628 return;
629
630 mutex_lock(&ctrl->irq_lock);
631
632 if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force))
633 goto unlock;
634
635 switch (nvme_epf->irq_type) {
636 case PCI_IRQ_MSIX:
637 case PCI_IRQ_MSI:
		/*
		 * If we fail to raise an MSI or MSI-X interrupt, it is likely
		 * because the host is using legacy INTX IRQs (e.g. BIOS,
		 * grub), but we can fall back to the INTX type only if the
		 * endpoint controller supports this type.
		 */
644 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
645 nvme_epf->irq_type, cq->vector + 1);
646 if (!ret || !nvme_epf->epc_features->intx_capable)
647 break;
648 fallthrough;
649 case PCI_IRQ_INTX:
650 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
651 PCI_IRQ_INTX, 0);
652 break;
653 default:
654 WARN_ON_ONCE(1);
655 ret = -EINVAL;
656 break;
657 }
658
659 if (ret)
660 dev_err_ratelimited(ctrl->dev,
661 "CQ[%u]: Failed to raise IRQ (err=%d)\n",
662 cq->qid, ret);
663
664 unlock:
665 mutex_unlock(&ctrl->irq_lock);
666 }
667
static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod)
669 {
670 return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);
671 }
672
673 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work);
674
675 static struct nvmet_pci_epf_iod *
nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq)
677 {
678 struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl;
679 struct nvmet_pci_epf_iod *iod;
680
681 iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL);
682 if (unlikely(!iod))
683 return NULL;
684
685 memset(iod, 0, sizeof(*iod));
686 iod->req.cmd = &iod->cmd;
687 iod->req.cqe = &iod->cqe;
688 iod->req.port = ctrl->port;
689 iod->ctrl = ctrl;
690 iod->sq = sq;
691 iod->cq = &ctrl->cq[sq->qid];
692 INIT_LIST_HEAD(&iod->link);
693 iod->dma_dir = DMA_NONE;
694 INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work);
695 init_completion(&iod->done);
696
697 return iod;
698 }
699
700 /*
701 * Allocate or grow a command table of PCI segments.
702 */
static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod,
		int nsegs)
705 {
706 struct nvmet_pci_epf_segment *segs;
707 int nr_segs = iod->nr_data_segs + nsegs;
708
709 segs = krealloc(iod->data_segs,
710 nr_segs * sizeof(struct nvmet_pci_epf_segment),
711 GFP_KERNEL | __GFP_ZERO);
712 if (!segs)
713 return -ENOMEM;
714
715 iod->nr_data_segs = nr_segs;
716 iod->data_segs = segs;
717
718 return 0;
719 }
720
static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)
722 {
723 int i;
724
725 if (iod->data_segs) {
726 for (i = 0; i < iod->nr_data_segs; i++)
727 kfree(iod->data_segs[i].buf);
728 if (iod->data_segs != &iod->data_seg)
729 kfree(iod->data_segs);
730 }
731 if (iod->data_sgt.nents > 1)
732 sg_free_table(&iod->data_sgt);
733 mempool_free(iod, &iod->ctrl->iod_pool);
734 }
735
static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)
737 {
738 struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf;
739 struct nvmet_pci_epf_segment *seg = &iod->data_segs[0];
740 int i, ret;
741
742 /* Split the data transfer according to the PCI segments. */
743 for (i = 0; i < iod->nr_data_segs; i++, seg++) {
744 ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir);
745 if (ret) {
746 iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR;
747 return ret;
748 }
749 }
750
751 return 0;
752 }
753
static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl,
		u64 prp)
756 {
757 return prp & ctrl->mps_mask;
758 }
759
static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl,
		u64 prp)
762 {
763 return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp);
764 }
765
/*
 * Transfer a PRP list from the host and return the number of PRPs.
 */
static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp,
		size_t xfer_len, __le64 *prps)
771 {
772 size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift;
773 u32 length;
774 int ret;
775
776 /*
777 * Compute the number of PRPs required for the number of bytes to
778 * transfer (xfer_len). If this number overflows the memory page size
779 * with the PRP list pointer specified, only return the space available
780 * in the memory page, the last PRP in there will be a PRP list pointer
781 * to the remaining PRPs.
782 */
783 length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3);
784 ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE);
785 if (ret)
786 return ret;
787
788 return length >> 3;
789 }
790
static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl,
		struct nvmet_pci_epf_iod *iod)
793 {
794 struct nvme_command *cmd = &iod->cmd;
795 struct nvmet_pci_epf_segment *seg;
796 size_t size = 0, ofst, prp_size, xfer_len;
797 size_t transfer_len = iod->data_len;
798 int nr_segs, nr_prps = 0;
799 u64 pci_addr, prp;
800 int i = 0, ret;
801 __le64 *prps;
802
803 prps = kzalloc(ctrl->mps, GFP_KERNEL);
804 if (!prps)
805 goto err_internal;
806
	/*
	 * Allocate PCI segments for the command: this considers the worst case
	 * scenario where all PRPs are discontiguous, so get as many segments
	 * as we can have PRPs. In practice, most of the time, we will have
	 * far fewer PCI segments than PRPs.
	 */
813 prp = le64_to_cpu(cmd->common.dptr.prp1);
814 if (!prp)
815 goto err_invalid_field;
816
817 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp);
818 nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift;
819
820 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
821 if (ret)
822 goto err_internal;
823
824 /* Set the first segment using prp1. */
825 seg = &iod->data_segs[0];
826 seg->pci_addr = prp;
827 seg->length = nvmet_pci_epf_prp_size(ctrl, prp);
828
829 size = seg->length;
830 pci_addr = prp + size;
831 nr_segs = 1;
832
833 /*
834 * Now build the PCI address segments using the PRP lists, starting
835 * from prp2.
836 */
837 prp = le64_to_cpu(cmd->common.dptr.prp2);
838 if (!prp)
839 goto err_invalid_field;
840
841 while (size < transfer_len) {
842 xfer_len = transfer_len - size;
843
844 if (!nr_prps) {
845 nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp,
846 xfer_len, prps);
847 if (nr_prps < 0)
848 goto err_internal;
849
850 i = 0;
851 ofst = 0;
852 }
853
854 /* Current entry */
855 prp = le64_to_cpu(prps[i]);
856 if (!prp)
857 goto err_invalid_field;
858
859 /* Did we reach the last PRP entry of the list? */
860 if (xfer_len > ctrl->mps && i == nr_prps - 1) {
861 /* We need more PRPs: PRP is a list pointer. */
862 nr_prps = 0;
863 continue;
864 }
865
866 /* Only the first PRP is allowed to have an offset. */
867 if (nvmet_pci_epf_prp_ofst(ctrl, prp))
868 goto err_invalid_offset;
869
870 if (prp != pci_addr) {
871 /* Discontiguous prp: new segment. */
872 nr_segs++;
873 if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs))
874 goto err_internal;
875
876 seg++;
877 seg->pci_addr = prp;
878 seg->length = 0;
879 pci_addr = prp;
880 }
881
882 prp_size = min_t(size_t, ctrl->mps, xfer_len);
883 seg->length += prp_size;
884 pci_addr += prp_size;
885 size += prp_size;
886
887 i++;
888 }
889
890 iod->nr_data_segs = nr_segs;
891 ret = 0;
892
893 if (size != transfer_len) {
894 dev_err(ctrl->dev,
895 "PRPs transfer length mismatch: got %zu B, need %zu B\n",
896 size, transfer_len);
897 goto err_internal;
898 }
899
900 kfree(prps);
901
902 return 0;
903
904 err_invalid_offset:
905 dev_err(ctrl->dev, "PRPs list invalid offset\n");
906 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
907 goto err;
908
909 err_invalid_field:
910 dev_err(ctrl->dev, "PRPs list invalid field\n");
911 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
912 goto err;
913
914 err_internal:
915 dev_err(ctrl->dev, "PRPs list internal error\n");
916 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
917
918 err:
919 kfree(prps);
920 return -EINVAL;
921 }
922
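/*
 * Handle commands with a data buffer fitting in at most two memory pages:
 * prp1 and (optionally) prp2 are used directly as data pointers, without a
 * PRP list.
 */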
static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl,
		struct nvmet_pci_epf_iod *iod)
925 {
926 struct nvme_command *cmd = &iod->cmd;
927 size_t transfer_len = iod->data_len;
928 int ret, nr_segs = 1;
929 u64 prp1, prp2 = 0;
930 size_t prp1_size;
931
932 prp1 = le64_to_cpu(cmd->common.dptr.prp1);
933 prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1);
934
935 /* For commands crossing a page boundary, we should have prp2. */
936 if (transfer_len > prp1_size) {
937 prp2 = le64_to_cpu(cmd->common.dptr.prp2);
938 if (!prp2) {
939 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
940 return -EINVAL;
941 }
942 if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) {
943 iod->status =
944 NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
945 return -EINVAL;
946 }
947 if (prp2 != prp1 + prp1_size)
948 nr_segs = 2;
949 }
950
951 if (nr_segs == 1) {
952 iod->nr_data_segs = 1;
953 iod->data_segs = &iod->data_seg;
954 iod->data_segs[0].pci_addr = prp1;
955 iod->data_segs[0].length = transfer_len;
956 return 0;
957 }
958
959 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
960 if (ret) {
961 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
962 return ret;
963 }
964
965 iod->data_segs[0].pci_addr = prp1;
966 iod->data_segs[0].length = prp1_size;
967 iod->data_segs[1].pci_addr = prp2;
968 iod->data_segs[1].length = transfer_len - prp1_size;
969
970 return 0;
971 }
972
static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod)
974 {
975 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
976 u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1);
977 size_t ofst;
978
979 /* Get the PCI address segments for the command using its PRPs. */
980 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1);
981 if (ofst & 0x3) {
982 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
983 return -EINVAL;
984 }
985
986 if (iod->data_len + ofst <= ctrl->mps * 2)
987 return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod);
988
989 return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod);
990 }
991
992 /*
993 * Transfer an SGL segment from the host and return the number of data
994 * descriptors and the next segment descriptor, if any.
995 */
996 static struct nvme_sgl_desc *
nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl,
		struct nvme_sgl_desc *desc, unsigned int *nr_sgls)
999 {
1000 struct nvme_sgl_desc *sgls;
1001 u32 length = le32_to_cpu(desc->length);
1002 int nr_descs, ret;
1003 void *buf;
1004
1005 buf = kmalloc(length, GFP_KERNEL);
1006 if (!buf)
1007 return NULL;
1008
1009 ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length,
1010 DMA_FROM_DEVICE);
1011 if (ret) {
1012 kfree(buf);
1013 return NULL;
1014 }
1015
1016 sgls = buf;
1017 nr_descs = length / sizeof(struct nvme_sgl_desc);
1018 if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) ||
1019 sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
1020 /*
1021 * We have another SGL segment following this one: do not count
1022 * it as a regular data SGL descriptor and return it to the
1023 * caller.
1024 */
1025 *desc = sgls[nr_descs - 1];
1026 nr_descs--;
1027 } else {
1028 /* We do not have another SGL segment after this one. */
1029 desc->length = 0;
1030 }
1031
1032 *nr_sgls = nr_descs;
1033
1034 return sgls;
1035 }
1036
static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl,
		struct nvmet_pci_epf_iod *iod)
1039 {
1040 struct nvme_command *cmd = &iod->cmd;
1041 struct nvme_sgl_desc seg = cmd->common.dptr.sgl;
1042 struct nvme_sgl_desc *sgls = NULL;
1043 int n = 0, i, nr_sgls;
1044 int ret;
1045
1046 /*
1047 * We do not support inline data nor keyed SGLs, so we should be seeing
1048 * only segment descriptors.
1049 */
1050 if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) &&
1051 seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
1052 iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
1053 return -EIO;
1054 }
1055
1056 while (seg.length) {
1057 sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls);
1058 if (!sgls) {
1059 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1060 return -EIO;
1061 }
1062
1063 /* Grow the PCI segment table as needed. */
1064 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls);
1065 if (ret) {
1066 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1067 goto out;
1068 }
1069
1070 /*
1071 * Parse the SGL descriptors to build the PCI segment table,
1072 * checking the descriptor type as we go.
1073 */
1074 for (i = 0; i < nr_sgls; i++) {
1075 if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) {
1076 iod->status = NVME_SC_SGL_INVALID_TYPE |
1077 NVME_STATUS_DNR;
1078 goto out;
1079 }
1080 iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr);
1081 iod->data_segs[n].length = le32_to_cpu(sgls[i].length);
1082 n++;
1083 }
1084
1085 kfree(sgls);
1086 }
1087
1088 out:
1089 if (iod->status != NVME_SC_SUCCESS) {
1090 kfree(sgls);
1091 return -EIO;
1092 }
1093
1094 return 0;
1095 }
1096
static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod)
1098 {
1099 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
1100 struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl;
1101
1102 if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
1103 /* Single data descriptor case. */
1104 iod->nr_data_segs = 1;
1105 iod->data_segs = &iod->data_seg;
1106 iod->data_seg.pci_addr = le64_to_cpu(sgl->addr);
1107 iod->data_seg.length = le32_to_cpu(sgl->length);
1108 return 0;
1109 }
1110
1111 return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod);
1112 }
1113
static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod)
1115 {
1116 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
1117 struct nvmet_req *req = &iod->req;
1118 struct nvmet_pci_epf_segment *seg;
1119 struct scatterlist *sg;
1120 int ret, i;
1121
1122 if (iod->data_len > ctrl->mdts) {
1123 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1124 return -EINVAL;
1125 }
1126
1127 /*
1128 * Get the PCI address segments for the command data buffer using either
1129 * its SGLs or PRPs.
1130 */
1131 if (iod->cmd.common.flags & NVME_CMD_SGL_ALL)
1132 ret = nvmet_pci_epf_iod_parse_sgls(iod);
1133 else
1134 ret = nvmet_pci_epf_iod_parse_prps(iod);
1135 if (ret)
1136 return ret;
1137
1138 /* Get a command buffer using SGLs matching the PCI segments. */
1139 if (iod->nr_data_segs == 1) {
1140 sg_init_table(&iod->data_sgl, 1);
1141 iod->data_sgt.sgl = &iod->data_sgl;
1142 iod->data_sgt.nents = 1;
1143 iod->data_sgt.orig_nents = 1;
1144 } else {
1145 ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
1146 GFP_KERNEL);
1147 if (ret)
1148 goto err_nomem;
1149 }
1150
1151 for_each_sgtable_sg(&iod->data_sgt, sg, i) {
1152 seg = &iod->data_segs[i];
1153 seg->buf = kmalloc(seg->length, GFP_KERNEL);
1154 if (!seg->buf)
1155 goto err_nomem;
1156 sg_set_buf(sg, seg->buf, seg->length);
1157 }
1158
1159 req->transfer_len = iod->data_len;
1160 req->sg = iod->data_sgt.sgl;
1161 req->sg_cnt = iod->data_sgt.nents;
1162
1163 return 0;
1164
1165 err_nomem:
1166 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1167 return -ENOMEM;
1168 }
1169
static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)
1171 {
1172 struct nvmet_pci_epf_queue *cq = iod->cq;
1173 unsigned long flags;
1174
1175 /* Print an error message for failed commands, except AENs. */
1176 iod->status = le16_to_cpu(iod->cqe.status) >> 1;
1177 if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
1178 dev_err(iod->ctrl->dev,
1179 "CQ[%d]: Command %s (0x%x) status 0x%0x\n",
1180 iod->sq->qid, nvmet_pci_epf_iod_name(iod),
1181 iod->cmd.common.opcode, iod->status);
1182
1183 /*
1184 * Add the command to the list of completed commands and schedule the
1185 * CQ work.
1186 */
1187 spin_lock_irqsave(&cq->lock, flags);
1188 list_add_tail(&iod->link, &cq->list);
1189 queue_delayed_work(system_highpri_wq, &cq->work, 0);
1190 spin_unlock_irqrestore(&cq->lock, flags);
1191 }
1192
static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue)
1194 {
1195 struct nvmet_pci_epf_iod *iod;
1196 unsigned long flags;
1197
1198 spin_lock_irqsave(&queue->lock, flags);
1199 while (!list_empty(&queue->list)) {
1200 iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod,
1201 link);
1202 list_del_init(&iod->link);
1203 nvmet_pci_epf_free_iod(iod);
1204 }
1205 spin_unlock_irqrestore(&queue->lock, flags);
1206 }
1207
static int nvmet_pci_epf_add_port(struct nvmet_port *port)
1209 {
1210 mutex_lock(&nvmet_pci_epf_ports_mutex);
1211 list_add_tail(&port->entry, &nvmet_pci_epf_ports);
1212 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1213 return 0;
1214 }
1215
static void nvmet_pci_epf_remove_port(struct nvmet_port *port)
1217 {
1218 mutex_lock(&nvmet_pci_epf_ports_mutex);
1219 list_del_init(&port->entry);
1220 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1221 }
1222
1223 static struct nvmet_port *
nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid)
1225 {
1226 struct nvmet_port *p, *port = NULL;
1227
1228 mutex_lock(&nvmet_pci_epf_ports_mutex);
1229 list_for_each_entry(p, &nvmet_pci_epf_ports, entry) {
1230 if (p->disc_addr.portid == portid) {
1231 port = p;
1232 break;
1233 }
1234 }
1235 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1236
1237 return port;
1238 }
1239
static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
1241 {
1242 struct nvmet_pci_epf_iod *iod =
1243 container_of(req, struct nvmet_pci_epf_iod, req);
1244
1245 iod->status = le16_to_cpu(req->cqe->status) >> 1;
1246
1247 /*
1248 * If the command failed or we have no data to transfer, complete the
1249 * command immediately.
1250 */
1251 if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
1252 nvmet_pci_epf_complete_iod(iod);
1253 return;
1254 }
1255
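	/*
	 * The command has data to transfer to the host: wake up
	 * nvmet_pci_epf_exec_iod_work(), which will do the transfer and then
	 * complete the command.
	 */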
1256 complete(&iod->done);
1257 }
1258
static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl)
1260 {
1261 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1262 int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12;
1263
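	/* Report MDTS as a power of two in units of CAP.MPSMIN pages. */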
1264 return ilog2(ctrl->mdts) - page_shift;
1265 }
1266
static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
		u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector)
1269 {
1270 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1271 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1272 u16 status;
1273 int ret;
1274
1275 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
1276 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1277
1278 if (!(flags & NVME_QUEUE_PHYS_CONTIG))
1279 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
1280
1281 cq->pci_addr = pci_addr;
1282 cq->qid = cqid;
1283 cq->depth = qsize + 1;
1284 cq->vector = vector;
1285 cq->head = 0;
1286 cq->tail = 0;
1287 cq->phase = 1;
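	/*
	 * With a 4B doorbell stride, the CQ head doorbell register is at
	 * offset NVME_REG_DBS + (2 * cqid + 1) * 4.
	 */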
1288 cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32));
1289 nvmet_pci_epf_bar_write32(ctrl, cq->db, 0);
1290
1291 if (!cqid)
1292 cq->qes = sizeof(struct nvme_completion);
1293 else
1294 cq->qes = ctrl->io_cqes;
1295 cq->pci_size = cq->qes * cq->depth;
1296
1297 if (flags & NVME_CQ_IRQ_ENABLED) {
1298 cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
1299 if (!cq->iv)
1300 return NVME_SC_INTERNAL | NVME_STATUS_DNR;
1301 set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
1302 }
1303
1304 status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
1305 if (status != NVME_SC_SUCCESS)
1306 goto err;
1307
	/*
	 * Map the CQ PCI address space. Since PCI endpoint controllers may
	 * return a partial mapping, check that the mapping is large enough.
	 */
1312 ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size,
1313 &cq->pci_map);
1314 if (ret) {
1315 dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n",
1316 cq->qid, ret);
1317 goto err_internal;
1318 }
1319
1320 if (cq->pci_map.pci_size < cq->pci_size) {
1321 dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
1322 cq->qid);
1323 goto err_unmap_queue;
1324 }
1325
1326 set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
1327
1328 if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1329 dev_dbg(ctrl->dev,
1330 "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
1331 cqid, qsize, cq->qes, cq->vector);
1332 else
1333 dev_dbg(ctrl->dev,
1334 "CQ[%u]: %u entries of %zu B, IRQ disabled\n",
1335 cqid, qsize, cq->qes);
1336
1337 return NVME_SC_SUCCESS;
1338
1339 err_unmap_queue:
1340 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
1341 err_internal:
1342 status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1343 err:
1344 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1345 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
1346 return status;
1347 }
1348
static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
1350 {
1351 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1352 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1353
1354 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
1355 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1356
1357 cancel_delayed_work_sync(&cq->work);
1358 nvmet_pci_epf_drain_queue(cq);
1359 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1360 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
1361 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
1362 nvmet_cq_put(&cq->nvme_cq);
1363
1364 return NVME_SC_SUCCESS;
1365 }
1366
static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
		u16 sqid, u16 cqid, u16 flags, u16 qsize, u64 pci_addr)
1369 {
1370 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1371 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
1372 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1373 u16 status;
1374
1375 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1376 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1377
1378 if (!(flags & NVME_QUEUE_PHYS_CONTIG))
1379 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
1380
1381 sq->pci_addr = pci_addr;
1382 sq->qid = sqid;
1383 sq->depth = qsize + 1;
1384 sq->head = 0;
1385 sq->tail = 0;
1386 sq->phase = 0;
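	/* The SQ tail doorbell register is at NVME_REG_DBS + 2 * sqid * 4. */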
1387 sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32));
1388 nvmet_pci_epf_bar_write32(ctrl, sq->db, 0);
1389 if (!sqid)
1390 sq->qes = 1UL << NVME_ADM_SQES;
1391 else
1392 sq->qes = ctrl->io_sqes;
1393 sq->pci_size = sq->qes * sq->depth;
1394
1395 status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid,
1396 sq->depth);
1397 if (status != NVME_SC_SUCCESS)
1398 return status;
1399
1400 sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
1401 min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
1402 if (!sq->iod_wq) {
1403 dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid);
1404 status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1405 goto out_destroy_sq;
1406 }
1407
1408 set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
1409
1410 dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
1411 sqid, qsize, sq->qes);
1412
1413 return NVME_SC_SUCCESS;
1414
1415 out_destroy_sq:
1416 nvmet_sq_destroy(&sq->nvme_sq);
1417 return status;
1418 }
1419
static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
1421 {
1422 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1423 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
1424
1425 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1426 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1427
1428 destroy_workqueue(sq->iod_wq);
1429 sq->iod_wq = NULL;
1430
1431 nvmet_pci_epf_drain_queue(sq);
1432
1433 if (sq->nvme_sq.ctrl)
1434 nvmet_sq_destroy(&sq->nvme_sq);
1435
1436 return NVME_SC_SUCCESS;
1437 }
1438
static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl,
		u8 feat, void *data)
1441 {
1442 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1443 struct nvmet_feat_arbitration *arb;
1444 struct nvmet_feat_irq_coalesce *irqc;
1445 struct nvmet_feat_irq_config *irqcfg;
1446 struct nvmet_pci_epf_irq_vector *iv;
1447 u16 status;
1448
1449 switch (feat) {
1450 case NVME_FEAT_ARBITRATION:
1451 arb = data;
1452 if (!ctrl->sq_ab)
1453 arb->ab = 0x7;
1454 else
1455 arb->ab = ilog2(ctrl->sq_ab);
1456 return NVME_SC_SUCCESS;
1457
1458 case NVME_FEAT_IRQ_COALESCE:
1459 irqc = data;
1460 irqc->thr = ctrl->irq_vector_threshold;
1461 irqc->time = 0;
1462 return NVME_SC_SUCCESS;
1463
1464 case NVME_FEAT_IRQ_CONFIG:
1465 irqcfg = data;
1466 mutex_lock(&ctrl->irq_lock);
1467 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
1468 if (iv) {
1469 irqcfg->cd = iv->cd;
1470 status = NVME_SC_SUCCESS;
1471 } else {
1472 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1473 }
1474 mutex_unlock(&ctrl->irq_lock);
1475 return status;
1476
1477 default:
1478 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1479 }
1480 }
1481
static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl,
		u8 feat, void *data)
1484 {
1485 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1486 struct nvmet_feat_arbitration *arb;
1487 struct nvmet_feat_irq_coalesce *irqc;
1488 struct nvmet_feat_irq_config *irqcfg;
1489 struct nvmet_pci_epf_irq_vector *iv;
1490 u16 status;
1491
1492 switch (feat) {
1493 case NVME_FEAT_ARBITRATION:
1494 arb = data;
1495 if (arb->ab == 0x7)
1496 ctrl->sq_ab = 0;
1497 else
1498 ctrl->sq_ab = 1 << arb->ab;
1499 return NVME_SC_SUCCESS;
1500
1501 case NVME_FEAT_IRQ_COALESCE:
1502 /*
1503 * Since we do not implement precise IRQ coalescing timing,
1504 * ignore the time field.
1505 */
1506 irqc = data;
1507 ctrl->irq_vector_threshold = irqc->thr + 1;
1508 return NVME_SC_SUCCESS;
1509
1510 case NVME_FEAT_IRQ_CONFIG:
1511 irqcfg = data;
1512 mutex_lock(&ctrl->irq_lock);
1513 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
1514 if (iv) {
1515 iv->cd = irqcfg->cd;
1516 status = NVME_SC_SUCCESS;
1517 } else {
1518 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1519 }
1520 mutex_unlock(&ctrl->irq_lock);
1521 return status;
1522
1523 default:
1524 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1525 }
1526 }
1527
1528 static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
1529 .owner = THIS_MODULE,
1530 .type = NVMF_TRTYPE_PCI,
1531 .add_port = nvmet_pci_epf_add_port,
1532 .remove_port = nvmet_pci_epf_remove_port,
1533 .queue_response = nvmet_pci_epf_queue_response,
1534 .get_mdts = nvmet_pci_epf_get_mdts,
1535 .create_cq = nvmet_pci_epf_create_cq,
1536 .delete_cq = nvmet_pci_epf_delete_cq,
1537 .create_sq = nvmet_pci_epf_create_sq,
1538 .delete_sq = nvmet_pci_epf_delete_sq,
1539 .get_feature = nvmet_pci_epf_get_feat,
1540 .set_feature = nvmet_pci_epf_set_feat,
1541 };
1542
1543 static void nvmet_pci_epf_cq_work(struct work_struct *work);
1544
static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
		unsigned int qid, bool sq)
1547 {
1548 struct nvmet_pci_epf_queue *queue;
1549
1550 if (sq) {
1551 queue = &ctrl->sq[qid];
1552 } else {
1553 queue = &ctrl->cq[qid];
1554 INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
1555 }
1556 queue->ctrl = ctrl;
1557 queue->qid = qid;
1558 spin_lock_init(&queue->lock);
1559 INIT_LIST_HEAD(&queue->list);
1560 }
1561
static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl)
1563 {
1564 unsigned int qid;
1565
1566 ctrl->sq = kcalloc(ctrl->nr_queues,
1567 sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
1568 if (!ctrl->sq)
1569 return -ENOMEM;
1570
1571 ctrl->cq = kcalloc(ctrl->nr_queues,
1572 sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
1573 if (!ctrl->cq) {
1574 kfree(ctrl->sq);
1575 ctrl->sq = NULL;
1576 return -ENOMEM;
1577 }
1578
1579 for (qid = 0; qid < ctrl->nr_queues; qid++) {
1580 nvmet_pci_epf_init_queue(ctrl, qid, true);
1581 nvmet_pci_epf_init_queue(ctrl, qid, false);
1582 }
1583
1584 return 0;
1585 }
1586
static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
1588 {
1589 kfree(ctrl->sq);
1590 ctrl->sq = NULL;
1591 kfree(ctrl->cq);
1592 ctrl->cq = NULL;
1593 }
1594
static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
1596 {
1597 struct nvmet_pci_epf_iod *iod =
1598 container_of(work, struct nvmet_pci_epf_iod, work);
1599 struct nvmet_req *req = &iod->req;
1600 int ret;
1601
1602 if (!iod->ctrl->link_up) {
1603 nvmet_pci_epf_free_iod(iod);
1604 return;
1605 }
1606
1607 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) {
1608 iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1609 goto complete;
1610 }
1611
	/*
	 * If nvmet_req_init() fails (e.g. unsupported opcode), it will call
	 * __nvmet_req_complete() internally, which calls
	 * nvmet_pci_epf_queue_response() and completes the command directly.
	 */
1617 if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
1618 return;
1619
1620 iod->data_len = nvmet_req_transfer_len(req);
1621 if (iod->data_len) {
1622 /*
1623 * Get the data DMA transfer direction. Here "device" means the
1624 * PCI root-complex host.
1625 */
1626 if (nvme_is_write(&iod->cmd))
1627 iod->dma_dir = DMA_FROM_DEVICE;
1628 else
1629 iod->dma_dir = DMA_TO_DEVICE;
1630
1631 /*
1632 * Setup the command data buffer and get the command data from
1633 * the host if needed.
1634 */
1635 ret = nvmet_pci_epf_alloc_iod_data_buf(iod);
1636 if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
1637 ret = nvmet_pci_epf_transfer_iod_data(iod);
1638 if (ret) {
1639 nvmet_req_uninit(req);
1640 goto complete;
1641 }
1642 }
1643
1644 req->execute(req);
1645
1646 /*
1647 * If we do not have data to transfer after the command execution
1648 * finishes, nvmet_pci_epf_queue_response() will complete the command
1649 * directly. No need to wait for the completion in this case.
1650 */
1651 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE)
1652 return;
1653
1654 wait_for_completion(&iod->done);
1655
1656 if (iod->status != NVME_SC_SUCCESS)
1657 return;
1658
1659 WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
1660 nvmet_pci_epf_transfer_iod_data(iod);
1661
1662 complete:
1663 nvmet_pci_epf_complete_iod(iod);
1664 }
1665
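/*
 * Fetch commands from a submission queue: compare the local head with the
 * tail doorbell value written by the host and fetch at most sq_ab commands
 * (arbitration burst) in one pass.
 */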
static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
		struct nvmet_pci_epf_queue *sq)
1668 {
1669 struct nvmet_pci_epf_iod *iod;
1670 int ret, n = 0;
1671 u16 head = sq->head;
1672
1673 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
1674 while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
1675 iod = nvmet_pci_epf_alloc_iod(sq);
1676 if (!iod)
1677 break;
1678
1679 /* Get the NVMe command submitted by the host. */
1680 ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
1681 sq->pci_addr + head * sq->qes,
1682 sq->qes, DMA_FROM_DEVICE);
1683 if (ret) {
1684 /* Not much we can do... */
1685 nvmet_pci_epf_free_iod(iod);
1686 break;
1687 }
1688
1689 dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
1690 sq->qid, head, sq->tail,
1691 nvmet_pci_epf_iod_name(iod));
1692
1693 head++;
1694 if (head == sq->depth)
1695 head = 0;
1696 WRITE_ONCE(sq->head, head);
1697 n++;
1698
1699 queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
1700
1701 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
1702 }
1703
1704 return n;
1705 }
1706
static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
1708 {
1709 struct nvmet_pci_epf_ctrl *ctrl =
1710 container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
1711 struct nvmet_pci_epf_queue *sq;
1712 unsigned long limit = jiffies;
1713 unsigned long last = 0;
1714 int i, nr_sqs;
1715
1716 while (ctrl->link_up && ctrl->enabled) {
1717 nr_sqs = 0;
1718 /* Do round-robin arbitration. */
1719 for (i = 0; i < ctrl->nr_queues; i++) {
1720 sq = &ctrl->sq[i];
1721 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1722 continue;
1723 if (nvmet_pci_epf_process_sq(ctrl, sq))
1724 nr_sqs++;
1725 }
1726
1727 /*
1728 * If we have been running for a while, reschedule to let other
1729 * tasks run and to avoid RCU stalls.
1730 */
1731 if (time_is_before_jiffies(limit + secs_to_jiffies(1))) {
1732 cond_resched();
1733 limit = jiffies;
1734 continue;
1735 }
1736
1737 if (nr_sqs) {
1738 last = jiffies;
1739 continue;
1740 }
1741
1742 /*
1743 * If we have not received any command on any queue for more
1744 * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and
1745 * reschedule. This avoids "burning" a CPU when the controller
1746 * is idle for a long time.
1747 */
1748 if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE))
1749 break;
1750
1751 cpu_relax();
1752 }
1753
1754 schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL);
1755 }
1756
static void nvmet_pci_epf_cq_work(struct work_struct *work)
1758 {
1759 struct nvmet_pci_epf_queue *cq =
1760 container_of(work, struct nvmet_pci_epf_queue, work.work);
1761 struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl;
1762 struct nvme_completion *cqe;
1763 struct nvmet_pci_epf_iod *iod;
1764 unsigned long flags;
1765 int ret = 0, n = 0;
1766
1767 while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
1768
1769 /* Check that the CQ is not full. */
1770 cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db);
1771 if (cq->head == cq->tail + 1) {
1772 ret = -EAGAIN;
1773 break;
1774 }
1775
1776 spin_lock_irqsave(&cq->lock, flags);
1777 iod = list_first_entry_or_null(&cq->list,
1778 struct nvmet_pci_epf_iod, link);
1779 if (iod)
1780 list_del_init(&iod->link);
1781 spin_unlock_irqrestore(&cq->lock, flags);
1782
1783 if (!iod)
1784 break;
1785
1786 /*
1787 * Post the IOD completion entry. If the IOD request was
1788 * executed (req->execute() called), the CQE is already
1789 * initialized. However, the IOD may have been failed before
1790 * that, leaving the CQE not properly initialized. So always
1791 * initialize it here.
1792 */
1793 cqe = &iod->cqe;
1794 cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head));
1795 cqe->sq_id = cpu_to_le16(iod->sq->qid);
1796 cqe->command_id = iod->cmd.common.command_id;
1797 cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
1798
1799 dev_dbg(ctrl->dev,
1800 "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n",
1801 cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
1802 le64_to_cpu(cqe->result.u64), cq->head, cq->tail,
1803 cq->phase);
1804
1805 memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes,
1806 cqe, cq->qes);
1807
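		/* Advance the CQ tail and invert the Phase Tag on wrap-around. */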
1808 cq->tail++;
1809 if (cq->tail >= cq->depth) {
1810 cq->tail = 0;
1811 cq->phase ^= 1;
1812 }
1813
1814 nvmet_pci_epf_free_iod(iod);
1815
1816 /* Signal the host. */
1817 nvmet_pci_epf_raise_irq(ctrl, cq, false);
1818 n++;
1819 }
1820
1821 /*
1822 * We do not support precise IRQ coalescing time (100ns units as per
1823 * NVMe specifications). So if we have posted completion entries without
1824 * reaching the interrupt coalescing threshold, raise an interrupt.
1825 */
1826 if (n)
1827 nvmet_pci_epf_raise_irq(ctrl, cq, true);
1828
1829 if (ret < 0)
1830 queue_delayed_work(system_highpri_wq, &cq->work,
1831 NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
1832 }
1833
static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl)
1835 {
1836 struct nvmet_ctrl *tctrl = ctrl->tctrl;
1837
1838 /* Initialize controller status. */
1839 tctrl->csts = 0;
1840 ctrl->csts = 0;
1841 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
1842
1843 /* Initialize controller configuration and start polling. */
1844 tctrl->cc = 0;
1845 ctrl->cc = 0;
1846 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
1847 }
1848
static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
1850 {
1851 u64 pci_addr, asq, acq;
1852 u32 aqa;
1853 u16 status, qsize;
1854
1855 if (ctrl->enabled)
1856 return 0;
1857
1858 dev_info(ctrl->dev, "Enabling controller\n");
1859
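	/*
	 * Derive the memory page size and the I/O SQ/CQ entry sizes from the
	 * CC register fields programmed by the host.
	 */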
1860 ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12;
1861 ctrl->mps = 1UL << ctrl->mps_shift;
1862 ctrl->mps_mask = ctrl->mps - 1;
1863
1864 ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc);
1865 if (ctrl->io_sqes < sizeof(struct nvme_command)) {
1866 dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
1867 ctrl->io_sqes, sizeof(struct nvme_command));
1868 goto err;
1869 }
1870
1871 ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
1872 if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
1873 dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
1874 ctrl->io_cqes, sizeof(struct nvme_completion));
1875 goto err;
1876 }
1877
1878 /* Create the admin queue. */
1879 aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA);
1880 asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ);
1881 acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ);
1882
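	/* AQA.ACQS and AQA.ASQS are 0's based queue sizes. */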
	qsize = (aqa & 0x0fff0000) >> 16;
	pci_addr = acq & GENMASK_ULL(63, 12);
	status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0,
				NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG,
				qsize, pci_addr, 0);
	if (status != NVME_SC_SUCCESS) {
		dev_err(ctrl->dev, "Failed to create admin completion queue\n");
		goto err;
	}

	qsize = aqa & 0x00000fff;
	pci_addr = asq & GENMASK_ULL(63, 12);
	status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0,
				NVME_QUEUE_PHYS_CONTIG, qsize, pci_addr);
	if (status != NVME_SC_SUCCESS) {
		dev_err(ctrl->dev, "Failed to create admin submission queue\n");
		nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
		goto err;
	}

	ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
	ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
	ctrl->enabled = true;
	ctrl->csts = NVME_CSTS_RDY;

	/* Start polling the controller SQs. */
	schedule_delayed_work(&ctrl->poll_sqs, 0);

	return 0;

err:
	nvmet_pci_epf_clear_ctrl_config(ctrl);
	return -EINVAL;
}

static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl,
				       bool shutdown)
{
	int qid;

	if (!ctrl->enabled)
		return;

	dev_info(ctrl->dev, "%s controller\n",
		 shutdown ? "Shutting down" : "Disabling");

	ctrl->enabled = false;
	cancel_delayed_work_sync(&ctrl->poll_sqs);

	/* Delete all I/O queues first. */
	for (qid = 1; qid < ctrl->nr_queues; qid++)
		nvmet_pci_epf_delete_sq(ctrl->tctrl, qid);

	for (qid = 1; qid < ctrl->nr_queues; qid++)
		nvmet_pci_epf_delete_cq(ctrl->tctrl, qid);

	/* Delete the admin queue last. */
	nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
	nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);

	ctrl->csts &= ~NVME_CSTS_RDY;
	if (shutdown) {
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
		ctrl->cc &= ~NVME_CC_ENABLE;
		nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
	}
}

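/*
 * Periodically check the CC register for changes made by the host: unlike a
 * regular PCI device driver, an endpoint function typically cannot trap host
 * MMIO writes to its BAR, so controller enable, disable and shutdown
 * transitions are detected by polling CC and reflected into CSTS.
 */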
static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
{
	struct nvmet_pci_epf_ctrl *ctrl =
		container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work);
	u32 old_cc, new_cc;
	int ret;

	if (!ctrl->tctrl)
		return;

	old_cc = ctrl->cc;
	new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
	if (new_cc == old_cc)
		goto reschedule_work;

	ctrl->cc = new_cc;

	if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
		ret = nvmet_pci_epf_enable_ctrl(ctrl);
		if (ret)
			goto reschedule_work;
	}

	if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc))
		nvmet_pci_epf_disable_ctrl(ctrl, false);

	if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc))
		nvmet_pci_epf_disable_ctrl(ctrl, true);

	if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;

	nvmet_update_cc(ctrl->tctrl, ctrl->cc);
	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);

reschedule_work:
	schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}

static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
{
	struct nvmet_ctrl *tctrl = ctrl->tctrl;

	ctrl->bar = ctrl->nvme_epf->reg_bar;

	/* Copy the target controller capabilities as a base. */
	ctrl->cap = tctrl->cap;

	/* Contiguous Queues Required (CQR). */
	ctrl->cap |= 0x1ULL << 16;

	/* Set the Doorbell Stride (DSTRD) to 0, that is, 4 B doorbells. */
	ctrl->cap &= ~GENMASK_ULL(35, 32);

	/* Clear NVM Subsystem Reset Supported (NSSRS). */
	ctrl->cap &= ~(0x1ULL << 36);

	/* Clear Boot Partition Support (BPS). */
	ctrl->cap &= ~(0x1ULL << 45);

	/* Clear Persistent Memory Region Supported (PMRS). */
	ctrl->cap &= ~(0x1ULL << 56);

	/* Clear Controller Memory Buffer Supported (CMBS). */
	ctrl->cap &= ~(0x1ULL << 57);

	nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);

	nvmet_pci_epf_clear_ctrl_config(ctrl);
}

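/*
 * Create the NVMe target controller backing the PCI endpoint function:
 * allocate the IOD mempool, look up the configured port, allocate a target
 * controller using a locally generated host NQN, then allocate the queues and
 * IRQ vector descriptors and initialize BAR 0.
 */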
static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
				     unsigned int max_nr_queues)
{
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
	struct nvmet_alloc_ctrl_args args = {};
	char hostnqn[NVMF_NQN_SIZE];
	uuid_t id;
	int ret;

	memset(ctrl, 0, sizeof(*ctrl));
	ctrl->dev = &nvme_epf->epf->dev;
	mutex_init(&ctrl->irq_lock);
	ctrl->nvme_epf = nvme_epf;
	ctrl->mdts = nvme_epf->mdts_kb * SZ_1K;
	INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work);
	INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work);

	ret = mempool_init_kmalloc_pool(&ctrl->iod_pool,
					max_nr_queues * NVMET_MAX_QUEUE_SIZE,
					sizeof(struct nvmet_pci_epf_iod));
	if (ret) {
		dev_err(ctrl->dev, "Failed to initialize IOD mempool\n");
		return ret;
	}

	ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid);
	if (!ctrl->port) {
		dev_err(ctrl->dev, "Port not found\n");
		ret = -EINVAL;
		goto out_mempool_exit;
	}

	/* Create the target controller. */
	uuid_gen(&id);
	snprintf(hostnqn, NVMF_NQN_SIZE,
		 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
	args.port = ctrl->port;
	args.subsysnqn = nvme_epf->subsysnqn;
	memset(&id, 0, sizeof(uuid_t));
	args.hostid = &id;
	args.hostnqn = hostnqn;
	args.ops = &nvmet_pci_epf_fabrics_ops;

	ctrl->tctrl = nvmet_alloc_ctrl(&args);
	if (!ctrl->tctrl) {
		dev_err(ctrl->dev, "Failed to create target controller\n");
		ret = -ENOMEM;
		goto out_mempool_exit;
	}
	ctrl->tctrl->drvdata = ctrl;

	/* We do not support protection information for now. */
	if (ctrl->tctrl->pi_support) {
		dev_err(ctrl->dev,
			"Protection information (PI) is not supported\n");
		ret = -ENOTSUPP;
		goto out_put_ctrl;
	}

	/* Allocate our queues, up to the maximum number. */
	ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
	ret = nvmet_pci_epf_alloc_queues(ctrl);
	if (ret)
		goto out_put_ctrl;

	/*
	 * Allocate the IRQ vector descriptors. We cannot have more vectors
	 * than the maximum number of queues.
	 */
	ret = nvmet_pci_epf_alloc_irq_vectors(ctrl);
	if (ret)
		goto out_free_queues;

	dev_info(ctrl->dev,
		 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n",
		 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1,
		 ctrl->mdts);

	/* Initialize BAR 0 using the target controller CAP. */
	nvmet_pci_epf_init_bar(ctrl);

	return 0;

out_free_queues:
	nvmet_pci_epf_free_queues(ctrl);
out_put_ctrl:
	nvmet_ctrl_put(ctrl->tctrl);
	ctrl->tctrl = NULL;
out_mempool_exit:
	mempool_exit(&ctrl->iod_pool);
	return ret;
}

static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	dev_info(ctrl->dev, "PCI link up\n");
	ctrl->link_up = true;

	schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}

static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	dev_info(ctrl->dev, "PCI link down\n");
	ctrl->link_up = false;

	cancel_delayed_work_sync(&ctrl->poll_cc);

	nvmet_pci_epf_disable_ctrl(ctrl, false);
	nvmet_pci_epf_clear_ctrl_config(ctrl);
}

static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
	if (!ctrl->tctrl)
		return;

	dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
		 ctrl->tctrl->subsys->subsysnqn);

	nvmet_pci_epf_stop_ctrl(ctrl);

	nvmet_pci_epf_free_queues(ctrl);
	nvmet_pci_epf_free_irq_vectors(ctrl);

	nvmet_ctrl_put(ctrl->tctrl);
	ctrl->tctrl = NULL;

	mempool_exit(&ctrl->iod_pool);
}

static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;
	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
	size_t reg_size, reg_bar_size;
	size_t msix_table_size = 0;

	/*
	 * The first free BAR will be our register BAR and, per the NVMe
	 * specification, it must be BAR 0.
	 */
	if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
		dev_err(&epf->dev, "BAR 0 is not free\n");
		return -ENODEV;
	}

	/*
	 * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims
	 * that the BAR 0 type is Implementation Specific, in NVMe 1.1 the type
	 * is required to be 64-bit. Thus, for interoperability, always set the
	 * type to 64-bit. In the rare case that the PCI EPC does not support
	 * configuring BAR 0 as 64-bit, the call to pci_epc_set_bar() will fail
	 * and we will return the failure back to the user.
	 */
	epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

	/*
	 * Calculate the size of the register BAR: the NVMe registers first,
	 * with enough space for the doorbells, followed by the MSI-X table
	 * if supported.
	 */
	reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
	reg_size = ALIGN(reg_size, 8);

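	/*
	 * For example, with NVME_REG_DBS at offset 0x1000 and assuming
	 * NVMET_NR_QUEUES is 128, this amounts to 4096 + 128 * 2 * 4 = 5120 B
	 * of registers and doorbells ahead of the MSI-X table.
	 */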
	if (epc_features->msix_capable) {
		size_t pba_size;

		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		nvme_epf->msix_table_offset = reg_size;
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);

		reg_size += msix_table_size + pba_size;
	}

	if (epc_features->bar[BAR_0].type == BAR_FIXED) {
		if (reg_size > epc_features->bar[BAR_0].fixed_size) {
			dev_err(&epf->dev,
				"BAR 0 size %llu B too small, need %zu B\n",
				epc_features->bar[BAR_0].fixed_size,
				reg_size);
			return -ENOMEM;
		}
		reg_bar_size = epc_features->bar[BAR_0].fixed_size;
	} else {
		reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
	}

	nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
						epc_features, PRIMARY_INTERFACE);
	if (!nvme_epf->reg_bar) {
		dev_err(&epf->dev, "Failed to allocate BAR 0\n");
		return -ENOMEM;
	}
	memset(nvme_epf->reg_bar, 0, reg_bar_size);

	return 0;
}

static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;

	if (!nvme_epf->reg_bar)
		return;

	pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
	nvme_epf->reg_bar = NULL;
}

static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
{
	struct pci_epf *epf = nvme_epf->epf;

	pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
			  &epf->bar[BAR_0]);
}

static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
{
	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
	struct pci_epf *epf = nvme_epf->epf;
	int ret;

	/* Enable MSI-X if supported, otherwise, use MSI. */
	if (epc_features->msix_capable && epf->msix_interrupts) {
		ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts, BAR_0,
				       nvme_epf->msix_table_offset);
		if (ret) {
			dev_err(&epf->dev, "Failed to configure MSI-X\n");
			return ret;
		}

		nvme_epf->nr_vectors = epf->msix_interrupts;
		nvme_epf->irq_type = PCI_IRQ_MSIX;

		return 0;
	}

	if (epc_features->msi_capable && epf->msi_interrupts) {
		ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(&epf->dev, "Failed to configure MSI\n");
			return ret;
		}

		nvme_epf->nr_vectors = epf->msi_interrupts;
		nvme_epf->irq_type = PCI_IRQ_MSI;

		return 0;
	}

	/* MSI and MSI-X are not supported: fall back to INTx. */
	nvme_epf->nr_vectors = 1;
	nvme_epf->irq_type = PCI_IRQ_INTX;

	return 0;
}

static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
	unsigned int max_nr_queues = NVMET_NR_QUEUES;
	int ret;

	/* For now, do not support virtual functions. */
	if (epf->vfunc_no > 0) {
		dev_err(&epf->dev, "Virtual functions are not supported\n");
		return -EINVAL;
	}

	/*
	 * Cap the maximum number of queues we can support on the controller
	 * with the number of IRQs we can use.
	 */
	if (epc_features->msix_capable && epf->msix_interrupts) {
		dev_info(&epf->dev,
			 "PCI endpoint controller supports MSI-X, %u vectors\n",
			 epf->msix_interrupts);
		max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
	} else if (epc_features->msi_capable && epf->msi_interrupts) {
		dev_info(&epf->dev,
			 "PCI endpoint controller supports MSI, %u vectors\n",
			 epf->msi_interrupts);
		max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
	}

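	/*
	 * The number of queues is limited by the number of available IRQ
	 * vectors. At least two queues (the admin queue plus one I/O queue)
	 * are needed for a usable controller.
	 */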
	if (max_nr_queues < 2) {
		dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
			max_nr_queues);
		return -EINVAL;
	}

	/* Create the target controller. */
	ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues);
	if (ret) {
		dev_err(&epf->dev,
			"Failed to create NVMe PCI target controller (err=%d)\n",
			ret);
		return ret;
	}

	nvmet_pci_epf_init_dma(nvme_epf);

	/* Set device ID, class, etc. */
	epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
	epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
	ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret) {
		dev_err(&epf->dev,
			"Failed to write configuration header (err=%d)\n", ret);
		goto out_destroy_ctrl;
	}

	ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
			      &epf->bar[BAR_0]);
	if (ret) {
		dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret);
		goto out_destroy_ctrl;
	}

	/*
	 * Enable interrupts and start polling the controller BAR if we do not
	 * have a link up notifier.
	 */
	ret = nvmet_pci_epf_init_irq(nvme_epf);
	if (ret)
		goto out_clear_bar;

	if (!epc_features->linkup_notifier)
		nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);

	return 0;

out_clear_bar:
	nvmet_pci_epf_clear_bar(nvme_epf);
out_destroy_ctrl:
	nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
	return ret;
}

static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;

	nvmet_pci_epf_destroy_ctrl(ctrl);

	nvmet_pci_epf_deinit_dma(nvme_epf);
	nvmet_pci_epf_clear_bar(nvme_epf);
}

static int nvmet_pci_epf_link_up(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;

	nvmet_pci_epf_start_ctrl(ctrl);

	return 0;
}

static int nvmet_pci_epf_link_down(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;

	nvmet_pci_epf_stop_ctrl(ctrl);

	return 0;
}

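/*
 * EPC event operations: the controller is created when the endpoint
 * controller is initialized and, if the EPC provides a link-up notifier, it
 * is started and stopped on link up/down events. Otherwise, it is started
 * directly at the end of nvmet_pci_epf_epc_init().
 */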
static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = {
	.epc_init = nvmet_pci_epf_epc_init,
	.epc_deinit = nvmet_pci_epf_epc_deinit,
	.link_up = nvmet_pci_epf_link_up,
	.link_down = nvmet_pci_epf_link_down,
};

static int nvmet_pci_epf_bind(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	int ret;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}
	nvme_epf->epc_features = epc_features;

	ret = nvmet_pci_epf_configure_bar(nvme_epf);
	if (ret)
		return ret;

	return 0;
}

static void nvmet_pci_epf_unbind(struct pci_epf *epf)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);

	if (epc->init_complete) {
		nvmet_pci_epf_deinit_dma(nvme_epf);
		nvmet_pci_epf_clear_bar(nvme_epf);
	}

	nvmet_pci_epf_free_bar(nvme_epf);
}

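/*
 * Default PCI configuration header: class code 010802h (mass storage,
 * non-volatile memory controller, NVM Express). The vendor and subsystem
 * vendor IDs are overwritten in nvmet_pci_epf_epc_init() with the values
 * configured for the target subsystem.
 */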
static struct pci_epf_header nvme_epf_pci_header = {
	.vendorid = PCI_ANY_ID,
	.deviceid = PCI_ANY_ID,
	.progif_code = 0x02, /* NVM Express */
	.baseclass_code = PCI_BASE_CLASS_STORAGE,
	.subclass_code = 0x08, /* Non-Volatile Memory controller */
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static int nvmet_pci_epf_probe(struct pci_epf *epf,
			       const struct pci_epf_device_id *id)
{
	struct nvmet_pci_epf *nvme_epf;
	int ret;

	nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
	if (!nvme_epf)
		return -ENOMEM;

	ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
	if (ret)
		return ret;

	nvme_epf->epf = epf;
	nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;

	epf->event_ops = &nvmet_pci_epf_event_ops;
	epf->header = &nvme_epf_pci_header;
	epf_set_drvdata(epf, nvme_epf);

	return 0;
}

#define to_nvme_epf(epf_group)	\
	container_of(epf_group, struct nvmet_pci_epf, group)

static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
}

static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
					  const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
	u16 portid;

	/* Do not allow setting this when the function is already started. */
	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	if (!len)
		return -EINVAL;

	if (kstrtou16(page, 0, &portid))
		return -EINVAL;

	nvme_epf->portid = cpu_to_le16(portid);

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, portid);

static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
					    char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
}

static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
					     const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	/* Do not allow setting this when the function is already started. */
	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	if (!len)
		return -EINVAL;

	strscpy(nvme_epf->subsysnqn, page, len);

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);

static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);

	return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
}

static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
					   const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
	unsigned long mdts_kb;
	int ret;

	if (nvme_epf->ctrl.tctrl)
		return -EBUSY;

	ret = kstrtoul(page, 0, &mdts_kb);
	if (ret)
		return ret;
	if (!mdts_kb)
		mdts_kb = NVMET_PCI_EPF_MDTS_KB;
	else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
		mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;

	if (!is_power_of_2(mdts_kb))
		return -EINVAL;

	nvme_epf->mdts_kb = mdts_kb;

	return len;
}

CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);

static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
	&nvmet_pci_epf_attr_portid,
	&nvmet_pci_epf_attr_subsysnqn,
	&nvmet_pci_epf_attr_mdts_kb,
	NULL,
};

static const struct config_item_type nvmet_pci_epf_group_type = {
	.ct_attrs = nvmet_pci_epf_attrs,
	.ct_owner = THIS_MODULE,
};

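/*
 * Expose the "nvme" attribute group in the PCI endpoint function configfs
 * directory. With the usual pci_ep configfs layout, the attributes would be
 * set with something like (paths and values shown for illustration only):
 *
 *   echo 1 > functions/nvmet_pci_epf/func0/nvme/portid
 *   echo nqn.2014-08.org.nvmexpress:test > functions/nvmet_pci_epf/func0/nvme/subsysnqn
 *   echo 512 > functions/nvmet_pci_epf/func0/nvme/mdts_kb
 *
 * The attributes can only be changed before the target controller is created,
 * as the store operations return -EBUSY afterwards.
 */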
static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
						  struct config_group *group)
{
	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);

	config_group_init_type_name(&nvme_epf->group, "nvme",
				    &nvmet_pci_epf_group_type);

	return &nvme_epf->group;
}

static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
	{ .name = "nvmet_pci_epf" },
	{},
};

static struct pci_epf_ops nvmet_pci_epf_ops = {
	.bind = nvmet_pci_epf_bind,
	.unbind = nvmet_pci_epf_unbind,
	.add_cfs = nvmet_pci_epf_add_cfs,
};

static struct pci_epf_driver nvmet_pci_epf_driver = {
	.driver.name = "nvmet_pci_epf",
	.probe = nvmet_pci_epf_probe,
	.id_table = nvmet_pci_epf_ids,
	.ops = &nvmet_pci_epf_ops,
	.owner = THIS_MODULE,
};

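/*
 * Register the PCI endpoint function driver first, then the fabrics ops with
 * the NVMe target core; unregistration happens in the reverse order, both on
 * failure and at module exit.
 */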
static int __init nvmet_pci_epf_init_module(void)
{
	int ret;

	ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
	if (ret) {
		pci_epf_unregister_driver(&nvmet_pci_epf_driver);
		return ret;
	}

	return 0;
}

static void __exit nvmet_pci_epf_cleanup_module(void)
{
	nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
	pci_epf_unregister_driver(&nvmet_pci_epf_driver);
}

module_init(nvmet_pci_epf_init_module);
module_exit(nvmet_pci_epf_cleanup_module);

MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
MODULE_LICENSE("GPL");