1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe PCI Endpoint Function target driver.
4 *
5 * Copyright (c) 2024, Western Digital Corporation or its affiliates.
6 * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com>
7 * REDS Institute, HEIG-VD, HES-SO, Switzerland
8 */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/delay.h>
12 #include <linux/dmaengine.h>
13 #include <linux/io.h>
14 #include <linux/mempool.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/nvme.h>
18 #include <linux/pci_ids.h>
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
21 #include <linux/pci_regs.h>
22 #include <linux/slab.h>
23
24 #include "nvmet.h"
25
26 static LIST_HEAD(nvmet_pci_epf_ports);
27 static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
28
29 /*
30 * Default and maximum allowed data transfer size. For the default,
31 * allow up to 128 page-sized segments. For the maximum allowed,
32 * use 4 times the default (which is completely arbitrary).
33 */
34 #define NVMET_PCI_EPF_MAX_SEGS 128
35 #define NVMET_PCI_EPF_MDTS_KB \
36 (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
37 #define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4)
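/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12), the default MDTS is
 * 128 << 2 == 512 KiB and the maximum allowed MDTS is 2048 KiB.
 */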
38
39 /*
40 * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an
41 * interrupt to the host. This default of 8 is completely arbitrary and can
42 * be changed by the host with an nvme_set_features command.
43 */
44 #define NVMET_PCI_EPF_IV_THRESHOLD 8
45
46 /*
47 * BAR CC register and SQ polling intervals.
48 */
49 #define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10)
50 #define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5)
51 #define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000)
52
53 /*
54 * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ.
55 */
56 #define NVMET_PCI_EPF_SQ_AB 8
57
58 /*
59 * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ
60 * is full, in which case we retry the CQ processing after this interval.
61 */
62 #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1)
63
64 enum nvmet_pci_epf_queue_flags {
65 NVMET_PCI_EPF_Q_LIVE = 0, /* The queue is live */
66 NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */
67 };
68
69 /*
70 * IRQ vector descriptor.
71 */
72 struct nvmet_pci_epf_irq_vector {
73 unsigned int vector;
74 unsigned int ref;
75 bool cd;
76 int nr_irqs;
77 };
78
79 struct nvmet_pci_epf_queue {
80 union {
81 struct nvmet_sq nvme_sq;
82 struct nvmet_cq nvme_cq;
83 };
84 struct nvmet_pci_epf_ctrl *ctrl;
85 unsigned long flags;
86
87 u64 pci_addr;
88 size_t pci_size;
89 struct pci_epc_map pci_map;
90
91 u16 qid;
92 u16 depth;
93 u16 vector;
94 u16 head;
95 u16 tail;
96 u16 phase;
97 u32 db;
98
99 size_t qes;
100
101 struct nvmet_pci_epf_irq_vector *iv;
102 struct workqueue_struct *iod_wq;
103 struct delayed_work work;
104 spinlock_t lock;
105 struct list_head list;
106 };
107
108 /*
109 * PCI Root Complex (RC) address data segment for mapping an admin or
110 * I/O command buffer @buf of @length bytes to the PCI address @pci_addr.
111 */
112 struct nvmet_pci_epf_segment {
113 void *buf;
114 u64 pci_addr;
115 u32 length;
116 };
117
118 /*
119 * Command descriptors.
120 */
121 struct nvmet_pci_epf_iod {
122 struct list_head link;
123
124 struct nvmet_req req;
125 struct nvme_command cmd;
126 struct nvme_completion cqe;
127 unsigned int status;
128
129 struct nvmet_pci_epf_ctrl *ctrl;
130
131 struct nvmet_pci_epf_queue *sq;
132 struct nvmet_pci_epf_queue *cq;
133
134 /* Data transfer size and direction for the command. */
135 size_t data_len;
136 enum dma_data_direction dma_dir;
137
138 /*
139 * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we
140 * use only @data_seg. Otherwise, the array of segments @data_segs is
141 * allocated to manage multiple PCI address data segments. @data_sgl and
142 * @data_sgt are used to setup the command request for execution by the
143 * target core.
144 */
145 unsigned int nr_data_segs;
146 struct nvmet_pci_epf_segment data_seg;
147 struct nvmet_pci_epf_segment *data_segs;
148 struct scatterlist data_sgl;
149 struct sg_table data_sgt;
150
151 struct work_struct work;
152 struct completion done;
153 };
154
155 /*
156 * PCI target controller private data.
157 */
158 struct nvmet_pci_epf_ctrl {
159 struct nvmet_pci_epf *nvme_epf;
160 struct nvmet_port *port;
161 struct nvmet_ctrl *tctrl;
162 struct device *dev;
163
164 unsigned int nr_queues;
165 struct nvmet_pci_epf_queue *sq;
166 struct nvmet_pci_epf_queue *cq;
167 unsigned int sq_ab;
168
169 mempool_t iod_pool;
170 void *bar;
171 u64 cap;
172 u32 cc;
173 u32 csts;
174
175 size_t io_sqes;
176 size_t io_cqes;
177
178 size_t mps_shift;
179 size_t mps;
180 size_t mps_mask;
181
182 unsigned int mdts;
183
184 struct delayed_work poll_cc;
185 struct delayed_work poll_sqs;
186
187 struct mutex irq_lock;
188 struct nvmet_pci_epf_irq_vector *irq_vectors;
189 unsigned int irq_vector_threshold;
190
191 bool link_up;
192 bool enabled;
193 };
194
195 /*
196 * PCI EPF driver private data.
197 */
198 struct nvmet_pci_epf {
199 struct pci_epf *epf;
200
201 const struct pci_epc_features *epc_features;
202
203 void *reg_bar;
204 size_t msix_table_offset;
205
206 unsigned int irq_type;
207 unsigned int nr_vectors;
208
209 struct nvmet_pci_epf_ctrl ctrl;
210
211 bool dma_enabled;
212 struct dma_chan *dma_tx_chan;
213 struct mutex dma_tx_lock;
214 struct dma_chan *dma_rx_chan;
215 struct mutex dma_rx_lock;
216
217 struct mutex mmio_lock;
218
219 /* PCI endpoint function configfs attributes. */
220 struct config_group group;
221 __le16 portid;
222 char subsysnqn[NVMF_NQN_SIZE];
223 unsigned int mdts_kb;
224 };
225
226 static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl,
227 u32 off)
228 {
229 __le32 *bar_reg = ctrl->bar + off;
230
231 return le32_to_cpu(READ_ONCE(*bar_reg));
232 }
233
234 static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl,
235 u32 off, u32 val)
236 {
237 __le32 *bar_reg = ctrl->bar + off;
238
239 WRITE_ONCE(*bar_reg, cpu_to_le32(val));
240 }
241
242 static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl,
243 u32 off)
244 {
245 return (u64)nvmet_pci_epf_bar_read32(ctrl, off) |
246 ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32);
247 }
248
249 static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl,
250 u32 off, u64 val)
251 {
252 nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF);
253 nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF);
254 }
255
256 static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf,
257 u64 pci_addr, size_t size, struct pci_epc_map *map)
258 {
259 struct pci_epf *epf = nvme_epf->epf;
260
261 return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
262 pci_addr, size, map);
263 }
264
265 static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf,
266 struct pci_epc_map *map)
267 {
268 struct pci_epf *epf = nvme_epf->epf;
269
270 pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map);
271 }
272
273 struct nvmet_pci_epf_dma_filter {
274 struct device *dev;
275 u32 dma_mask;
276 };
277
278 static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg)
279 {
280 struct nvmet_pci_epf_dma_filter *filter = arg;
281 struct dma_slave_caps caps;
282
283 memset(&caps, 0, sizeof(caps));
284 dma_get_slave_caps(chan, &caps);
285
286 return chan->device->dev == filter->dev &&
287 (filter->dma_mask & caps.directions);
288 }
289
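/*
 * Request one DMA channel per transfer direction from the endpoint
 * controller DMA engine. If either channel cannot be obtained, fall back
 * to MMIO (memcpy_fromio()/memcpy_toio()) for all data transfers.
 */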
290 static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
291 {
292 struct pci_epf *epf = nvme_epf->epf;
293 struct device *dev = &epf->dev;
294 struct nvmet_pci_epf_dma_filter filter;
295 struct dma_chan *chan;
296 dma_cap_mask_t mask;
297
298 mutex_init(&nvme_epf->dma_rx_lock);
299 mutex_init(&nvme_epf->dma_tx_lock);
300
301 dma_cap_zero(mask);
302 dma_cap_set(DMA_SLAVE, mask);
303
304 filter.dev = epf->epc->dev.parent;
305 filter.dma_mask = BIT(DMA_DEV_TO_MEM);
306
307 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
308 if (!chan)
309 goto out_dma_no_rx;
310
311 nvme_epf->dma_rx_chan = chan;
312
313 filter.dma_mask = BIT(DMA_MEM_TO_DEV);
314 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
315 if (!chan)
316 goto out_dma_no_tx;
317
318 nvme_epf->dma_tx_chan = chan;
319
320 nvme_epf->dma_enabled = true;
321
322 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
323 dma_chan_name(nvme_epf->dma_rx_chan),
324 dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
325 dma_rx_chan)));
326
327 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
328 dma_chan_name(nvme_epf->dma_tx_chan),
329 dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->
330 dma_tx_chan)));
331
332 return;
333
334 out_dma_no_tx:
335 dma_release_channel(nvme_epf->dma_rx_chan);
336 nvme_epf->dma_rx_chan = NULL;
337
338 out_dma_no_rx:
339 mutex_destroy(&nvme_epf->dma_rx_lock);
340 mutex_destroy(&nvme_epf->dma_tx_lock);
341 nvme_epf->dma_enabled = false;
342
343 dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n");
344 }
345
346 static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf)
347 {
348 if (!nvme_epf->dma_enabled)
349 return;
350
351 dma_release_channel(nvme_epf->dma_tx_chan);
352 nvme_epf->dma_tx_chan = NULL;
353 dma_release_channel(nvme_epf->dma_rx_chan);
354 nvme_epf->dma_rx_chan = NULL;
355 mutex_destroy(&nvme_epf->dma_rx_lock);
356 mutex_destroy(&nvme_epf->dma_tx_lock);
357 nvme_epf->dma_enabled = false;
358 }
359
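/*
 * Transfer a single PCI address segment using the DMA engine: map the local
 * buffer, configure the channel for the segment host PCI address, submit the
 * descriptor and synchronously wait for completion. Transfers are serialized
 * per direction with a mutex since a single channel is used for each
 * direction.
 */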
360 static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf,
361 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
362 {
363 struct pci_epf *epf = nvme_epf->epf;
364 struct dma_async_tx_descriptor *desc;
365 struct dma_slave_config sconf = {};
366 struct device *dev = &epf->dev;
367 struct device *dma_dev;
368 struct dma_chan *chan;
369 dma_cookie_t cookie;
370 dma_addr_t dma_addr;
371 struct mutex *lock;
372 int ret;
373
374 switch (dir) {
375 case DMA_FROM_DEVICE:
376 lock = &nvme_epf->dma_rx_lock;
377 chan = nvme_epf->dma_rx_chan;
378 sconf.direction = DMA_DEV_TO_MEM;
379 sconf.src_addr = seg->pci_addr;
380 break;
381 case DMA_TO_DEVICE:
382 lock = &nvme_epf->dma_tx_lock;
383 chan = nvme_epf->dma_tx_chan;
384 sconf.direction = DMA_MEM_TO_DEV;
385 sconf.dst_addr = seg->pci_addr;
386 break;
387 default:
388 return -EINVAL;
389 }
390
391 mutex_lock(lock);
392
393 dma_dev = dmaengine_get_dma_device(chan);
394 dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir);
395 ret = dma_mapping_error(dma_dev, dma_addr);
396 if (ret)
397 goto unlock;
398
399 ret = dmaengine_slave_config(chan, &sconf);
400 if (ret) {
401 dev_err(dev, "Failed to configure DMA channel\n");
402 goto unmap;
403 }
404
405 desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length,
406 sconf.direction, DMA_CTRL_ACK);
407 if (!desc) {
408 dev_err(dev, "Failed to prepare DMA\n");
409 ret = -EIO;
410 goto unmap;
411 }
412
413 cookie = dmaengine_submit(desc);
414 ret = dma_submit_error(cookie);
415 if (ret) {
416 dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret);
417 goto unmap;
418 }
419
420 if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
421 dev_err(dev, "DMA transfer failed\n");
422 ret = -EIO;
423 }
424
425 dmaengine_terminate_sync(chan);
426
427 unmap:
428 dma_unmap_single(dma_dev, dma_addr, seg->length, dir);
429
430 unlock:
431 mutex_unlock(lock);
432
433 return ret;
434 }
435
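/*
 * MMIO fallback transfer path: map the host PCI address range through the
 * endpoint controller memory windows and copy the data with memcpy_fromio()
 * or memcpy_toio(). pci_epc_mem_map() may map less than requested, so loop
 * until the full segment has been transferred.
 */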
436 static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf,
437 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
438 {
439 u64 pci_addr = seg->pci_addr;
440 u32 length = seg->length;
441 void *buf = seg->buf;
442 struct pci_epc_map map;
443 int ret = -EINVAL;
444
445 /*
446 * Note: MMIO transfers do not need serialization but this is a
447 * simple way to avoid using too many mapping windows.
448 */
449 mutex_lock(&nvme_epf->mmio_lock);
450
451 while (length) {
452 ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map);
453 if (ret)
454 break;
455
456 switch (dir) {
457 case DMA_FROM_DEVICE:
458 memcpy_fromio(buf, map.virt_addr, map.pci_size);
459 break;
460 case DMA_TO_DEVICE:
461 memcpy_toio(map.virt_addr, buf, map.pci_size);
462 break;
463 default:
464 ret = -EINVAL;
465 goto unlock;
466 }
467
468 pci_addr += map.pci_size;
469 buf += map.pci_size;
470 length -= map.pci_size;
471
472 nvmet_pci_epf_mem_unmap(nvme_epf, &map);
473 }
474
475 unlock:
476 mutex_unlock(&nvme_epf->mmio_lock);
477
478 return ret;
479 }
480
481 static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf,
482 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
483 {
484 if (nvme_epf->dma_enabled)
485 return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir);
486
487 return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir);
488 }
489
490 static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl,
491 void *buf, u64 pci_addr, u32 length,
492 enum dma_data_direction dir)
493 {
494 struct nvmet_pci_epf_segment seg = {
495 .buf = buf,
496 .pci_addr = pci_addr,
497 .length = length,
498 };
499
500 return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir);
501 }
502
503 static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
504 {
505 ctrl->irq_vectors = kcalloc(ctrl->nr_queues,
506 sizeof(struct nvmet_pci_epf_irq_vector), GFP_KERNEL);
507 if (!ctrl->irq_vectors)
508 return -ENOMEM;
509
510 mutex_init(&ctrl->irq_lock);
511
512 return 0;
513 }
514
515 static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
516 {
517 if (ctrl->irq_vectors) {
518 mutex_destroy(&ctrl->irq_lock);
519 kfree(ctrl->irq_vectors);
520 ctrl->irq_vectors = NULL;
521 }
522 }
523
524 static struct nvmet_pci_epf_irq_vector *
525 nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
526 {
527 struct nvmet_pci_epf_irq_vector *iv;
528 int i;
529
530 lockdep_assert_held(&ctrl->irq_lock);
531
532 for (i = 0; i < ctrl->nr_queues; i++) {
533 iv = &ctrl->irq_vectors[i];
534 if (iv->ref && iv->vector == vector)
535 return iv;
536 }
537
538 return NULL;
539 }
540
541 static struct nvmet_pci_epf_irq_vector *
542 nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
543 {
544 struct nvmet_pci_epf_irq_vector *iv;
545 int i;
546
547 mutex_lock(&ctrl->irq_lock);
548
549 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
550 if (iv) {
551 iv->ref++;
552 goto unlock;
553 }
554
555 for (i = 0; i < ctrl->nr_queues; i++) {
556 iv = &ctrl->irq_vectors[i];
557 if (!iv->ref)
558 break;
559 }
560
561 if (WARN_ON_ONCE(!iv))
562 goto unlock;
563
564 iv->ref = 1;
565 iv->vector = vector;
566 iv->nr_irqs = 0;
567
568 unlock:
569 mutex_unlock(&ctrl->irq_lock);
570
571 return iv;
572 }
573
574 static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl,
575 u16 vector)
576 {
577 struct nvmet_pci_epf_irq_vector *iv;
578
579 mutex_lock(&ctrl->irq_lock);
580
581 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
582 if (iv) {
583 iv->ref--;
584 if (!iv->ref) {
585 iv->vector = 0;
586 iv->nr_irqs = 0;
587 }
588 }
589
590 mutex_unlock(&ctrl->irq_lock);
591 }
592
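/*
 * Decide if an interrupt must be raised for a CQ. Called with ctrl->irq_lock
 * held. Interrupts are never coalesced for the admin queue or when coalescing
 * is disabled for the vector. Otherwise, raise an interrupt only once the
 * number of posted CQEs reaches the coalescing threshold, or when forced at
 * the end of CQ processing and CQEs were posted without an interrupt.
 */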
593 static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
594 struct nvmet_pci_epf_queue *cq, bool force)
595 {
596 struct nvmet_pci_epf_irq_vector *iv = cq->iv;
597 bool ret;
598
599 /* IRQ coalescing for the admin queue is not allowed. */
600 if (!cq->qid)
601 return true;
602
603 if (iv->cd)
604 return true;
605
606 if (force) {
607 ret = iv->nr_irqs > 0;
608 } else {
609 iv->nr_irqs++;
610 ret = iv->nr_irqs >= ctrl->irq_vector_threshold;
611 }
612 if (ret)
613 iv->nr_irqs = 0;
614
615 return ret;
616 }
617
618 static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
619 struct nvmet_pci_epf_queue *cq, bool force)
620 {
621 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
622 struct pci_epf *epf = nvme_epf->epf;
623 int ret = 0;
624
625 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) ||
626 !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
627 return;
628
629 mutex_lock(&ctrl->irq_lock);
630
631 if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force))
632 goto unlock;
633
634 switch (nvme_epf->irq_type) {
635 case PCI_IRQ_MSIX:
636 case PCI_IRQ_MSI:
637 /*
638 * If we fail to raise an MSI or MSI-X interrupt, it is likely
639 * because the host is using legacy INTX IRQs (e.g. BIOS,
640 * grub), but we can fall back to the INTX type only if the
641 * endpoint controller supports this type.
642 */
643 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
644 nvme_epf->irq_type, cq->vector + 1);
645 if (!ret || !nvme_epf->epc_features->intx_capable)
646 break;
647 fallthrough;
648 case PCI_IRQ_INTX:
649 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
650 PCI_IRQ_INTX, 0);
651 break;
652 default:
653 WARN_ON_ONCE(1);
654 ret = -EINVAL;
655 break;
656 }
657
658 if (ret)
659 dev_err_ratelimited(ctrl->dev,
660 "CQ[%u]: Failed to raise IRQ (err=%d)\n",
661 cq->qid, ret);
662
663 unlock:
664 mutex_unlock(&ctrl->irq_lock);
665 }
666
667 static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod)
668 {
669 return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);
670 }
671
672 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work);
673
674 static struct nvmet_pci_epf_iod *
675 nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq)
676 {
677 struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl;
678 struct nvmet_pci_epf_iod *iod;
679
680 iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL);
681 if (unlikely(!iod))
682 return NULL;
683
684 memset(iod, 0, sizeof(*iod));
685 iod->req.cmd = &iod->cmd;
686 iod->req.cqe = &iod->cqe;
687 iod->req.port = ctrl->port;
688 iod->ctrl = ctrl;
689 iod->sq = sq;
690 iod->cq = &ctrl->cq[sq->qid];
691 INIT_LIST_HEAD(&iod->link);
692 iod->dma_dir = DMA_NONE;
693 INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work);
694 init_completion(&iod->done);
695
696 return iod;
697 }
698
699 /*
700 * Allocate or grow a command table of PCI segments.
701 */
702 static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod,
703 int nsegs)
704 {
705 struct nvmet_pci_epf_segment *segs;
706 int nr_segs = iod->nr_data_segs + nsegs;
707
708 segs = krealloc(iod->data_segs,
709 nr_segs * sizeof(struct nvmet_pci_epf_segment),
710 GFP_KERNEL | __GFP_ZERO);
711 if (!segs)
712 return -ENOMEM;
713
714 iod->nr_data_segs = nr_segs;
715 iod->data_segs = segs;
716
717 return 0;
718 }
719
720 static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)
721 {
722 int i;
723
724 if (iod->data_segs) {
725 for (i = 0; i < iod->nr_data_segs; i++)
726 kfree(iod->data_segs[i].buf);
727 if (iod->data_segs != &iod->data_seg)
728 kfree(iod->data_segs);
729 }
730 if (iod->data_sgt.nents > 1)
731 sg_free_table(&iod->data_sgt);
732 mempool_free(iod, &iod->ctrl->iod_pool);
733 }
734
735 static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)
736 {
737 struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf;
738 struct nvmet_pci_epf_segment *seg = &iod->data_segs[0];
739 int i, ret;
740
741 /* Split the data transfer according to the PCI segments. */
742 for (i = 0; i < iod->nr_data_segs; i++, seg++) {
743 ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir);
744 if (ret) {
745 iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR;
746 return ret;
747 }
748 }
749
750 return 0;
751 }
752
753 static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl,
754 u64 prp)
755 {
756 return prp & ctrl->mps_mask;
757 }
758
759 static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl,
760 u64 prp)
761 {
762 return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp);
763 }
764
765 /*
766 * Transfer a PRP list from the host and return the number of PRPs.
767 */
768 static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp,
769 size_t xfer_len, __le64 *prps)
770 {
771 size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift;
772 u32 length;
773 int ret;
774
775 /*
776 * Compute the number of PRPs required for the number of bytes to
777 * transfer (xfer_len). If this number does not fit in the memory page
778 * containing the specified PRP list pointer, only return the number of
779 * PRPs that fit in that page; the last PRP of that page will then be a
780 * list pointer to the remaining PRPs.
781 */
782 length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3);
783 ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE);
784 if (ret)
785 return ret;
786
787 return length >> 3;
788 }
789
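/*
 * Parse the PRPs of a command transferring more than two memory pages of
 * data: prp1 is the first data page and prp2 points to a (possibly chained)
 * PRP list describing the remaining pages. PCI-contiguous PRPs are merged
 * into a single PCI address segment.
 */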
790 static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl,
791 struct nvmet_pci_epf_iod *iod)
792 {
793 struct nvme_command *cmd = &iod->cmd;
794 struct nvmet_pci_epf_segment *seg;
795 size_t size = 0, ofst, prp_size, xfer_len;
796 size_t transfer_len = iod->data_len;
797 int nr_segs, nr_prps = 0;
798 u64 pci_addr, prp;
799 int i = 0, ret;
800 __le64 *prps;
801
802 prps = kzalloc(ctrl->mps, GFP_KERNEL);
803 if (!prps)
804 goto err_internal;
805
806 /*
807 * Allocate PCI segments for the command: this considers the worst case
808 * scenario where all PRPs are discontiguous, so get as many segments
809 * as we can have PRPs. In practice, most of the time, we will have
810 * far fewer PCI segments than PRPs.
811 */
812 prp = le64_to_cpu(cmd->common.dptr.prp1);
813 if (!prp)
814 goto err_invalid_field;
815
816 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp);
817 nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift;
818
819 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
820 if (ret)
821 goto err_internal;
822
823 /* Set the first segment using prp1. */
824 seg = &iod->data_segs[0];
825 seg->pci_addr = prp;
826 seg->length = nvmet_pci_epf_prp_size(ctrl, prp);
827
828 size = seg->length;
829 pci_addr = prp + size;
830 nr_segs = 1;
831
832 /*
833 * Now build the PCI address segments using the PRP lists, starting
834 * from prp2.
835 */
836 prp = le64_to_cpu(cmd->common.dptr.prp2);
837 if (!prp)
838 goto err_invalid_field;
839
840 while (size < transfer_len) {
841 xfer_len = transfer_len - size;
842
843 if (!nr_prps) {
844 nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp,
845 xfer_len, prps);
846 if (nr_prps < 0)
847 goto err_internal;
848
849 i = 0;
850 ofst = 0;
851 }
852
853 /* Current entry */
854 prp = le64_to_cpu(prps[i]);
855 if (!prp)
856 goto err_invalid_field;
857
858 /* Did we reach the last PRP entry of the list? */
859 if (xfer_len > ctrl->mps && i == nr_prps - 1) {
860 /* We need more PRPs: PRP is a list pointer. */
861 nr_prps = 0;
862 continue;
863 }
864
865 /* Only the first PRP is allowed to have an offset. */
866 if (nvmet_pci_epf_prp_ofst(ctrl, prp))
867 goto err_invalid_offset;
868
869 if (prp != pci_addr) {
870 /* Discontiguous prp: new segment. */
871 nr_segs++;
872 if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs))
873 goto err_internal;
874
875 seg++;
876 seg->pci_addr = prp;
877 seg->length = 0;
878 pci_addr = prp;
879 }
880
881 prp_size = min_t(size_t, ctrl->mps, xfer_len);
882 seg->length += prp_size;
883 pci_addr += prp_size;
884 size += prp_size;
885
886 i++;
887 }
888
889 iod->nr_data_segs = nr_segs;
890 ret = 0;
891
892 if (size != transfer_len) {
893 dev_err(ctrl->dev,
894 "PRPs transfer length mismatch: got %zu B, need %zu B\n",
895 size, transfer_len);
896 goto err_internal;
897 }
898
899 kfree(prps);
900
901 return 0;
902
903 err_invalid_offset:
904 dev_err(ctrl->dev, "PRPs list invalid offset\n");
905 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
906 goto err;
907
908 err_invalid_field:
909 dev_err(ctrl->dev, "PRPs list invalid field\n");
910 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
911 goto err;
912
913 err_internal:
914 dev_err(ctrl->dev, "PRPs list internal error\n");
915 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
916
917 err:
918 kfree(prps);
919 return -EINVAL;
920 }
921
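/*
 * Parse the PRPs of a command transferring at most two memory pages of data:
 * prp1 describes the first page and, if the data crosses a page boundary,
 * prp2 is a second data pointer (not a list pointer).
 */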
922 static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl,
923 struct nvmet_pci_epf_iod *iod)
924 {
925 struct nvme_command *cmd = &iod->cmd;
926 size_t transfer_len = iod->data_len;
927 int ret, nr_segs = 1;
928 u64 prp1, prp2 = 0;
929 size_t prp1_size;
930
931 prp1 = le64_to_cpu(cmd->common.dptr.prp1);
932 prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1);
933
934 /* For commands crossing a page boundary, we should have prp2. */
935 if (transfer_len > prp1_size) {
936 prp2 = le64_to_cpu(cmd->common.dptr.prp2);
937 if (!prp2) {
938 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
939 return -EINVAL;
940 }
941 if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) {
942 iod->status =
943 NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
944 return -EINVAL;
945 }
946 if (prp2 != prp1 + prp1_size)
947 nr_segs = 2;
948 }
949
950 if (nr_segs == 1) {
951 iod->nr_data_segs = 1;
952 iod->data_segs = &iod->data_seg;
953 iod->data_segs[0].pci_addr = prp1;
954 iod->data_segs[0].length = transfer_len;
955 return 0;
956 }
957
958 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
959 if (ret) {
960 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
961 return ret;
962 }
963
964 iod->data_segs[0].pci_addr = prp1;
965 iod->data_segs[0].length = prp1_size;
966 iod->data_segs[1].pci_addr = prp2;
967 iod->data_segs[1].length = transfer_len - prp1_size;
968
969 return 0;
970 }
971
972 static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod)
973 {
974 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
975 u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1);
976 size_t ofst;
977
978 /* Get the PCI address segments for the command using its PRPs. */
979 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1);
980 if (ofst & 0x3) {
981 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
982 return -EINVAL;
983 }
984
985 if (iod->data_len + ofst <= ctrl->mps * 2)
986 return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod);
987
988 return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod);
989 }
990
991 /*
992 * Transfer an SGL segment from the host and return the number of data
993 * descriptors and the next segment descriptor, if any.
994 */
995 static struct nvme_sgl_desc *
996 nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl,
997 struct nvme_sgl_desc *desc, unsigned int *nr_sgls)
998 {
999 struct nvme_sgl_desc *sgls;
1000 u32 length = le32_to_cpu(desc->length);
1001 int nr_descs, ret;
1002 void *buf;
1003
1004 buf = kmalloc(length, GFP_KERNEL);
1005 if (!buf)
1006 return NULL;
1007
1008 ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length,
1009 DMA_FROM_DEVICE);
1010 if (ret) {
1011 kfree(buf);
1012 return NULL;
1013 }
1014
1015 sgls = buf;
1016 nr_descs = length / sizeof(struct nvme_sgl_desc);
1017 if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) ||
1018 sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
1019 /*
1020 * We have another SGL segment following this one: do not count
1021 * it as a regular data SGL descriptor and return it to the
1022 * caller.
1023 */
1024 *desc = sgls[nr_descs - 1];
1025 nr_descs--;
1026 } else {
1027 /* We do not have another SGL segment after this one. */
1028 desc->length = 0;
1029 }
1030
1031 *nr_sgls = nr_descs;
1032
1033 return sgls;
1034 }
1035
1036 static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl,
1037 struct nvmet_pci_epf_iod *iod)
1038 {
1039 struct nvme_command *cmd = &iod->cmd;
1040 struct nvme_sgl_desc seg = cmd->common.dptr.sgl;
1041 struct nvme_sgl_desc *sgls = NULL;
1042 int n = 0, i, nr_sgls;
1043 int ret;
1044
1045 /*
1046 * We do not support inline data nor keyed SGLs, so we should be seeing
1047 * only segment descriptors.
1048 */
1049 if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) &&
1050 seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
1051 iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
1052 return -EIO;
1053 }
1054
1055 while (seg.length) {
1056 sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls);
1057 if (!sgls) {
1058 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1059 return -EIO;
1060 }
1061
1062 /* Grow the PCI segment table as needed. */
1063 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls);
1064 if (ret) {
1065 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1066 goto out;
1067 }
1068
1069 /*
1070 * Parse the SGL descriptors to build the PCI segment table,
1071 * checking the descriptor type as we go.
1072 */
1073 for (i = 0; i < nr_sgls; i++) {
1074 if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) {
1075 iod->status = NVME_SC_SGL_INVALID_TYPE |
1076 NVME_STATUS_DNR;
1077 goto out;
1078 }
1079 iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr);
1080 iod->data_segs[n].length = le32_to_cpu(sgls[i].length);
1081 n++;
1082 }
1083
1084 kfree(sgls);
1085 }
1086
1087 out:
1088 if (iod->status != NVME_SC_SUCCESS) {
1089 kfree(sgls);
1090 return -EIO;
1091 }
1092
1093 return 0;
1094 }
1095
1096 static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod)
1097 {
1098 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
1099 struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl;
1100
1101 if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
1102 /* Single data descriptor case. */
1103 iod->nr_data_segs = 1;
1104 iod->data_segs = &iod->data_seg;
1105 iod->data_seg.pci_addr = le64_to_cpu(sgl->addr);
1106 iod->data_seg.length = le32_to_cpu(sgl->length);
1107 return 0;
1108 }
1109
1110 return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod);
1111 }
1112
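/*
 * Allocate the local data buffer for a command: parse the command PRPs or
 * SGLs into PCI address segments, allocate a buffer for each segment and
 * build the scatterlist used to execute the request with the target core.
 */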
1113 static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod)
1114 {
1115 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
1116 struct nvmet_req *req = &iod->req;
1117 struct nvmet_pci_epf_segment *seg;
1118 struct scatterlist *sg;
1119 int ret, i;
1120
1121 if (iod->data_len > ctrl->mdts) {
1122 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1123 return -EINVAL;
1124 }
1125
1126 /*
1127 * Get the PCI address segments for the command data buffer using either
1128 * its SGLs or PRPs.
1129 */
1130 if (iod->cmd.common.flags & NVME_CMD_SGL_ALL)
1131 ret = nvmet_pci_epf_iod_parse_sgls(iod);
1132 else
1133 ret = nvmet_pci_epf_iod_parse_prps(iod);
1134 if (ret)
1135 return ret;
1136
1137 /* Get a command buffer using SGLs matching the PCI segments. */
1138 if (iod->nr_data_segs == 1) {
1139 sg_init_table(&iod->data_sgl, 1);
1140 iod->data_sgt.sgl = &iod->data_sgl;
1141 iod->data_sgt.nents = 1;
1142 iod->data_sgt.orig_nents = 1;
1143 } else {
1144 ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
1145 GFP_KERNEL);
1146 if (ret)
1147 goto err_nomem;
1148 }
1149
1150 for_each_sgtable_sg(&iod->data_sgt, sg, i) {
1151 seg = &iod->data_segs[i];
1152 seg->buf = kmalloc(seg->length, GFP_KERNEL);
1153 if (!seg->buf)
1154 goto err_nomem;
1155 sg_set_buf(sg, seg->buf, seg->length);
1156 }
1157
1158 req->transfer_len = iod->data_len;
1159 req->sg = iod->data_sgt.sgl;
1160 req->sg_cnt = iod->data_sgt.nents;
1161
1162 return 0;
1163
1164 err_nomem:
1165 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1166 return -ENOMEM;
1167 }
1168
1169 static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)
1170 {
1171 struct nvmet_pci_epf_queue *cq = iod->cq;
1172 unsigned long flags;
1173
1174 /* Print an error message for failed commands, except AENs. */
1175 iod->status = le16_to_cpu(iod->cqe.status) >> 1;
1176 if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
1177 dev_err(iod->ctrl->dev,
1178 "CQ[%d]: Command %s (0x%x) status 0x%0x\n",
1179 iod->sq->qid, nvmet_pci_epf_iod_name(iod),
1180 iod->cmd.common.opcode, iod->status);
1181
1182 /*
1183 * Add the command to the list of completed commands and schedule the
1184 * CQ work.
1185 */
1186 spin_lock_irqsave(&cq->lock, flags);
1187 list_add_tail(&iod->link, &cq->list);
1188 queue_delayed_work(system_highpri_wq, &cq->work, 0);
1189 spin_unlock_irqrestore(&cq->lock, flags);
1190 }
1191
1192 static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue)
1193 {
1194 struct nvmet_pci_epf_iod *iod;
1195 unsigned long flags;
1196
1197 spin_lock_irqsave(&queue->lock, flags);
1198 while (!list_empty(&queue->list)) {
1199 iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod,
1200 link);
1201 list_del_init(&iod->link);
1202 nvmet_pci_epf_free_iod(iod);
1203 }
1204 spin_unlock_irqrestore(&queue->lock, flags);
1205 }
1206
1207 static int nvmet_pci_epf_add_port(struct nvmet_port *port)
1208 {
1209 mutex_lock(&nvmet_pci_epf_ports_mutex);
1210 list_add_tail(&port->entry, &nvmet_pci_epf_ports);
1211 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1212 return 0;
1213 }
1214
1215 static void nvmet_pci_epf_remove_port(struct nvmet_port *port)
1216 {
1217 mutex_lock(&nvmet_pci_epf_ports_mutex);
1218 list_del_init(&port->entry);
1219 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1220 }
1221
1222 static struct nvmet_port *
1223 nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid)
1224 {
1225 struct nvmet_port *p, *port = NULL;
1226
1227 mutex_lock(&nvmet_pci_epf_ports_mutex);
1228 list_for_each_entry(p, &nvmet_pci_epf_ports, entry) {
1229 if (p->disc_addr.portid == portid) {
1230 port = p;
1231 break;
1232 }
1233 }
1234 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1235
1236 return port;
1237 }
1238
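/*
 * Target core completion callback: commands that failed or have no data to
 * transfer to the host are completed immediately. For commands with data to
 * transfer to the host, wake up nvmet_pci_epf_exec_iod_work(), which performs
 * the transfer before completing the command.
 */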
1239 static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
1240 {
1241 struct nvmet_pci_epf_iod *iod =
1242 container_of(req, struct nvmet_pci_epf_iod, req);
1243
1244 iod->status = le16_to_cpu(req->cqe->status) >> 1;
1245
1246 /*
1247 * If the command failed or we have no data to transfer, complete the
1248 * command immediately.
1249 */
1250 if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
1251 nvmet_pci_epf_complete_iod(iod);
1252 return;
1253 }
1254
1255 complete(&iod->done);
1256 }
1257
1258 static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl)
1259 {
1260 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1261 int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12;
1262
1263 return ilog2(ctrl->mdts) - page_shift;
1264 }
1265
1266 static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
1267 u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector)
1268 {
1269 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1270 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1271 u16 status;
1272 int ret;
1273
1274 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
1275 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1276
1277 if (!(flags & NVME_QUEUE_PHYS_CONTIG))
1278 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
1279
1280 cq->pci_addr = pci_addr;
1281 cq->qid = cqid;
1282 cq->depth = qsize + 1;
1283 cq->vector = vector;
1284 cq->head = 0;
1285 cq->tail = 0;
1286 cq->phase = 1;
1287 cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32));
1288 nvmet_pci_epf_bar_write32(ctrl, cq->db, 0);
1289
1290 if (!cqid)
1291 cq->qes = sizeof(struct nvme_completion);
1292 else
1293 cq->qes = ctrl->io_cqes;
1294 cq->pci_size = cq->qes * cq->depth;
1295
1296 if (flags & NVME_CQ_IRQ_ENABLED) {
1297 cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
1298 if (!cq->iv)
1299 return NVME_SC_INTERNAL | NVME_STATUS_DNR;
1300 set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
1301 }
1302
1303 status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
1304 if (status != NVME_SC_SUCCESS)
1305 goto err;
1306
1307 /*
1308 * Map the CQ PCI address space. Since PCI endpoint controllers may
1309 * return a partial mapping, check that the mapping is large enough.
1310 */
1311 ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size,
1312 &cq->pci_map);
1313 if (ret) {
1314 dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n",
1315 cq->qid, ret);
1316 goto err_internal;
1317 }
1318
1319 if (cq->pci_map.pci_size < cq->pci_size) {
1320 dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
1321 cq->qid);
1322 goto err_unmap_queue;
1323 }
1324
1325 set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
1326
1327 if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1328 dev_dbg(ctrl->dev,
1329 "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
1330 cqid, qsize, cq->qes, cq->vector);
1331 else
1332 dev_dbg(ctrl->dev,
1333 "CQ[%u]: %u entries of %zu B, IRQ disabled\n",
1334 cqid, qsize, cq->qes);
1335
1336 return NVME_SC_SUCCESS;
1337
1338 err_unmap_queue:
1339 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
1340 err_internal:
1341 status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1342 err:
1343 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1344 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
1345 return status;
1346 }
1347
1348 static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
1349 {
1350 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1351 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1352
1353 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
1354 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1355
1356 cancel_delayed_work_sync(&cq->work);
1357 nvmet_pci_epf_drain_queue(cq);
1358 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1359 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
1360 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
1361 nvmet_cq_put(&cq->nvme_cq);
1362
1363 return NVME_SC_SUCCESS;
1364 }
1365
1366 static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
1367 u16 sqid, u16 cqid, u16 flags, u16 qsize, u64 pci_addr)
1368 {
1369 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1370 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
1371 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1372 u16 status;
1373
1374 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1375 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1376
1377 if (!(flags & NVME_QUEUE_PHYS_CONTIG))
1378 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
1379
1380 sq->pci_addr = pci_addr;
1381 sq->qid = sqid;
1382 sq->depth = qsize + 1;
1383 sq->head = 0;
1384 sq->tail = 0;
1385 sq->phase = 0;
1386 sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32));
1387 nvmet_pci_epf_bar_write32(ctrl, sq->db, 0);
1388 if (!sqid)
1389 sq->qes = 1UL << NVME_ADM_SQES;
1390 else
1391 sq->qes = ctrl->io_sqes;
1392 sq->pci_size = sq->qes * sq->depth;
1393
1394 status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid,
1395 sq->depth);
1396 if (status != NVME_SC_SUCCESS)
1397 return status;
1398
1399 sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
1400 min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
1401 if (!sq->iod_wq) {
1402 dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid);
1403 status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1404 goto out_destroy_sq;
1405 }
1406
1407 set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
1408
1409 dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
1410 sqid, qsize, sq->qes);
1411
1412 return NVME_SC_SUCCESS;
1413
1414 out_destroy_sq:
1415 nvmet_sq_destroy(&sq->nvme_sq);
1416 return status;
1417 }
1418
1419 static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
1420 {
1421 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1422 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
1423
1424 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1425 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1426
1427 destroy_workqueue(sq->iod_wq);
1428 sq->iod_wq = NULL;
1429
1430 nvmet_pci_epf_drain_queue(sq);
1431
1432 if (sq->nvme_sq.ctrl)
1433 nvmet_sq_destroy(&sq->nvme_sq);
1434
1435 return NVME_SC_SUCCESS;
1436 }
1437
1438 static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl,
1439 u8 feat, void *data)
1440 {
1441 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1442 struct nvmet_feat_arbitration *arb;
1443 struct nvmet_feat_irq_coalesce *irqc;
1444 struct nvmet_feat_irq_config *irqcfg;
1445 struct nvmet_pci_epf_irq_vector *iv;
1446 u16 status;
1447
1448 switch (feat) {
1449 case NVME_FEAT_ARBITRATION:
1450 arb = data;
1451 if (!ctrl->sq_ab)
1452 arb->ab = 0x7;
1453 else
1454 arb->ab = ilog2(ctrl->sq_ab);
1455 return NVME_SC_SUCCESS;
1456
1457 case NVME_FEAT_IRQ_COALESCE:
1458 irqc = data;
1459 irqc->thr = ctrl->irq_vector_threshold;
1460 irqc->time = 0;
1461 return NVME_SC_SUCCESS;
1462
1463 case NVME_FEAT_IRQ_CONFIG:
1464 irqcfg = data;
1465 mutex_lock(&ctrl->irq_lock);
1466 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
1467 if (iv) {
1468 irqcfg->cd = iv->cd;
1469 status = NVME_SC_SUCCESS;
1470 } else {
1471 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1472 }
1473 mutex_unlock(&ctrl->irq_lock);
1474 return status;
1475
1476 default:
1477 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1478 }
1479 }
1480
1481 static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl,
1482 u8 feat, void *data)
1483 {
1484 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1485 struct nvmet_feat_arbitration *arb;
1486 struct nvmet_feat_irq_coalesce *irqc;
1487 struct nvmet_feat_irq_config *irqcfg;
1488 struct nvmet_pci_epf_irq_vector *iv;
1489 u16 status;
1490
1491 switch (feat) {
1492 case NVME_FEAT_ARBITRATION:
1493 arb = data;
1494 if (arb->ab == 0x7)
1495 ctrl->sq_ab = 0;
1496 else
1497 ctrl->sq_ab = 1 << arb->ab;
1498 return NVME_SC_SUCCESS;
1499
1500 case NVME_FEAT_IRQ_COALESCE:
1501 /*
1502 * Since we do not implement precise IRQ coalescing timing,
1503 * ignore the time field.
1504 */
1505 irqc = data;
1506 ctrl->irq_vector_threshold = irqc->thr + 1;
1507 return NVME_SC_SUCCESS;
1508
1509 case NVME_FEAT_IRQ_CONFIG:
1510 irqcfg = data;
1511 mutex_lock(&ctrl->irq_lock);
1512 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
1513 if (iv) {
1514 iv->cd = irqcfg->cd;
1515 status = NVME_SC_SUCCESS;
1516 } else {
1517 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1518 }
1519 mutex_unlock(&ctrl->irq_lock);
1520 return status;
1521
1522 default:
1523 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1524 }
1525 }
1526
1527 static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
1528 .owner = THIS_MODULE,
1529 .type = NVMF_TRTYPE_PCI,
1530 .add_port = nvmet_pci_epf_add_port,
1531 .remove_port = nvmet_pci_epf_remove_port,
1532 .queue_response = nvmet_pci_epf_queue_response,
1533 .get_mdts = nvmet_pci_epf_get_mdts,
1534 .create_cq = nvmet_pci_epf_create_cq,
1535 .delete_cq = nvmet_pci_epf_delete_cq,
1536 .create_sq = nvmet_pci_epf_create_sq,
1537 .delete_sq = nvmet_pci_epf_delete_sq,
1538 .get_feature = nvmet_pci_epf_get_feat,
1539 .set_feature = nvmet_pci_epf_set_feat,
1540 };
1541
1542 static void nvmet_pci_epf_cq_work(struct work_struct *work);
1543
1544 static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
1545 unsigned int qid, bool sq)
1546 {
1547 struct nvmet_pci_epf_queue *queue;
1548
1549 if (sq) {
1550 queue = &ctrl->sq[qid];
1551 } else {
1552 queue = &ctrl->cq[qid];
1553 INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
1554 }
1555 queue->ctrl = ctrl;
1556 queue->qid = qid;
1557 spin_lock_init(&queue->lock);
1558 INIT_LIST_HEAD(&queue->list);
1559 }
1560
1561 static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl)
1562 {
1563 unsigned int qid;
1564
1565 ctrl->sq = kcalloc(ctrl->nr_queues, sizeof(*ctrl->sq), GFP_KERNEL);
1566 if (!ctrl->sq)
1567 return -ENOMEM;
1568
1569 ctrl->cq = kcalloc(ctrl->nr_queues, sizeof(*ctrl->cq), GFP_KERNEL);
1570 if (!ctrl->cq) {
1571 kfree(ctrl->sq);
1572 ctrl->sq = NULL;
1573 return -ENOMEM;
1574 }
1575
1576 for (qid = 0; qid < ctrl->nr_queues; qid++) {
1577 nvmet_pci_epf_init_queue(ctrl, qid, true);
1578 nvmet_pci_epf_init_queue(ctrl, qid, false);
1579 }
1580
1581 return 0;
1582 }
1583
1584 static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
1585 {
1586 kfree(ctrl->sq);
1587 ctrl->sq = NULL;
1588 kfree(ctrl->cq);
1589 ctrl->cq = NULL;
1590 }
1591
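/*
 * Command execution work: initialize the request, allocate the data buffer
 * and fetch the data from the host for write commands, then execute the
 * command with the target core. For read commands, wait for execution to
 * complete and transfer the data to the host before posting the completion.
 */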
1592 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
1593 {
1594 struct nvmet_pci_epf_iod *iod =
1595 container_of(work, struct nvmet_pci_epf_iod, work);
1596 struct nvmet_req *req = &iod->req;
1597 int ret;
1598
1599 if (!iod->ctrl->link_up) {
1600 nvmet_pci_epf_free_iod(iod);
1601 return;
1602 }
1603
1604 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) {
1605 iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1606 goto complete;
1607 }
1608
1609 /*
1610 * If nvmet_req_init() fails (e.g., unsupported opcode), it will call
1611 * __nvmet_req_complete() internally, which calls
1612 * nvmet_pci_epf_queue_response() and completes the command directly.
1613 */
1614 if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
1615 return;
1616
1617 iod->data_len = nvmet_req_transfer_len(req);
1618 if (iod->data_len) {
1619 /*
1620 * Get the data DMA transfer direction. Here "device" means the
1621 * PCI root-complex host.
1622 */
1623 if (nvme_is_write(&iod->cmd))
1624 iod->dma_dir = DMA_FROM_DEVICE;
1625 else
1626 iod->dma_dir = DMA_TO_DEVICE;
1627
1628 /*
1629 * Setup the command data buffer and get the command data from
1630 * the host if needed.
1631 */
1632 ret = nvmet_pci_epf_alloc_iod_data_buf(iod);
1633 if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
1634 ret = nvmet_pci_epf_transfer_iod_data(iod);
1635 if (ret) {
1636 nvmet_req_uninit(req);
1637 goto complete;
1638 }
1639 }
1640
1641 req->execute(req);
1642
1643 /*
1644 * If we do not have data to transfer after the command execution
1645 * finishes, nvmet_pci_epf_queue_response() will complete the command
1646 * directly. No need to wait for the completion in this case.
1647 */
1648 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE)
1649 return;
1650
1651 wait_for_completion(&iod->done);
1652
1653 if (iod->status != NVME_SC_SUCCESS)
1654 return;
1655
1656 WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
1657 nvmet_pci_epf_transfer_iod_data(iod);
1658
1659 complete:
1660 nvmet_pci_epf_complete_iod(iod);
1661 }
1662
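/*
 * Fetch commands from a submission queue, up to the arbitration burst
 * (ctrl->sq_ab) per call, and queue each one for execution. Returns the
 * number of commands fetched.
 */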
1663 static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
1664 struct nvmet_pci_epf_queue *sq)
1665 {
1666 struct nvmet_pci_epf_iod *iod;
1667 int ret, n = 0;
1668 u16 head = sq->head;
1669
1670 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
1671 while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
1672 iod = nvmet_pci_epf_alloc_iod(sq);
1673 if (!iod)
1674 break;
1675
1676 /* Get the NVMe command submitted by the host. */
1677 ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
1678 sq->pci_addr + head * sq->qes,
1679 sq->qes, DMA_FROM_DEVICE);
1680 if (ret) {
1681 /* Not much we can do... */
1682 nvmet_pci_epf_free_iod(iod);
1683 break;
1684 }
1685
1686 dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
1687 sq->qid, head, sq->tail,
1688 nvmet_pci_epf_iod_name(iod));
1689
1690 head++;
1691 if (head == sq->depth)
1692 head = 0;
1693 WRITE_ONCE(sq->head, head);
1694 n++;
1695
1696 queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
1697
1698 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
1699 }
1700
1701 return n;
1702 }
1703
1704 static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
1705 {
1706 struct nvmet_pci_epf_ctrl *ctrl =
1707 container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
1708 struct nvmet_pci_epf_queue *sq;
1709 unsigned long limit = jiffies;
1710 unsigned long last = 0;
1711 int i, nr_sqs;
1712
1713 while (ctrl->link_up && ctrl->enabled) {
1714 nr_sqs = 0;
1715 /* Do round-robin arbitration. */
1716 for (i = 0; i < ctrl->nr_queues; i++) {
1717 sq = &ctrl->sq[i];
1718 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1719 continue;
1720 if (nvmet_pci_epf_process_sq(ctrl, sq))
1721 nr_sqs++;
1722 }
1723
1724 /*
1725 * If we have been running for a while, reschedule to let other
1726 * tasks run and to avoid RCU stalls.
1727 */
1728 if (time_is_before_jiffies(limit + secs_to_jiffies(1))) {
1729 cond_resched();
1730 limit = jiffies;
1731 continue;
1732 }
1733
1734 if (nr_sqs) {
1735 last = jiffies;
1736 continue;
1737 }
1738
1739 /*
1740 * If we have not received any command on any queue for more
1741 * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and
1742 * reschedule. This avoids "burning" a CPU when the controller
1743 * is idle for a long time.
1744 */
1745 if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE))
1746 break;
1747
1748 cpu_relax();
1749 }
1750
1751 schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL);
1752 }
1753
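/*
 * CQ work: post completion entries for executed commands to the host CQ,
 * toggling the phase bit when the tail wraps, and raise interrupts according
 * to the coalescing settings. If the host CQ is full, reschedule and retry
 * after NVMET_PCI_EPF_CQ_RETRY_INTERVAL.
 */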
1754 static void nvmet_pci_epf_cq_work(struct work_struct *work)
1755 {
1756 struct nvmet_pci_epf_queue *cq =
1757 container_of(work, struct nvmet_pci_epf_queue, work.work);
1758 struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl;
1759 struct nvme_completion *cqe;
1760 struct nvmet_pci_epf_iod *iod;
1761 unsigned long flags;
1762 int ret = 0, n = 0;
1763
1764 while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
1765
1766 /* Check that the CQ is not full. */
1767 cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db);
1768 if (cq->head == cq->tail + 1) {
1769 ret = -EAGAIN;
1770 break;
1771 }
1772
1773 spin_lock_irqsave(&cq->lock, flags);
1774 iod = list_first_entry_or_null(&cq->list,
1775 struct nvmet_pci_epf_iod, link);
1776 if (iod)
1777 list_del_init(&iod->link);
1778 spin_unlock_irqrestore(&cq->lock, flags);
1779
1780 if (!iod)
1781 break;
1782
1783 /*
1784 * Post the IOD completion entry. If the IOD request was
1785 * executed (req->execute() called), the CQE is already
1786 * initialized. However, the IOD may have been failed before
1787 * that, leaving the CQE not properly initialized. So always
1788 * initialize it here.
1789 */
1790 cqe = &iod->cqe;
1791 cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head));
1792 cqe->sq_id = cpu_to_le16(iod->sq->qid);
1793 cqe->command_id = iod->cmd.common.command_id;
1794 cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
1795
1796 dev_dbg(ctrl->dev,
1797 "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n",
1798 cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
1799 le64_to_cpu(cqe->result.u64), cq->head, cq->tail,
1800 cq->phase);
1801
1802 memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes,
1803 cqe, cq->qes);
1804
1805 cq->tail++;
1806 if (cq->tail >= cq->depth) {
1807 cq->tail = 0;
1808 cq->phase ^= 1;
1809 }
1810
1811 nvmet_pci_epf_free_iod(iod);
1812
1813 /* Signal the host. */
1814 nvmet_pci_epf_raise_irq(ctrl, cq, false);
1815 n++;
1816 }
1817
1818 /*
1819 * We do not support precise IRQ coalescing time (100ns units as per
1820 * NVMe specifications). So if we have posted completion entries without
1821 * reaching the interrupt coalescing threshold, raise an interrupt.
1822 */
1823 if (n)
1824 nvmet_pci_epf_raise_irq(ctrl, cq, true);
1825
1826 if (ret < 0)
1827 queue_delayed_work(system_highpri_wq, &cq->work,
1828 NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
1829 }
1830
1831 static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl)
1832 {
1833 struct nvmet_ctrl *tctrl = ctrl->tctrl;
1834
1835 /* Initialize controller status. */
1836 tctrl->csts = 0;
1837 ctrl->csts = 0;
1838 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
1839
1840 /* Initialize controller configuration and start polling. */
1841 tctrl->cc = 0;
1842 ctrl->cc = 0;
1843 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
1844 }
1845
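/*
 * Enable the controller as requested by the host through CC.EN: validate the
 * I/O queue entry sizes and memory page size set in CC, create the admin SQ
 * and CQ from the AQA, ASQ and ACQ registers, and start polling the SQ
 * doorbells.
 */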
1846 static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
1847 {
1848 u64 pci_addr, asq, acq;
1849 u32 aqa;
1850 u16 status, qsize;
1851
1852 if (ctrl->enabled)
1853 return 0;
1854
1855 dev_info(ctrl->dev, "Enabling controller\n");
1856
1857 ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12;
1858 ctrl->mps = 1UL << ctrl->mps_shift;
1859 ctrl->mps_mask = ctrl->mps - 1;
1860
1861 ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc);
1862 if (ctrl->io_sqes < sizeof(struct nvme_command)) {
1863 dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
1864 ctrl->io_sqes, sizeof(struct nvme_command));
1865 goto err;
1866 }
1867
1868 ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
1869 if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
1870 dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
1871 ctrl->io_cqes, sizeof(struct nvme_completion));
1872 goto err;
1873 }
1874
1875 /* Create the admin queue. */
1876 aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA);
1877 asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ);
1878 acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ);
1879
1880 qsize = (aqa & 0x0fff0000) >> 16;
1881 pci_addr = acq & GENMASK_ULL(63, 12);
1882 status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0,
1883 NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG,
1884 qsize, pci_addr, 0);
1885 if (status != NVME_SC_SUCCESS) {
1886 dev_err(ctrl->dev, "Failed to create admin completion queue\n");
1887 goto err;
1888 }
1889
1890 qsize = aqa & 0x00000fff;
1891 pci_addr = asq & GENMASK_ULL(63, 12);
1892 status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0,
1893 NVME_QUEUE_PHYS_CONTIG, qsize, pci_addr);
1894 if (status != NVME_SC_SUCCESS) {
1895 dev_err(ctrl->dev, "Failed to create admin submission queue\n");
1896 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
1897 goto err;
1898 }
1899
1900 ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
1901 ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
1902 ctrl->enabled = true;
1903 ctrl->csts = NVME_CSTS_RDY;
1904
1905 /* Start polling the controller SQs. */
1906 schedule_delayed_work(&ctrl->poll_sqs, 0);
1907
1908 return 0;
1909
1910 err:
1911 nvmet_pci_epf_clear_ctrl_config(ctrl);
1912 return -EINVAL;
1913 }
1914
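/*
 * Disable the controller: stop polling the submission queues, delete all
 * I/O queues and then the admin queue, and clear CSTS.RDY. For a shutdown,
 * also report shutdown completion and clear CC.EN.
 */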
1915 static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl,
1916 bool shutdown)
1917 {
1918 int qid;
1919
1920 if (!ctrl->enabled)
1921 return;
1922
1923 dev_info(ctrl->dev, "%s controller\n",
1924 shutdown ? "Shutting down" : "Disabling");
1925
1926 ctrl->enabled = false;
1927 cancel_delayed_work_sync(&ctrl->poll_sqs);
1928
1929 /* Delete all I/O queues first. */
1930 for (qid = 1; qid < ctrl->nr_queues; qid++)
1931 nvmet_pci_epf_delete_sq(ctrl->tctrl, qid);
1932
1933 for (qid = 1; qid < ctrl->nr_queues; qid++)
1934 nvmet_pci_epf_delete_cq(ctrl->tctrl, qid);
1935
1936 /* Delete the admin queue last. */
1937 nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
1938 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
1939
1940 ctrl->csts &= ~NVME_CSTS_RDY;
1941 if (shutdown) {
1942 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1943 ctrl->cc &= ~NVME_CC_ENABLE;
1944 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
1945 }
1946 }
1947
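/*
 * Host writes to BAR 0 registers are not signaled to the endpoint, so the
 * CC register is polled at regular intervals and changes to its EN and SHN
 * fields are translated into controller enable, disable and shutdown
 * operations.
 */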
1948 static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
1949 {
1950 struct nvmet_pci_epf_ctrl *ctrl =
1951 container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work);
1952 u32 old_cc, new_cc;
1953 int ret;
1954
1955 if (!ctrl->tctrl)
1956 return;
1957
1958 old_cc = ctrl->cc;
1959 new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
1960 if (new_cc == old_cc)
1961 goto reschedule_work;
1962
1963 ctrl->cc = new_cc;
1964
1965 if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
1966 ret = nvmet_pci_epf_enable_ctrl(ctrl);
1967 if (ret)
1968 goto reschedule_work;
1969 }
1970
1971 if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc))
1972 nvmet_pci_epf_disable_ctrl(ctrl, false);
1973
1974 if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc))
1975 nvmet_pci_epf_disable_ctrl(ctrl, true);
1976
1977 if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
1978 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1979
1980 nvmet_update_cc(ctrl->tctrl, ctrl->cc);
1981 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
1982
1983 reschedule_work:
1984 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
1985 }
1986
1987 static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
1988 {
1989 struct nvmet_ctrl *tctrl = ctrl->tctrl;
1990
1991 ctrl->bar = ctrl->nvme_epf->reg_bar;
1992
1993 /* Copy the target controller capabilities as a base. */
1994 ctrl->cap = tctrl->cap;
1995
1996 /* Contiguous Queues Required (CQR). */
1997 ctrl->cap |= 0x1ULL << 16;
1998
1999 /* Set Doorbell stride to 4B (DSTRD). */
2000 ctrl->cap &= ~GENMASK_ULL(35, 32);
2001
2002 /* Clear NVM Subsystem Reset Supported (NSSRS). */
2003 ctrl->cap &= ~(0x1ULL << 36);
2004
2005 /* Clear Boot Partition Support (BPS). */
2006 ctrl->cap &= ~(0x1ULL << 45);
2007
2008 /* Clear Persistent Memory Region Supported (PMRS). */
2009 ctrl->cap &= ~(0x1ULL << 56);
2010
2011 /* Clear Controller Memory Buffer Supported (CMBS). */
2012 ctrl->cap &= ~(0x1ULL << 57);
2013
2014 nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
2015 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);
2016
2017 nvmet_pci_epf_clear_ctrl_config(ctrl);
2018 }
2019
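/*
 * Create the PCI target controller: initialize the command (IOD) mempool,
 * look up the configured nvmet port, create the target controller using a
 * generated host NQN, allocate the queues and IRQ vector descriptors, and
 * initialize BAR 0.
 */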
2020 static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
2021 unsigned int max_nr_queues)
2022 {
2023 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2024 struct nvmet_alloc_ctrl_args args = {};
2025 char hostnqn[NVMF_NQN_SIZE];
2026 uuid_t id;
2027 int ret;
2028
2029 memset(ctrl, 0, sizeof(*ctrl));
2030 ctrl->dev = &nvme_epf->epf->dev;
2031 mutex_init(&ctrl->irq_lock);
2032 ctrl->nvme_epf = nvme_epf;
2033 ctrl->mdts = nvme_epf->mdts_kb * SZ_1K;
2034 INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work);
2035 INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work);
2036
2037 ret = mempool_init_kmalloc_pool(&ctrl->iod_pool,
2038 max_nr_queues * NVMET_MAX_QUEUE_SIZE,
2039 sizeof(struct nvmet_pci_epf_iod));
2040 if (ret) {
2041 dev_err(ctrl->dev, "Failed to initialize IOD mempool\n");
2042 return ret;
2043 }
2044
2045 ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid);
2046 if (!ctrl->port) {
2047 dev_err(ctrl->dev, "Port not found\n");
2048 ret = -EINVAL;
2049 goto out_mempool_exit;
2050 }
2051
2052 /* Create the target controller. */
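/*
 * A PCI host does not have an NVMe host NQN, so generate a random
 * UUID-based NQN to identify it to the target core.
 */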
2053 uuid_gen(&id);
2054 snprintf(hostnqn, NVMF_NQN_SIZE,
2055 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
2056 args.port = ctrl->port;
2057 args.subsysnqn = nvme_epf->subsysnqn;
2058 memset(&id, 0, sizeof(uuid_t));
2059 args.hostid = &id;
2060 args.hostnqn = hostnqn;
2061 args.ops = &nvmet_pci_epf_fabrics_ops;
2062
2063 ctrl->tctrl = nvmet_alloc_ctrl(&args);
2064 if (!ctrl->tctrl) {
2065 dev_err(ctrl->dev, "Failed to create target controller\n");
2066 ret = -ENOMEM;
2067 goto out_mempool_exit;
2068 }
2069 ctrl->tctrl->drvdata = ctrl;
2070
2071 /* We do not support protection information for now. */
2072 if (ctrl->tctrl->pi_support) {
2073 dev_err(ctrl->dev,
2074 "Protection information (PI) is not supported\n");
2075 ret = -ENOTSUPP;
2076 goto out_put_ctrl;
2077 }
2078
2079 /* Allocate our queues, up to the maximum number. */
2080 ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
2081 ret = nvmet_pci_epf_alloc_queues(ctrl);
2082 if (ret)
2083 goto out_put_ctrl;
2084
2085 /*
2086 * Allocate the IRQ vector descriptors: we cannot have more vectors than
2087 * the maximum number of queues.
2088 */
2089 ret = nvmet_pci_epf_alloc_irq_vectors(ctrl);
2090 if (ret)
2091 goto out_free_queues;
2092
2093 dev_info(ctrl->dev,
2094 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n",
2095 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1,
2096 ctrl->mdts);
2097
2098 /* Initialize BAR 0 using the target controller CAP. */
2099 nvmet_pci_epf_init_bar(ctrl);
2100
2101 return 0;
2102
2103 out_free_queues:
2104 nvmet_pci_epf_free_queues(ctrl);
2105 out_put_ctrl:
2106 nvmet_ctrl_put(ctrl->tctrl);
2107 ctrl->tctrl = NULL;
2108 out_mempool_exit:
2109 mempool_exit(&ctrl->iod_pool);
2110 return ret;
2111 }
2112
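/*
 * Start operating the controller: called when the PCI link comes up, or at
 * EPC init time if the endpoint controller has no link-up notifier.
 */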
2113 static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2114 {
2115
2116 dev_info(ctrl->dev, "PCI link up\n");
2117 ctrl->link_up = true;
2118
2119 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
2120 }
2121
2122 static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2123 {
2124 dev_info(ctrl->dev, "PCI link down\n");
2125 ctrl->link_up = false;
2126
2127 cancel_delayed_work_sync(&ctrl->poll_cc);
2128
2129 nvmet_pci_epf_disable_ctrl(ctrl, false);
2130 nvmet_pci_epf_clear_ctrl_config(ctrl);
2131 }
2132
2133 static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2134 {
2135 if (!ctrl->tctrl)
2136 return;
2137
2138 dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
2139 ctrl->tctrl->subsys->subsysnqn);
2140
2141 nvmet_pci_epf_stop_ctrl(ctrl);
2142
2143 nvmet_pci_epf_free_queues(ctrl);
2144 nvmet_pci_epf_free_irq_vectors(ctrl);
2145
2146 nvmet_ctrl_put(ctrl->tctrl);
2147 ctrl->tctrl = NULL;
2148
2149 mempool_exit(&ctrl->iod_pool);
2150 }
2151
2152 static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
2153 {
2154 struct pci_epf *epf = nvme_epf->epf;
2155 const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2156 size_t reg_size, reg_bar_size;
2157 size_t msix_table_size = 0;
2158
2159 /*
2160 * The first free BAR will be our register BAR and per NVMe
2161 * specifications, it must be BAR 0.
2162 */
2163 if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
2164 dev_err(&epf->dev, "BAR 0 is not free\n");
2165 return -ENODEV;
2166 }
2167
2168 /*
2169 * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims
2170 * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type
2171 * is required to be 64-bit. Thus, for interoperability, always set the
2172 * type to 64-bit. In the rare case that the PCI EPC does not support
2173 * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail,
2174 * and we will return failure back to the user.
2175 */
2176 epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
2177
2178 /*
2179 * Calculate the size of the register bar: NVMe registers first with
2180 * enough space for the doorbells, followed by the MSI-X table
2181 * if supported.
2182 */
2183 reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
2184 reg_size = ALIGN(reg_size, 8);
2185
2186 if (epc_features->msix_capable) {
2187 size_t pba_size;
2188
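/*
 * The MSI-X table is placed right after the NVMe registers and doorbells
 * and is followed by the PBA (one pending bit per vector, rounded up to
 * 8 bytes).
 */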
2189 msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
2190 nvme_epf->msix_table_offset = reg_size;
2191 pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
2192
2193 reg_size += msix_table_size + pba_size;
2194 }
2195
2196 if (epc_features->bar[BAR_0].type == BAR_FIXED) {
2197 if (reg_size > epc_features->bar[BAR_0].fixed_size) {
2198 dev_err(&epf->dev,
2199 "BAR 0 size %llu B too small, need %zu B\n",
2200 epc_features->bar[BAR_0].fixed_size,
2201 reg_size);
2202 return -ENOMEM;
2203 }
2204 reg_bar_size = epc_features->bar[BAR_0].fixed_size;
2205 } else {
2206 reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
2207 }
2208
2209 nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
2210 epc_features, PRIMARY_INTERFACE);
2211 if (!nvme_epf->reg_bar) {
2212 dev_err(&epf->dev, "Failed to allocate BAR 0\n");
2213 return -ENOMEM;
2214 }
2215 memset(nvme_epf->reg_bar, 0, reg_bar_size);
2216
2217 return 0;
2218 }
2219
2220 static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
2221 {
2222 struct pci_epf *epf = nvme_epf->epf;
2223
2224 if (!nvme_epf->reg_bar)
2225 return;
2226
2227 pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
2228 nvme_epf->reg_bar = NULL;
2229 }
2230
2231 static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
2232 {
2233 struct pci_epf *epf = nvme_epf->epf;
2234
2235 pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
2236 &epf->bar[BAR_0]);
2237 }
2238
2239 static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
2240 {
2241 const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2242 struct pci_epf *epf = nvme_epf->epf;
2243 int ret;
2244
2245 /* Enable MSI-X if supported, otherwise, use MSI. */
2246 if (epc_features->msix_capable && epf->msix_interrupts) {
2247 ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
2248 epf->msix_interrupts, BAR_0,
2249 nvme_epf->msix_table_offset);
2250 if (ret) {
2251 dev_err(&epf->dev, "Failed to configure MSI-X\n");
2252 return ret;
2253 }
2254
2255 nvme_epf->nr_vectors = epf->msix_interrupts;
2256 nvme_epf->irq_type = PCI_IRQ_MSIX;
2257
2258 return 0;
2259 }
2260
2261 if (epc_features->msi_capable && epf->msi_interrupts) {
2262 ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
2263 epf->msi_interrupts);
2264 if (ret) {
2265 dev_err(&epf->dev, "Failed to configure MSI\n");
2266 return ret;
2267 }
2268
2269 nvme_epf->nr_vectors = epf->msi_interrupts;
2270 nvme_epf->irq_type = PCI_IRQ_MSI;
2271
2272 return 0;
2273 }
2274
2275 /* MSI and MSI-X are not supported: fall back to INTx. */
2276 nvme_epf->nr_vectors = 1;
2277 nvme_epf->irq_type = PCI_IRQ_INTX;
2278
2279 return 0;
2280 }
2281
2282 static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
2283 {
2284 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2285 const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2286 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2287 unsigned int max_nr_queues = NVMET_NR_QUEUES;
2288 int ret;
2289
2290 /* For now, do not support virtual functions. */
2291 if (epf->vfunc_no > 0) {
2292 dev_err(&epf->dev, "Virtual functions are not supported\n");
2293 return -EINVAL;
2294 }
2295
2296 /*
2297 * Cap the maximum number of queues the controller can support to the
2298 * number of IRQ vectors we can use.
2299 */
2300 if (epc_features->msix_capable && epf->msix_interrupts) {
2301 dev_info(&epf->dev,
2302 "PCI endpoint controller supports MSI-X, %u vectors\n",
2303 epf->msix_interrupts);
2304 max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
2305 } else if (epc_features->msi_capable && epf->msi_interrupts) {
2306 dev_info(&epf->dev,
2307 "PCI endpoint controller supports MSI, %u vectors\n",
2308 epf->msi_interrupts);
2309 max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
2310 }
2311
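/* We need at least two queues: the admin queue and one I/O queue. */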
2312 if (max_nr_queues < 2) {
2313 dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
2314 max_nr_queues);
2315 return -EINVAL;
2316 }
2317
2318 /* Create the target controller. */
2319 ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues);
2320 if (ret) {
2321 dev_err(&epf->dev,
2322 "Failed to create NVMe PCI target controller (err=%d)\n",
2323 ret);
2324 return ret;
2325 }
2326
2327 nvmet_pci_epf_init_dma(nvme_epf);
2328
2329 /* Set device ID, class, etc. */
2330 epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
2331 epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
2332 ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
2333 epf->header);
2334 if (ret) {
2335 dev_err(&epf->dev,
2336 "Failed to write configuration header (err=%d)\n", ret);
2337 goto out_destroy_ctrl;
2338 }
2339
2340 ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
2341 &epf->bar[BAR_0]);
2342 if (ret) {
2343 dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret);
2344 goto out_destroy_ctrl;
2345 }
2346
2347 /*
2348 * Enable interrupts and start polling the controller BAR if we do not
2349 * have a link up notifier.
2350 */
2351 ret = nvmet_pci_epf_init_irq(nvme_epf);
2352 if (ret)
2353 goto out_clear_bar;
2354
2355 if (!epc_features->linkup_notifier)
2356 nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
2357
2358 return 0;
2359
2360 out_clear_bar:
2361 nvmet_pci_epf_clear_bar(nvme_epf);
2362 out_destroy_ctrl:
2363 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
2364 return ret;
2365 }
2366
2367 static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
2368 {
2369 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2370 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2371
2372 nvmet_pci_epf_destroy_ctrl(ctrl);
2373
2374 nvmet_pci_epf_deinit_dma(nvme_epf);
2375 nvmet_pci_epf_clear_bar(nvme_epf);
2376 }
2377
2378 static int nvmet_pci_epf_link_up(struct pci_epf *epf)
2379 {
2380 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2381 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2382
2383 nvmet_pci_epf_start_ctrl(ctrl);
2384
2385 return 0;
2386 }
2387
2388 static int nvmet_pci_epf_link_down(struct pci_epf *epf)
2389 {
2390 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2391 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2392
2393 nvmet_pci_epf_stop_ctrl(ctrl);
2394
2395 return 0;
2396 }
2397
2398 static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = {
2399 .epc_init = nvmet_pci_epf_epc_init,
2400 .epc_deinit = nvmet_pci_epf_epc_deinit,
2401 .link_up = nvmet_pci_epf_link_up,
2402 .link_down = nvmet_pci_epf_link_down,
2403 };
2404
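/*
 * Bind the endpoint function to its endpoint controller: retrieve the EPC
 * features and allocate the register BAR. The target controller itself is
 * only created later, at EPC init time.
 */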
2405 static int nvmet_pci_epf_bind(struct pci_epf *epf)
2406 {
2407 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2408 const struct pci_epc_features *epc_features;
2409 struct pci_epc *epc = epf->epc;
2410 int ret;
2411
2412 if (WARN_ON_ONCE(!epc))
2413 return -EINVAL;
2414
2415 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
2416 if (!epc_features) {
2417 dev_err(&epf->dev, "epc_features not implemented\n");
2418 return -EOPNOTSUPP;
2419 }
2420 nvme_epf->epc_features = epc_features;
2421
2422 ret = nvmet_pci_epf_configure_bar(nvme_epf);
2423 if (ret)
2424 return ret;
2425
2426 return 0;
2427 }
2428
2429 static void nvmet_pci_epf_unbind(struct pci_epf *epf)
2430 {
2431 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2432 struct pci_epc *epc = epf->epc;
2433
2434 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
2435
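/*
 * DMA channels and BAR 0 are set up only once the endpoint controller
 * initialization has completed, so only tear them down in that case.
 */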
2436 if (epc->init_complete) {
2437 nvmet_pci_epf_deinit_dma(nvme_epf);
2438 nvmet_pci_epf_clear_bar(nvme_epf);
2439 }
2440
2441 nvmet_pci_epf_free_bar(nvme_epf);
2442 }
2443
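/*
 * Default PCI configuration header: the class codes identify an NVM Express
 * mass storage controller. The vendor IDs are overwritten from the target
 * subsystem attributes in nvmet_pci_epf_epc_init().
 */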
2444 static struct pci_epf_header nvme_epf_pci_header = {
2445 .vendorid = PCI_ANY_ID,
2446 .deviceid = PCI_ANY_ID,
2447 .progif_code = 0x02, /* NVM Express */
2448 .baseclass_code = PCI_BASE_CLASS_STORAGE,
2449 .subclass_code = 0x08, /* Non-Volatile Memory controller */
2450 .interrupt_pin = PCI_INTERRUPT_INTA,
2451 };
2452
2453 static int nvmet_pci_epf_probe(struct pci_epf *epf,
2454 const struct pci_epf_device_id *id)
2455 {
2456 struct nvmet_pci_epf *nvme_epf;
2457 int ret;
2458
2459 nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
2460 if (!nvme_epf)
2461 return -ENOMEM;
2462
2463 ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
2464 if (ret)
2465 return ret;
2466
2467 nvme_epf->epf = epf;
2468 nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;
2469
2470 epf->event_ops = &nvmet_pci_epf_event_ops;
2471 epf->header = &nvme_epf_pci_header;
2472 epf_set_drvdata(epf, nvme_epf);
2473
2474 return 0;
2475 }
2476
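/*
 * Configfs attributes exposed under the function "nvme" group: portid,
 * subsysnqn and mdts_kb. None of these can be changed once the target
 * controller has been created.
 */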
2477 #define to_nvme_epf(epf_group) \
2478 container_of(epf_group, struct nvmet_pci_epf, group)
2479
2480 static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
2481 {
2482 struct config_group *group = to_config_group(item);
2483 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2484
2485 return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
2486 }
2487
2488 static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
2489 const char *page, size_t len)
2490 {
2491 struct config_group *group = to_config_group(item);
2492 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2493 u16 portid;
2494
2495 /* Do not allow setting this when the function is already started. */
2496 if (nvme_epf->ctrl.tctrl)
2497 return -EBUSY;
2498
2499 if (!len)
2500 return -EINVAL;
2501
2502 if (kstrtou16(page, 0, &portid))
2503 return -EINVAL;
2504
2505 nvme_epf->portid = cpu_to_le16(portid);
2506
2507 return len;
2508 }
2509
2510 CONFIGFS_ATTR(nvmet_pci_epf_, portid);
2511
2512 static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
2513 char *page)
2514 {
2515 struct config_group *group = to_config_group(item);
2516 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2517
2518 return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
2519 }
2520
2521 static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
2522 const char *page, size_t len)
2523 {
2524 struct config_group *group = to_config_group(item);
2525 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2526
2527 /* Do not allow setting this when the function is already started. */
2528 if (nvme_epf->ctrl.tctrl)
2529 return -EBUSY;
2530
2531 if (!len)
2532 return -EINVAL;
2533
2534 strscpy(nvme_epf->subsysnqn, page, len);
2535
2536 return len;
2537 }
2538
2539 CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);
2540
2541 static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
2542 {
2543 struct config_group *group = to_config_group(item);
2544 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2545
2546 return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
2547 }
2548
2549 static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
2550 const char *page, size_t len)
2551 {
2552 struct config_group *group = to_config_group(item);
2553 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2554 unsigned long mdts_kb;
2555 int ret;
2556
2557 if (nvme_epf->ctrl.tctrl)
2558 return -EBUSY;
2559
2560 ret = kstrtoul(page, 0, &mdts_kb);
2561 if (ret)
2562 return ret;
2563 if (!mdts_kb)
2564 mdts_kb = NVMET_PCI_EPF_MDTS_KB;
2565 else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
2566 mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;
2567
2568 if (!is_power_of_2(mdts_kb))
2569 return -EINVAL;
2570
2571 nvme_epf->mdts_kb = mdts_kb;
2572
2573 return len;
2574 }
2575
2576 CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);
2577
2578 static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
2579 &nvmet_pci_epf_attr_portid,
2580 &nvmet_pci_epf_attr_subsysnqn,
2581 &nvmet_pci_epf_attr_mdts_kb,
2582 NULL,
2583 };
2584
2585 static const struct config_item_type nvmet_pci_epf_group_type = {
2586 .ct_attrs = nvmet_pci_epf_attrs,
2587 .ct_owner = THIS_MODULE,
2588 };
2589
2590 static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
2591 struct config_group *group)
2592 {
2593 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2594
2595 config_group_init_type_name(&nvme_epf->group, "nvme",
2596 &nvmet_pci_epf_group_type);
2597
2598 return &nvme_epf->group;
2599 }
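
/*
 * The "nvme" attribute group is attached under the endpoint function
 * directory of the PCI endpoint configfs hierarchy. As a sketch (exact
 * paths depend on the platform EPC and the chosen function name, "func0"
 * here being only an example), configuring the function could look like:
 *
 *   cd /sys/kernel/config/pci_ep/functions/nvmet_pci_epf/func0
 *   echo 1 > nvme/portid
 *   echo testnqn > nvme/subsysnqn
 *   echo 512 > nvme/mdts_kb
 *
 * The port ID and subsystem NQN must match an nvmet port and subsystem
 * configured through the regular nvmet configfs interface.
 */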
2600
2601 static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
2602 { .name = "nvmet_pci_epf" },
2603 {},
2604 };
2605
2606 static struct pci_epf_ops nvmet_pci_epf_ops = {
2607 .bind = nvmet_pci_epf_bind,
2608 .unbind = nvmet_pci_epf_unbind,
2609 .add_cfs = nvmet_pci_epf_add_cfs,
2610 };
2611
2612 static struct pci_epf_driver nvmet_pci_epf_driver = {
2613 .driver.name = "nvmet_pci_epf",
2614 .probe = nvmet_pci_epf_probe,
2615 .id_table = nvmet_pci_epf_ids,
2616 .ops = &nvmet_pci_epf_ops,
2617 .owner = THIS_MODULE,
2618 };
2619
2620 static int __init nvmet_pci_epf_init_module(void)
2621 {
2622 int ret;
2623
2624 ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
2625 if (ret)
2626 return ret;
2627
2628 ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
2629 if (ret) {
2630 pci_epf_unregister_driver(&nvmet_pci_epf_driver);
2631 return ret;
2632 }
2633
2634 return 0;
2635 }
2636
2637 static void __exit nvmet_pci_epf_cleanup_module(void)
2638 {
2639 nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
2640 pci_epf_unregister_driver(&nvmet_pci_epf_driver);
2641 }
2642
2643 module_init(nvmet_pci_epf_init_module);
2644 module_exit(nvmet_pci_epf_cleanup_module);
2645
2646 MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
2647 MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
2648 MODULE_LICENSE("GPL");
2649