xref: /linux/drivers/nvme/target/pci-epf.c (revision fb7399cf2d0b33825b8039f95c45395c7deba25c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe PCI Endpoint Function target driver.
4  *
5  * Copyright (c) 2024, Western Digital Corporation or its affiliates.
6  * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com>
7  *                     REDS Institute, HEIG-VD, HES-SO, Switzerland
8  */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/delay.h>
12 #include <linux/dmaengine.h>
13 #include <linux/io.h>
14 #include <linux/mempool.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/nvme.h>
18 #include <linux/pci_ids.h>
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
21 #include <linux/pci_regs.h>
22 #include <linux/slab.h>
23 
24 #include "nvmet.h"
25 
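/*
 * All ports registered with the PCI transport, protected by
 * nvmet_pci_epf_ports_mutex.
 */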
26 static LIST_HEAD(nvmet_pci_epf_ports);
27 static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
28 
29 /*
30  * Default and maximum allowed data transfer size. For the default,
31  * allow up to 128 page-sized segments. For the maximum allowed,
32  * use 4 times the default (which is completely arbitrary).
33  */
34 #define NVMET_PCI_EPF_MAX_SEGS		128
35 #define NVMET_PCI_EPF_MDTS_KB		\
36 	(NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
37 #define NVMET_PCI_EPF_MAX_MDTS_KB	(NVMET_PCI_EPF_MDTS_KB * 4)
38 
39 /*
40  * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an
41  * interrupt to the host. This default of 8 is completely arbitrary and can
42  * be changed by the host with an nvme_set_features command.
43  */
44 #define NVMET_PCI_EPF_IV_THRESHOLD	8
45 
46 /*
47  * BAR CC register and SQ polling intervals.
48  */
49 #define NVMET_PCI_EPF_CC_POLL_INTERVAL	msecs_to_jiffies(10)
50 #define NVMET_PCI_EPF_SQ_POLL_INTERVAL	msecs_to_jiffies(5)
51 #define NVMET_PCI_EPF_SQ_POLL_IDLE	msecs_to_jiffies(5000)
52 
53 /*
54  * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ.
55  */
56 #define NVMET_PCI_EPF_SQ_AB		8
57 
58 /*
59  * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ
60  * is full, in which case we retry the CQ processing after this interval.
61  */
62 #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL	msecs_to_jiffies(1)
63 
64 enum nvmet_pci_epf_queue_flags {
65 	NVMET_PCI_EPF_Q_LIVE = 0,	/* The queue is live */
66 	NVMET_PCI_EPF_Q_IRQ_ENABLED,	/* IRQ is enabled for this queue */
67 };
68 
69 /*
70  * IRQ vector descriptor.
71  */
72 struct nvmet_pci_epf_irq_vector {
73 	unsigned int	vector;
74 	unsigned int	ref;
75 	bool		cd;
76 	int		nr_irqs;
77 };
78 
79 struct nvmet_pci_epf_queue {
80 	union {
81 		struct nvmet_sq		nvme_sq;
82 		struct nvmet_cq		nvme_cq;
83 	};
84 	struct nvmet_pci_epf_ctrl	*ctrl;
85 	unsigned long			flags;
86 
87 	u64				pci_addr;
88 	size_t				pci_size;
89 	struct pci_epc_map		pci_map;
90 
91 	u16				qid;
92 	u16				depth;
93 	u16				vector;
94 	u16				head;
95 	u16				tail;
96 	u16				phase;
97 	u32				db;
98 
99 	size_t				qes;
100 
101 	struct nvmet_pci_epf_irq_vector	*iv;
102 	struct workqueue_struct		*iod_wq;
103 	struct delayed_work		work;
104 	spinlock_t			lock;
105 	struct list_head		list;
106 };
107 
108 /*
109  * PCI Root Complex (RC) address data segment for mapping an admin or
110  * I/O command buffer @buf of @length bytes to the PCI address @pci_addr.
111  */
112 struct nvmet_pci_epf_segment {
113 	void				*buf;
114 	u64				pci_addr;
115 	u32				length;
116 };
117 
118 /*
119  * Command descriptors.
120  */
121 struct nvmet_pci_epf_iod {
122 	struct list_head		link;
123 
124 	struct nvmet_req		req;
125 	struct nvme_command		cmd;
126 	struct nvme_completion		cqe;
127 	unsigned int			status;
128 
129 	struct nvmet_pci_epf_ctrl	*ctrl;
130 
131 	struct nvmet_pci_epf_queue	*sq;
132 	struct nvmet_pci_epf_queue	*cq;
133 
134 	/* Data transfer size and direction for the command. */
135 	size_t				data_len;
136 	enum dma_data_direction		dma_dir;
137 
138 	/*
139 	 * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we
140 	 * use only @data_seg. Otherwise, the array of segments @data_segs is
141 	 * allocated to manage multiple PCI address data segments. @data_sgl and
142 	 * @data_sgt are used to set up the command request for execution by the
143 	 * target core.
144 	 */
145 	unsigned int			nr_data_segs;
146 	struct nvmet_pci_epf_segment	data_seg;
147 	struct nvmet_pci_epf_segment	*data_segs;
148 	struct scatterlist		data_sgl;
149 	struct sg_table			data_sgt;
150 
151 	struct work_struct		work;
152 	struct completion		done;
153 };
154 
155 /*
156  * PCI target controller private data.
157  */
158 struct nvmet_pci_epf_ctrl {
159 	struct nvmet_pci_epf		*nvme_epf;
160 	struct nvmet_port		*port;
161 	struct nvmet_ctrl		*tctrl;
162 	struct device			*dev;
163 
164 	unsigned int			nr_queues;
165 	struct nvmet_pci_epf_queue	*sq;
166 	struct nvmet_pci_epf_queue	*cq;
167 	unsigned int			sq_ab;
168 
169 	mempool_t			iod_pool;
170 	void				*bar;
171 	u64				cap;
172 	u32				cc;
173 	u32				csts;
174 
175 	size_t				io_sqes;
176 	size_t				io_cqes;
177 
178 	size_t				mps_shift;
179 	size_t				mps;
180 	size_t				mps_mask;
181 
182 	unsigned int			mdts;
183 
184 	struct delayed_work		poll_cc;
185 	struct delayed_work		poll_sqs;
186 
187 	struct mutex			irq_lock;
188 	struct nvmet_pci_epf_irq_vector	*irq_vectors;
189 	unsigned int			irq_vector_threshold;
190 
191 	bool				link_up;
192 	bool				enabled;
193 };
194 
195 /*
196  * PCI EPF driver private data.
197  */
198 struct nvmet_pci_epf {
199 	struct pci_epf			*epf;
200 
201 	const struct pci_epc_features	*epc_features;
202 
203 	void				*reg_bar;
204 	size_t				msix_table_offset;
205 
206 	unsigned int			irq_type;
207 	unsigned int			nr_vectors;
208 
209 	struct nvmet_pci_epf_ctrl	ctrl;
210 
211 	bool				dma_enabled;
212 	struct dma_chan			*dma_tx_chan;
213 	struct mutex			dma_tx_lock;
214 	struct dma_chan			*dma_rx_chan;
215 	struct mutex			dma_rx_lock;
216 
217 	struct mutex			mmio_lock;
218 
219 	/* PCI endpoint function configfs attributes. */
220 	struct config_group		group;
221 	__le16				portid;
222 	char				subsysnqn[NVMF_NQN_SIZE];
223 	unsigned int			mdts_kb;
224 };
225 
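/*
 * NVMe register BAR accessors. The register BAR is local memory allocated by
 * the endpoint function, so accesses use plain READ_ONCE()/WRITE_ONCE() on
 * little-endian values rather than MMIO accessors.
 */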
226 static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl,
227 					   u32 off)
228 {
229 	__le32 *bar_reg = ctrl->bar + off;
230 
231 	return le32_to_cpu(READ_ONCE(*bar_reg));
232 }
233 
234 static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl,
235 					     u32 off, u32 val)
236 {
237 	__le32 *bar_reg = ctrl->bar + off;
238 
239 	WRITE_ONCE(*bar_reg, cpu_to_le32(val));
240 }
241 
242 static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl,
243 					   u32 off)
244 {
245 	return (u64)nvmet_pci_epf_bar_read32(ctrl, off) |
246 		((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32);
247 }
248 
249 static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl,
250 					     u32 off, u64 val)
251 {
252 	nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF);
253 	nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF);
254 }
255 
256 static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf,
257 		u64 pci_addr, size_t size, struct pci_epc_map *map)
258 {
259 	struct pci_epf *epf = nvme_epf->epf;
260 
261 	return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
262 			       pci_addr, size, map);
263 }
264 
265 static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf,
266 					   struct pci_epc_map *map)
267 {
268 	struct pci_epf *epf = nvme_epf->epf;
269 
270 	pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map);
271 }
272 
273 struct nvmet_pci_epf_dma_filter {
274 	struct device *dev;
275 	u32 dma_mask;
276 };
277 
278 static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg)
279 {
280 	struct nvmet_pci_epf_dma_filter *filter = arg;
281 	struct dma_slave_caps caps;
282 
283 	memset(&caps, 0, sizeof(caps));
284 	dma_get_slave_caps(chan, &caps);
285 
286 	return chan->device->dev == filter->dev &&
287 		(filter->dma_mask & caps.directions);
288 }
289 
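/*
 * Request one DMA channel per transfer direction from the endpoint controller
 * DMA engine. If either channel cannot be obtained, DMA is left disabled and
 * all transfers fall back to MMIO.
 */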
290 static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
291 {
292 	struct pci_epf *epf = nvme_epf->epf;
293 	struct device *dev = &epf->dev;
294 	struct nvmet_pci_epf_dma_filter filter;
295 	struct dma_chan *chan;
296 	dma_cap_mask_t mask;
297 
298 	mutex_init(&nvme_epf->dma_rx_lock);
299 	mutex_init(&nvme_epf->dma_tx_lock);
300 
301 	dma_cap_zero(mask);
302 	dma_cap_set(DMA_SLAVE, mask);
303 
304 	filter.dev = epf->epc->dev.parent;
305 	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
306 
307 	chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
308 	if (!chan)
309 		goto out_dma_no_rx;
310 
311 	nvme_epf->dma_rx_chan = chan;
312 
313 	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
314 	chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
315 	if (!chan)
316 		goto out_dma_no_tx;
317 
318 	nvme_epf->dma_tx_chan = chan;
319 
320 	nvme_epf->dma_enabled = true;
321 
322 	dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
323 		dma_chan_name(nvme_epf->dma_rx_chan),
324 		dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf->dma_rx_chan)));
325 
326 	dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
327 		dma_chan_name(chan),
328 		dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
329 
330 	return;
331 
332 out_dma_no_tx:
333 	dma_release_channel(nvme_epf->dma_rx_chan);
334 	nvme_epf->dma_rx_chan = NULL;
335 
336 out_dma_no_rx:
337 	mutex_destroy(&nvme_epf->dma_rx_lock);
338 	mutex_destroy(&nvme_epf->dma_tx_lock);
339 	nvme_epf->dma_enabled = false;
340 
341 	dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n");
342 }
343 
344 static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf)
345 {
346 	if (!nvme_epf->dma_enabled)
347 		return;
348 
349 	dma_release_channel(nvme_epf->dma_tx_chan);
350 	nvme_epf->dma_tx_chan = NULL;
351 	dma_release_channel(nvme_epf->dma_rx_chan);
352 	nvme_epf->dma_rx_chan = NULL;
353 	mutex_destroy(&nvme_epf->dma_rx_lock);
354 	mutex_destroy(&nvme_epf->dma_tx_lock);
355 	nvme_epf->dma_enabled = false;
356 }
357 
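/*
 * Transfer a single PCI address segment to or from the host using the DMA
 * channel matching the transfer direction. The transfer is synchronous: wait
 * for the DMA completion before returning.
 */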
358 static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf,
359 		struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
360 {
361 	struct pci_epf *epf = nvme_epf->epf;
362 	struct dma_async_tx_descriptor *desc;
363 	struct dma_slave_config sconf = {};
364 	struct device *dev = &epf->dev;
365 	struct device *dma_dev;
366 	struct dma_chan *chan;
367 	dma_cookie_t cookie;
368 	dma_addr_t dma_addr;
369 	struct mutex *lock;
370 	int ret;
371 
372 	switch (dir) {
373 	case DMA_FROM_DEVICE:
374 		lock = &nvme_epf->dma_rx_lock;
375 		chan = nvme_epf->dma_rx_chan;
376 		sconf.direction = DMA_DEV_TO_MEM;
377 		sconf.src_addr = seg->pci_addr;
378 		break;
379 	case DMA_TO_DEVICE:
380 		lock = &nvme_epf->dma_tx_lock;
381 		chan = nvme_epf->dma_tx_chan;
382 		sconf.direction = DMA_MEM_TO_DEV;
383 		sconf.dst_addr = seg->pci_addr;
384 		break;
385 	default:
386 		return -EINVAL;
387 	}
388 
389 	mutex_lock(lock);
390 
391 	dma_dev = dmaengine_get_dma_device(chan);
392 	dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir);
393 	ret = dma_mapping_error(dma_dev, dma_addr);
394 	if (ret)
395 		goto unlock;
396 
397 	ret = dmaengine_slave_config(chan, &sconf);
398 	if (ret) {
399 		dev_err(dev, "Failed to configure DMA channel\n");
400 		goto unmap;
401 	}
402 
403 	desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length,
404 					   sconf.direction, DMA_CTRL_ACK);
405 	if (!desc) {
406 		dev_err(dev, "Failed to prepare DMA\n");
407 		ret = -EIO;
408 		goto unmap;
409 	}
410 
411 	cookie = dmaengine_submit(desc);
412 	ret = dma_submit_error(cookie);
413 	if (ret) {
414 		dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret);
415 		goto unmap;
416 	}
417 
418 	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
419 		dev_err(dev, "DMA transfer failed\n");
420 		ret = -EIO;
421 	}
422 
423 	dmaengine_terminate_sync(chan);
424 
425 unmap:
426 	dma_unmap_single(dma_dev, dma_addr, seg->length, dir);
427 
428 unlock:
429 	mutex_unlock(lock);
430 
431 	return ret;
432 }
433 
434 static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf,
435 		struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
436 {
437 	u64 pci_addr = seg->pci_addr;
438 	u32 length = seg->length;
439 	void *buf = seg->buf;
440 	struct pci_epc_map map;
441 	int ret = -EINVAL;
442 
443 	/*
444 	 * Note: MMIO transfers do not need serialization but this is a
445 	 * simple way to avoid using too many mapping windows.
446 	 */
447 	mutex_lock(&nvme_epf->mmio_lock);
448 
449 	while (length) {
450 		ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map);
451 		if (ret)
452 			break;
453 
454 		switch (dir) {
455 		case DMA_FROM_DEVICE:
456 			memcpy_fromio(buf, map.virt_addr, map.pci_size);
457 			break;
458 		case DMA_TO_DEVICE:
459 			memcpy_toio(map.virt_addr, buf, map.pci_size);
460 			break;
461 		default:
462 			ret = -EINVAL;
463 			goto unlock;
464 		}
465 
466 		pci_addr += map.pci_size;
467 		buf += map.pci_size;
468 		length -= map.pci_size;
469 
470 		nvmet_pci_epf_mem_unmap(nvme_epf, &map);
471 	}
472 
473 unlock:
474 	mutex_unlock(&nvme_epf->mmio_lock);
475 
476 	return ret;
477 }
478 
479 static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf,
480 		struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
481 {
482 	if (nvme_epf->dma_enabled)
483 		return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir);
484 
485 	return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir);
486 }
487 
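/*
 * Transfer a single buffer to or from the host at the specified PCI address.
 */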
488 static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl,
489 					 void *buf, u64 pci_addr, u32 length,
490 					 enum dma_data_direction dir)
491 {
492 	struct nvmet_pci_epf_segment seg = {
493 		.buf = buf,
494 		.pci_addr = pci_addr,
495 		.length = length,
496 	};
497 
498 	return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir);
499 }
500 
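/*
 * IRQ vector management: allocate one vector descriptor per queue. Descriptors
 * are reference counted so that completion queues sharing a vector also share
 * its coalescing state.
 */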
501 static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
502 {
503 	ctrl->irq_vectors = kcalloc(ctrl->nr_queues,
504 				    sizeof(struct nvmet_pci_epf_irq_vector),
505 				    GFP_KERNEL);
506 	if (!ctrl->irq_vectors)
507 		return -ENOMEM;
508 
509 	mutex_init(&ctrl->irq_lock);
510 
511 	return 0;
512 }
513 
514 static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
515 {
516 	if (ctrl->irq_vectors) {
517 		mutex_destroy(&ctrl->irq_lock);
518 		kfree(ctrl->irq_vectors);
519 		ctrl->irq_vectors = NULL;
520 	}
521 }
522 
523 static struct nvmet_pci_epf_irq_vector *
524 nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
525 {
526 	struct nvmet_pci_epf_irq_vector *iv;
527 	int i;
528 
529 	lockdep_assert_held(&ctrl->irq_lock);
530 
531 	for (i = 0; i < ctrl->nr_queues; i++) {
532 		iv = &ctrl->irq_vectors[i];
533 		if (iv->ref && iv->vector == vector)
534 			return iv;
535 	}
536 
537 	return NULL;
538 }
539 
540 static struct nvmet_pci_epf_irq_vector *
541 nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
542 {
543 	struct nvmet_pci_epf_irq_vector *iv;
544 	int i;
545 
546 	mutex_lock(&ctrl->irq_lock);
547 
548 	iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
549 	if (iv) {
550 		iv->ref++;
551 		goto unlock;
552 	}
553 
554 	for (i = 0; i < ctrl->nr_queues; i++) {
555 		iv = &ctrl->irq_vectors[i];
556 		if (!iv->ref)
557 			break;
558 	}
559 
560 	if (WARN_ON_ONCE(!iv))
561 		goto unlock;
562 
563 	iv->ref = 1;
564 	iv->vector = vector;
565 	iv->nr_irqs = 0;
566 
567 unlock:
568 	mutex_unlock(&ctrl->irq_lock);
569 
570 	return iv;
571 }
572 
573 static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl,
574 					    u16 vector)
575 {
576 	struct nvmet_pci_epf_irq_vector *iv;
577 
578 	mutex_lock(&ctrl->irq_lock);
579 
580 	iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
581 	if (iv) {
582 		iv->ref--;
583 		if (!iv->ref) {
584 			iv->vector = 0;
585 			iv->nr_irqs = 0;
586 		}
587 	}
588 
589 	mutex_unlock(&ctrl->irq_lock);
590 }
591 
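/*
 * Decide if an interrupt must be raised for a CQ, applying the IRQ coalescing
 * threshold to I/O queues. With @force set (end of CQ processing), raise an
 * interrupt if any CQEs were posted since the last one.
 */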
592 static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
593 		struct nvmet_pci_epf_queue *cq, bool force)
594 {
595 	struct nvmet_pci_epf_irq_vector *iv = cq->iv;
596 	bool ret;
597 
598 	/* IRQ coalescing for the admin queue is not allowed. */
599 	if (!cq->qid)
600 		return true;
601 
602 	if (iv->cd)
603 		return true;
604 
605 	if (force) {
606 		ret = iv->nr_irqs > 0;
607 	} else {
608 		iv->nr_irqs++;
609 		ret = iv->nr_irqs >= ctrl->irq_vector_threshold;
610 	}
611 	if (ret)
612 		iv->nr_irqs = 0;
613 
614 	return ret;
615 }
616 
617 static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
618 		struct nvmet_pci_epf_queue *cq, bool force)
619 {
620 	struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
621 	struct pci_epf *epf = nvme_epf->epf;
622 	int ret = 0;
623 
624 	if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) ||
625 	    !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
626 		return;
627 
628 	mutex_lock(&ctrl->irq_lock);
629 
630 	if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force))
631 		goto unlock;
632 
633 	switch (nvme_epf->irq_type) {
634 	case PCI_IRQ_MSIX:
635 	case PCI_IRQ_MSI:
636 		/*
637 		 * If we fail to raise an MSI or MSI-X interrupt, it is likely
638 		 * because the host is using legacy INTX IRQs (e.g. BIOS,
639 		 * grub), but we can fall back to the INTX type only if the
640 		 * endpoint controller supports this type.
641 		 */
642 		ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
643 					nvme_epf->irq_type, cq->vector + 1);
644 		if (!ret || !nvme_epf->epc_features->intx_capable)
645 			break;
646 		fallthrough;
647 	case PCI_IRQ_INTX:
648 		ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
649 					PCI_IRQ_INTX, 0);
650 		break;
651 	default:
652 		WARN_ON_ONCE(1);
653 		ret = -EINVAL;
654 		break;
655 	}
656 
657 	if (ret)
658 		dev_err_ratelimited(ctrl->dev,
659 				    "CQ[%u]: Failed to raise IRQ (err=%d)\n",
660 				    cq->qid, ret);
661 
662 unlock:
663 	mutex_unlock(&ctrl->irq_lock);
664 }
665 
666 static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod)
667 {
668 	return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);
669 }
670 
671 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work);
672 
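/*
 * Allocate and initialize a command descriptor (IOD) for a command received on
 * the submission queue @sq.
 */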
673 static struct nvmet_pci_epf_iod *
674 nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq)
675 {
676 	struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl;
677 	struct nvmet_pci_epf_iod *iod;
678 
679 	iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL);
680 	if (unlikely(!iod))
681 		return NULL;
682 
683 	memset(iod, 0, sizeof(*iod));
684 	iod->req.cmd = &iod->cmd;
685 	iod->req.cqe = &iod->cqe;
686 	iod->req.port = ctrl->port;
687 	iod->ctrl = ctrl;
688 	iod->sq = sq;
689 	iod->cq = &ctrl->cq[sq->qid];
690 	INIT_LIST_HEAD(&iod->link);
691 	iod->dma_dir = DMA_NONE;
692 	INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work);
693 	init_completion(&iod->done);
694 
695 	return iod;
696 }
697 
698 /*
699  * Allocate or grow a command table of PCI segments.
700  */
701 static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod,
702 					     int nsegs)
703 {
704 	struct nvmet_pci_epf_segment *segs;
705 	int nr_segs = iod->nr_data_segs + nsegs;
706 
707 	segs = krealloc(iod->data_segs,
708 			nr_segs * sizeof(struct nvmet_pci_epf_segment),
709 			GFP_KERNEL | __GFP_ZERO);
710 	if (!segs)
711 		return -ENOMEM;
712 
713 	iod->nr_data_segs = nr_segs;
714 	iod->data_segs = segs;
715 
716 	return 0;
717 }
718 
719 static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)
720 {
721 	int i;
722 
723 	if (iod->data_segs) {
724 		for (i = 0; i < iod->nr_data_segs; i++)
725 			kfree(iod->data_segs[i].buf);
726 		if (iod->data_segs != &iod->data_seg)
727 			kfree(iod->data_segs);
728 	}
729 	if (iod->data_sgt.nents > 1)
730 		sg_free_table(&iod->data_sgt);
731 	mempool_free(iod, &iod->ctrl->iod_pool);
732 }
733 
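/*
 * Transfer the command data between the local buffers and the host, one PCI
 * address segment at a time.
 */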
734 static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)
735 {
736 	struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf;
737 	struct nvmet_pci_epf_segment *seg = &iod->data_segs[0];
738 	int i, ret;
739 
740 	/* Split the data transfer according to the PCI segments. */
741 	for (i = 0; i < iod->nr_data_segs; i++, seg++) {
742 		ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir);
743 		if (ret) {
744 			iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR;
745 			return ret;
746 		}
747 	}
748 
749 	return 0;
750 }
751 
752 static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl,
753 					 u64 prp)
754 {
755 	return prp & ctrl->mps_mask;
756 }
757 
758 static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl,
759 					    u64 prp)
760 {
761 	return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp);
762 }
763 
764 /*
765  * Transfer a PRP list from the host and return the number of prps.
766  * Transfer a PRP list from the host and return the number of PRPs.
767 static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp,
768 				      size_t xfer_len, __le64 *prps)
769 {
770 	size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift;
771 	u32 length;
772 	int ret;
773 
774 	/*
775 	 * Compute the number of PRPs required for the number of bytes to
776 	 * transfer (xfer_len). If this number of PRPs does not fit within the
777 	 * memory page containing the specified PRP list pointer, only transfer
778 	 * the PRPs available in that page: the last PRP of the page will then be
779 	 * a PRP list pointer to the remaining PRPs.
780 	 */
781 	length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3);
782 	ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE);
783 	if (ret)
784 		return ret;
785 
786 	return length >> 3;
787 }
788 
789 static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl,
790 					    struct nvmet_pci_epf_iod *iod)
791 {
792 	struct nvme_command *cmd = &iod->cmd;
793 	struct nvmet_pci_epf_segment *seg;
794 	size_t size = 0, ofst, prp_size, xfer_len;
795 	size_t transfer_len = iod->data_len;
796 	int nr_segs, nr_prps = 0;
797 	u64 pci_addr, prp;
798 	int i = 0, ret;
799 	__le64 *prps;
800 
801 	prps = kzalloc(ctrl->mps, GFP_KERNEL);
802 	if (!prps)
803 		goto err_internal;
804 
805 	/*
806 	 * Allocate PCI segments for the command: this considers the worst-case
807 	 * scenario where all PRPs are discontiguous, so get as many segments
808 	 * as we can have PRPs. In practice, most of the time, we will have
809 	 * far fewer PCI segments than PRPs.
810 	 */
811 	prp = le64_to_cpu(cmd->common.dptr.prp1);
812 	if (!prp)
813 		goto err_invalid_field;
814 
815 	ofst = nvmet_pci_epf_prp_ofst(ctrl, prp);
816 	nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift;
817 
818 	ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
819 	if (ret)
820 		goto err_internal;
821 
822 	/* Set the first segment using prp1. */
823 	seg = &iod->data_segs[0];
824 	seg->pci_addr = prp;
825 	seg->length = nvmet_pci_epf_prp_size(ctrl, prp);
826 
827 	size = seg->length;
828 	pci_addr = prp + size;
829 	nr_segs = 1;
830 
831 	/*
832 	 * Now build the PCI address segments using the PRP lists, starting
833 	 * from prp2.
834 	 */
835 	prp = le64_to_cpu(cmd->common.dptr.prp2);
836 	if (!prp)
837 		goto err_invalid_field;
838 
839 	while (size < transfer_len) {
840 		xfer_len = transfer_len - size;
841 
842 		if (!nr_prps) {
843 			nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp,
844 							     xfer_len, prps);
845 			if (nr_prps < 0)
846 				goto err_internal;
847 
848 			i = 0;
849 			ofst = 0;
850 		}
851 
852 		/* Current entry */
853 		prp = le64_to_cpu(prps[i]);
854 		if (!prp)
855 			goto err_invalid_field;
856 
857 		/* Did we reach the last PRP entry of the list? */
858 		if (xfer_len > ctrl->mps && i == nr_prps - 1) {
859 			/* We need more PRPs: PRP is a list pointer. */
860 			nr_prps = 0;
861 			continue;
862 		}
863 
864 		/* Only the first PRP is allowed to have an offset. */
865 		if (nvmet_pci_epf_prp_ofst(ctrl, prp))
866 			goto err_invalid_offset;
867 
868 		if (prp != pci_addr) {
869 			/* Discontiguous PRP: start a new segment. */
870 			nr_segs++;
871 			if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs))
872 				goto err_internal;
873 
874 			seg++;
875 			seg->pci_addr = prp;
876 			seg->length = 0;
877 			pci_addr = prp;
878 		}
879 
880 		prp_size = min_t(size_t, ctrl->mps, xfer_len);
881 		seg->length += prp_size;
882 		pci_addr += prp_size;
883 		size += prp_size;
884 
885 		i++;
886 	}
887 
888 	iod->nr_data_segs = nr_segs;
889 	ret = 0;
890 
891 	if (size != transfer_len) {
892 		dev_err(ctrl->dev,
893 			"PRPs transfer length mismatch: got %zu B, need %zu B\n",
894 			size, transfer_len);
895 		goto err_internal;
896 	}
897 
898 	kfree(prps);
899 
900 	return 0;
901 
902 err_invalid_offset:
903 	dev_err(ctrl->dev, "PRPs list invalid offset\n");
904 	iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
905 	goto err;
906 
907 err_invalid_field:
908 	dev_err(ctrl->dev, "PRPs list invalid field\n");
909 	iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
910 	goto err;
911 
912 err_internal:
913 	dev_err(ctrl->dev, "PRPs list internal error\n");
914 	iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
915 
916 err:
917 	kfree(prps);
918 	return -EINVAL;
919 }
920 
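/*
 * Handle a command data buffer described by at most two PRP entries, that is,
 * a transfer that does not need a PRP list.
 */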
921 static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl,
922 					      struct nvmet_pci_epf_iod *iod)
923 {
924 	struct nvme_command *cmd = &iod->cmd;
925 	size_t transfer_len = iod->data_len;
926 	int ret, nr_segs = 1;
927 	u64 prp1, prp2 = 0;
928 	size_t prp1_size;
929 
930 	prp1 = le64_to_cpu(cmd->common.dptr.prp1);
931 	prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1);
932 
933 	/* For commands crossing a page boundary, we should have prp2. */
934 	if (transfer_len > prp1_size) {
935 		prp2 = le64_to_cpu(cmd->common.dptr.prp2);
936 		if (!prp2) {
937 			iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
938 			return -EINVAL;
939 		}
940 		if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) {
941 			iod->status =
942 				NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
943 			return -EINVAL;
944 		}
945 		if (prp2 != prp1 + prp1_size)
946 			nr_segs = 2;
947 	}
948 
949 	if (nr_segs == 1) {
950 		iod->nr_data_segs = 1;
951 		iod->data_segs = &iod->data_seg;
952 		iod->data_segs[0].pci_addr = prp1;
953 		iod->data_segs[0].length = transfer_len;
954 		return 0;
955 	}
956 
957 	ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
958 	if (ret) {
959 		iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
960 		return ret;
961 	}
962 
963 	iod->data_segs[0].pci_addr = prp1;
964 	iod->data_segs[0].length = prp1_size;
965 	iod->data_segs[1].pci_addr = prp2;
966 	iod->data_segs[1].length = transfer_len - prp1_size;
967 
968 	return 0;
969 }
970 
971 static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod)
972 {
973 	struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
974 	u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1);
975 	size_t ofst;
976 
977 	/* Get the PCI address segments for the command using its PRPs. */
978 	ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1);
979 	if (ofst & 0x3) {
980 		iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
981 		return -EINVAL;
982 	}
983 
984 	if (iod->data_len + ofst <= ctrl->mps * 2)
985 		return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod);
986 
987 	return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod);
988 }
989 
990 /*
991  * Transfer an SGL segment from the host and return the number of data
992  * descriptors and the next segment descriptor, if any.
993  */
994 static struct nvme_sgl_desc *
995 nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl,
996 			      struct nvme_sgl_desc *desc, unsigned int *nr_sgls)
997 {
998 	struct nvme_sgl_desc *sgls;
999 	u32 length = le32_to_cpu(desc->length);
1000 	int nr_descs, ret;
1001 	void *buf;
1002 
1003 	buf = kmalloc(length, GFP_KERNEL);
1004 	if (!buf)
1005 		return NULL;
1006 
1007 	ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length,
1008 				     DMA_FROM_DEVICE);
1009 	if (ret) {
1010 		kfree(buf);
1011 		return NULL;
1012 	}
1013 
1014 	sgls = buf;
1015 	nr_descs = length / sizeof(struct nvme_sgl_desc);
1016 	if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) ||
1017 	    sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
1018 		/*
1019 		 * We have another SGL segment following this one: do not count
1020 		 * it as a regular data SGL descriptor and return it to the
1021 		 * caller.
1022 		 */
1023 		*desc = sgls[nr_descs - 1];
1024 		nr_descs--;
1025 	} else {
1026 		/* We do not have another SGL segment after this one. */
1027 		desc->length = 0;
1028 	}
1029 
1030 	*nr_sgls = nr_descs;
1031 
1032 	return sgls;
1033 }
1034 
1035 static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl,
1036 						struct nvmet_pci_epf_iod *iod)
1037 {
1038 	struct nvme_command *cmd = &iod->cmd;
1039 	struct nvme_sgl_desc seg = cmd->common.dptr.sgl;
1040 	struct nvme_sgl_desc *sgls = NULL;
1041 	int n = 0, i, nr_sgls;
1042 	int ret;
1043 
1044 	/*
1045 	 * We support neither inline data nor keyed SGLs, so we should be seeing
1046 	 * only segment descriptors.
1047 	 */
1048 	if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) &&
1049 	    seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
1050 		iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
1051 		return -EIO;
1052 	}
1053 
1054 	while (seg.length) {
1055 		sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls);
1056 		if (!sgls) {
1057 			iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1058 			return -EIO;
1059 		}
1060 
1061 		/* Grow the PCI segment table as needed. */
1062 		ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls);
1063 		if (ret) {
1064 			iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1065 			goto out;
1066 		}
1067 
1068 		/*
1069 		 * Parse the SGL descriptors to build the PCI segment table,
1070 		 * checking the descriptor type as we go.
1071 		 */
1072 		for (i = 0; i < nr_sgls; i++) {
1073 			if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) {
1074 				iod->status = NVME_SC_SGL_INVALID_TYPE |
1075 					NVME_STATUS_DNR;
1076 				goto out;
1077 			}
1078 			iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr);
1079 			iod->data_segs[n].length = le32_to_cpu(sgls[i].length);
1080 			n++;
1081 		}
1082 
1083 		kfree(sgls);
1084 	}
1085 
1086  out:
1087 	if (iod->status != NVME_SC_SUCCESS) {
1088 		kfree(sgls);
1089 		return -EIO;
1090 	}
1091 
1092 	return 0;
1093 }
1094 
1095 static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod)
1096 {
1097 	struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
1098 	struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl;
1099 
1100 	if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
1101 		/* Single data descriptor case. */
1102 		iod->nr_data_segs = 1;
1103 		iod->data_segs = &iod->data_seg;
1104 		iod->data_seg.pci_addr = le64_to_cpu(sgl->addr);
1105 		iod->data_seg.length = le32_to_cpu(sgl->length);
1106 		return 0;
1107 	}
1108 
1109 	return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod);
1110 }
1111 
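/*
 * Allocate the local buffers for a command data transfer and build the
 * scatterlist referencing them, with one SG entry per PCI address segment.
 */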
1112 static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod)
1113 {
1114 	struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
1115 	struct nvmet_req *req = &iod->req;
1116 	struct nvmet_pci_epf_segment *seg;
1117 	struct scatterlist *sg;
1118 	int ret, i;
1119 
1120 	if (iod->data_len > ctrl->mdts) {
1121 		iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1122 		return -EINVAL;
1123 	}
1124 
1125 	/*
1126 	 * Get the PCI address segments for the command data buffer using either
1127 	 * its SGLs or PRPs.
1128 	 */
1129 	if (iod->cmd.common.flags & NVME_CMD_SGL_ALL)
1130 		ret = nvmet_pci_epf_iod_parse_sgls(iod);
1131 	else
1132 		ret = nvmet_pci_epf_iod_parse_prps(iod);
1133 	if (ret)
1134 		return ret;
1135 
1136 	/* Get a command buffer using SGLs matching the PCI segments. */
1137 	if (iod->nr_data_segs == 1) {
1138 		sg_init_table(&iod->data_sgl, 1);
1139 		iod->data_sgt.sgl = &iod->data_sgl;
1140 		iod->data_sgt.nents = 1;
1141 		iod->data_sgt.orig_nents = 1;
1142 	} else {
1143 		ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
1144 				     GFP_KERNEL);
1145 		if (ret)
1146 			goto err_nomem;
1147 	}
1148 
1149 	for_each_sgtable_sg(&iod->data_sgt, sg, i) {
1150 		seg = &iod->data_segs[i];
1151 		seg->buf = kmalloc(seg->length, GFP_KERNEL);
1152 		if (!seg->buf)
1153 			goto err_nomem;
1154 		sg_set_buf(sg, seg->buf, seg->length);
1155 	}
1156 
1157 	req->transfer_len = iod->data_len;
1158 	req->sg = iod->data_sgt.sgl;
1159 	req->sg_cnt = iod->data_sgt.nents;
1160 
1161 	return 0;
1162 
1163 err_nomem:
1164 	iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1165 	return -ENOMEM;
1166 }
1167 
1168 static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)
1169 {
1170 	struct nvmet_pci_epf_queue *cq = iod->cq;
1171 	unsigned long flags;
1172 
1173 	/* Print an error message for failed commands, except AENs. */
1174 	iod->status = le16_to_cpu(iod->cqe.status) >> 1;
1175 	if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
1176 		dev_err(iod->ctrl->dev,
1177 			"CQ[%d]: Command %s (0x%x) status 0x%0x\n",
1178 			iod->sq->qid, nvmet_pci_epf_iod_name(iod),
1179 			iod->cmd.common.opcode, iod->status);
1180 
1181 	/*
1182 	 * Add the command to the list of completed commands and schedule the
1183 	 * CQ work.
1184 	 */
1185 	spin_lock_irqsave(&cq->lock, flags);
1186 	list_add_tail(&iod->link, &cq->list);
1187 	queue_delayed_work(system_highpri_wq, &cq->work, 0);
1188 	spin_unlock_irqrestore(&cq->lock, flags);
1189 }
1190 
1191 static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue)
1192 {
1193 	struct nvmet_pci_epf_iod *iod;
1194 	unsigned long flags;
1195 
1196 	spin_lock_irqsave(&queue->lock, flags);
1197 	while (!list_empty(&queue->list)) {
1198 		iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod,
1199 				       link);
1200 		list_del_init(&iod->link);
1201 		nvmet_pci_epf_free_iod(iod);
1202 	}
1203 	spin_unlock_irqrestore(&queue->lock, flags);
1204 }
1205 
1206 static int nvmet_pci_epf_add_port(struct nvmet_port *port)
1207 {
1208 	mutex_lock(&nvmet_pci_epf_ports_mutex);
1209 	list_add_tail(&port->entry, &nvmet_pci_epf_ports);
1210 	mutex_unlock(&nvmet_pci_epf_ports_mutex);
1211 	return 0;
1212 }
1213 
1214 static void nvmet_pci_epf_remove_port(struct nvmet_port *port)
1215 {
1216 	mutex_lock(&nvmet_pci_epf_ports_mutex);
1217 	list_del_init(&port->entry);
1218 	mutex_unlock(&nvmet_pci_epf_ports_mutex);
1219 }
1220 
1221 static struct nvmet_port *
1222 nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid)
1223 {
1224 	struct nvmet_port *p, *port = NULL;
1225 
1226 	mutex_lock(&nvmet_pci_epf_ports_mutex);
1227 	list_for_each_entry(p, &nvmet_pci_epf_ports, entry) {
1228 		if (p->disc_addr.portid == portid) {
1229 			port = p;
1230 			break;
1231 		}
1232 	}
1233 	mutex_unlock(&nvmet_pci_epf_ports_mutex);
1234 
1235 	return port;
1236 }
1237 
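/*
 * Fabrics ->queue_response() operation: commands that must transfer data to
 * the host are completed by the command execution work once the data has been
 * transferred; all other commands are completed here directly.
 */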
1238 static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
1239 {
1240 	struct nvmet_pci_epf_iod *iod =
1241 		container_of(req, struct nvmet_pci_epf_iod, req);
1242 
1243 	iod->status = le16_to_cpu(req->cqe->status) >> 1;
1244 
1245 	/* If we have no data to transfer, directly complete the command. */
1246 	if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
1247 		nvmet_pci_epf_complete_iod(iod);
1248 		return;
1249 	}
1250 
1251 	complete(&iod->done);
1252 }
1253 
1254 static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl)
1255 {
1256 	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1257 	int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12;
1258 
1259 	return ilog2(ctrl->mdts) - page_shift;
1260 }
1261 
1262 static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
1263 		u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector)
1264 {
1265 	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1266 	struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1267 	u16 status;
1268 	int ret;
1269 
1270 	if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
1271 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1272 
1273 	if (!(flags & NVME_QUEUE_PHYS_CONTIG))
1274 		return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
1275 
1276 	cq->pci_addr = pci_addr;
1277 	cq->qid = cqid;
1278 	cq->depth = qsize + 1;
1279 	cq->vector = vector;
1280 	cq->head = 0;
1281 	cq->tail = 0;
1282 	cq->phase = 1;
1283 	cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32));
1284 	nvmet_pci_epf_bar_write32(ctrl, cq->db, 0);
1285 
1286 	if (!cqid)
1287 		cq->qes = sizeof(struct nvme_completion);
1288 	else
1289 		cq->qes = ctrl->io_cqes;
1290 	cq->pci_size = cq->qes * cq->depth;
1291 
1292 	if (flags & NVME_CQ_IRQ_ENABLED) {
1293 		cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
1294 		if (!cq->iv)
1295 			return NVME_SC_INTERNAL | NVME_STATUS_DNR;
1296 		set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
1297 	}
1298 
1299 	status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
1300 	if (status != NVME_SC_SUCCESS)
1301 		goto err;
1302 
1303 	/*
1304 	 * Map the CQ PCI address space. Since PCI endpoint controllers may
1305 	 * return a partial mapping, check that the mapping is large enough.
1306 	 */
1307 	ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size,
1308 				    &cq->pci_map);
1309 	if (ret) {
1310 		dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n",
1311 			cq->qid, ret);
1312 		goto err_internal;
1313 	}
1314 
1315 	if (cq->pci_map.pci_size < cq->pci_size) {
1316 		dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
1317 			cq->qid);
1318 		goto err_unmap_queue;
1319 	}
1320 
1321 	set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
1322 
1323 	if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1324 		dev_dbg(ctrl->dev,
1325 			"CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
1326 			cqid, qsize, cq->qes, cq->vector);
1327 	else
1328 		dev_dbg(ctrl->dev,
1329 			"CQ[%u]: %u entries of %zu B, IRQ disabled\n",
1330 			cqid, qsize, cq->qes);
1331 
1332 	return NVME_SC_SUCCESS;
1333 
1334 err_unmap_queue:
1335 	nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
1336 err_internal:
1337 	status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1338 err:
1339 	if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1340 		nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
1341 	return status;
1342 }
1343 
1344 static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
1345 {
1346 	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1347 	struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1348 
1349 	if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
1350 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1351 
1352 	cancel_delayed_work_sync(&cq->work);
1353 	nvmet_pci_epf_drain_queue(cq);
1354 	if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
1355 		nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
1356 	nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
1357 	nvmet_cq_put(&cq->nvme_cq);
1358 
1359 	return NVME_SC_SUCCESS;
1360 }
1361 
1362 static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
1363 		u16 sqid, u16 cqid, u16 flags, u16 qsize, u64 pci_addr)
1364 {
1365 	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1366 	struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
1367 	struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1368 	u16 status;
1369 
1370 	if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1371 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1372 
1373 	if (!(flags & NVME_QUEUE_PHYS_CONTIG))
1374 		return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
1375 
1376 	sq->pci_addr = pci_addr;
1377 	sq->qid = sqid;
1378 	sq->depth = qsize + 1;
1379 	sq->head = 0;
1380 	sq->tail = 0;
1381 	sq->phase = 0;
1382 	sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32));
1383 	nvmet_pci_epf_bar_write32(ctrl, sq->db, 0);
1384 	if (!sqid)
1385 		sq->qes = 1UL << NVME_ADM_SQES;
1386 	else
1387 		sq->qes = ctrl->io_sqes;
1388 	sq->pci_size = sq->qes * sq->depth;
1389 
1390 	status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid,
1391 			sq->depth);
1392 	if (status != NVME_SC_SUCCESS)
1393 		return status;
1394 
1395 	sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
1396 				min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
1397 	if (!sq->iod_wq) {
1398 		dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid);
1399 		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1400 		goto out_destroy_sq;
1401 	}
1402 
1403 	set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
1404 
1405 	dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
1406 		sqid, qsize, sq->qes);
1407 
1408 	return NVME_SC_SUCCESS;
1409 
1410 out_destroy_sq:
1411 	nvmet_sq_destroy(&sq->nvme_sq);
1412 	return status;
1413 }
1414 
1415 static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
1416 {
1417 	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1418 	struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
1419 
1420 	if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1421 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1422 
1423 	destroy_workqueue(sq->iod_wq);
1424 	sq->iod_wq = NULL;
1425 
1426 	nvmet_pci_epf_drain_queue(sq);
1427 
1428 	if (sq->nvme_sq.ctrl)
1429 		nvmet_sq_destroy(&sq->nvme_sq);
1430 
1431 	return NVME_SC_SUCCESS;
1432 }
1433 
1434 static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl,
1435 				  u8 feat, void *data)
1436 {
1437 	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1438 	struct nvmet_feat_arbitration *arb;
1439 	struct nvmet_feat_irq_coalesce *irqc;
1440 	struct nvmet_feat_irq_config *irqcfg;
1441 	struct nvmet_pci_epf_irq_vector *iv;
1442 	u16 status;
1443 
1444 	switch (feat) {
1445 	case NVME_FEAT_ARBITRATION:
1446 		arb = data;
1447 		if (!ctrl->sq_ab)
1448 			arb->ab = 0x7;
1449 		else
1450 			arb->ab = ilog2(ctrl->sq_ab);
1451 		return NVME_SC_SUCCESS;
1452 
1453 	case NVME_FEAT_IRQ_COALESCE:
1454 		irqc = data;
1455 		irqc->thr = ctrl->irq_vector_threshold;
1456 		irqc->time = 0;
1457 		return NVME_SC_SUCCESS;
1458 
1459 	case NVME_FEAT_IRQ_CONFIG:
1460 		irqcfg = data;
1461 		mutex_lock(&ctrl->irq_lock);
1462 		iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
1463 		if (iv) {
1464 			irqcfg->cd = iv->cd;
1465 			status = NVME_SC_SUCCESS;
1466 		} else {
1467 			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1468 		}
1469 		mutex_unlock(&ctrl->irq_lock);
1470 		return status;
1471 
1472 	default:
1473 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1474 	}
1475 }
1476 
1477 static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl,
1478 				  u8 feat, void *data)
1479 {
1480 	struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1481 	struct nvmet_feat_arbitration *arb;
1482 	struct nvmet_feat_irq_coalesce *irqc;
1483 	struct nvmet_feat_irq_config *irqcfg;
1484 	struct nvmet_pci_epf_irq_vector *iv;
1485 	u16 status;
1486 
1487 	switch (feat) {
1488 	case NVME_FEAT_ARBITRATION:
1489 		arb = data;
1490 		if (arb->ab == 0x7)
1491 			ctrl->sq_ab = 0;
1492 		else
1493 			ctrl->sq_ab = 1 << arb->ab;
1494 		return NVME_SC_SUCCESS;
1495 
1496 	case NVME_FEAT_IRQ_COALESCE:
1497 		/*
1498 		 * Since we do not implement precise IRQ coalescing timing,
1499 		 * ignore the time field.
1500 		 */
1501 		irqc = data;
1502 		ctrl->irq_vector_threshold = irqc->thr + 1;
1503 		return NVME_SC_SUCCESS;
1504 
1505 	case NVME_FEAT_IRQ_CONFIG:
1506 		irqcfg = data;
1507 		mutex_lock(&ctrl->irq_lock);
1508 		iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
1509 		if (iv) {
1510 			iv->cd = irqcfg->cd;
1511 			status = NVME_SC_SUCCESS;
1512 		} else {
1513 			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1514 		}
1515 		mutex_unlock(&ctrl->irq_lock);
1516 		return status;
1517 
1518 	default:
1519 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1520 	}
1521 }
1522 
1523 static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
1524 	.owner		= THIS_MODULE,
1525 	.type		= NVMF_TRTYPE_PCI,
1526 	.add_port	= nvmet_pci_epf_add_port,
1527 	.remove_port	= nvmet_pci_epf_remove_port,
1528 	.queue_response = nvmet_pci_epf_queue_response,
1529 	.get_mdts	= nvmet_pci_epf_get_mdts,
1530 	.create_cq	= nvmet_pci_epf_create_cq,
1531 	.delete_cq	= nvmet_pci_epf_delete_cq,
1532 	.create_sq	= nvmet_pci_epf_create_sq,
1533 	.delete_sq	= nvmet_pci_epf_delete_sq,
1534 	.get_feature	= nvmet_pci_epf_get_feat,
1535 	.set_feature	= nvmet_pci_epf_set_feat,
1536 };
1537 
1538 static void nvmet_pci_epf_cq_work(struct work_struct *work);
1539 
1540 static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
1541 				     unsigned int qid, bool sq)
1542 {
1543 	struct nvmet_pci_epf_queue *queue;
1544 
1545 	if (sq) {
1546 		queue = &ctrl->sq[qid];
1547 	} else {
1548 		queue = &ctrl->cq[qid];
1549 		INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
1550 	}
1551 	queue->ctrl = ctrl;
1552 	queue->qid = qid;
1553 	spin_lock_init(&queue->lock);
1554 	INIT_LIST_HEAD(&queue->list);
1555 }
1556 
1557 static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl)
1558 {
1559 	unsigned int qid;
1560 
1561 	ctrl->sq = kcalloc(ctrl->nr_queues,
1562 			   sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
1563 	if (!ctrl->sq)
1564 		return -ENOMEM;
1565 
1566 	ctrl->cq = kcalloc(ctrl->nr_queues,
1567 			   sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
1568 	if (!ctrl->cq) {
1569 		kfree(ctrl->sq);
1570 		ctrl->sq = NULL;
1571 		return -ENOMEM;
1572 	}
1573 
1574 	for (qid = 0; qid < ctrl->nr_queues; qid++) {
1575 		nvmet_pci_epf_init_queue(ctrl, qid, true);
1576 		nvmet_pci_epf_init_queue(ctrl, qid, false);
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
1583 {
1584 	kfree(ctrl->sq);
1585 	ctrl->sq = NULL;
1586 	kfree(ctrl->cq);
1587 	ctrl->cq = NULL;
1588 }
1589 
1590 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
1591 {
1592 	struct nvmet_pci_epf_iod *iod =
1593 		container_of(work, struct nvmet_pci_epf_iod, work);
1594 	struct nvmet_req *req = &iod->req;
1595 	int ret;
1596 
1597 	if (!iod->ctrl->link_up) {
1598 		nvmet_pci_epf_free_iod(iod);
1599 		return;
1600 	}
1601 
1602 	if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) {
1603 		iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1604 		goto complete;
1605 	}
1606 
1607 	if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
1608 		goto complete;
1609 
1610 	iod->data_len = nvmet_req_transfer_len(req);
1611 	if (iod->data_len) {
1612 		/*
1613 		 * Get the data DMA transfer direction. Here "device" means the
1614 		 * PCI root-complex host.
1615 		 */
1616 		if (nvme_is_write(&iod->cmd))
1617 			iod->dma_dir = DMA_FROM_DEVICE;
1618 		else
1619 			iod->dma_dir = DMA_TO_DEVICE;
1620 
1621 		/*
1622 		 * Setup the command data buffer and get the command data from
1623 		 * the host if needed.
1624 		 */
1625 		ret = nvmet_pci_epf_alloc_iod_data_buf(iod);
1626 		if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
1627 			ret = nvmet_pci_epf_transfer_iod_data(iod);
1628 		if (ret) {
1629 			nvmet_req_uninit(req);
1630 			goto complete;
1631 		}
1632 	}
1633 
1634 	req->execute(req);
1635 
1636 	/*
1637 	 * If we do not have data to transfer after the command execution
1638 	 * finishes, nvmet_pci_epf_queue_response() will complete the command
1639 	 * directly. No need to wait for the completion in this case.
1640 	 */
1641 	if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE)
1642 		return;
1643 
1644 	wait_for_completion(&iod->done);
1645 
1646 	if (iod->status == NVME_SC_SUCCESS) {
1647 		WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
1648 		nvmet_pci_epf_transfer_iod_data(iod);
1649 	}
1650 
1651 complete:
1652 	nvmet_pci_epf_complete_iod(iod);
1653 }
1654 
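/*
 * Fetch commands from an SQ, up to the SQ arbitration burst limit, and queue
 * them for execution. Return the number of commands fetched.
 */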
1655 static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
1656 				    struct nvmet_pci_epf_queue *sq)
1657 {
1658 	struct nvmet_pci_epf_iod *iod;
1659 	int ret, n = 0;
1660 	u16 head = sq->head;
1661 
1662 	sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
1663 	while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
1664 		iod = nvmet_pci_epf_alloc_iod(sq);
1665 		if (!iod)
1666 			break;
1667 
1668 		/* Get the NVMe command submitted by the host. */
1669 		ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
1670 					     sq->pci_addr + head * sq->qes,
1671 					     sq->qes, DMA_FROM_DEVICE);
1672 		if (ret) {
1673 			/* Not much we can do... */
1674 			nvmet_pci_epf_free_iod(iod);
1675 			break;
1676 		}
1677 
1678 		dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
1679 			sq->qid, head, sq->tail,
1680 			nvmet_pci_epf_iod_name(iod));
1681 
1682 		head++;
1683 		if (head == sq->depth)
1684 			head = 0;
1685 		WRITE_ONCE(sq->head, head);
1686 		n++;
1687 
1688 		queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
1689 
1690 		sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
1691 	}
1692 
1693 	return n;
1694 }
1695 
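/*
 * SQ polling work: poll the doorbells of all live SQs using round-robin
 * arbitration while the controller is enabled, and reschedule once the
 * controller has been idle for NVMET_PCI_EPF_SQ_POLL_IDLE.
 */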
1696 static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
1697 {
1698 	struct nvmet_pci_epf_ctrl *ctrl =
1699 		container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
1700 	struct nvmet_pci_epf_queue *sq;
1701 	unsigned long limit = jiffies;
1702 	unsigned long last = 0;
1703 	int i, nr_sqs;
1704 
1705 	while (ctrl->link_up && ctrl->enabled) {
1706 		nr_sqs = 0;
1707 		/* Do round-robin arbitration. */
1708 		for (i = 0; i < ctrl->nr_queues; i++) {
1709 			sq = &ctrl->sq[i];
1710 			if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1711 				continue;
1712 			if (nvmet_pci_epf_process_sq(ctrl, sq))
1713 				nr_sqs++;
1714 		}
1715 
1716 		/*
1717 		 * If we have been running for a while, reschedule to let other
1718 		 * tasks run and to avoid RCU stalls.
1719 		 */
1720 		if (time_is_before_jiffies(limit + secs_to_jiffies(1))) {
1721 			cond_resched();
1722 			limit = jiffies;
1723 			continue;
1724 		}
1725 
1726 		if (nr_sqs) {
1727 			last = jiffies;
1728 			continue;
1729 		}
1730 
1731 		/*
1732 		 * If we have not received any command on any queue for more
1733 		 * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and
1734 		 * reschedule. This avoids "burning" a CPU when the controller
1735 		 * is idle for a long time.
1736 		 */
1737 		if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE))
1738 			break;
1739 
1740 		cpu_relax();
1741 	}
1742 
1743 	schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL);
1744 }
1745 
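/*
 * CQ work: post completion entries for completed commands to the host CQ
 * memory and raise interrupts, honoring the IRQ coalescing threshold. If the
 * CQ is full, retry after NVMET_PCI_EPF_CQ_RETRY_INTERVAL.
 */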
1746 static void nvmet_pci_epf_cq_work(struct work_struct *work)
1747 {
1748 	struct nvmet_pci_epf_queue *cq =
1749 		container_of(work, struct nvmet_pci_epf_queue, work.work);
1750 	struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl;
1751 	struct nvme_completion *cqe;
1752 	struct nvmet_pci_epf_iod *iod;
1753 	unsigned long flags;
1754 	int ret = 0, n = 0;
1755 
1756 	while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
1757 
1758 		/* Check that the CQ is not full. */
1759 		cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db);
1760 		if (cq->head == cq->tail + 1) {
1761 			ret = -EAGAIN;
1762 			break;
1763 		}
1764 
1765 		spin_lock_irqsave(&cq->lock, flags);
1766 		iod = list_first_entry_or_null(&cq->list,
1767 					       struct nvmet_pci_epf_iod, link);
1768 		if (iod)
1769 			list_del_init(&iod->link);
1770 		spin_unlock_irqrestore(&cq->lock, flags);
1771 
1772 		if (!iod)
1773 			break;
1774 
1775 		/*
1776 		 * Post the IOD completion entry. If the IOD request was
1777 		 * executed (req->execute() called), the CQE is already
1778 		 * initialized. However, the IOD may have failed before
1779 		 * that, leaving the CQE not properly initialized. So always
1780 		 * initialize it here.
1781 		 */
1782 		cqe = &iod->cqe;
1783 		cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head));
1784 		cqe->sq_id = cpu_to_le16(iod->sq->qid);
1785 		cqe->command_id = iod->cmd.common.command_id;
1786 		cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
1787 
1788 		dev_dbg(ctrl->dev,
1789 			"CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n",
1790 			cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
1791 			le64_to_cpu(cqe->result.u64), cq->head, cq->tail,
1792 			cq->phase);
1793 
1794 		memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes,
1795 			    cqe, cq->qes);
1796 
1797 		cq->tail++;
1798 		if (cq->tail >= cq->depth) {
1799 			cq->tail = 0;
1800 			cq->phase ^= 1;
1801 		}
1802 
1803 		nvmet_pci_epf_free_iod(iod);
1804 
1805 		/* Signal the host. */
1806 		nvmet_pci_epf_raise_irq(ctrl, cq, false);
1807 		n++;
1808 	}
1809 
1810 	/*
1811 	 * We do not support precise IRQ coalescing time (100ns units as per
1812 	 * the NVMe specification). So if we have posted completion entries without
1813 	 * reaching the interrupt coalescing threshold, raise an interrupt.
1814 	 */
1815 	if (n)
1816 		nvmet_pci_epf_raise_irq(ctrl, cq, true);
1817 
1818 	if (ret < 0)
1819 		queue_delayed_work(system_highpri_wq, &cq->work,
1820 				   NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
1821 }
1822 
1823 static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl)
1824 {
1825 	struct nvmet_ctrl *tctrl = ctrl->tctrl;
1826 
1827 	/* Initialize controller status. */
1828 	tctrl->csts = 0;
1829 	ctrl->csts = 0;
1830 	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
1831 
1832 	/* Initialize the controller configuration. */
1833 	tctrl->cc = 0;
1834 	ctrl->cc = 0;
1835 	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
1836 }
1837 
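/*
 * Enable the controller: set up the memory page size and I/O queue entry sizes
 * from CC and create the admin submission and completion queues from the AQA,
 * ASQ and ACQ registers.
 */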
1838 static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
1839 {
1840 	u64 pci_addr, asq, acq;
1841 	u32 aqa;
1842 	u16 status, qsize;
1843 
1844 	if (ctrl->enabled)
1845 		return 0;
1846 
1847 	dev_info(ctrl->dev, "Enabling controller\n");
1848 
1849 	ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12;
1850 	ctrl->mps = 1UL << ctrl->mps_shift;
1851 	ctrl->mps_mask = ctrl->mps - 1;
1852 
1853 	ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc);
1854 	if (ctrl->io_sqes < sizeof(struct nvme_command)) {
1855 		dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
1856 			ctrl->io_sqes, sizeof(struct nvme_command));
1857 		goto err;
1858 	}
1859 
1860 	ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
1861 	if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
1862 		dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
1863 			ctrl->io_cqes, sizeof(struct nvme_completion));
1864 		goto err;
1865 	}
1866 
1867 	/* Create the admin queue. */
1868 	aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA);
1869 	asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ);
1870 	acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ);
1871 
1872 	qsize = (aqa & 0x0fff0000) >> 16;
1873 	pci_addr = acq & GENMASK_ULL(63, 12);
1874 	status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0,
1875 				NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG,
1876 				qsize, pci_addr, 0);
1877 	if (status != NVME_SC_SUCCESS) {
1878 		dev_err(ctrl->dev, "Failed to create admin completion queue\n");
1879 		goto err;
1880 	}
1881 
1882 	qsize = aqa & 0x00000fff;
1883 	pci_addr = asq & GENMASK_ULL(63, 12);
1884 	status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0,
1885 			NVME_QUEUE_PHYS_CONTIG, qsize, pci_addr);
1886 	if (status != NVME_SC_SUCCESS) {
1887 		dev_err(ctrl->dev, "Failed to create admin submission queue\n");
1888 		nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
1889 		goto err;
1890 	}
1891 
1892 	ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
1893 	ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
1894 	ctrl->enabled = true;
1895 	ctrl->csts = NVME_CSTS_RDY;
1896 
1897 	/* Start polling the controller SQs. */
1898 	schedule_delayed_work(&ctrl->poll_sqs, 0);
1899 
1900 	return 0;
1901 
1902 err:
1903 	nvmet_pci_epf_clear_ctrl_config(ctrl);
1904 	return -EINVAL;
1905 }
1906 
1907 static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl,
1908 				       bool shutdown)
1909 {
1910 	int qid;
1911 
1912 	if (!ctrl->enabled)
1913 		return;
1914 
1915 	dev_info(ctrl->dev, "%s controller\n",
1916 		 shutdown ? "Shutting down" : "Disabling");
1917 
1918 	ctrl->enabled = false;
1919 	cancel_delayed_work_sync(&ctrl->poll_sqs);
1920 
1921 	/* Delete all I/O queues first. */
1922 	for (qid = 1; qid < ctrl->nr_queues; qid++)
1923 		nvmet_pci_epf_delete_sq(ctrl->tctrl, qid);
1924 
1925 	for (qid = 1; qid < ctrl->nr_queues; qid++)
1926 		nvmet_pci_epf_delete_cq(ctrl->tctrl, qid);
1927 
1928 	/* Delete the admin queue last. */
1929 	nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
1930 	nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
1931 
1932 	ctrl->csts &= ~NVME_CSTS_RDY;
1933 	if (shutdown) {
1934 		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1935 		ctrl->cc &= ~NVME_CC_ENABLE;
1936 		nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
1937 	}
1938 }
1939 
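/*
 * CC register polling work: detect controller enable, disable and shutdown
 * transitions requested by the host and update CSTS accordingly.
 */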
1940 static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
1941 {
1942 	struct nvmet_pci_epf_ctrl *ctrl =
1943 		container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work);
1944 	u32 old_cc, new_cc;
1945 	int ret;
1946 
1947 	if (!ctrl->tctrl)
1948 		return;
1949 
1950 	old_cc = ctrl->cc;
1951 	new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
1952 	if (new_cc == old_cc)
1953 		goto reschedule_work;
1954 
1955 	ctrl->cc = new_cc;
1956 
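	/* Handle CC.EN and CC.SHN transitions since the last poll. */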
1957 	if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
1958 		ret = nvmet_pci_epf_enable_ctrl(ctrl);
1959 		if (ret)
1960 			goto reschedule_work;
1961 	}
1962 
1963 	if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc))
1964 		nvmet_pci_epf_disable_ctrl(ctrl, false);
1965 
1966 	if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc))
1967 		nvmet_pci_epf_disable_ctrl(ctrl, true);
1968 
1969 	if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
1970 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1971 
1972 	nvmet_update_cc(ctrl->tctrl, ctrl->cc);
1973 	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
1974 
1975 reschedule_work:
1976 	schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
1977 }
1978 
1979 static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
1980 {
1981 	struct nvmet_ctrl *tctrl = ctrl->tctrl;
1982 
1983 	ctrl->bar = ctrl->nvme_epf->reg_bar;
1984 
1985 	/* Copy the target controller capabilities as a base. */
1986 	ctrl->cap = tctrl->cap;
1987 
1988 	/* Contiguous Queues Required (CQR). */
1989 	ctrl->cap |= 0x1ULL << 16;
1990 
1991 	/* Set the Doorbell Stride to 4B (DSTRD). */
1992 	ctrl->cap &= ~GENMASK_ULL(35, 32);
1993 
1994 	/* Clear NVM Subsystem Reset Supported (NSSRS). */
1995 	ctrl->cap &= ~(0x1ULL << 36);
1996 
1997 	/* Clear Boot Partition Support (BPS). */
1998 	ctrl->cap &= ~(0x1ULL << 45);
1999 
2000 	/* Clear Persistent Memory Region Supported (PMRS). */
2001 	ctrl->cap &= ~(0x1ULL << 56);
2002 
2003 	/* Clear Controller Memory Buffer Supported (CMBS). */
2004 	ctrl->cap &= ~(0x1ULL << 57);
2005 
2006 	nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
2007 	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);
2008 
2009 	nvmet_pci_epf_clear_ctrl_config(ctrl);
2010 }
2011 
2012 static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
2013 				     unsigned int max_nr_queues)
2014 {
2015 	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2016 	struct nvmet_alloc_ctrl_args args = {};
2017 	char hostnqn[NVMF_NQN_SIZE];
2018 	uuid_t id;
2019 	int ret;
2020 
2021 	memset(ctrl, 0, sizeof(*ctrl));
2022 	ctrl->dev = &nvme_epf->epf->dev;
2023 	mutex_init(&ctrl->irq_lock);
2024 	ctrl->nvme_epf = nvme_epf;
2025 	ctrl->mdts = nvme_epf->mdts_kb * SZ_1K;
2026 	INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work);
2027 	INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work);
2028 
2029 	ret = mempool_init_kmalloc_pool(&ctrl->iod_pool,
2030 					max_nr_queues * NVMET_MAX_QUEUE_SIZE,
2031 					sizeof(struct nvmet_pci_epf_iod));
2032 	if (ret) {
2033 		dev_err(ctrl->dev, "Failed to initialize IOD mempool\n");
2034 		return ret;
2035 	}
2036 
2037 	ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid);
2038 	if (!ctrl->port) {
2039 		dev_err(ctrl->dev, "Port not found\n");
2040 		ret = -EINVAL;
2041 		goto out_mempool_exit;
2042 	}
2043 
2044 	/* Create the target controller. */
2045 	uuid_gen(&id);
2046 	snprintf(hostnqn, NVMF_NQN_SIZE,
2047 		 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
2048 	args.port = ctrl->port;
2049 	args.subsysnqn = nvme_epf->subsysnqn;
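	/*
	 * The random UUID above is only used to build a unique host NQN:
	 * the host ID passed to the target core is left all-zeroes.
	 */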
2050 	memset(&id, 0, sizeof(uuid_t));
2051 	args.hostid = &id;
2052 	args.hostnqn = hostnqn;
2053 	args.ops = &nvmet_pci_epf_fabrics_ops;
2054 
2055 	ctrl->tctrl = nvmet_alloc_ctrl(&args);
2056 	if (!ctrl->tctrl) {
2057 		dev_err(ctrl->dev, "Failed to create target controller\n");
2058 		ret = -ENOMEM;
2059 		goto out_mempool_exit;
2060 	}
2061 	ctrl->tctrl->drvdata = ctrl;
2062 
2063 	/* We do not support protection information for now. */
2064 	if (ctrl->tctrl->pi_support) {
2065 		dev_err(ctrl->dev,
2066 			"Protection information (PI) is not supported\n");
2067 		ret = -ENOTSUPP;
2068 		goto out_put_ctrl;
2069 	}
2070 
2071 	/* Allocate our queues, up to the maximum number. */
2072 	ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
2073 	ret = nvmet_pci_epf_alloc_queues(ctrl);
2074 	if (ret)
2075 		goto out_put_ctrl;
2076 
2077 	/*
2078 	 * Allocate the IRQ vector descriptors. We cannot have more than the
2079 	 * maximum number of queues.
2080 	 */
2081 	ret = nvmet_pci_epf_alloc_irq_vectors(ctrl);
2082 	if (ret)
2083 		goto out_free_queues;
2084 
2085 	dev_info(ctrl->dev,
2086 		 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n",
2087 		 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1,
2088 		 ctrl->mdts);
2089 
2090 	/* Initialize BAR 0 using the target controller CAP. */
2091 	nvmet_pci_epf_init_bar(ctrl);
2092 
2093 	return 0;
2094 
2095 out_free_queues:
2096 	nvmet_pci_epf_free_queues(ctrl);
2097 out_put_ctrl:
2098 	nvmet_ctrl_put(ctrl->tctrl);
2099 	ctrl->tctrl = NULL;
2100 out_mempool_exit:
2101 	mempool_exit(&ctrl->iod_pool);
2102 	return ret;
2103 }
2104 
2105 static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2106 {
2108 	dev_info(ctrl->dev, "PCI link up\n");
2109 	ctrl->link_up = true;
2110 
2111 	schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
2112 }
2113 
2114 static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2115 {
2116 	dev_info(ctrl->dev, "PCI link down\n");
2117 	ctrl->link_up = false;
2118 
2119 	cancel_delayed_work_sync(&ctrl->poll_cc);
2120 
2121 	nvmet_pci_epf_disable_ctrl(ctrl, false);
2122 	nvmet_pci_epf_clear_ctrl_config(ctrl);
2123 }
2124 
2125 static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2126 {
2127 	if (!ctrl->tctrl)
2128 		return;
2129 
2130 	dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
2131 		 ctrl->tctrl->subsys->subsysnqn);
2132 
2133 	nvmet_pci_epf_stop_ctrl(ctrl);
2134 
2135 	nvmet_pci_epf_free_queues(ctrl);
2136 	nvmet_pci_epf_free_irq_vectors(ctrl);
2137 
2138 	nvmet_ctrl_put(ctrl->tctrl);
2139 	ctrl->tctrl = NULL;
2140 
2141 	mempool_exit(&ctrl->iod_pool);
2142 }
2143 
2144 static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
2145 {
2146 	struct pci_epf *epf = nvme_epf->epf;
2147 	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2148 	size_t reg_size, reg_bar_size;
2149 	size_t msix_table_size = 0;
2150 
2151 	/*
2152 	 * The first free BAR will be our register BAR and per NVMe
2153 	 * specifications, it must be BAR 0.
2154 	 */
2155 	if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
2156 		dev_err(&epf->dev, "BAR 0 is not free\n");
2157 		return -ENODEV;
2158 	}
2159 
2160 	/*
2161 	 * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims
2162 	 * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type
2163 	 * is required to be 64-bit. Thus, for interoperability, always set the
2164 	 * type to 64-bit. In the rare case that the PCI EPC does not support
2165 	 * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail,
2166 	 * and we will return the error to the user.
2167 	 */
2168 	epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
2169 
2170 	/*
2171 	 * Calculate the size of the register bar: NVMe registers first with
2172 	 * enough space for the doorbells, followed by the MSI-X table
2173 	 * if supported.
2174 	 */
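	/* One SQ tail and one CQ head doorbell (4B each) per queue pair. */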
2175 	reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
2176 	reg_size = ALIGN(reg_size, 8);
2177 
2178 	if (epc_features->msix_capable) {
2179 		size_t pba_size;
2180 
2181 		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
2182 		nvme_epf->msix_table_offset = reg_size;
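		/*
		 * The PBA needs one pending bit per MSI-X vector, packed into
		 * bytes and aligned to a QWORD boundary.
		 */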
2183 		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
2184 
2185 		reg_size += msix_table_size + pba_size;
2186 	}
2187 
2188 	if (epc_features->bar[BAR_0].type == BAR_FIXED) {
2189 		if (reg_size > epc_features->bar[BAR_0].fixed_size) {
2190 			dev_err(&epf->dev,
2191 				"BAR 0 size %llu B too small, need %zu B\n",
2192 				epc_features->bar[BAR_0].fixed_size,
2193 				reg_size);
2194 			return -ENOMEM;
2195 		}
2196 		reg_bar_size = epc_features->bar[BAR_0].fixed_size;
2197 	} else {
2198 		reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
2199 	}
2200 
2201 	nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
2202 						epc_features, PRIMARY_INTERFACE);
2203 	if (!nvme_epf->reg_bar) {
2204 		dev_err(&epf->dev, "Failed to allocate BAR 0\n");
2205 		return -ENOMEM;
2206 	}
2207 	memset(nvme_epf->reg_bar, 0, reg_bar_size);
2208 
2209 	return 0;
2210 }
2211 
2212 static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
2213 {
2214 	struct pci_epf *epf = nvme_epf->epf;
2215 
2216 	if (!nvme_epf->reg_bar)
2217 		return;
2218 
2219 	pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
2220 	nvme_epf->reg_bar = NULL;
2221 }
2222 
2223 static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
2224 {
2225 	struct pci_epf *epf = nvme_epf->epf;
2226 
2227 	pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
2228 			  &epf->bar[BAR_0]);
2229 }
2230 
2231 static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
2232 {
2233 	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2234 	struct pci_epf *epf = nvme_epf->epf;
2235 	int ret;
2236 
2237 	/* Enable MSI-X if supported, otherwise, use MSI. */
2238 	if (epc_features->msix_capable && epf->msix_interrupts) {
2239 		ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
2240 				       epf->msix_interrupts, BAR_0,
2241 				       nvme_epf->msix_table_offset);
2242 		if (ret) {
2243 			dev_err(&epf->dev, "Failed to configure MSI-X\n");
2244 			return ret;
2245 		}
2246 
2247 		nvme_epf->nr_vectors = epf->msix_interrupts;
2248 		nvme_epf->irq_type = PCI_IRQ_MSIX;
2249 
2250 		return 0;
2251 	}
2252 
2253 	if (epc_features->msi_capable && epf->msi_interrupts) {
2254 		ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
2255 				      epf->msi_interrupts);
2256 		if (ret) {
2257 			dev_err(&epf->dev, "Failed to configure MSI\n");
2258 			return ret;
2259 		}
2260 
2261 		nvme_epf->nr_vectors = epf->msi_interrupts;
2262 		nvme_epf->irq_type = PCI_IRQ_MSI;
2263 
2264 		return 0;
2265 	}
2266 
2267 	/* MSI and MSI-X are not supported: fall back to INTx. */
2268 	nvme_epf->nr_vectors = 1;
2269 	nvme_epf->irq_type = PCI_IRQ_INTX;
2270 
2271 	return 0;
2272 }
2273 
2274 static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
2275 {
2276 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2277 	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2278 	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2279 	unsigned int max_nr_queues = NVMET_NR_QUEUES;
2280 	int ret;
2281 
2282 	/* For now, do not support virtual functions. */
2283 	if (epf->vfunc_no > 0) {
2284 		dev_err(&epf->dev, "Virtual functions are not supported\n");
2285 		return -EINVAL;
2286 	}
2287 
2288 	/*
2289 	 * Cap the maximum number of queues we can support on the controller
2290 	 * with the number of IRQs we can use.
2291 	 */
2292 	if (epc_features->msix_capable && epf->msix_interrupts) {
2293 		dev_info(&epf->dev,
2294 			 "PCI endpoint controller supports MSI-X, %u vectors\n",
2295 			 epf->msix_interrupts);
2296 		max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
2297 	} else if (epc_features->msi_capable && epf->msi_interrupts) {
2298 		dev_info(&epf->dev,
2299 			 "PCI endpoint controller supports MSI, %u vectors\n",
2300 			 epf->msi_interrupts);
2301 		max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
2302 	}
2303 
2304 	if (max_nr_queues < 2) {
2305 		dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
2306 			max_nr_queues);
2307 		return -EINVAL;
2308 	}
2309 
2310 	/* Create the target controller. */
2311 	ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues);
2312 	if (ret) {
2313 		dev_err(&epf->dev,
2314 			"Failed to create NVMe PCI target controller (err=%d)\n",
2315 			ret);
2316 		return ret;
2317 	}
2318 
2319 	/* Set the vendor IDs from the subsystem and write the configuration header. */
2320 	epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
2321 	epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
2322 	ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
2323 				   epf->header);
2324 	if (ret) {
2325 		dev_err(&epf->dev,
2326 			"Failed to write configuration header (err=%d)\n", ret);
2327 		goto out_destroy_ctrl;
2328 	}
2329 
2330 	ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
2331 			      &epf->bar[BAR_0]);
2332 	if (ret) {
2333 		dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret);
2334 		goto out_destroy_ctrl;
2335 	}
2336 
2337 	/*
2338 	 * Enable interrupts and start polling the controller BAR if we do not
2339 	 * have a link up notifier.
2340 	 */
2341 	ret = nvmet_pci_epf_init_irq(nvme_epf);
2342 	if (ret)
2343 		goto out_clear_bar;
2344 
2345 	if (!epc_features->linkup_notifier)
2346 		nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
2347 
2348 	return 0;
2349 
2350 out_clear_bar:
2351 	nvmet_pci_epf_clear_bar(nvme_epf);
2352 out_destroy_ctrl:
2353 	nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
2354 	return ret;
2355 }
2356 
2357 static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
2358 {
2359 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2360 	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2361 
2362 	nvmet_pci_epf_destroy_ctrl(ctrl);
2363 
2364 	nvmet_pci_epf_deinit_dma(nvme_epf);
2365 	nvmet_pci_epf_clear_bar(nvme_epf);
2366 }
2367 
2368 static int nvmet_pci_epf_link_up(struct pci_epf *epf)
2369 {
2370 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2371 	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2372 
2373 	nvmet_pci_epf_start_ctrl(ctrl);
2374 
2375 	return 0;
2376 }
2377 
2378 static int nvmet_pci_epf_link_down(struct pci_epf *epf)
2379 {
2380 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2381 	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2382 
2383 	nvmet_pci_epf_stop_ctrl(ctrl);
2384 
2385 	return 0;
2386 }
2387 
2388 static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = {
2389 	.epc_init = nvmet_pci_epf_epc_init,
2390 	.epc_deinit = nvmet_pci_epf_epc_deinit,
2391 	.link_up = nvmet_pci_epf_link_up,
2392 	.link_down = nvmet_pci_epf_link_down,
2393 };
2394 
2395 static int nvmet_pci_epf_bind(struct pci_epf *epf)
2396 {
2397 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2398 	const struct pci_epc_features *epc_features;
2399 	struct pci_epc *epc = epf->epc;
2400 	int ret;
2401 
2402 	if (WARN_ON_ONCE(!epc))
2403 		return -EINVAL;
2404 
2405 	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
2406 	if (!epc_features) {
2407 		dev_err(&epf->dev, "epc_features not implemented\n");
2408 		return -EOPNOTSUPP;
2409 	}
2410 	nvme_epf->epc_features = epc_features;
2411 
2412 	ret = nvmet_pci_epf_configure_bar(nvme_epf);
2413 	if (ret)
2414 		return ret;
2415 
2416 	nvmet_pci_epf_init_dma(nvme_epf);
2417 
2418 	return 0;
2419 }
2420 
2421 static void nvmet_pci_epf_unbind(struct pci_epf *epf)
2422 {
2423 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2424 	struct pci_epc *epc = epf->epc;
2425 
2426 	nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
2427 
2428 	if (epc->init_complete) {
2429 		nvmet_pci_epf_deinit_dma(nvme_epf);
2430 		nvmet_pci_epf_clear_bar(nvme_epf);
2431 	}
2432 
2433 	nvmet_pci_epf_free_bar(nvme_epf);
2434 }
2435 
2436 static struct pci_epf_header nvme_epf_pci_header = {
2437 	.vendorid	= PCI_ANY_ID,
2438 	.deviceid	= PCI_ANY_ID,
2439 	.progif_code	= 0x02, /* NVM Express */
2440 	.baseclass_code = PCI_BASE_CLASS_STORAGE,
2441 	.subclass_code	= 0x08, /* Non-Volatile Memory controller */
2442 	.interrupt_pin	= PCI_INTERRUPT_INTA,
2443 };
2444 
2445 static int nvmet_pci_epf_probe(struct pci_epf *epf,
2446 			       const struct pci_epf_device_id *id)
2447 {
2448 	struct nvmet_pci_epf *nvme_epf;
2449 	int ret;
2450 
2451 	nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
2452 	if (!nvme_epf)
2453 		return -ENOMEM;
2454 
2455 	ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
2456 	if (ret)
2457 		return ret;
2458 
2459 	nvme_epf->epf = epf;
2460 	nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;
2461 
2462 	epf->event_ops = &nvmet_pci_epf_event_ops;
2463 	epf->header = &nvme_epf_pci_header;
2464 	epf_set_drvdata(epf, nvme_epf);
2465 
2466 	return 0;
2467 }
2468 
2469 #define to_nvme_epf(epf_group)	\
2470 	container_of(epf_group, struct nvmet_pci_epf, group)
2471 
2472 static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
2473 {
2474 	struct config_group *group = to_config_group(item);
2475 	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2476 
2477 	return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
2478 }
2479 
2480 static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
2481 					  const char *page, size_t len)
2482 {
2483 	struct config_group *group = to_config_group(item);
2484 	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2485 	u16 portid;
2486 
2487 	/* Do not allow setting this when the function is already started. */
2488 	if (nvme_epf->ctrl.tctrl)
2489 		return -EBUSY;
2490 
2491 	if (!len)
2492 		return -EINVAL;
2493 
2494 	if (kstrtou16(page, 0, &portid))
2495 		return -EINVAL;
2496 
2497 	nvme_epf->portid = cpu_to_le16(portid);
2498 
2499 	return len;
2500 }
2501 
2502 CONFIGFS_ATTR(nvmet_pci_epf_, portid);
2503 
2504 static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
2505 					    char *page)
2506 {
2507 	struct config_group *group = to_config_group(item);
2508 	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2509 
2510 	return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
2511 }
2512 
2513 static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
2514 					     const char *page, size_t len)
2515 {
2516 	struct config_group *group = to_config_group(item);
2517 	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2518 
2519 	/* Do not allow setting this when the function is already started. */
2520 	if (nvme_epf->ctrl.tctrl)
2521 		return -EBUSY;
2522 
2523 	if (!len)
2524 		return -EINVAL;
2525 
2526 	strscpy(nvme_epf->subsysnqn, page, sizeof(nvme_epf->subsysnqn));
2527 
2528 	return len;
2529 }
2530 
2531 CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);
2532 
2533 static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
2534 {
2535 	struct config_group *group = to_config_group(item);
2536 	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2537 
2538 	return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
2539 }
2540 
2541 static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
2542 					   const char *page, size_t len)
2543 {
2544 	struct config_group *group = to_config_group(item);
2545 	struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2546 	unsigned long mdts_kb;
2547 	int ret;
2548 
2549 	if (nvme_epf->ctrl.tctrl)
2550 		return -EBUSY;
2551 
2552 	ret = kstrtoul(page, 0, &mdts_kb);
2553 	if (ret)
2554 		return ret;
2555 	if (!mdts_kb)
2556 		mdts_kb = NVMET_PCI_EPF_MDTS_KB;
2557 	else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
2558 		mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;
2559 
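	/*
	 * The NVMe MDTS field is reported as a power-of-two multiple of the
	 * minimum memory page size, so only accept power-of-two values here.
	 */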
2560 	if (!is_power_of_2(mdts_kb))
2561 		return -EINVAL;
2562 
2563 	nvme_epf->mdts_kb = mdts_kb;
2564 
2565 	return len;
2566 }
2567 
2568 CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);
2569 
2570 static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
2571 	&nvmet_pci_epf_attr_portid,
2572 	&nvmet_pci_epf_attr_subsysnqn,
2573 	&nvmet_pci_epf_attr_mdts_kb,
2574 	NULL,
2575 };
2576 
2577 static const struct config_item_type nvmet_pci_epf_group_type = {
2578 	.ct_attrs	= nvmet_pci_epf_attrs,
2579 	.ct_owner	= THIS_MODULE,
2580 };
2581 
2582 static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
2583 						  struct config_group *group)
2584 {
2585 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2586 
2587 	config_group_init_type_name(&nvme_epf->group, "nvme",
2588 				    &nvmet_pci_epf_group_type);
2589 
2590 	return &nvme_epf->group;
2591 }
2592 
2593 static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
2594 	{ .name = "nvmet_pci_epf" },
2595 	{},
2596 };
2597 
2598 static struct pci_epf_ops nvmet_pci_epf_ops = {
2599 	.bind	= nvmet_pci_epf_bind,
2600 	.unbind	= nvmet_pci_epf_unbind,
2601 	.add_cfs = nvmet_pci_epf_add_cfs,
2602 };
2603 
2604 static struct pci_epf_driver nvmet_pci_epf_driver = {
2605 	.driver.name	= "nvmet_pci_epf",
2606 	.probe		= nvmet_pci_epf_probe,
2607 	.id_table	= nvmet_pci_epf_ids,
2608 	.ops		= &nvmet_pci_epf_ops,
2609 	.owner		= THIS_MODULE,
2610 };
2611 
2612 static int __init nvmet_pci_epf_init_module(void)
2613 {
2614 	int ret;
2615 
2616 	ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
2617 	if (ret)
2618 		return ret;
2619 
2620 	ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
2621 	if (ret) {
2622 		pci_epf_unregister_driver(&nvmet_pci_epf_driver);
2623 		return ret;
2624 	}
2625 
2626 	return 0;
2627 }
2628 
2629 static void __exit nvmet_pci_epf_cleanup_module(void)
2630 {
2631 	nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
2632 	pci_epf_unregister_driver(&nvmet_pci_epf_driver);
2633 }
2634 
2635 module_init(nvmet_pci_epf_init_module);
2636 module_exit(nvmet_pci_epf_cleanup_module);
2637 
2638 MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
2639 MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
2640 MODULE_LICENSE("GPL");
2641