// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

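/*
 * Host <-> endpoint handshake: the host programs src_addr/dst_addr/size/flags
 * in the test register BAR, then writes a single COMMAND_* bit. The command
 * handler work item picks the command up, clears it, performs the operation,
 * sets the matching STATUS_* bits and, for the data transfer commands, raises
 * the interrupt selected by irq_type/irq_number. The host-side counterpart is
 * the pci_endpoint_test misc driver, typically exercised through pcitest.
 */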
#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
};

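/*
 * This layout is part of the device ABI: the host-side pci_endpoint_test
 * driver accesses these fields at fixed offsets through the test register
 * BAR, which is why the structure is __packed.
 */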
struct pci_epf_test_reg {
	u32	magic;
	u32	command;
	u32	status;
	u64	src_addr;
	u64	dst_addr;
	u32	size;
	u32	checksum;
	u32	irq_type;
	u32	irq_number;
	u32	flags;
} __packed;

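/*
 * PCI_ANY_ID here is only a placeholder; the vendor/device IDs are normally
 * set through the function's configfs attributes before it is bound to a
 * controller (see Documentation/PCI/endpoint/pci-test-howto.rst).
 */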
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr() or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr() or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr() or one obtained using the DMA mapping APIs.
 *
 * The function returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	dma_cookie_t cookie;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA transfer\n");
		return -EIO;
	}

	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	/* Re-arm the completion before submitting so a fast callback is not lost. */
	reinit_completion(&epf_test->transfer_complete);
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dmaengine_terminate_sync(chan);
		dev_err(dev, "DMA transfer wait interrupted\n");
		return -EINTR;
	}

	return 0;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

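/*
 * Filter callback for dma_request_channel(): claim a channel only if it
 * belongs to the EPC's parent device and supports the requested transfer
 * direction.
 */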
static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

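/* Fall back to a single generic MEMCPY channel shared by both directions. */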
241 	dma_cap_zero(mask);
242 	dma_cap_set(DMA_MEMCPY, mask);
243 
244 	dma_chan = dma_request_chan_by_mask(&mask);
245 	if (IS_ERR(dma_chan)) {
246 		ret = PTR_ERR(dma_chan);
247 		if (ret != -EPROBE_DEFER)
248 			dev_err(dev, "Failed to get DMA channel\n");
249 		return ret;
250 	}
251 	init_completion(&epf_test->transfer_complete);
252 
253 	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
254 
255 	return 0;
256 }

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

static void pci_epf_test_print_rate(const char *ops, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts;
	u64 rate, ns;

	ts = timespec64_sub(*end, *start);

	/* convert both size (stored in 'rate') and time in terms of 'ns' */
	ns = timespec64_to_ns(&ts);
	rate = size * NSEC_PER_SEC;

	/* Divide both size (stored in 'rate') and ns by a common factor */
	while (ns > UINT_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return;

	/* calculate the rate */
	do_div(rate, (uint32_t)ns);

	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
}

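/*
 * COPY: read reg->size bytes from the host buffer at reg->src_addr and write
 * them to the host buffer at reg->dst_addr, either through a MEM_TO_MEM DMA
 * transfer or through a bounce buffer and the mapped address windows.
 */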
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		if (epf_test->dma_private) {
			dev_err(dev, "Private DMA channels cannot do MEM_TO_MEM copy\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size, 0,
						 DMA_MEM_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}

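/*
 * READ: copy reg->size bytes from the host buffer at reg->src_addr into a
 * local buffer and verify them against the CRC32 the host stored in
 * reg->checksum. "READ" is from the endpoint's point of view.
 */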
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size,
						 reg->src_addr, DMA_DEV_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}

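/*
 * WRITE: fill a local buffer with random data, publish its CRC32 in
 * reg->checksum, and copy the buffer to the host at reg->dst_addr so the
 * host can verify it.
 */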
static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);

		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size,
						 reg->dst_addr,
						 DMA_MEM_TO_DEV);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay an
	 * L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}

static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
				   u16 irq)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	reg->status |= STATUS_IRQ_RAISED;

	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_LEGACY, 0);
		break;
	case IRQ_TYPE_MSI:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSI, irq);
		break;
	case IRQ_TYPE_MSIX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSIX, irq);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

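/*
 * Command handler work item: polls the command register roughly every
 * millisecond, consumes at most one command per run, and re-queues itself.
 */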
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSI, reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSIX, reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct pci_epf_bar *epf_bar;
	int bar;

	cancel_delayed_work(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		epf_bar = &epf->bar[bar];

		if (epf_test->reg[bar]) {
			pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
					  epf_bar);
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
		}
	}
}

static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, add;
	int ret;
	struct pci_epf_bar *epf_bar;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;

	epc_features = epf_test->epc_features;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/*
		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
		 * if the specific implementation required a 64-bit BAR,
		 * even if we only requested a 32-bit BAR.
		 */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      epf_bar);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.core_init = pci_epf_test_core_init,
	.link_up = pci_epf_test_link_up,
};

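/*
 * The test register BAR holds the pci_epf_test_reg block and, when the
 * controller is MSI-X capable, the MSI-X table and PBA placed right behind
 * it (see msix_table_offset below).
 */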
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	if (epc_features->bar_fixed_size[test_reg_bar]) {
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align,
					   PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_configure_bar(struct pci_epf *epf,
				  const struct pci_epc_features *epc_features)
{
	struct pci_epf_bar *epf_bar;
	bool bar_fixed_64bit;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		epf_bar = &epf->bar[i];
		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
		if (bar_fixed_64bit)
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
		if (epc_features->bar_fixed_size[i])
			bar_size[i] = epc_features->bar_fixed_size[i];
	}
}

static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	bool linkup_notifier = false;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	linkup_notifier = epc_features->linkup_notifier;
	core_init_notifier = epc_features->core_init_notifier;
	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;
	pci_epf_configure_bar(epf, epc_features);

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (!linkup_notifier && !core_init_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");