// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*transfer_chan;
	dma_cookie_t		transfer_cookie;
	enum dma_status		transfer_status;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
};

struct pci_epf_test_reg {
	u32	magic;
	u32	command;
	u32	status;
	u64	src_addr;
	u64	dst_addr;
	u32	size;
	u32	checksum;
	u32	irq_type;
	u32	irq_number;
	u32	flags;
} __packed;
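
/*
 * The register block above is exposed to the host through the test BAR and
 * implements a simple mailbox: the host fills in the transfer parameters,
 * writes a COMMAND_* bit to 'command', and then polls 'status' or waits for
 * the IRQ selected by 'irq_type'/'irq_number'. A rough host-side sketch
 * (illustrative only; the in-tree counterpart is
 * drivers/misc/pci_endpoint_test.c, which defines its own register offsets),
 * with 'base' being the ioremapped test BAR:
 *
 *	writel(lower_32_bits(src), base + 0x0c);	// src_addr (lo)
 *	writel(upper_32_bits(src), base + 0x10);	// src_addr (hi)
 *	writel(len, base + 0x1c);			// size
 *	writel(COMMAND_READ, base + 0x04);		// EP reads from host
 */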

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

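/*
 * Default sizes for BAR0..BAR5, indexed by BAR number. The BAR holding
 * struct pci_epf_test_reg is sized separately in pci_epf_test_alloc_space().
 */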
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

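/*
 * Completion callback shared by the memcpy and slave DMA paths: record the
 * engine's status and wake the waiter only once the transfer has reached a
 * terminal state (complete or error).
 */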
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
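 *
 * A minimal usage sketch (mirroring pci_epf_test_copy()), assuming src_map
 * and dst_map were set up with pci_epc_mem_map():
 *
 *	ret = pci_epf_test_data_transfer(epf_test, dst_map.phys_addr,
 *					 src_map.phys_addr, len, 0,
 *					 DMA_MEM_TO_MEM);
 *
 * With a private slave channel, @dma_remote and @dir select the RC-side
 * address instead, as done by pci_epf_test_read() and pci_epf_test_write().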
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	dmaengine_terminate_sync(chan);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

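/*
 * dmaengine filter callback: accept only channels that belong to the EPC's
 * DMA controller and that advertise the requested transfer direction
 * (filter->dma_mask is a BIT(DMA_DEV_TO_MEM) or BIT(DMA_MEM_TO_DEV) mask
 * checked against the channel's slave capabilities).
 */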
static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
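 *
 * Dedicated slave (private) RX and TX channels from the EPC's own DMA
 * controller are tried first; when they are unavailable, the function falls
 * back to a single generic DMA_MEMCPY channel shared by both directions.
 *
 * The function returns '0' on success and negative value on failure.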
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/*
	 * Compute the rate in KB/s: size is in bytes and ns in nanoseconds,
	 * so size * NSEC_PER_SEC / (ns * 1000) is kilobytes per second.
	 */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}

static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = reg->src_addr;
	u64 dst_addr = reg->dst_addr;
	size_t copy_size = reg->size;
	ssize_t map_size = 0;
	void *copy_buf = NULL, *buf;

	if (reg->flags & FLAG_USE_DMA) {
		if (epf_test->dma_private) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

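	/*
	 * pci_epc_mem_map() may map less than the requested size (the mapping
	 * is limited by the controller's outbound address translation
	 * windows), so walk the transfer range one mapping at a time.
	 */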
	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			reg->status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			reg->status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (reg->flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_COPY_SUCCESS;
	else
		reg->status |= STATUS_COPY_FAIL;
}

static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = reg->src_addr;
	size_t src_size = reg->size;
	ssize_t map_size = 0;

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			reg->status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (reg->flags & FLAG_USE_DMA) {
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

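	/*
	 * The host generated the data and published its CRC32 in
	 * reg->checksum; recompute it over what was read back to validate
	 * the transfer.
	 */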
	crc32 = crc32_le(~0, src_buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_READ_SUCCESS;
	else
		reg->status |= STATUS_READ_FAIL;
}

static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = reg->dst_addr;
	size_t dst_size = reg->size;
	ssize_t map_size = 0;

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = crc32_le(~0, dst_buf, dst_size);
	buf = dst_buf;

	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			reg->status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (reg->flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

	/*
	 * Wait 1 ms for the write to complete. Without this delay, an L3
	 * error is observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_WRITE_SUCCESS;
	else
		reg->status |= STATUS_WRITE_FAIL;
}

static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = reg->status | STATUS_IRQ_RAISED;
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	WRITE_ONCE(reg->status, status);

	switch (reg->irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, reg->irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, reg->irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = READ_ONCE(reg->command);
	if (!command)
		goto reset_handler;

	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
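	/* There is no doorbell from the host: poll the command register. */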
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, ret;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      &epf->bar[bar]);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static void pci_epf_test_clear_bar(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
				  &epf->bar[bar]);
	}
}

static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};

static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	void *base;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	enum pci_barno bar;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	size_t test_reg_size;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	if (epc_features->msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/*
		 * The PBA has one pending bit per MSI-X vector, i.e.
		 * DIV_ROUND_UP(vectors, 8) bytes, aligned to a QWORD (8 bytes).
		 */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		bar = pci_epc_get_next_free_bar(epc_features, bar);
		if (bar == NO_BAR)
			break;

		if (bar == test_reg_bar)
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_test_free_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epf_free_space(epf, epf_test->reg[bar], bar,
				   PRIMARY_INTERFACE);
	}
}

static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	return 0;
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");