// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

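/*
 * The IRQ type, command and status encodings below are shared with the
 * host-side pci_endpoint_test driver (drivers/misc/pci_endpoint_test.c),
 * which drives this function through the test register BAR.
 */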
#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

#define CAP_UNALIGNED_ACCESS		BIT(0)

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*transfer_chan;
	dma_cookie_t		transfer_cookie;
	enum dma_status		transfer_status;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
};

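/*
 * Layout of the test registers exposed to the host in the test register BAR.
 * The host writes a command and the transfer parameters here and reads back
 * the status, so this packed layout must match the host-side driver.
 */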
struct pci_epf_test_reg {
	u32	magic;
	u32	command;
	u32	status;
	u64	src_addr;
	u64	dst_addr;
	u32	size;
	u32	checksum;
	u32	irq_type;
	u32	irq_number;
	u32	flags;
	u32	caps;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

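/* Default allocation size for each of the six standard BARs. */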
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr() or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr() or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr() or one obtained using the DMA mapping APIs.
 *
121  */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

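	/*
	 * The transfer is synchronous: the DMA callback records the final
	 * status and signals transfer_complete, which is waited on below.
	 */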
	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	dmaengine_terminate_sync(chan);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

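/*
 * Filter callback for dma_request_channel(): match only channels that belong
 * to the EPC's parent device and support the requested transfer direction.
 */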
static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

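	/*
	 * Prefer DMA channels private to the endpoint controller. From the
	 * endpoint's point of view, DMA_DEV_TO_MEM serves reads from the
	 * host and DMA_MEM_TO_DEV serves writes to it.
	 */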
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/*
	 * Compute the rate in KB/s: size is in bytes and ns in nanoseconds,
	 * so size * NSEC_PER_SEC / ns gives B/s, and the extra factor of
	 * 1000 in the divisor converts that to KB/s.
	 */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}

static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = reg->src_addr;
	u64 dst_addr = reg->dst_addr;
	size_t copy_size = reg->size;
	ssize_t map_size = 0;
	void *copy_buf = NULL, *buf;

	if (reg->flags & FLAG_USE_DMA) {
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

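	/*
	 * A mapping may cover less than the requested size (the mapped
	 * length is returned in pci_size), so copy in a loop, one mapped
	 * window at a time.
	 */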
	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			reg->status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			reg->status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (reg->flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_COPY_SUCCESS;
	else
		reg->status |= STATUS_COPY_FAIL;
}

static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = reg->src_addr;
	size_t src_size = reg->size;
	ssize_t map_size = 0;

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

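	/*
	 * Read the host buffer one mapped window at a time; the checksum of
	 * the whole buffer is verified against reg->checksum afterwards.
	 */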
	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			reg->status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (reg->flags & FLAG_USE_DMA) {
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

	crc32 = crc32_le(~0, src_buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_READ_SUCCESS;
	else
		reg->status |= STATUS_READ_FAIL;
}

static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = reg->dst_addr;
	size_t dst_size = reg->size;
	ssize_t map_size = 0;

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = crc32_le(~0, dst_buf, dst_size);
	buf = dst_buf;

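	/*
	 * Write the random buffer to the host one mapped window at a time;
	 * the host verifies it against the checksum stored in reg->checksum.
	 */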
	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			reg->status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (reg->flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start,
				&end, reg->flags & FLAG_USE_DMA);

	/*
	 * Wait 1 ms for the write to complete. Without this delay, an L3
	 * error is observed on the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		reg->status |= STATUS_WRITE_SUCCESS;
	else
		reg->status |= STATUS_WRITE_FAIL;
}

static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = reg->status | STATUS_IRQ_RAISED;
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	WRITE_ONCE(reg->status, status);

	switch (reg->irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, reg->irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, reg->irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = READ_ONCE(reg->command);
	if (!command)
		goto reset_handler;

	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
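	/* Re-arm the handler to poll for the next host command in ~1 ms. */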
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, ret;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      &epf->bar[bar]);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			dev_err(dev, "Failed to set BAR%d\n", bar);
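			/*
			 * Only the test register BAR is mandatory; keep
			 * going if an optional BAR cannot be set.
			 */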
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static void pci_epf_test_clear_bar(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
				  &epf->bar[bar]);
	}
}

static void pci_epf_test_set_capabilities(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	struct pci_epc *epc = epf->epc;
	u32 caps = 0;

	if (epc->ops->align_addr)
		caps |= CAP_UNALIGNED_ACCESS;

	reg->caps = cpu_to_le32(caps);
}

static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	pci_epf_test_set_capabilities(epf);

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

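	/*
	 * If the controller cannot notify us about link-up, start handling
	 * host commands immediately instead of waiting for the event.
	 */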
	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};

static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	void *base;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	enum pci_barno bar;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	size_t test_reg_size;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	if (epc_features->msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/*
		 * The PBA needs one bit per MSI-X vector; round up to bytes
		 * and align to an 8-byte QWORD.
		 */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

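	/* Back every remaining free BAR with its default size from bar_size[]. */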
	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		bar = pci_epc_get_next_free_bar(epc_features, bar);
		if (bar == NO_BAR)
			break;

		if (bar == test_reg_bar)
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_test_free_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epf_free_space(epf, epf_test->reg[bar], bar,
				   PRIMARY_INTERFACE);
	}
}

static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	return 0;
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
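	/* Only tear down what epc_init set up on an initialized controller. */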
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");