xref: /linux/drivers/misc/pci_endpoint_test.c (revision 1b5f3c51fbb8042efb314484b47b2092cdd40bf6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Host side test driver to test endpoint functionality
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/cleanup.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/io.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/miscdevice.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 #include <linux/pci.h>
23 #include <linux/pci_ids.h>
24 
25 #include <linux/pci_regs.h>
26 
27 #include <uapi/linux/pcitest.h>
28 
#define DRV_MODULE_NAME				"pci-endpoint-test"

/* IRQ delivery modes understood by both host driver and endpoint function. */
#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_INTX				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

/*
 * Register map of the endpoint test function, exposed through the test
 * register BAR (test->test_reg_bar). Offsets below are into that BAR.
 */
#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_INTX_IRQ			BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

/* Status bits set by the endpoint once a COMMAND_* completes. */
#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

/* 64-bit host DMA addresses, split into lower/upper 32-bit registers. */
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

/* Capabilities advertised by the endpoint (read-only). */
#define PCI_ENDPOINT_TEST_CAPS			0x30
#define CAP_UNALIGNED_ACCESS			BIT(0)

/* Device IDs not provided by <linux/pci_ids.h>. */
#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_TI_J721S2		0xb013
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

/* AM654 cannot use BAR0 for the BAR test (see PCITEST_BAR handling). */
#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

#define PCI_VENDOR_ID_ROCKCHIP			0x1d87
#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588
93 
/* Allocates the ".%d" suffix for each probed device's misc device name. */
static DEFINE_IDA(pci_endpoint_test_ida);

/* Recover the driver context from the miscdevice embedded in it. */
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

/* Module-wide default: fall back to INTX instead of MSI at probe time. */
static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

/* Module-wide default IRQ type; per-device type lives in test->irq_type. */
static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
106 
/* Standard PCI BAR indices (0..5). */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};
115 
/* Per-device driver context, allocated at probe time. */
struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;		/* mapping of test_reg_bar */
	void __iomem	*bar[PCI_STD_NUM_BARS];	/* NULL if BAR absent/unmapped */
	struct completion irq_raised;	/* signalled by the IRQ handler */
	int		last_irq;	/* Linux IRQ number last raised */
	int		num_irqs;	/* vectors currently allocated */
	int		irq_type;	/* IRQ_TYPE_* in use, or UNDEFINED */
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;	/* BAR holding the register map */
	size_t alignment;		/* DMA buffer alignment requirement */
	const char *name;		/* kstrdup'd, used for request_irq */
};
131 
/* Per-SoC quirks, attached via pci_device_id.driver_data. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};
137 
/* Read a 32-bit register of the endpoint test function. */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}
143 
/* Write a 32-bit register of the endpoint test function. */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}
149 
150 static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
151 {
152 	struct pci_endpoint_test *test = dev_id;
153 	u32 reg;
154 
155 	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
156 	if (reg & STATUS_IRQ_RAISED) {
157 		test->last_irq = irq;
158 		complete(&test->irq_raised);
159 	}
160 
161 	return IRQ_HANDLED;
162 }
163 
/* Release all IRQ vectors and mark the device as having no IRQ type. */
static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}
171 
172 static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
173 						int type)
174 {
175 	int irq;
176 	struct pci_dev *pdev = test->pdev;
177 	struct device *dev = &pdev->dev;
178 
179 	switch (type) {
180 	case IRQ_TYPE_INTX:
181 		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
182 		if (irq < 0) {
183 			dev_err(dev, "Failed to get Legacy interrupt\n");
184 			return irq;
185 		}
186 
187 		break;
188 	case IRQ_TYPE_MSI:
189 		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
190 		if (irq < 0) {
191 			dev_err(dev, "Failed to get MSI interrupts\n");
192 			return irq;
193 		}
194 
195 		break;
196 	case IRQ_TYPE_MSIX:
197 		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
198 		if (irq < 0) {
199 			dev_err(dev, "Failed to get MSI-X interrupts\n");
200 			return irq;
201 		}
202 
203 		break;
204 	default:
205 		dev_err(dev, "Invalid IRQ type selected\n");
206 		return -EINVAL;
207 	}
208 
209 	test->irq_type = type;
210 	test->num_irqs = irq;
211 
212 	return 0;
213 }
214 
215 static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
216 {
217 	int i;
218 	struct pci_dev *pdev = test->pdev;
219 	struct device *dev = &pdev->dev;
220 
221 	for (i = 0; i < test->num_irqs; i++)
222 		devm_free_irq(dev, pci_irq_vector(pdev, i), test);
223 
224 	test->num_irqs = 0;
225 }
226 
227 static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
228 {
229 	int i;
230 	int ret;
231 	struct pci_dev *pdev = test->pdev;
232 	struct device *dev = &pdev->dev;
233 
234 	for (i = 0; i < test->num_irqs; i++) {
235 		ret = devm_request_irq(dev, pci_irq_vector(pdev, i),
236 				       pci_endpoint_test_irqhandler,
237 				       IRQF_SHARED, test->name, test);
238 		if (ret)
239 			goto fail;
240 	}
241 
242 	return 0;
243 
244 fail:
245 	switch (irq_type) {
246 	case IRQ_TYPE_INTX:
247 		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
248 			pci_irq_vector(pdev, i));
249 		break;
250 	case IRQ_TYPE_MSI:
251 		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
252 			pci_irq_vector(pdev, i),
253 			i + 1);
254 		break;
255 	case IRQ_TYPE_MSIX:
256 		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
257 			pci_irq_vector(pdev, i),
258 			i + 1);
259 		break;
260 	}
261 
262 	return ret;
263 }
264 
/*
 * One pattern word per BAR. The BAR memcmp test uses only the low byte
 * (via memset()); the BARS test keeps the distinctive top byte and packs
 * the offset into the lower bytes (see bar_test_pattern_with_offset()).
 */
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};
273 
274 static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
275 					enum pci_barno barno, int offset,
276 					void *write_buf, void *read_buf,
277 					int size)
278 {
279 	memset(write_buf, bar_test_pattern[barno], size);
280 	memcpy_toio(test->bar[barno] + offset, write_buf, size);
281 
282 	memcpy_fromio(read_buf, test->bar[barno] + offset, size);
283 
284 	return memcmp(write_buf, read_buf, size);
285 }
286 
287 static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
288 				  enum pci_barno barno)
289 {
290 	int j, bar_size, buf_size, iters;
291 	void *write_buf __free(kfree) = NULL;
292 	void *read_buf __free(kfree) = NULL;
293 	struct pci_dev *pdev = test->pdev;
294 
295 	if (!test->bar[barno])
296 		return -ENOMEM;
297 
298 	bar_size = pci_resource_len(pdev, barno);
299 
300 	if (barno == test->test_reg_bar)
301 		bar_size = 0x4;
302 
303 	/*
304 	 * Allocate a buffer of max size 1MB, and reuse that buffer while
305 	 * iterating over the whole BAR size (which might be much larger).
306 	 */
307 	buf_size = min(SZ_1M, bar_size);
308 
309 	write_buf = kmalloc(buf_size, GFP_KERNEL);
310 	if (!write_buf)
311 		return -ENOMEM;
312 
313 	read_buf = kmalloc(buf_size, GFP_KERNEL);
314 	if (!read_buf)
315 		return -ENOMEM;
316 
317 	iters = bar_size / buf_size;
318 	for (j = 0; j < iters; j++)
319 		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
320 						 write_buf, read_buf, buf_size))
321 			return -EIO;
322 
323 	return 0;
324 }
325 
326 static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
327 {
328 	u32 val;
329 
330 	/* Keep the BAR pattern in the top byte. */
331 	val = bar_test_pattern[barno] & 0xff000000;
332 	/* Store the (partial) offset in the remaining bytes. */
333 	val |= offset & 0x00ffffff;
334 
335 	return val;
336 }
337 
338 static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
339 					     enum pci_barno barno)
340 {
341 	struct pci_dev *pdev = test->pdev;
342 	int j, size;
343 
344 	size = pci_resource_len(pdev, barno);
345 
346 	if (barno == test->test_reg_bar)
347 		size = 0x4;
348 
349 	for (j = 0; j < size; j += 4)
350 		writel_relaxed(bar_test_pattern_with_offset(barno, j),
351 			       test->bar[barno] + j);
352 }
353 
354 static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
355 					    enum pci_barno barno)
356 {
357 	struct pci_dev *pdev = test->pdev;
358 	struct device *dev = &pdev->dev;
359 	int j, size;
360 	u32 val;
361 
362 	size = pci_resource_len(pdev, barno);
363 
364 	if (barno == test->test_reg_bar)
365 		size = 0x4;
366 
367 	for (j = 0; j < size; j += 4) {
368 		u32 expected = bar_test_pattern_with_offset(barno, j);
369 
370 		val = readl_relaxed(test->bar[barno] + j);
371 		if (val != expected) {
372 			dev_err(dev,
373 				"BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
374 				barno, j, val, expected);
375 			return -EIO;
376 		}
377 	}
378 
379 	return 0;
380 }
381 
382 static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
383 {
384 	enum pci_barno bar;
385 	bool ret;
386 
387 	/* Write all BARs in order (without reading). */
388 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
389 		if (test->bar[bar])
390 			pci_endpoint_test_bars_write_bar(test, bar);
391 
392 	/*
393 	 * Read all BARs in order (without writing).
394 	 * If there is an address translation issue on the EP, writing one BAR
395 	 * might have overwritten another BAR. Ensure that this is not the case.
396 	 * (Reading back the BAR directly after writing can not detect this.)
397 	 */
398 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
399 		if (test->bar[bar]) {
400 			ret = pci_endpoint_test_bars_read_bar(test, bar);
401 			if (!ret)
402 				return ret;
403 		}
404 	}
405 
406 	return 0;
407 }
408 
409 static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
410 {
411 	u32 val;
412 
413 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
414 				 IRQ_TYPE_INTX);
415 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
416 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
417 				 COMMAND_RAISE_INTX_IRQ);
418 	val = wait_for_completion_timeout(&test->irq_raised,
419 					  msecs_to_jiffies(1000));
420 	if (!val)
421 		return -ETIMEDOUT;
422 
423 	return 0;
424 }
425 
426 static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
427 				       u16 msi_num, bool msix)
428 {
429 	struct pci_dev *pdev = test->pdev;
430 	u32 val;
431 	int ret;
432 
433 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
434 				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
435 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
436 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
437 				 msix ? COMMAND_RAISE_MSIX_IRQ :
438 				 COMMAND_RAISE_MSI_IRQ);
439 	val = wait_for_completion_timeout(&test->irq_raised,
440 					  msecs_to_jiffies(1000));
441 	if (!val)
442 		return -ETIMEDOUT;
443 
444 	ret = pci_irq_vector(pdev, msi_num - 1);
445 	if (ret < 0)
446 		return ret;
447 
448 	if (ret != test->last_irq)
449 		return -EIO;
450 
451 	return 0;
452 }
453 
454 static int pci_endpoint_test_validate_xfer_params(struct device *dev,
455 		struct pci_endpoint_test_xfer_param *param, size_t alignment)
456 {
457 	if (!param->size) {
458 		dev_dbg(dev, "Data size is zero\n");
459 		return -EINVAL;
460 	}
461 
462 	if (param->size > SIZE_MAX - alignment) {
463 		dev_dbg(dev, "Maximum transfer data size exceeded\n");
464 		return -EINVAL;
465 	}
466 
467 	return 0;
468 }
469 
/*
 * PCITEST_COPY: have the endpoint copy 'size' bytes from a host source
 * buffer to a host destination buffer, then verify via CRC32 that source
 * and destination match.
 *
 * Both buffers are over-allocated by 'alignment' so the DMA address
 * handed to the endpoint can be aligned up when the controller requires
 * it. Cleanup uses the usual goto-unwind pattern.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on setup failure, -EIO
 * on data mismatch.
 */
static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	/* FLAG_USE_DMA tells the endpoint to use its DMA engine. */
	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_src_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_src_phys_addr;
	}

	/* Align the DMA address up; shift the CPU pointer by the same amount. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = -ENOMEM;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_dst_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map destination buffer address\n");
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	/* Kick the copy and wait for the completion raised by the IRQ handler. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the destination buffer. */
	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 != src_crc32)
		ret = -EIO;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);
	return ret;
}
605 
/*
 * PCITEST_WRITE: fill a host buffer with random data, program the
 * endpoint to READ it (host write == endpoint read), and check that the
 * endpoint reports STATUS_READ_SUCCESS after comparing against the CRC32
 * the host stashed in the CHECKSUM register.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on setup failure, -EIO
 * if the endpoint did not confirm success.
 */
static int pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate by 'alignment' so the DMA address can be aligned up. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	/* Tell the endpoint the expected checksum of the data it will read. */
	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	/* The endpoint compares CRCs and latches success/failure in STATUS. */
	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!(reg & STATUS_READ_SUCCESS))
		ret = -EIO;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);
	return ret;
}
702 
/*
 * PCITEST_READ: program the endpoint to WRITE 'size' bytes into a host
 * buffer (host read == endpoint write), then verify the received data by
 * comparing its CRC32 with the value the endpoint left in the CHECKSUM
 * register.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on setup failure, -EIO
 * on checksum mismatch.
 */
static int pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate by 'alignment' so the DMA address can be aligned up. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		return -ENOMEM;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the buffer the endpoint wrote. */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = -EIO;

err_phys_addr:
	kfree(orig_addr);
	return ret;
}
792 
/*
 * PCITEST_CLEAR_IRQ: release all requested IRQ handlers and free the
 * vectors; leaves test->irq_type as IRQ_TYPE_UNDEFINED.
 */
static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}
800 
/*
 * PCITEST_SET_IRQTYPE: switch the device to a different IRQ delivery
 * mode. Tears down the current handlers/vectors first, then allocates
 * and requests the new set.
 *
 * Returns 0 on success (or if the type is already in effect), -EINVAL
 * for an out-of-range type, or the allocation/request error. On request
 * failure the vectors are freed, leaving the device with no IRQs.
 */
static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* No-op when the requested type is already active. */
	if (test->irq_type == req_irq_type)
		return 0;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
	if (ret)
		return ret;

	ret = pci_endpoint_test_request_irq(test);
	if (ret) {
		pci_endpoint_test_free_irq_vectors(test);
		return ret;
	}

	return 0;
}
831 
832 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
833 				    unsigned long arg)
834 {
835 	int ret = -EINVAL;
836 	enum pci_barno bar;
837 	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
838 	struct pci_dev *pdev = test->pdev;
839 
840 	mutex_lock(&test->mutex);
841 
842 	reinit_completion(&test->irq_raised);
843 	test->last_irq = -ENODATA;
844 
845 	switch (cmd) {
846 	case PCITEST_BAR:
847 		bar = arg;
848 		if (bar > BAR_5)
849 			goto ret;
850 		if (is_am654_pci_dev(pdev) && bar == BAR_0)
851 			goto ret;
852 		ret = pci_endpoint_test_bar(test, bar);
853 		break;
854 	case PCITEST_BARS:
855 		ret = pci_endpoint_test_bars(test);
856 		break;
857 	case PCITEST_INTX_IRQ:
858 		ret = pci_endpoint_test_intx_irq(test);
859 		break;
860 	case PCITEST_MSI:
861 	case PCITEST_MSIX:
862 		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
863 		break;
864 	case PCITEST_WRITE:
865 		ret = pci_endpoint_test_write(test, arg);
866 		break;
867 	case PCITEST_READ:
868 		ret = pci_endpoint_test_read(test, arg);
869 		break;
870 	case PCITEST_COPY:
871 		ret = pci_endpoint_test_copy(test, arg);
872 		break;
873 	case PCITEST_SET_IRQTYPE:
874 		ret = pci_endpoint_test_set_irq(test, arg);
875 		break;
876 	case PCITEST_GET_IRQTYPE:
877 		ret = irq_type;
878 		break;
879 	case PCITEST_CLEAR_IRQ:
880 		ret = pci_endpoint_test_clear_irq(test);
881 		break;
882 	}
883 
884 ret:
885 	mutex_unlock(&test->mutex);
886 	return ret;
887 }
888 
/* Character-device entry points; all functionality goes through ioctl. */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
893 
894 static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
895 {
896 	struct pci_dev *pdev = test->pdev;
897 	struct device *dev = &pdev->dev;
898 	u32 caps;
899 
900 	caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
901 	dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", caps);
902 
903 	/* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
904 	if (caps & CAP_UNALIGNED_ACCESS)
905 		test->alignment = 0;
906 }
907 
/*
 * Probe: enable the device, map its BARs, allocate and request IRQs,
 * and register a /dev/pci-endpoint-test.N misc device. Errors unwind in
 * strict reverse order of acquisition via the goto chain at the bottom.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int ret;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_INTX;

	/* Per-SoC quirks override the defaults (and the no_msi fallback). */
	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable PCI device\n");
		return ret;
	}

	ret = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (ret) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	ret = pci_endpoint_test_alloc_irq_vectors(test, irq_type);
	if (ret)
		goto err_disable_irq;

	/* Map every memory BAR; only the register BAR is mandatory. */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		ret = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	/* Unique instance number for the device node name. */
	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		ret = -ENOMEM;
		goto err_ida_remove;
	}

	ret = pci_endpoint_test_request_irq(test);
	if (ret)
		goto err_kfree_test_name;

	pci_endpoint_test_get_capabilities(test);

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	/* Separate copy: remove() parses the id back out of this string. */
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		ret = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	ret = misc_register(misc_device);
	if (ret) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return ret;
}
1054 
/*
 * Remove: undo everything probe set up. The instance id is recovered by
 * parsing it back out of the misc device name.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	/* Bail out if the name does not carry a valid id (should not happen). */
	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1082 
/* Per-platform quirk tables referenced from the device ID table below. */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

/* AM654 keeps its test registers in BAR2 and needs 64K-aligned buffers. */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

/* test_reg_bar defaults to BAR_0 (zero-initialized). */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};
1104 
/*
 * If the controller's Vendor/Device ID are programmable, you may be able to
 * use one of the existing entries for testing instead of adding a new one.
 *
 * Entries without driver_data get no quirks: BAR_0 registers, no
 * alignment requirement, and the module-default IRQ type.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1152 
/* PCI driver glue; SR-IOV VFs are enabled with the generic simple helper. */
static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1165