xref: /linux/drivers/misc/pci_endpoint_test.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Host side test driver to test endpoint functionality
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/cleanup.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/io.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/miscdevice.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 #include <linux/pci.h>
23 #include <linux/pci_ids.h>
24 
25 #include <linux/pci_regs.h>
26 
27 #include <uapi/linux/pcitest.h>
28 
#define DRV_MODULE_NAME				"pci-endpoint-test"

/*
 * Test register block layout. These registers live in the endpoint's
 * test register BAR (test_reg_bar) and form the host<->EP mailbox.
 */
#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_INTX_IRQ			BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)
#define COMMAND_ENABLE_DOORBELL			BIT(6)
#define COMMAND_DISABLE_DOORBELL		BIT(7)
#define COMMAND_BAR_SUBRANGE_SETUP		BIT(8)
#define COMMAND_BAR_SUBRANGE_CLEAR		BIT(9)

/* Status bits written back by the endpoint after a COMMAND completes. */
#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)
#define STATUS_DOORBELL_SUCCESS			BIT(9)
#define STATUS_DOORBELL_ENABLE_SUCCESS		BIT(10)
#define STATUS_DOORBELL_ENABLE_FAIL		BIT(11)
#define STATUS_DOORBELL_DISABLE_SUCCESS		BIT(12)
#define STATUS_DOORBELL_DISABLE_FAIL		BIT(13)
#define STATUS_BAR_SUBRANGE_SETUP_SUCCESS	BIT(14)
#define STATUS_BAR_SUBRANGE_SETUP_FAIL		BIT(15)
#define STATUS_BAR_SUBRANGE_CLEAR_SUCCESS	BIT(16)
#define STATUS_BAR_SUBRANGE_CLEAR_FAIL		BIT(17)
#define STATUS_NO_RESOURCE			BIT(18)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c

#define FLAG_USE_DMA				BIT(0)

/* Capability bits the endpoint advertises in the CAPS register. */
#define PCI_ENDPOINT_TEST_CAPS			0x30
#define CAP_UNALIGNED_ACCESS			BIT(0)
#define CAP_MSI					BIT(1)
#define CAP_MSIX				BIT(2)
#define CAP_INTX				BIT(3)
#define CAP_SUBRANGE_MAPPING			BIT(4)
#define CAP_DYNAMIC_INBOUND_MAPPING		BIT(5)
#define CAP_BAR0_RESERVED			BIT(6)
#define CAP_BAR1_RESERVED			BIT(7)
#define CAP_BAR2_RESERVED			BIT(8)
#define CAP_BAR3_RESERVED			BIT(9)
#define CAP_BAR4_RESERVED			BIT(10)
#define CAP_BAR5_RESERVED			BIT(11)

#define PCI_ENDPOINT_TEST_DB_BAR		0x34
#define PCI_ENDPOINT_TEST_DB_OFFSET		0x38
#define PCI_ENDPOINT_TEST_DB_DATA		0x3c

/* Device IDs of supported endpoints that are not in pci_ids.h. */
#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_TI_J721S2		0xb013
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588

#define PCI_DEVICE_ID_NVIDIA_TEGRA194_EP	0x1ad4
#define PCI_DEVICE_ID_NVIDIA_TEGRA234_EP	0x229b

/* Number of submappings a BAR is split into by the subrange test. */
#define PCI_ENDPOINT_TEST_BAR_SUBRANGE_NSUB	2

/* Allocates unique /dev/pci-endpoint-test.<N> indices. */
static DEFINE_IDA(pci_endpoint_test_ida);

/* The miscdevice is embedded in struct pci_endpoint_test. */
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)
/* Standard PCI BAR indices; NO_BAR means no BAR is assigned. */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
	NO_BAR = -1,
};
137 
/* Per-device state for one bound endpoint-test PCI function. */
struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;		/* mapping of the test register BAR */
	void __iomem	*bar[PCI_STD_NUM_BARS];	/* per-BAR mappings, NULL if unmapped */
	struct completion irq_raised;	/* completed by the IRQ handler */
	int		last_irq;	/* Linux IRQ number of the last raised IRQ */
	int		num_irqs;	/* IRQ vectors currently allocated */
	int		irq_type;	/* active PCITEST_IRQ_TYPE_* */
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;	/* BAR that holds the test registers */
	size_t alignment;		/* DMA buffer alignment required by the EP */
	u32 ep_caps;			/* CAP_* bits advertised by the endpoint */
	const char *name;		/* device name, also used for request_irq() */
};
154 
/*
 * Per-device-ID match data: which BAR carries the test registers and
 * the DMA buffer alignment the endpoint requires.
 */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
};
159 
160 static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
161 					  u32 offset)
162 {
163 	return readl(test->base + offset);
164 }
165 
166 static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
167 					    u32 offset, u32 value)
168 {
169 	writel(value, test->base + offset);
170 }
171 
/*
 * Shared handler for every test interrupt: if the endpoint flagged
 * STATUS_IRQ_RAISED, record which Linux IRQ fired and wake the waiter
 * blocked on irq_raised.
 */
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}
185 
/* Release all allocated IRQ vectors and mark the IRQ type as unset. */
static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
}
193 
/*
 * Allocate IRQ vectors for the requested @type (PCITEST_IRQ_TYPE_*):
 * exactly one for INTX, up to 32 for MSI, up to 2048 for MSI-X.
 * On success the active type and the granted vector count are stored
 * in @test. Returns 0 on success or a negative error code.
 */
static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	switch (type) {
	case PCITEST_IRQ_TYPE_INTX:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (irq < 0) {
			dev_err(dev, "Failed to get Legacy interrupt\n");
			return irq;
		}

		break;
	case PCITEST_IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI interrupts\n");
			return irq;
		}

		break;
	case PCITEST_IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI-X interrupts\n");
			return irq;
		}

		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
		return -EINVAL;
	}

	/* pci_alloc_irq_vectors() returns the number of vectors granted. */
	test->irq_type = type;
	test->num_irqs = irq;

	return 0;
}
236 
/* Free the handler attached to every requested vector and reset the count. */
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;

	for (i = 0; i < test->num_irqs; i++)
		free_irq(pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}
247 
/*
 * Attach the shared IRQ handler to every allocated vector. On failure,
 * log which vector could not be requested, release the vectors that
 * were already hooked up, and return the request_irq() error.
 */
static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int ret;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		ret = request_irq(pci_irq_vector(pdev, i),
				  pci_endpoint_test_irqhandler, IRQF_SHARED,
				  test->name, test);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	switch (test->irq_type) {
	case PCITEST_IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case PCITEST_IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case PCITEST_IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	/* Only the first i vectors were requested; free just those. */
	test->num_irqs = i;
	pci_endpoint_test_release_irq(test);

	return ret;
}
288 
289 static bool bar_is_reserved(struct pci_endpoint_test *test, enum pci_barno bar)
290 {
291 	return test->ep_caps & BIT(bar + __fls(CAP_BAR0_RESERVED));
292 }
293 
/* One distinctive fill word per BAR (byte 0xA0 for BAR0 ... 0xA5 for BAR5). */
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};
302 
/*
 * Fill @write_buf with the BAR's pattern byte, copy it into BAR @barno
 * at @offset, read it back into @read_buf and compare the two buffers.
 * Returns 0 on a match (memcmp() semantics).
 */
static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno,
					resource_size_t offset, void *write_buf,
					void *read_buf, int size)
{
	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(test->bar[barno] + offset, write_buf, size);

	memcpy_fromio(read_buf, test->bar[barno] + offset, size);

	return memcmp(write_buf, read_buf, size);
}
315 
/*
 * BAR test: write a pattern across the whole of BAR @barno through a
 * bounce buffer and verify the readback chunk by chunk.
 * The test register BAR is restricted to its first 4 bytes so the live
 * test registers are not clobbered.
 * Returns 0 on success, -ENODATA/-ENOMEM when the BAR is absent or
 * unmapped, -EIO on a data mismatch.
 * NOTE(review): the chunk loop assumes bar_size is a multiple of
 * buf_size — holds for power-of-two BAR sizes; confirm for odd BARs.
 */
static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	resource_size_t bar_size, offset = 0;
	void *write_buf __free(kfree) = NULL;
	void *read_buf __free(kfree) = NULL;
	struct pci_dev *pdev = test->pdev;
	int buf_size;

	bar_size = pci_resource_len(pdev, barno);
	if (!bar_size)
		return -ENODATA;

	if (!test->bar[barno])
		return -ENOMEM;

	if (barno == test->test_reg_bar)
		bar_size = 0x4;

	/*
	 * Allocate a buffer of max size 1MB, and reuse that buffer while
	 * iterating over the whole BAR size (which might be much larger).
	 */
	buf_size = min(SZ_1M, bar_size);

	write_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!write_buf)
		return -ENOMEM;

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf)
		return -ENOMEM;

	while (offset < bar_size) {
		if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
						 read_buf, buf_size))
			return -EIO;
		offset += buf_size;
	}

	return 0;
}
358 
359 static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
360 {
361 	u32 val;
362 
363 	/* Keep the BAR pattern in the top byte. */
364 	val = bar_test_pattern[barno] & 0xff000000;
365 	/* Store the (partial) offset in the remaining bytes. */
366 	val |= offset & 0x00ffffff;
367 
368 	return val;
369 }
370 
/*
 * Fill BAR @barno with offset-dependent test words. Only the first
 * 4 bytes of the test register BAR are touched so the live registers
 * stay intact.
 */
static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
					     enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	int j, size;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		writel_relaxed(bar_test_pattern_with_offset(barno, j),
			       test->bar[barno] + j);
}
386 
/*
 * Verify BAR @barno against the offset-dependent pattern written by
 * pci_endpoint_test_bars_write_bar(). Returns 0 when everything
 * matches, -EIO (with a diagnostic) on the first mismatch.
 */
static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
					    enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int j, size;
	u32 val;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4) {
		u32 expected = bar_test_pattern_with_offset(barno, j);

		val = readl_relaxed(test->bar[barno] + j);
		if (val != expected) {
			dev_err(dev,
				"BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
				barno, j, val, expected);
			return -EIO;
		}
	}

	return 0;
}
414 
415 static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
416 {
417 	enum pci_barno bar;
418 	int ret;
419 
420 	/* Write all BARs in order (without reading). */
421 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
422 		if (test->bar[bar] && !bar_is_reserved(test, bar))
423 			pci_endpoint_test_bars_write_bar(test, bar);
424 
425 	/*
426 	 * Read all BARs in order (without writing).
427 	 * If there is an address translation issue on the EP, writing one BAR
428 	 * might have overwritten another BAR. Ensure that this is not the case.
429 	 * (Reading back the BAR directly after writing can not detect this.)
430 	 */
431 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
432 		if (test->bar[bar] && !bar_is_reserved(test, bar)) {
433 			ret = pci_endpoint_test_bars_read_bar(test, bar);
434 			if (ret)
435 				return ret;
436 		}
437 	}
438 
439 	return 0;
440 }
441 
442 static u8 pci_endpoint_test_subrange_sig_byte(enum pci_barno barno,
443 					      unsigned int subno)
444 {
445 	return 0x50 + (barno * 8) + subno;
446 }
447 
448 static u8 pci_endpoint_test_subrange_test_byte(enum pci_barno barno,
449 					       unsigned int subno)
450 {
451 	return 0xa0 + (barno * 8) + subno;
452 }
453 
/*
 * Issue a BAR-subrange @command to the endpoint and wait up to 1 s for
 * the completion IRQ. The SIZE register is reused to carry the BAR
 * number as the command parameter.
 * Returns 0 when @ok_bit is set, -ENOSPC when the EP flagged
 * STATUS_NO_RESOURCE, -EIO on @fail_bit or a missing @ok_bit, and
 * -ETIMEDOUT when no IRQ arrived.
 */
static int pci_endpoint_test_bar_subrange_cmd(struct pci_endpoint_test *test,
					      enum pci_barno barno, u32 command,
					      u32 ok_bit, u32 fail_bit)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int irq_type = test->irq_type;
	u32 status;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type\n");
		return -EINVAL;
	}

	reinit_completion(&test->irq_raised);

	/* Clear stale status before kicking off the command. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	/* Reuse SIZE as a command parameter: bar number. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, barno);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, command);

	if (!wait_for_completion_timeout(&test->irq_raised,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (status & fail_bit)
		return (status & STATUS_NO_RESOURCE) ? -ENOSPC : -EIO;

	if (!(status & ok_bit))
		return -EIO;

	return 0;
}
491 
/* Ask the endpoint to split BAR @barno into submappings. */
static int pci_endpoint_test_bar_subrange_setup(struct pci_endpoint_test *test,
						enum pci_barno barno)
{
	return pci_endpoint_test_bar_subrange_cmd(test, barno,
			COMMAND_BAR_SUBRANGE_SETUP,
			STATUS_BAR_SUBRANGE_SETUP_SUCCESS,
			STATUS_BAR_SUBRANGE_SETUP_FAIL);
}
500 
/* Ask the endpoint to tear the BAR @barno submappings back down. */
static int pci_endpoint_test_bar_subrange_clear(struct pci_endpoint_test *test,
						enum pci_barno barno)
{
	return pci_endpoint_test_bar_subrange_cmd(test, barno,
			COMMAND_BAR_SUBRANGE_CLEAR,
			STATUS_BAR_SUBRANGE_CLEAR_SUCCESS,
			STATUS_BAR_SUBRANGE_CLEAR_FAIL);
}
509 
/*
 * BAR subrange-mapping test: have the endpoint split BAR @barno into
 * PCI_ENDPOINT_TEST_BAR_SUBRANGE_NSUB equal submappings, then
 *  1) verify the EP-written signature word at both ends of each
 *     subrange (proves the EP applied the submap order),
 *  2) fill each subrange with a unique byte pattern (all writes first),
 *  3) read everything back and verify (catches cross-subrange
 *     aliasing that an immediate readback would miss).
 * The submapping is always cleared again on exit; the first error
 * (test or clear) is returned.
 */
static int pci_endpoint_test_bar_subrange(struct pci_endpoint_test *test,
					  enum pci_barno barno)
{
	u32 nsub = PCI_ENDPOINT_TEST_BAR_SUBRANGE_NSUB;
	struct device *dev = &test->pdev->dev;
	size_t sub_size, buf_size;
	resource_size_t bar_size;
	void __iomem *bar_addr;
	void *read_buf = NULL;
	int ret, clear_ret;
	size_t off, chunk;
	u32 i, exp, val;
	u8 pattern;

	if (!(test->ep_caps & CAP_SUBRANGE_MAPPING))
		return -EOPNOTSUPP;

	/*
	 * The test register BAR is not safe to reprogram and write/read
	 * over its full size. BAR_TEST already special-cases it to a tiny
	 * range. For subrange mapping tests, let's simply skip it.
	 */
	if (barno == test->test_reg_bar)
		return -EBUSY;

	bar_size = pci_resource_len(test->pdev, barno);
	if (!bar_size)
		return -ENODATA;

	bar_addr = test->bar[barno];
	if (!bar_addr)
		return -ENOMEM;

	ret = pci_endpoint_test_bar_subrange_setup(test, barno);
	if (ret)
		return ret;

	/* BAR must divide evenly and each subrange must fit in size_t. */
	if (bar_size % nsub || bar_size / nsub > SIZE_MAX) {
		ret = -EINVAL;
		goto out_clear;
	}

	sub_size = bar_size / nsub;
	if (sub_size < sizeof(u32)) {
		ret = -EINVAL;
		goto out_clear;
	}

	/* Limit the temporary buffer size */
	buf_size = min_t(size_t, sub_size, SZ_1M);

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf) {
		ret = -ENOMEM;
		goto out_clear;
	}

	/*
	 * Step 1: verify EP-provided signature per subrange. This detects
	 * whether the EP actually applied the submap order.
	 */
	for (i = 0; i < nsub; i++) {
		/* Expand the signature byte to a full 32-bit word. */
		exp = (u32)pci_endpoint_test_subrange_sig_byte(barno, i) *
			0x01010101U;
		val = ioread32(bar_addr + (i * sub_size));
		if (val != exp) {
			dev_err(dev,
				"BAR%d subrange%u signature mismatch @%#zx: exp %#08x got %#08x\n",
				barno, i, (size_t)i * sub_size, exp, val);
			ret = -EIO;
			goto out_clear;
		}
		/* Also check the last word of the subrange. */
		val = ioread32(bar_addr + (i * sub_size) + sub_size - sizeof(u32));
		if (val != exp) {
			dev_err(dev,
				"BAR%d subrange%u signature mismatch @%#zx: exp %#08x got %#08x\n",
				barno, i,
				((size_t)i * sub_size) + sub_size - sizeof(u32),
				exp, val);
			ret = -EIO;
			goto out_clear;
		}
	}

	/* Step 2: write unique pattern per subrange (write all first). */
	for (i = 0; i < nsub; i++) {
		pattern = pci_endpoint_test_subrange_test_byte(barno, i);
		memset_io(bar_addr + (i * sub_size), pattern, sub_size);
	}

	/* Step 3: read back and verify (read all after all writes). */
	for (i = 0; i < nsub; i++) {
		pattern = pci_endpoint_test_subrange_test_byte(barno, i);
		for (off = 0; off < sub_size; off += chunk) {
			void *bad;

			chunk = min_t(size_t, buf_size, sub_size - off);
			memcpy_fromio(read_buf, bar_addr + (i * sub_size) + off,
				      chunk);
			bad = memchr_inv(read_buf, pattern, chunk);
			if (bad) {
				size_t bad_off = (u8 *)bad - (u8 *)read_buf;

				dev_err(dev,
					"BAR%d subrange%u data mismatch @%#zx (pattern %#02x)\n",
					barno, i, (size_t)i * sub_size + off + bad_off,
					pattern);
				ret = -EIO;
				goto out_clear;
			}
		}
	}

out_clear:
	kfree(read_buf);
	clear_ret = pci_endpoint_test_bar_subrange_clear(test, barno);
	return ret ?: clear_ret;
}
628 
/*
 * Ask the endpoint to raise a legacy INTX interrupt and wait up to 1 s
 * for it to arrive. Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 PCITEST_IRQ_TYPE_INTX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_INTX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	return 0;
}
645 
/*
 * Ask the endpoint to raise MSI (or MSI-X if @msix) vector @msi_num
 * (1-based) and wait up to 1 s for it. Verifies the IRQ that actually
 * fired is the vector that was requested.
 * Returns 0 on success, -ETIMEDOUT on no IRQ, -EIO on the wrong IRQ.
 */
static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				       u16 msi_num, bool msix)
{
	struct pci_dev *pdev = test->pdev;
	u32 val;
	int irq;

	/* msi_num is 1-based; vector indices are 0-based. */
	irq = pci_irq_vector(pdev, msi_num - 1);
	if (irq < 0)
		return irq;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? PCITEST_IRQ_TYPE_MSIX :
				 PCITEST_IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	if (irq != test->last_irq)
		return -EIO;

	return 0;
}
674 
675 static int pci_endpoint_test_validate_xfer_params(struct device *dev,
676 		struct pci_endpoint_test_xfer_param *param, size_t alignment)
677 {
678 	if (!param->size) {
679 		dev_dbg(dev, "Data size is zero\n");
680 		return -EINVAL;
681 	}
682 
683 	if (param->size > SIZE_MAX - alignment) {
684 		dev_dbg(dev, "Maximum transfer data size exceeded\n");
685 		return -EINVAL;
686 	}
687 
688 	return 0;
689 }
690 
/*
 * COPY test: hand the endpoint a random source buffer and a destination
 * buffer in host memory, have it copy one to the other, and compare the
 * CRC32 of both. @arg is a user pointer to
 * struct pci_endpoint_test_xfer_param.
 * Buffers are over-allocated by the EP's required alignment and the DMA
 * addresses aligned up manually. Waits without timeout for the
 * completion IRQ. Returns 0 on success, negative errno on failure.
 */
static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate so the DMA address can be aligned up below. */
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_src_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_src_phys_addr;
	}

	/* Align the DMA address up if the EP requires it. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = -ENOMEM;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_dst_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map destination buffer address\n");
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the destination buffer. */
	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 != src_crc32)
		ret = -EIO;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);
	return ret;
}
827 
/*
 * WRITE test (host -> EP): fill a host buffer with random data, tell
 * the endpoint its DMA address and CRC32, and issue COMMAND_READ — the
 * command is named from the endpoint's point of view, i.e. the EP reads
 * from host memory and verifies the checksum itself
 * (STATUS_READ_SUCCESS). @arg is a user pointer to
 * struct pci_endpoint_test_xfer_param.
 * Returns 0 on success, negative errno on failure.
 */
static int pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate so the DMA address can be aligned up below. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	/* Align the DMA address up if the EP requires it. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	/* Tell the EP the expected checksum so it can verify the read. */
	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!(reg & STATUS_READ_SUCCESS))
		ret = -EIO;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);
	return ret;
}
925 
/*
 * READ test (EP -> host): give the endpoint a host destination buffer
 * and issue COMMAND_WRITE — named from the endpoint's point of view,
 * i.e. the EP writes into host memory and publishes the CRC32 of the
 * data in the CHECKSUM register, which the host then verifies.
 * @arg is a user pointer to struct pci_endpoint_test_xfer_param.
 * Returns 0 on success, negative errno on failure.
 */
static int pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate so the DMA address can be aligned up below. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		return -ENOMEM;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	/* Align the DMA address up if the EP requires it. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the buffer the EP just wrote. */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = -EIO;

err_phys_addr:
	kfree(orig_addr);
	return ret;
}
1016 
/* Drop all requested IRQ handlers and free the IRQ vectors. */
static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}
1024 
/*
 * Switch the driver to @req_irq_type. PCITEST_IRQ_TYPE_AUTO is resolved
 * from the endpoint's capability bits (MSI preferred, then MSI-X, then
 * INTX; MSI when no caps are advertised). A no-op when the requested
 * type is already active; otherwise the old vectors are torn down and
 * the new ones allocated and requested.
 * Returns 0 on success or a negative error code.
 */
static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	if (req_irq_type < PCITEST_IRQ_TYPE_INTX ||
	    req_irq_type > PCITEST_IRQ_TYPE_AUTO) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) {
		if (test->ep_caps & CAP_MSI)
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
		else if (test->ep_caps & CAP_MSIX)
			req_irq_type = PCITEST_IRQ_TYPE_MSIX;
		else if (test->ep_caps & CAP_INTX)
			req_irq_type = PCITEST_IRQ_TYPE_INTX;
		else
			/* fallback to MSI if no caps defined */
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
	}

	if (test->irq_type == req_irq_type)
		return 0;

	/* Tear down the old configuration before switching. */
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
	if (ret)
		return ret;

	ret = pci_endpoint_test_request_irq(test);
	if (ret) {
		pci_endpoint_test_free_irq_vectors(test);
		return ret;
	}

	return 0;
}
1068 
1069 static int pci_endpoint_test_doorbell(struct pci_endpoint_test *test)
1070 {
1071 	struct pci_dev *pdev = test->pdev;
1072 	struct device *dev = &pdev->dev;
1073 	int irq_type = test->irq_type;
1074 	enum pci_barno bar;
1075 	u32 data, status;
1076 	u32 addr;
1077 	int left;
1078 
1079 	if (!(test->ep_caps & CAP_DYNAMIC_INBOUND_MAPPING))
1080 		return -EOPNOTSUPP;
1081 
1082 	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
1083 	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
1084 		dev_err(dev, "Invalid IRQ type\n");
1085 		return -EINVAL;
1086 	}
1087 
1088 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
1089 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
1090 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
1091 				 COMMAND_ENABLE_DOORBELL);
1092 
1093 	left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
1094 
1095 	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
1096 	if (!left || (status & STATUS_DOORBELL_ENABLE_FAIL)) {
1097 		dev_err(dev, "Failed to enable doorbell\n");
1098 		return -EINVAL;
1099 	}
1100 
1101 	data = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_DATA);
1102 	addr = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_OFFSET);
1103 	bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);
1104 
1105 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
1106 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
1107 
1108 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);
1109 
1110 	bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);
1111 
1112 	writel(data, test->bar[bar] + addr);
1113 
1114 	left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
1115 
1116 	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
1117 
1118 	if (!left || !(status & STATUS_DOORBELL_SUCCESS))
1119 		dev_err(dev, "Failed to trigger doorbell in endpoint\n");
1120 
1121 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
1122 				 COMMAND_DISABLE_DOORBELL);
1123 
1124 	wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
1125 
1126 	status |= pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
1127 
1128 	if (status & STATUS_DOORBELL_DISABLE_FAIL) {
1129 		dev_err(dev, "Failed to disable doorbell\n");
1130 		return -EINVAL;
1131 	}
1132 
1133 	if (!(status & STATUS_DOORBELL_SUCCESS))
1134 		return -EINVAL;
1135 
1136 	return 0;
1137 }
1138 
1139 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
1140 				    unsigned long arg)
1141 {
1142 	int ret = -EINVAL;
1143 	enum pci_barno bar;
1144 	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
1145 	struct pci_dev *pdev = test->pdev;
1146 
1147 	mutex_lock(&test->mutex);
1148 
1149 	reinit_completion(&test->irq_raised);
1150 	test->last_irq = -ENODATA;
1151 
1152 	switch (cmd) {
1153 	case PCITEST_BAR:
1154 	case PCITEST_BAR_SUBRANGE:
1155 		bar = arg;
1156 		if (bar <= NO_BAR || bar > BAR_5)
1157 			goto ret;
1158 		if (is_am654_pci_dev(pdev) && bar == BAR_0)
1159 			goto ret;
1160 
1161 		if (bar_is_reserved(test, bar)) {
1162 			ret = -ENOBUFS;
1163 			goto ret;
1164 		}
1165 
1166 		if (cmd == PCITEST_BAR)
1167 			ret = pci_endpoint_test_bar(test, bar);
1168 		else
1169 			ret = pci_endpoint_test_bar_subrange(test, bar);
1170 		break;
1171 	case PCITEST_BARS:
1172 		ret = pci_endpoint_test_bars(test);
1173 		break;
1174 	case PCITEST_INTX_IRQ:
1175 		ret = pci_endpoint_test_intx_irq(test);
1176 		break;
1177 	case PCITEST_MSI:
1178 	case PCITEST_MSIX:
1179 		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
1180 		break;
1181 	case PCITEST_WRITE:
1182 		ret = pci_endpoint_test_write(test, arg);
1183 		break;
1184 	case PCITEST_READ:
1185 		ret = pci_endpoint_test_read(test, arg);
1186 		break;
1187 	case PCITEST_COPY:
1188 		ret = pci_endpoint_test_copy(test, arg);
1189 		break;
1190 	case PCITEST_SET_IRQTYPE:
1191 		ret = pci_endpoint_test_set_irq(test, arg);
1192 		break;
1193 	case PCITEST_GET_IRQTYPE:
1194 		ret = test->irq_type;
1195 		break;
1196 	case PCITEST_CLEAR_IRQ:
1197 		ret = pci_endpoint_test_clear_irq(test);
1198 		break;
1199 	case PCITEST_DOORBELL:
1200 		ret = pci_endpoint_test_doorbell(test);
1201 		break;
1202 	}
1203 
1204 ret:
1205 	mutex_unlock(&test->mutex);
1206 	return ret;
1207 }
1208 
/* Character-device interface: all test operations go through ioctl(). */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
1213 
1214 static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
1215 {
1216 	struct pci_dev *pdev = test->pdev;
1217 	struct device *dev = &pdev->dev;
1218 
1219 	test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
1220 	dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps);
1221 
1222 	/* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
1223 	if (test->ep_caps & CAP_UNALIGNED_ACCESS)
1224 		test->alignment = 0;
1225 }
1226 
/*
 * Bind to a matched PCI endpoint test device: enable and map the device,
 * locate the test-register BAR, allocate a unique instance id, and expose
 * the test interface as a misc character device. Resources acquired here
 * are unwound in reverse order on failure (goto ladder) and released again
 * in pci_endpoint_test_remove().
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int ret;
	int id;
	char name[29];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	/* Only endpoint functions can be tested; skip bridge devices. */
	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->pdev = pdev;
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;

	/* Per-SoC overrides (test-register BAR, buffer alignment), if any. */
	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	/*
	 * Best-effort 48-bit DMA mask; return value intentionally ignored —
	 * on failure the default mask stays in effect.
	 */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable PCI device\n");
		return ret;
	}

	ret = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (ret) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * Map every memory BAR. A failed map is fatal only for the
	 * test-register BAR (checked below); others just stay NULL.
	 */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		ret = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	/* Unique instance id, recovered from the name in remove(). */
	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		ret = -ENOMEM;
		goto err_ida_remove;
	}

	pci_endpoint_test_get_capabilities(test);

	/* Expose /dev/pci-endpoint-test.<id> for the userspace test tool. */
	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		ret = -ENOMEM;
		goto err_kfree_test_name;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	ret = misc_register(misc_device);
	if (ret) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return ret;
}
1354 
/*
 * Unbind handler: release everything probe acquired, in reverse order.
 * The ida instance id is recovered by parsing it back out of the misc
 * device name; bail out if the name does not match the expected format.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	/* Quiesce IRQs before tearing down the device interface. */
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1382 
/* Per-SoC test parameters, selected via the id table's driver_data. */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
};

/* TI AM654 keeps its test registers in BAR2 and needs 64K alignment. */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
};

/* test_reg_bar left unset — defaults to BAR_0 (zero-initialized). */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
};

static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
};
1400 
1401 /*
1402  * If the controller's Vendor/Device ID are programmable, you may be able to
1403  * use one of the existing entries for testing instead of adding a new one.
1404  */
1405 static const struct pci_device_id pci_endpoint_test_tbl[] = {
1406 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
1407 	  .driver_data = (kernel_ulong_t)&default_data,
1408 	},
1409 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
1410 	  .driver_data = (kernel_ulong_t)&default_data,
1411 	},
1412 	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
1413 	  .driver_data = (kernel_ulong_t)&default_data,
1414 	},
1415 	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
1416 	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
1417 	  .driver_data = (kernel_ulong_t)&default_data,
1418 	},
1419 	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
1420 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
1421 	  .driver_data = (kernel_ulong_t)&am654_data
1422 	},
1423 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
1424 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
1425 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
1426 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
1427 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
1428 	  .driver_data = (kernel_ulong_t)&default_data,
1429 	},
1430 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
1431 	  .driver_data = (kernel_ulong_t)&j721e_data,
1432 	},
1433 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
1434 	  .driver_data = (kernel_ulong_t)&j721e_data,
1435 	},
1436 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
1437 	  .driver_data = (kernel_ulong_t)&j721e_data,
1438 	},
1439 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
1440 	  .driver_data = (kernel_ulong_t)&j721e_data,
1441 	},
1442 	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
1443 	  .driver_data = (kernel_ulong_t)&rk3588_data,
1444 	},
1445 	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TEGRA194_EP),},
1446 	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TEGRA234_EP),},
1447 	{ }
1448 };
1449 MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1450 
/*
 * Plain probe/remove PCI driver. SR-IOV virtual functions are enabled
 * via the generic pci_sriov_configure_simple() helper.
 */
static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1463