1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Host side test driver to test endpoint functionality
4 *
5 * Copyright (C) 2017 Texas Instruments
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
7 */
8
9 #include <linux/crc32.h>
10 #include <linux/cleanup.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/io.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/miscdevice.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 #include <linux/pci.h>
23 #include <linux/pci_ids.h>
24
25 #include <linux/pci_regs.h>
26
27 #include <uapi/linux/pcitest.h>
28
#define DRV_MODULE_NAME				"pci-endpoint-test"

/*
 * Register map of the test device, exposed through the test BAR.
 * These offsets must match the endpoint-side pci-epf-test function driver.
 */
#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_INTX_IRQ			BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

/* Capability bits advertised by the endpoint in PCI_ENDPOINT_TEST_CAPS. */
#define PCI_ENDPOINT_TEST_CAPS			0x30
#define CAP_UNALIGNED_ACCESS			BIT(0)
#define CAP_MSI					BIT(1)
#define CAP_MSIX				BIT(2)
#define CAP_INTX				BIT(3)

/* Device IDs not (yet) present in linux/pci_ids.h. */
#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_TI_J721S2			0xb013
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588

/* Allocates unique ids for each probed device ("pci-endpoint-test.<id>"). */
static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)
95
96 enum pci_barno {
97 BAR_0,
98 BAR_1,
99 BAR_2,
100 BAR_3,
101 BAR_4,
102 BAR_5,
103 };
104
105 struct pci_endpoint_test {
106 struct pci_dev *pdev;
107 void __iomem *base;
108 void __iomem *bar[PCI_STD_NUM_BARS];
109 struct completion irq_raised;
110 int last_irq;
111 int num_irqs;
112 int irq_type;
113 /* mutex to protect the ioctls */
114 struct mutex mutex;
115 struct miscdevice miscdev;
116 enum pci_barno test_reg_bar;
117 size_t alignment;
118 u32 ep_caps;
119 const char *name;
120 };
121
122 struct pci_endpoint_test_data {
123 enum pci_barno test_reg_bar;
124 size_t alignment;
125 };
126
/* Read a 32-bit test register at @offset from the test BAR. */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}
132
/* Write @value to the 32-bit test register at @offset in the test BAR. */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}
138
pci_endpoint_test_irqhandler(int irq,void * dev_id)139 static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
140 {
141 struct pci_endpoint_test *test = dev_id;
142 u32 reg;
143
144 reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
145 if (reg & STATUS_IRQ_RAISED) {
146 test->last_irq = irq;
147 complete(&test->irq_raised);
148 }
149
150 return IRQ_HANDLED;
151 }
152
pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test * test)153 static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
154 {
155 struct pci_dev *pdev = test->pdev;
156
157 pci_free_irq_vectors(pdev);
158 test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
159 }
160
pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test * test,int type)161 static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
162 int type)
163 {
164 int irq;
165 struct pci_dev *pdev = test->pdev;
166 struct device *dev = &pdev->dev;
167
168 switch (type) {
169 case PCITEST_IRQ_TYPE_INTX:
170 irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
171 if (irq < 0) {
172 dev_err(dev, "Failed to get Legacy interrupt\n");
173 return irq;
174 }
175
176 break;
177 case PCITEST_IRQ_TYPE_MSI:
178 irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
179 if (irq < 0) {
180 dev_err(dev, "Failed to get MSI interrupts\n");
181 return irq;
182 }
183
184 break;
185 case PCITEST_IRQ_TYPE_MSIX:
186 irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
187 if (irq < 0) {
188 dev_err(dev, "Failed to get MSI-X interrupts\n");
189 return irq;
190 }
191
192 break;
193 default:
194 dev_err(dev, "Invalid IRQ type selected\n");
195 return -EINVAL;
196 }
197
198 test->irq_type = type;
199 test->num_irqs = irq;
200
201 return 0;
202 }
203
pci_endpoint_test_release_irq(struct pci_endpoint_test * test)204 static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
205 {
206 int i;
207 struct pci_dev *pdev = test->pdev;
208
209 for (i = 0; i < test->num_irqs; i++)
210 free_irq(pci_irq_vector(pdev, i), test);
211
212 test->num_irqs = 0;
213 }
214
pci_endpoint_test_request_irq(struct pci_endpoint_test * test)215 static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
216 {
217 int i;
218 int ret;
219 struct pci_dev *pdev = test->pdev;
220 struct device *dev = &pdev->dev;
221
222 for (i = 0; i < test->num_irqs; i++) {
223 ret = request_irq(pci_irq_vector(pdev, i),
224 pci_endpoint_test_irqhandler, IRQF_SHARED,
225 test->name, test);
226 if (ret)
227 goto fail;
228 }
229
230 return 0;
231
232 fail:
233 switch (test->irq_type) {
234 case PCITEST_IRQ_TYPE_INTX:
235 dev_err(dev, "Failed to request IRQ %d for Legacy\n",
236 pci_irq_vector(pdev, i));
237 break;
238 case PCITEST_IRQ_TYPE_MSI:
239 dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
240 pci_irq_vector(pdev, i),
241 i + 1);
242 break;
243 case PCITEST_IRQ_TYPE_MSIX:
244 dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
245 pci_irq_vector(pdev, i),
246 i + 1);
247 break;
248 }
249
250 test->num_irqs = i;
251 pci_endpoint_test_release_irq(test);
252
253 return ret;
254 }
255
256 static const u32 bar_test_pattern[] = {
257 0xA0A0A0A0,
258 0xA1A1A1A1,
259 0xA2A2A2A2,
260 0xA3A3A3A3,
261 0xA4A4A4A4,
262 0xA5A5A5A5,
263 };
264
/*
 * Fill @write_buf with the BAR's pattern, write it to the BAR at @offset,
 * read it back into @read_buf and compare.
 *
 * Return: 0 when the read-back matches, non-zero (memcmp semantics) otherwise.
 */
static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno,
					resource_size_t offset, void *write_buf,
					void *read_buf, int size)
{
	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(test->bar[barno] + offset, write_buf, size);

	memcpy_fromio(read_buf, test->bar[barno] + offset, size);

	return memcmp(write_buf, read_buf, size);
}
277
pci_endpoint_test_bar(struct pci_endpoint_test * test,enum pci_barno barno)278 static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
279 enum pci_barno barno)
280 {
281 resource_size_t bar_size, offset = 0;
282 void *write_buf __free(kfree) = NULL;
283 void *read_buf __free(kfree) = NULL;
284 struct pci_dev *pdev = test->pdev;
285 int buf_size;
286
287 bar_size = pci_resource_len(pdev, barno);
288 if (!bar_size)
289 return -ENODATA;
290
291 if (!test->bar[barno])
292 return -ENOMEM;
293
294 if (barno == test->test_reg_bar)
295 bar_size = 0x4;
296
297 /*
298 * Allocate a buffer of max size 1MB, and reuse that buffer while
299 * iterating over the whole BAR size (which might be much larger).
300 */
301 buf_size = min(SZ_1M, bar_size);
302
303 write_buf = kmalloc(buf_size, GFP_KERNEL);
304 if (!write_buf)
305 return -ENOMEM;
306
307 read_buf = kmalloc(buf_size, GFP_KERNEL);
308 if (!read_buf)
309 return -ENOMEM;
310
311 while (offset < bar_size) {
312 if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
313 read_buf, buf_size))
314 return -EIO;
315 offset += buf_size;
316 }
317
318 return 0;
319 }
320
bar_test_pattern_with_offset(enum pci_barno barno,int offset)321 static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
322 {
323 u32 val;
324
325 /* Keep the BAR pattern in the top byte. */
326 val = bar_test_pattern[barno] & 0xff000000;
327 /* Store the (partial) offset in the remaining bytes. */
328 val |= offset & 0x00ffffff;
329
330 return val;
331 }
332
pci_endpoint_test_bars_write_bar(struct pci_endpoint_test * test,enum pci_barno barno)333 static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
334 enum pci_barno barno)
335 {
336 struct pci_dev *pdev = test->pdev;
337 int j, size;
338
339 size = pci_resource_len(pdev, barno);
340
341 if (barno == test->test_reg_bar)
342 size = 0x4;
343
344 for (j = 0; j < size; j += 4)
345 writel_relaxed(bar_test_pattern_with_offset(barno, j),
346 test->bar[barno] + j);
347 }
348
pci_endpoint_test_bars_read_bar(struct pci_endpoint_test * test,enum pci_barno barno)349 static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
350 enum pci_barno barno)
351 {
352 struct pci_dev *pdev = test->pdev;
353 struct device *dev = &pdev->dev;
354 int j, size;
355 u32 val;
356
357 size = pci_resource_len(pdev, barno);
358
359 if (barno == test->test_reg_bar)
360 size = 0x4;
361
362 for (j = 0; j < size; j += 4) {
363 u32 expected = bar_test_pattern_with_offset(barno, j);
364
365 val = readl_relaxed(test->bar[barno] + j);
366 if (val != expected) {
367 dev_err(dev,
368 "BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
369 barno, j, val, expected);
370 return -EIO;
371 }
372 }
373
374 return 0;
375 }
376
pci_endpoint_test_bars(struct pci_endpoint_test * test)377 static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
378 {
379 enum pci_barno bar;
380 int ret;
381
382 /* Write all BARs in order (without reading). */
383 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
384 if (test->bar[bar])
385 pci_endpoint_test_bars_write_bar(test, bar);
386
387 /*
388 * Read all BARs in order (without writing).
389 * If there is an address translation issue on the EP, writing one BAR
390 * might have overwritten another BAR. Ensure that this is not the case.
391 * (Reading back the BAR directly after writing can not detect this.)
392 */
393 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
394 if (test->bar[bar]) {
395 ret = pci_endpoint_test_bars_read_bar(test, bar);
396 if (ret)
397 return ret;
398 }
399 }
400
401 return 0;
402 }
403
pci_endpoint_test_intx_irq(struct pci_endpoint_test * test)404 static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
405 {
406 u32 val;
407
408 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
409 PCITEST_IRQ_TYPE_INTX);
410 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
411 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
412 COMMAND_RAISE_INTX_IRQ);
413 val = wait_for_completion_timeout(&test->irq_raised,
414 msecs_to_jiffies(1000));
415 if (!val)
416 return -ETIMEDOUT;
417
418 return 0;
419 }
420
/*
 * PCITEST_MSI / PCITEST_MSIX: ask the endpoint to raise MSI/MSI-X vector
 * @msi_num (1-based) and verify that exactly that vector fired.
 *
 * Return: 0 on success, -ETIMEDOUT if no interrupt arrived, -EIO if a
 * different vector fired, or a negative errno from pci_irq_vector().
 */
static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				     u16 msi_num, bool msix)
{
	struct pci_dev *pdev = test->pdev;
	u32 val;
	int ret;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? PCITEST_IRQ_TYPE_MSIX :
				 PCITEST_IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	/* Vectors are 0-based on the host side, 1-based in the test ABI. */
	ret = pci_irq_vector(pdev, msi_num - 1);
	if (ret < 0)
		return ret;

	if (ret != test->last_irq)
		return -EIO;

	return 0;
}
449
/*
 * Sanity-check a userspace transfer request: size must be non-zero and
 * size + alignment must not overflow (the buffers are allocated with
 * size + alignment bytes).
 *
 * Return: 0 if the parameters are usable, -EINVAL otherwise.
 */
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}
465
pci_endpoint_test_copy(struct pci_endpoint_test * test,unsigned long arg)466 static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
467 unsigned long arg)
468 {
469 struct pci_endpoint_test_xfer_param param;
470 void *src_addr;
471 void *dst_addr;
472 u32 flags = 0;
473 bool use_dma;
474 size_t size;
475 dma_addr_t src_phys_addr;
476 dma_addr_t dst_phys_addr;
477 struct pci_dev *pdev = test->pdev;
478 struct device *dev = &pdev->dev;
479 void *orig_src_addr;
480 dma_addr_t orig_src_phys_addr;
481 void *orig_dst_addr;
482 dma_addr_t orig_dst_phys_addr;
483 size_t offset;
484 size_t alignment = test->alignment;
485 int irq_type = test->irq_type;
486 u32 src_crc32;
487 u32 dst_crc32;
488 int ret;
489
490 ret = copy_from_user(¶m, (void __user *)arg, sizeof(param));
491 if (ret) {
492 dev_err(dev, "Failed to get transfer param\n");
493 return -EFAULT;
494 }
495
496 ret = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment);
497 if (ret)
498 return ret;
499
500 size = param.size;
501
502 use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
503 if (use_dma)
504 flags |= FLAG_USE_DMA;
505
506 if (irq_type < PCITEST_IRQ_TYPE_INTX ||
507 irq_type > PCITEST_IRQ_TYPE_MSIX) {
508 dev_err(dev, "Invalid IRQ type option\n");
509 return -EINVAL;
510 }
511
512 orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
513 if (!orig_src_addr) {
514 dev_err(dev, "Failed to allocate source buffer\n");
515 return -ENOMEM;
516 }
517
518 get_random_bytes(orig_src_addr, size + alignment);
519 orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
520 size + alignment, DMA_TO_DEVICE);
521 ret = dma_mapping_error(dev, orig_src_phys_addr);
522 if (ret) {
523 dev_err(dev, "failed to map source buffer address\n");
524 goto err_src_phys_addr;
525 }
526
527 if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
528 src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
529 offset = src_phys_addr - orig_src_phys_addr;
530 src_addr = orig_src_addr + offset;
531 } else {
532 src_phys_addr = orig_src_phys_addr;
533 src_addr = orig_src_addr;
534 }
535
536 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
537 lower_32_bits(src_phys_addr));
538
539 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
540 upper_32_bits(src_phys_addr));
541
542 src_crc32 = crc32_le(~0, src_addr, size);
543
544 orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
545 if (!orig_dst_addr) {
546 dev_err(dev, "Failed to allocate destination address\n");
547 ret = -ENOMEM;
548 goto err_dst_addr;
549 }
550
551 orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
552 size + alignment, DMA_FROM_DEVICE);
553 ret = dma_mapping_error(dev, orig_dst_phys_addr);
554 if (ret) {
555 dev_err(dev, "failed to map destination buffer address\n");
556 goto err_dst_phys_addr;
557 }
558
559 if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
560 dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
561 offset = dst_phys_addr - orig_dst_phys_addr;
562 dst_addr = orig_dst_addr + offset;
563 } else {
564 dst_phys_addr = orig_dst_phys_addr;
565 dst_addr = orig_dst_addr;
566 }
567
568 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
569 lower_32_bits(dst_phys_addr));
570 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
571 upper_32_bits(dst_phys_addr));
572
573 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
574 size);
575
576 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
577 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
578 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
579 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
580 COMMAND_COPY);
581
582 wait_for_completion(&test->irq_raised);
583
584 dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
585 DMA_FROM_DEVICE);
586
587 dst_crc32 = crc32_le(~0, dst_addr, size);
588 if (dst_crc32 != src_crc32)
589 ret = -EIO;
590
591 err_dst_phys_addr:
592 kfree(orig_dst_addr);
593
594 err_dst_addr:
595 dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
596 DMA_TO_DEVICE);
597
598 err_src_phys_addr:
599 kfree(orig_src_addr);
600 return ret;
601 }
602
pci_endpoint_test_write(struct pci_endpoint_test * test,unsigned long arg)603 static int pci_endpoint_test_write(struct pci_endpoint_test *test,
604 unsigned long arg)
605 {
606 struct pci_endpoint_test_xfer_param param;
607 u32 flags = 0;
608 bool use_dma;
609 u32 reg;
610 void *addr;
611 dma_addr_t phys_addr;
612 struct pci_dev *pdev = test->pdev;
613 struct device *dev = &pdev->dev;
614 void *orig_addr;
615 dma_addr_t orig_phys_addr;
616 size_t offset;
617 size_t alignment = test->alignment;
618 int irq_type = test->irq_type;
619 size_t size;
620 u32 crc32;
621 int ret;
622
623 ret = copy_from_user(¶m, (void __user *)arg, sizeof(param));
624 if (ret) {
625 dev_err(dev, "Failed to get transfer param\n");
626 return -EFAULT;
627 }
628
629 ret = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment);
630 if (ret)
631 return ret;
632
633 size = param.size;
634
635 use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
636 if (use_dma)
637 flags |= FLAG_USE_DMA;
638
639 if (irq_type < PCITEST_IRQ_TYPE_INTX ||
640 irq_type > PCITEST_IRQ_TYPE_MSIX) {
641 dev_err(dev, "Invalid IRQ type option\n");
642 return -EINVAL;
643 }
644
645 orig_addr = kzalloc(size + alignment, GFP_KERNEL);
646 if (!orig_addr) {
647 dev_err(dev, "Failed to allocate address\n");
648 return -ENOMEM;
649 }
650
651 get_random_bytes(orig_addr, size + alignment);
652
653 orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
654 DMA_TO_DEVICE);
655 ret = dma_mapping_error(dev, orig_phys_addr);
656 if (ret) {
657 dev_err(dev, "failed to map source buffer address\n");
658 goto err_phys_addr;
659 }
660
661 if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
662 phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
663 offset = phys_addr - orig_phys_addr;
664 addr = orig_addr + offset;
665 } else {
666 phys_addr = orig_phys_addr;
667 addr = orig_addr;
668 }
669
670 crc32 = crc32_le(~0, addr, size);
671 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
672 crc32);
673
674 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
675 lower_32_bits(phys_addr));
676 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
677 upper_32_bits(phys_addr));
678
679 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
680
681 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
682 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
683 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
684 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
685 COMMAND_READ);
686
687 wait_for_completion(&test->irq_raised);
688
689 reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
690 if (!(reg & STATUS_READ_SUCCESS))
691 ret = -EIO;
692
693 dma_unmap_single(dev, orig_phys_addr, size + alignment,
694 DMA_TO_DEVICE);
695
696 err_phys_addr:
697 kfree(orig_addr);
698 return ret;
699 }
700
pci_endpoint_test_read(struct pci_endpoint_test * test,unsigned long arg)701 static int pci_endpoint_test_read(struct pci_endpoint_test *test,
702 unsigned long arg)
703 {
704 struct pci_endpoint_test_xfer_param param;
705 u32 flags = 0;
706 bool use_dma;
707 size_t size;
708 void *addr;
709 dma_addr_t phys_addr;
710 struct pci_dev *pdev = test->pdev;
711 struct device *dev = &pdev->dev;
712 void *orig_addr;
713 dma_addr_t orig_phys_addr;
714 size_t offset;
715 size_t alignment = test->alignment;
716 int irq_type = test->irq_type;
717 u32 crc32;
718 int ret;
719
720 ret = copy_from_user(¶m, (void __user *)arg, sizeof(param));
721 if (ret) {
722 dev_err(dev, "Failed to get transfer param\n");
723 return -EFAULT;
724 }
725
726 ret = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment);
727 if (ret)
728 return ret;
729
730 size = param.size;
731
732 use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
733 if (use_dma)
734 flags |= FLAG_USE_DMA;
735
736 if (irq_type < PCITEST_IRQ_TYPE_INTX ||
737 irq_type > PCITEST_IRQ_TYPE_MSIX) {
738 dev_err(dev, "Invalid IRQ type option\n");
739 return -EINVAL;
740 }
741
742 orig_addr = kzalloc(size + alignment, GFP_KERNEL);
743 if (!orig_addr) {
744 dev_err(dev, "Failed to allocate destination address\n");
745 return -ENOMEM;
746 }
747
748 orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
749 DMA_FROM_DEVICE);
750 ret = dma_mapping_error(dev, orig_phys_addr);
751 if (ret) {
752 dev_err(dev, "failed to map source buffer address\n");
753 goto err_phys_addr;
754 }
755
756 if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
757 phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
758 offset = phys_addr - orig_phys_addr;
759 addr = orig_addr + offset;
760 } else {
761 phys_addr = orig_phys_addr;
762 addr = orig_addr;
763 }
764
765 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
766 lower_32_bits(phys_addr));
767 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
768 upper_32_bits(phys_addr));
769
770 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
771
772 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
773 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
774 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
775 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
776 COMMAND_WRITE);
777
778 wait_for_completion(&test->irq_raised);
779
780 dma_unmap_single(dev, orig_phys_addr, size + alignment,
781 DMA_FROM_DEVICE);
782
783 crc32 = crc32_le(~0, addr, size);
784 if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
785 ret = -EIO;
786
787 err_phys_addr:
788 kfree(orig_addr);
789 return ret;
790 }
791
/* PCITEST_CLEAR_IRQ: drop all requested handlers and allocated vectors. */
static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}
799
pci_endpoint_test_set_irq(struct pci_endpoint_test * test,int req_irq_type)800 static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
801 int req_irq_type)
802 {
803 struct pci_dev *pdev = test->pdev;
804 struct device *dev = &pdev->dev;
805 int ret;
806
807 if (req_irq_type < PCITEST_IRQ_TYPE_INTX ||
808 req_irq_type > PCITEST_IRQ_TYPE_AUTO) {
809 dev_err(dev, "Invalid IRQ type option\n");
810 return -EINVAL;
811 }
812
813 if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) {
814 if (test->ep_caps & CAP_MSI)
815 req_irq_type = PCITEST_IRQ_TYPE_MSI;
816 else if (test->ep_caps & CAP_MSIX)
817 req_irq_type = PCITEST_IRQ_TYPE_MSIX;
818 else if (test->ep_caps & CAP_INTX)
819 req_irq_type = PCITEST_IRQ_TYPE_INTX;
820 else
821 /* fallback to MSI if no caps defined */
822 req_irq_type = PCITEST_IRQ_TYPE_MSI;
823 }
824
825 if (test->irq_type == req_irq_type)
826 return 0;
827
828 pci_endpoint_test_release_irq(test);
829 pci_endpoint_test_free_irq_vectors(test);
830
831 ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
832 if (ret)
833 return ret;
834
835 ret = pci_endpoint_test_request_irq(test);
836 if (ret) {
837 pci_endpoint_test_free_irq_vectors(test);
838 return ret;
839 }
840
841 return 0;
842 }
843
pci_endpoint_test_ioctl(struct file * file,unsigned int cmd,unsigned long arg)844 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
845 unsigned long arg)
846 {
847 int ret = -EINVAL;
848 enum pci_barno bar;
849 struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
850 struct pci_dev *pdev = test->pdev;
851
852 mutex_lock(&test->mutex);
853
854 reinit_completion(&test->irq_raised);
855 test->last_irq = -ENODATA;
856
857 switch (cmd) {
858 case PCITEST_BAR:
859 bar = arg;
860 if (bar > BAR_5)
861 goto ret;
862 if (is_am654_pci_dev(pdev) && bar == BAR_0)
863 goto ret;
864 ret = pci_endpoint_test_bar(test, bar);
865 break;
866 case PCITEST_BARS:
867 ret = pci_endpoint_test_bars(test);
868 break;
869 case PCITEST_INTX_IRQ:
870 ret = pci_endpoint_test_intx_irq(test);
871 break;
872 case PCITEST_MSI:
873 case PCITEST_MSIX:
874 ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
875 break;
876 case PCITEST_WRITE:
877 ret = pci_endpoint_test_write(test, arg);
878 break;
879 case PCITEST_READ:
880 ret = pci_endpoint_test_read(test, arg);
881 break;
882 case PCITEST_COPY:
883 ret = pci_endpoint_test_copy(test, arg);
884 break;
885 case PCITEST_SET_IRQTYPE:
886 ret = pci_endpoint_test_set_irq(test, arg);
887 break;
888 case PCITEST_GET_IRQTYPE:
889 ret = test->irq_type;
890 break;
891 case PCITEST_CLEAR_IRQ:
892 ret = pci_endpoint_test_clear_irq(test);
893 break;
894 }
895
896 ret:
897 mutex_unlock(&test->mutex);
898 return ret;
899 }
900
901 static const struct file_operations pci_endpoint_test_fops = {
902 .owner = THIS_MODULE,
903 .unlocked_ioctl = pci_endpoint_test_ioctl,
904 };
905
pci_endpoint_test_get_capabilities(struct pci_endpoint_test * test)906 static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
907 {
908 struct pci_dev *pdev = test->pdev;
909 struct device *dev = &pdev->dev;
910
911 test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
912 dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps);
913
914 /* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
915 if (test->ep_caps & CAP_UNALIGNED_ACCESS)
916 test->alignment = 0;
917 }
918
pci_endpoint_test_probe(struct pci_dev * pdev,const struct pci_device_id * ent)919 static int pci_endpoint_test_probe(struct pci_dev *pdev,
920 const struct pci_device_id *ent)
921 {
922 int ret;
923 int id;
924 char name[29];
925 enum pci_barno bar;
926 void __iomem *base;
927 struct device *dev = &pdev->dev;
928 struct pci_endpoint_test *test;
929 struct pci_endpoint_test_data *data;
930 enum pci_barno test_reg_bar = BAR_0;
931 struct miscdevice *misc_device;
932
933 if (pci_is_bridge(pdev))
934 return -ENODEV;
935
936 test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
937 if (!test)
938 return -ENOMEM;
939
940 test->test_reg_bar = 0;
941 test->alignment = 0;
942 test->pdev = pdev;
943 test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
944
945 data = (struct pci_endpoint_test_data *)ent->driver_data;
946 if (data) {
947 test_reg_bar = data->test_reg_bar;
948 test->test_reg_bar = test_reg_bar;
949 test->alignment = data->alignment;
950 }
951
952 init_completion(&test->irq_raised);
953 mutex_init(&test->mutex);
954
955 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
956
957 ret = pci_enable_device(pdev);
958 if (ret) {
959 dev_err(dev, "Cannot enable PCI device\n");
960 return ret;
961 }
962
963 ret = pci_request_regions(pdev, DRV_MODULE_NAME);
964 if (ret) {
965 dev_err(dev, "Cannot obtain PCI resources\n");
966 goto err_disable_pdev;
967 }
968
969 pci_set_master(pdev);
970
971 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
972 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
973 base = pci_ioremap_bar(pdev, bar);
974 if (!base) {
975 dev_err(dev, "Failed to read BAR%d\n", bar);
976 WARN_ON(bar == test_reg_bar);
977 }
978 test->bar[bar] = base;
979 }
980 }
981
982 test->base = test->bar[test_reg_bar];
983 if (!test->base) {
984 ret = -ENOMEM;
985 dev_err(dev, "Cannot perform PCI test without BAR%d\n",
986 test_reg_bar);
987 goto err_iounmap;
988 }
989
990 pci_set_drvdata(pdev, test);
991
992 id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
993 if (id < 0) {
994 ret = id;
995 dev_err(dev, "Unable to get id\n");
996 goto err_iounmap;
997 }
998
999 snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
1000 test->name = kstrdup(name, GFP_KERNEL);
1001 if (!test->name) {
1002 ret = -ENOMEM;
1003 goto err_ida_remove;
1004 }
1005
1006 pci_endpoint_test_get_capabilities(test);
1007
1008 misc_device = &test->miscdev;
1009 misc_device->minor = MISC_DYNAMIC_MINOR;
1010 misc_device->name = kstrdup(name, GFP_KERNEL);
1011 if (!misc_device->name) {
1012 ret = -ENOMEM;
1013 goto err_kfree_test_name;
1014 }
1015 misc_device->parent = &pdev->dev;
1016 misc_device->fops = &pci_endpoint_test_fops;
1017
1018 ret = misc_register(misc_device);
1019 if (ret) {
1020 dev_err(dev, "Failed to register device\n");
1021 goto err_kfree_name;
1022 }
1023
1024 return 0;
1025
1026 err_kfree_name:
1027 kfree(misc_device->name);
1028
1029 err_kfree_test_name:
1030 kfree(test->name);
1031
1032 err_ida_remove:
1033 ida_free(&pci_endpoint_test_ida, id);
1034
1035 err_iounmap:
1036 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1037 if (test->bar[bar])
1038 pci_iounmap(pdev, test->bar[bar]);
1039 }
1040
1041 pci_release_regions(pdev);
1042
1043 err_disable_pdev:
1044 pci_disable_device(pdev);
1045
1046 return ret;
1047 }
1048
pci_endpoint_test_remove(struct pci_dev * pdev)1049 static void pci_endpoint_test_remove(struct pci_dev *pdev)
1050 {
1051 int id;
1052 enum pci_barno bar;
1053 struct pci_endpoint_test *test = pci_get_drvdata(pdev);
1054 struct miscdevice *misc_device = &test->miscdev;
1055
1056 if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
1057 return;
1058 if (id < 0)
1059 return;
1060
1061 pci_endpoint_test_release_irq(test);
1062 pci_endpoint_test_free_irq_vectors(test);
1063
1064 misc_deregister(&test->miscdev);
1065 kfree(misc_device->name);
1066 kfree(test->name);
1067 ida_free(&pci_endpoint_test_ida, id);
1068 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1069 if (test->bar[bar])
1070 pci_iounmap(pdev, test->bar[bar]);
1071 }
1072
1073 pci_release_regions(pdev);
1074 pci_disable_device(pdev);
1075 }
1076
1077 static const struct pci_endpoint_test_data default_data = {
1078 .test_reg_bar = BAR_0,
1079 .alignment = SZ_4K,
1080 };
1081
1082 static const struct pci_endpoint_test_data am654_data = {
1083 .test_reg_bar = BAR_2,
1084 .alignment = SZ_64K,
1085 };
1086
1087 static const struct pci_endpoint_test_data j721e_data = {
1088 .alignment = 256,
1089 };
1090
1091 static const struct pci_endpoint_test_data rk3588_data = {
1092 .alignment = SZ_64K,
1093 };
1094
1095 /*
1096 * If the controller's Vendor/Device ID are programmable, you may be able to
1097 * use one of the existing entries for testing instead of adding a new one.
1098 */
1099 static const struct pci_device_id pci_endpoint_test_tbl[] = {
1100 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
1101 .driver_data = (kernel_ulong_t)&default_data,
1102 },
1103 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
1104 .driver_data = (kernel_ulong_t)&default_data,
1105 },
1106 { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
1107 .driver_data = (kernel_ulong_t)&default_data,
1108 },
1109 { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
1110 { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
1111 .driver_data = (kernel_ulong_t)&default_data,
1112 },
1113 { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
1114 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
1115 .driver_data = (kernel_ulong_t)&am654_data
1116 },
1117 { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
1118 { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
1119 { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
1120 { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
1121 { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
1122 .driver_data = (kernel_ulong_t)&default_data,
1123 },
1124 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
1125 .driver_data = (kernel_ulong_t)&j721e_data,
1126 },
1127 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
1128 .driver_data = (kernel_ulong_t)&j721e_data,
1129 },
1130 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
1131 .driver_data = (kernel_ulong_t)&j721e_data,
1132 },
1133 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
1134 .driver_data = (kernel_ulong_t)&j721e_data,
1135 },
1136 { PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
1137 .driver_data = (kernel_ulong_t)&rk3588_data,
1138 },
1139 { }
1140 };
1141 MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1142
1143 static struct pci_driver pci_endpoint_test_driver = {
1144 .name = DRV_MODULE_NAME,
1145 .id_table = pci_endpoint_test_tbl,
1146 .probe = pci_endpoint_test_probe,
1147 .remove = pci_endpoint_test_remove,
1148 .sriov_configure = pci_sriov_configure_simple,
1149 };
1150 module_pci_driver(pci_endpoint_test_driver);
1151
1152 MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
1153 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1154 MODULE_LICENSE("GPL v2");
1155