1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Host side test driver to test endpoint functionality
4 *
5 * Copyright (C) 2017 Texas Instruments
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
7 */
8
9 #include <linux/crc32.h>
10 #include <linux/cleanup.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/io.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/miscdevice.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 #include <linux/pci.h>
23 #include <linux/pci_ids.h>
24
25 #include <linux/pci_regs.h>
26
27 #include <uapi/linux/pcitest.h>
28
29 #define DRV_MODULE_NAME "pci-endpoint-test"
30
31 #define PCI_ENDPOINT_TEST_MAGIC 0x0
32
33 #define PCI_ENDPOINT_TEST_COMMAND 0x4
34 #define COMMAND_RAISE_INTX_IRQ BIT(0)
35 #define COMMAND_RAISE_MSI_IRQ BIT(1)
36 #define COMMAND_RAISE_MSIX_IRQ BIT(2)
37 #define COMMAND_READ BIT(3)
38 #define COMMAND_WRITE BIT(4)
39 #define COMMAND_COPY BIT(5)
40 #define COMMAND_ENABLE_DOORBELL BIT(6)
41 #define COMMAND_DISABLE_DOORBELL BIT(7)
42
43 #define PCI_ENDPOINT_TEST_STATUS 0x8
44 #define STATUS_READ_SUCCESS BIT(0)
45 #define STATUS_READ_FAIL BIT(1)
46 #define STATUS_WRITE_SUCCESS BIT(2)
47 #define STATUS_WRITE_FAIL BIT(3)
48 #define STATUS_COPY_SUCCESS BIT(4)
49 #define STATUS_COPY_FAIL BIT(5)
50 #define STATUS_IRQ_RAISED BIT(6)
51 #define STATUS_SRC_ADDR_INVALID BIT(7)
52 #define STATUS_DST_ADDR_INVALID BIT(8)
53 #define STATUS_DOORBELL_SUCCESS BIT(9)
54 #define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10)
55 #define STATUS_DOORBELL_ENABLE_FAIL BIT(11)
56 #define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
57 #define STATUS_DOORBELL_DISABLE_FAIL BIT(13)
58
59 #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
60 #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
61
62 #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
63 #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
64
65 #define PCI_ENDPOINT_TEST_SIZE 0x1c
66 #define PCI_ENDPOINT_TEST_CHECKSUM 0x20
67
68 #define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
69 #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
70
71 #define PCI_ENDPOINT_TEST_FLAGS 0x2c
72
73 #define FLAG_USE_DMA BIT(0)
74
75 #define PCI_ENDPOINT_TEST_CAPS 0x30
76 #define CAP_UNALIGNED_ACCESS BIT(0)
77 #define CAP_MSI BIT(1)
78 #define CAP_MSIX BIT(2)
79 #define CAP_INTX BIT(3)
80
81 #define PCI_ENDPOINT_TEST_DB_BAR 0x34
82 #define PCI_ENDPOINT_TEST_DB_OFFSET 0x38
83 #define PCI_ENDPOINT_TEST_DB_DATA 0x3c
84
85 #define PCI_DEVICE_ID_TI_AM654 0xb00c
86 #define PCI_DEVICE_ID_TI_J7200 0xb00f
87 #define PCI_DEVICE_ID_TI_AM64 0xb010
88 #define PCI_DEVICE_ID_TI_J721S2 0xb013
89 #define PCI_DEVICE_ID_LS1088A 0x80c0
90 #define PCI_DEVICE_ID_IMX8 0x0808
91
92 #define is_am654_pci_dev(pdev) \
93 ((pdev)->device == PCI_DEVICE_ID_TI_AM654)
94
95 #define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
96 #define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
97 #define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
98 #define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
99 #define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031
100
101 #define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588
102
103 static DEFINE_IDA(pci_endpoint_test_ida);
104
105 #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
106 miscdev)
107
/* Standard PCI BAR indices; NO_BAR marks "no BAR selected". */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
	NO_BAR = -1,
};
117
/* Per-device state for one bound endpoint-test PCI function. */
struct pci_endpoint_test {
	struct pci_dev *pdev;			/* bound PCI function */
	void __iomem *base;			/* mapping of the test-register BAR */
	void __iomem *bar[PCI_STD_NUM_BARS];	/* per-BAR mappings (NULL if absent) */
	struct completion irq_raised;		/* completed by the IRQ handler */
	int last_irq;				/* Linux IRQ number that last fired */
	int num_irqs;				/* IRQs currently requested */
	int irq_type;				/* PCITEST_IRQ_TYPE_* in use */
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;		/* /dev/pci-endpoint-test.N node */
	enum pci_barno test_reg_bar;		/* BAR holding the test registers */
	size_t alignment;			/* DMA buffer alignment (0 = none) */
	u32 ep_caps;				/* CAP_* bits advertised by the EP */
	const char *name;			/* instance name, also used for IRQs */
};
134
/* Per-device-ID match data: which BAR holds the registers, DMA alignment. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
};
139
/* Read the 32-bit test register at @offset from the register BAR. */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	void __iomem *addr = test->base + offset;

	return readl(addr);
}
145
/* Write @value to the 32-bit test register at @offset in the register BAR. */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	void __iomem *addr = test->base + offset;

	writel(value, addr);
}
151
pci_endpoint_test_irqhandler(int irq,void * dev_id)152 static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
153 {
154 struct pci_endpoint_test *test = dev_id;
155 u32 reg;
156
157 reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
158 if (reg & STATUS_IRQ_RAISED) {
159 test->last_irq = irq;
160 complete(&test->irq_raised);
161 }
162
163 return IRQ_HANDLED;
164 }
165
pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test * test)166 static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
167 {
168 struct pci_dev *pdev = test->pdev;
169
170 pci_free_irq_vectors(pdev);
171 test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
172 }
173
pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test * test,int type)174 static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
175 int type)
176 {
177 int irq;
178 struct pci_dev *pdev = test->pdev;
179 struct device *dev = &pdev->dev;
180
181 switch (type) {
182 case PCITEST_IRQ_TYPE_INTX:
183 irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
184 if (irq < 0) {
185 dev_err(dev, "Failed to get Legacy interrupt\n");
186 return irq;
187 }
188
189 break;
190 case PCITEST_IRQ_TYPE_MSI:
191 irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
192 if (irq < 0) {
193 dev_err(dev, "Failed to get MSI interrupts\n");
194 return irq;
195 }
196
197 break;
198 case PCITEST_IRQ_TYPE_MSIX:
199 irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
200 if (irq < 0) {
201 dev_err(dev, "Failed to get MSI-X interrupts\n");
202 return irq;
203 }
204
205 break;
206 default:
207 dev_err(dev, "Invalid IRQ type selected\n");
208 return -EINVAL;
209 }
210
211 test->irq_type = type;
212 test->num_irqs = irq;
213
214 return 0;
215 }
216
pci_endpoint_test_release_irq(struct pci_endpoint_test * test)217 static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
218 {
219 int i;
220 struct pci_dev *pdev = test->pdev;
221
222 for (i = 0; i < test->num_irqs; i++)
223 free_irq(pci_irq_vector(pdev, i), test);
224
225 test->num_irqs = 0;
226 }
227
pci_endpoint_test_request_irq(struct pci_endpoint_test * test)228 static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
229 {
230 int i;
231 int ret;
232 struct pci_dev *pdev = test->pdev;
233 struct device *dev = &pdev->dev;
234
235 for (i = 0; i < test->num_irqs; i++) {
236 ret = request_irq(pci_irq_vector(pdev, i),
237 pci_endpoint_test_irqhandler, IRQF_SHARED,
238 test->name, test);
239 if (ret)
240 goto fail;
241 }
242
243 return 0;
244
245 fail:
246 switch (test->irq_type) {
247 case PCITEST_IRQ_TYPE_INTX:
248 dev_err(dev, "Failed to request IRQ %d for Legacy\n",
249 pci_irq_vector(pdev, i));
250 break;
251 case PCITEST_IRQ_TYPE_MSI:
252 dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
253 pci_irq_vector(pdev, i),
254 i + 1);
255 break;
256 case PCITEST_IRQ_TYPE_MSIX:
257 dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
258 pci_irq_vector(pdev, i),
259 i + 1);
260 break;
261 }
262
263 test->num_irqs = i;
264 pci_endpoint_test_release_irq(test);
265
266 return ret;
267 }
268
/* One distinctive fill byte per BAR, so cross-BAR corruption is visible. */
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};
277
/*
 * Write @size bytes of the BAR's test pattern at @offset, read the same
 * window back, and return the memcmp() of the two scratch buffers
 * (0 when the round trip was lossless).
 */
static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno,
					resource_size_t offset, void *write_buf,
					void *read_buf, int size)
{
	void __iomem *bar_addr = test->bar[barno] + offset;

	/* Fill the scratch buffer with this BAR's pattern byte and push it. */
	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(bar_addr, write_buf, size);

	/* Pull the window back for comparison. */
	memcpy_fromio(read_buf, bar_addr, size);

	return memcmp(write_buf, read_buf, size);
}
290
pci_endpoint_test_bar(struct pci_endpoint_test * test,enum pci_barno barno)291 static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
292 enum pci_barno barno)
293 {
294 resource_size_t bar_size, offset = 0;
295 void *write_buf __free(kfree) = NULL;
296 void *read_buf __free(kfree) = NULL;
297 struct pci_dev *pdev = test->pdev;
298 int buf_size;
299
300 bar_size = pci_resource_len(pdev, barno);
301 if (!bar_size)
302 return -ENODATA;
303
304 if (!test->bar[barno])
305 return -ENOMEM;
306
307 if (barno == test->test_reg_bar)
308 bar_size = 0x4;
309
310 /*
311 * Allocate a buffer of max size 1MB, and reuse that buffer while
312 * iterating over the whole BAR size (which might be much larger).
313 */
314 buf_size = min(SZ_1M, bar_size);
315
316 write_buf = kmalloc(buf_size, GFP_KERNEL);
317 if (!write_buf)
318 return -ENOMEM;
319
320 read_buf = kmalloc(buf_size, GFP_KERNEL);
321 if (!read_buf)
322 return -ENOMEM;
323
324 while (offset < bar_size) {
325 if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
326 read_buf, buf_size))
327 return -EIO;
328 offset += buf_size;
329 }
330
331 return 0;
332 }
333
bar_test_pattern_with_offset(enum pci_barno barno,int offset)334 static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
335 {
336 u32 val;
337
338 /* Keep the BAR pattern in the top byte. */
339 val = bar_test_pattern[barno] & 0xff000000;
340 /* Store the (partial) offset in the remaining bytes. */
341 val |= offset & 0x00ffffff;
342
343 return val;
344 }
345
pci_endpoint_test_bars_write_bar(struct pci_endpoint_test * test,enum pci_barno barno)346 static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
347 enum pci_barno barno)
348 {
349 struct pci_dev *pdev = test->pdev;
350 int j, size;
351
352 size = pci_resource_len(pdev, barno);
353
354 if (barno == test->test_reg_bar)
355 size = 0x4;
356
357 for (j = 0; j < size; j += 4)
358 writel_relaxed(bar_test_pattern_with_offset(barno, j),
359 test->bar[barno] + j);
360 }
361
pci_endpoint_test_bars_read_bar(struct pci_endpoint_test * test,enum pci_barno barno)362 static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
363 enum pci_barno barno)
364 {
365 struct pci_dev *pdev = test->pdev;
366 struct device *dev = &pdev->dev;
367 int j, size;
368 u32 val;
369
370 size = pci_resource_len(pdev, barno);
371
372 if (barno == test->test_reg_bar)
373 size = 0x4;
374
375 for (j = 0; j < size; j += 4) {
376 u32 expected = bar_test_pattern_with_offset(barno, j);
377
378 val = readl_relaxed(test->bar[barno] + j);
379 if (val != expected) {
380 dev_err(dev,
381 "BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
382 barno, j, val, expected);
383 return -EIO;
384 }
385 }
386
387 return 0;
388 }
389
pci_endpoint_test_bars(struct pci_endpoint_test * test)390 static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
391 {
392 enum pci_barno bar;
393 int ret;
394
395 /* Write all BARs in order (without reading). */
396 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
397 if (test->bar[bar])
398 pci_endpoint_test_bars_write_bar(test, bar);
399
400 /*
401 * Read all BARs in order (without writing).
402 * If there is an address translation issue on the EP, writing one BAR
403 * might have overwritten another BAR. Ensure that this is not the case.
404 * (Reading back the BAR directly after writing can not detect this.)
405 */
406 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
407 if (test->bar[bar]) {
408 ret = pci_endpoint_test_bars_read_bar(test, bar);
409 if (ret)
410 return ret;
411 }
412 }
413
414 return 0;
415 }
416
pci_endpoint_test_intx_irq(struct pci_endpoint_test * test)417 static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
418 {
419 u32 val;
420
421 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
422 PCITEST_IRQ_TYPE_INTX);
423 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
424 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
425 COMMAND_RAISE_INTX_IRQ);
426 val = wait_for_completion_timeout(&test->irq_raised,
427 msecs_to_jiffies(1000));
428 if (!val)
429 return -ETIMEDOUT;
430
431 return 0;
432 }
433
/*
 * PCITEST_MSI / PCITEST_MSIX: command the endpoint to raise MSI or MSI-X
 * vector @msi_num (1-based, per the UAPI) and check that exactly that
 * vector's Linux IRQ fired. Returns 0, -ETIMEDOUT, -EIO, or a negative
 * errno from pci_irq_vector().
 */
static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				     u16 msi_num, bool msix)
{
	u32 irq_type = msix ? PCITEST_IRQ_TYPE_MSIX : PCITEST_IRQ_TYPE_MSI;
	u32 cmd = msix ? COMMAND_RAISE_MSIX_IRQ : COMMAND_RAISE_MSI_IRQ;
	struct pci_dev *pdev = test->pdev;
	unsigned long left;
	int vec;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, cmd);

	left = wait_for_completion_timeout(&test->irq_raised,
					   msecs_to_jiffies(1000));
	if (!left)
		return -ETIMEDOUT;

	/* UAPI vector numbers are 1-based; pci_irq_vector() is 0-based. */
	vec = pci_irq_vector(pdev, msi_num - 1);
	if (vec < 0)
		return vec;

	/* The IRQ that fired must be the one we asked for. */
	return vec == test->last_irq ? 0 : -EIO;
}
462
/*
 * Sanity-check user-supplied transfer parameters: the size must be non-zero
 * and small enough that size + alignment cannot overflow size_t.
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (param->size == 0) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	/* Callers allocate size + alignment bytes; reject overflow. */
	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}
478
/*
 * PCITEST_COPY: have the endpoint copy a host source buffer into a host
 * destination buffer (optionally using the EP's DMA engine), then compare
 * the CRC32 of both buffers to verify the transfer. @arg is a user pointer
 * to a struct pci_endpoint_test_xfer_param. Returns 0 on success or a
 * negative errno.
 */
static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	/* Rejects zero size and size + alignment overflow. */
	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/*
	 * Over-allocate by @alignment so an aligned window can be carved out
	 * below when the DMA mapping is not naturally aligned.
	 */
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_src_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_src_phys_addr;
	}

	/* Carve an @alignment-aligned window out of the mapped region. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	/* Checksum the source before the device is told to copy it. */
	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = -ENOMEM;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_dst_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map destination buffer address\n");
		goto err_dst_phys_addr;
	}

	/* Same aligned-window carve-out for the destination. */
	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	/* Kick the copy and wait for the EP's completion interrupt. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* NOTE(review): unbounded wait — blocks if the EP never signals. */
	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the DMA_FROM_DEVICE buffer. */
	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 != src_crc32)
		ret = -EIO;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);
	return ret;
}
615
/*
 * PCITEST_WRITE: host-to-endpoint transfer. Fill a host buffer with random
 * data, give the endpoint its DMA address and CRC32, then issue
 * COMMAND_READ — the direction is named from the endpoint's point of view,
 * so the EP reads the buffer and reports STATUS_READ_SUCCESS if the
 * checksum matched. Returns 0 on success or a negative errno.
 */
static int pci_endpoint_test_write(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate so an aligned window can be carved out below. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	/* Carve an @alignment-aligned window out of the mapped region. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	/* Publish the expected checksum for the EP to verify against. */
	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	/* NOTE(review): unbounded wait — blocks if the EP never signals. */
	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!(reg & STATUS_READ_SUCCESS))
		ret = -EIO;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);
	return ret;
}
713
/*
 * PCITEST_READ: endpoint-to-host transfer. Give the endpoint the DMA address
 * of an empty host buffer and issue COMMAND_WRITE (direction named from the
 * EP's point of view) so the EP fills it; then compare the buffer's CRC32
 * with the checksum the EP left in its CHECKSUM register.
 * Returns 0 on success or a negative errno.
 */
static int pci_endpoint_test_read(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate so an aligned window can be carved out below. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		return -ENOMEM;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	/* Carve an @alignment-aligned window out of the mapped region. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	/* NOTE(review): unbounded wait — blocks if the EP never signals. */
	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the DMA_FROM_DEVICE buffer. */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = -EIO;

err_phys_addr:
	kfree(orig_addr);
	return ret;
}
804
/* PCITEST_CLEAR_IRQ: free all requested IRQs, then the vectors themselves. */
static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}
812
/*
 * PCITEST_SET_IRQTYPE: switch the device to @req_irq_type. AUTO is resolved
 * against the capabilities the EP advertised (MSI preferred, then MSI-X,
 * then INTx). A no-op when the type is already active; otherwise the old
 * vectors are torn down before new ones are allocated and requested.
 * Returns 0 on success or a negative errno.
 */
static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				     int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	if (req_irq_type < PCITEST_IRQ_TYPE_INTX ||
	    req_irq_type > PCITEST_IRQ_TYPE_AUTO) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Resolve AUTO using the CAPS register read at probe time. */
	if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) {
		if (test->ep_caps & CAP_MSI)
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
		else if (test->ep_caps & CAP_MSIX)
			req_irq_type = PCITEST_IRQ_TYPE_MSIX;
		else if (test->ep_caps & CAP_INTX)
			req_irq_type = PCITEST_IRQ_TYPE_INTX;
		else
			/* fallback to MSI if no caps defined */
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
	}

	/* Requested type already in use — nothing to do. */
	if (test->irq_type == req_irq_type)
		return 0;

	/* Tear down the current configuration before switching. */
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
	if (ret)
		return ret;

	ret = pci_endpoint_test_request_irq(test);
	if (ret) {
		pci_endpoint_test_free_irq_vectors(test);
		return ret;
	}

	return 0;
}
856
pci_endpoint_test_doorbell(struct pci_endpoint_test * test)857 static int pci_endpoint_test_doorbell(struct pci_endpoint_test *test)
858 {
859 struct pci_dev *pdev = test->pdev;
860 struct device *dev = &pdev->dev;
861 int irq_type = test->irq_type;
862 enum pci_barno bar;
863 u32 data, status;
864 u32 addr;
865 int left;
866
867 if (irq_type < PCITEST_IRQ_TYPE_INTX ||
868 irq_type > PCITEST_IRQ_TYPE_MSIX) {
869 dev_err(dev, "Invalid IRQ type\n");
870 return -EINVAL;
871 }
872
873 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
874 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
875 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
876 COMMAND_ENABLE_DOORBELL);
877
878 left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
879
880 status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
881 if (!left || (status & STATUS_DOORBELL_ENABLE_FAIL)) {
882 dev_err(dev, "Failed to enable doorbell\n");
883 return -EINVAL;
884 }
885
886 data = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_DATA);
887 addr = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_OFFSET);
888 bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);
889
890 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
891 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
892
893 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);
894
895 bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);
896
897 writel(data, test->bar[bar] + addr);
898
899 left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
900
901 status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
902
903 if (!left || !(status & STATUS_DOORBELL_SUCCESS))
904 dev_err(dev, "Failed to trigger doorbell in endpoint\n");
905
906 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
907 COMMAND_DISABLE_DOORBELL);
908
909 wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
910
911 status |= pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
912
913 if (status & STATUS_DOORBELL_DISABLE_FAIL) {
914 dev_err(dev, "Failed to disable doorbell\n");
915 return -EINVAL;
916 }
917
918 if (!(status & STATUS_DOORBELL_SUCCESS))
919 return -EINVAL;
920
921 return 0;
922 }
923
pci_endpoint_test_ioctl(struct file * file,unsigned int cmd,unsigned long arg)924 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
925 unsigned long arg)
926 {
927 int ret = -EINVAL;
928 enum pci_barno bar;
929 struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
930 struct pci_dev *pdev = test->pdev;
931
932 mutex_lock(&test->mutex);
933
934 reinit_completion(&test->irq_raised);
935 test->last_irq = -ENODATA;
936
937 switch (cmd) {
938 case PCITEST_BAR:
939 bar = arg;
940 if (bar > BAR_5)
941 goto ret;
942 if (is_am654_pci_dev(pdev) && bar == BAR_0)
943 goto ret;
944 ret = pci_endpoint_test_bar(test, bar);
945 break;
946 case PCITEST_BARS:
947 ret = pci_endpoint_test_bars(test);
948 break;
949 case PCITEST_INTX_IRQ:
950 ret = pci_endpoint_test_intx_irq(test);
951 break;
952 case PCITEST_MSI:
953 case PCITEST_MSIX:
954 ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
955 break;
956 case PCITEST_WRITE:
957 ret = pci_endpoint_test_write(test, arg);
958 break;
959 case PCITEST_READ:
960 ret = pci_endpoint_test_read(test, arg);
961 break;
962 case PCITEST_COPY:
963 ret = pci_endpoint_test_copy(test, arg);
964 break;
965 case PCITEST_SET_IRQTYPE:
966 ret = pci_endpoint_test_set_irq(test, arg);
967 break;
968 case PCITEST_GET_IRQTYPE:
969 ret = test->irq_type;
970 break;
971 case PCITEST_CLEAR_IRQ:
972 ret = pci_endpoint_test_clear_irq(test);
973 break;
974 case PCITEST_DOORBELL:
975 ret = pci_endpoint_test_doorbell(test);
976 break;
977 }
978
979 ret:
980 mutex_unlock(&test->mutex);
981 return ret;
982 }
983
/* Character-device ops: every test is driven through ioctl(). */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
988
pci_endpoint_test_get_capabilities(struct pci_endpoint_test * test)989 static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
990 {
991 struct pci_dev *pdev = test->pdev;
992 struct device *dev = &pdev->dev;
993
994 test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
995 dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps);
996
997 /* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
998 if (test->ep_caps & CAP_UNALIGNED_ACCESS)
999 test->alignment = 0;
1000 }
1001
/*
 * Bind to an endpoint-test function: enable the device, map its memory BARs,
 * read the EP's capabilities and expose the /dev/pci-endpoint-test.N misc
 * device. Returns 0 on success or a negative errno, unwinding everything
 * acquired so far on failure.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int ret;
	int id;
	char name[29];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	/* Only endpoint functions are of interest, never bridges. */
	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;

	/* Per-device-ID overrides: register BAR and DMA alignment. */
	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	/* Best effort: failure just leaves the default DMA mask in place. */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable PCI device\n");
		return ret;
	}

	ret = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (ret) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Map every memory BAR; only the test-register BAR is mandatory. */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		ret = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	/* Unique instance number; also encoded into the device name. */
	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		ret = -ENOMEM;
		goto err_ida_remove;
	}

	pci_endpoint_test_get_capabilities(test);

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		ret = -ENOMEM;
		goto err_kfree_test_name;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	ret = misc_register(misc_device);
	if (ret) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return ret;
}
1131
pci_endpoint_test_remove(struct pci_dev * pdev)1132 static void pci_endpoint_test_remove(struct pci_dev *pdev)
1133 {
1134 int id;
1135 enum pci_barno bar;
1136 struct pci_endpoint_test *test = pci_get_drvdata(pdev);
1137 struct miscdevice *misc_device = &test->miscdev;
1138
1139 if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
1140 return;
1141 if (id < 0)
1142 return;
1143
1144 pci_endpoint_test_release_irq(test);
1145 pci_endpoint_test_free_irq_vectors(test);
1146
1147 misc_deregister(&test->miscdev);
1148 kfree(misc_device->name);
1149 kfree(test->name);
1150 ida_free(&pci_endpoint_test_ida, id);
1151 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1152 if (test->bar[bar])
1153 pci_iounmap(pdev, test->bar[bar]);
1154 }
1155
1156 pci_release_regions(pdev);
1157 pci_disable_device(pdev);
1158 }
1159
/*
 * Per-device match data consumed by probe: which BAR holds the test
 * registers, and the alignment value copied into test->alignment.
 * Default: registers in BAR_0, 4 KB alignment.
 */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
};
1164
/* TI AM654: test registers live in BAR_2 and alignment is 64 KB. */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
};
1169
/*
 * TI J721E family: 256-byte alignment; test_reg_bar is left
 * zero-initialized, i.e. BAR_0.
 */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
};
1173
/*
 * Rockchip RK3588: 64 KB alignment; test_reg_bar is left
 * zero-initialized, i.e. BAR_0.
 */
static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
};
1177
1178 /*
1179 * If the controller's Vendor/Device ID are programmable, you may be able to
1180 * use one of the existing entries for testing instead of adding a new one.
1181 */
/*
 * Vendor/Device ID match table. Entries that carry no .driver_data are
 * still valid: probe NULL-checks the data pointer and falls back to the
 * zeroed defaults (BAR_0, alignment 0) it sets before reading it.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1225
/*
 * PCI driver glue. SR-IOV virtual functions, if the endpoint exposes
 * any, are enabled/disabled through the generic simple helper.
 */
static struct pci_driver pci_endpoint_test_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = pci_endpoint_test_tbl,
	.probe = pci_endpoint_test_probe,
	.remove = pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1238