// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_INTX				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_INTX_IRQ			BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_TI_J721S2			0xb013
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

#define PCI_VENDOR_ID_ROCKCHIP			0x1d87
#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

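/*
 * Interrupt handler shared by all allocated vectors. If the endpoint has set
 * STATUS_IRQ_RAISED, record which vector fired and wake up the waiter.
 */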
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

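/*
 * Allocate IRQ vectors of the requested type (INTX, MSI or MSI-X) and record
 * how many vectors were actually obtained in test->num_irqs.
 */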
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_INTX:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};

static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno, int offset,
					void *write_buf, void *read_buf,
					int size)
{
	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(test->bar[barno] + offset, write_buf, size);

	memcpy_fromio(read_buf, test->bar[barno] + offset, size);

	return memcmp(write_buf, read_buf, size);
}

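/*
 * Write the per-BAR test pattern across the whole BAR and read it back,
 * comparing in chunks of at most 1 MB. For the test register BAR only the
 * first four bytes are exercised.
 */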
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j, bar_size, buf_size, iters, remain;
	void *write_buf __free(kfree) = NULL;
	void *read_buf __free(kfree) = NULL;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	bar_size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		bar_size = 0x4;

	/*
	 * Allocate a buffer of max size 1MB, and reuse that buffer while
	 * iterating over the whole BAR size (which might be much larger).
	 */
	buf_size = min(SZ_1M, bar_size);

	write_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!write_buf)
		return false;

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf)
		return false;

	iters = bar_size / buf_size;
	for (j = 0; j < iters; j++)
		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
						 write_buf, read_buf, buf_size))
			return false;

	remain = bar_size % buf_size;
	if (remain)
		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
						 write_buf, read_buf, remain))
			return false;

	return true;
}

static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_INTX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_INTX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

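/*
 * Ask the endpoint to raise the given MSI or MSI-X vector and check that the
 * interrupt arrived on the matching host-side vector.
 */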
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

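/*
 * Copy test: hand the endpoint a source and a destination buffer in host
 * memory, let it copy the data (optionally using its DMA engine), then
 * compare the CRC32 of both buffers.
 */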
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

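/*
 * Write test (from the host's point of view): fill a host buffer with random
 * data, program its DMA address and CRC32, and issue COMMAND_READ so the
 * endpoint reads the buffer and reports STATUS_READ_SUCCESS if the checksum
 * matches.
 */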
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

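/*
 * Read test (from the host's point of view): program a destination buffer and
 * issue COMMAND_WRITE so the endpoint writes data into host memory, then
 * compare the buffer's CRC32 against the checksum reported by the endpoint.
 */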
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

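/*
 * Dispatch the PCITEST_* ioctls issued on the misc device (e.g. by the
 * pcitest utility) to the individual test routines, serialized by the mutex.
 */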
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_INTX_IRQ:
		ret = pci_endpoint_test_intx_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

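/*
 * Probe: enable the device, map its BARs, allocate and request IRQ vectors of
 * the configured type, and register a "pci-endpoint-test.<id>" misc device.
 */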
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_INTX;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

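/*
 * Undo everything done in probe, recovering the instance id from the misc
 * device name.
 */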
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

/*
 * If the controller's Vendor/Device ID are programmable, you may be able to
 * use one of the existing entries for testing instead of adding a new one.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");