// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-msi.h>
#include <linux/pci_regs.h>

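/*
 * Register interface shared with the host-side counterpart (the
 * pci_endpoint_test driver): the host writes one of the COMMAND_* values,
 * along with an IRQ_TYPE_*, to the test register BAR and reads the result
 * back from the STATUS_* bits.
 */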
#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)
#define COMMAND_ENABLE_DOORBELL		BIT(6)
#define COMMAND_DISABLE_DOORBELL	BIT(7)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)
#define STATUS_DOORBELL_SUCCESS		BIT(9)
#define STATUS_DOORBELL_ENABLE_SUCCESS	BIT(10)
#define STATUS_DOORBELL_ENABLE_FAIL	BIT(11)
#define STATUS_DOORBELL_DISABLE_SUCCESS	BIT(12)
#define STATUS_DOORBELL_DISABLE_FAIL	BIT(13)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

#define CAP_UNALIGNED_ACCESS		BIT(0)
#define CAP_MSI				BIT(1)
#define CAP_MSIX			BIT(2)
#define CAP_INTX			BIT(3)

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void *reg[PCI_STD_NUM_BARS];
	struct pci_epf *epf;
	enum pci_barno test_reg_bar;
	size_t msix_table_offset;
	struct delayed_work cmd_handler;
	struct dma_chan *dma_chan_tx;
	struct dma_chan *dma_chan_rx;
	struct dma_chan *transfer_chan;
	dma_cookie_t transfer_cookie;
	enum dma_status transfer_status;
	struct completion transfer_complete;
	bool dma_supported;
	bool dma_private;
	const struct pci_epc_features *epc_features;
	struct pci_epf_bar db_bar;
};

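/*
 * Layout of the test register BAR. All fields are little-endian so that the
 * host and the endpoint agree on byte order regardless of CPU endianness.
 */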
struct pci_epf_test_reg {
	__le32 magic;
	__le32 command;
	__le32 status;
	__le64 src_addr;
	__le64 dst_addr;
	__le32 size;
	__le32 checksum;
	__le32 irq_type;
	__le32 irq_number;
	__le32 flags;
	__le32 caps;
	__le32 doorbell_bar;
	__le32 doorbell_offset;
	__le32 doorbell_data;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

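/*
 * dmaengine completion callback: record the final transfer status and wake
 * the waiter once the transfer has either completed or failed.
 */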
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

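	/*
	 * Private (slave) channels need the remote PCI address programmed
	 * through dmaengine_slave_config(); generic channels use a plain
	 * memcpy descriptor between the two local addresses instead.
	 */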
	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	dmaengine_terminate_sync(chan);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

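	/*
	 * Prefer DMA channels that belong to the EPC device itself, one per
	 * direction; if either is unavailable, fall back to a single generic
	 * MEMCPY channel shared by both directions.
	 */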
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* Throughput in KB/s: bytes * NSEC_PER_SEC / (elapsed ns * 1000) */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}

static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, copy_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;
	void *copy_buf = NULL, *buf;

	orig_size = copy_size = le32_to_cpu(reg->size);

	if (flags & FLAG_USE_DMA) {
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

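	/*
	 * pci_epc_mem_map() may map less than the requested size, depending
	 * on the controller's addressing constraints, so transfer the data
	 * window by window until the whole size has been copied.
	 */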
	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
				      dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		status |= STATUS_COPY_SUCCESS;
	else
		status |= STATUS_COPY_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	size_t orig_size, src_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 checksum = le32_to_cpu(reg->checksum);
	u32 status = 0;

	orig_size = src_size = le32_to_cpu(reg->size);

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

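	/* Verify the data against the checksum the host wrote to the BAR. */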
	crc32 = crc32_le(~0, src_buf, orig_size);
	if (crc32 != checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		status |= STATUS_READ_SUCCESS;
	else
		status |= STATUS_READ_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, dst_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;

	orig_size = dst_size = le32_to_cpu(reg->size);

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
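	/*
	 * Fill the buffer with random data and publish its CRC through the
	 * test register BAR so the host can verify what lands in its memory.
	 */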
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
	buf = dst_buf;

	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay,
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		status |= STATUS_WRITE_SUCCESS;
	else
		status |= STATUS_WRITE_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = le32_to_cpu(reg->status);
	u32 irq_number = le32_to_cpu(reg->irq_number);
	u32 irq_type = le32_to_cpu(reg->irq_type);
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	status |= STATUS_IRQ_RAISED;
	WRITE_ONCE(reg->status, cpu_to_le32(status));

	switch (irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

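/*
 * Runs when the host writes doorbell_data into the doorbell BAR region:
 * report success in the status register and notify the host back using
 * the IRQ type it configured.
 */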
static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
{
	struct pci_epf_test *epf_test = data;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 status = le32_to_cpu(reg->status);

	status |= STATUS_DOORBELL_SUCCESS;
	reg->status = cpu_to_le32(status);
	pci_epf_test_raise_irq(epf_test, reg);

	return IRQ_HANDLED;
}

static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
{
	struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
	struct pci_epf *epf = epf_test->epf;

	free_irq(epf->db_msg[0].virq, epf_test);
	reg->doorbell_bar = cpu_to_le32(NO_BAR);

	pci_epf_free_doorbell(epf);
}

static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
					 struct pci_epf_test_reg *reg)
{
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct msi_msg *msg;
	enum pci_barno bar;
	size_t offset;
	int ret;

	ret = pci_epf_alloc_doorbell(epf, 1);
	if (ret)
		goto set_status_err;

	msg = &epf->db_msg[0].msg;
	bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
	if (bar < BAR_0)
		goto err_doorbell_cleanup;

	ret = request_irq(epf->db_msg[0].virq, pci_epf_test_doorbell_handler, 0,
			  "pci-ep-test-doorbell", epf_test);
	if (ret) {
		dev_err(&epf->dev,
			"Failed to request doorbell IRQ: %d\n",
			epf->db_msg[0].virq);
		goto err_doorbell_cleanup;
	}

	reg->doorbell_data = cpu_to_le32(msg->data);
	reg->doorbell_bar = cpu_to_le32(bar);

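	/*
	 * Expose the doorbell MSI target address through the free BAR: a host
	 * write of doorbell_data at doorbell_bar + doorbell_offset then lands
	 * on the MSI address and raises the doorbell IRQ on the endpoint.
	 */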
	ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
					 &epf_test->db_bar.phys_addr, &offset);
	if (ret)
		goto err_doorbell_cleanup;

	reg->doorbell_offset = cpu_to_le32(offset);

	epf_test->db_bar.barno = bar;
	epf_test->db_bar.size = epf->bar[bar].size;
	epf_test->db_bar.flags = epf->bar[bar].flags;

	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
	if (ret)
		goto err_doorbell_cleanup;

	status |= STATUS_DOORBELL_ENABLE_SUCCESS;
	reg->status = cpu_to_le32(status);
	return;

err_doorbell_cleanup:
	pci_epf_test_doorbell_cleanup(epf_test);
set_status_err:
	status |= STATUS_DOORBELL_ENABLE_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
					  struct pci_epf_test_reg *reg)
{
	enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;

	if (bar < BAR_0)
		goto set_status_err;

	pci_epf_test_doorbell_cleanup(epf_test);
	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);

	status |= STATUS_DOORBELL_DISABLE_SUCCESS;
	reg->status = cpu_to_le32(status);

	return;

set_status_err:
	status |= STATUS_DOORBELL_DISABLE_FAIL;
	reg->status = cpu_to_le32(status);
}

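/*
 * Command handler, polled roughly every millisecond: execute whatever
 * command the host last wrote to the test register BAR, then re-arm the
 * delayed work for the next poll.
 */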
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 irq_type = le32_to_cpu(reg->irq_type);

	command = le32_to_cpu(READ_ONCE(reg->command));
	if (!command)
		goto reset_handler;

	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_ENABLE_DOORBELL:
		pci_epf_test_enable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_DISABLE_DOORBELL:
		pci_epf_test_disable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, ret;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      &epf->bar[bar]);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			epf_test->reg[bar] = NULL;
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static void pci_epf_test_clear_bar(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
				  &epf->bar[bar]);
	}
}

static void pci_epf_test_set_capabilities(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	struct pci_epc *epc = epf->epc;
	u32 caps = 0;

	if (epc->ops->align_addr)
		caps |= CAP_UNALIGNED_ACCESS;

	if (epf_test->epc_features->msi_capable)
		caps |= CAP_MSI;

	if (epf_test->epc_features->msix_capable)
		caps |= CAP_MSIX;

	if (epf_test->epc_features->intx_capable)
		caps |= CAP_INTX;

	reg->caps = cpu_to_le32(caps);
}

static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	pci_epf_test_set_capabilities(epf);

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};

static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	void *base;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	enum pci_barno bar;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	size_t test_reg_size;

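	/*
	 * The test register BAR holds the pci_epf_test_reg block and, when
	 * MSI-X is supported, the MSI-X table and its pending-bit array.
	 */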
	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	if (epc_features->msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		bar = pci_epc_get_next_free_bar(epc_features, bar);
		if (bar == NO_BAR)
			break;

		if (bar == test_reg_bar)
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_test_free_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epf_free_space(epf, epf_test->reg[bar], bar,
				   PRIMARY_INTERFACE);
		epf_test->reg[bar] = NULL;
	}
}

static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	return 0;
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");