// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-msi.h>
#include <linux/pci_regs.h>

#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)
#define COMMAND_ENABLE_DOORBELL		BIT(6)
#define COMMAND_DISABLE_DOORBELL	BIT(7)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)
#define STATUS_DOORBELL_SUCCESS		BIT(9)
#define STATUS_DOORBELL_ENABLE_SUCCESS	BIT(10)
#define STATUS_DOORBELL_ENABLE_FAIL	BIT(11)
#define STATUS_DOORBELL_DISABLE_SUCCESS	BIT(12)
#define STATUS_DOORBELL_DISABLE_FAIL	BIT(13)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

#define CAP_UNALIGNED_ACCESS		BIT(0)
#define CAP_MSI				BIT(1)
#define CAP_MSIX			BIT(2)
#define CAP_INTX			BIT(3)

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*transfer_chan;
	dma_cookie_t		transfer_cookie;
	enum dma_status		transfer_status;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
	struct pci_epf_bar	db_bar;
};

struct pci_epf_test_reg {
	__le32 magic;
	__le32 command;
	__le32 status;
	__le64 src_addr;
	__le64 dst_addr;
	__le32 size;
	__le32 checksum;
	__le32 irq_type;
	__le32 irq_number;
	__le32 flags;
	__le32 caps;
	__le32 doorbell_bar;
	__le32 doorbell_offset;
	__le32 doorbell_data;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	dmaengine_terminate_sync(chan);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* calculate the rate */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}

static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, copy_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;
	void *copy_buf = NULL, *buf;

	orig_size = copy_size = le32_to_cpu(reg->size);

	if (flags & FLAG_USE_DMA) {
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
				      dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		status |= STATUS_COPY_SUCCESS;
	else
		status |= STATUS_COPY_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	size_t orig_size, src_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 checksum = le32_to_cpu(reg->checksum);
	u32 status = 0;

	orig_size = src_size = le32_to_cpu(reg->size);

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	crc32 = crc32_le(~0, src_buf, orig_size);
	if (crc32 != checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		status |= STATUS_READ_SUCCESS;
	else
		status |= STATUS_READ_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, dst_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;

	orig_size = dst_size = le32_to_cpu(reg->size);

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
	buf = dst_buf;

	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
					map.phys_addr, src_phys_addr,
					map_size, dst_addr,
					DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay,
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		status |= STATUS_WRITE_SUCCESS;
	else
		status |= STATUS_WRITE_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = le32_to_cpu(reg->status);
	u32 irq_number = le32_to_cpu(reg->irq_number);
	u32 irq_type = le32_to_cpu(reg->irq_type);
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	status |= STATUS_IRQ_RAISED;
	WRITE_ONCE(reg->status, cpu_to_le32(status));

	switch (irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
{
	struct pci_epf_test *epf_test = data;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 status = le32_to_cpu(reg->status);

	status |= STATUS_DOORBELL_SUCCESS;
	reg->status = cpu_to_le32(status);
	pci_epf_test_raise_irq(epf_test, reg);

	return IRQ_HANDLED;
}

static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
{
	struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
	struct pci_epf *epf = epf_test->epf;

	free_irq(epf->db_msg[0].virq, epf_test);
	reg->doorbell_bar = cpu_to_le32(NO_BAR);

	pci_epf_free_doorbell(epf);
}

static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
					 struct pci_epf_test_reg *reg)
{
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct msi_msg *msg;
	enum pci_barno bar;
	size_t offset;
	int ret;

	ret = pci_epf_alloc_doorbell(epf, 1);
	if (ret)
		goto set_status_err;

	msg = &epf->db_msg[0].msg;
	bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
	if (bar < BAR_0)
		goto err_doorbell_cleanup;

	ret = request_irq(epf->db_msg[0].virq, pci_epf_test_doorbell_handler, 0,
			  "pci-ep-test-doorbell", epf_test);
	if (ret) {
		dev_err(&epf->dev,
			"Failed to request doorbell IRQ: %d\n",
			epf->db_msg[0].virq);
		goto err_doorbell_cleanup;
	}

	reg->doorbell_data = cpu_to_le32(msg->data);
	reg->doorbell_bar = cpu_to_le32(bar);

	msg = &epf->db_msg[0].msg;
	ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
					 &epf_test->db_bar.phys_addr, &offset);

	if (ret)
		goto err_doorbell_cleanup;

	reg->doorbell_offset = cpu_to_le32(offset);

	epf_test->db_bar.barno = bar;
	epf_test->db_bar.size = epf->bar[bar].size;
	epf_test->db_bar.flags = epf->bar[bar].flags;

	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
	if (ret)
		goto err_doorbell_cleanup;

	status |= STATUS_DOORBELL_ENABLE_SUCCESS;
	reg->status = cpu_to_le32(status);
	return;

err_doorbell_cleanup:
	pci_epf_test_doorbell_cleanup(epf_test);
set_status_err:
	status |= STATUS_DOORBELL_ENABLE_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
					  struct pci_epf_test_reg *reg)
{
	enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	int ret;

	if (bar < BAR_0)
		goto set_status_err;

	pci_epf_test_doorbell_cleanup(epf_test);

	/*
	 * The doorbell feature temporarily overrides the inbound translation
	 * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
	 * it calls set_bar() twice without ever calling clear_bar(), as
	 * calling clear_bar() would clear the BAR's PCI address assigned by
	 * the host. Thus, when disabling the doorbell, restore the inbound
	 * translation to point to the memory allocated for the BAR.
	 */
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
	if (ret)
		goto set_status_err;

	status |= STATUS_DOORBELL_DISABLE_SUCCESS;
	reg->status = cpu_to_le32(status);

	return;

set_status_err:
	status |= STATUS_DOORBELL_DISABLE_FAIL;
	reg->status = cpu_to_le32(status);
}

static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 irq_type = le32_to_cpu(reg->irq_type);

	command = le32_to_cpu(READ_ONCE(reg->command));
	if (!command)
		goto reset_handler;

	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_ENABLE_DOORBELL:
		pci_epf_test_enable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_DISABLE_DOORBELL:
		pci_epf_test_disable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, ret;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      &epf->bar[bar]);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			epf_test->reg[bar] = NULL;
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static void pci_epf_test_clear_bar(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
				  &epf->bar[bar]);
	}
}

static void pci_epf_test_set_capabilities(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	struct pci_epc *epc = epf->epc;
	u32 caps = 0;

	if (epc->ops->align_addr)
		caps |= CAP_UNALIGNED_ACCESS;

	if (epf_test->epc_features->msi_capable)
		caps |= CAP_MSI;

	if (epf_test->epc_features->msix_capable)
		caps |= CAP_MSIX;

	if (epf_test->epc_features->intx_capable)
		caps |= CAP_INTX;

	reg->caps = cpu_to_le32(caps);
}

static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	pci_epf_test_set_capabilities(epf);

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};

static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	void *base;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	enum pci_barno bar;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	size_t test_reg_size;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	if (epc_features->msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		bar = pci_epc_get_next_free_bar(epc_features, bar);
		if (bar == NO_BAR)
			break;

		if (bar == test_reg_bar)
			continue;

		if (epc_features->bar[bar].type == BAR_FIXED)
			test_reg_size = epc_features->bar[bar].fixed_size;
		else
			test_reg_size = bar_size[bar];

		base = pci_epf_alloc_space(epf, test_reg_size, bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_test_free_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epf_free_space(epf, epf_test->reg[bar], bar,
				   PRIMARY_INTERFACE);
		epf_test->reg[bar] = NULL;
	}
}

static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	return 0;
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
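
/*
 * Usage sketch (illustrative, not part of the driver): this endpoint function
 * is normally paired with the host-side pci_endpoint_test driver and the
 * pcitest tool. The configfs steps below are an assumption-laden example; the
 * controller name "<epc-name>" and the vendor/device IDs are placeholders and
 * must be adapted to the actual platform. See
 * Documentation/PCI/endpoint/pci-test-howto.rst for the authoritative steps.
 *
 *   # modprobe pci_epf_test
 *   # cd /sys/kernel/config/pci_ep/
 *   # mkdir functions/pci_epf_test/func1
 *   # echo 0x104c > functions/pci_epf_test/func1/vendorid
 *   # echo 0xb500 > functions/pci_epf_test/func1/deviceid
 *   # echo 16 > functions/pci_epf_test/func1/msi_interrupts
 *   # ln -s functions/pci_epf_test/func1 controllers/<epc-name>/
 *   # echo 1 > controllers/<epc-name>/start
 *
 * Once the host enumerates the device, commands such as COMMAND_READ,
 * COMMAND_WRITE and COMMAND_COPY written to the test register BAR are picked
 * up and handled by pci_epf_test_cmd_handler() above.
 */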