1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Test driver to test endpoint functionality 4 * 5 * Copyright (C) 2017 Texas Instruments 6 * Author: Kishon Vijay Abraham I <kishon@ti.com> 7 */ 8 9 #include <linux/crc32.h> 10 #include <linux/delay.h> 11 #include <linux/dmaengine.h> 12 #include <linux/io.h> 13 #include <linux/module.h> 14 #include <linux/slab.h> 15 #include <linux/pci_ids.h> 16 #include <linux/random.h> 17 18 #include <linux/pci-epc.h> 19 #include <linux/pci-epf.h> 20 #include <linux/pci_regs.h> 21 22 #define IRQ_TYPE_INTX 0 23 #define IRQ_TYPE_MSI 1 24 #define IRQ_TYPE_MSIX 2 25 26 #define COMMAND_RAISE_INTX_IRQ BIT(0) 27 #define COMMAND_RAISE_MSI_IRQ BIT(1) 28 #define COMMAND_RAISE_MSIX_IRQ BIT(2) 29 #define COMMAND_READ BIT(3) 30 #define COMMAND_WRITE BIT(4) 31 #define COMMAND_COPY BIT(5) 32 33 #define STATUS_READ_SUCCESS BIT(0) 34 #define STATUS_READ_FAIL BIT(1) 35 #define STATUS_WRITE_SUCCESS BIT(2) 36 #define STATUS_WRITE_FAIL BIT(3) 37 #define STATUS_COPY_SUCCESS BIT(4) 38 #define STATUS_COPY_FAIL BIT(5) 39 #define STATUS_IRQ_RAISED BIT(6) 40 #define STATUS_SRC_ADDR_INVALID BIT(7) 41 #define STATUS_DST_ADDR_INVALID BIT(8) 42 43 #define FLAG_USE_DMA BIT(0) 44 45 #define TIMER_RESOLUTION 1 46 47 static struct workqueue_struct *kpcitest_workqueue; 48 49 struct pci_epf_test { 50 void *reg[PCI_STD_NUM_BARS]; 51 struct pci_epf *epf; 52 enum pci_barno test_reg_bar; 53 size_t msix_table_offset; 54 struct delayed_work cmd_handler; 55 struct dma_chan *dma_chan_tx; 56 struct dma_chan *dma_chan_rx; 57 struct dma_chan *transfer_chan; 58 dma_cookie_t transfer_cookie; 59 enum dma_status transfer_status; 60 struct completion transfer_complete; 61 bool dma_supported; 62 bool dma_private; 63 const struct pci_epc_features *epc_features; 64 }; 65 66 struct pci_epf_test_reg { 67 u32 magic; 68 u32 command; 69 u32 status; 70 u64 src_addr; 71 u64 dst_addr; 72 u32 size; 73 u32 checksum; 74 u32 irq_type; 75 u32 irq_number; 76 u32 flags; 77 } __packed; 78 79 static struct pci_epf_header test_header = { 80 .vendorid = PCI_ANY_ID, 81 .deviceid = PCI_ANY_ID, 82 .baseclass_code = PCI_CLASS_OTHERS, 83 .interrupt_pin = PCI_INTERRUPT_INTA, 84 }; 85 86 static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 }; 87 88 static void pci_epf_test_dma_callback(void *param) 89 { 90 struct pci_epf_test *epf_test = param; 91 struct dma_tx_state state; 92 93 epf_test->transfer_status = 94 dmaengine_tx_status(epf_test->transfer_chan, 95 epf_test->transfer_cookie, &state); 96 if (epf_test->transfer_status == DMA_COMPLETE || 97 epf_test->transfer_status == DMA_ERROR) 98 complete(&epf_test->transfer_complete); 99 } 100 101 /** 102 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer 103 * data between PCIe EP and remote PCIe RC 104 * @epf_test: the EPF test device that performs the data transfer operation 105 * @dma_dst: The destination address of the data transfer. It can be a physical 106 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. 107 * @dma_src: The source address of the data transfer. It can be a physical 108 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. 109 * @len: The size of the data transfer 110 * @dma_remote: remote RC physical address 111 * @dir: DMA transfer direction 112 * 113 * Function that uses dmaengine API to transfer data between PCIe EP and remote 114 * PCIe RC. The source and destination address can be a physical address given 115 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs. 116 * 117 * The function returns '0' on success and negative value on failure. 118 */ 119 static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test, 120 dma_addr_t dma_dst, dma_addr_t dma_src, 121 size_t len, dma_addr_t dma_remote, 122 enum dma_transfer_direction dir) 123 { 124 struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ? 125 epf_test->dma_chan_tx : epf_test->dma_chan_rx; 126 dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst; 127 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 128 struct pci_epf *epf = epf_test->epf; 129 struct dma_async_tx_descriptor *tx; 130 struct dma_slave_config sconf = {}; 131 struct device *dev = &epf->dev; 132 int ret; 133 134 if (IS_ERR_OR_NULL(chan)) { 135 dev_err(dev, "Invalid DMA memcpy channel\n"); 136 return -EINVAL; 137 } 138 139 if (epf_test->dma_private) { 140 sconf.direction = dir; 141 if (dir == DMA_MEM_TO_DEV) 142 sconf.dst_addr = dma_remote; 143 else 144 sconf.src_addr = dma_remote; 145 146 if (dmaengine_slave_config(chan, &sconf)) { 147 dev_err(dev, "DMA slave config fail\n"); 148 return -EIO; 149 } 150 tx = dmaengine_prep_slave_single(chan, dma_local, len, dir, 151 flags); 152 } else { 153 tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, 154 flags); 155 } 156 157 if (!tx) { 158 dev_err(dev, "Failed to prepare DMA memcpy\n"); 159 return -EIO; 160 } 161 162 reinit_completion(&epf_test->transfer_complete); 163 epf_test->transfer_chan = chan; 164 tx->callback = pci_epf_test_dma_callback; 165 tx->callback_param = epf_test; 166 epf_test->transfer_cookie = dmaengine_submit(tx); 167 168 ret = dma_submit_error(epf_test->transfer_cookie); 169 if (ret) { 170 dev_err(dev, "Failed to do DMA tx_submit %d\n", ret); 171 goto terminate; 172 } 173 174 dma_async_issue_pending(chan); 175 ret = wait_for_completion_interruptible(&epf_test->transfer_complete); 176 if (ret < 0) { 177 dev_err(dev, "DMA wait_for_completion interrupted\n"); 178 goto terminate; 179 } 180 181 if (epf_test->transfer_status == DMA_ERROR) { 182 dev_err(dev, "DMA transfer failed\n"); 183 ret = -EIO; 184 } 185 186 terminate: 187 dmaengine_terminate_sync(chan); 188 189 return ret; 190 } 191 192 struct epf_dma_filter { 193 struct device *dev; 194 u32 dma_mask; 195 }; 196 197 static bool epf_dma_filter_fn(struct dma_chan *chan, void *node) 198 { 199 struct epf_dma_filter *filter = node; 200 struct dma_slave_caps caps; 201 202 memset(&caps, 0, sizeof(caps)); 203 dma_get_slave_caps(chan, &caps); 204 205 return chan->device->dev == filter->dev 206 && (filter->dma_mask & caps.directions); 207 } 208 209 /** 210 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel 211 * @epf_test: the EPF test device that performs data transfer operation 212 * 213 * Function to initialize EPF test DMA channel. 214 */ 215 static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test) 216 { 217 struct pci_epf *epf = epf_test->epf; 218 struct device *dev = &epf->dev; 219 struct epf_dma_filter filter; 220 struct dma_chan *dma_chan; 221 dma_cap_mask_t mask; 222 int ret; 223 224 filter.dev = epf->epc->dev.parent; 225 filter.dma_mask = BIT(DMA_DEV_TO_MEM); 226 227 dma_cap_zero(mask); 228 dma_cap_set(DMA_SLAVE, mask); 229 dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter); 230 if (!dma_chan) { 231 dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n"); 232 goto fail_back_tx; 233 } 234 235 epf_test->dma_chan_rx = dma_chan; 236 237 filter.dma_mask = BIT(DMA_MEM_TO_DEV); 238 dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter); 239 240 if (!dma_chan) { 241 dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n"); 242 goto fail_back_rx; 243 } 244 245 epf_test->dma_chan_tx = dma_chan; 246 epf_test->dma_private = true; 247 248 init_completion(&epf_test->transfer_complete); 249 250 return 0; 251 252 fail_back_rx: 253 dma_release_channel(epf_test->dma_chan_rx); 254 epf_test->dma_chan_tx = NULL; 255 256 fail_back_tx: 257 dma_cap_zero(mask); 258 dma_cap_set(DMA_MEMCPY, mask); 259 260 dma_chan = dma_request_chan_by_mask(&mask); 261 if (IS_ERR(dma_chan)) { 262 ret = PTR_ERR(dma_chan); 263 if (ret != -EPROBE_DEFER) 264 dev_err(dev, "Failed to get DMA channel\n"); 265 return ret; 266 } 267 init_completion(&epf_test->transfer_complete); 268 269 epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan; 270 271 return 0; 272 } 273 274 /** 275 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel 276 * @epf_test: the EPF test device that performs data transfer operation 277 * 278 * Helper to cleanup EPF test DMA channel. 279 */ 280 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) 281 { 282 if (!epf_test->dma_supported) 283 return; 284 285 dma_release_channel(epf_test->dma_chan_tx); 286 if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { 287 epf_test->dma_chan_tx = NULL; 288 epf_test->dma_chan_rx = NULL; 289 return; 290 } 291 292 dma_release_channel(epf_test->dma_chan_rx); 293 epf_test->dma_chan_rx = NULL; 294 295 return; 296 } 297 298 static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, 299 const char *op, u64 size, 300 struct timespec64 *start, 301 struct timespec64 *end, bool dma) 302 { 303 struct timespec64 ts = timespec64_sub(*end, *start); 304 u64 rate = 0, ns; 305 306 /* calculate the rate */ 307 ns = timespec64_to_ns(&ts); 308 if (ns) 309 rate = div64_u64(size * NSEC_PER_SEC, ns * 1000); 310 311 dev_info(&epf_test->epf->dev, 312 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n", 313 op, size, dma ? "YES" : "NO", 314 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate); 315 } 316 317 static void pci_epf_test_copy(struct pci_epf_test *epf_test, 318 struct pci_epf_test_reg *reg) 319 { 320 int ret; 321 void __iomem *src_addr; 322 void __iomem *dst_addr; 323 phys_addr_t src_phys_addr; 324 phys_addr_t dst_phys_addr; 325 struct timespec64 start, end; 326 struct pci_epf *epf = epf_test->epf; 327 struct device *dev = &epf->dev; 328 struct pci_epc *epc = epf->epc; 329 330 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); 331 if (!src_addr) { 332 dev_err(dev, "Failed to allocate source address\n"); 333 reg->status = STATUS_SRC_ADDR_INVALID; 334 ret = -ENOMEM; 335 goto err; 336 } 337 338 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr, 339 reg->src_addr, reg->size); 340 if (ret) { 341 dev_err(dev, "Failed to map source address\n"); 342 reg->status = STATUS_SRC_ADDR_INVALID; 343 goto err_src_addr; 344 } 345 346 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); 347 if (!dst_addr) { 348 dev_err(dev, "Failed to allocate destination address\n"); 349 reg->status = STATUS_DST_ADDR_INVALID; 350 ret = -ENOMEM; 351 goto err_src_map_addr; 352 } 353 354 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, 355 reg->dst_addr, reg->size); 356 if (ret) { 357 dev_err(dev, "Failed to map destination address\n"); 358 reg->status = STATUS_DST_ADDR_INVALID; 359 goto err_dst_addr; 360 } 361 362 ktime_get_ts64(&start); 363 if (reg->flags & FLAG_USE_DMA) { 364 if (epf_test->dma_private) { 365 dev_err(dev, "Cannot transfer data using DMA\n"); 366 ret = -EINVAL; 367 goto err_map_addr; 368 } 369 370 ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 371 src_phys_addr, reg->size, 0, 372 DMA_MEM_TO_MEM); 373 if (ret) 374 dev_err(dev, "Data transfer failed\n"); 375 } else { 376 void *buf; 377 378 buf = kzalloc(reg->size, GFP_KERNEL); 379 if (!buf) { 380 ret = -ENOMEM; 381 goto err_map_addr; 382 } 383 384 memcpy_fromio(buf, src_addr, reg->size); 385 memcpy_toio(dst_addr, buf, reg->size); 386 kfree(buf); 387 } 388 ktime_get_ts64(&end); 389 pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end, 390 reg->flags & FLAG_USE_DMA); 391 392 err_map_addr: 393 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); 394 395 err_dst_addr: 396 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); 397 398 err_src_map_addr: 399 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr); 400 401 err_src_addr: 402 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); 403 404 err: 405 if (!ret) 406 reg->status |= STATUS_COPY_SUCCESS; 407 else 408 reg->status |= STATUS_COPY_FAIL; 409 } 410 411 static void pci_epf_test_read(struct pci_epf_test *epf_test, 412 struct pci_epf_test_reg *reg) 413 { 414 int ret; 415 void __iomem *src_addr; 416 void *buf; 417 u32 crc32; 418 phys_addr_t phys_addr; 419 phys_addr_t dst_phys_addr; 420 struct timespec64 start, end; 421 struct pci_epf *epf = epf_test->epf; 422 struct device *dev = &epf->dev; 423 struct pci_epc *epc = epf->epc; 424 struct device *dma_dev = epf->epc->dev.parent; 425 426 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 427 if (!src_addr) { 428 dev_err(dev, "Failed to allocate address\n"); 429 reg->status = STATUS_SRC_ADDR_INVALID; 430 ret = -ENOMEM; 431 goto err; 432 } 433 434 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 435 reg->src_addr, reg->size); 436 if (ret) { 437 dev_err(dev, "Failed to map address\n"); 438 reg->status = STATUS_SRC_ADDR_INVALID; 439 goto err_addr; 440 } 441 442 buf = kzalloc(reg->size, GFP_KERNEL); 443 if (!buf) { 444 ret = -ENOMEM; 445 goto err_map_addr; 446 } 447 448 if (reg->flags & FLAG_USE_DMA) { 449 dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, 450 DMA_FROM_DEVICE); 451 if (dma_mapping_error(dma_dev, dst_phys_addr)) { 452 dev_err(dev, "Failed to map destination buffer addr\n"); 453 ret = -ENOMEM; 454 goto err_dma_map; 455 } 456 457 ktime_get_ts64(&start); 458 ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 459 phys_addr, reg->size, 460 reg->src_addr, DMA_DEV_TO_MEM); 461 if (ret) 462 dev_err(dev, "Data transfer failed\n"); 463 ktime_get_ts64(&end); 464 465 dma_unmap_single(dma_dev, dst_phys_addr, reg->size, 466 DMA_FROM_DEVICE); 467 } else { 468 ktime_get_ts64(&start); 469 memcpy_fromio(buf, src_addr, reg->size); 470 ktime_get_ts64(&end); 471 } 472 473 pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end, 474 reg->flags & FLAG_USE_DMA); 475 476 crc32 = crc32_le(~0, buf, reg->size); 477 if (crc32 != reg->checksum) 478 ret = -EIO; 479 480 err_dma_map: 481 kfree(buf); 482 483 err_map_addr: 484 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 485 486 err_addr: 487 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); 488 489 err: 490 if (!ret) 491 reg->status |= STATUS_READ_SUCCESS; 492 else 493 reg->status |= STATUS_READ_FAIL; 494 } 495 496 static void pci_epf_test_write(struct pci_epf_test *epf_test, 497 struct pci_epf_test_reg *reg) 498 { 499 int ret; 500 void __iomem *dst_addr; 501 void *buf; 502 phys_addr_t phys_addr; 503 phys_addr_t src_phys_addr; 504 struct timespec64 start, end; 505 struct pci_epf *epf = epf_test->epf; 506 struct device *dev = &epf->dev; 507 struct pci_epc *epc = epf->epc; 508 struct device *dma_dev = epf->epc->dev.parent; 509 510 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 511 if (!dst_addr) { 512 dev_err(dev, "Failed to allocate address\n"); 513 reg->status = STATUS_DST_ADDR_INVALID; 514 ret = -ENOMEM; 515 goto err; 516 } 517 518 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 519 reg->dst_addr, reg->size); 520 if (ret) { 521 dev_err(dev, "Failed to map address\n"); 522 reg->status = STATUS_DST_ADDR_INVALID; 523 goto err_addr; 524 } 525 526 buf = kzalloc(reg->size, GFP_KERNEL); 527 if (!buf) { 528 ret = -ENOMEM; 529 goto err_map_addr; 530 } 531 532 get_random_bytes(buf, reg->size); 533 reg->checksum = crc32_le(~0, buf, reg->size); 534 535 if (reg->flags & FLAG_USE_DMA) { 536 src_phys_addr = dma_map_single(dma_dev, buf, reg->size, 537 DMA_TO_DEVICE); 538 if (dma_mapping_error(dma_dev, src_phys_addr)) { 539 dev_err(dev, "Failed to map source buffer addr\n"); 540 ret = -ENOMEM; 541 goto err_dma_map; 542 } 543 544 ktime_get_ts64(&start); 545 546 ret = pci_epf_test_data_transfer(epf_test, phys_addr, 547 src_phys_addr, reg->size, 548 reg->dst_addr, 549 DMA_MEM_TO_DEV); 550 if (ret) 551 dev_err(dev, "Data transfer failed\n"); 552 ktime_get_ts64(&end); 553 554 dma_unmap_single(dma_dev, src_phys_addr, reg->size, 555 DMA_TO_DEVICE); 556 } else { 557 ktime_get_ts64(&start); 558 memcpy_toio(dst_addr, buf, reg->size); 559 ktime_get_ts64(&end); 560 } 561 562 pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end, 563 reg->flags & FLAG_USE_DMA); 564 565 /* 566 * wait 1ms inorder for the write to complete. Without this delay L3 567 * error in observed in the host system. 568 */ 569 usleep_range(1000, 2000); 570 571 err_dma_map: 572 kfree(buf); 573 574 err_map_addr: 575 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 576 577 err_addr: 578 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); 579 580 err: 581 if (!ret) 582 reg->status |= STATUS_WRITE_SUCCESS; 583 else 584 reg->status |= STATUS_WRITE_FAIL; 585 } 586 587 static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, 588 struct pci_epf_test_reg *reg) 589 { 590 struct pci_epf *epf = epf_test->epf; 591 struct device *dev = &epf->dev; 592 struct pci_epc *epc = epf->epc; 593 u32 status = reg->status | STATUS_IRQ_RAISED; 594 int count; 595 596 /* 597 * Set the status before raising the IRQ to ensure that the host sees 598 * the updated value when it gets the IRQ. 599 */ 600 WRITE_ONCE(reg->status, status); 601 602 switch (reg->irq_type) { 603 case IRQ_TYPE_INTX: 604 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 605 PCI_IRQ_INTX, 0); 606 break; 607 case IRQ_TYPE_MSI: 608 count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no); 609 if (reg->irq_number > count || count <= 0) { 610 dev_err(dev, "Invalid MSI IRQ number %d / %d\n", 611 reg->irq_number, count); 612 return; 613 } 614 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 615 PCI_IRQ_MSI, reg->irq_number); 616 break; 617 case IRQ_TYPE_MSIX: 618 count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no); 619 if (reg->irq_number > count || count <= 0) { 620 dev_err(dev, "Invalid MSIX IRQ number %d / %d\n", 621 reg->irq_number, count); 622 return; 623 } 624 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 625 PCI_IRQ_MSIX, reg->irq_number); 626 break; 627 default: 628 dev_err(dev, "Failed to raise IRQ, unknown type\n"); 629 break; 630 } 631 } 632 633 static void pci_epf_test_cmd_handler(struct work_struct *work) 634 { 635 u32 command; 636 struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, 637 cmd_handler.work); 638 struct pci_epf *epf = epf_test->epf; 639 struct device *dev = &epf->dev; 640 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 641 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 642 643 command = READ_ONCE(reg->command); 644 if (!command) 645 goto reset_handler; 646 647 WRITE_ONCE(reg->command, 0); 648 WRITE_ONCE(reg->status, 0); 649 650 if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) && 651 !epf_test->dma_supported) { 652 dev_err(dev, "Cannot transfer data using DMA\n"); 653 goto reset_handler; 654 } 655 656 if (reg->irq_type > IRQ_TYPE_MSIX) { 657 dev_err(dev, "Failed to detect IRQ type\n"); 658 goto reset_handler; 659 } 660 661 switch (command) { 662 case COMMAND_RAISE_INTX_IRQ: 663 case COMMAND_RAISE_MSI_IRQ: 664 case COMMAND_RAISE_MSIX_IRQ: 665 pci_epf_test_raise_irq(epf_test, reg); 666 break; 667 case COMMAND_WRITE: 668 pci_epf_test_write(epf_test, reg); 669 pci_epf_test_raise_irq(epf_test, reg); 670 break; 671 case COMMAND_READ: 672 pci_epf_test_read(epf_test, reg); 673 pci_epf_test_raise_irq(epf_test, reg); 674 break; 675 case COMMAND_COPY: 676 pci_epf_test_copy(epf_test, reg); 677 pci_epf_test_raise_irq(epf_test, reg); 678 break; 679 default: 680 dev_err(dev, "Invalid command 0x%x\n", command); 681 break; 682 } 683 684 reset_handler: 685 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, 686 msecs_to_jiffies(1)); 687 } 688 689 static void pci_epf_test_unbind(struct pci_epf *epf) 690 { 691 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 692 struct pci_epc *epc = epf->epc; 693 int bar; 694 695 cancel_delayed_work(&epf_test->cmd_handler); 696 pci_epf_test_clean_dma_chan(epf_test); 697 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 698 if (!epf_test->reg[bar]) 699 continue; 700 701 pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, 702 &epf->bar[bar]); 703 pci_epf_free_space(epf, epf_test->reg[bar], bar, 704 PRIMARY_INTERFACE); 705 } 706 } 707 708 static int pci_epf_test_set_bar(struct pci_epf *epf) 709 { 710 int bar, ret; 711 struct pci_epc *epc = epf->epc; 712 struct device *dev = &epf->dev; 713 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 714 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 715 716 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 717 if (!epf_test->reg[bar]) 718 continue; 719 720 ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, 721 &epf->bar[bar]); 722 if (ret) { 723 pci_epf_free_space(epf, epf_test->reg[bar], bar, 724 PRIMARY_INTERFACE); 725 dev_err(dev, "Failed to set BAR%d\n", bar); 726 if (bar == test_reg_bar) 727 return ret; 728 } 729 } 730 731 return 0; 732 } 733 734 static int pci_epf_test_core_init(struct pci_epf *epf) 735 { 736 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 737 struct pci_epf_header *header = epf->header; 738 const struct pci_epc_features *epc_features; 739 struct pci_epc *epc = epf->epc; 740 struct device *dev = &epf->dev; 741 bool linkup_notifier = false; 742 bool msix_capable = false; 743 bool msi_capable = true; 744 int ret; 745 746 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); 747 if (epc_features) { 748 msix_capable = epc_features->msix_capable; 749 msi_capable = epc_features->msi_capable; 750 } 751 752 if (epf->vfunc_no <= 1) { 753 ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header); 754 if (ret) { 755 dev_err(dev, "Configuration header write failed\n"); 756 return ret; 757 } 758 } 759 760 ret = pci_epf_test_set_bar(epf); 761 if (ret) 762 return ret; 763 764 if (msi_capable) { 765 ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 766 epf->msi_interrupts); 767 if (ret) { 768 dev_err(dev, "MSI configuration failed\n"); 769 return ret; 770 } 771 } 772 773 if (msix_capable) { 774 ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no, 775 epf->msix_interrupts, 776 epf_test->test_reg_bar, 777 epf_test->msix_table_offset); 778 if (ret) { 779 dev_err(dev, "MSI-X configuration failed\n"); 780 return ret; 781 } 782 } 783 784 linkup_notifier = epc_features->linkup_notifier; 785 if (!linkup_notifier) 786 queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); 787 788 return 0; 789 } 790 791 static int pci_epf_test_link_up(struct pci_epf *epf) 792 { 793 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 794 795 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, 796 msecs_to_jiffies(1)); 797 798 return 0; 799 } 800 801 static const struct pci_epc_event_ops pci_epf_test_event_ops = { 802 .core_init = pci_epf_test_core_init, 803 .link_up = pci_epf_test_link_up, 804 }; 805 806 static int pci_epf_test_alloc_space(struct pci_epf *epf) 807 { 808 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 809 struct device *dev = &epf->dev; 810 size_t msix_table_size = 0; 811 size_t test_reg_bar_size; 812 size_t pba_size = 0; 813 bool msix_capable; 814 void *base; 815 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 816 enum pci_barno bar; 817 const struct pci_epc_features *epc_features; 818 size_t test_reg_size; 819 820 epc_features = epf_test->epc_features; 821 822 test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128); 823 824 msix_capable = epc_features->msix_capable; 825 if (msix_capable) { 826 msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; 827 epf_test->msix_table_offset = test_reg_bar_size; 828 /* Align to QWORD or 8 Bytes */ 829 pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); 830 } 831 test_reg_size = test_reg_bar_size + msix_table_size + pba_size; 832 833 base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, 834 epc_features, PRIMARY_INTERFACE); 835 if (!base) { 836 dev_err(dev, "Failed to allocated register space\n"); 837 return -ENOMEM; 838 } 839 epf_test->reg[test_reg_bar] = base; 840 841 for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) { 842 bar = pci_epc_get_next_free_bar(epc_features, bar); 843 if (bar == NO_BAR) 844 break; 845 846 if (bar == test_reg_bar) 847 continue; 848 849 base = pci_epf_alloc_space(epf, bar_size[bar], bar, 850 epc_features, PRIMARY_INTERFACE); 851 if (!base) 852 dev_err(dev, "Failed to allocate space for BAR%d\n", 853 bar); 854 epf_test->reg[bar] = base; 855 } 856 857 return 0; 858 } 859 860 static int pci_epf_test_bind(struct pci_epf *epf) 861 { 862 int ret; 863 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 864 const struct pci_epc_features *epc_features; 865 enum pci_barno test_reg_bar = BAR_0; 866 struct pci_epc *epc = epf->epc; 867 868 if (WARN_ON_ONCE(!epc)) 869 return -EINVAL; 870 871 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); 872 if (!epc_features) { 873 dev_err(&epf->dev, "epc_features not implemented\n"); 874 return -EOPNOTSUPP; 875 } 876 877 test_reg_bar = pci_epc_get_first_free_bar(epc_features); 878 if (test_reg_bar < 0) 879 return -EINVAL; 880 881 epf_test->test_reg_bar = test_reg_bar; 882 epf_test->epc_features = epc_features; 883 884 ret = pci_epf_test_alloc_space(epf); 885 if (ret) 886 return ret; 887 888 epf_test->dma_supported = true; 889 890 ret = pci_epf_test_init_dma_chan(epf_test); 891 if (ret) 892 epf_test->dma_supported = false; 893 894 return 0; 895 } 896 897 static const struct pci_epf_device_id pci_epf_test_ids[] = { 898 { 899 .name = "pci_epf_test", 900 }, 901 {}, 902 }; 903 904 static int pci_epf_test_probe(struct pci_epf *epf, 905 const struct pci_epf_device_id *id) 906 { 907 struct pci_epf_test *epf_test; 908 struct device *dev = &epf->dev; 909 910 epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); 911 if (!epf_test) 912 return -ENOMEM; 913 914 epf->header = &test_header; 915 epf_test->epf = epf; 916 917 INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); 918 919 epf->event_ops = &pci_epf_test_event_ops; 920 921 epf_set_drvdata(epf, epf_test); 922 return 0; 923 } 924 925 static const struct pci_epf_ops ops = { 926 .unbind = pci_epf_test_unbind, 927 .bind = pci_epf_test_bind, 928 }; 929 930 static struct pci_epf_driver test_driver = { 931 .driver.name = "pci_epf_test", 932 .probe = pci_epf_test_probe, 933 .id_table = pci_epf_test_ids, 934 .ops = &ops, 935 .owner = THIS_MODULE, 936 }; 937 938 static int __init pci_epf_test_init(void) 939 { 940 int ret; 941 942 kpcitest_workqueue = alloc_workqueue("kpcitest", 943 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 944 if (!kpcitest_workqueue) { 945 pr_err("Failed to allocate the kpcitest work queue\n"); 946 return -ENOMEM; 947 } 948 949 ret = pci_epf_register_driver(&test_driver); 950 if (ret) { 951 destroy_workqueue(kpcitest_workqueue); 952 pr_err("Failed to register pci epf test driver --> %d\n", ret); 953 return ret; 954 } 955 956 return 0; 957 } 958 module_init(pci_epf_test_init); 959 960 static void __exit pci_epf_test_exit(void) 961 { 962 if (kpcitest_workqueue) 963 destroy_workqueue(kpcitest_workqueue); 964 pci_epf_unregister_driver(&test_driver); 965 } 966 module_exit(pci_epf_test_exit); 967 968 MODULE_DESCRIPTION("PCI EPF TEST DRIVER"); 969 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); 970 MODULE_LICENSE("GPL v2"); 971