1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Test driver to test endpoint functionality 4 * 5 * Copyright (C) 2017 Texas Instruments 6 * Author: Kishon Vijay Abraham I <kishon@ti.com> 7 */ 8 9 #include <linux/crc32.h> 10 #include <linux/delay.h> 11 #include <linux/dmaengine.h> 12 #include <linux/io.h> 13 #include <linux/module.h> 14 #include <linux/slab.h> 15 #include <linux/pci_ids.h> 16 #include <linux/random.h> 17 18 #include <linux/pci-epc.h> 19 #include <linux/pci-epf.h> 20 #include <linux/pci_regs.h> 21 22 #define IRQ_TYPE_INTX 0 23 #define IRQ_TYPE_MSI 1 24 #define IRQ_TYPE_MSIX 2 25 26 #define COMMAND_RAISE_INTX_IRQ BIT(0) 27 #define COMMAND_RAISE_MSI_IRQ BIT(1) 28 #define COMMAND_RAISE_MSIX_IRQ BIT(2) 29 #define COMMAND_READ BIT(3) 30 #define COMMAND_WRITE BIT(4) 31 #define COMMAND_COPY BIT(5) 32 33 #define STATUS_READ_SUCCESS BIT(0) 34 #define STATUS_READ_FAIL BIT(1) 35 #define STATUS_WRITE_SUCCESS BIT(2) 36 #define STATUS_WRITE_FAIL BIT(3) 37 #define STATUS_COPY_SUCCESS BIT(4) 38 #define STATUS_COPY_FAIL BIT(5) 39 #define STATUS_IRQ_RAISED BIT(6) 40 #define STATUS_SRC_ADDR_INVALID BIT(7) 41 #define STATUS_DST_ADDR_INVALID BIT(8) 42 43 #define FLAG_USE_DMA BIT(0) 44 45 #define TIMER_RESOLUTION 1 46 47 static struct workqueue_struct *kpcitest_workqueue; 48 49 struct pci_epf_test { 50 void *reg[PCI_STD_NUM_BARS]; 51 struct pci_epf *epf; 52 enum pci_barno test_reg_bar; 53 size_t msix_table_offset; 54 struct delayed_work cmd_handler; 55 struct dma_chan *dma_chan_tx; 56 struct dma_chan *dma_chan_rx; 57 struct dma_chan *transfer_chan; 58 dma_cookie_t transfer_cookie; 59 enum dma_status transfer_status; 60 struct completion transfer_complete; 61 bool dma_supported; 62 bool dma_private; 63 const struct pci_epc_features *epc_features; 64 }; 65 66 struct pci_epf_test_reg { 67 u32 magic; 68 u32 command; 69 u32 status; 70 u64 src_addr; 71 u64 dst_addr; 72 u32 size; 73 u32 checksum; 74 u32 irq_type; 75 u32 irq_number; 76 u32 flags; 77 } __packed; 78 79 static struct pci_epf_header test_header = { 80 .vendorid = PCI_ANY_ID, 81 .deviceid = PCI_ANY_ID, 82 .baseclass_code = PCI_CLASS_OTHERS, 83 .interrupt_pin = PCI_INTERRUPT_INTA, 84 }; 85 86 static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 }; 87 88 static void pci_epf_test_dma_callback(void *param) 89 { 90 struct pci_epf_test *epf_test = param; 91 struct dma_tx_state state; 92 93 epf_test->transfer_status = 94 dmaengine_tx_status(epf_test->transfer_chan, 95 epf_test->transfer_cookie, &state); 96 if (epf_test->transfer_status == DMA_COMPLETE || 97 epf_test->transfer_status == DMA_ERROR) 98 complete(&epf_test->transfer_complete); 99 } 100 101 /** 102 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer 103 * data between PCIe EP and remote PCIe RC 104 * @epf_test: the EPF test device that performs the data transfer operation 105 * @dma_dst: The destination address of the data transfer. It can be a physical 106 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. 107 * @dma_src: The source address of the data transfer. It can be a physical 108 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. 109 * @len: The size of the data transfer 110 * @dma_remote: remote RC physical address 111 * @dir: DMA transfer direction 112 * 113 * Function that uses dmaengine API to transfer data between PCIe EP and remote 114 * PCIe RC. The source and destination address can be a physical address given 115 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs. 116 * 117 * The function returns '0' on success and negative value on failure. 118 */ 119 static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test, 120 dma_addr_t dma_dst, dma_addr_t dma_src, 121 size_t len, dma_addr_t dma_remote, 122 enum dma_transfer_direction dir) 123 { 124 struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ? 125 epf_test->dma_chan_tx : epf_test->dma_chan_rx; 126 dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst; 127 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 128 struct pci_epf *epf = epf_test->epf; 129 struct dma_async_tx_descriptor *tx; 130 struct dma_slave_config sconf = {}; 131 struct device *dev = &epf->dev; 132 int ret; 133 134 if (IS_ERR_OR_NULL(chan)) { 135 dev_err(dev, "Invalid DMA memcpy channel\n"); 136 return -EINVAL; 137 } 138 139 if (epf_test->dma_private) { 140 sconf.direction = dir; 141 if (dir == DMA_MEM_TO_DEV) 142 sconf.dst_addr = dma_remote; 143 else 144 sconf.src_addr = dma_remote; 145 146 if (dmaengine_slave_config(chan, &sconf)) { 147 dev_err(dev, "DMA slave config fail\n"); 148 return -EIO; 149 } 150 tx = dmaengine_prep_slave_single(chan, dma_local, len, dir, 151 flags); 152 } else { 153 tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, 154 flags); 155 } 156 157 if (!tx) { 158 dev_err(dev, "Failed to prepare DMA memcpy\n"); 159 return -EIO; 160 } 161 162 reinit_completion(&epf_test->transfer_complete); 163 epf_test->transfer_chan = chan; 164 tx->callback = pci_epf_test_dma_callback; 165 tx->callback_param = epf_test; 166 epf_test->transfer_cookie = dmaengine_submit(tx); 167 168 ret = dma_submit_error(epf_test->transfer_cookie); 169 if (ret) { 170 dev_err(dev, "Failed to do DMA tx_submit %d\n", ret); 171 goto terminate; 172 } 173 174 dma_async_issue_pending(chan); 175 ret = wait_for_completion_interruptible(&epf_test->transfer_complete); 176 if (ret < 0) { 177 dev_err(dev, "DMA wait_for_completion interrupted\n"); 178 goto terminate; 179 } 180 181 if (epf_test->transfer_status == DMA_ERROR) { 182 dev_err(dev, "DMA transfer failed\n"); 183 ret = -EIO; 184 } 185 186 terminate: 187 dmaengine_terminate_sync(chan); 188 189 return ret; 190 } 191 192 struct epf_dma_filter { 193 struct device *dev; 194 u32 dma_mask; 195 }; 196 197 static bool epf_dma_filter_fn(struct dma_chan *chan, void *node) 198 { 199 struct epf_dma_filter *filter = node; 200 struct dma_slave_caps caps; 201 202 memset(&caps, 0, sizeof(caps)); 203 dma_get_slave_caps(chan, &caps); 204 205 return chan->device->dev == filter->dev 206 && (filter->dma_mask & caps.directions); 207 } 208 209 /** 210 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel 211 * @epf_test: the EPF test device that performs data transfer operation 212 * 213 * Function to initialize EPF test DMA channel. 214 */ 215 static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test) 216 { 217 struct pci_epf *epf = epf_test->epf; 218 struct device *dev = &epf->dev; 219 struct epf_dma_filter filter; 220 struct dma_chan *dma_chan; 221 dma_cap_mask_t mask; 222 int ret; 223 224 filter.dev = epf->epc->dev.parent; 225 filter.dma_mask = BIT(DMA_DEV_TO_MEM); 226 227 dma_cap_zero(mask); 228 dma_cap_set(DMA_SLAVE, mask); 229 dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter); 230 if (!dma_chan) { 231 dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n"); 232 goto fail_back_tx; 233 } 234 235 epf_test->dma_chan_rx = dma_chan; 236 237 filter.dma_mask = BIT(DMA_MEM_TO_DEV); 238 dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter); 239 240 if (!dma_chan) { 241 dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n"); 242 goto fail_back_rx; 243 } 244 245 epf_test->dma_chan_tx = dma_chan; 246 epf_test->dma_private = true; 247 248 init_completion(&epf_test->transfer_complete); 249 250 return 0; 251 252 fail_back_rx: 253 dma_release_channel(epf_test->dma_chan_rx); 254 epf_test->dma_chan_tx = NULL; 255 256 fail_back_tx: 257 dma_cap_zero(mask); 258 dma_cap_set(DMA_MEMCPY, mask); 259 260 dma_chan = dma_request_chan_by_mask(&mask); 261 if (IS_ERR(dma_chan)) { 262 ret = PTR_ERR(dma_chan); 263 if (ret != -EPROBE_DEFER) 264 dev_err(dev, "Failed to get DMA channel\n"); 265 return ret; 266 } 267 init_completion(&epf_test->transfer_complete); 268 269 epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan; 270 271 return 0; 272 } 273 274 /** 275 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel 276 * @epf_test: the EPF test device that performs data transfer operation 277 * 278 * Helper to cleanup EPF test DMA channel. 279 */ 280 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) 281 { 282 if (!epf_test->dma_supported) 283 return; 284 285 dma_release_channel(epf_test->dma_chan_tx); 286 if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { 287 epf_test->dma_chan_tx = NULL; 288 epf_test->dma_chan_rx = NULL; 289 return; 290 } 291 292 dma_release_channel(epf_test->dma_chan_rx); 293 epf_test->dma_chan_rx = NULL; 294 295 return; 296 } 297 298 static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, 299 const char *op, u64 size, 300 struct timespec64 *start, 301 struct timespec64 *end, bool dma) 302 { 303 struct timespec64 ts = timespec64_sub(*end, *start); 304 u64 rate = 0, ns; 305 306 /* calculate the rate */ 307 ns = timespec64_to_ns(&ts); 308 if (ns) 309 rate = div64_u64(size * NSEC_PER_SEC, ns * 1000); 310 311 dev_info(&epf_test->epf->dev, 312 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n", 313 op, size, dma ? "YES" : "NO", 314 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate); 315 } 316 317 static void pci_epf_test_copy(struct pci_epf_test *epf_test, 318 struct pci_epf_test_reg *reg) 319 { 320 int ret; 321 void __iomem *src_addr; 322 void __iomem *dst_addr; 323 phys_addr_t src_phys_addr; 324 phys_addr_t dst_phys_addr; 325 struct timespec64 start, end; 326 struct pci_epf *epf = epf_test->epf; 327 struct device *dev = &epf->dev; 328 struct pci_epc *epc = epf->epc; 329 330 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); 331 if (!src_addr) { 332 dev_err(dev, "Failed to allocate source address\n"); 333 reg->status = STATUS_SRC_ADDR_INVALID; 334 ret = -ENOMEM; 335 goto err; 336 } 337 338 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr, 339 reg->src_addr, reg->size); 340 if (ret) { 341 dev_err(dev, "Failed to map source address\n"); 342 reg->status = STATUS_SRC_ADDR_INVALID; 343 goto err_src_addr; 344 } 345 346 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); 347 if (!dst_addr) { 348 dev_err(dev, "Failed to allocate destination address\n"); 349 reg->status = STATUS_DST_ADDR_INVALID; 350 ret = -ENOMEM; 351 goto err_src_map_addr; 352 } 353 354 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, 355 reg->dst_addr, reg->size); 356 if (ret) { 357 dev_err(dev, "Failed to map destination address\n"); 358 reg->status = STATUS_DST_ADDR_INVALID; 359 goto err_dst_addr; 360 } 361 362 ktime_get_ts64(&start); 363 if (reg->flags & FLAG_USE_DMA) { 364 if (epf_test->dma_private) { 365 dev_err(dev, "Cannot transfer data using DMA\n"); 366 ret = -EINVAL; 367 goto err_map_addr; 368 } 369 370 ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 371 src_phys_addr, reg->size, 0, 372 DMA_MEM_TO_MEM); 373 if (ret) 374 dev_err(dev, "Data transfer failed\n"); 375 } else { 376 void *buf; 377 378 buf = kzalloc(reg->size, GFP_KERNEL); 379 if (!buf) { 380 ret = -ENOMEM; 381 goto err_map_addr; 382 } 383 384 memcpy_fromio(buf, src_addr, reg->size); 385 memcpy_toio(dst_addr, buf, reg->size); 386 kfree(buf); 387 } 388 ktime_get_ts64(&end); 389 pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end, 390 reg->flags & FLAG_USE_DMA); 391 392 err_map_addr: 393 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); 394 395 err_dst_addr: 396 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); 397 398 err_src_map_addr: 399 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr); 400 401 err_src_addr: 402 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); 403 404 err: 405 if (!ret) 406 reg->status |= STATUS_COPY_SUCCESS; 407 else 408 reg->status |= STATUS_COPY_FAIL; 409 } 410 411 static void pci_epf_test_read(struct pci_epf_test *epf_test, 412 struct pci_epf_test_reg *reg) 413 { 414 int ret; 415 void __iomem *src_addr; 416 void *buf; 417 u32 crc32; 418 phys_addr_t phys_addr; 419 phys_addr_t dst_phys_addr; 420 struct timespec64 start, end; 421 struct pci_epf *epf = epf_test->epf; 422 struct device *dev = &epf->dev; 423 struct pci_epc *epc = epf->epc; 424 struct device *dma_dev = epf->epc->dev.parent; 425 426 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 427 if (!src_addr) { 428 dev_err(dev, "Failed to allocate address\n"); 429 reg->status = STATUS_SRC_ADDR_INVALID; 430 ret = -ENOMEM; 431 goto err; 432 } 433 434 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 435 reg->src_addr, reg->size); 436 if (ret) { 437 dev_err(dev, "Failed to map address\n"); 438 reg->status = STATUS_SRC_ADDR_INVALID; 439 goto err_addr; 440 } 441 442 buf = kzalloc(reg->size, GFP_KERNEL); 443 if (!buf) { 444 ret = -ENOMEM; 445 goto err_map_addr; 446 } 447 448 if (reg->flags & FLAG_USE_DMA) { 449 dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, 450 DMA_FROM_DEVICE); 451 if (dma_mapping_error(dma_dev, dst_phys_addr)) { 452 dev_err(dev, "Failed to map destination buffer addr\n"); 453 ret = -ENOMEM; 454 goto err_dma_map; 455 } 456 457 ktime_get_ts64(&start); 458 ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 459 phys_addr, reg->size, 460 reg->src_addr, DMA_DEV_TO_MEM); 461 if (ret) 462 dev_err(dev, "Data transfer failed\n"); 463 ktime_get_ts64(&end); 464 465 dma_unmap_single(dma_dev, dst_phys_addr, reg->size, 466 DMA_FROM_DEVICE); 467 } else { 468 ktime_get_ts64(&start); 469 memcpy_fromio(buf, src_addr, reg->size); 470 ktime_get_ts64(&end); 471 } 472 473 pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end, 474 reg->flags & FLAG_USE_DMA); 475 476 crc32 = crc32_le(~0, buf, reg->size); 477 if (crc32 != reg->checksum) 478 ret = -EIO; 479 480 err_dma_map: 481 kfree(buf); 482 483 err_map_addr: 484 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 485 486 err_addr: 487 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); 488 489 err: 490 if (!ret) 491 reg->status |= STATUS_READ_SUCCESS; 492 else 493 reg->status |= STATUS_READ_FAIL; 494 } 495 496 static void pci_epf_test_write(struct pci_epf_test *epf_test, 497 struct pci_epf_test_reg *reg) 498 { 499 int ret; 500 void __iomem *dst_addr; 501 void *buf; 502 phys_addr_t phys_addr; 503 phys_addr_t src_phys_addr; 504 struct timespec64 start, end; 505 struct pci_epf *epf = epf_test->epf; 506 struct device *dev = &epf->dev; 507 struct pci_epc *epc = epf->epc; 508 struct device *dma_dev = epf->epc->dev.parent; 509 510 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 511 if (!dst_addr) { 512 dev_err(dev, "Failed to allocate address\n"); 513 reg->status = STATUS_DST_ADDR_INVALID; 514 ret = -ENOMEM; 515 goto err; 516 } 517 518 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 519 reg->dst_addr, reg->size); 520 if (ret) { 521 dev_err(dev, "Failed to map address\n"); 522 reg->status = STATUS_DST_ADDR_INVALID; 523 goto err_addr; 524 } 525 526 buf = kzalloc(reg->size, GFP_KERNEL); 527 if (!buf) { 528 ret = -ENOMEM; 529 goto err_map_addr; 530 } 531 532 get_random_bytes(buf, reg->size); 533 reg->checksum = crc32_le(~0, buf, reg->size); 534 535 if (reg->flags & FLAG_USE_DMA) { 536 src_phys_addr = dma_map_single(dma_dev, buf, reg->size, 537 DMA_TO_DEVICE); 538 if (dma_mapping_error(dma_dev, src_phys_addr)) { 539 dev_err(dev, "Failed to map source buffer addr\n"); 540 ret = -ENOMEM; 541 goto err_dma_map; 542 } 543 544 ktime_get_ts64(&start); 545 546 ret = pci_epf_test_data_transfer(epf_test, phys_addr, 547 src_phys_addr, reg->size, 548 reg->dst_addr, 549 DMA_MEM_TO_DEV); 550 if (ret) 551 dev_err(dev, "Data transfer failed\n"); 552 ktime_get_ts64(&end); 553 554 dma_unmap_single(dma_dev, src_phys_addr, reg->size, 555 DMA_TO_DEVICE); 556 } else { 557 ktime_get_ts64(&start); 558 memcpy_toio(dst_addr, buf, reg->size); 559 ktime_get_ts64(&end); 560 } 561 562 pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end, 563 reg->flags & FLAG_USE_DMA); 564 565 /* 566 * wait 1ms inorder for the write to complete. Without this delay L3 567 * error in observed in the host system. 568 */ 569 usleep_range(1000, 2000); 570 571 err_dma_map: 572 kfree(buf); 573 574 err_map_addr: 575 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 576 577 err_addr: 578 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); 579 580 err: 581 if (!ret) 582 reg->status |= STATUS_WRITE_SUCCESS; 583 else 584 reg->status |= STATUS_WRITE_FAIL; 585 } 586 587 static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, 588 struct pci_epf_test_reg *reg) 589 { 590 struct pci_epf *epf = epf_test->epf; 591 struct device *dev = &epf->dev; 592 struct pci_epc *epc = epf->epc; 593 u32 status = reg->status | STATUS_IRQ_RAISED; 594 int count; 595 596 /* 597 * Set the status before raising the IRQ to ensure that the host sees 598 * the updated value when it gets the IRQ. 599 */ 600 WRITE_ONCE(reg->status, status); 601 602 switch (reg->irq_type) { 603 case IRQ_TYPE_INTX: 604 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 605 PCI_IRQ_INTX, 0); 606 break; 607 case IRQ_TYPE_MSI: 608 count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no); 609 if (reg->irq_number > count || count <= 0) { 610 dev_err(dev, "Invalid MSI IRQ number %d / %d\n", 611 reg->irq_number, count); 612 return; 613 } 614 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 615 PCI_IRQ_MSI, reg->irq_number); 616 break; 617 case IRQ_TYPE_MSIX: 618 count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no); 619 if (reg->irq_number > count || count <= 0) { 620 dev_err(dev, "Invalid MSIX IRQ number %d / %d\n", 621 reg->irq_number, count); 622 return; 623 } 624 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 625 PCI_IRQ_MSIX, reg->irq_number); 626 break; 627 default: 628 dev_err(dev, "Failed to raise IRQ, unknown type\n"); 629 break; 630 } 631 } 632 633 static void pci_epf_test_cmd_handler(struct work_struct *work) 634 { 635 u32 command; 636 struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, 637 cmd_handler.work); 638 struct pci_epf *epf = epf_test->epf; 639 struct device *dev = &epf->dev; 640 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 641 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 642 643 command = READ_ONCE(reg->command); 644 if (!command) 645 goto reset_handler; 646 647 WRITE_ONCE(reg->command, 0); 648 WRITE_ONCE(reg->status, 0); 649 650 if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) && 651 !epf_test->dma_supported) { 652 dev_err(dev, "Cannot transfer data using DMA\n"); 653 goto reset_handler; 654 } 655 656 if (reg->irq_type > IRQ_TYPE_MSIX) { 657 dev_err(dev, "Failed to detect IRQ type\n"); 658 goto reset_handler; 659 } 660 661 switch (command) { 662 case COMMAND_RAISE_INTX_IRQ: 663 case COMMAND_RAISE_MSI_IRQ: 664 case COMMAND_RAISE_MSIX_IRQ: 665 pci_epf_test_raise_irq(epf_test, reg); 666 break; 667 case COMMAND_WRITE: 668 pci_epf_test_write(epf_test, reg); 669 pci_epf_test_raise_irq(epf_test, reg); 670 break; 671 case COMMAND_READ: 672 pci_epf_test_read(epf_test, reg); 673 pci_epf_test_raise_irq(epf_test, reg); 674 break; 675 case COMMAND_COPY: 676 pci_epf_test_copy(epf_test, reg); 677 pci_epf_test_raise_irq(epf_test, reg); 678 break; 679 default: 680 dev_err(dev, "Invalid command 0x%x\n", command); 681 break; 682 } 683 684 reset_handler: 685 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, 686 msecs_to_jiffies(1)); 687 } 688 689 static int pci_epf_test_set_bar(struct pci_epf *epf) 690 { 691 int bar, ret; 692 struct pci_epc *epc = epf->epc; 693 struct device *dev = &epf->dev; 694 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 695 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 696 697 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 698 if (!epf_test->reg[bar]) 699 continue; 700 701 ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, 702 &epf->bar[bar]); 703 if (ret) { 704 pci_epf_free_space(epf, epf_test->reg[bar], bar, 705 PRIMARY_INTERFACE); 706 dev_err(dev, "Failed to set BAR%d\n", bar); 707 if (bar == test_reg_bar) 708 return ret; 709 } 710 } 711 712 return 0; 713 } 714 715 static void pci_epf_test_clear_bar(struct pci_epf *epf) 716 { 717 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 718 struct pci_epc *epc = epf->epc; 719 int bar; 720 721 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 722 if (!epf_test->reg[bar]) 723 continue; 724 725 pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, 726 &epf->bar[bar]); 727 } 728 } 729 730 static int pci_epf_test_epc_init(struct pci_epf *epf) 731 { 732 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 733 struct pci_epf_header *header = epf->header; 734 const struct pci_epc_features *epc_features = epf_test->epc_features; 735 struct pci_epc *epc = epf->epc; 736 struct device *dev = &epf->dev; 737 bool linkup_notifier = false; 738 int ret; 739 740 epf_test->dma_supported = true; 741 742 ret = pci_epf_test_init_dma_chan(epf_test); 743 if (ret) 744 epf_test->dma_supported = false; 745 746 if (epf->vfunc_no <= 1) { 747 ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header); 748 if (ret) { 749 dev_err(dev, "Configuration header write failed\n"); 750 return ret; 751 } 752 } 753 754 ret = pci_epf_test_set_bar(epf); 755 if (ret) 756 return ret; 757 758 if (epc_features->msi_capable) { 759 ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 760 epf->msi_interrupts); 761 if (ret) { 762 dev_err(dev, "MSI configuration failed\n"); 763 return ret; 764 } 765 } 766 767 if (epc_features->msix_capable) { 768 ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no, 769 epf->msix_interrupts, 770 epf_test->test_reg_bar, 771 epf_test->msix_table_offset); 772 if (ret) { 773 dev_err(dev, "MSI-X configuration failed\n"); 774 return ret; 775 } 776 } 777 778 linkup_notifier = epc_features->linkup_notifier; 779 if (!linkup_notifier) 780 queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); 781 782 return 0; 783 } 784 785 static void pci_epf_test_epc_deinit(struct pci_epf *epf) 786 { 787 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 788 789 cancel_delayed_work(&epf_test->cmd_handler); 790 pci_epf_test_clean_dma_chan(epf_test); 791 pci_epf_test_clear_bar(epf); 792 } 793 794 static int pci_epf_test_link_up(struct pci_epf *epf) 795 { 796 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 797 798 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, 799 msecs_to_jiffies(1)); 800 801 return 0; 802 } 803 804 static int pci_epf_test_link_down(struct pci_epf *epf) 805 { 806 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 807 808 cancel_delayed_work_sync(&epf_test->cmd_handler); 809 810 return 0; 811 } 812 813 static const struct pci_epc_event_ops pci_epf_test_event_ops = { 814 .epc_init = pci_epf_test_epc_init, 815 .epc_deinit = pci_epf_test_epc_deinit, 816 .link_up = pci_epf_test_link_up, 817 .link_down = pci_epf_test_link_down, 818 }; 819 820 static int pci_epf_test_alloc_space(struct pci_epf *epf) 821 { 822 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 823 struct device *dev = &epf->dev; 824 size_t msix_table_size = 0; 825 size_t test_reg_bar_size; 826 size_t pba_size = 0; 827 void *base; 828 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 829 enum pci_barno bar; 830 const struct pci_epc_features *epc_features = epf_test->epc_features; 831 size_t test_reg_size; 832 833 test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128); 834 835 if (epc_features->msix_capable) { 836 msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; 837 epf_test->msix_table_offset = test_reg_bar_size; 838 /* Align to QWORD or 8 Bytes */ 839 pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); 840 } 841 test_reg_size = test_reg_bar_size + msix_table_size + pba_size; 842 843 base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, 844 epc_features, PRIMARY_INTERFACE); 845 if (!base) { 846 dev_err(dev, "Failed to allocated register space\n"); 847 return -ENOMEM; 848 } 849 epf_test->reg[test_reg_bar] = base; 850 851 for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) { 852 bar = pci_epc_get_next_free_bar(epc_features, bar); 853 if (bar == NO_BAR) 854 break; 855 856 if (bar == test_reg_bar) 857 continue; 858 859 base = pci_epf_alloc_space(epf, bar_size[bar], bar, 860 epc_features, PRIMARY_INTERFACE); 861 if (!base) 862 dev_err(dev, "Failed to allocate space for BAR%d\n", 863 bar); 864 epf_test->reg[bar] = base; 865 } 866 867 return 0; 868 } 869 870 static void pci_epf_test_free_space(struct pci_epf *epf) 871 { 872 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 873 int bar; 874 875 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 876 if (!epf_test->reg[bar]) 877 continue; 878 879 pci_epf_free_space(epf, epf_test->reg[bar], bar, 880 PRIMARY_INTERFACE); 881 } 882 } 883 884 static int pci_epf_test_bind(struct pci_epf *epf) 885 { 886 int ret; 887 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 888 const struct pci_epc_features *epc_features; 889 enum pci_barno test_reg_bar = BAR_0; 890 struct pci_epc *epc = epf->epc; 891 892 if (WARN_ON_ONCE(!epc)) 893 return -EINVAL; 894 895 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); 896 if (!epc_features) { 897 dev_err(&epf->dev, "epc_features not implemented\n"); 898 return -EOPNOTSUPP; 899 } 900 901 test_reg_bar = pci_epc_get_first_free_bar(epc_features); 902 if (test_reg_bar < 0) 903 return -EINVAL; 904 905 epf_test->test_reg_bar = test_reg_bar; 906 epf_test->epc_features = epc_features; 907 908 ret = pci_epf_test_alloc_space(epf); 909 if (ret) 910 return ret; 911 912 return 0; 913 } 914 915 static void pci_epf_test_unbind(struct pci_epf *epf) 916 { 917 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 918 struct pci_epc *epc = epf->epc; 919 920 cancel_delayed_work(&epf_test->cmd_handler); 921 if (epc->init_complete) { 922 pci_epf_test_clean_dma_chan(epf_test); 923 pci_epf_test_clear_bar(epf); 924 } 925 pci_epf_test_free_space(epf); 926 } 927 928 static const struct pci_epf_device_id pci_epf_test_ids[] = { 929 { 930 .name = "pci_epf_test", 931 }, 932 {}, 933 }; 934 935 static int pci_epf_test_probe(struct pci_epf *epf, 936 const struct pci_epf_device_id *id) 937 { 938 struct pci_epf_test *epf_test; 939 struct device *dev = &epf->dev; 940 941 epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); 942 if (!epf_test) 943 return -ENOMEM; 944 945 epf->header = &test_header; 946 epf_test->epf = epf; 947 948 INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); 949 950 epf->event_ops = &pci_epf_test_event_ops; 951 952 epf_set_drvdata(epf, epf_test); 953 return 0; 954 } 955 956 static const struct pci_epf_ops ops = { 957 .unbind = pci_epf_test_unbind, 958 .bind = pci_epf_test_bind, 959 }; 960 961 static struct pci_epf_driver test_driver = { 962 .driver.name = "pci_epf_test", 963 .probe = pci_epf_test_probe, 964 .id_table = pci_epf_test_ids, 965 .ops = &ops, 966 .owner = THIS_MODULE, 967 }; 968 969 static int __init pci_epf_test_init(void) 970 { 971 int ret; 972 973 kpcitest_workqueue = alloc_workqueue("kpcitest", 974 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 975 if (!kpcitest_workqueue) { 976 pr_err("Failed to allocate the kpcitest work queue\n"); 977 return -ENOMEM; 978 } 979 980 ret = pci_epf_register_driver(&test_driver); 981 if (ret) { 982 destroy_workqueue(kpcitest_workqueue); 983 pr_err("Failed to register pci epf test driver --> %d\n", ret); 984 return ret; 985 } 986 987 return 0; 988 } 989 module_init(pci_epf_test_init); 990 991 static void __exit pci_epf_test_exit(void) 992 { 993 if (kpcitest_workqueue) 994 destroy_workqueue(kpcitest_workqueue); 995 pci_epf_unregister_driver(&test_driver); 996 } 997 module_exit(pci_epf_test_exit); 998 999 MODULE_DESCRIPTION("PCI EPF TEST DRIVER"); 1000 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); 1001 MODULE_LICENSE("GPL v2"); 1002