1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Host side endpoint driver to implement Non-Transparent Bridge functionality 4 * 5 * Copyright (C) 2020 Texas Instruments 6 * Author: Kishon Vijay Abraham I <kishon@ti.com> 7 */ 8 9 #include <linux/delay.h> 10 #include <linux/module.h> 11 #include <linux/pci.h> 12 #include <linux/slab.h> 13 #include <linux/ntb.h> 14 15 #define NTB_EPF_COMMAND 0x0 16 #define CMD_CONFIGURE_DOORBELL 1 17 #define CMD_TEARDOWN_DOORBELL 2 18 #define CMD_CONFIGURE_MW 3 19 #define CMD_TEARDOWN_MW 4 20 #define CMD_LINK_UP 5 21 #define CMD_LINK_DOWN 6 22 23 #define NTB_EPF_ARGUMENT 0x4 24 #define MSIX_ENABLE BIT(16) 25 26 #define NTB_EPF_CMD_STATUS 0x8 27 #define COMMAND_STATUS_OK 1 28 #define COMMAND_STATUS_ERROR 2 29 30 #define NTB_EPF_LINK_STATUS 0x0A 31 #define LINK_STATUS_UP BIT(0) 32 33 #define NTB_EPF_TOPOLOGY 0x0C 34 #define NTB_EPF_LOWER_ADDR 0x10 35 #define NTB_EPF_UPPER_ADDR 0x14 36 #define NTB_EPF_LOWER_SIZE 0x18 37 #define NTB_EPF_UPPER_SIZE 0x1C 38 #define NTB_EPF_MW_COUNT 0x20 39 #define NTB_EPF_MW1_OFFSET 0x24 40 #define NTB_EPF_SPAD_OFFSET 0x28 41 #define NTB_EPF_SPAD_COUNT 0x2C 42 #define NTB_EPF_DB_ENTRY_SIZE 0x30 43 #define NTB_EPF_DB_DATA(n) (0x34 + (n) * 4) 44 #define NTB_EPF_DB_OFFSET(n) (0xB4 + (n) * 4) 45 46 #define NTB_EPF_MIN_DB_COUNT 3 47 #define NTB_EPF_MAX_DB_COUNT 31 48 49 #define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */ 50 51 enum pci_barno { 52 NO_BAR = -1, 53 BAR_0, 54 BAR_1, 55 BAR_2, 56 BAR_3, 57 BAR_4, 58 BAR_5, 59 }; 60 61 enum epf_ntb_bar { 62 BAR_CONFIG, 63 BAR_PEER_SPAD, 64 BAR_DB, 65 BAR_MW1, 66 BAR_MW2, 67 BAR_MW3, 68 BAR_MW4, 69 NTB_BAR_NUM, 70 }; 71 72 #define NTB_EPF_MAX_MW_COUNT (NTB_BAR_NUM - BAR_MW1) 73 74 struct ntb_epf_dev { 75 struct ntb_dev ntb; 76 struct device *dev; 77 /* Mutex to protect providing commands to NTB EPF */ 78 struct mutex cmd_lock; 79 80 const enum pci_barno *barno_map; 81 82 unsigned int mw_count; 83 unsigned int spad_count; 84 unsigned int db_count; 85 86 void __iomem *ctrl_reg; 87 void __iomem *db_reg; 88 void __iomem *peer_spad_reg; 89 90 unsigned int self_spad; 91 unsigned int peer_spad; 92 93 int db_val; 94 u64 db_valid_mask; 95 }; 96 97 #define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb) 98 99 static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command, 100 u32 argument) 101 { 102 ktime_t timeout; 103 bool timedout; 104 int ret = 0; 105 u32 status; 106 107 mutex_lock(&ndev->cmd_lock); 108 writel(argument, ndev->ctrl_reg + NTB_EPF_ARGUMENT); 109 writel(command, ndev->ctrl_reg + NTB_EPF_COMMAND); 110 111 timeout = ktime_add_ms(ktime_get(), NTB_EPF_COMMAND_TIMEOUT); 112 while (1) { 113 timedout = ktime_after(ktime_get(), timeout); 114 status = readw(ndev->ctrl_reg + NTB_EPF_CMD_STATUS); 115 116 if (status == COMMAND_STATUS_ERROR) { 117 ret = -EINVAL; 118 break; 119 } 120 121 if (status == COMMAND_STATUS_OK) 122 break; 123 124 if (WARN_ON(timedout)) { 125 ret = -ETIMEDOUT; 126 break; 127 } 128 129 usleep_range(5, 10); 130 } 131 132 writew(0, ndev->ctrl_reg + NTB_EPF_CMD_STATUS); 133 mutex_unlock(&ndev->cmd_lock); 134 135 return ret; 136 } 137 138 static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx) 139 { 140 struct device *dev = ndev->dev; 141 142 if (idx < 0 || idx > ndev->mw_count) { 143 dev_err(dev, "Unsupported Memory Window index %d\n", idx); 144 return -EINVAL; 145 } 146 147 return ndev->barno_map[BAR_MW1 + idx]; 148 } 149 150 static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx) 151 { 152 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 153 struct device *dev = ndev->dev; 154 155 if (pidx != NTB_DEF_PEER_IDX) { 156 dev_err(dev, "Unsupported Peer ID %d\n", pidx); 157 return -EINVAL; 158 } 159 160 return ndev->mw_count; 161 } 162 163 static int ntb_epf_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, 164 resource_size_t *addr_align, 165 resource_size_t *size_align, 166 resource_size_t *size_max) 167 { 168 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 169 struct device *dev = ndev->dev; 170 int bar; 171 172 if (pidx != NTB_DEF_PEER_IDX) { 173 dev_err(dev, "Unsupported Peer ID %d\n", pidx); 174 return -EINVAL; 175 } 176 177 bar = ntb_epf_mw_to_bar(ndev, idx); 178 if (bar < 0) 179 return bar; 180 181 if (addr_align) 182 *addr_align = SZ_4K; 183 184 if (size_align) 185 *size_align = 1; 186 187 if (size_max) 188 *size_max = pci_resource_len(ndev->ntb.pdev, bar); 189 190 return 0; 191 } 192 193 static u64 ntb_epf_link_is_up(struct ntb_dev *ntb, 194 enum ntb_speed *speed, 195 enum ntb_width *width) 196 { 197 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 198 u32 status; 199 200 status = readw(ndev->ctrl_reg + NTB_EPF_LINK_STATUS); 201 202 return status & LINK_STATUS_UP; 203 } 204 205 static u32 ntb_epf_spad_read(struct ntb_dev *ntb, int idx) 206 { 207 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 208 struct device *dev = ndev->dev; 209 u32 offset; 210 211 if (idx < 0 || idx >= ndev->spad_count) { 212 dev_err(dev, "READ: Invalid ScratchPad Index %d\n", idx); 213 return 0; 214 } 215 216 offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET); 217 offset += (idx << 2); 218 219 return readl(ndev->ctrl_reg + offset); 220 } 221 222 static int ntb_epf_spad_write(struct ntb_dev *ntb, 223 int idx, u32 val) 224 { 225 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 226 struct device *dev = ndev->dev; 227 u32 offset; 228 229 if (idx < 0 || idx >= ndev->spad_count) { 230 dev_err(dev, "WRITE: Invalid ScratchPad Index %d\n", idx); 231 return -EINVAL; 232 } 233 234 offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET); 235 offset += (idx << 2); 236 writel(val, ndev->ctrl_reg + offset); 237 238 return 0; 239 } 240 241 static u32 ntb_epf_peer_spad_read(struct ntb_dev *ntb, int pidx, int idx) 242 { 243 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 244 struct device *dev = ndev->dev; 245 u32 offset; 246 247 if (pidx != NTB_DEF_PEER_IDX) { 248 dev_err(dev, "Unsupported Peer ID %d\n", pidx); 249 return -EINVAL; 250 } 251 252 if (idx < 0 || idx >= ndev->spad_count) { 253 dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx); 254 return -EINVAL; 255 } 256 257 offset = (idx << 2); 258 return readl(ndev->peer_spad_reg + offset); 259 } 260 261 static int ntb_epf_peer_spad_write(struct ntb_dev *ntb, int pidx, 262 int idx, u32 val) 263 { 264 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 265 struct device *dev = ndev->dev; 266 u32 offset; 267 268 if (pidx != NTB_DEF_PEER_IDX) { 269 dev_err(dev, "Unsupported Peer ID %d\n", pidx); 270 return -EINVAL; 271 } 272 273 if (idx < 0 || idx >= ndev->spad_count) { 274 dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx); 275 return -EINVAL; 276 } 277 278 offset = (idx << 2); 279 writel(val, ndev->peer_spad_reg + offset); 280 281 return 0; 282 } 283 284 static int ntb_epf_link_enable(struct ntb_dev *ntb, 285 enum ntb_speed max_speed, 286 enum ntb_width max_width) 287 { 288 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 289 struct device *dev = ndev->dev; 290 int ret; 291 292 ret = ntb_epf_send_command(ndev, CMD_LINK_UP, 0); 293 if (ret) { 294 dev_err(dev, "Fail to enable link\n"); 295 return ret; 296 } 297 298 return 0; 299 } 300 301 static int ntb_epf_link_disable(struct ntb_dev *ntb) 302 { 303 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 304 struct device *dev = ndev->dev; 305 int ret; 306 307 ret = ntb_epf_send_command(ndev, CMD_LINK_DOWN, 0); 308 if (ret) { 309 dev_err(dev, "Fail to disable link\n"); 310 return ret; 311 } 312 313 return 0; 314 } 315 316 static irqreturn_t ntb_epf_vec_isr(int irq, void *dev) 317 { 318 struct ntb_epf_dev *ndev = dev; 319 int irq_no; 320 321 irq_no = irq - pci_irq_vector(ndev->ntb.pdev, 0); 322 ndev->db_val = irq_no + 1; 323 324 if (irq_no == 0) 325 ntb_link_event(&ndev->ntb); 326 else 327 ntb_db_event(&ndev->ntb, irq_no); 328 329 return IRQ_HANDLED; 330 } 331 332 static int ntb_epf_init_isr(struct ntb_epf_dev *ndev, int msi_min, int msi_max) 333 { 334 struct pci_dev *pdev = ndev->ntb.pdev; 335 struct device *dev = ndev->dev; 336 u32 argument = MSIX_ENABLE; 337 int irq; 338 int ret; 339 int i; 340 341 irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, PCI_IRQ_MSIX); 342 if (irq < 0) { 343 dev_dbg(dev, "Failed to get MSIX interrupts\n"); 344 irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, 345 PCI_IRQ_MSI); 346 if (irq < 0) { 347 dev_err(dev, "Failed to get MSI interrupts\n"); 348 return irq; 349 } 350 argument &= ~MSIX_ENABLE; 351 } 352 353 for (i = 0; i < irq; i++) { 354 ret = request_irq(pci_irq_vector(pdev, i), ntb_epf_vec_isr, 355 0, "ntb_epf", ndev); 356 if (ret) { 357 dev_err(dev, "Failed to request irq\n"); 358 goto err_request_irq; 359 } 360 } 361 362 ndev->db_count = irq - 1; 363 364 ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_DOORBELL, 365 argument | irq); 366 if (ret) { 367 dev_err(dev, "Failed to configure doorbell\n"); 368 goto err_configure_db; 369 } 370 371 return 0; 372 373 err_configure_db: 374 for (i = 0; i < ndev->db_count + 1; i++) 375 free_irq(pci_irq_vector(pdev, i), ndev); 376 377 err_request_irq: 378 pci_free_irq_vectors(pdev); 379 380 return ret; 381 } 382 383 static int ntb_epf_peer_mw_count(struct ntb_dev *ntb) 384 { 385 return ntb_ndev(ntb)->mw_count; 386 } 387 388 static int ntb_epf_spad_count(struct ntb_dev *ntb) 389 { 390 return ntb_ndev(ntb)->spad_count; 391 } 392 393 static u64 ntb_epf_db_valid_mask(struct ntb_dev *ntb) 394 { 395 return ntb_ndev(ntb)->db_valid_mask; 396 } 397 398 static int ntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits) 399 { 400 return 0; 401 } 402 403 static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, 404 dma_addr_t addr, resource_size_t size) 405 { 406 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 407 struct device *dev = ndev->dev; 408 resource_size_t mw_size; 409 int bar; 410 411 if (pidx != NTB_DEF_PEER_IDX) { 412 dev_err(dev, "Unsupported Peer ID %d\n", pidx); 413 return -EINVAL; 414 } 415 416 bar = ntb_epf_mw_to_bar(ndev, idx); 417 if (bar < 0) 418 return bar; 419 420 mw_size = pci_resource_len(ntb->pdev, bar); 421 422 if (size > mw_size) { 423 dev_err(dev, "Size:%pa is greater than the MW size %pa\n", 424 &size, &mw_size); 425 return -EINVAL; 426 } 427 428 writel(lower_32_bits(addr), ndev->ctrl_reg + NTB_EPF_LOWER_ADDR); 429 writel(upper_32_bits(addr), ndev->ctrl_reg + NTB_EPF_UPPER_ADDR); 430 writel(lower_32_bits(size), ndev->ctrl_reg + NTB_EPF_LOWER_SIZE); 431 writel(upper_32_bits(size), ndev->ctrl_reg + NTB_EPF_UPPER_SIZE); 432 ntb_epf_send_command(ndev, CMD_CONFIGURE_MW, idx); 433 434 return 0; 435 } 436 437 static int ntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx) 438 { 439 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 440 struct device *dev = ndev->dev; 441 int ret = 0; 442 443 ntb_epf_send_command(ndev, CMD_TEARDOWN_MW, idx); 444 if (ret) 445 dev_err(dev, "Failed to teardown memory window\n"); 446 447 return ret; 448 } 449 450 static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx, 451 phys_addr_t *base, resource_size_t *size) 452 { 453 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 454 u32 offset = 0; 455 int bar; 456 457 if (idx == 0) 458 offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET); 459 460 bar = ntb_epf_mw_to_bar(ndev, idx); 461 if (bar < 0) 462 return bar; 463 464 if (base) 465 *base = pci_resource_start(ndev->ntb.pdev, bar) + offset; 466 467 if (size) 468 *size = pci_resource_len(ndev->ntb.pdev, bar) - offset; 469 470 return 0; 471 } 472 473 static int ntb_epf_peer_db_set(struct ntb_dev *ntb, u64 db_bits) 474 { 475 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 476 u32 interrupt_num = ffs(db_bits) + 1; 477 struct device *dev = ndev->dev; 478 u32 db_entry_size; 479 u32 db_offset; 480 u32 db_data; 481 482 if (interrupt_num > ndev->db_count) { 483 dev_err(dev, "DB interrupt %d greater than Max Supported %d\n", 484 interrupt_num, ndev->db_count); 485 return -EINVAL; 486 } 487 488 db_entry_size = readl(ndev->ctrl_reg + NTB_EPF_DB_ENTRY_SIZE); 489 490 db_data = readl(ndev->ctrl_reg + NTB_EPF_DB_DATA(interrupt_num)); 491 db_offset = readl(ndev->ctrl_reg + NTB_EPF_DB_OFFSET(interrupt_num)); 492 writel(db_data, ndev->db_reg + (db_entry_size * interrupt_num) + 493 db_offset); 494 495 return 0; 496 } 497 498 static u64 ntb_epf_db_read(struct ntb_dev *ntb) 499 { 500 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 501 502 return ndev->db_val; 503 } 504 505 static int ntb_epf_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) 506 { 507 return 0; 508 } 509 510 static int ntb_epf_db_clear(struct ntb_dev *ntb, u64 db_bits) 511 { 512 struct ntb_epf_dev *ndev = ntb_ndev(ntb); 513 514 ndev->db_val = 0; 515 516 return 0; 517 } 518 519 static const struct ntb_dev_ops ntb_epf_ops = { 520 .mw_count = ntb_epf_mw_count, 521 .spad_count = ntb_epf_spad_count, 522 .peer_mw_count = ntb_epf_peer_mw_count, 523 .db_valid_mask = ntb_epf_db_valid_mask, 524 .db_set_mask = ntb_epf_db_set_mask, 525 .mw_set_trans = ntb_epf_mw_set_trans, 526 .mw_clear_trans = ntb_epf_mw_clear_trans, 527 .peer_mw_get_addr = ntb_epf_peer_mw_get_addr, 528 .link_enable = ntb_epf_link_enable, 529 .spad_read = ntb_epf_spad_read, 530 .spad_write = ntb_epf_spad_write, 531 .peer_spad_read = ntb_epf_peer_spad_read, 532 .peer_spad_write = ntb_epf_peer_spad_write, 533 .peer_db_set = ntb_epf_peer_db_set, 534 .db_read = ntb_epf_db_read, 535 .mw_get_align = ntb_epf_mw_get_align, 536 .link_is_up = ntb_epf_link_is_up, 537 .db_clear_mask = ntb_epf_db_clear_mask, 538 .db_clear = ntb_epf_db_clear, 539 .link_disable = ntb_epf_link_disable, 540 }; 541 542 static inline void ntb_epf_init_struct(struct ntb_epf_dev *ndev, 543 struct pci_dev *pdev) 544 { 545 ndev->ntb.pdev = pdev; 546 ndev->ntb.topo = NTB_TOPO_NONE; 547 ndev->ntb.ops = &ntb_epf_ops; 548 } 549 550 static int ntb_epf_init_dev(struct ntb_epf_dev *ndev) 551 { 552 struct device *dev = ndev->dev; 553 int ret; 554 555 /* One Link interrupt and rest doorbell interrupt */ 556 ret = ntb_epf_init_isr(ndev, NTB_EPF_MIN_DB_COUNT + 1, 557 NTB_EPF_MAX_DB_COUNT + 1); 558 if (ret) { 559 dev_err(dev, "Failed to init ISR\n"); 560 return ret; 561 } 562 563 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; 564 ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT); 565 ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT); 566 567 if (ndev->mw_count > NTB_EPF_MAX_MW_COUNT) { 568 dev_err(dev, "Unsupported MW count: %u\n", ndev->mw_count); 569 return -EINVAL; 570 } 571 572 return 0; 573 } 574 575 static int ntb_epf_init_pci(struct ntb_epf_dev *ndev, 576 struct pci_dev *pdev) 577 { 578 struct device *dev = ndev->dev; 579 size_t spad_sz, spad_off; 580 int ret; 581 582 pci_set_drvdata(pdev, ndev); 583 584 ret = pci_enable_device(pdev); 585 if (ret) { 586 dev_err(dev, "Cannot enable PCI device\n"); 587 goto err_pci_enable; 588 } 589 590 ret = pci_request_regions(pdev, "ntb"); 591 if (ret) { 592 dev_err(dev, "Cannot obtain PCI resources\n"); 593 goto err_pci_regions; 594 } 595 596 pci_set_master(pdev); 597 598 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 599 if (ret) { 600 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 601 if (ret) { 602 dev_err(dev, "Cannot set DMA mask\n"); 603 goto err_pci_regions; 604 } 605 dev_warn(&pdev->dev, "Cannot DMA highmem\n"); 606 } 607 608 ndev->ctrl_reg = pci_iomap(pdev, ndev->barno_map[BAR_CONFIG], 0); 609 if (!ndev->ctrl_reg) { 610 ret = -EIO; 611 goto err_pci_regions; 612 } 613 614 if (ndev->barno_map[BAR_PEER_SPAD] != ndev->barno_map[BAR_CONFIG]) { 615 ndev->peer_spad_reg = pci_iomap(pdev, 616 ndev->barno_map[BAR_PEER_SPAD], 0); 617 if (!ndev->peer_spad_reg) { 618 ret = -EIO; 619 goto err_pci_regions; 620 } 621 } else { 622 spad_sz = 4 * readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT); 623 spad_off = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET); 624 ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz; 625 } 626 627 ndev->db_reg = pci_iomap(pdev, ndev->barno_map[BAR_DB], 0); 628 if (!ndev->db_reg) { 629 ret = -EIO; 630 goto err_pci_regions; 631 } 632 633 return 0; 634 635 err_pci_regions: 636 pci_disable_device(pdev); 637 638 err_pci_enable: 639 pci_set_drvdata(pdev, NULL); 640 641 return ret; 642 } 643 644 static void ntb_epf_deinit_pci(struct ntb_epf_dev *ndev) 645 { 646 struct pci_dev *pdev = ndev->ntb.pdev; 647 648 pci_iounmap(pdev, ndev->ctrl_reg); 649 pci_iounmap(pdev, ndev->peer_spad_reg); 650 pci_iounmap(pdev, ndev->db_reg); 651 652 pci_release_regions(pdev); 653 pci_disable_device(pdev); 654 pci_set_drvdata(pdev, NULL); 655 } 656 657 static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev) 658 { 659 struct pci_dev *pdev = ndev->ntb.pdev; 660 int i; 661 662 ntb_epf_send_command(ndev, CMD_TEARDOWN_DOORBELL, ndev->db_count + 1); 663 664 for (i = 0; i < ndev->db_count + 1; i++) 665 free_irq(pci_irq_vector(pdev, i), ndev); 666 pci_free_irq_vectors(pdev); 667 } 668 669 static int ntb_epf_pci_probe(struct pci_dev *pdev, 670 const struct pci_device_id *id) 671 { 672 struct device *dev = &pdev->dev; 673 struct ntb_epf_dev *ndev; 674 int ret; 675 676 if (pci_is_bridge(pdev)) 677 return -ENODEV; 678 679 ndev = devm_kzalloc(dev, sizeof(*ndev), GFP_KERNEL); 680 if (!ndev) 681 return -ENOMEM; 682 683 ndev->barno_map = (const enum pci_barno *)id->driver_data; 684 if (!ndev->barno_map) 685 return -EINVAL; 686 687 ndev->dev = dev; 688 689 ntb_epf_init_struct(ndev, pdev); 690 mutex_init(&ndev->cmd_lock); 691 692 ret = ntb_epf_init_pci(ndev, pdev); 693 if (ret) { 694 dev_err(dev, "Failed to init PCI\n"); 695 return ret; 696 } 697 698 ret = ntb_epf_init_dev(ndev); 699 if (ret) { 700 dev_err(dev, "Failed to init device\n"); 701 goto err_init_dev; 702 } 703 704 ret = ntb_register_device(&ndev->ntb); 705 if (ret) { 706 dev_err(dev, "Failed to register NTB device\n"); 707 goto err_register_dev; 708 } 709 710 return 0; 711 712 err_register_dev: 713 ntb_epf_cleanup_isr(ndev); 714 715 err_init_dev: 716 ntb_epf_deinit_pci(ndev); 717 718 return ret; 719 } 720 721 static void ntb_epf_pci_remove(struct pci_dev *pdev) 722 { 723 struct ntb_epf_dev *ndev = pci_get_drvdata(pdev); 724 725 ntb_unregister_device(&ndev->ntb); 726 ntb_epf_cleanup_isr(ndev); 727 ntb_epf_deinit_pci(ndev); 728 } 729 730 static const enum pci_barno j721e_map[NTB_BAR_NUM] = { 731 [BAR_CONFIG] = BAR_0, 732 [BAR_PEER_SPAD] = BAR_1, 733 [BAR_DB] = BAR_2, 734 [BAR_MW1] = BAR_2, 735 [BAR_MW2] = BAR_3, 736 [BAR_MW3] = BAR_4, 737 [BAR_MW4] = BAR_5 738 }; 739 740 static const enum pci_barno mx8_map[NTB_BAR_NUM] = { 741 [BAR_CONFIG] = BAR_0, 742 [BAR_PEER_SPAD] = BAR_0, 743 [BAR_DB] = BAR_2, 744 [BAR_MW1] = BAR_4, 745 [BAR_MW2] = BAR_5, 746 [BAR_MW3] = NO_BAR, 747 [BAR_MW4] = NO_BAR 748 }; 749 750 static const enum pci_barno rcar_barno[NTB_BAR_NUM] = { 751 [BAR_CONFIG] = BAR_0, 752 [BAR_PEER_SPAD] = BAR_0, 753 [BAR_DB] = BAR_4, 754 [BAR_MW1] = BAR_2, 755 [BAR_MW2] = NO_BAR, 756 [BAR_MW3] = NO_BAR, 757 [BAR_MW4] = NO_BAR, 758 }; 759 760 static const struct pci_device_id ntb_epf_pci_tbl[] = { 761 { 762 PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E), 763 .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, 764 .driver_data = (kernel_ulong_t)j721e_map, 765 }, 766 { 767 PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809), 768 .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, 769 .driver_data = (kernel_ulong_t)mx8_map, 770 }, 771 { 772 PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0030), 773 .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, 774 .driver_data = (kernel_ulong_t)rcar_barno, 775 }, 776 { }, 777 }; 778 779 static struct pci_driver ntb_epf_pci_driver = { 780 .name = KBUILD_MODNAME, 781 .id_table = ntb_epf_pci_tbl, 782 .probe = ntb_epf_pci_probe, 783 .remove = ntb_epf_pci_remove, 784 }; 785 module_pci_driver(ntb_epf_pci_driver); 786 787 MODULE_DESCRIPTION("PCI ENDPOINT NTB HOST DRIVER"); 788 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); 789 MODULE_LICENSE("GPL v2"); 790