/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/switchtec.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ntb.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static ulong max_mw_size = SZ_2M;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size,
	"Max memory window size reported to the upper layer");

static bool use_lut_mws;
module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
	"Enable the use of the LUT based memory windows");

#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128

/*
 * Exchanged with the peer through the reserved LUT window;
 * see switchtec_ntb_init_shared_mw().
 */
struct shared_mw {
	u32 magic;
	u32 link_sta;
	u32 partition_id;
	u64 mw_sizes[MAX_MWS];
	u32 spad[128];
};

#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
#define LUT_SIZE SZ_64K

struct switchtec_ntb {
	struct ntb_dev ntb;
	struct switchtec_dev *stdev;

	int self_partition;
	int peer_partition;

	int doorbell_irq;
	int message_irq;

	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;

	void __iomem *mmio_xlink_win;

	struct shared_mw *self_shared;
	struct shared_mw __iomem *peer_shared;
	dma_addr_t self_shared_dma;

	u64 db_mask;
	u64 db_valid_mask;
	int db_shift;
	int db_peer_shift;

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;
	int nr_lut_mw;
	int nr_rsvd_luts;
	int direct_mw_to_bar[MAX_DIRECT_MW];

	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	bool link_is_up;
	enum ntb_speed link_speed;
	enum ntb_width link_width;
	struct work_struct link_reinit_work;
};

static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
	return container_of(ntb, struct switchtec_ntb, ntb);
}

static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};

	int i;
	u32 ps;
	int status;

	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}

	iowrite32(op, &ctl->partition_op);

	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}

		ps = ioread32(&ctl->partition_status) & 0xFFFF;

		if (ps != status)
			break;
	}

	if (ps == wait_status)
		return 0;

	if (ps == status) {
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)\n",
			op_text[op], op,
			ioread32(&ctl->partition_status));

		return -ETIMEDOUT;
	}

	return -EIO;
}

static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
				  u32 val)
{
	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
		return -EINVAL;

	iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);

	return 0;
}

static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (!use_lut_mws)
		nr_lut_mw = 0;

	return nr_direct_mw + nr_lut_mw;
}

static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}

static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}

static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
				      int widx, resource_size_t *addr_align,
				      resource_size_t *size_align,
				      resource_size_t *size_max)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int lut;
	resource_size_t size;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	lut = widx >= sndev->peer_nr_direct_mw;
	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);

	if (size == 0)
		return -EINVAL;

	if (addr_align)
		*addr_align = lut ? size : SZ_4K;

	if (size_align)
		*size_align = lut ? size : SZ_4K;

	if (size_max)
		*size_max = size;

	return 0;
}

static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}

static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}

static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}

static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
				     dma_addr_t addr, resource_size_t size)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}

static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
		widx, pidx, &addr, &size);

	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;

	if (xlate_pos < 12)
		return -EINVAL;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	if (addr == 0 || size == 0) {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x\n",
			widx, ioread32(&ctl->bar_error));

		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);

		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}

	return rc;
}

static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;

	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
}

static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */

		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		if (offset && *size > offset)
			*size = offset;

		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}

static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
				      int idx, phys_addr_t *base,
				      resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[0];
	int offset;

	offset = LUT_SIZE * lut_index(sndev, idx);

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size)
		*size = LUT_SIZE;

	return 0;
}

static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
					  phys_addr_t *base,
					  resource_size_t *size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < sndev->nr_direct_mw)
		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
	else if (idx < switchtec_ntb_peer_mw_count(ntb))
		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
	else
		return -EINVAL;
}

static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	if (speed)
		*speed = (linksta >> 16) & 0xF;

	if (width)
		*width = (linksta >> 20) & 0x3F;
}

static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
{
	enum ntb_speed self_speed, peer_speed;
	enum ntb_width self_width, peer_width;

	if (!sndev->link_is_up) {
		sndev->link_speed = NTB_SPEED_NONE;
		sndev->link_width = NTB_WIDTH_NONE;
		return;
	}

	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
				      &self_speed, &self_width);
	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
				      &peer_speed, &peer_width);

	sndev->link_speed = min(self_speed, peer_speed);
	sndev->link_width = min(self_width, peer_width);
}

static int crosslink_is_enabled(struct switchtec_ntb *sndev)
{
	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;

	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
}

static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	if (!crosslink_is_enabled(sndev))
		return;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
		int m = i | sndev->self_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);
}

enum switchtec_msg {
	LINK_MESSAGE = 0,
	MSG_LINK_UP = 1,
	MSG_LINK_DOWN = 2,
	MSG_CHECK_LINK = 3,
	MSG_LINK_FORCE_DOWN = 4,
};

static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);

static void link_reinit_work(struct work_struct *work)
{
	struct switchtec_ntb *sndev;

	sndev = container_of(work, struct switchtec_ntb, link_reinit_work);

	switchtec_ntb_reinit_peer(sndev);
}

static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
				     enum switchtec_msg msg)
{
	int link_sta;
	int old = sndev->link_is_up;

	if (msg == MSG_LINK_FORCE_DOWN) {
		schedule_work(&sndev->link_reinit_work);

		if (sndev->link_is_up) {
			sndev->link_is_up = 0;
			ntb_link_event(&sndev->ntb);
			dev_info(&sndev->stdev->dev, "ntb link forced down\n");
		}

		return;
	}

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		/*
		 * The peer's link_sta sits in the 32 bits directly after
		 * its magic field, so one 64-bit read fetches both.
		 */
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s\n",
			 link_sta ? "up" : "down");

		if (link_sta)
			crosslink_init_dbmsgs(sndev);
	}
}

static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
	struct switchtec_ntb *sndev = stdev->sndev;

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
}

static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
				    enum ntb_speed *speed,
				    enum ntb_width *width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (speed)
		*speed = sndev->link_speed;
	if (width)
		*width = sndev->link_width;

	return sndev->link_is_up;
}

static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
				     enum ntb_speed max_speed,
				     enum ntb_width max_width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "enabling link\n");

	sndev->self_shared->link_sta = 1;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

	return 0;
}

static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "disabling link\n");

	sndev->self_shared->link_sta = 0;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

	return 0;
}

static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return sndev->db_valid_mask;
}

static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return 1;
}

static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_vector < 0 || db_vector > 1)
		return 0;

	return sndev->db_valid_mask;
}

static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
{
	u64 ret;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;

	return ret & sndev->db_valid_mask;
}

static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);

	return 0;
}

static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask |= db_bits << sndev->db_shift;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}

static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask &= ~(db_bits << sndev->db_shift);
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}

static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
}

static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
				      phys_addr_t *db_addr,
				      resource_size_t *db_size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;

	offset = (unsigned long)&sndev->mmio_peer_dbmsg->odb -
		(unsigned long)sndev->stdev->mmio;

	offset += sndev->db_shift / 8;

	if (db_addr)
		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
	if (db_size)
		*db_size = sizeof(u32);

	return 0;
}

static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb);

	return 0;
}

static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return ARRAY_SIZE(sndev->self_shared->spad);
}

static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
		return 0;

	if (!sndev->self_shared)
		return 0;

	return sndev->self_shared->spad[idx];
}

static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
		return -EINVAL;

	if (!sndev->self_shared)
		return -EIO;

	sndev->self_shared->spad[idx] = val;

	return 0;
}

static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
					int sidx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return 0;

	if (!sndev->peer_shared)
		return 0;

	return ioread32(&sndev->peer_shared->spad[sidx]);
}

static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
					 int sidx, u32 val)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return -EINVAL;

	if (!sndev->peer_shared)
		return -EIO;

	iowrite32(val, &sndev->peer_shared->spad[sidx]);

	return 0;
}

static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
					int sidx, phys_addr_t *spad_addr)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
		(unsigned long)sndev->stdev->mmio;

	if (spad_addr)
		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;

	return 0;
}

static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};

static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
{
	u64 tpart_vec;
	int self;
	u64 part_map;
	int bit;

	sndev->ntb.pdev = sndev->stdev->pdev;
	sndev->ntb.topo = NTB_TOPO_SWITCH;
	sndev->ntb.ops = &switchtec_ntb_ops;

	INIT_WORK(&sndev->link_reinit_work, link_reinit_work);

	sndev->self_partition = sndev->stdev->partition;

	sndev->mmio_ntb = sndev->stdev->mmio_ntb;

	self = sndev->self_partition;
	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
	tpart_vec <<= 32;
	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);

	part_map = ioread64(&sndev->mmio_ntb->ep_map);
	part_map &= ~(1ULL << sndev->self_partition);

	if (!ffs(tpart_vec)) {
		if (sndev->stdev->partition_count != 2) {
			dev_err(&sndev->stdev->dev,
				"ntb target partition not defined\n");
			return -ENODEV;
		}

		bit = ffs(part_map);
		if (!bit) {
			dev_err(&sndev->stdev->dev,
				"peer partition is not NT partition\n");
			return -ENODEV;
		}

		sndev->peer_partition = bit - 1;
	} else {
		if (ffs(tpart_vec) != fls(tpart_vec)) {
			dev_err(&sndev->stdev->dev,
				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
			return -ENODEV;
		}

		sndev->peer_partition = ffs(tpart_vec) - 1;
		if (!(part_map & (1ULL << sndev->peer_partition))) {
			dev_err(&sndev->stdev->dev,
				"ntb target partition is not NT partition\n");
			return -ENODEV;
		}
	}

	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
		sndev->self_partition, sndev->stdev->partition_count);

	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
		SWITCHTEC_NTB_REG_CTRL_OFFSET;
	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;

	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
	sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;

	return 0;
}

static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *ctl,
			       int lut_idx, int partition, u64 addr)
{
	int peer_bar = sndev->peer_direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);

	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
		  &ctl->lut_entry[lut_idx]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up reserved lut window: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}

static int config_req_id_table(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
			       int *req_ids, int count)
{
	int i, rc = 0;
	u32 error;
	u32 proxy_id;

	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
		dev_err(&sndev->stdev->dev,
			"Not enough requester IDs available.\n");
		return -EFAULT;
	}

	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
				   NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
		  &mmio_ctrl->partition_ctrl);

	for (i = 0; i < count; i++) {
		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
			  &mmio_ctrl->req_id_table[i]);

		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
		dev_dbg(&sndev->stdev->dev,
			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
			(proxy_id >> 1) & 0x7);
	}

	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
				   NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		error = ioread32(&mmio_ctrl->req_id_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up the requester ID table: %08x\n",
			error);
	}

	return 0;
}

static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
			       u64 *mw_addrs, int mw_count)
{
	int rc, i;
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
	u64 addr;
	size_t size, offset;
	int bar;
	int xlate_pos;
	u32 ctl_val;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	for (i = 0; i < sndev->nr_lut_mw; i++) {
		if (i == ntb_lut_idx)
			continue;

		addr = mw_addrs[0] + LUT_SIZE * i;

		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
			   addr),
			  &ctl->lut_entry[i]);
	}

	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		bar = sndev->direct_mw_to_bar[i];
		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
		addr = mw_addrs[i] + offset;
		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		xlate_pos = ilog2(size);

		if (offset && size > offset)
			size = offset;

		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
		iowrite64(sndev->peer_partition | addr,
			  &ctl->bar_entry[bar].xlate_addr);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up cross link windows: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}

static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
				   struct ntb_ctrl_regs __iomem *mmio_ctrl)
{
	int req_ids[16];
	int i;
	u32 proxy_id;

	for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
		proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);

		if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
			break;

		req_ids[i] = ((proxy_id >> 1) & 0xFF);
	}

	return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
}

/*
 * In crosslink configuration there is a virtual partition in the
 * middle of the two switches. The BARs in this partition have to be
 * enumerated and assigned addresses.
 */
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
				    u64 *bar_addrs)
{
	struct part_cfg_regs __iomem *part_cfg =
		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
	struct pff_csr_regs __iomem *mmio_pff =
		&sndev->stdev->mmio_pff_csr[pff];
	const u64 bar_space = 0x1000000000LL;
	u64 bar_addr;
	int bar_cnt = 0;
	int i;

	iowrite16(0x6, &mmio_pff->pcicmd);

	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
		bar_addr &= ~0xf;

		dev_dbg(&sndev->stdev->dev,
			"Crosslink BAR%d addr: %llx\n",
			i, bar_addr);

		if (bar_addr != bar_space * i)
			continue;

		bar_addrs[bar_cnt++] = bar_addr;
	}

	return bar_cnt;
}

static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
	int rc;
	int bar = sndev->direct_mw_to_bar[0];
	const int ntb_lut_idx = 1;
	u64 bar_addrs[6];
	u64 addr;
	int offset;
	int bar_cnt;

	if (!crosslink_is_enabled(sndev))
		return 0;

	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
	sndev->ntb.topo = NTB_TOPO_CROSSLINK;

	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
	if (bar_cnt < sndev->nr_direct_mw + 1) {
		dev_err(&sndev->stdev->dev,
			"Error enumerating crosslink partition\n");
		return -EINVAL;
	}

	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);

	offset = addr & (LUT_SIZE - 1);
	addr -= offset;

	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
				 sndev->peer_partition, addr);
	if (rc)
		return rc;

	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
				 bar_cnt - 1);
	if (rc)
		return rc;

	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
	if (rc)
		return rc;

	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
						LUT_SIZE, LUT_SIZE);
	if (!sndev->mmio_xlink_win) {
		rc = -ENOMEM;
		return rc;
	}

	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
	sndev->nr_rsvd_luts++;

	crosslink_init_dbmsgs(sndev);

	return 0;
}

static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
{
	if (sndev->mmio_xlink_win)
		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
}

static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
{
	int i;
	int cnt = 0;

	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
		u32 r = ioread32(&ctrl->bar_entry[i].ctl);

		if (r & NTB_CTRL_BAR_VALID)
			map[cnt++] = i;
	}

	return cnt;
}

static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
				       sndev->mmio_self_ctrl);

	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
		sndev->nr_direct_mw, sndev->nr_lut_mw);

	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
					    sndev->mmio_peer_ctrl);

	sndev->peer_nr_lut_mw =
		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);

}

/*
 * There are 64 doorbells in the switch hardware, but they are shared
 * among all partitions, so we must split them in half (32 for each
 * partition). However, the message interrupts are also shared with
 * the top 4 doorbells, so we just limit this to 28 doorbells per
 * partition.
 *
 * In crosslink mode, each side has its own dbmsg register so they can
 * each use all 60 of the available doorbells.
 */
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;

	if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
		sndev->db_shift = 0;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = sndev->db_mask;
	} else if (sndev->self_partition < sndev->peer_partition) {
		sndev->db_shift = 0;
		sndev->db_peer_shift = 32;
		sndev->db_valid_mask = 0x0FFFFFFF;
	} else {
		sndev->db_shift = 32;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = 0x0FFFFFFF;
	}

	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);

	dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
		sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
}

static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		int m = i | sndev->peer_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
			  &sndev->mmio_self_dbmsg->imsg[i]);
}

static int
switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
	int req_ids[2];

	/*
	 * Root Complex Requester ID (which is 0:00.0)
	 */
	req_ids[0] = 0;

	/*
	 * Host Bridge Requester ID (as read from the mmap address)
	 */
	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);

	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
				   ARRAY_SIZE(req_ids));
}

static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
{
	int i;

	memset(sndev->self_shared, 0, LUT_SIZE);
	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
	sndev->self_shared->partition_id = sndev->stdev->partition;

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		int bar = sndev->direct_mw_to_bar[i];
		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);

		if (i == 0)
			sz = min_t(resource_size_t, sz,
				   LUT_SIZE * sndev->nr_lut_mw);

		sndev->self_shared->mw_sizes[i] = sz;
	}

	for (i = 0; i < sndev->nr_lut_mw; i++) {
		int idx = sndev->nr_direct_mw + i;

		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
	}
}

static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	int self_bar = sndev->direct_mw_to_bar[0];
	int rc;

	sndev->nr_rsvd_luts++;
	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
						 LUT_SIZE,
						 &sndev->self_shared_dma,
						 GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw\n");
		return -ENOMEM;
	}

	switchtec_ntb_init_shared(sndev);

	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
				 sndev->self_partition,
				 sndev->self_shared_dma);
	if (rc)
		goto unalloc_and_exit;

	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
	return 0;

unalloc_and_exit:
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared, sndev->self_shared_dma);

	return rc;
}

static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
{
	if (sndev->peer_shared)
		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);

	if (sndev->self_shared)
		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
				  sndev->self_shared,
				  sndev->self_shared_dma);
	sndev->nr_rsvd_luts--;
}

static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
{
	struct switchtec_ntb *sndev = dev;

	dev_dbg(&sndev->stdev->dev, "doorbell\n");

	ntb_db_event(&sndev->ntb, 0);

	return IRQ_HANDLED;
}

static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
{
	int i;
	struct switchtec_ntb *sndev = dev;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);

		if (msg & NTB_DBMSG_IMSG_STATUS) {
			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
				i, (u32)msg);
			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);

			if (i == LINK_MESSAGE)
				switchtec_ntb_check_link(sndev, msg);
		}
	}

	return IRQ_HANDLED;
}

static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
	int i;
	int rc;
	int doorbell_irq = 0;
	int message_irq = 0;
	int event_irq;
	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

	while (doorbell_irq == event_irq)
		doorbell_irq++;
	while (message_irq == doorbell_irq ||
	       message_irq == event_irq)
		message_irq++;

	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
		event_irq, doorbell_irq, message_irq);

	for (i = 0; i < idb_vecs - 4; i++)
		iowrite8(doorbell_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	for (; i < idb_vecs; i++)
		iowrite8(message_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

	rc = request_irq(sndev->doorbell_irq,
			 switchtec_ntb_doorbell_isr, 0,
			 "switchtec_ntb_doorbell", sndev);
	if (rc)
		return rc;

	rc = request_irq(sndev->message_irq,
			 switchtec_ntb_message_isr, 0,
			 "switchtec_ntb_message", sndev);
	if (rc) {
		free_irq(sndev->doorbell_irq, sndev);
		return rc;
	}

	return 0;
}

static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
	free_irq(sndev->doorbell_irq, sndev);
	free_irq(sndev->message_irq, sndev);
}

static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
{
	dev_info(&sndev->stdev->dev, "peer reinitialized\n");
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_init_mw(sndev);
	return switchtec_ntb_init_shared_mw(sndev);
}

static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
		return -ENODEV;

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	rc = switchtec_ntb_init_sndev(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_mw(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_crosslink(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto deinit_crosslink;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	/*
	 * If this host crashed, the other host may think the link is
	 * still up. Tell them to force it down (it will go back up
	 * once we register the ntb device).
	 */
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered\n");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
	switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d\n", rc);
	return rc;
}

static void switchtec_ntb_remove(struct device *dev,
				 struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	if (!sndev)
		return;

	stdev->link_notifier = NULL;
	stdev->sndev = NULL;
	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_deinit_crosslink(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered\n");
}

static struct class_interface switchtec_interface = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};

static int __init switchtec_ntb_init(void)
{
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);

static void __exit switchtec_ntb_exit(void)
{
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);