/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/_inttypes.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#ifdef PCI_IOV
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"

#define	VNIC_PF_DEVSTR		"Cavium Thunder NIC Physical Function Driver"

#define	VNIC_PF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	(((map) >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	((map) & 0xF)

/* Structure to be used by the SR-IOV for VF configuration schemas */
struct nicvf_info {
	boolean_t		vf_enabled;
	int			vf_flags;
};

struct nicpf {
	device_t		dev;
	uint8_t			node;
	u_int			flags;
	uint8_t			num_vf_en;	/* No of VF enabled */
	struct nicvf_info	vf_info[MAX_NUM_VFS_SUPPORTED];
	struct resource *	reg_base;	/* Register start address */
	struct pkind_cfg	pkind;
	uint8_t			vf_lmac_map[MAX_LMAC];
	boolean_t		mbx_lock[MAX_NUM_VFS_SUPPORTED];

	struct callout		check_link;
	struct mtx		check_link_mtx;

	uint8_t			link[MAX_LMAC];
	uint8_t			duplex[MAX_LMAC];
	uint32_t		speed[MAX_LMAC];
	uint16_t		cpi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rssi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rss_ind_tbl_size;

	/* MSI-X */
	boolean_t		msix_enabled;
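	/*
	 * Allocated vector count, per-vector IRQ resources/handlers and
	 * the MSI-X table BAR mapping; filled in by nic_enable_msix() and
	 * nic_register_interrupts() below.
	 */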
	uint8_t			num_vec;
	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
	struct resource *	msix_table_res;
};

static int nicpf_probe(device_t);
static int nicpf_attach(device_t);
static int nicpf_detach(device_t);

#ifdef PCI_IOV
static int nicpf_iov_init(device_t, uint16_t, const nvlist_t *);
static void nicpf_iov_uninit(device_t);
static int nicpf_iov_add_vf(device_t, uint16_t, const nvlist_t *);
#endif

static device_method_t nicpf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicpf_probe),
	DEVMETHOD(device_attach,	nicpf_attach),
	DEVMETHOD(device_detach,	nicpf_detach),
	/* PCI SR-IOV interface */
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init,		nicpf_iov_init),
	DEVMETHOD(pci_iov_uninit,	nicpf_iov_uninit),
	DEVMETHOD(pci_iov_add_vf,	nicpf_iov_add_vf),
#endif
	DEVMETHOD_END,
};

static driver_t vnicpf_driver = {
	"vnicpf",
	nicpf_methods,
	sizeof(struct nicpf),
};

static devclass_t vnicpf_devclass;

DRIVER_MODULE(vnicpf, pci, vnicpf_driver, vnicpf_devclass, 0, 0);
MODULE_VERSION(vnicpf, 1);
MODULE_DEPEND(vnicpf, pci, 1, 1, 1);
MODULE_DEPEND(vnicpf, ether, 1, 1, 1);
MODULE_DEPEND(vnicpf, thunder_bgx, 1, 1, 1);

static int nicpf_alloc_res(struct nicpf *);
static void nicpf_free_res(struct nicpf *);
static void nic_set_lmac_vf_mapping(struct nicpf *);
static void nic_init_hw(struct nicpf *);
static int nic_sriov_init(device_t, struct nicpf *);
static void nic_poll_for_link(void *);
static int nic_register_interrupts(struct nicpf *);
static void nic_unregister_interrupts(struct nicpf *);

/*
 * Device interface
 */
static int
nicpf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_NIC_PF) {
		device_set_desc(dev, VNIC_PF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
nicpf_attach(device_t dev)
{
	struct nicpf *nic;
	int err;

	nic = device_get_softc(dev);
	nic->dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate PCI resources */
	err = nicpf_alloc_res(nic);
	if (err != 0) {
		device_printf(dev, "Could not allocate PCI resources\n");
		return (err);
	}

	nic->node = nic_get_node_id(nic->reg_base);

	/* Enable Traffic Network Switch (TNS) bypass mode by default */
	nic->flags &= ~NIC_TNS_ENABLED;
	nic_set_lmac_vf_mapping(nic);

	/* Initialize hardware */
	nic_init_hw(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	/* Setup interrupts */
	err = nic_register_interrupts(nic);
	if (err != 0)
		goto err_free_res;

	/* Configure SRIOV */
	err = nic_sriov_init(dev, nic);
	if (err != 0)
		goto err_free_intr;

	if (nic->flags & NIC_TNS_ENABLED)
		return (0);

	mtx_init(&nic->check_link_mtx, "VNIC PF link poll", NULL, MTX_DEF);
	/* Register physical link status poll callout */
	callout_init_mtx(&nic->check_link, &nic->check_link_mtx, 0);
	mtx_lock(&nic->check_link_mtx);
	nic_poll_for_link(nic);
	mtx_unlock(&nic->check_link_mtx);

	return (0);

err_free_intr:
	nic_unregister_interrupts(nic);
err_free_res:
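	/* Unwind in reverse order of allocation. */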
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

	return (err);
}

static int
nicpf_detach(device_t dev)
{
	struct nicpf *nic;
	int err;

	err = 0;
	nic = device_get_softc(dev);

	callout_drain(&nic->check_link);
	mtx_destroy(&nic->check_link_mtx);

	nic_unregister_interrupts(nic);
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

#ifdef PCI_IOV
	err = pci_iov_detach(dev);
	if (err != 0)
		device_printf(dev, "SR-IOV in use. Detach first.\n");
#endif
	return (err);
}

/*
 * SR-IOV interface
 */
#ifdef PCI_IOV
static int
nicpf_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	struct nicpf *nic;

	nic = device_get_softc(dev);

	if (num_vfs == 0)
		return (ENXIO);

	nic->flags |= NIC_SRIOV_ENABLED;

	return (0);
}

static void
nicpf_iov_uninit(device_t dev)
{

	/* ARM64TODO: Implement this function */
}

static int
nicpf_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	const void *mac;
	struct nicpf *nic;
	size_t size;
	int bgx, lmac;

	nic = device_get_softc(dev);

	if ((nic->flags & NIC_SRIOV_ENABLED) == 0)
		return (ENXIO);

	if (vfnum > (nic->num_vf_en - 1))
		return (EINVAL);

	if (nvlist_exists_binary(params, "mac-addr") != 0) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mac);
	}

	return (0);
}
#endif

/*
 * Helper routines
 */
static int
nicpf_alloc_res(struct nicpf *nic)
{
	device_t dev;
	int rid;

	dev = nic->dev;

	rid = VNIC_PF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		/* For verbose output print some more details */
		if (bootverbose) {
			device_printf(dev,
			    "Could not allocate registers memory\n");
		}
		return (ENXIO);
	}

	return (0);
}

static void
nicpf_free_res(struct nicpf *nic)
{
	device_t dev;

	dev = nic->dev;

	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}
}

/* Register read/write APIs */
static __inline void
nic_reg_write(struct nicpf *nic, bus_space_handle_t offset,
    uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

static __inline uint64_t
nic_reg_read(struct nicpf *nic, uint64_t offset)
{
	uint64_t val;

	val = bus_read_8(nic->reg_base, offset);
	return (val);
}

/* PF -> VF mailbox communication APIs */
static void
nic_enable_mbx_intr(struct nicpf *nic)
{

	/* Enable mailbox interrupt for all 128 VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0UL);
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(uint64_t), ~0UL);
}

static void
nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{

	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), (1UL << vf));
}

static uint64_t
nic_get_mbx_addr(int vf)
{

	return (NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT));
}

/*
 * Send a mailbox message to VF
 * @vf: VF to which this message is to be sent
 * @mbx: Message to be sent
 */
static void
nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	bus_space_handle_t mbx_addr = nic_get_mbx_addr(vf);
	uint64_t *msg = (uint64_t *)mbx;

	/*
	 * In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->dev)) {
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
	} else {
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
	}
}

/*
 * Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void
nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	if (nic->flags & NIC_TNS_ENABLED)
		mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
	else
		mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac != NULL) {
			memcpy((uint8_t *)&mbx.nic_cfg.mac_addr, mac,
			    ETHER_ADDR_LEN);
		}
	}
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * ACKs VF's mailbox message
 * @vf: VF to which ACK is to be sent
 */
static void
nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * NACKs VF's mailbox message to indicate that the PF was
 * not able to complete the action
 * @vf: VF to which NACK is to be sent
 */
static void
nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Flush all in-flight receive packets to memory and
 * bring down an active RQ
 */
static int
nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	uint16_t timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		device_printf(nic->dev, "Receive queue software sync failed\n");
		return (ETIMEDOUT);
	}
	return (0);
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void
nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx != 0) {
		mbx.bgx_stats.stats =
		    bgx_get_rx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	} else {
		mbx.bgx_stats.stats =
		    bgx_get_tx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	}
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

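/*
 * Illustrative sketch (not code) of one mailbox round trip as implemented
 * by the routines above and by nic_handle_mbx_intr() below; all names are
 * from this file:
 *
 *   VF                                 PF
 *   -- NIC_MBOX_MSG_BGX_STATS -->      nic_handle_mbx_intr() reads the
 *                                      per-VF mailbox window,
 *                                      nic_get_bgx_stats() builds a reply,
 *   <-- NIC_MBOX_MSG_BGX_STATS --      nic_send_msg_to_vf() writes it back.
 *
 * Requests answered with a full reply (READY, RSS_SIZE, BGX_STATS) skip the
 * generic ACK; the rest are ACKed or NACKed at the end of the handler.
 */
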
/* Update hardware min/max frame size */
static int
nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{

	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		device_printf(nic->dev,
		    "Invalid MTU setting from VF%d rejected, "
		    "should be between %d and %d\n",
		    vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return (EINVAL);
	}
	new_frs += ETHER_HDR_LEN;
	if (new_frs <= nic->pkind.maxlen)
		return (0);

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(uint64_t *)&nic->pkind);
	return (0);
}

/* Set minimum transmit packet size */
static void
nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	uint64_t lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}

/*
 * Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void
nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	uint64_t lmac_credit;

	nic->num_vf_en = 0;
	if (nic->flags & NIC_TNS_ENABLED) {
		nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
		return;
	}

	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
		if ((bgx_map & (1 << bgx)) == 0)
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
			    NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1UL << 1);	/* channel credit enable */
		lmac_credit |= (0x1ff << 2);	/* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
		    NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) {
			nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
			    lmac_credit);
		}
	}
}

#define	TNS_PORT0_BLOCK	6
#define	TNS_PORT1_BLOCK	7
#define	BGX0_BLOCK	8
#define	BGX1_BLOCK	9

static void
nic_init_hw(struct nicpf *nic)
{
	int i;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1UL << 6) | 0x03);

	if (nic->flags & NIC_TNS_ENABLED) {
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_MODE << 7) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_MODE << 7) | TNS_PORT1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | TNS_PORT1_BLOCK);
	} else {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | BGX1_BLOCK);
	}

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETHER_HDR_LEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++) {
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
		    *(uint64_t *)&nic->pkind);
	}

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
	    (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETHERTYPE_VLAN);
}

/* Channel parse index configuration */
static void
nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	uint32_t vnic, bgx, lmac, chan;
	uint32_t padd, cpi_count = 0;
	uint64_t cpi_base, cpi, rssi_base, rssi;
	uint8_t qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
	    (1UL << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
	    ((uint64_t)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6 bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->dev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (padd << 16) | (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}

/* Responds to VF with its RSS indirection table size */
static void
nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

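	/* The reply carries the per-VF indirection table size set in attach. */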
	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Receive side scaling configuration
 * configure:
 * - RSS index
 * - indir table, i.e. hash::RQ mapping
 * - number of hash bits to consider
 */
static void
nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	uint8_t qset, idx;
	uint64_t cpi_cfg, cpi_base, rssi_base, rssi;
	uint64_t idx_addr;

	idx = 0;
	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->dev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFUL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/*
 * 4-level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void
nic_tx_channel_cfg(struct nicpf *nic, uint8_t vnic, struct sq_cfg_msg *sq)
{
	uint32_t bgx, lmac, chan;
	uint32_t tl2, tl3, tl4;
	uint32_t rr_quantum;
	uint8_t sq_idx = sq->sq_num;
	uint8_t pqs_vnic;

	pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	tl4 += sq_idx;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
	    ((uint64_t)vnic << NIC_QS_ID_SHIFT) |
	    ((uint32_t)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
	    ((uint64_t)vnic << 27) | ((uint32_t)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}

static int
nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	if (lbk->vf_id >= MAX_LMAC)
		return (ENXIO);

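	/*
	 * vf_lmac_map[] packs the pair as encoded by NIC_SET_VF_LMAC_MAP():
	 * BGX index in the high nibble, LMAC index in the low nibble.
	 */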
	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return (0);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void
nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	uint64_t reg_addr;
	uint64_t cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = TRUE;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (uint64_t *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < MAX_LMAC) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
		    (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
		    (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs, mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT: /* fall through */
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic->vf_info[vf].vf_enabled = TRUE;
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		nic->vf_info[vf].vf_enabled = FALSE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		device_printf(nic->dev,
		    "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (ret == 0)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = FALSE;
}

static void
nic_mbx_intr_handler(struct nicpf *nic, int mbx)
{
	uint64_t intr;
	uint8_t vf, vf_per_mbx_reg = 64;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1UL << vf)) {
			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
}

static void
nic_mbx0_intr_handler(void *arg)
{
	struct nicpf *nic = (struct nicpf *)arg;

	nic_mbx_intr_handler(nic, 0);
}

static void
nic_mbx1_intr_handler(void *arg)
{
	struct nicpf *nic = (struct nicpf *)arg;

	nic_mbx_intr_handler(nic, 1);
}

static int
nic_enable_msix(struct nicpf *nic)
{
	struct pci_devinfo *dinfo;
	int rid, count;
	int ret;

	dinfo = device_get_ivars(nic->dev);
	rid = dinfo->cfg.msix.msix_table_bar;
	nic->msix_table_res =
	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (nic->msix_table_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for MSI-X table\n");
		return (ENXIO);
	}

	count = nic->num_vec = NIC_PF_MSIX_VECTORS;

	ret = pci_alloc_msix(nic->dev, &count);
	if ((ret != 0) || (count != nic->num_vec)) {
		device_printf(nic->dev,
		    "Request for #%d msix vectors failed, error: %d\n",
		    nic->num_vec, ret);
		return (ret != 0 ? ret : ENXIO);
	}

	nic->msix_enabled = 1;
	return (0);
}

static void
nic_disable_msix(struct nicpf *nic)
{

	if (nic->msix_enabled) {
		pci_release_msi(nic->dev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}

	if (nic->msix_table_res != NULL) {
		bus_release_resource(nic->dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->msix_table_res), nic->msix_table_res);
		nic->msix_table_res = NULL;
	}
}

static void
nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->msix_entries[irq].irq_res == NULL)
			continue;
		if (nic->msix_entries[irq].handle != NULL) {
			bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ, irq + 1,
		    nic->msix_entries[irq].irq_res);
	}
}

static int
nic_register_interrupts(struct nicpf *nic)
{
	int irq, rid;
	int ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret != 0)
		return (ret);

	/* Register mailbox interrupt handlers */
	irq = NIC_PF_INTR_ID_MBOX0;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		ret = ENXIO;
		goto fail;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx0_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0)
		goto fail;

	irq = NIC_PF_INTR_ID_MBOX1;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		ret = ENXIO;
		goto fail;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx1_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0)
		goto fail;

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return (0);

fail:
	nic_free_all_interrupts(nic);
	return (ret);
}

static void
nic_unregister_interrupts(struct nicpf *nic)
{

	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}

static int
nic_sriov_init(device_t dev, struct nicpf *nic)
{
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
	int iov_pos;
	int err;
	uint16_t total_vf_cnt;

	err = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos);
	if (err != 0) {
		device_printf(dev,
		    "SR-IOV capability is not found in PCIe config space\n");
		return (err);
	}
	/* Fix-up the number of enabled VFs */
	total_vf_cnt = pci_read_config(dev, iov_pos + PCIR_SRIOV_TOTAL_VFS, 2);
	if (total_vf_cnt == 0)
		return (ENXIO);

	/* Attach SR-IOV */
	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	/*
	 * All VFs can change their MACs.
	 * This flag will be ignored but we set it just for the record.
	 */
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, TRUE);

	err = pci_iov_attach(dev, pf_schema, vf_schema);
	if (err != 0) {
		device_printf(dev,
		    "Failed to initialize SR-IOV (error=%d)\n",
		    err);
		return (err);
	}
#endif
	return (0);
}

/*
 * Poll for BGX LMAC link status and update the corresponding VF
 * if there is a change. Valid only if the internal L2 switch is
 * not present; otherwise the VF link is always treated as up.
 */
static void
nic_poll_for_link(void *arg)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	uint8_t vf, bgx, lmac;

	nic = (struct nicpf *)arg;

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_info[vf].vf_enabled)
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	callout_reset(&nic->check_link, hz * 2, nic_poll_for_link, nic);
}
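
/*
 * Usage sketch, not part of the driver: with this PF attached as, e.g.,
 * "vnicpf0", VFs are created through the SR-IOV methods above using
 * iovctl(8). A hypothetical iovctl.conf honoring the VF schema declared
 * in nic_sriov_init() could look like:
 *
 *	PF {
 *		device : "vnicpf0";
 *		num_vfs : 8;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "06:a0:00:00:00:01";
 *	}
 *
 * The "mac-addr" parameter is delivered to nicpf_iov_add_vf(), which
 * programs it into the BGX LMAC backing that VF.
 */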