/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Marvell/Cavium ThunderX vnic/bgx network controller
 *
 * UNIMPLEMENTED FEATURES
 * ----------------------
 * A number of features supported by the hardware are not yet implemented in
 * this driver:
 *
 * - PR223573 multicast rx filter
 * - PR223575 non-promiscuous mode (driver currently forces promisc)
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/_inttypes.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#ifdef PCI_IOV
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"

#define	VNIC_PF_DEVSTR		"Cavium Thunder NIC Physical Function Driver"

#define	VNIC_PF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	(((map) >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	((map) & 0xF)

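/*
 * Worked example: NIC_SET_VF_LMAC_MAP(1, 2) packs BGX index 1 and LMAC
 * index 2 into 0x12; the GET macros recover them from the high and low
 * nibbles respectively.
 */
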
/* Structure to be used by the SR-IOV for VF configuration schemas */
struct nicvf_info {
	boolean_t		vf_enabled;
	int			vf_flags;
};

struct nicpf {
	device_t		dev;
	uint8_t			node;
	u_int			flags;
	uint8_t			num_vf_en;	/* Number of VFs enabled */
	struct nicvf_info	vf_info[MAX_NUM_VFS_SUPPORTED];
	struct resource *	reg_base;	/* Register start address */
	struct pkind_cfg	pkind;
	uint8_t			vf_lmac_map[MAX_LMAC];
	boolean_t		mbx_lock[MAX_NUM_VFS_SUPPORTED];

	struct callout		check_link;
	struct mtx		check_link_mtx;

	uint8_t			link[MAX_LMAC];
	uint8_t			duplex[MAX_LMAC];
	uint32_t		speed[MAX_LMAC];
	uint16_t		cpi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rssi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rss_ind_tbl_size;

	/* MSI-X */
	boolean_t		msix_enabled;
	uint8_t			num_vec;
	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
	struct resource *	msix_table_res;
};

static int nicpf_probe(device_t);
static int nicpf_attach(device_t);
static int nicpf_detach(device_t);

#ifdef PCI_IOV
static int nicpf_iov_init(device_t, uint16_t, const nvlist_t *);
static void nicpf_iov_uninit(device_t);
static int nicpf_iov_add_vf(device_t, uint16_t, const nvlist_t *);
#endif

static device_method_t nicpf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicpf_probe),
	DEVMETHOD(device_attach,	nicpf_attach),
	DEVMETHOD(device_detach,	nicpf_detach),
	/* PCI SR-IOV interface */
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init,		nicpf_iov_init),
	DEVMETHOD(pci_iov_uninit,	nicpf_iov_uninit),
	DEVMETHOD(pci_iov_add_vf,	nicpf_iov_add_vf),
#endif
	DEVMETHOD_END,
};

static driver_t vnicpf_driver = {
	"vnicpf",
	nicpf_methods,
	sizeof(struct nicpf),
};

DRIVER_MODULE(vnicpf, pci, vnicpf_driver, 0, 0);
MODULE_VERSION(vnicpf, 1);
MODULE_DEPEND(vnicpf, pci, 1, 1, 1);
MODULE_DEPEND(vnicpf, ether, 1, 1, 1);
MODULE_DEPEND(vnicpf, thunder_bgx, 1, 1, 1);

static int nicpf_alloc_res(struct nicpf *);
static void nicpf_free_res(struct nicpf *);
static void nic_set_lmac_vf_mapping(struct nicpf *);
static void nic_init_hw(struct nicpf *);
static int nic_sriov_init(device_t, struct nicpf *);
static void nic_poll_for_link(void *);
static int nic_register_interrupts(struct nicpf *);
static void nic_unregister_interrupts(struct nicpf *);

/*
 * Device interface
 */
static int
nicpf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_NIC_PF) {
		device_set_desc(dev, VNIC_PF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

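/*
 * Attach allocates the register BAR, initializes the HW block, registers
 * the mailbox interrupts and then configures SR-IOV; the error labels at
 * the bottom unwind in reverse order of acquisition.
 */
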
static int
nicpf_attach(device_t dev)
{
	struct nicpf *nic;
	int err;

	nic = device_get_softc(dev);
	nic->dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate PCI resources */
	err = nicpf_alloc_res(nic);
	if (err != 0) {
		device_printf(dev, "Could not allocate PCI resources\n");
		return (err);
	}

	nic->node = nic_get_node_id(nic->reg_base);

	/* Enable Traffic Network Switch (TNS) bypass mode by default */
	nic->flags &= ~NIC_TNS_ENABLED;
	nic_set_lmac_vf_mapping(nic);

	/* Initialize hardware */
	nic_init_hw(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	/* Set up interrupts */
	err = nic_register_interrupts(nic);
	if (err != 0)
		goto err_free_res;

	/* Configure SR-IOV */
	err = nic_sriov_init(dev, nic);
	if (err != 0)
		goto err_free_intr;

	if (nic->flags & NIC_TNS_ENABLED)
		return (0);

	mtx_init(&nic->check_link_mtx, "VNIC PF link poll", NULL, MTX_DEF);
	/* Register physical link status poll callout */
	callout_init_mtx(&nic->check_link, &nic->check_link_mtx, 0);
	mtx_lock(&nic->check_link_mtx);
	nic_poll_for_link(nic);
	mtx_unlock(&nic->check_link_mtx);

	return (0);

err_free_intr:
	nic_unregister_interrupts(nic);
err_free_res:
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

	return (err);
}

static int
nicpf_detach(device_t dev)
{
	struct nicpf *nic;
	int err;

	err = 0;
	nic = device_get_softc(dev);

	callout_drain(&nic->check_link);
	mtx_destroy(&nic->check_link_mtx);

	nic_unregister_interrupts(nic);
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

#ifdef PCI_IOV
	err = pci_iov_detach(dev);
	if (err != 0)
		device_printf(dev, "SR-IOV in use. Detach first.\n");
#endif
	return (err);
}

/*
 * SR-IOV interface
 */
#ifdef PCI_IOV
static int
nicpf_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	struct nicpf *nic;

	nic = device_get_softc(dev);

	if (num_vfs == 0)
		return (ENXIO);

	nic->flags |= NIC_SRIOV_ENABLED;

	return (0);
}

static void
nicpf_iov_uninit(device_t dev)
{

	/* ARM64TODO: Implement this function */
}

static int
nicpf_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	const void *mac;
	struct nicpf *nic;
	size_t size;
	int bgx, lmac;

	nic = device_get_softc(dev);

	if ((nic->flags & NIC_SRIOV_ENABLED) == 0)
		return (ENXIO);

	if (vfnum > (nic->num_vf_en - 1))
		return (EINVAL);

	if (nvlist_exists_binary(params, "mac-addr") != 0) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mac);
	}

	return (0);
}
#endif

/*
 * Helper routines
 */
static int
nicpf_alloc_res(struct nicpf *nic)
{
	device_t dev;
	int rid;

	dev = nic->dev;

	rid = VNIC_PF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		/* For verbose output print some more details */
		if (bootverbose) {
			device_printf(dev,
			    "Could not allocate register memory\n");
		}
		return (ENXIO);
	}

	return (0);
}

static void
nicpf_free_res(struct nicpf *nic)
{
	device_t dev;

	dev = nic->dev;

	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}
}

/* Register read/write APIs */
static __inline void
nic_reg_write(struct nicpf *nic, bus_space_handle_t offset,
    uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

static __inline uint64_t
nic_reg_read(struct nicpf *nic, uint64_t offset)
{
	uint64_t val;

	val = bus_read_8(nic->reg_base, offset);
	return (val);
}

/* PF -> VF mailbox communication APIs */
static void
nic_enable_mbx_intr(struct nicpf *nic)
{

	/* Enable mailbox interrupt for all 128 VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0UL);
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(uint64_t), ~0UL);
}

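/*
 * Mailbox interrupt status lives in two 64-bit registers spaced 8 bytes
 * apart (hence mbx_reg << 3), one bit per VF.  Writing a VF's bit back is
 * assumed here to acknowledge it, the usual write-1-to-clear convention
 * for this hardware's INT registers.
 */
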
static void
nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{

	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), (1UL << vf));
}

static uint64_t
nic_get_mbx_addr(int vf)
{

	return (NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT));
}

/*
 * Send a mailbox message to VF
 * @vf: VF to which this message is to be sent
 * @mbx: Message to be sent
 */
static void
nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	bus_space_handle_t mbx_addr = nic_get_mbx_addr(vf);
	uint64_t *msg = (uint64_t *)mbx;

	/*
	 * In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->dev)) {
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
	} else {
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
	}
}

/*
 * Responds to VF's READY message with VF's
 * ID, node, MAC address, etc.
 * @vf: VF which sent READY message
 */
static void
nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	if (nic->flags & NIC_TNS_ENABLED)
		mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
	else
		mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac != NULL) {
			memcpy((uint8_t *)&mbx.nic_cfg.mac_addr, mac,
			    ETHER_ADDR_LEN);
		}
	}
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * ACKs VF's mailbox message
 * @vf: VF to which ACK is to be sent
 */
static void
nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * NACKs VF's mailbox message to indicate that the PF is not
 * able to complete the requested action
 * @vf: VF to which NACK is to be sent
 */
static void
nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Flush all in-flight receive packets to memory and
 * bring down an active RQ
 */
static int
nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	uint16_t timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait until the sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		device_printf(nic->dev, "Receive queue software sync failed\n");
		return (ETIMEDOUT);
	}
	return (0);
}

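/*
 * The statistics reply below echoes the request's vf_id/rx/idx selector
 * fields and carries the selected 64-bit BGX counter; bgx->rx chooses
 * between the receive and transmit counter banks.
 */
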
/* Get BGX Rx/Tx stats and respond to VF's request */
static void
nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx != 0) {
		mbx.bgx_stats.stats =
		    bgx_get_rx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	} else {
		mbx.bgx_stats.stats =
		    bgx_get_tx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	}
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int
nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{

	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		device_printf(nic->dev,
		    "Invalid MTU setting from VF%d rejected, "
		    "should be between %d and %d\n",
		    vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return (EINVAL);
	}
	new_frs += ETHER_HDR_LEN;
	if (new_frs <= nic->pkind.maxlen)
		return (0);

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(uint64_t *)&nic->pkind);
	return (0);
}

/* Set minimum transmit packet size */
static void
nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	uint64_t lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		/* The pad size is programmed in 4-byte units in bits 5:2 */
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}

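/*
 * Illustrative mapping, assuming two active BGX blocks with 4 LMACs each:
 * VFs 0-3 attach to BGX0/LMAC0-3 and VFs 4-7 to BGX1/LMAC0-3.  The credit
 * value programmed below shares the 48KB BGX Tx buffer evenly between the
 * LMACs of a BGX, in 16-byte units, after reserving one maximum-sized
 * frame per LMAC.
 */
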
/*
 * Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void
nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	uint64_t lmac_credit;

	nic->num_vf_en = 0;
	if (nic->flags & NIC_TNS_ENABLED) {
		nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
		return;
	}

	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
		if ((bgx_map & (1 << bgx)) == 0)
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
			    NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1UL << 1);	/* channel credit enable */
		lmac_credit |= (0x1ff << 2);	/* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
		    NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) {
			nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
			    lmac_credit);
		}
	}
}

#define	TNS_PORT0_BLOCK	6
#define	TNS_PORT1_BLOCK	7
#define	BGX0_BLOCK	8
#define	BGX1_BLOCK	9

static void
nic_init_hw(struct nicpf *nic)
{
	int i;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1UL << 6) | 0x03);

	if (nic->flags & NIC_TNS_ENABLED) {
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_MODE << 7) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_MODE << 7) | TNS_PORT1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | TNS_PORT1_BLOCK);
	} else {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | BGX1_BLOCK);
	}

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETHER_HDR_LEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++) {
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
		    *(uint64_t *)&nic->pkind);
	}

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
	    (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETHERTYPE_VLAN);
}

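/*
 * Each VF is assigned a contiguous block of CPIs starting at cpi_base.
 * The block size follows from the classification algorithm: 1 for
 * CPI_ALG_NONE, 8 for CPI_ALG_VLAN (PCP), 16 for CPI_ALG_VLAN16
 * (PCP + DEI) and NIC_MAX_CPI_PER_LMAC for CPI_ALG_DIFF (DSCP).  Each CPI
 * is then pointed at an RSS indirection entry relative to rssi_base.
 */
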
/* Channel parse index configuration */
static void
nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	uint32_t vnic, bgx, lmac, chan;
	uint32_t padd, cpi_count = 0;
	uint64_t cpi_base, cpi, rssi_base, rssi;
	uint8_t qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
	    (1UL << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
	    ((uint64_t)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6 bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->dev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (padd << 16) | (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}

/* Responds to VF with its RSS indirection table size */
static void
nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Receive side scaling configuration
 * configure:
 * - RSS index
 * - indirection table, i.e., hash::RQ mapping
 * - number of hash bits to consider
 */
static void
nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	uint8_t qset, idx;
	uint64_t cpi_cfg, cpi_base, rssi_base, rssi;
	uint64_t idx_addr;

	idx = 0;
	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->dev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFUL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

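/*
 * The TL indices funnel down in fixed 4:1 ratios, which the sample
 * configuration below reflects: tl3 = tl4 / 4 (TL4 0-1023 onto TL3 0-255)
 * and tl2 = tl3 >> 2 (TL3 onto TL2 0-63).
 */
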
/*
 * 4-level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void
nic_tx_channel_cfg(struct nicpf *nic, uint8_t vnic, struct sq_cfg_msg *sq)
{
	uint32_t bgx, lmac, chan;
	uint32_t tl2, tl3, tl4;
	uint32_t rr_quantum;
	uint8_t sq_idx = sq->sq_num;
	uint8_t pqs_vnic;

	pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	tl4 += sq_idx;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
	    ((uint64_t)vnic << NIC_QS_ID_SHIFT) |
	    ((uint32_t)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
	    ((uint64_t)vnic << 27) | ((uint32_t)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}

static int
nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	/* vf_lmac_map[] has only MAX_LMAC entries, so reject vf_id == MAX_LMAC too */
	if (lbk->vf_id >= MAX_LMAC)
		return (ENXIO);

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return (0);
}

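/*
 * Reply convention for the handler below: ret == 0 sends an ACK and any
 * other value a NACK, with two exceptions.  READY sets ret to 1 purely to
 * suppress the redundant ACK (its reply is the READY response itself),
 * and messages answered with a dedicated reply, or needing none, jump
 * straight to the unlock label.
 */
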
/* Interrupt handler to handle mailbox messages from VFs */
static void
nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	uint64_t reg_addr;
	uint64_t cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = TRUE;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (uint64_t *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < MAX_LMAC) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
		    (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
		    (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs, mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT: /* fall through */
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic->vf_info[vf].vf_enabled = TRUE;
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		nic->vf_info[vf].vf_enabled = FALSE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		device_printf(nic->dev,
		    "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (ret == 0)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = FALSE;
}

static void
nic_mbx_intr_handler(struct nicpf *nic, int mbx)
{
	uint64_t intr;
	uint8_t vf, vf_per_mbx_reg = 64;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1UL << vf)) {
			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
}

static void
nic_mbx0_intr_handler(void *arg)
{
	struct nicpf *nic = (struct nicpf *)arg;

	nic_mbx_intr_handler(nic, 0);
}

static void
nic_mbx1_intr_handler(void *arg)
{
	struct nicpf *nic = (struct nicpf *)arg;

	nic_mbx_intr_handler(nic, 1);
}

static int
nic_enable_msix(struct nicpf *nic)
{
	struct pci_devinfo *dinfo;
	int rid, count;
	int ret;

	dinfo = device_get_ivars(nic->dev);
	rid = dinfo->cfg.msix.msix_table_bar;
	nic->msix_table_res =
	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (nic->msix_table_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for MSI-X table\n");
		return (ENXIO);
	}

	count = nic->num_vec = NIC_PF_MSIX_VECTORS;

	ret = pci_alloc_msix(nic->dev, &count);
	if ((ret != 0) || (count != nic->num_vec)) {
		device_printf(nic->dev,
		    "Request for %d MSI-X vectors failed, error: %d\n",
		    nic->num_vec, ret);
		/* Do not report success if fewer vectors were allocated */
		return (ret != 0 ? ret : ENXIO);
	}

	nic->msix_enabled = 1;
	return (0);
}

static void
nic_disable_msix(struct nicpf *nic)
{
	if (nic->msix_enabled) {
		pci_release_msi(nic->dev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}

	/* The table resource may be absent if MSI-X setup failed early */
	if (nic->msix_table_res != NULL) {
		bus_release_resource(nic->dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->msix_table_res), nic->msix_table_res);
	}
}

static void
nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->msix_entries[irq].irq_res == NULL)
			continue;
		if (nic->msix_entries[irq].handle != NULL) {
			bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ, irq + 1,
		    nic->msix_entries[irq].irq_res);
	}
}

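/*
 * Interrupt resources below are allocated with rid = vector + 1; the same
 * convention is relied upon by nic_free_all_interrupts() when releasing
 * them.
 */
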
static int
nic_register_interrupts(struct nicpf *nic)
{
	int irq, rid;
	int ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret != 0)
		return (ret);

	/* Register mailbox interrupt handlers */
	irq = NIC_PF_INTR_ID_MBOX0;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		ret = ENXIO;
		goto fail;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx0_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0)
		goto fail;

	irq = NIC_PF_INTR_ID_MBOX1;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		ret = ENXIO;
		goto fail;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx1_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0)
		goto fail;

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return (0);

fail:
	nic_free_all_interrupts(nic);
	return (ret);
}

static void
nic_unregister_interrupts(struct nicpf *nic)
{

	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}

static int
nic_sriov_init(device_t dev, struct nicpf *nic)
{
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
	int iov_pos;
	int err;
	uint16_t total_vf_cnt;

	err = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos);
	if (err != 0) {
		device_printf(dev,
		    "SR-IOV capability not found in PCIe config space\n");
		return (err);
	}
	/* Fix-up the number of enabled VFs */
	total_vf_cnt = pci_read_config(dev, iov_pos + PCIR_SRIOV_TOTAL_VFS, 2);
	if (total_vf_cnt == 0)
		return (ENXIO);

	/* Attach SR-IOV */
	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	/*
	 * All VFs can change their MACs.
	 * This flag will be ignored but we set it just for the record.
	 */
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, TRUE);

	err = pci_iov_attach(dev, pf_schema, vf_schema);
	if (err != 0) {
		device_printf(dev,
		    "Failed to initialize SR-IOV (error=%d)\n",
		    err);
		return (err);
	}
#endif
	return (0);
}

/*
 * Poll for BGX LMAC link status and update the corresponding VF
 * if there is a change.  Only meaningful when the internal L2 switch
 * (TNS) is not present; with TNS the VF link is always treated as up.
 */
static void
nic_poll_for_link(void *arg)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	uint8_t vf, bgx, lmac;

	nic = (struct nicpf *)arg;

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_info[vf].vf_enabled)
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mailbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	/* Re-arm: poll the link state again in 2 seconds */
	callout_reset(&nic->check_link, hz * 2, nic_poll_for_link, nic);
}