/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Marvell/Cavium ThunderX vnic/bgx network controller
 *
 * UNIMPLEMENTED FEATURES
 * ----------------------
 * A number of features supported by the hardware are not yet implemented in
 * this driver:
 *
 * - PR223573 multicast rx filter
 * - PR223575 non-promiscuous mode (driver currently forces promisc)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/_inttypes.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#ifdef PCI_IOV
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"

#define	VNIC_PF_DEVSTR		"Cavium Thunder NIC Physical Function Driver"

#define	VNIC_PF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)		\
    ((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	(((map) >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	((map) & 0xF)
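
/*
 * Illustrative compile-time check (not required by the hardware): the map
 * byte packs the BGX index into the high nibble and the LMAC index into the
 * low nibble, so a VF on BGX1/LMAC2 is encoded as 0x12 and the two getters
 * recover 1 and 2 respectively.
 */
_Static_assert(NIC_SET_VF_LMAC_MAP(1, 2) == 0x12,
    "BGX/LMAC nibble packing in the VF map");
_Static_assert(NIC_GET_BGX_FROM_VF_LMAC_MAP(0x12) == 1 &&
    NIC_GET_LMAC_FROM_VF_LMAC_MAP(0x12) == 2,
    "BGX/LMAC nibble unpacking from the VF map");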

/* Per-VF state used by the SR-IOV configuration code */
struct nicvf_info {
	boolean_t		vf_enabled;
	int			vf_flags;
};

struct nicpf {
	device_t		dev;
	uint8_t			node;
	u_int			flags;
	uint8_t			num_vf_en;	/* Number of enabled VFs */
	struct nicvf_info	vf_info[MAX_NUM_VFS_SUPPORTED];
	struct resource *	reg_base;	/* Register start address */
	struct pkind_cfg	pkind;
	uint8_t			vf_lmac_map[MAX_LMAC];
	boolean_t		mbx_lock[MAX_NUM_VFS_SUPPORTED];

	struct callout		check_link;
	struct mtx		check_link_mtx;

	uint8_t			link[MAX_LMAC];
	uint8_t			duplex[MAX_LMAC];
	uint32_t		speed[MAX_LMAC];
	uint16_t		cpi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rssi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rss_ind_tbl_size;

	/* MSI-X */
	boolean_t		msix_enabled;
	uint8_t			num_vec;
	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
	struct resource *	msix_table_res;
};

static int nicpf_probe(device_t);
static int nicpf_attach(device_t);
static int nicpf_detach(device_t);

#ifdef PCI_IOV
static int nicpf_iov_init(device_t, uint16_t, const nvlist_t *);
static void nicpf_iov_uninit(device_t);
static int nicpf_iov_add_vf(device_t, uint16_t, const nvlist_t *);
#endif

static device_method_t nicpf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicpf_probe),
	DEVMETHOD(device_attach,	nicpf_attach),
	DEVMETHOD(device_detach,	nicpf_detach),
	/* PCI SR-IOV interface */
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init,		nicpf_iov_init),
	DEVMETHOD(pci_iov_uninit,	nicpf_iov_uninit),
	DEVMETHOD(pci_iov_add_vf,	nicpf_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t vnicpf_driver = {
	"vnicpf",
	nicpf_methods,
	sizeof(struct nicpf),
};

DRIVER_MODULE(vnicpf, pci, vnicpf_driver, 0, 0);
MODULE_VERSION(vnicpf, 1);
MODULE_DEPEND(vnicpf, pci, 1, 1, 1);
MODULE_DEPEND(vnicpf, ether, 1, 1, 1);
MODULE_DEPEND(vnicpf, thunder_bgx, 1, 1, 1);

static int nicpf_alloc_res(struct nicpf *);
static void nicpf_free_res(struct nicpf *);
static void nic_set_lmac_vf_mapping(struct nicpf *);
static void nic_init_hw(struct nicpf *);
static int nic_sriov_init(device_t, struct nicpf *);
static void nic_poll_for_link(void *);
static int nic_register_interrupts(struct nicpf *);
static void nic_unregister_interrupts(struct nicpf *);

/*
 * Device interface
 */
static int
nicpf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_NIC_PF) {
		device_set_desc(dev, VNIC_PF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
nicpf_attach(device_t dev)
{
	struct nicpf *nic;
	int err;

	nic = device_get_softc(dev);
	nic->dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate PCI resources */
	err = nicpf_alloc_res(nic);
	if (err != 0) {
		device_printf(dev, "Could not allocate PCI resources\n");
		return (err);
	}

	nic->node = nic_get_node_id(nic->reg_base);

	/* Enable Traffic Network Switch (TNS) bypass mode by default */
	nic->flags &= ~NIC_TNS_ENABLED;
	nic_set_lmac_vf_mapping(nic);

	/* Initialize hardware */
	nic_init_hw(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	/* Setup interrupts */
	err = nic_register_interrupts(nic);
	if (err != 0)
		goto err_free_res;

	/* Configure SR-IOV */
	err = nic_sriov_init(dev, nic);
	if (err != 0)
		goto err_free_intr;

	if (nic->flags & NIC_TNS_ENABLED)
		return (0);

	mtx_init(&nic->check_link_mtx, "VNIC PF link poll", NULL, MTX_DEF);
	/* Register physical link status poll callout */
	callout_init_mtx(&nic->check_link, &nic->check_link_mtx, 0);
	mtx_lock(&nic->check_link_mtx);
	nic_poll_for_link(nic);
	mtx_unlock(&nic->check_link_mtx);

	return (0);

err_free_intr:
	nic_unregister_interrupts(nic);
err_free_res:
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

	return (err);
}

static int
nicpf_detach(device_t dev)
{
	struct nicpf *nic;
	int err;

	err = 0;
	nic = device_get_softc(dev);

#ifdef PCI_IOV
	/* Refuse to detach while VFs exist; do this before any teardown */
	err = pci_iov_detach(dev);
	if (err != 0) {
		device_printf(dev, "SR-IOV in use. Detach first.\n");
		return (err);
	}
#endif
	callout_drain(&nic->check_link);
	mtx_destroy(&nic->check_link_mtx);

	nic_unregister_interrupts(nic);
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

	return (err);
}

/*
 * SR-IOV interface
 */
#ifdef PCI_IOV
static int
nicpf_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	struct nicpf *nic;

	nic = device_get_softc(dev);

	if (num_vfs == 0)
		return (ENXIO);

	nic->flags |= NIC_SRIOV_ENABLED;

	return (0);
}

static void
nicpf_iov_uninit(device_t dev)
{

	/* ARM64TODO: Implement this function */
}

static int
nicpf_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	const void *mac;
	struct nicpf *nic;
	size_t size;
	int bgx, lmac;

	nic = device_get_softc(dev);

	if ((nic->flags & NIC_SRIOV_ENABLED) == 0)
		return (ENXIO);

	if (vfnum >= nic->num_vf_en)
		return (EINVAL);

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mac);
	}

	return (0);
}
#endif

/*
 * Helper routines
 */
static int
nicpf_alloc_res(struct nicpf *nic)
{
	device_t dev;
	int rid;

	dev = nic->dev;

	rid = VNIC_PF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		/* For verbose output print some more details */
		if (bootverbose) {
			device_printf(dev,
			    "Could not allocate registers memory\n");
		}
		return (ENXIO);
	}

	return (0);
}

static void
nicpf_free_res(struct nicpf *nic)
{
	device_t dev;

	dev = nic->dev;

	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}
}

/* Register read/write APIs */
static __inline void
nic_reg_write(struct nicpf *nic, uint64_t offset, uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

static __inline uint64_t
nic_reg_read(struct nicpf *nic, uint64_t offset)
{
	uint64_t val;

	val = bus_read_8(nic->reg_base, offset);
	return (val);
}

/* PF -> VF mailbox communication APIs */
static void
nic_enable_mbx_intr(struct nicpf *nic)
{

	/* Enable mailbox interrupt for all 128 VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0UL);
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(uint64_t), ~0UL);
}

static void
nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{

	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), (1UL << vf));
}

static uint64_t
nic_get_mbx_addr(int vf)
{

	return (NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT));
}

/*
 * Send a mailbox message to a VF
 * @vf: VF to which the message is to be sent
 * @mbx: message to be sent
 */
static void
nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	uint64_t mbx_addr = nic_get_mbx_addr(vf);
	uint64_t *msg = (uint64_t *)mbx;

	/*
	 * On first-revision hardware the mailbox interrupt is triggered
	 * when the PF writes to MBOX(1); on later revisions it is
	 * triggered by the write to MBOX(0).
	 */
	if (pass1_silicon(nic->dev)) {
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
	} else {
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
	}
}

/*
 * Respond to a VF's READY message with the VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent the READY message
 */
static void
nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	if (nic->flags & NIC_TNS_ENABLED)
		mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
	else
		mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac != NULL) {
			memcpy((uint8_t *)&mbx.nic_cfg.mac_addr, mac,
			    ETHER_ADDR_LEN);
		}
	}
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * ACK a VF's mailbox message
 * @vf: VF to which the ACK is to be sent
 */
static void
nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * NACK a VF's mailbox message, indicating that the PF
 * is not able to complete the requested action
 * @vf: VF to which the NACK is to be sent
 */
static void
nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Flush all in-flight receive packets to memory and
 * bring down an active RQ
 */
static int
nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	uint16_t timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till the sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		device_printf(nic->dev,
		    "Receive queue software sync failed\n");
		return (ETIMEDOUT);
	}
	return (0);
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void
nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx != 0) {
		mbx.bgx_stats.stats =
		    bgx_get_rx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	} else {
		mbx.bgx_stats.stats =
		    bgx_get_tx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	}
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int
nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{

	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		device_printf(nic->dev,
		    "Invalid MTU setting from VF%d rejected, "
		    "should be between %d and %d\n",
		    vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return (EINVAL);
	}
	new_frs += ETHER_HDR_LEN;
	if (new_frs <= nic->pkind.maxlen)
		return (0);

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(uint64_t *)&nic->pkind);
	return (0);
}
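
/*
 * Worked example (illustrative, assuming 9000 lies within the
 * [NIC_HW_MIN_FRS, NIC_HW_MAX_FRS] range): a VF requesting max_frs = 9000
 * passes the range check, is padded to 9014 bytes by adding ETHER_HDR_LEN
 * (14), and bumps pkind.maxlen only if that exceeds the value already
 * programmed; smaller requests are ACKed without touching the hardware.
 */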

/* Set minimum transmit packet size */
static void
nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	uint64_t lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xFUL << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}
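
/*
 * Encoding note (illustrative): the minimum-size field written above is
 * expressed in 4-byte units, so the 60-byte ceiling is stored as 60 / 4 =
 * 0xF, the largest value that fits in the 4-bit field being masked.
 */
_Static_assert((60 / 4) == 0xF, "Tx pad size encodes to the 4-bit maximum");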

/*
 * Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void
nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	uint64_t lmac_credit;

	nic->num_vf_en = 0;
	if (nic->flags & NIC_TNS_ENABLED) {
		nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
		return;
	}

	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
		if ((bgx_map & (1 << bgx)) == 0)
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
			    NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1UL << 1);	/* channel credit enable */
		lmac_credit |= (0x1ff << 2);	/* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
		    NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) {
			nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
			    lmac_credit);
		}
	}
}
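
/*
 * Illustrative arithmetic for the credit value above: with four active
 * LMACs on a BGX, each LMAC's share of the 48KB Tx buffer is
 * 48 * 1024 / 4 = 12288 bytes. Subtracting one maximum-sized frame
 * (NIC_HW_MAX_FRS) and dividing by 16 yields the credit count in 16-byte
 * units, which lands in the field at bit 12; the subtraction keeps room
 * for a full frame in flight.
 */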

#define	TNS_PORT0_BLOCK	6
#define	TNS_PORT1_BLOCK	7
#define	BGX0_BLOCK	8
#define	BGX1_BLOCK	9

static void
nic_init_hw(struct nicpf *nic)
{
	int i;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1UL << 6) | 0x03);

	if (nic->flags & NIC_TNS_ENABLED) {
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_MODE << 7) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_MODE << 7) | TNS_PORT1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | TNS_PORT1_BLOCK);
	} else {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | BGX1_BLOCK);
	}

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETHER_HDR_LEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++) {
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
		    *(uint64_t *)&nic->pkind);
	}

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
	    (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETHERTYPE_VLAN);
}

/* Channel parse index configuration */
static void
nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	uint32_t vnic, bgx, lmac, chan;
	uint32_t padd, cpi_count = 0;
	uint64_t cpi_base, cpi, rssi_base, rssi;
	uint8_t qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
	    (1UL << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
	    ((uint64_t)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN)		/* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16)	/* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF)		/* 6 bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8;	/* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->dev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (padd << 16) | (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}

/* Responds to VF with its RSS indirection table size */
static void
nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Receive side scaling configuration
 * configure:
 * - RSS index
 * - indirection table, i.e. hash::RQ mapping
 * - number of hash bits to consider
 */
static void
nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	uint8_t qset, idx;
	uint64_t cpi_cfg, cpi_base, rssi_base, rssi;
	uint64_t idx_addr;

	idx = 0;
	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->dev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFUL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}
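
/*
 * Worked example (illustrative): a VF that programs an 8-entry indirection
 * table passes hash_bits = 3, so the field written at bits <23:20> above
 * tells the hardware to use the low 3 bits of the RSS hash to pick one of
 * the 8 RSSI entries, each of which names a (Qset, RQ) pair.
 */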

/*
 * 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void
nic_tx_channel_cfg(struct nicpf *nic, uint8_t vnic, struct sq_cfg_msg *sq)
{
	uint32_t bgx, lmac, chan;
	uint32_t tl2, tl3, tl4;
	uint32_t rr_quantum;
	uint8_t sq_idx = sq->sq_num;
	uint8_t pqs_vnic;

	pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	tl4 += sq_idx;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
	    ((uint64_t)vnic << NIC_QS_ID_SHIFT) |
	    ((uint32_t)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
	    ((uint64_t)vnic << 27) | ((uint32_t)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}
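
/*
 * Checking the sample table above (illustrative; assumes
 * NIC_TL4_PER_LMAC == 8 and NIC_MAX_TL4 / NIC_MAX_TL3 == 4, consistent
 * with that table): VNIC2-SQ0 sits on BGX0/LMAC2, giving
 * tl4 = 2 * 8 + 0 = 16, tl3 = 16 / 4 = 4 and tl2 = 4 >> 2 = 1, which
 * matches the TL4(16) -> TL3[4] -> TL2[1] row.
 */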

static int
nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	/* vf_lmac_map[] has MAX_LMAC entries, so reject vf_id == MAX_LMAC */
	if (lbk->vf_id >= MAX_LMAC)
		return (ENXIO);

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return (0);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void
nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	uint64_t reg_addr;
	uint64_t cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = TRUE;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (uint64_t *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < MAX_LMAC) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
		    (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
		    (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs, mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:	/* fall through */
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic->vf_info[vf].vf_enabled = TRUE;
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		nic->vf_info[vf].vf_enabled = FALSE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		device_printf(nic->dev,
		    "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (ret == 0)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = FALSE;
}

static void
nic_mbx_intr_handler(struct nicpf *nic, int mbx)
{
	uint64_t intr;
	uint8_t vf, vf_per_mbx_reg = 64;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1UL << vf)) {
			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
}

static void
nic_mbx0_intr_handler(void *arg)
{
	struct nicpf *nic = (struct nicpf *)arg;

	nic_mbx_intr_handler(nic, 0);
}

static void
nic_mbx1_intr_handler(void *arg)
{
	struct nicpf *nic = (struct nicpf *)arg;

	nic_mbx_intr_handler(nic, 1);
}

static int
nic_enable_msix(struct nicpf *nic)
{
	struct pci_devinfo *dinfo;
	int rid, count;
	int ret;

	dinfo = device_get_ivars(nic->dev);
	rid = dinfo->cfg.msix.msix_table_bar;
	nic->msix_table_res =
	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (nic->msix_table_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for MSI-X table\n");
		return (ENXIO);
	}

	count = nic->num_vec = NIC_PF_MSIX_VECTORS;

	ret = pci_alloc_msix(nic->dev, &count);
	if ((ret != 0) || (count != nic->num_vec)) {
		device_printf(nic->dev,
		    "Request for %d MSI-X vectors failed, error: %d\n",
		    nic->num_vec, ret);
		/* Don't leak partial vectors or the MSI-X table resource */
		if (ret == 0) {
			pci_release_msi(nic->dev);
			ret = ENXIO;
		}
		bus_release_resource(nic->dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->msix_table_res), nic->msix_table_res);
		nic->msix_table_res = NULL;
		return (ret);
	}

	nic->msix_enabled = 1;
	return (0);
}

static void
nic_disable_msix(struct nicpf *nic)
{

	if (nic->msix_enabled) {
		pci_release_msi(nic->dev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}

	if (nic->msix_table_res != NULL) {
		bus_release_resource(nic->dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->msix_table_res), nic->msix_table_res);
		nic->msix_table_res = NULL;
	}
}

static void
nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->msix_entries[irq].irq_res == NULL)
			continue;
		if (nic->msix_entries[irq].handle != NULL) {
			bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ, irq + 1,
		    nic->msix_entries[irq].irq_res);
	}
}

static int
nic_register_interrupts(struct nicpf *nic)
{
	int irq, rid;
	int ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret != 0)
		return (ret);

	/* Register mailbox interrupt handlers */
	irq = NIC_PF_INTR_ID_MBOX0;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		ret = ENXIO;
		goto fail;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx0_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0)
		goto fail;

	irq = NIC_PF_INTR_ID_MBOX1;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		ret = ENXIO;
		goto fail;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx1_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0)
		goto fail;

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return (0);

fail:
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
	return (ret);
}

static void
nic_unregister_interrupts(struct nicpf *nic)
{

	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}

static int
nic_sriov_init(device_t dev, struct nicpf *nic)
{
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
	int iov_pos;
	int err;
	uint16_t total_vf_cnt;

	err = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos);
	if (err != 0) {
		device_printf(dev,
		    "SR-IOV capability not found in PCIe config space\n");
		return (err);
	}
	/* Fix-up the number of enabled VFs */
	total_vf_cnt = pci_read_config(dev, iov_pos + PCIR_SRIOV_TOTAL_VFS, 2);
	if (total_vf_cnt == 0)
		return (ENXIO);

	/* Attach SR-IOV */
	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	/*
	 * All VFs can change their MACs.
	 * This flag will be ignored but we set it just for the record.
	 */
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, TRUE);

	err = pci_iov_attach(dev, pf_schema, vf_schema);
	if (err != 0) {
		device_printf(dev,
		    "Failed to initialize SR-IOV (error=%d)\n",
		    err);
		return (err);
	}
#endif
	return (0);
}
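
/*
 * Example host-side usage (illustrative; the device unit, VF count and MAC
 * address below are hypothetical). Once this driver is attached, VFs can
 * be created with iovctl(8) from a config file along these lines:
 *
 *	PF {
 *		device : "vnicpf0";
 *		num_vfs : 8;
 *	}
 *	VF-0 {
 *		mac-addr : "00:a0:c9:00:00:01";
 *	}
 *
 * The "mac-addr" value is handed to nicpf_iov_add_vf() above, which
 * programs it into the matching BGX/LMAC.
 */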

/*
 * Poll for BGX LMAC link status and update the corresponding VF
 * if there is a change; valid only if the internal L2 switch
 * is not present, otherwise the VF link is always treated as up
 */
static void
nic_poll_for_link(void *arg)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	uint8_t vf, bgx, lmac;

	nic = (struct nicpf *)arg;

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_info[vf].vf_enabled)
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	callout_reset(&nic->check_link, hz * 2, nic_poll_for_link, nic);
}