/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Marvell/Cavium ThunderX vnic/bgx network controller
 *
 * UNIMPLEMENTED FEATURES
 * ----------------------
 * A number of features supported by the hardware are not yet implemented in
 * this driver:
 *
 * - PR223573 multicast rx filter
 * - PR223575 non-promiscuous mode (driver currently forces promisc)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/_inttypes.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#ifdef PCI_IOV
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"

#define	VNIC_PF_DEVSTR		"Cavium Thunder NIC Physical Function Driver"

#define	VNIC_PF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	(((map) >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	((map) & 0xF)
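/*
 * A VF's BGX/LMAC pair is packed into a single byte: BGX index in the high
 * nibble, LMAC index in the low nibble. For example, LMAC 2 on BGX 1 is
 * stored as 0x12, so NIC_GET_BGX_FROM_VF_LMAC_MAP(0x12) == 1 and
 * NIC_GET_LMAC_FROM_VF_LMAC_MAP(0x12) == 2.
 */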
/* Structure to be used by the SR-IOV for VF configuration schemas */
struct nicvf_info {
	boolean_t		vf_enabled;
	int			vf_flags;
};

struct nicpf {
	device_t		dev;
	uint8_t			node;
	u_int			flags;
	uint8_t			num_vf_en;	/* Number of enabled VFs */
	struct nicvf_info	vf_info[MAX_NUM_VFS_SUPPORTED];
	struct resource *	reg_base;	/* Register start address */
	struct pkind_cfg	pkind;
	uint8_t			vf_lmac_map[MAX_LMAC];
	boolean_t		mbx_lock[MAX_NUM_VFS_SUPPORTED];

	struct callout		check_link;
	struct mtx		check_link_mtx;

	uint8_t			link[MAX_LMAC];
	uint8_t			duplex[MAX_LMAC];
	uint32_t		speed[MAX_LMAC];
	uint16_t		cpi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rssi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rss_ind_tbl_size;

	/* MSI-X */
	boolean_t		msix_enabled;
	uint8_t			num_vec;
	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
	struct resource *	msix_table_res;
};

static int nicpf_probe(device_t);
static int nicpf_attach(device_t);
static int nicpf_detach(device_t);

#ifdef PCI_IOV
static int nicpf_iov_init(device_t, uint16_t, const nvlist_t *);
static void nicpf_iov_uninit(device_t);
static int nicpf_iov_add_vf(device_t, uint16_t, const nvlist_t *);
#endif

static device_method_t nicpf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicpf_probe),
	DEVMETHOD(device_attach,	nicpf_attach),
	DEVMETHOD(device_detach,	nicpf_detach),
	/* PCI SR-IOV interface */
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init,		nicpf_iov_init),
	DEVMETHOD(pci_iov_uninit,	nicpf_iov_uninit),
	DEVMETHOD(pci_iov_add_vf,	nicpf_iov_add_vf),
#endif
	DEVMETHOD_END,
};

static driver_t vnicpf_driver = {
	"vnicpf",
	nicpf_methods,
	sizeof(struct nicpf),
};

DRIVER_MODULE(vnicpf, pci, vnicpf_driver, 0, 0);
MODULE_VERSION(vnicpf, 1);
MODULE_DEPEND(vnicpf, pci, 1, 1, 1);
MODULE_DEPEND(vnicpf, ether, 1, 1, 1);
MODULE_DEPEND(vnicpf, thunder_bgx, 1, 1, 1);

static int nicpf_alloc_res(struct nicpf *);
static void nicpf_free_res(struct nicpf *);
static void nic_set_lmac_vf_mapping(struct nicpf *);
static void nic_init_hw(struct nicpf *);
static int nic_sriov_init(device_t, struct nicpf *);
static void nic_poll_for_link(void *);
static int nic_register_interrupts(struct nicpf *);
static void nic_unregister_interrupts(struct nicpf *);

/*
 * Device interface
 */
static int
nicpf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_NIC_PF) {
		device_set_desc(dev, VNIC_PF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
nicpf_attach(device_t dev)
{
	struct nicpf *nic;
	int err;

	nic = device_get_softc(dev);
	nic->dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate PCI resources */
	err = nicpf_alloc_res(nic);
	if (err != 0) {
		device_printf(dev, "Could not allocate PCI resources\n");
		return (err);
	}

	nic->node = nic_get_node_id(nic->reg_base);

	/* Enable Traffic Network Switch (TNS) bypass mode by default */
	nic->flags &= ~NIC_TNS_ENABLED;
	nic_set_lmac_vf_mapping(nic);

	/* Initialize hardware */
	nic_init_hw(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	/* Setup interrupts */
	err = nic_register_interrupts(nic);
	if (err != 0)
		goto err_free_res;

	/* Configure SR-IOV */
	err = nic_sriov_init(dev, nic);
	if (err != 0)
		goto err_free_intr;

	if (nic->flags & NIC_TNS_ENABLED)
		return (0);

	mtx_init(&nic->check_link_mtx, "VNIC PF link poll", NULL, MTX_DEF);
	/* Register physical link status poll callout */
	callout_init_mtx(&nic->check_link, &nic->check_link_mtx, 0);
	mtx_lock(&nic->check_link_mtx);
	nic_poll_for_link(nic);
	mtx_unlock(&nic->check_link_mtx);

	return (0);

err_free_intr:
	nic_unregister_interrupts(nic);
err_free_res:
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

	return (err);
}

static int
nicpf_detach(device_t dev)
{
	struct nicpf *nic;
	int err;

	err = 0;
	nic = device_get_softc(dev);

	callout_drain(&nic->check_link);
	mtx_destroy(&nic->check_link_mtx);

	nic_unregister_interrupts(nic);
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

#ifdef PCI_IOV
	err = pci_iov_detach(dev);
	if (err != 0)
		device_printf(dev, "SR-IOV is still in use; detach VFs first\n");
#endif
	return (err);
}

/*
 * SR-IOV interface
 */
#ifdef PCI_IOV
static int
nicpf_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	struct nicpf *nic;

	nic = device_get_softc(dev);

	if (num_vfs == 0)
		return (ENXIO);

	nic->flags |= NIC_SRIOV_ENABLED;

	return (0);
}

static void
nicpf_iov_uninit(device_t dev)
{

	/* ARM64TODO: Implement this function */
}

static int
nicpf_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	const void *mac;
	struct nicpf *nic;
	size_t size;
	int bgx, lmac;

	nic = device_get_softc(dev);

	if ((nic->flags & NIC_SRIOV_ENABLED) == 0)
		return (ENXIO);

	if (vfnum >= nic->num_vf_en)
		return (EINVAL);

	if (nvlist_exists_binary(params, "mac-addr") != 0) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mac);
	}

	return (0);
}
#endif

/*
 * Helper routines
 */
static int
nicpf_alloc_res(struct nicpf *nic)
{
	device_t dev;
	int rid;

	dev = nic->dev;

	rid = VNIC_PF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		/* For verbose output print some more details */
		if (bootverbose) {
			device_printf(dev,
			    "Could not allocate registers memory\n");
		}
		return (ENXIO);
	}

	return (0);
}

static void
nicpf_free_res(struct nicpf *nic)
{
	device_t dev;

	dev = nic->dev;

	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}
}

/* Register read/write APIs */
static __inline void
nic_reg_write(struct nicpf *nic, uint64_t offset, uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

static __inline uint64_t
nic_reg_read(struct nicpf *nic, uint64_t offset)
{
	uint64_t val;

	val = bus_read_8(nic->reg_base, offset);
	return (val);
}

/* PF -> VF mailbox communication APIs */
static void
nic_enable_mbx_intr(struct nicpf *nic)
{

	/* Enable mailbox interrupt for all 128 VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0UL);
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(uint64_t), ~0UL);
}

static void
nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{

	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), (1UL << vf));
}

static uint64_t
nic_get_mbx_addr(int vf)
{

	return (NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT));
}
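/*
 * Mailbox windows are laid out linearly in the PF register space: VF n's
 * window begins at NIC_PF_VF_0_127_MAILBOX_0_1 + (n << NIC_VF_NUM_SHIFT),
 * i.e. consecutive windows are (1 << NIC_VF_NUM_SHIFT) bytes apart, and
 * each window backs the NIC_PF_VF_MAILBOX_SIZE 64-bit words of one
 * union nic_mbx.
 */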
/*
 * Send a mailbox message to VF
 * @vf: vf to which this message is to be sent
 * @mbx: Message to be sent
 */
static void
nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	uint64_t mbx_addr = nic_get_mbx_addr(vf);
	uint64_t *msg = (uint64_t *)mbx;

	/*
	 * In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->dev)) {
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
	} else {
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
	}
}

/*
 * Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void
nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	if (nic->flags & NIC_TNS_ENABLED)
		mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
	else
		mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac != NULL) {
			memcpy((uint8_t *)&mbx.nic_cfg.mac_addr, mac,
			    ETHER_ADDR_LEN);
		}
	}
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * ACKs VF's mailbox message
 * @vf: VF to which ACK is to be sent
 */
static void
nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * NACKs VF's mailbox message to indicate that the PF is
 * not able to complete the action
 * @vf: VF to which NACK is to be sent
 */
static void
nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Flush all in flight receive packets to memory and
 * bring down an active RQ
 */
static int
nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	uint16_t timeout = 0xFFFF;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout != 0) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (timeout == 0) {
		device_printf(nic->dev, "Receive queue software sync failed\n");
		return (ETIMEDOUT);
	}
	return (0);
}
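/*
 * Note: the sync wait above is a bounded busy-wait. It polls
 * NIC_PF_SW_SYNC_RX_DONE up to 0xFFFF times with no delay between reads,
 * so the effective timeout is set by MMIO read latency rather than by
 * wall-clock time.
 */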
/* Get BGX Rx/Tx stats and respond to VF's request */
static void
nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx != 0) {
		mbx.bgx_stats.stats =
		    bgx_get_rx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	} else {
		mbx.bgx_stats.stats =
		    bgx_get_tx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	}
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int
nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{

	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		device_printf(nic->dev,
		    "Invalid MTU setting from VF%d rejected, "
		    "should be between %d and %d\n",
		    vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return (EINVAL);
	}
	new_frs += ETHER_HDR_LEN;
	if (new_frs <= nic->pkind.maxlen)
		return (0);

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(uint64_t *)&nic->pkind);
	return (0);
}
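/*
 * Note that nic_update_hw_frs() only ever raises pkind.maxlen: the driver
 * keeps a single PKIND configuration for the whole PF rather than one per
 * VF, so shrinking it to match one VF's smaller MTU could cut off frames
 * that are still legal for the other VFs.
 */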
/* Set minimum transmit packet size */
static void
nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	uint64_t lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}

/*
 * Function to check number of LMACs present and set VF to LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void
nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	uint64_t lmac_credit;

	nic->num_vf_en = 0;
	if (nic->flags & NIC_TNS_ENABLED) {
		nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
		return;
	}

	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
		if ((bgx_map & (1 << bgx)) == 0)
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
			    NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1UL << 1);	/* channel credit enable */
		lmac_credit |= (0x1ff << 2);	/* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
		    NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) {
			nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
			    lmac_credit);
		}
	}
}
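/*
 * Worked example of the credit value above, assuming a BGX with four
 * LMACs: each LMAC gets (48 * 1024) / 4 = 12288 bytes of the shared Tx
 * buffer, one maximum-sized frame (NIC_HW_MAX_FRS bytes) is held in
 * reserve, and the remainder is converted to 16-byte units before being
 * shifted into the credit field at bit 12.
 */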
#define	TNS_PORT0_BLOCK	6
#define	TNS_PORT1_BLOCK	7
#define	BGX0_BLOCK	8
#define	BGX1_BLOCK	9

static void
nic_init_hw(struct nicpf *nic)
{
	int i;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1UL << 6) | 0x03);

	if (nic->flags & NIC_TNS_ENABLED) {
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_MODE << 7) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_MODE << 7) | TNS_PORT1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | TNS_PORT1_BLOCK);
	} else {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | BGX1_BLOCK);
	}

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETHER_HDR_LEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++) {
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
		    *(uint64_t *)&nic->pkind);
	}

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
	    (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETHERTYPE_VLAN);
}
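/*
 * In nic_init_hw() above, interface 1's copy of each INTF_0_1 register is
 * addressed by adding 0x100 to interface 0's; the code mixes '|' and '+'
 * to apply that stride, which is equivalent here only because bit 8 of the
 * register base addresses is presumably clear in the ThunderX register map.
 */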
/* Channel parse index configuration */
static void
nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	uint32_t vnic, bgx, lmac, chan;
	uint32_t padd, cpi_count = 0;
	uint64_t cpi_base, cpi, rssi_base, rssi;
	uint8_t qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
	    (1UL << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
	    ((uint64_t)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN)	/* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16)	/* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF)	/* 6 bits of DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8;	/* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->dev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (padd << 16) | (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}
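/*
 * For example, with CPI_ALG_VLAN a VF gets eight consecutive CPIs, one per
 * VLAN PCP value; each CPI then points at one RSS indirection entry, so up
 * to eight receive queues can be selected purely by 802.1p priority.
 */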
/* Responds to VF with its RSS indirection table size */
static void
nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Receive side scaling configuration
 * configure:
 * - RSS index
 * - indirection table, i.e. the hash::RQ mapping
 * - number of hash bits to consider
 */
static void
nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	uint8_t qset, idx;
	uint64_t cpi_cfg, cpi_base, rssi_base, rssi;
	uint64_t idx_addr;

	idx = 0;
	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->dev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFUL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/*
 * 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void
nic_tx_channel_cfg(struct nicpf *nic, uint8_t vnic, struct sq_cfg_msg *sq)
{
	uint32_t bgx, lmac, chan;
	uint32_t tl2, tl3, tl4;
	uint32_t rr_quantum;
	uint8_t sq_idx = sq->sq_num;
	uint8_t pqs_vnic;

	pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	tl4 += sq_idx;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
	    ((uint64_t)vnic << NIC_QS_ID_SHIFT) |
	    ((uint32_t)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
	    ((uint64_t)vnic << 27) | ((uint32_t)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}
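/*
 * The fan-in of the scheduler tree follows from the index math above:
 * tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3) groups TL4s evenly onto TL3s,
 * and tl2 = tl3 >> 2 hangs four TL3s off each TL2, which matches the
 * grouping in the sample table (e.g. TL3[0] and TL3[2] share TL2[0]).
 */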
static int
nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	/* vf_lmac_map[] has MAX_LMAC entries; reject out-of-range VF ids */
	if (lbk->vf_id >= MAX_LMAC)
		return (ENXIO);

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return (0);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void
nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	uint64_t reg_addr;
	uint64_t cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = TRUE;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (uint64_t *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < MAX_LMAC) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
		    (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
		    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
		    (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
		    (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs, mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:	/* fall through */
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic->vf_info[vf].vf_enabled = TRUE;
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		nic->vf_info[vf].vf_enabled = FALSE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		device_printf(nic->dev,
		    "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (ret == 0)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = FALSE;
}
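/*
 * Reply convention for the switch above: ordinary requests get an ACK on
 * success (ret == 0) and a NACK on failure. RSS_SIZE and BGX_STATS have
 * already answered with their own reply message and CFG_DONE needs no
 * reply, so those paths jump straight to unlock; READY also sends its own
 * reply and sets ret to 1 solely to bypass the ACK without triggering a
 * NACK.
 */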
static void
nic_mbx_intr_handler(struct nicpf *nic, int mbx)
{
	uint64_t intr;
	uint8_t vf, vf_per_mbx_reg = 64;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1UL << vf)) {
			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
}

static void
nic_mbx0_intr_handler(void *arg)
{
	struct nicpf *nic = (struct nicpf *)arg;

	/* Mailbox interrupt register 0 covers VFs 0-63 */
	nic_mbx_intr_handler(nic, 0);
}

static void
nic_mbx1_intr_handler(void *arg)
{
	struct nicpf *nic = (struct nicpf *)arg;

	/* Mailbox interrupt register 1 covers VFs 64-127 */
	nic_mbx_intr_handler(nic, 1);
}

static int
nic_enable_msix(struct nicpf *nic)
{
	struct pci_devinfo *dinfo;
	int rid, count;
	int ret;

	dinfo = device_get_ivars(nic->dev);
	rid = dinfo->cfg.msix.msix_table_bar;
	nic->msix_table_res =
	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (nic->msix_table_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for MSI-X table\n");
		return (ENXIO);
	}

	count = nic->num_vec = NIC_PF_MSIX_VECTORS;

	ret = pci_alloc_msix(nic->dev, &count);
	if ((ret != 0) || (count != nic->num_vec)) {
		device_printf(nic->dev,
		    "Request for %d MSI-X vectors failed, error: %d\n",
		    nic->num_vec, ret);
		/* Getting fewer vectors than requested is also a failure */
		return (ret != 0 ? ret : ENXIO);
	}

	nic->msix_enabled = 1;
	return (0);
}

static void
nic_disable_msix(struct nicpf *nic)
{

	if (nic->msix_enabled) {
		pci_release_msi(nic->dev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}

	if (nic->msix_table_res != NULL) {
		bus_release_resource(nic->dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->msix_table_res), nic->msix_table_res);
		nic->msix_table_res = NULL;
	}
}

static void
nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->msix_entries[irq].irq_res == NULL)
			continue;
		if (nic->msix_entries[irq].handle != NULL) {
			bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ, irq + 1,
		    nic->msix_entries[irq].irq_res);
	}
}

static int
nic_register_interrupts(struct nicpf *nic)
{
	int irq, rid;
	int ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret != 0)
		return (ret);

	/* Register mailbox interrupt handlers */
	irq = NIC_PF_INTR_ID_MBOX0;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		ret = ENXIO;
		goto fail;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx0_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0)
		goto fail;

	irq = NIC_PF_INTR_ID_MBOX1;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		ret = ENXIO;
		goto fail;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx1_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0)
		goto fail;

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return (0);

fail:
	nic_free_all_interrupts(nic);
	return (ret);
}

static void
nic_unregister_interrupts(struct nicpf *nic)
{

	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}

static int
nic_sriov_init(device_t dev, struct nicpf *nic)
{
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
	int iov_pos;
	int err;
	uint16_t total_vf_cnt;

	err = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos);
	if (err != 0) {
		device_printf(dev,
		    "SR-IOV capability is not found in PCIe config space\n");
		return (err);
	}
	/* Fix-up the number of enabled VFs */
	total_vf_cnt = pci_read_config(dev, iov_pos + PCIR_SRIOV_TOTAL_VFS, 2);
	if (total_vf_cnt == 0)
		return (ENXIO);

	/* Attach SR-IOV */
	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	/*
	 * All VFs can change their MACs.
	 * This flag will be ignored but we set it just for the record.
	 */
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, TRUE);

	err = pci_iov_attach(dev, pf_schema, vf_schema);
	if (err != 0) {
		device_printf(dev,
		    "Failed to initialize SR-IOV (error=%d)\n",
		    err);
		return (err);
	}
#endif
	return (0);
}

/*
 * Poll for BGX LMAC link status and update the corresponding VF
 * if there is a change; valid only if the internal L2 switch
 * is not present, otherwise VF link is always treated as up
 */
static void
nic_poll_for_link(void *arg)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	uint8_t vf, bgx, lmac;

	nic = (struct nicpf *)arg;

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_info[vf].vf_enabled)
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	/* Poll again in two seconds */
	callout_reset(&nic->check_link, hz * 2, nic_poll_for_link, nic);
}