/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/_inttypes.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#ifdef PCI_IOV
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"

#define VNIC_PF_DEVSTR		"Cavium Thunder NIC Physical Function Driver"

#define VNIC_PF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

#define NIC_SET_VF_LMAC_MAP(bgx, lmac)	((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	(((map) >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	((map) & 0xF)

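/*
 * Worked example of the encoding above: for BGX 1, LMAC 2,
 * NIC_SET_VF_LMAC_MAP(1, 2) packs ((1 & 0xF) << 4) | (2 & 0xF) == 0x12,
 * and the getters recover NIC_GET_BGX_FROM_VF_LMAC_MAP(0x12) == 1 and
 * NIC_GET_LMAC_FROM_VF_LMAC_MAP(0x12) == 2.
 */
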
/* Structure to be used by the SR-IOV for VF configuration schemas */
struct nicvf_info {
        boolean_t		vf_enabled;
        int			vf_flags;
};

struct nicpf {
        device_t		dev;
        uint8_t			node;
        u_int			flags;
        uint8_t			num_vf_en;	/* Number of VFs enabled */
        struct nicvf_info	vf_info[MAX_NUM_VFS_SUPPORTED];
        struct resource		*reg_base;	/* Register start address */
        struct pkind_cfg	pkind;
        uint8_t			vf_lmac_map[MAX_LMAC];
        boolean_t		mbx_lock[MAX_NUM_VFS_SUPPORTED];

        struct callout		check_link;
        struct mtx		check_link_mtx;

        uint8_t			link[MAX_LMAC];
        uint8_t			duplex[MAX_LMAC];
        uint32_t		speed[MAX_LMAC];
        uint16_t		cpi_base[MAX_NUM_VFS_SUPPORTED];
        uint16_t		rssi_base[MAX_NUM_VFS_SUPPORTED];
        uint16_t		rss_ind_tbl_size;

        /* MSI-X */
        boolean_t		msix_enabled;
        uint8_t			num_vec;
        struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
        struct resource		*msix_table_res;
};

static int nicpf_probe(device_t);
static int nicpf_attach(device_t);
static int nicpf_detach(device_t);

#ifdef PCI_IOV
static int nicpf_iov_init(device_t, uint16_t, const nvlist_t *);
static void nicpf_iov_uninit(device_t);
static int nicpf_iov_add_vf(device_t, uint16_t, const nvlist_t *);
#endif

static device_method_t nicpf_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,		nicpf_probe),
        DEVMETHOD(device_attach,	nicpf_attach),
        DEVMETHOD(device_detach,	nicpf_detach),
        /* PCI SR-IOV interface */
#ifdef PCI_IOV
        DEVMETHOD(pci_iov_init,		nicpf_iov_init),
        DEVMETHOD(pci_iov_uninit,	nicpf_iov_uninit),
        DEVMETHOD(pci_iov_add_vf,	nicpf_iov_add_vf),
#endif
        DEVMETHOD_END,
};

static driver_t nicpf_driver = {
        "vnicpf",
        nicpf_methods,
        sizeof(struct nicpf),
};

static devclass_t nicpf_devclass;

DRIVER_MODULE(nicpf, pci, nicpf_driver, nicpf_devclass, 0, 0);
MODULE_DEPEND(nicpf, pci, 1, 1, 1);
MODULE_DEPEND(nicpf, ether, 1, 1, 1);
MODULE_DEPEND(nicpf, thunder_bgx, 1, 1, 1);

static int nicpf_alloc_res(struct nicpf *);
static void nicpf_free_res(struct nicpf *);
static void nic_set_lmac_vf_mapping(struct nicpf *);
static void nic_init_hw(struct nicpf *);
static int nic_sriov_init(device_t, struct nicpf *);
static void nic_poll_for_link(void *);
static int nic_register_interrupts(struct nicpf *);
static void nic_unregister_interrupts(struct nicpf *);

/*
 * Device interface
 */
static int
nicpf_probe(device_t dev)
{
        uint16_t vendor_id;
        uint16_t device_id;

        vendor_id = pci_get_vendor(dev);
        device_id = pci_get_device(dev);

        if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
            device_id == PCI_DEVICE_ID_THUNDER_NIC_PF) {
                device_set_desc(dev, VNIC_PF_DEVSTR);
                return (BUS_PROBE_DEFAULT);
        }

        return (ENXIO);
}

static int
nicpf_attach(device_t dev)
{
        struct nicpf *nic;
        int err;

        nic = device_get_softc(dev);
        nic->dev = dev;

        /* Enable bus mastering */
        pci_enable_busmaster(dev);

        /* Allocate PCI resources */
        err = nicpf_alloc_res(nic);
        if (err != 0) {
                device_printf(dev, "Could not allocate PCI resources\n");
                return (err);
        }

        nic->node = nic_get_node_id(nic->reg_base);

        /* Enable Traffic Network Switch (TNS) bypass mode by default */
        nic->flags &= ~NIC_TNS_ENABLED;
        nic_set_lmac_vf_mapping(nic);

        /* Initialize hardware */
        nic_init_hw(nic);

        /* Set RSS TBL size for each VF */
        nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

        /* Set up interrupts */
        err = nic_register_interrupts(nic);
        if (err != 0)
                goto err_free_res;

        /* Configure SR-IOV */
        err = nic_sriov_init(dev, nic);
        if (err != 0)
                goto err_free_intr;

        if (nic->flags & NIC_TNS_ENABLED)
                return (0);

        mtx_init(&nic->check_link_mtx, "VNIC PF link poll", NULL, MTX_DEF);
        /* Register physical link status poll callout */
        callout_init_mtx(&nic->check_link, &nic->check_link_mtx, 0);
        mtx_lock(&nic->check_link_mtx);
        nic_poll_for_link(nic);
        mtx_unlock(&nic->check_link_mtx);

        return (0);

err_free_intr:
        nic_unregister_interrupts(nic);
err_free_res:
        nicpf_free_res(nic);
        pci_disable_busmaster(dev);

        return (err);
}

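/*
 * Note on the callout set up above: callout_init_mtx(9) binds
 * check_link_mtx to the callout, so nic_poll_for_link() always runs
 * with that mutex held. nicpf_detach() below relies on callout_drain()
 * returning only after any in-flight handler has finished, so the
 * mutex can safely be destroyed afterwards.
 */
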
static int
nicpf_detach(device_t dev)
{
        struct nicpf *nic;

        nic = device_get_softc(dev);

        callout_drain(&nic->check_link);
        mtx_destroy(&nic->check_link_mtx);

        nic_unregister_interrupts(nic);
        nicpf_free_res(nic);
        pci_disable_busmaster(dev);

        return (0);
}

/*
 * SR-IOV interface
 */
#ifdef PCI_IOV
static int
nicpf_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
        struct nicpf *nic;

        nic = device_get_softc(dev);

        if (num_vfs == 0)
                return (ENXIO);

        nic->flags |= NIC_SRIOV_ENABLED;

        return (0);
}

static void
nicpf_iov_uninit(device_t dev)
{

        /* ARM64TODO: Implement this function */
}

static int
nicpf_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
        const void *mac;
        struct nicpf *nic;
        size_t size;
        int bgx, lmac;

        nic = device_get_softc(dev);

        if ((nic->flags & NIC_SRIOV_ENABLED) == 0)
                return (ENXIO);

        if (vfnum > (nic->num_vf_en - 1))
                return (EINVAL);

        if (nvlist_exists_binary(params, "mac-addr") != 0) {
                mac = nvlist_get_binary(params, "mac-addr", &size);
                bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
                lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
                bgx_set_lmac_mac(nic->node, bgx, lmac, mac);
        }

        return (0);
}
#endif

/*
 * Helper routines
 */
static int
nicpf_alloc_res(struct nicpf *nic)
{
        device_t dev;
        int rid;

        dev = nic->dev;

        rid = VNIC_PF_REG_RID;
        nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (nic->reg_base == NULL) {
                /* For verbose output print some more details */
                if (bootverbose) {
                        device_printf(dev,
                            "Could not allocate register memory\n");
                }
                return (ENXIO);
        }

        return (0);
}

static void
nicpf_free_res(struct nicpf *nic)
{
        device_t dev;

        dev = nic->dev;

        if (nic->reg_base != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rman_get_rid(nic->reg_base), nic->reg_base);
        }
}

/* Register read/write APIs */
static __inline void
nic_reg_write(struct nicpf *nic, bus_space_handle_t offset,
    uint64_t val)
{

        bus_write_8(nic->reg_base, offset, val);
}

static __inline uint64_t
nic_reg_read(struct nicpf *nic, uint64_t offset)
{
        uint64_t val;

        val = bus_read_8(nic->reg_base, offset);
        return (val);
}

/* PF -> VF mailbox communication APIs */
static void
nic_enable_mbx_intr(struct nicpf *nic)
{

        /* Enable mailbox interrupt for all 128 VFs */
        nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0UL);
        nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(uint64_t), ~0UL);
}

static void
nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{

        nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), (1UL << vf));
}

static uint64_t
nic_get_mbx_addr(int vf)
{

        return (NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT));
}

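/*
 * Each VF owns a mailbox window of NIC_PF_VF_MAILBOX_SIZE 64-bit words
 * in PF register space, at a fixed per-VF stride. Illustrative example,
 * assuming NIC_VF_NUM_SHIFT is 21 (nic_reg.h has the authoritative
 * value): the mailbox of VF 3 would start at
 * NIC_PF_VF_0_127_MAILBOX_0_1 + (3 << 21).
 */
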
/*
 * Send a mailbox message to a VF
 * @vf: VF to which the message is to be sent
 * @mbx: message to be sent
 */
static void
nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
        bus_space_handle_t mbx_addr = nic_get_mbx_addr(vf);
        uint64_t *msg = (uint64_t *)mbx;

        /*
         * In first revision HW, the mbox interrupt is triggered
         * when the PF writes to MBOX(1); in later revisions it is
         * triggered when the PF writes to MBOX(0).
         */
        if (pass1_silicon(nic->dev)) {
                nic_reg_write(nic, mbx_addr + 0, msg[0]);
                nic_reg_write(nic, mbx_addr + 8, msg[1]);
        } else {
                nic_reg_write(nic, mbx_addr + 8, msg[1]);
                nic_reg_write(nic, mbx_addr + 0, msg[0]);
        }
}

/*
 * Responds to a VF's READY message with the VF's
 * ID, node, MAC address, etc.
 * @vf: VF which sent the READY message
 */
static void
nic_mbx_send_ready(struct nicpf *nic, int vf)
{
        union nic_mbx mbx = {};
        int bgx_idx, lmac;
        const char *mac;

        mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
        mbx.nic_cfg.vf_id = vf;

        if (nic->flags & NIC_TNS_ENABLED)
                mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
        else
                mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

        if (vf < MAX_LMAC) {
                bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
                lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

                mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
                if (mac) {
                        memcpy((uint8_t *)&mbx.nic_cfg.mac_addr, mac,
                            ETHER_ADDR_LEN);
                }
        }
        mbx.nic_cfg.node_id = nic->node;

        mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

        nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * ACKs a VF's mailbox message
 * @vf: VF to which the ACK is to be sent
 */
static void
nic_mbx_send_ack(struct nicpf *nic, int vf)
{
        union nic_mbx mbx = {};

        mbx.msg.msg = NIC_MBOX_MSG_ACK;
        nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * NACKs a VF's mailbox message to signal that the PF is not able to
 * complete the requested action
 * @vf: VF to which the NACK is to be sent
 */
static void
nic_mbx_send_nack(struct nicpf *nic, int vf)
{
        union nic_mbx mbx = {};

        mbx.msg.msg = NIC_MBOX_MSG_NACK;
        nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Flush all in-flight receive packets to memory and
 * bring down an active RQ
 */
static int
nic_rcv_queue_sw_sync(struct nicpf *nic)
{
        uint16_t timeout = ~0x00;

        nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
        /* Wait till the sync cycle is finished */
        while (timeout) {
                if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
                        break;
                timeout--;
        }
        nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
        if (!timeout) {
                device_printf(nic->dev, "Receive queue software sync failed\n");
                return (ETIMEDOUT);
        }
        return (0);
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void
nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
        int bgx_idx, lmac;
        union nic_mbx mbx = {};

        bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
        lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

        mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
        mbx.bgx_stats.vf_id = bgx->vf_id;
        mbx.bgx_stats.rx = bgx->rx;
        mbx.bgx_stats.idx = bgx->idx;
        if (bgx->rx != 0) {
                mbx.bgx_stats.stats =
                    bgx_get_rx_stats(nic->node, bgx_idx, lmac, bgx->idx);
        } else {
                mbx.bgx_stats.stats =
                    bgx_get_tx_stats(nic->node, bgx_idx, lmac, bgx->idx);
        }
        nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int
nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{

        if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
                device_printf(nic->dev,
                    "Invalid MTU setting from VF%d rejected, "
                    "should be between %d and %d\n",
                    vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
                return (EINVAL);
        }
        new_frs += ETHER_HDR_LEN;
        if (new_frs <= nic->pkind.maxlen)
                return (0);

        nic->pkind.maxlen = new_frs;
        nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(uint64_t *)&nic->pkind);
        return (0);
}

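/*
 * Worked example for nic_update_hw_frs(): if a VF requests max_frs 1500,
 * the candidate pkind maxlen is 1500 + ETHER_HDR_LEN (14) = 1514 bytes;
 * the PKIND register is rewritten only when that exceeds the current
 * maxlen, so per-VF requests can only grow the shared limit.
 */
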
/* Set minimum transmit packet size */
static void
nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
        int lmac;
        uint64_t lmac_cfg;

        /* Max value that can be set is 60 */
        if (size > 60)
                size = 60;

        for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
                lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
                lmac_cfg &= ~(0xF << 2);
                lmac_cfg |= ((size / 4) << 2);
                nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
        }
}

/*
 * Function to check the number of LMACs present and set the VF::LMAC
 * mapping. The mapping will be used while initializing channels.
 */
static void
nic_set_lmac_vf_mapping(struct nicpf *nic)
{
        unsigned bgx_map = bgx_get_map(nic->node);
        int bgx, next_bgx_lmac = 0;
        int lmac, lmac_cnt = 0;
        uint64_t lmac_credit;

        nic->num_vf_en = 0;
        if (nic->flags & NIC_TNS_ENABLED) {
                nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
                return;
        }

        for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
                if ((bgx_map & (1 << bgx)) == 0)
                        continue;
                lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
                for (lmac = 0; lmac < lmac_cnt; lmac++)
                        nic->vf_lmac_map[next_bgx_lmac++] =
                            NIC_SET_VF_LMAC_MAP(bgx, lmac);
                nic->num_vf_en += lmac_cnt;

                /* Program LMAC credits */
                lmac_credit = (1UL << 1);	/* channel credit enable */
                lmac_credit |= (0x1ff << 2);	/* Max outstanding pkt count */
                /* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
                lmac_credit |= (((((48 * 1024) / lmac_cnt) -
                    NIC_HW_MAX_FRS) / 16) << 12);
                lmac = bgx * MAX_LMAC_PER_BGX;
                for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) {
                        nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
                            lmac_credit);
                }
        }
}

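/*
 * Worked example for the credit programming above, assuming a 4-LMAC
 * BGX and a NIC_HW_MAX_FRS of 9190 (see nic.h for the real value):
 * each LMAC gets (48 * 1024) / 4 = 12288 bytes of Tx buffer, so the
 * credit field becomes (12288 - 9190) / 16 = 193 sixteen-byte units,
 * placed at bit 12 alongside the channel-credit enable (bit 1) and the
 * 0x1ff outstanding-packet limit (bits 2-10).
 */
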
#define TNS_PORT0_BLOCK	6
#define TNS_PORT1_BLOCK	7
#define BGX0_BLOCK	8
#define BGX1_BLOCK	9

static void
nic_init_hw(struct nicpf *nic)
{
        int i;

        /* Enable NIC HW block */
        nic_reg_write(nic, NIC_PF_CFG, 0x3);

        /* Enable backpressure */
        nic_reg_write(nic, NIC_PF_BP_CFG, (1UL << 6) | 0x03);

        if (nic->flags & NIC_TNS_ENABLED) {
                nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
                    (NIC_TNS_MODE << 7) | TNS_PORT0_BLOCK);
                nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
                    (NIC_TNS_MODE << 7) | TNS_PORT1_BLOCK);
                nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
                    (1UL << 63) | TNS_PORT0_BLOCK);
                nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
                    (1UL << 63) | TNS_PORT1_BLOCK);
        } else {
                /* Disable TNS mode on both interfaces */
                nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
                    (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
                nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
                    (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
                nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
                    (1UL << 63) | BGX0_BLOCK);
                nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
                    (1UL << 63) | BGX1_BLOCK);
        }

        /* PKIND configuration */
        nic->pkind.minlen = 0;
        nic->pkind.maxlen = NIC_HW_MAX_FRS + ETHER_HDR_LEN;
        nic->pkind.lenerr_en = 1;
        nic->pkind.rx_hdr = 0;
        nic->pkind.hdr_sl = 0;

        for (i = 0; i < NIC_MAX_PKIND; i++) {
                nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
                    *(uint64_t *)&nic->pkind);
        }

        nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

        /* Timer config */
        nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

        /* Enable VLAN ethertype matching and stripping */
        nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
            (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETHERTYPE_VLAN);
}

/* Channel parse index configuration */
static void
nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
        uint32_t vnic, bgx, lmac, chan;
        uint32_t padd, cpi_count = 0;
        uint64_t cpi_base, cpi, rssi_base, rssi;
        uint8_t qset, rq_idx = 0;

        vnic = cfg->vf_id;
        bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
        lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

        chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
        cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
        rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

        /* Rx channel configuration */
        nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
            (1UL << 63) | (vnic << 0));
        nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
            ((uint64_t)cfg->cpi_alg << 62) | (cpi_base << 48));

        if (cfg->cpi_alg == CPI_ALG_NONE)
                cpi_count = 1;
        else if (cfg->cpi_alg == CPI_ALG_VLAN)		/* 3 bits of PCP */
                cpi_count = 8;
        else if (cfg->cpi_alg == CPI_ALG_VLAN16)	/* 3 bits PCP + DEI */
                cpi_count = 16;
        else if (cfg->cpi_alg == CPI_ALG_DIFF)		/* 6 bits of DSCP */
                cpi_count = NIC_MAX_CPI_PER_LMAC;

        /* RSS Qset, Qidx mapping */
        qset = cfg->vf_id;
        rssi = rssi_base;
        for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
                nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
                    (qset << 3) | rq_idx);
                rq_idx++;
        }

        rssi = 0;
        cpi = cpi_base;
        for (; cpi < (cpi_base + cpi_count); cpi++) {
                /* Determine port to channel adder */
                if (cfg->cpi_alg != CPI_ALG_DIFF)
                        padd = cpi % cpi_count;
                else
                        padd = cpi % 8;	/* 3 bits CS out of 6 bits DSCP */

                /* Leave RSS_SIZE as '0' to disable RSS */
                if (pass1_silicon(nic->dev)) {
                        nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
                            (vnic << 24) | (padd << 16) | (rssi_base + rssi));
                } else {
                        /* Set MPI_ALG to '0' to disable MCAM parsing */
                        nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
                            (padd << 16));
                        /* MPI index is same as CPI if MPI_ALG is not enabled */
                        nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
                            (vnic << 24) | (rssi_base + rssi));
                }

                if ((rssi + 1) >= cfg->rq_cnt)
                        continue;

                if (cfg->cpi_alg == CPI_ALG_VLAN)
                        rssi++;
                else if (cfg->cpi_alg == CPI_ALG_VLAN16)
                        rssi = ((cpi - cpi_base) & 0xe) >> 1;
                else if (cfg->cpi_alg == CPI_ALG_DIFF)
                        rssi = ((cpi - cpi_base) & 0x38) >> 3;
        }
        nic->cpi_base[cfg->vf_id] = cpi_base;
        nic->rssi_base[cfg->vf_id] = rssi_base;
}

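/*
 * Worked example for nic_config_cpi() with CPI_ALG_VLAN (cpi_count = 8):
 * the 3 VLAN PCP bits select one of the 8 consecutive CPIs starting at
 * cpi_base (padd = cpi % 8), and each CPI is steered to one of the VF's
 * receive queues through the RSSI entries programmed above. The exact
 * cpi_base and rssi_base values depend on the NIC_MAX_CPI_PER_LMAC and
 * NIC_RSSI_PER_BGX constants in nic.h.
 */
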
/* Responds to a VF with its RSS indirection table size */
static void
nic_send_rss_size(struct nicpf *nic, int vf)
{
        union nic_mbx mbx = {};
        uint64_t *msg;

        msg = (uint64_t *)&mbx;

        mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
        mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
        nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Receive side scaling configuration;
 * configures:
 * - RSS index
 * - indirection table, i.e. hash::RQ mapping
 * - number of hash bits to consider
 */
static void
nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
        uint8_t qset, idx;
        uint64_t cpi_cfg, cpi_base, rssi_base, rssi;
        uint64_t idx_addr;

        idx = 0;
        rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

        rssi = rssi_base;
        qset = cfg->vf_id;

        for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
                nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
                    (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
                idx++;
        }

        cpi_base = nic->cpi_base[cfg->vf_id];
        if (pass1_silicon(nic->dev))
                idx_addr = NIC_PF_CPI_0_2047_CFG;
        else
                idx_addr = NIC_PF_MPI_0_2047_CFG;
        cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
        cpi_cfg &= ~(0xFUL << 20);
        cpi_cfg |= (cfg->hash_bits << 20);
        nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/*
 * 4-level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void
nic_tx_channel_cfg(struct nicpf *nic, uint8_t vnic, struct sq_cfg_msg *sq)
{
        uint32_t bgx, lmac, chan;
        uint32_t tl2, tl3, tl4;
        uint32_t rr_quantum;
        uint8_t sq_idx = sq->sq_num;
        uint8_t pqs_vnic;

        pqs_vnic = vnic;

        bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
        lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

        /* 24 bytes for FCS, IPG and preamble */
        rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

        tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
        tl4 += sq_idx;

        tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
        nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
            ((uint64_t)vnic << NIC_QS_ID_SHIFT) |
            ((uint32_t)sq_idx << NIC_Q_NUM_SHIFT), tl4);
        nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
            ((uint64_t)vnic << 27) | ((uint32_t)sq_idx << 24) | rr_quantum);

        nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
        chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
        nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
        /* Enable backpressure on the channel */
        nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

        tl2 = tl3 >> 2;
        nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
        nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
        /* No priorities as of now */
        nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}

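/*
 * Checking the arithmetic above against the sample table: it implies
 * NIC_TL4_PER_LMAC == 8 and NIC_TL4_PER_BGX == 512, so for VNIC2-SQ0
 * (bgx 0, lmac 2) tl4 = 2 * 8 + 0 * 512 + 0 = 16, tl3 = 16 / 4 = 4 and
 * tl2 = 4 >> 2 = 1, matching the TL4(16) -> TL3[4] -> TL2[1] row.
 */
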
static int
nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
        int bgx_idx, lmac_idx;

        if (lbk->vf_id >= MAX_LMAC)
                return (ENXIO);

        bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
        lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

        bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

        return (0);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void
nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
        union nic_mbx mbx = {};
        uint64_t *mbx_data;
        uint64_t mbx_addr;
        uint64_t reg_addr;
        uint64_t cfg;
        int bgx, lmac;
        int i;
        int ret = 0;

        nic->mbx_lock[vf] = TRUE;

        mbx_addr = nic_get_mbx_addr(vf);
        mbx_data = (uint64_t *)&mbx;

        for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
                *mbx_data = nic_reg_read(nic, mbx_addr);
                mbx_data++;
                mbx_addr += sizeof(uint64_t);
        }

        switch (mbx.msg.msg) {
        case NIC_MBOX_MSG_READY:
                nic_mbx_send_ready(nic, vf);
                if (vf < MAX_LMAC) {
                        nic->link[vf] = 0;
                        nic->duplex[vf] = 0;
                        nic->speed[vf] = 0;
                }
                ret = 1;
                break;
        case NIC_MBOX_MSG_QS_CFG:
                reg_addr = NIC_PF_QSET_0_127_CFG |
                    (mbx.qs.num << NIC_QS_ID_SHIFT);
                cfg = mbx.qs.cfg;
                nic_reg_write(nic, reg_addr, cfg);
                break;
        case NIC_MBOX_MSG_RQ_CFG:
                reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
                    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
                    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
                nic_reg_write(nic, reg_addr, mbx.rq.cfg);
                break;
        case NIC_MBOX_MSG_RQ_BP_CFG:
                reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
                    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
                    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
                nic_reg_write(nic, reg_addr, mbx.rq.cfg);
                break;
        case NIC_MBOX_MSG_RQ_SW_SYNC:
                ret = nic_rcv_queue_sw_sync(nic);
                break;
        case NIC_MBOX_MSG_RQ_DROP_CFG:
                reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
                    (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
                    (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
                nic_reg_write(nic, reg_addr, mbx.rq.cfg);
                break;
        case NIC_MBOX_MSG_SQ_CFG:
                reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
                    (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
                    (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
                nic_reg_write(nic, reg_addr, mbx.sq.cfg);
                nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
                break;
        case NIC_MBOX_MSG_SET_MAC:
                lmac = mbx.mac.vf_id;
                bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
                lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
                bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
                break;
        case NIC_MBOX_MSG_SET_MAX_FRS:
                ret = nic_update_hw_frs(nic, mbx.frs.max_frs, mbx.frs.vf_id);
                break;
        case NIC_MBOX_MSG_CPI_CFG:
                nic_config_cpi(nic, &mbx.cpi_cfg);
                break;
        case NIC_MBOX_MSG_RSS_SIZE:
                nic_send_rss_size(nic, vf);
                goto unlock;
        case NIC_MBOX_MSG_RSS_CFG:
        case NIC_MBOX_MSG_RSS_CFG_CONT:	/* fall through */
                nic_config_rss(nic, &mbx.rss_cfg);
                break;
        case NIC_MBOX_MSG_CFG_DONE:
                /* Last message of VF config msg sequence */
                nic->vf_info[vf].vf_enabled = TRUE;
                goto unlock;
        case NIC_MBOX_MSG_SHUTDOWN:
                /* First msg in VF teardown sequence */
                nic->vf_info[vf].vf_enabled = FALSE;
                break;
        case NIC_MBOX_MSG_BGX_STATS:
                nic_get_bgx_stats(nic, &mbx.bgx_stats);
                goto unlock;
        case NIC_MBOX_MSG_LOOPBACK:
                ret = nic_config_loopback(nic, &mbx.lbk);
                break;
        default:
                device_printf(nic->dev,
                    "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
                break;
        }

        if (ret == 0)
                nic_mbx_send_ack(nic, vf);
        else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
                nic_mbx_send_nack(nic, vf);
unlock:
        nic->mbx_lock[vf] = FALSE;
}

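/*
 * Reply protocol of the dispatcher above: ret == 0 ends in an ACK and a
 * non-zero ret in a NACK, except for READY, which already sent its own
 * reply and uses ret = 1 only to suppress the ACK. RSS_SIZE, CFG_DONE
 * and BGX_STATS jump straight to unlock because they either answered
 * with a dedicated message or need no reply at all.
 */
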
static void
nic_mbx_intr_handler(struct nicpf *nic, int mbx)
{
        uint64_t intr;
        uint8_t vf, vf_per_mbx_reg = 64;

        intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
        for (vf = 0; vf < vf_per_mbx_reg; vf++) {
                if (intr & (1UL << vf)) {
                        nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
                        nic_clear_mbx_intr(nic, vf, mbx);
                }
        }
}

static void
nic_mbx0_intr_handler(void *arg)
{
        struct nicpf *nic = (struct nicpf *)arg;

        nic_mbx_intr_handler(nic, 0);
}

static void
nic_mbx1_intr_handler(void *arg)
{
        struct nicpf *nic = (struct nicpf *)arg;

        nic_mbx_intr_handler(nic, 1);
}

static int
nic_enable_msix(struct nicpf *nic)
{
        struct pci_devinfo *dinfo;
        int rid, count;
        int ret;

        dinfo = device_get_ivars(nic->dev);
        rid = dinfo->cfg.msix.msix_table_bar;
        nic->msix_table_res =
            bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (nic->msix_table_res == NULL) {
                device_printf(nic->dev,
                    "Could not allocate memory for MSI-X table\n");
                return (ENXIO);
        }

        count = nic->num_vec = NIC_PF_MSIX_VECTORS;

        ret = pci_alloc_msix(nic->dev, &count);
        if ((ret != 0) || (count != nic->num_vec)) {
                device_printf(nic->dev,
                    "Request for %d MSI-X vectors failed, error: %d\n",
                    nic->num_vec, ret);
                return (ret);
        }

        nic->msix_enabled = 1;
        return (0);
}

static void
nic_disable_msix(struct nicpf *nic)
{
        if (nic->msix_enabled) {
                pci_release_msi(nic->dev);
                nic->msix_enabled = 0;
                nic->num_vec = 0;
        }
}

static void
nic_free_all_interrupts(struct nicpf *nic)
{
        int irq;

        for (irq = 0; irq < nic->num_vec; irq++) {
                if (nic->msix_entries[irq].irq_res == NULL)
                        continue;
                if (nic->msix_entries[irq].handle != NULL) {
                        bus_teardown_intr(nic->dev,
                            nic->msix_entries[irq].irq_res,
                            nic->msix_entries[irq].handle);
                }

                bus_release_resource(nic->dev, SYS_RES_IRQ,
                    rman_get_rid(nic->msix_entries[irq].irq_res),
                    nic->msix_entries[irq].irq_res);
        }
}

static int
nic_register_interrupts(struct nicpf *nic)
{
        int irq, rid;
        int ret;

        /* Enable MSI-X */
        ret = nic_enable_msix(nic);
        if (ret != 0)
                return (ret);

        /* Register mailbox interrupt handlers */
        irq = NIC_PF_INTR_ID_MBOX0;
        rid = irq + 1;
        nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
            SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
        if (nic->msix_entries[irq].irq_res == NULL) {
                ret = ENXIO;
                goto fail;
        }
        ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
            (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx0_intr_handler, nic,
            &nic->msix_entries[irq].handle);
        if (ret != 0)
                goto fail;

        irq = NIC_PF_INTR_ID_MBOX1;
        rid = irq + 1;
        nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
            SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
        if (nic->msix_entries[irq].irq_res == NULL) {
                ret = ENXIO;
                goto fail;
        }
        ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
            (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx1_intr_handler, nic,
            &nic->msix_entries[irq].handle);
        if (ret != 0)
                goto fail;

        /* Enable mailbox interrupt */
        nic_enable_mbx_intr(nic);
        return (0);

fail:
        nic_free_all_interrupts(nic);
        return (ret);
}

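/*
 * Note on the rid arithmetic in nic_register_interrupts(): FreeBSD
 * numbers SYS_RES_IRQ rids for MSI/MSI-X vectors starting at 1, so
 * MSI-X vector 'irq' is allocated with rid = irq + 1 while the
 * msix_entries[] bookkeeping stays zero-based.
 */
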
static void
nic_unregister_interrupts(struct nicpf *nic)
{

        nic_free_all_interrupts(nic);
        nic_disable_msix(nic);
}

static int
nic_sriov_init(device_t dev, struct nicpf *nic)
{
#ifdef PCI_IOV
        nvlist_t *pf_schema, *vf_schema;
        int iov_pos;
        int err;
        uint16_t total_vf_cnt;

        err = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos);
        if (err != 0) {
                device_printf(dev,
                    "SR-IOV capability was not found in PCIe config space\n");
                return (err);
        }
        /* Fix up the number of enabled VFs */
        total_vf_cnt = pci_read_config(dev, iov_pos + PCIR_SRIOV_TOTAL_VFS, 2);
        if (total_vf_cnt == 0)
                return (ENXIO);

        /* Attach SR-IOV */
        pf_schema = pci_iov_schema_alloc_node();
        vf_schema = pci_iov_schema_alloc_node();
        pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
        /*
         * All VFs can change their MACs.
         * This flag will be ignored but we set it just for the record.
         */
        pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
            IOV_SCHEMA_HASDEFAULT, TRUE);

        err = pci_iov_attach(dev, pf_schema, vf_schema);
        if (err != 0) {
                device_printf(dev,
                    "Failed to initialize SR-IOV (error=%d)\n",
                    err);
                return (err);
        }
#endif
        return (0);
}

/*
 * Poll for BGX LMAC link status and update the corresponding VF
 * if there is a change. Valid only if the internal L2 switch is
 * not present; otherwise a VF link is always treated as up.
 */
static void
nic_poll_for_link(void *arg)
{
        union nic_mbx mbx = {};
        struct nicpf *nic;
        struct bgx_link_status link;
        uint8_t vf, bgx, lmac;

        nic = (struct nicpf *)arg;

        mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

        for (vf = 0; vf < nic->num_vf_en; vf++) {
                /* Poll only if VF is UP */
                if (!nic->vf_info[vf].vf_enabled)
                        continue;

                /* Get BGX, LMAC indices for the VF */
                bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
                lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
                /* Get interface link status */
                bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

                /* Inform VF only if link status changed */
                if (nic->link[vf] == link.link_up)
                        continue;

                if (!nic->mbx_lock[vf]) {
                        nic->link[vf] = link.link_up;
                        nic->duplex[vf] = link.duplex;
                        nic->speed[vf] = link.speed;

                        /* Send a mbox message to VF with current link status */
                        mbx.link_status.link_up = link.link_up;
                        mbx.link_status.duplex = link.duplex;
                        mbx.link_status.speed = link.speed;
                        nic_send_msg_to_vf(nic, vf, &mbx);
                }
        }
        callout_reset(&nic->check_link, hz * 2, nic_poll_for_link, nic);
}