Lines Matching +full:pcie +full:- +full:mirror

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
64 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
241 static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
253 /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
256 &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
261 &bxe_queue_count, 0, "Multi-Queue queue count");
288 /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
289 static int bxe_mrrs = -1;
291 &bxe_mrrs, 0, "PCIe maximum read request size");
298 /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
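
The MRRS tunable above uses the standard PCIe Max_Read_Request_Size encoding, where the byte count is 128 shifted left by the encoded value and -1 means leave the device default in place. A minimal sketch of that mapping (the helper name is illustrative):

    #include <stdio.h>

    /* Decode the bxe_mrrs tunable: -1 = auto, 0..3 = 128B << value. */
    static int
    mrrs_to_bytes(int mrrs)
    {
        if (mrrs < 0 || mrrs > 3)
            return (-1);            /* auto: keep the device default */
        return (128 << mrrs);       /* 0->128, 1->256, 2->512, 3->1024 */
    }

    int
    main(void)
    {
        for (int v = -1; v <= 3; v++)
            printf("bxe_mrrs=%d -> %d bytes\n", v, mrrs_to_bytes(v));
        return (0);
    }
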
756 * 1. "mirror" every bit in calc_crc32()
761 /* Mirror */ in calc_crc32()
763 shft = sizeof(crc32_result) * 8 - 1; in calc_crc32()
769 shft-- ; in calc_crc32()
772 /* temp[31-bit] = crc32_result[bit] */ in calc_crc32()
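
The matched lines above are the "mirror" step of calc_crc32(): the 32-bit result is bit-reversed with a shrinking-shift loop. A standalone sketch of the same reversal:

    #include <stdint.h>

    /* Reverse the bit order of a 32-bit CRC result (the "mirror" step). */
    static uint32_t
    mirror32(uint32_t crc32_result)
    {
        uint32_t temp = 0;
        int shft = sizeof(crc32_result) * 8 - 1;    /* 31 */

        for (int bit = 0; bit < 32; bit++) {
            /* temp[31 - bit] = crc32_result[bit] */
            if (crc32_result & (1U << bit))
                temp |= (1U << shft);
            shft--;
        }
        return (temp);
    }
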
860 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
872 dma->paddr = 0; in bxe_dma_map_addr()
873 dma->nseg = 0; in bxe_dma_map_addr()
874 BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error); in bxe_dma_map_addr()
876 dma->paddr = segs->ds_addr; in bxe_dma_map_addr()
877 dma->nseg = nseg; in bxe_dma_map_addr()
897 if (dma->size > 0) { in bxe_dma_alloc()
899 (unsigned long)dma->size); in bxe_dma_alloc()
904 dma->sc = sc; in bxe_dma_alloc()
905 dma->size = size; in bxe_dma_alloc()
906 snprintf(dma->msg, sizeof(dma->msg), "%s", msg); in bxe_dma_alloc()
908 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ in bxe_dma_alloc()
921 &dma->tag); /* returned dma tag */ in bxe_dma_alloc()
928 rc = bus_dmamem_alloc(dma->tag, in bxe_dma_alloc()
929 (void **)&dma->vaddr, in bxe_dma_alloc()
931 &dma->map); in bxe_dma_alloc()
934 bus_dma_tag_destroy(dma->tag); in bxe_dma_alloc()
939 rc = bus_dmamap_load(dma->tag, in bxe_dma_alloc()
940 dma->map, in bxe_dma_alloc()
941 dma->vaddr, in bxe_dma_alloc()
948 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); in bxe_dma_alloc()
949 bus_dma_tag_destroy(dma->tag); in bxe_dma_alloc()
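
bxe_dma_alloc() acquires its three busdma resources in order (tag, memory, mapping), and the error paths above unwind only what was already acquired, in reverse order. A minimal sketch of that discipline with stand-in steps (the step/undo names are illustrative, not the busdma API):

    /* Each step can fail; unwind in reverse order of acquisition. */
    static int step_tag(void)  { return (0); }
    static int step_mem(void)  { return (0); }
    static int step_load(void) { return (0); }
    static void undo_mem(void) { /* bus_dmamem_free analogue */ }
    static void undo_tag(void) { /* bus_dma_tag_destroy analogue */ }

    static int
    alloc_all(void)
    {
        if (step_tag() != 0)
            return (1);             /* nothing to unwind yet */
        if (step_mem() != 0) {
            undo_tag();
            return (1);
        }
        if (step_load() != 0) {
            undo_mem();             /* reverse order: memory first... */
            undo_tag();             /* ...then the tag */
            return (1);
        }
        return (0);
    }
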
961 if (dma->size > 0) { in bxe_dma_free()
962 DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL")); in bxe_dma_free()
964 bus_dmamap_sync(dma->tag, dma->map, in bxe_dma_free()
966 bus_dmamap_unload(dma->tag, dma->map); in bxe_dma_free()
967 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); in bxe_dma_free()
968 bus_dma_tag_destroy(dma->tag); in bxe_dma_free()
984 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); in bxe_reg_wr_ind()
985 pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4); in bxe_reg_wr_ind()
986 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); in bxe_reg_wr_ind()
995 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); in bxe_reg_rd_ind()
996 val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4); in bxe_reg_rd_ind()
997 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); in bxe_reg_rd_ind()
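
bxe_reg_wr_ind()/bxe_reg_rd_ind() above implement a classic indirect register window: latch the target offset into an address register in PCI config space, move the data through a paired data register, then clear the address register so a later stray config access cannot hit the device. A self-contained sketch against a fake config space (the register offsets are placeholders, not the real PCICFG_* values):

    #include <stdint.h>

    #define CFG_GRC_ADDRESS 0x78        /* placeholder offsets */
    #define CFG_GRC_DATA    0x7c

    static uint32_t cfg_space[64];      /* fake PCI config space */
    static uint32_t grc[1024];          /* fake device register file */

    static void
    cfg_write(uint32_t off, uint32_t val)
    {
        if (off == CFG_GRC_DATA)        /* data lands at the latched address */
            grc[cfg_space[CFG_GRC_ADDRESS / 4] / 4] = val;
        else
            cfg_space[off / 4] = val;
    }

    static uint32_t
    cfg_read(uint32_t off)
    {
        if (off == CFG_GRC_DATA)
            return (grc[cfg_space[CFG_GRC_ADDRESS / 4] / 4]);
        return (cfg_space[off / 4]);
    }

    static void
    reg_wr_ind(uint32_t addr, uint32_t val)
    {
        cfg_write(CFG_GRC_ADDRESS, addr);
        cfg_write(CFG_GRC_DATA, val);
        cfg_write(CFG_GRC_ADDRESS, 0);  /* close the window */
    }

    static uint32_t
    reg_rd_ind(uint32_t addr)
    {
        uint32_t val;

        cfg_write(CFG_GRC_ADDRESS, addr);
        val = cfg_read(CFG_GRC_DATA);
        cfg_write(CFG_GRC_ADDRESS, 0);
        return (val);
    }
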
1016 return (-1); in bxe_acquire_hw_lock()
1023 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); in bxe_acquire_hw_lock()
1031 return (-1); in bxe_acquire_hw_lock()
1046 return (-1); in bxe_acquire_hw_lock()
1062 return (-1); in bxe_release_hw_lock()
1069 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); in bxe_release_hw_lock()
1077 return (-1); in bxe_release_hw_lock()
1141 return (-1); in bxe_acquire_nvram_lock()
1177 return (-1); in bxe_release_nvram_lock()
1241 rc = -1; in bxe_nvram_read_dword()
1250 * converting to big-endian will do the work in bxe_nvram_read_dword()
1258 if (rc == -1) { in bxe_nvram_read_dword()
1280 return (-1); in bxe_nvram_read()
1283 if ((offset + buf_size) > sc->devinfo.flash_size) { in bxe_nvram_read()
1286 offset, buf_size, sc->devinfo.flash_size); in bxe_nvram_read()
1287 return (-1); in bxe_nvram_read()
1308 buf_size -= sizeof(uint32_t); in bxe_nvram_read()
1356 rc = -1; in bxe_nvram_write_dword()
1366 if (rc == -1) { in bxe_nvram_write_dword()
1388 if ((offset + buf_size) > sc->devinfo.flash_size) { in bxe_nvram_write1()
1391 offset, buf_size, sc->devinfo.flash_size); in bxe_nvram_write1()
1392 return (-1); in bxe_nvram_write1()
1445 return (-1); in bxe_nvram_write()
1452 if ((offset + buf_size) > sc->devinfo.flash_size) { in bxe_nvram_write()
1455 offset, buf_size, sc->devinfo.flash_size); in bxe_nvram_write()
1456 return (-1); in bxe_nvram_write()
1471 if (written_so_far == (buf_size - sizeof(uint32_t))) { in bxe_nvram_write()
1571 dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type, in bxe_prep_dmae_with_comp()
1575 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp)); in bxe_prep_dmae_with_comp()
1576 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp)); in bxe_prep_dmae_with_comp()
1577 dmae->comp_val = DMAE_COMP_VAL; in bxe_prep_dmae_with_comp()
1601 (sc->recovery_state != BXE_RECOVERY_DONE && in bxe_issue_dmae_with_comp()
1602 sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) { in bxe_issue_dmae_with_comp()
1604 *wb_comp, sc->recovery_state); in bxe_issue_dmae_with_comp()
1609 timeout--; in bxe_issue_dmae_with_comp()
1615 *wb_comp, sc->recovery_state); in bxe_issue_dmae_with_comp()
1635 if (!sc->dmae_ready) { in bxe_read_dmae()
1672 if (!sc->dmae_ready) { in bxe_write_dmae()
1715 len -= dmae_wr_max; in bxe_write_dmae_phys_len()
1730 cxt->ustorm_ag_context.cdu_usage = in bxe_set_ctx_validation()
1734 cxt->xstorm_ag_context.cdu_reserved = in bxe_set_ctx_validation()
1863 return (-1); in bxe_set_spio()
1873 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); in bxe_set_spio()
1880 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); in bxe_set_spio()
1887 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); in bxe_set_spio()
1919 return (-1); in bxe_gpio_read()
1947 return (-1); in bxe_gpio_write()
1958 "Set GPIO %d (shift %d) -> output low\n", in bxe_gpio_write()
1967 "Set GPIO %d (shift %d) -> output high\n", in bxe_gpio_write()
1976 "Set GPIO %d (shift %d) -> input\n", in bxe_gpio_write()
2011 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); in bxe_gpio_mult_write()
2017 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); in bxe_gpio_mult_write()
2023 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); in bxe_gpio_mult_write()
2032 return (-1); in bxe_gpio_mult_write()
2059 return (-1); in bxe_gpio_int_write()
2070 "Clear GPIO INT %d (shift %d) -> output low\n", in bxe_gpio_int_write()
2079 "Set GPIO INT %d (shift %d) -> output high\n", in bxe_gpio_int_write()
2151 seq = ++sc->fw_seq; in elink_cb_fw_command()
2173 /* Ruh-roh! */ in elink_cb_fw_command()
2271 * RCQ of the multi-queue/RSS connection being initialized.
2287 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2305 * Used for connection offload. Completes on the RCQ of the multi-queue
2319 struct eth_spe *next_spe = sc->spq_prod_bd; in bxe_sp_get_next()
2321 if (sc->spq_prod_bd == sc->spq_last_bd) { in bxe_sp_get_next()
2323 sc->spq_prod_bd = sc->spq; in bxe_sp_get_next()
2324 sc->spq_prod_idx = 0; in bxe_sp_get_next()
2326 sc->spq_prod_bd++; in bxe_sp_get_next()
2327 sc->spq_prod_idx++; in bxe_sp_get_next()
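
bxe_sp_get_next() hands back the current slow-path queue entry and then advances the producer, wrapping both the BD pointer and the index at the last element. A minimal ring-producer sketch of the same shape (entry type and ring size are illustrative):

    #include <stdint.h>

    #define SPQ_ENTRIES 64

    struct spq {
        uint64_t  ring[SPQ_ENTRIES];
        uint64_t *prod_bd;              /* next free entry */
        uint16_t  prod_idx;
    };

    /* Return the next free entry, then advance (and wrap) the producer. */
    static uint64_t *
    spq_get_next(struct spq *q)
    {
        uint64_t *next = q->prod_bd;

        if (q->prod_bd == &q->ring[SPQ_ENTRIES - 1]) {
            q->prod_bd  = q->ring;      /* wrap back to the base */
            q->prod_idx = 0;
        } else {
            q->prod_bd++;
            q->prod_idx++;
        }
        return (next);
    }
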
2347 sc->spq_prod_idx); in bxe_sp_prod_update()
2349 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, in bxe_sp_prod_update()
2354 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2377 * bxe_sp_post - place a single command on an SP ring
2407 if (!atomic_load_acq_long(&sc->eq_spq_left)) { in bxe_sp_post()
2410 return (-1); in bxe_sp_post()
2413 if (!atomic_load_acq_long(&sc->cq_spq_left)) { in bxe_sp_post()
2416 return (-1); in bxe_sp_post()
2423 spe->hdr.conn_and_cmd_data = in bxe_sp_post()
2432 spe->hdr.type = htole16(type); in bxe_sp_post()
2434 spe->data.update_data_addr.hi = htole32(data_hi); in bxe_sp_post()
2435 spe->data.update_data_addr.lo = htole32(data_lo); in bxe_sp_post()
2443 atomic_subtract_acq_long(&sc->eq_spq_left, 1); in bxe_sp_post()
2445 atomic_subtract_acq_long(&sc->cq_spq_left, 1); in bxe_sp_post()
2448 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); in bxe_sp_post()
2449 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", in bxe_sp_post()
2453 sc->spq_prod_idx, in bxe_sp_post()
2454 (uint32_t)U64_HI(sc->spq_dma.paddr), in bxe_sp_post()
2455 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), in bxe_sp_post()
2462 atomic_load_acq_long(&sc->cq_spq_left), in bxe_sp_post()
2463 atomic_load_acq_long(&sc->eq_spq_left)); in bxe_sp_post()
2472 * bxe_debug_print_ind_table - prints the indirection table configuration.
2504 while (t->bxe_name != NULL) { in bxe_probe()
2505 if ((vid == t->bxe_vid) && (did == t->bxe_did) && in bxe_probe()
2506 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && in bxe_probe()
2507 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { in bxe_probe()
2509 "%s (%c%d) BXE v:%s", t->bxe_name, in bxe_probe()
2526 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), in bxe_init_mutexes()
2527 "bxe%d_core_lock", sc->unit); in bxe_init_mutexes()
2528 sx_init(&sc->core_sx, sc->core_sx_name); in bxe_init_mutexes()
2530 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), in bxe_init_mutexes()
2531 "bxe%d_core_lock", sc->unit); in bxe_init_mutexes()
2532 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); in bxe_init_mutexes()
2535 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), in bxe_init_mutexes()
2536 "bxe%d_sp_lock", sc->unit); in bxe_init_mutexes()
2537 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); in bxe_init_mutexes()
2539 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), in bxe_init_mutexes()
2540 "bxe%d_dmae_lock", sc->unit); in bxe_init_mutexes()
2541 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); in bxe_init_mutexes()
2543 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), in bxe_init_mutexes()
2544 "bxe%d_phy_lock", sc->unit); in bxe_init_mutexes()
2545 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); in bxe_init_mutexes()
2547 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), in bxe_init_mutexes()
2548 "bxe%d_fwmb_lock", sc->unit); in bxe_init_mutexes()
2549 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); in bxe_init_mutexes()
2551 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), in bxe_init_mutexes()
2552 "bxe%d_print_lock", sc->unit); in bxe_init_mutexes()
2553 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); in bxe_init_mutexes()
2555 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), in bxe_init_mutexes()
2556 "bxe%d_stats_lock", sc->unit); in bxe_init_mutexes()
2557 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); in bxe_init_mutexes()
2559 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), in bxe_init_mutexes()
2560 "bxe%d_mcast_lock", sc->unit); in bxe_init_mutexes()
2561 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); in bxe_init_mutexes()
2568 sx_destroy(&sc->core_sx); in bxe_release_mutexes()
2570 if (mtx_initialized(&sc->core_mtx)) { in bxe_release_mutexes()
2571 mtx_destroy(&sc->core_mtx); in bxe_release_mutexes()
2575 if (mtx_initialized(&sc->sp_mtx)) { in bxe_release_mutexes()
2576 mtx_destroy(&sc->sp_mtx); in bxe_release_mutexes()
2579 if (mtx_initialized(&sc->dmae_mtx)) { in bxe_release_mutexes()
2580 mtx_destroy(&sc->dmae_mtx); in bxe_release_mutexes()
2583 if (mtx_initialized(&sc->port.phy_mtx)) { in bxe_release_mutexes()
2584 mtx_destroy(&sc->port.phy_mtx); in bxe_release_mutexes()
2587 if (mtx_initialized(&sc->fwmb_mtx)) { in bxe_release_mutexes()
2588 mtx_destroy(&sc->fwmb_mtx); in bxe_release_mutexes()
2591 if (mtx_initialized(&sc->print_mtx)) { in bxe_release_mutexes()
2592 mtx_destroy(&sc->print_mtx); in bxe_release_mutexes()
2595 if (mtx_initialized(&sc->stats_mtx)) { in bxe_release_mutexes()
2596 mtx_destroy(&sc->stats_mtx); in bxe_release_mutexes()
2599 if (mtx_initialized(&sc->mcast_mtx)) { in bxe_release_mutexes()
2600 mtx_destroy(&sc->mcast_mtx); in bxe_release_mutexes()
2607 if_t ifp = sc->ifp; in bxe_tx_disable()
2619 sc->fw_drv_pulse_wr_seq); in bxe_drv_pulse()
2630 prod = fp->tx_bd_prod; in bxe_tx_avail()
2631 cons = fp->tx_bd_cons; in bxe_tx_avail()
2635 return (int16_t)(sc->tx_ring_size) - used; in bxe_tx_avail()
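
bxe_tx_avail() computes the free BD count from the 16-bit producer/consumer pair; unsigned 16-bit subtraction gives the wrap-safe in-flight count, which is then subtracted from the ring size. A sketch of that arithmetic (assuming the elided lines compute `used` this way):

    #include <stdint.h>

    /* Free descriptors given 16-bit prod/cons that wrap modulo 2^16. */
    static int16_t
    tx_avail(uint16_t tx_ring_size, uint16_t prod, uint16_t cons)
    {
        uint16_t used = (uint16_t)(prod - cons);    /* wrap-safe distance */

        return ((int16_t)tx_ring_size - (int16_t)used);
    }
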
2644 hw_cons = le16toh(*fp->tx_cons_sb); in bxe_tx_queue_has_work()
2645 return (hw_cons != fp->tx_pkt_cons); in bxe_tx_queue_has_work()
2651 /* expand this for multi-cos if ever supported */ in bxe_has_tx_work()
2661 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); in bxe_has_rx_work()
2664 return (fp->rx_cq_cons != rx_cq_cons_sb); in bxe_has_rx_work()
2672 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); in bxe_sp_event()
2673 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); in bxe_sp_event()
2678 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); in bxe_sp_event()
2692 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); in bxe_sp_event()
2713 command, fp->index); in bxe_sp_event()
2718 q_obj->complete_cmd(sc, q_obj, drv_cmd)) { in bxe_sp_event()
2720 * q_obj->complete_cmd() failure means that this was in bxe_sp_event()
2723 * In this case we don't want to increase the sc->spq_left in bxe_sp_event()
2731 atomic_add_acq_long(&sc->cq_spq_left, 1); in bxe_sp_event()
2733 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", in bxe_sp_event()
2734 atomic_load_acq_long(&sc->cq_spq_left)); in bxe_sp_event()
2740 * the current aggregation queue as in-progress.
2754 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; in bxe_tpa_start()
2759 fp->index, queue, cons, prod); in bxe_tpa_start()
2765 fp->index, queue, max_agg_queues)); in bxe_tpa_start()
2767 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), in bxe_tpa_start()
2769 fp->index, queue)); in bxe_tpa_start()
2772 tmp_bd = tpa_info->bd; in bxe_tpa_start()
2780 fp->index, queue, cons, prod); in bxe_tpa_start()
2789 tpa_info->state = BXE_TPA_STATE_START; in bxe_tpa_start()
2790 tpa_info->placement_offset = cqe->placement_offset; in bxe_tpa_start()
2791 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); in bxe_tpa_start()
2792 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); in bxe_tpa_start()
2793 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); in bxe_tpa_start()
2795 fp->rx_tpa_queue_used |= (1 << queue); in bxe_tpa_start()
2802 index = (sc->max_rx_bufs != RX_BD_USABLE) ? in bxe_tpa_start()
2806 tpa_info->bd = fp->rx_mbuf_chain[cons]; in bxe_tpa_start()
2810 rx_buf = &fp->rx_mbuf_chain[cons]; in bxe_tpa_start()
2812 if (rx_buf->m_map != NULL) { in bxe_tpa_start()
2813 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, in bxe_tpa_start()
2815 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); in bxe_tpa_start()
2823 fp->rx_mbuf_chain[cons].m = NULL; in bxe_tpa_start()
2827 fp->rx_mbuf_chain[index] = tmp_bd; in bxe_tpa_start()
2830 rx_bd = &fp->rx_chain[index]; in bxe_tpa_start()
2831 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); in bxe_tpa_start()
2832 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); in bxe_tpa_start()
2856 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; in bxe_fill_frag_mbuf()
2860 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); in bxe_fill_frag_mbuf()
2869 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), in bxe_fill_frag_mbuf()
2870 tpa_info->len_on_bd, frag_size); in bxe_fill_frag_mbuf()
2884 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); in bxe_fill_frag_mbuf()
2894 fp->index, queue, i, j, sge_idx, frag_size, frag_len); in bxe_fill_frag_mbuf()
2896 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; in bxe_fill_frag_mbuf()
2906 m_frag->m_len = frag_len; in bxe_fill_frag_mbuf()
2910 fp->eth_q_stats.mbuf_alloc_sge--; in bxe_fill_frag_mbuf()
2913 m->m_pkthdr.len += frag_len; in bxe_fill_frag_mbuf()
2914 frag_size -= frag_len; in bxe_fill_frag_mbuf()
2919 fp->index, queue, frag_size); in bxe_fill_frag_mbuf()
2930 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; in bxe_clear_sge_mask_next_elems()
2933 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); in bxe_clear_sge_mask_next_elems()
2934 idx--; in bxe_clear_sge_mask_next_elems()
2943 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); in bxe_init_sge_ring_bit_mask()
2957 uint16_t last_max = fp->last_max_sge; in bxe_update_last_max_sge()
2960 fp->last_max_sge = idx; in bxe_update_last_max_sge()
2980 BIT_VEC64_CLEAR_BIT(fp->sge_mask, in bxe_update_sge_prod()
2981 RX_SGE(le16toh(cqe->sgl[i]))); in bxe_update_sge_prod()
2985 "fp[%02d] fp_cqe->sgl[%d] = %d\n", in bxe_update_sge_prod()
2986 fp->index, sge_len - 1, in bxe_update_sge_prod()
2987 le16toh(cqe->sgl[sge_len - 1])); in bxe_update_sge_prod()
2991 le16toh(cqe->sgl[sge_len - 1])); in bxe_update_sge_prod()
2993 last_max = RX_SGE(fp->last_max_sge); in bxe_update_sge_prod()
2995 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; in bxe_update_sge_prod()
3004 if (__predict_true(fp->sge_mask[i])) { in bxe_update_sge_prod()
3008 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; in bxe_update_sge_prod()
3013 fp->rx_sge_prod += delta; in bxe_update_sge_prod()
3014 /* clear page-end entries */ in bxe_update_sge_prod()
3019 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", in bxe_update_sge_prod()
3020 fp->index, fp->last_max_sge, fp->rx_sge_prod); in bxe_update_sge_prod()
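
The SGE mask manipulated above is a bit vector built from 64-bit elements; BIT_VEC64_ELEM_SHIFT splits a bit index into an element index and a bit-within-element offset. A sketch of that layout, assuming the conventional 6-bit split (helper names are illustrative):

    #include <stdint.h>

    #define ELEM_SHIFT 6                /* 64 bits per element */
    #define ELEM_MASK  0x3f

    static inline void
    bitvec64_clear(uint64_t *vec, unsigned int idx)
    {
        vec[idx >> ELEM_SHIFT] &= ~(1ULL << (idx & ELEM_MASK));
    }

    static inline int
    bitvec64_test(const uint64_t *vec, unsigned int idx)
    {
        return ((vec[idx >> ELEM_SHIFT] >> (idx & ELEM_MASK)) & 1);
    }
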
3037 if_t ifp = sc->ifp; in bxe_tpa_stop()
3043 fp->index, queue, tpa_info->placement_offset, in bxe_tpa_stop()
3044 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); in bxe_tpa_stop()
3046 m = tpa_info->bd.m; in bxe_tpa_stop()
3052 fp->eth_q_stats.rx_soft_errors++; in bxe_tpa_stop()
3057 m_adj(m, tpa_info->placement_offset); in bxe_tpa_stop()
3058 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; in bxe_tpa_stop()
3061 fp->eth_q_stats.rx_ofld_frames_csum_ip++; in bxe_tpa_stop()
3062 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; in bxe_tpa_stop()
3063 m->m_pkthdr.csum_data = 0xffff; in bxe_tpa_stop()
3064 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | in bxe_tpa_stop()
3073 fp->eth_q_stats.rx_soft_errors++; in bxe_tpa_stop()
3076 if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) { in bxe_tpa_stop()
3077 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; in bxe_tpa_stop()
3078 m->m_flags |= M_VLANTAG; in bxe_tpa_stop()
3085 m->m_pkthdr.flowid = fp->index; in bxe_tpa_stop()
3089 fp->eth_q_stats.rx_tpa_pkts++; in bxe_tpa_stop()
3096 fp->eth_q_stats.mbuf_alloc_tpa--; in bxe_tpa_stop()
3100 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; in bxe_tpa_stop()
3101 fp->rx_tpa_queue_used &= ~(1 << queue); in bxe_tpa_stop()
3120 m->m_len = lenonbd; in bxe_service_rxsgl()
3122 frag_size = len - lenonbd; in bxe_service_rxsgl()
3126 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); in bxe_service_rxsgl()
3128 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; in bxe_service_rxsgl()
3130 m_frag->m_len = frag_len; in bxe_service_rxsgl()
3138 fp->eth_q_stats.mbuf_alloc_sge--; in bxe_service_rxsgl()
3143 frag_size -= frag_len; in bxe_service_rxsgl()
3146 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); in bxe_service_rxsgl()
3155 if_t ifp = sc->ifp; in bxe_rxeof()
3164 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); in bxe_rxeof()
3169 bd_cons = fp->rx_bd_cons; in bxe_rxeof()
3170 bd_prod = fp->rx_bd_prod; in bxe_rxeof()
3172 sw_cq_cons = fp->rx_cq_cons; in bxe_rxeof()
3173 sw_cq_prod = fp->rx_cq_prod; in bxe_rxeof()
3183 fp->index, hw_cq_cons, sw_cq_cons); in bxe_rxeof()
3198 cqe = &fp->rcq_chain[comp_ring_cons]; in bxe_rxeof()
3199 cqe_fp = &cqe->fast_path_cqe; in bxe_rxeof()
3200 cqe_fp_flags = cqe_fp->type_error_flags; in bxe_rxeof()
3207 fp->index, in bxe_rxeof()
3214 cqe_fp->status_flags, in bxe_rxeof()
3215 le32toh(cqe_fp->rss_hash_result), in bxe_rxeof()
3216 le16toh(cqe_fp->vlan_tag), in bxe_rxeof()
3217 le16toh(cqe_fp->pkt_len_or_gro_seg_len), in bxe_rxeof()
3218 le16toh(cqe_fp->len_on_bd)); in bxe_rxeof()
3226 rx_buf = &fp->rx_mbuf_chain[bd_cons]; in bxe_rxeof()
3234 bxe_tpa_start(sc, fp, cqe_fp->queue_index, in bxe_rxeof()
3243 queue = cqe->end_agg_cqe.queue_index; in bxe_rxeof()
3244 tpa_info = &fp->rx_tpa_info[queue]; in bxe_rxeof()
3247 fp->index, queue); in bxe_rxeof()
3249 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - in bxe_rxeof()
3250 tpa_info->len_on_bd); in bxe_rxeof()
3254 &cqe->end_agg_cqe, comp_ring_cons); in bxe_rxeof()
3256 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); in bxe_rxeof()
3267 fp->eth_q_stats.rx_soft_errors++; in bxe_rxeof()
3271 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); in bxe_rxeof()
3272 lenonbd = le16toh(cqe_fp->len_on_bd); in bxe_rxeof()
3273 pad = cqe_fp->placement_offset; in bxe_rxeof()
3275 m = rx_buf->m; in bxe_rxeof()
3279 bd_cons, fp->index); in bxe_rxeof()
3291 (sc->max_rx_bufs != RX_BD_USABLE) ? in bxe_rxeof()
3299 fp->index, rc); in bxe_rxeof()
3300 fp->eth_q_stats.rx_soft_errors++; in bxe_rxeof()
3302 if (sc->max_rx_bufs != RX_BD_USABLE) { in bxe_rxeof()
3304 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, in bxe_rxeof()
3313 fp->eth_q_stats.mbuf_alloc_rx--; in bxe_rxeof()
3317 m->m_pkthdr.len = m->m_len = len; in bxe_rxeof()
3320 fp->eth_q_stats.rx_bxe_service_rxsgl++; in bxe_rxeof()
3324 fp->eth_q_stats.rx_jumbo_sge_pkts++; in bxe_rxeof()
3326 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; in bxe_rxeof()
3333 m->m_pkthdr.csum_flags = 0; in bxe_rxeof()
3338 if (!(cqe->fast_path_cqe.status_flags & in bxe_rxeof()
3340 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; in bxe_rxeof()
3343 fp->eth_q_stats.rx_hw_csum_errors++; in bxe_rxeof()
3345 fp->eth_q_stats.rx_ofld_frames_csum_ip++; in bxe_rxeof()
3346 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; in bxe_rxeof()
3351 if (!(cqe->fast_path_cqe.status_flags & in bxe_rxeof()
3355 fp->eth_q_stats.rx_hw_csum_errors++; in bxe_rxeof()
3357 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; in bxe_rxeof()
3358 m->m_pkthdr.csum_data = 0xFFFF; in bxe_rxeof()
3359 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | in bxe_rxeof()
3366 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) { in bxe_rxeof()
3367 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; in bxe_rxeof()
3368 m->m_flags |= M_VLANTAG; in bxe_rxeof()
3372 m->m_pkthdr.flowid = fp->index; in bxe_rxeof()
3397 if (rx_pkts == sc->rx_budget) { in bxe_rxeof()
3398 fp->eth_q_stats.rx_budget_reached++; in bxe_rxeof()
3403 fp->rx_bd_cons = bd_cons; in bxe_rxeof()
3404 fp->rx_bd_prod = bd_prod_fw; in bxe_rxeof()
3405 fp->rx_cq_cons = sw_cq_cons; in bxe_rxeof()
3406 fp->rx_cq_prod = sw_cq_prod; in bxe_rxeof()
3409 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); in bxe_rxeof()
3411 fp->eth_q_stats.rx_pkts += rx_pkts; in bxe_rxeof()
3412 fp->eth_q_stats.rx_calls++; in bxe_rxeof()
3424 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; in bxe_free_tx_pkt()
3426 uint16_t bd_idx = TX_BD(tx_buf->first_bd); in bxe_free_tx_pkt()
3430 /* unmap the mbuf from non-paged memory */ in bxe_free_tx_pkt()
3431 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); in bxe_free_tx_pkt()
3433 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; in bxe_free_tx_pkt()
3434 nbd = le16toh(tx_start_bd->nbd) - 1; in bxe_free_tx_pkt()
3436 new_cons = (tx_buf->first_bd + nbd); in bxe_free_tx_pkt()
3439 if (__predict_true(tx_buf->m != NULL)) { in bxe_free_tx_pkt()
3440 m_freem(tx_buf->m); in bxe_free_tx_pkt()
3441 fp->eth_q_stats.mbuf_alloc_tx--; in bxe_free_tx_pkt()
3443 fp->eth_q_stats.tx_chain_lost_mbuf++; in bxe_free_tx_pkt()
3446 tx_buf->m = NULL; in bxe_free_tx_pkt()
3447 tx_buf->first_bd = 0; in bxe_free_tx_pkt()
3459 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { in bxe_watchdog()
3464 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); in bxe_watchdog()
3469 &sc->sp_err_timeout_task, hz/10); in bxe_watchdog()
3471 return (-1); in bxe_watchdog()
3479 if_t ifp = sc->ifp; in bxe_txeof()
3485 bd_cons = fp->tx_bd_cons; in bxe_txeof()
3486 hw_cons = le16toh(*fp->tx_cons_sb); in bxe_txeof()
3487 sw_cons = fp->tx_pkt_cons; in bxe_txeof()
3494 fp->index, hw_cons, sw_cons, pkt_cons); in bxe_txeof()
3501 fp->tx_pkt_cons = sw_cons; in bxe_txeof()
3502 fp->tx_bd_cons = bd_cons; in bxe_txeof()
3506 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); in bxe_txeof()
3518 if (fp->tx_pkt_prod != fp->tx_pkt_cons) { in bxe_txeof()
3520 fp->watchdog_timer = BXE_TX_TIMEOUT; in bxe_txeof()
3524 fp->watchdog_timer = 0; in bxe_txeof()
3536 for (i = 0; i < sc->num_queues; i++) { in bxe_drain_tx_queues()
3537 fp = &sc->fp[i]; in bxe_drain_tx_queues()
3554 count--; in bxe_drain_tx_queues()
3580 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); in bxe_del_all_macs()
3656 return (-1); in bxe_fill_accept_flags()
3684 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; in bxe_set_q_rx_mode()
3687 ramrod_param.pstate = &sc->sp_state; in bxe_set_q_rx_mode()
3693 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); in bxe_set_q_rx_mode()
3705 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id, in bxe_set_q_rx_mode()
3721 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, in bxe_set_storm_rx_mode()
3731 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, in bxe_set_storm_rx_mode()
3743 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", in bxe_nic_load_no_mcp()
3748 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", in bxe_nic_load_no_mcp()
3767 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", in bxe_nic_unload_no_mcp()
3770 load_count[path][0]--; in bxe_nic_unload_no_mcp()
3771 load_count[path][1 + port]--; in bxe_nic_unload_no_mcp()
3772 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", in bxe_nic_unload_no_mcp()
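
With no management firmware (MCP), the driver tracks loads itself in the per-path load_count table declared earlier: slot 0 is the common count and slots 1/2 count each port, so load increments the pair and unload decrements it. A sketch of that bookkeeping, with the usual last-one-out interpretation of the remaining counts (the reset-kind names are illustrative):

    /* load_count[path][0] = common, [1] = port0, [2] = port1 */
    static int load_count[2][3];

    enum reset_kind { RESET_COMMON, RESET_PORT, RESET_FUNCTION };

    static void
    no_mcp_load(int path, int port)
    {
        load_count[path][0]++;
        load_count[path][1 + port]++;
    }

    static enum reset_kind
    no_mcp_unload(int path, int port)
    {
        load_count[path][0]--;
        load_count[path][1 + port]--;

        if (load_count[path][0] == 0)
            return (RESET_COMMON);      /* last function on the path */
        if (load_count[path][1 + port] == 0)
            return (RESET_PORT);        /* last function on this port */
        return (RESET_FUNCTION);
    }
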
3827 if (!sc->port.pmf) { in bxe_func_wait_started()
3835 * 2. Sync SP queue - this guarantees us that attention handling started in bxe_func_wait_started()
3839 * pending bit of transaction from STARTED-->TX_STOPPED, if we already in bxe_func_wait_started()
3841 * State will return to STARTED after completion of TX_STOPPED-->STARTED in bxe_func_wait_started()
3850 while (ecore_func_get_state(sc, &sc->func_obj) != in bxe_func_wait_started()
3851 ECORE_F_STATE_STARTED && tout--) { in bxe_func_wait_started()
3855 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { in bxe_func_wait_started()
3863 "Forcing STARTED-->TX_STOPPED-->STARTED\n"); in bxe_func_wait_started()
3865 func_params.f_obj = &sc->func_obj; in bxe_func_wait_started()
3868 /* STARTED-->TX_STOPPED */ in bxe_func_wait_started()
3872 /* TX_STOPPED-->STARTED */ in bxe_func_wait_started()
3884 struct bxe_fastpath *fp = &sc->fp[index]; in bxe_stop_queue()
3888 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); in bxe_stop_queue()
3890 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; in bxe_stop_queue()
3927 while (tout--) { in bxe_wait_sp_comp()
3929 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { in bxe_wait_sp_comp()
3938 tmp = atomic_load_acq_long(&sc->sp_state); in bxe_wait_sp_comp()
3957 func_params.f_obj = &sc->func_obj; in bxe_func_stop()
3986 func_params.f_obj = &sc->func_obj; in bxe_reset_hw()
4026 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); in bxe_chip_cleanup()
4032 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); in bxe_chip_cleanup()
4050 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { in bxe_chip_cleanup()
4051 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); in bxe_chip_cleanup()
4057 rparam.mcast_obj = &sc->mcast_obj; in bxe_chip_cleanup()
4087 for (i = 0; i < sc->num_queues; i++) { in bxe_chip_cleanup()
4094 * If SP settings didn't get completed so far - something in bxe_chip_cleanup()
4156 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; in bxe_squeeze_objects()
4168 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, in bxe_squeeze_objects()
4177 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, in bxe_squeeze_objects()
4185 rparam.mcast_obj = &sc->mcast_obj; in bxe_squeeze_objects()
4219 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); in bxe_nic_unload()
4221 for (i = 0; i < sc->num_queues; i++) { in bxe_nic_unload()
4224 fp = &sc->fp[i]; in bxe_nic_unload()
4225 fp->watchdog_timer = 0; in bxe_nic_unload()
4239 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE && in bxe_nic_unload()
4240 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) { in bxe_nic_unload()
4251 sc->recovery_state = BXE_RECOVERY_DONE; in bxe_nic_unload()
4252 sc->is_leader = 0; in bxe_nic_unload()
4258 " state = 0x%x\n", sc->recovery_state, sc->state); in bxe_nic_unload()
4259 return (-1); in bxe_nic_unload()
4264 * did not complete successfully - all resources are released. in bxe_nic_unload()
4266 if ((sc->state == BXE_STATE_CLOSED) || in bxe_nic_unload()
4267 (sc->state == BXE_STATE_ERROR)) { in bxe_nic_unload()
4271 sc->state = BXE_STATE_CLOSING_WAITING_HALT; in bxe_nic_unload()
4277 sc->rx_mode = BXE_RX_MODE_NONE; in bxe_nic_unload()
4280 if (IS_PF(sc) && !sc->grcdump_done) { in bxe_nic_unload()
4282 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; in bxe_nic_unload()
4300 if (!sc->grcdump_done) in bxe_nic_unload()
4336 sc->sp_state = 0; in bxe_nic_unload()
4338 sc->port.pmf = 0; in bxe_nic_unload()
4348 sc->state = BXE_STATE_CLOSED; in bxe_nic_unload()
4351 * Check if there are pending parity attentions. If there are - set in bxe_nic_unload()
4389 ifm = &sc->ifmedia; in bxe_ifmedia_update()
4392 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { in bxe_ifmedia_update()
4396 switch (IFM_SUBTYPE(ifm->ifm_media)) { in bxe_ifmedia_update()
4406 IFM_SUBTYPE(ifm->ifm_media)); in bxe_ifmedia_update()
4427 ifmr->ifm_status = IFM_AVALID; in bxe_ifmedia_status()
4430 ifmr->ifm_active = IFM_ETHER; in bxe_ifmedia_status()
4434 ifmr->ifm_active |= IFM_NONE; in bxe_ifmedia_status()
4437 __func__, sc->link_vars.link_up); in bxe_ifmedia_status()
4442 if (sc->link_vars.link_up) { in bxe_ifmedia_status()
4443 ifmr->ifm_status |= IFM_ACTIVE; in bxe_ifmedia_status()
4444 ifmr->ifm_active |= IFM_FDX; in bxe_ifmedia_status()
4446 ifmr->ifm_active |= IFM_NONE; in bxe_ifmedia_status()
4452 ifmr->ifm_active |= sc->media; in bxe_ifmedia_status()
4461 long work = atomic_load_acq_long(&sc->chip_tq_flags); in bxe_handle_chip_tq()
4467 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { in bxe_handle_chip_tq()
4500 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); in bxe_ioctl()
4501 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); in bxe_ioctl()
4507 ifr->ifr_mtu); in bxe_ioctl()
4509 if (sc->mtu == ifr->ifr_mtu) { in bxe_ioctl()
4514 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { in bxe_ioctl()
4515 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", in bxe_ioctl()
4516 ifr->ifr_mtu, mtu_min, mtu_max); in bxe_ioctl()
4521 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, in bxe_ioctl()
4522 (unsigned long)ifr->ifr_mtu); in bxe_ioctl()
4525 (unsigned long)ifr->ifr_mtu); in bxe_ioctl()
4526 XXX - Not sure why it needs to be atomic in bxe_ioctl()
4528 if_setmtu(ifp, ifr->ifr_mtu); in bxe_ioctl()
4542 } else if(sc->state != BXE_STATE_DISABLED) { in bxe_ioctl()
4572 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); in bxe_ioctl()
4681 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); in bxe_ioctl()
4691 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { in bxe_ioctl()
4693 "Re-initializing hardware from IOCTL change\n"); in bxe_ioctl()
4712 if (!(sc->debug & DBG_MBUF)) { in bxe_dump_mbuf()
4725 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); in bxe_dump_mbuf()
4727 if (m->m_flags & M_PKTHDR) { in bxe_dump_mbuf()
4729 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", in bxe_dump_mbuf()
4730 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, in bxe_dump_mbuf()
4731 (int)m->m_pkthdr.csum_flags, CSUM_BITS); in bxe_dump_mbuf()
4734 if (m->m_flags & M_EXT) { in bxe_dump_mbuf()
4735 switch (m->m_ext.ext_type) { in bxe_dump_mbuf()
4751 "%02d: - m_ext: %p ext_size=%d type=%s\n", in bxe_dump_mbuf()
4752 i, m->m_ext.ext_buf, m->m_ext.ext_size, type); in bxe_dump_mbuf()
4759 m = m->m_next; in bxe_dump_mbuf()
4766 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4768 * The headers come in a separate bd in FreeBSD so 13-3=10. in bxe_chktso_window()
4783 num_wnds = nsegs - wnd_size; in bxe_chktso_window()
4784 lso_mss = htole16(m->m_pkthdr.tso_segsz); in bxe_chktso_window()
4802 /* subtract the first mbuf->m_len of the last wndw(-header) */ in bxe_chktso_window()
4803 wnd_sum -= htole16(segs[wnd_idx+1].ds_len); in bxe_chktso_window()
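
The comment above states the constraint directly: of the 13 BDs the chip accepts per packet, 3 are reserved, so every window of 10 consecutive data segments must carry at least one full MSS. A standalone sketch of that sliding-window test over segment lengths (simplified: the driver also special-cases the header BD):

    #include <stdint.h>

    #define WND_SIZE 10                 /* 13 total bds - 3 bds = 10 */

    /* Return 0 if every WND_SIZE-segment window sums to >= mss, else -1. */
    static int
    chktso_window(const uint16_t *seg_len, int nsegs, uint16_t mss)
    {
        int num_wnds = nsegs - WND_SIZE;
        int32_t wnd_sum = 0;

        if (num_wnds < 0)               /* short chains always fit */
            return (0);

        for (int i = 0; i < WND_SIZE; i++)
            wnd_sum += seg_len[i];

        for (int wnd = 0; ; wnd++) {
            if (wnd_sum < mss)
                return (-1);
            if (wnd == num_wnds)
                break;
            /* slide: drop the leading segment, add the trailing one */
            wnd_sum -= seg_len[wnd];
            wnd_sum += seg_len[wnd + WND_SIZE];
        }
        return (0);
    }
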
4827 if (m->m_pkthdr.csum_flags == CSUM_IP) { in bxe_set_pbd_csum_e2()
4836 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { in bxe_set_pbd_csum_e2()
4838 proto = ntohs(eh->evl_proto); in bxe_set_pbd_csum_e2()
4841 proto = ntohs(eh->evl_encap_proto); in bxe_set_pbd_csum_e2()
4847 ip4 = (m->m_len < sizeof(struct ip)) ? in bxe_set_pbd_csum_e2()
4848 (struct ip *)m->m_next->m_data : in bxe_set_pbd_csum_e2()
4849 (struct ip *)(m->m_data + e_hlen); in bxe_set_pbd_csum_e2()
4850 /* ip_hl is number of 32-bit words */ in bxe_set_pbd_csum_e2()
4851 ip_hlen = (ip4->ip_hl << 2); in bxe_set_pbd_csum_e2()
4856 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? in bxe_set_pbd_csum_e2()
4857 (struct ip6_hdr *)m->m_next->m_data : in bxe_set_pbd_csum_e2()
4858 (struct ip6_hdr *)(m->m_data + e_hlen); in bxe_set_pbd_csum_e2()
4876 if (m->m_pkthdr.csum_flags & (CSUM_TCP | in bxe_set_pbd_csum_e2()
4879 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; in bxe_set_pbd_csum_e2()
4881 /* th_off is number of 32-bit words */ in bxe_set_pbd_csum_e2()
4882 *parsing_data |= ((th->th_off << in bxe_set_pbd_csum_e2()
4885 return (l4_off + (th->th_off << 2)); /* entire header length */ in bxe_set_pbd_csum_e2()
4886 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | in bxe_set_pbd_csum_e2()
4888 fp->eth_q_stats.tx_ofld_frames_csum_udp++; in bxe_set_pbd_csum_e2()
4917 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { in bxe_set_pbd_csum()
4919 proto = ntohs(eh->evl_proto); in bxe_set_pbd_csum()
4922 proto = ntohs(eh->evl_encap_proto); in bxe_set_pbd_csum()
4928 ip4 = (m->m_len < sizeof(struct ip)) ? in bxe_set_pbd_csum()
4929 (struct ip *)m->m_next->m_data : in bxe_set_pbd_csum()
4930 (struct ip *)(m->m_data + e_hlen); in bxe_set_pbd_csum()
4931 /* ip_hl is number of 32-bit words */ in bxe_set_pbd_csum()
4932 ip_hlen = (ip4->ip_hl << 1); in bxe_set_pbd_csum()
4937 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? in bxe_set_pbd_csum()
4938 (struct ip6_hdr *)m->m_next->m_data : in bxe_set_pbd_csum()
4939 (struct ip6_hdr *)(m->m_data + e_hlen); in bxe_set_pbd_csum()
4953 if (m->m_flags & M_VLANTAG) { in bxe_set_pbd_csum()
4954 pbd->global_data = in bxe_set_pbd_csum()
4957 pbd->global_data = htole16(hlen); in bxe_set_pbd_csum()
4960 pbd->ip_hlen_w = ip_hlen; in bxe_set_pbd_csum()
4962 hlen += pbd->ip_hlen_w; in bxe_set_pbd_csum()
4966 if (m->m_pkthdr.csum_flags & (CSUM_TCP | in bxe_set_pbd_csum()
4970 /* th_off is number of 32-bit words */ in bxe_set_pbd_csum()
4971 hlen += (uint16_t)(th->th_off << 1); in bxe_set_pbd_csum()
4972 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | in bxe_set_pbd_csum()
4981 pbd->total_hlen_w = htole16(hlen); in bxe_set_pbd_csum()
4983 if (m->m_pkthdr.csum_flags & (CSUM_TCP | in bxe_set_pbd_csum()
4986 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; in bxe_set_pbd_csum()
4987 pbd->tcp_pseudo_csum = ntohs(th->th_sum); in bxe_set_pbd_csum()
4988 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | in bxe_set_pbd_csum()
4990 fp->eth_q_stats.tx_ofld_frames_csum_udp++; in bxe_set_pbd_csum()
5009 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); in bxe_set_pbd_csum()
5016 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); in bxe_set_pbd_csum()
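
The UDP fix-up above leans on in_addword(), FreeBSD's 16-bit one's-complement addition; adding the complement of a term is how a value is backed out of a checksum, which is exactly the in_addword(uh->uh_sum, ~tmp_csum) pattern. A sketch of the primitive:

    #include <stdint.h>

    /* 16-bit one's-complement addition with end-around carry. */
    static uint16_t
    onescomp_add(uint16_t a, uint16_t b)
    {
        uint32_t sum = (uint32_t)a + b;

        return ((uint16_t)((sum & 0xffff) + (sum >> 16)));
    }

    /* Removing a term from a one's-complement sum = adding its complement. */
    static uint16_t
    onescomp_sub(uint16_t sum, uint16_t term)
    {
        return (onescomp_add(sum, (uint16_t)~term));
    }
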
5026 *parsing_data |= ((m->m_pkthdr.tso_segsz << in bxe_set_pbd_lso_e2()
5046 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? in bxe_set_pbd_lso()
5051 ip = (struct ip *)(m->m_data + e_hlen); in bxe_set_pbd_lso()
5052 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); in bxe_set_pbd_lso()
5054 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); in bxe_set_pbd_lso()
5055 pbd->tcp_send_seq = ntohl(th->th_seq); in bxe_set_pbd_lso()
5056 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); in bxe_set_pbd_lso()
5060 pbd->ip_id = ntohs(ip->ip_id); in bxe_set_pbd_lso()
5061 pbd->tcp_pseudo_csum = in bxe_set_pbd_lso()
5062 ntohs(in_pseudo(ip->ip_src.s_addr, in bxe_set_pbd_lso()
5063 ip->ip_dst.s_addr, in bxe_set_pbd_lso()
5067 pbd->tcp_pseudo_csum = in bxe_set_pbd_lso()
5068 ntohs(in_pseudo(&ip6->ip6_src, in bxe_set_pbd_lso()
5069 &ip6->ip6_dst, in bxe_set_pbd_lso()
5073 pbd->global_data |= in bxe_set_pbd_lso()
5112 sc = fp->sc; in bxe_tx_encap()
5123 pkt_prod = fp->tx_pkt_prod; in bxe_tx_encap()
5124 bd_prod = fp->tx_bd_prod; in bxe_tx_encap()
5129 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; in bxe_tx_encap()
5130 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, in bxe_tx_encap()
5131 tx_buf->m_map, m0, in bxe_tx_encap()
5136 fp->eth_q_stats.tx_dma_mapping_failure++; in bxe_tx_encap()
5142 fp->eth_q_stats.mbuf_defrag_attempts++; in bxe_tx_encap()
5145 fp->eth_q_stats.mbuf_defrag_failures++; in bxe_tx_encap()
5150 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, in bxe_tx_encap()
5151 tx_buf->m_map, m0, in bxe_tx_encap()
5154 fp->eth_q_stats.tx_dma_mapping_failure++; in bxe_tx_encap()
5173 fp->eth_q_stats.tx_hw_queue_full++; in bxe_tx_encap()
5174 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); in bxe_tx_encap()
5180 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < in bxe_tx_encap()
5181 (TX_BD_USABLE - tx_bd_avail))) { in bxe_tx_encap()
5182 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); in bxe_tx_encap()
5191 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { in bxe_tx_encap()
5195 fp->eth_q_stats.tx_window_violation_tso++; in bxe_tx_encap()
5198 fp->eth_q_stats.tx_window_violation_std++; in bxe_tx_encap()
5202 fp->eth_q_stats.mbuf_defrag_attempts++; in bxe_tx_encap()
5203 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); in bxe_tx_encap()
5207 fp->eth_q_stats.mbuf_defrag_failures++; in bxe_tx_encap()
5213 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, in bxe_tx_encap()
5214 tx_buf->m_map, m0, in bxe_tx_encap()
5217 fp->eth_q_stats.tx_dma_mapping_failure++; in bxe_tx_encap()
5222 if(m0->m_pkthdr.csum_flags & CSUM_TSO) { in bxe_tx_encap()
5228 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); in bxe_tx_encap()
5229 fp->eth_q_stats.nsegs_path1_errors++; in bxe_tx_encap()
5234 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); in bxe_tx_encap()
5235 fp->eth_q_stats.nsegs_path2_errors++; in bxe_tx_encap()
5250 fp->eth_q_stats.tx_soft_errors++; in bxe_tx_encap()
5251 fp->eth_q_stats.mbuf_alloc_tx--; in bxe_tx_encap()
5260 if (m0->m_flags & M_BCAST) { in bxe_tx_encap()
5262 } else if (m0->m_flags & M_MCAST) { in bxe_tx_encap()
5267 tx_buf->m = m0; in bxe_tx_encap()
5268 tx_buf->first_bd = fp->tx_bd_prod; in bxe_tx_encap()
5269 tx_buf->flags = 0; in bxe_tx_encap()
5272 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; in bxe_tx_encap()
5276 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); in bxe_tx_encap()
5278 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); in bxe_tx_encap()
5279 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); in bxe_tx_encap()
5280 tx_start_bd->nbytes = htole16(segs[0].ds_len); in bxe_tx_encap()
5281 total_pkt_size += tx_start_bd->nbytes; in bxe_tx_encap()
5282 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; in bxe_tx_encap()
5284 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); in bxe_tx_encap()
5288 tx_start_bd->nbd = htole16(nbds); in bxe_tx_encap()
5290 if (m0->m_flags & M_VLANTAG) { in bxe_tx_encap()
5291 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); in bxe_tx_encap()
5292 tx_start_bd->bd_flags.as_bitfield |= in bxe_tx_encap()
5299 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; in bxe_tx_encap()
5302 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); in bxe_tx_encap()
5312 if (m0->m_pkthdr.csum_flags) { in bxe_tx_encap()
5313 if (m0->m_pkthdr.csum_flags & CSUM_IP) { in bxe_tx_encap()
5314 fp->eth_q_stats.tx_ofld_frames_csum_ip++; in bxe_tx_encap()
5315 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; in bxe_tx_encap()
5318 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { in bxe_tx_encap()
5319 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | in bxe_tx_encap()
5321 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { in bxe_tx_encap()
5322 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | in bxe_tx_encap()
5325 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || in bxe_tx_encap()
5326 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { in bxe_tx_encap()
5327 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; in bxe_tx_encap()
5328 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { in bxe_tx_encap()
5329 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | in bxe_tx_encap()
5335 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; in bxe_tx_encap()
5338 if (m0->m_pkthdr.csum_flags) { in bxe_tx_encap()
5347 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; in bxe_tx_encap()
5350 if (m0->m_pkthdr.csum_flags) { in bxe_tx_encap()
5356 pbd_e1x->global_data |= htole16(global_data); in bxe_tx_encap()
5360 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { in bxe_tx_encap()
5361 fp->eth_q_stats.tx_ofld_frames_lso++; in bxe_tx_encap()
5362 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; in bxe_tx_encap()
5364 if (__predict_false(tx_start_bd->nbytes > hlen)) { in bxe_tx_encap()
5365 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; in bxe_tx_encap()
5369 tx_start_bd->nbd = htole16(nbds); in bxe_tx_encap()
5370 tx_start_bd->nbytes = htole16(hlen); in bxe_tx_encap()
5375 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; in bxe_tx_encap()
5376 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); in bxe_tx_encap()
5377 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); in bxe_tx_encap()
5378 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); in bxe_tx_encap()
5385 le16toh(tx_start_bd->nbytes), in bxe_tx_encap()
5386 le32toh(tx_start_bd->addr_hi), in bxe_tx_encap()
5387 le32toh(tx_start_bd->addr_lo), in bxe_tx_encap()
5399 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); in bxe_tx_encap()
5405 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; in bxe_tx_encap()
5406 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); in bxe_tx_encap()
5407 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); in bxe_tx_encap()
5408 tx_data_bd->nbytes = htole16(segs[i].ds_len); in bxe_tx_encap()
5412 total_pkt_size += tx_data_bd->nbytes; in bxe_tx_encap()
5418 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; in bxe_tx_encap()
5421 if (__predict_false(sc->debug & DBG_TX)) { in bxe_tx_encap()
5422 tmp_bd = tx_buf->first_bd; in bxe_tx_encap()
5431 le16toh(tx_start_bd->nbd), in bxe_tx_encap()
5432 le16toh(tx_start_bd->vlan_or_ethertype), in bxe_tx_encap()
5433 tx_start_bd->bd_flags.as_bitfield, in bxe_tx_encap()
5434 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); in bxe_tx_encap()
5438 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " in bxe_tx_encap()
5443 pbd_e1x->global_data, in bxe_tx_encap()
5444 pbd_e1x->ip_hlen_w, in bxe_tx_encap()
5445 pbd_e1x->ip_id, in bxe_tx_encap()
5446 pbd_e1x->lso_mss, in bxe_tx_encap()
5447 pbd_e1x->tcp_flags, in bxe_tx_encap()
5448 pbd_e1x->tcp_pseudo_csum, in bxe_tx_encap()
5449 pbd_e1x->tcp_send_seq, in bxe_tx_encap()
5450 le16toh(pbd_e1x->total_hlen_w)); in bxe_tx_encap()
5453 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " in bxe_tx_encap()
5457 pbd_e2->data.mac_addr.dst_hi, in bxe_tx_encap()
5458 pbd_e2->data.mac_addr.dst_mid, in bxe_tx_encap()
5459 pbd_e2->data.mac_addr.dst_lo, in bxe_tx_encap()
5460 pbd_e2->data.mac_addr.src_hi, in bxe_tx_encap()
5461 pbd_e2->data.mac_addr.src_mid, in bxe_tx_encap()
5462 pbd_e2->data.mac_addr.src_lo, in bxe_tx_encap()
5463 pbd_e2->parsing_data); in bxe_tx_encap()
5468 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; in bxe_tx_encap()
5470 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", in bxe_tx_encap()
5473 le16toh(tx_data_bd->nbytes), in bxe_tx_encap()
5474 le32toh(tx_data_bd->addr_hi), in bxe_tx_encap()
5475 le32toh(tx_data_bd->addr_lo)); in bxe_tx_encap()
5498 fp->tx_db.data.prod += nbds; in bxe_tx_encap()
5501 fp->tx_pkt_prod++; in bxe_tx_encap()
5502 fp->tx_bd_prod = bd_prod; in bxe_tx_encap()
5504 DOORBELL(sc, fp->index, fp->tx_db.raw); in bxe_tx_encap()
5506 fp->eth_q_stats.tx_pkts++; in bxe_tx_encap()
5509 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, in bxe_tx_encap()
5513 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, in bxe_tx_encap()
5543 fp->eth_q_stats.mbuf_alloc_tx++; in bxe_tx_start_locked()
5551 fp->eth_q_stats.tx_encap_failures++; in bxe_tx_start_locked()
5556 fp->eth_q_stats.mbuf_alloc_tx--; in bxe_tx_start_locked()
5557 fp->eth_q_stats.tx_queue_xoff++; in bxe_tx_start_locked()
5585 fp->watchdog_timer = BXE_TX_TIMEOUT; in bxe_tx_start_locked()
5589 /* Legacy (non-RSS) dispatch routine */
5603 if (!sc->link_vars.link_up) { in bxe_tx_start()
5608 fp = &sc->fp[0]; in bxe_tx_start()
5611 fp->eth_q_stats.tx_queue_full_return++; in bxe_tx_start()
5626 struct buf_ring *tx_br = fp->tx_br; in bxe_tx_mq_start_locked()
5635 if (sc->state != BXE_STATE_OPEN) { in bxe_tx_mq_start_locked()
5636 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; in bxe_tx_mq_start_locked()
5648 fp->eth_q_stats.tx_soft_errors++; in bxe_tx_mq_start_locked()
5653 if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { in bxe_tx_mq_start_locked()
5654 fp->eth_q_stats.tx_request_link_down_failures++; in bxe_tx_mq_start_locked()
5660 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { in bxe_tx_mq_start_locked()
5661 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; in bxe_tx_mq_start_locked()
5673 fp->eth_q_stats.bd_avail_too_less_failures++; in bxe_tx_mq_start_locked()
5682 fp->eth_q_stats.mbuf_alloc_tx++; in bxe_tx_mq_start_locked()
5691 fp->eth_q_stats.tx_encap_failures++; in bxe_tx_mq_start_locked()
5696 fp->eth_q_stats.mbuf_alloc_tx--; in bxe_tx_mq_start_locked()
5697 fp->eth_q_stats.tx_frames_deferred++; in bxe_tx_mq_start_locked()
5717 fp->watchdog_timer = BXE_TX_TIMEOUT; in bxe_tx_mq_start_locked()
5723 fp->eth_q_stats.tx_mq_not_empty++; in bxe_tx_mq_start_locked()
5724 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1); in bxe_tx_mq_start_locked()
5735 struct bxe_softc *sc = fp->sc; in bxe_tx_mq_start_deferred()
5736 if_t ifp = sc->ifp; in bxe_tx_mq_start_deferred()
5757 fp_index = (m->m_pkthdr.flowid % sc->num_queues); in bxe_tx_mq_start()
5759 fp = &sc->fp[fp_index]; in bxe_tx_mq_start()
5761 if (sc->state != BXE_STATE_OPEN) { in bxe_tx_mq_start()
5762 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; in bxe_tx_mq_start()
5770 rc = drbr_enqueue(ifp, fp->tx_br, m); in bxe_tx_mq_start()
5771 taskqueue_enqueue(fp->tq, &fp->tx_task); in bxe_tx_mq_start()
5785 for (i = 0; i < sc->num_queues; i++) { in bxe_mq_flush()
5786 fp = &sc->fp[i]; in bxe_mq_flush()
5788 if (fp->state != BXE_FP_STATE_IRQ) { in bxe_mq_flush()
5790 fp->index, fp->state); in bxe_mq_flush()
5794 if (fp->tx_br != NULL) { in bxe_mq_flush()
5795 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); in bxe_mq_flush()
5797 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { in bxe_mq_flush()
5820 struct ecore_ilt *ilt = sc->ilt; in bxe_ilt_set_info()
5823 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); in bxe_ilt_set_info()
5824 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); in bxe_ilt_set_info()
5827 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; in bxe_ilt_set_info()
5828 ilt_client->client_num = ILT_CLIENT_CDU; in bxe_ilt_set_info()
5829 ilt_client->page_size = CDU_ILT_PAGE_SZ; in bxe_ilt_set_info()
5830 ilt_client->flags = ILT_CLIENT_SKIP_MEM; in bxe_ilt_set_info()
5831 ilt_client->start = line; in bxe_ilt_set_info()
5838 ilt_client->end = (line - 1); in bxe_ilt_set_info()
5843 ilt_client->start, ilt_client->end, in bxe_ilt_set_info()
5844 ilt_client->page_size, in bxe_ilt_set_info()
5845 ilt_client->flags, in bxe_ilt_set_info()
5846 ilog2(ilt_client->page_size >> 12)); in bxe_ilt_set_info()
5849 if (QM_INIT(sc->qm_cid_count)) { in bxe_ilt_set_info()
5850 ilt_client = &ilt->clients[ILT_CLIENT_QM]; in bxe_ilt_set_info()
5851 ilt_client->client_num = ILT_CLIENT_QM; in bxe_ilt_set_info()
5852 ilt_client->page_size = QM_ILT_PAGE_SZ; in bxe_ilt_set_info()
5853 ilt_client->flags = 0; in bxe_ilt_set_info()
5854 ilt_client->start = line; in bxe_ilt_set_info()
5857 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, in bxe_ilt_set_info()
5860 ilt_client->end = (line - 1); in bxe_ilt_set_info()
5865 ilt_client->start, ilt_client->end, in bxe_ilt_set_info()
5866 ilt_client->page_size, ilt_client->flags, in bxe_ilt_set_info()
5867 ilog2(ilt_client->page_size >> 12)); in bxe_ilt_set_info()
5872 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; in bxe_ilt_set_info()
5873 ilt_client->client_num = ILT_CLIENT_SRC; in bxe_ilt_set_info()
5874 ilt_client->page_size = SRC_ILT_PAGE_SZ; in bxe_ilt_set_info()
5875 ilt_client->flags = 0; in bxe_ilt_set_info()
5876 ilt_client->start = line; in bxe_ilt_set_info()
5878 ilt_client->end = (line - 1); in bxe_ilt_set_info()
5883 ilt_client->start, ilt_client->end, in bxe_ilt_set_info()
5884 ilt_client->page_size, ilt_client->flags, in bxe_ilt_set_info()
5885 ilog2(ilt_client->page_size >> 12)); in bxe_ilt_set_info()
5888 ilt_client = &ilt->clients[ILT_CLIENT_TM]; in bxe_ilt_set_info()
5889 ilt_client->client_num = ILT_CLIENT_TM; in bxe_ilt_set_info()
5890 ilt_client->page_size = TM_ILT_PAGE_SZ; in bxe_ilt_set_info()
5891 ilt_client->flags = 0; in bxe_ilt_set_info()
5892 ilt_client->start = line; in bxe_ilt_set_info()
5894 ilt_client->end = (line - 1); in bxe_ilt_set_info()
5899 ilt_client->start, ilt_client->end, in bxe_ilt_set_info()
5900 ilt_client->page_size, ilt_client->flags, in bxe_ilt_set_info()
5901 ilog2(ilt_client->page_size >> 12)); in bxe_ilt_set_info()
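
bxe_ilt_set_info() walks one running line counter through every ILT client: a client's start is the current line, the counter advances by DIV_ROUND_UP(bytes, page_size), and end is one line short of the next client's start. A sketch of that accounting (the client table is illustrative):

    #include <stdio.h>

    #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

    struct ilt_client {
        const char  *name;
        unsigned int bytes;             /* memory the client needs */
        unsigned int page_size;
        unsigned int start, end;
    };

    static void
    ilt_assign_lines(struct ilt_client *c, int nclients, unsigned int line)
    {
        for (int i = 0; i < nclients; i++) {
            c[i].start = line;
            line += DIV_ROUND_UP(c[i].bytes, c[i].page_size);
            c[i].end = line - 1;
            printf("%s: lines %u-%u\n", c[i].name, c[i].start, c[i].end);
        }
    }
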
5913 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); in bxe_set_fp_rx_buf_size()
5915 for (i = 0; i < sc->num_queues; i++) { in bxe_set_fp_rx_buf_size()
5917 sc->fp[i].rx_buf_size = rx_buf_size; in bxe_set_fp_rx_buf_size()
5918 sc->fp[i].mbuf_alloc_size = MCLBYTES; in bxe_set_fp_rx_buf_size()
5920 sc->fp[i].rx_buf_size = rx_buf_size; in bxe_set_fp_rx_buf_size()
5921 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; in bxe_set_fp_rx_buf_size()
5923 sc->fp[i].rx_buf_size = MCLBYTES; in bxe_set_fp_rx_buf_size()
5924 sc->fp[i].mbuf_alloc_size = MCLBYTES; in bxe_set_fp_rx_buf_size()
5926 sc->fp[i].rx_buf_size = MJUMPAGESIZE; in bxe_set_fp_rx_buf_size()
5927 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; in bxe_set_fp_rx_buf_size()
5929 sc->fp[i].rx_buf_size = MCLBYTES; in bxe_set_fp_rx_buf_size()
5930 sc->fp[i].mbuf_alloc_size = MCLBYTES; in bxe_set_fp_rx_buf_size()
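
bxe_set_fp_rx_buf_size() sizes receive buffers as MTU plus alignment padding and Ethernet overhead, then picks the smallest mbuf cluster class that fits; jumbo frames that exceed a page-sized cluster are spread across the SGE chain instead. A simplified sketch of the class selection (thresholds are a plausible reading of the elided branches, not the verbatim driver logic):

    #define MCLBYTES     2048           /* placeholder values */
    #define MJUMPAGESIZE 4096

    static int
    pick_mbuf_class(int mtu)
    {
        int rx_buf_size = 2 /* IP align pad */ + 18 /* ETH overhead */ + mtu;

        return (rx_buf_size <= MCLBYTES ? MCLBYTES : MJUMPAGESIZE);
    }
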
5940 if ((sc->ilt = in bxe_alloc_ilt_mem()
5955 if ((sc->ilt->lines = in bxe_alloc_ilt_lines_mem()
5968 if (sc->ilt != NULL) { in bxe_free_ilt_mem()
5969 free(sc->ilt, M_BXE_ILT); in bxe_free_ilt_mem()
5970 sc->ilt = NULL; in bxe_free_ilt_mem()
5977 if (sc->ilt->lines != NULL) { in bxe_free_ilt_lines_mem()
5978 free(sc->ilt->lines, M_BXE_ILT); in bxe_free_ilt_lines_mem()
5979 sc->ilt->lines = NULL; in bxe_free_ilt_lines_mem()
5989 bxe_dma_free(sc, &sc->context[i].vcxt_dma); in bxe_free_mem()
5990 sc->context[i].vcxt = NULL; in bxe_free_mem()
5991 sc->context[i].size = 0; in bxe_free_mem()
6012 * 1. There can be multiple entities allocating memory for context - in bxe_alloc_mem()
6015 * 2. Since CDU page-size is not a single 4KB page (which is the case in bxe_alloc_mem()
6017 * allocation of sub-page-size in the last entry. in bxe_alloc_mem()
6024 sc->context[i].size = min(CDU_ILT_PAGE_SZ, in bxe_alloc_mem()
6025 (context_size - allocated)); in bxe_alloc_mem()
6027 if (bxe_dma_alloc(sc, sc->context[i].size, in bxe_alloc_mem()
6028 &sc->context[i].vcxt_dma, in bxe_alloc_mem()
6031 return (-1); in bxe_alloc_mem()
6034 sc->context[i].vcxt = in bxe_alloc_mem()
6035 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; in bxe_alloc_mem()
6037 allocated += sc->context[i].size; in bxe_alloc_mem()
6043 sc->ilt, sc->ilt->start_line, sc->ilt->lines); in bxe_alloc_mem()
6049 sc->ilt->clients[i].page_size, in bxe_alloc_mem()
6050 sc->ilt->clients[i].start, in bxe_alloc_mem()
6051 sc->ilt->clients[i].end, in bxe_alloc_mem()
6052 sc->ilt->clients[i].client_num, in bxe_alloc_mem()
6053 sc->ilt->clients[i].flags); in bxe_alloc_mem()
6059 return (-1); in bxe_alloc_mem()
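
The allocation comment above explains the split: total CDU context for the function is carved into ILT-page-sized chunks, with min() trimming the final, possibly partial chunk. A sketch of that loop (the chunk-size constant is illustrative):

    #include <stddef.h>

    #define ILT_PAGE_SZ 4096            /* illustrative chunk size */
    #define MAX_CHUNKS  64

    static size_t chunk_size[MAX_CHUNKS];

    /* Carve total_size into ILT_PAGE_SZ chunks; the last may be short. */
    static int
    carve_context(size_t total_size)
    {
        size_t allocated = 0;
        int i = 0;

        while (allocated < total_size && i < MAX_CHUNKS) {
            chunk_size[i] = (total_size - allocated < ILT_PAGE_SZ) ?
                (total_size - allocated) : ILT_PAGE_SZ;
            allocated += chunk_size[i];
            i++;
        }
        return (i);                     /* number of chunks used */
    }
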
6070 if (fp->rx_mbuf_tag == NULL) { in bxe_free_rx_bd_chain()
6076 if (fp->rx_mbuf_chain[i].m_map != NULL) { in bxe_free_rx_bd_chain()
6077 bus_dmamap_sync(fp->rx_mbuf_tag, in bxe_free_rx_bd_chain()
6078 fp->rx_mbuf_chain[i].m_map, in bxe_free_rx_bd_chain()
6080 bus_dmamap_unload(fp->rx_mbuf_tag, in bxe_free_rx_bd_chain()
6081 fp->rx_mbuf_chain[i].m_map); in bxe_free_rx_bd_chain()
6084 if (fp->rx_mbuf_chain[i].m != NULL) { in bxe_free_rx_bd_chain()
6085 m_freem(fp->rx_mbuf_chain[i].m); in bxe_free_rx_bd_chain()
6086 fp->rx_mbuf_chain[i].m = NULL; in bxe_free_rx_bd_chain()
6087 fp->eth_q_stats.mbuf_alloc_rx--; in bxe_free_rx_bd_chain()
6098 sc = fp->sc; in bxe_free_tpa_pool()
6100 if (fp->rx_mbuf_tag == NULL) { in bxe_free_tpa_pool()
6108 if (fp->rx_tpa_info[i].bd.m_map != NULL) { in bxe_free_tpa_pool()
6109 bus_dmamap_sync(fp->rx_mbuf_tag, in bxe_free_tpa_pool()
6110 fp->rx_tpa_info[i].bd.m_map, in bxe_free_tpa_pool()
6112 bus_dmamap_unload(fp->rx_mbuf_tag, in bxe_free_tpa_pool()
6113 fp->rx_tpa_info[i].bd.m_map); in bxe_free_tpa_pool()
6116 if (fp->rx_tpa_info[i].bd.m != NULL) { in bxe_free_tpa_pool()
6117 m_freem(fp->rx_tpa_info[i].bd.m); in bxe_free_tpa_pool()
6118 fp->rx_tpa_info[i].bd.m = NULL; in bxe_free_tpa_pool()
6119 fp->eth_q_stats.mbuf_alloc_tpa--; in bxe_free_tpa_pool()
6129 if (fp->rx_sge_mbuf_tag == NULL) { in bxe_free_sge_chain()
6135 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { in bxe_free_sge_chain()
6136 bus_dmamap_sync(fp->rx_sge_mbuf_tag, in bxe_free_sge_chain()
6137 fp->rx_sge_mbuf_chain[i].m_map, in bxe_free_sge_chain()
6139 bus_dmamap_unload(fp->rx_sge_mbuf_tag, in bxe_free_sge_chain()
6140 fp->rx_sge_mbuf_chain[i].m_map); in bxe_free_sge_chain()
6143 if (fp->rx_sge_mbuf_chain[i].m != NULL) { in bxe_free_sge_chain()
6144 m_freem(fp->rx_sge_mbuf_chain[i].m); in bxe_free_sge_chain()
6145 fp->rx_sge_mbuf_chain[i].m = NULL; in bxe_free_sge_chain()
6146 fp->eth_q_stats.mbuf_alloc_sge--; in bxe_free_sge_chain()
6157 for (i = 0; i < sc->num_queues; i++) { in bxe_free_fp_buffers()
6158 fp = &sc->fp[i]; in bxe_free_fp_buffers()
6160 if (fp->tx_br != NULL) { in bxe_free_fp_buffers()
6162 if (mtx_initialized(&fp->tx_mtx)) { in bxe_free_fp_buffers()
6166 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) in bxe_free_fp_buffers()
6177 if (fp->eth_q_stats.mbuf_alloc_rx != 0) { in bxe_free_fp_buffers()
6179 fp->eth_q_stats.mbuf_alloc_rx); in bxe_free_fp_buffers()
6182 if (fp->eth_q_stats.mbuf_alloc_sge != 0) { in bxe_free_fp_buffers()
6184 fp->eth_q_stats.mbuf_alloc_sge); in bxe_free_fp_buffers()
6187 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { in bxe_free_fp_buffers()
6189 fp->eth_q_stats.mbuf_alloc_tpa); in bxe_free_fp_buffers()
6192 if (fp->eth_q_stats.mbuf_alloc_tx != 0) { in bxe_free_fp_buffers()
6194 fp->eth_q_stats.mbuf_alloc_tx); in bxe_free_fp_buffers()
6216 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); in bxe_alloc_rx_bd_mbuf()
6218 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; in bxe_alloc_rx_bd_mbuf()
6222 fp->eth_q_stats.mbuf_alloc_rx++; in bxe_alloc_rx_bd_mbuf()
6225 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; in bxe_alloc_rx_bd_mbuf()
6227 /* map the mbuf into non-paged pool */ in bxe_alloc_rx_bd_mbuf()
6228 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, in bxe_alloc_rx_bd_mbuf()
6229 fp->rx_mbuf_spare_map, in bxe_alloc_rx_bd_mbuf()
6232 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; in bxe_alloc_rx_bd_mbuf()
6234 fp->eth_q_stats.mbuf_alloc_rx--; in bxe_alloc_rx_bd_mbuf()
6244 rx_buf = &fp->rx_mbuf_chain[prev_index]; in bxe_alloc_rx_bd_mbuf()
6246 if (rx_buf->m_map != NULL) { in bxe_alloc_rx_bd_mbuf()
6247 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, in bxe_alloc_rx_bd_mbuf()
6249 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); in bxe_alloc_rx_bd_mbuf()
6258 fp->rx_mbuf_chain[prev_index].m = NULL; in bxe_alloc_rx_bd_mbuf()
6261 rx_buf = &fp->rx_mbuf_chain[index]; in bxe_alloc_rx_bd_mbuf()
6263 if (rx_buf->m_map != NULL) { in bxe_alloc_rx_bd_mbuf()
6264 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, in bxe_alloc_rx_bd_mbuf()
6266 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); in bxe_alloc_rx_bd_mbuf()
6271 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; in bxe_alloc_rx_bd_mbuf()
6272 rx_buf->m_map = fp->rx_mbuf_spare_map; in bxe_alloc_rx_bd_mbuf()
6273 fp->rx_mbuf_spare_map = map; in bxe_alloc_rx_bd_mbuf()
6274 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, in bxe_alloc_rx_bd_mbuf()
6276 rx_buf->m = m; in bxe_alloc_rx_bd_mbuf()
6278 rx_bd = &fp->rx_chain[index]; in bxe_alloc_rx_bd_mbuf()
6279 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); in bxe_alloc_rx_bd_mbuf()
6280 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); in bxe_alloc_rx_bd_mbuf()
6289 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; in bxe_alloc_rx_tpa_mbuf()
6297 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); in bxe_alloc_rx_tpa_mbuf()
6299 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; in bxe_alloc_rx_tpa_mbuf()
6303 fp->eth_q_stats.mbuf_alloc_tpa++; in bxe_alloc_rx_tpa_mbuf()
6306 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; in bxe_alloc_rx_tpa_mbuf()
6308 /* map the mbuf into non-paged pool */ in bxe_alloc_rx_tpa_mbuf()
6309 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, in bxe_alloc_rx_tpa_mbuf()
6310 fp->rx_tpa_info_mbuf_spare_map, in bxe_alloc_rx_tpa_mbuf()
6313 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; in bxe_alloc_rx_tpa_mbuf()
6315 fp->eth_q_stats.mbuf_alloc_tpa--; in bxe_alloc_rx_tpa_mbuf()
6323 if (tpa_info->bd.m_map != NULL) { in bxe_alloc_rx_tpa_mbuf()
6324 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, in bxe_alloc_rx_tpa_mbuf()
6326 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); in bxe_alloc_rx_tpa_mbuf()
6330 map = tpa_info->bd.m_map; in bxe_alloc_rx_tpa_mbuf()
6331 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; in bxe_alloc_rx_tpa_mbuf()
6332 fp->rx_tpa_info_mbuf_spare_map = map; in bxe_alloc_rx_tpa_mbuf()
6333 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, in bxe_alloc_rx_tpa_mbuf()
6335 tpa_info->bd.m = m; in bxe_alloc_rx_tpa_mbuf()
6336 tpa_info->seg = segs[0]; in bxe_alloc_rx_tpa_mbuf()
6361 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; in bxe_alloc_rx_sge_mbuf()
6365 fp->eth_q_stats.mbuf_alloc_sge++; in bxe_alloc_rx_sge_mbuf()
6368 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; in bxe_alloc_rx_sge_mbuf()
6370 /* map the SGE mbuf into non-paged pool */ in bxe_alloc_rx_sge_mbuf()
6371 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, in bxe_alloc_rx_sge_mbuf()
6372 fp->rx_sge_mbuf_spare_map, in bxe_alloc_rx_sge_mbuf()
6375 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; in bxe_alloc_rx_sge_mbuf()
6377 fp->eth_q_stats.mbuf_alloc_sge--; in bxe_alloc_rx_sge_mbuf()
6384 sge_buf = &fp->rx_sge_mbuf_chain[index]; in bxe_alloc_rx_sge_mbuf()
6387 if (sge_buf->m_map != NULL) { in bxe_alloc_rx_sge_mbuf()
6388 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, in bxe_alloc_rx_sge_mbuf()
6390 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); in bxe_alloc_rx_sge_mbuf()
6394 map = sge_buf->m_map; in bxe_alloc_rx_sge_mbuf()
6395 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; in bxe_alloc_rx_sge_mbuf()
6396 fp->rx_sge_mbuf_spare_map = map; in bxe_alloc_rx_sge_mbuf()
6397 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, in bxe_alloc_rx_sge_mbuf()
6399 sge_buf->m = m; in bxe_alloc_rx_sge_mbuf()
6401 sge = &fp->rx_sge_chain[index]; in bxe_alloc_rx_sge_mbuf()
6402 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); in bxe_alloc_rx_sge_mbuf()
6403 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); in bxe_alloc_rx_sge_mbuf()
6416 for (i = 0; i < sc->num_queues; i++) { in bxe_alloc_fp_buffers()
6417 fp = &sc->fp[i]; in bxe_alloc_fp_buffers()
6420 fp->rx_bd_cons = 0; in bxe_alloc_fp_buffers()
6421 fp->rx_cq_cons = 0; in bxe_alloc_fp_buffers()
6424 for (j = 0; j < sc->max_rx_bufs; j++) { in bxe_alloc_fp_buffers()
6436 fp->rx_bd_prod = ring_prod; in bxe_alloc_fp_buffers()
6437 fp->rx_cq_prod = cqe_ring_prod; in bxe_alloc_fp_buffers()
6438 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; in bxe_alloc_fp_buffers()
6442 fp->tpa_enable = TRUE; in bxe_alloc_fp_buffers()
6450 fp->tpa_enable = FALSE; in bxe_alloc_fp_buffers()
6454 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; in bxe_alloc_fp_buffers()
6457 if (fp->tpa_enable) { in bxe_alloc_fp_buffers()
6465 fp->tpa_enable = FALSE; in bxe_alloc_fp_buffers()
6473 fp->rx_sge_prod = ring_prod; in bxe_alloc_fp_buffers()
6492 bxe_dma_free(sc, &sc->fw_stats_dma); in bxe_free_fw_stats_mem()
6494 sc->fw_stats_num = 0; in bxe_free_fw_stats_mem()
6496 sc->fw_stats_req_size = 0; in bxe_free_fw_stats_mem()
6497 sc->fw_stats_req = NULL; in bxe_free_fw_stats_mem()
6498 sc->fw_stats_req_mapping = 0; in bxe_free_fw_stats_mem()
6500 sc->fw_stats_data_size = 0; in bxe_free_fw_stats_mem()
6501 sc->fw_stats_data = NULL; in bxe_free_fw_stats_mem()
6502 sc->fw_stats_data_mapping = 0; in bxe_free_fw_stats_mem()
6518 sc->fw_stats_num = (2 + num_queue_stats); in bxe_alloc_fw_stats_mem()
6527 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + in bxe_alloc_fw_stats_mem()
6528 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); in bxe_alloc_fw_stats_mem()
6531 sc->fw_stats_num, num_groups); in bxe_alloc_fw_stats_mem()
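/*
 * Illustrative note (not driver code): the expression above is plain
 * integer ceiling division, num_groups = ceil(fw_stats_num / cmds).
 * Assuming STATS_QUERY_CMD_COUNT were 16:
 *
 *   fw_stats_num = 18 -> (18 / 16) + ((18 % 16) ? 1 : 0) = 2
 *   fw_stats_num = 16 -> (16 / 16) + ((16 % 16) ? 1 : 0) = 1
 *
 * i.e. one extra query group whenever the count does not fill whole
 * command slots.
 */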
6533 sc->fw_stats_req_size = in bxe_alloc_fw_stats_mem()
6539 * stats_counter holds per-STORM counters that are incremented when in bxe_alloc_fw_stats_mem()
6545 sc->fw_stats_data_size = in bxe_alloc_fw_stats_mem()
6552 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), in bxe_alloc_fw_stats_mem()
6553 &sc->fw_stats_dma, "fw stats") != 0) { in bxe_alloc_fw_stats_mem()
6555 return (-1); in bxe_alloc_fw_stats_mem()
6560 sc->fw_stats_req = in bxe_alloc_fw_stats_mem()
6561 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; in bxe_alloc_fw_stats_mem()
6562 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; in bxe_alloc_fw_stats_mem()
6564 sc->fw_stats_data = in bxe_alloc_fw_stats_mem()
6565 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + in bxe_alloc_fw_stats_mem()
6566 sc->fw_stats_req_size); in bxe_alloc_fw_stats_mem()
6567 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + in bxe_alloc_fw_stats_mem()
6568 sc->fw_stats_req_size); in bxe_alloc_fw_stats_mem()
6571 (uintmax_t)sc->fw_stats_req_mapping); in bxe_alloc_fw_stats_mem()
6574 (uintmax_t)sc->fw_stats_data_mapping); in bxe_alloc_fw_stats_mem()
6581 * 0-7 - Engine0 load counter.
6582 * 8-15 - Engine1 load counter.
6583 * 16 - Engine0 RESET_IN_PROGRESS bit.
6584 * 17 - Engine1 RESET_IN_PROGRESS bit.
6585 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6587 * 19 - Engine1 ONE_IS_LOADED.
6588 * 20 - Chip reset flow bit. When set, non-leader must wait for both engines
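/*
 * Illustrative sketch (assumed mask values, not driver code): decoding
 * the layout above would look roughly like:
 *
 *   uint32_t val     = REG_RD(sc, recovery_reg);
 *   uint8_t  e0_load = (val & 0x000000ff);        // bits 0-7
 *   uint8_t  e1_load = (val & 0x0000ff00) >> 8;   // bits 8-15
 *   uint8_t  e0_rst  = (val >> 16) & 0x1;         // bit 16
 *   uint8_t  e1_rst  = (val >> 17) & 0x1;         // bit 17
 */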
6773 sc->fw_seq = in bxe_nic_load_request()
6777 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); in bxe_nic_load_request()
6780 sc->fw_drv_pulse_wr_seq = in bxe_nic_load_request()
6785 sc->fw_drv_pulse_wr_seq); in bxe_nic_load_request()
6794 return (-1); in bxe_nic_load_request()
6800 return (-1); in bxe_nic_load_request()
6835 return (-1); in bxe_nic_load_analyze_req()
6853 * Barrier here for ordering between the writing to sc->port.pmf here in bxe_nic_load_pmf()
6856 sc->port.pmf = 1; in bxe_nic_load_pmf()
6859 sc->port.pmf = 0; in bxe_nic_load_pmf()
6862 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); in bxe_nic_load_pmf()
6901 sc->devinfo.mf_info.mf_config[vn] = in bxe_read_mf_cfg()
6905 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & in bxe_read_mf_cfg()
6908 sc->flags |= BXE_MF_FUNC_DIS; in bxe_read_mf_cfg()
6911 sc->flags &= ~BXE_MF_FUNC_DIS; in bxe_read_mf_cfg()
6932 return (-1); in bxe_acquire_alr()
6973 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); in bxe_fan_failure()
6974 schedule_delayed_work(&sc->sp_rtnl_task, 0); in bxe_fan_failure()
6990 BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags); in bxe_link_attn()
6991 elink_link_update(&sc->link_params, &sc->link_vars); in bxe_link_attn()
6993 if (sc->link_vars.link_up) { in bxe_link_attn()
6996 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { in bxe_link_attn()
6999 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { in bxe_link_attn()
7009 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { in bxe_link_attn()
7012 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); in bxe_link_attn()
7015 if (sc->state == BXE_STATE_OPEN) { in bxe_link_attn()
7019 fp = &sc->fp[i]; in bxe_link_attn()
7020 taskqueue_enqueue(fp->tq, &fp->tx_task); in bxe_link_attn()
7026 if (sc->link_vars.link_up && sc->link_vars.line_speed) { in bxe_link_attn()
7031 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); in bxe_link_attn()
7060 if (sc->attn_state & asserted) { in bxe_attn_int_asserted()
7079 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); in bxe_attn_int_asserted()
7080 sc->attn_state |= asserted; in bxe_attn_int_asserted()
7081 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); in bxe_attn_int_asserted()
7145 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_attn_int_asserted()
7153 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); in bxe_attn_int_asserted()
7160 * NIG mask. This loop should exit after 2-3 iterations max. in bxe_attn_int_asserted()
7162 if (sc->devinfo.int_block != INT_BLOCK_HC) { in bxe_attn_int_asserted()
7529 if(sc->state != BXE_STATE_OPEN) in bxe_chk_parity_attn()
7612 &sc->sp_err_timeout_task, hz/10); in bxe_attn_int_deasserted4()
7646 if (sc->link_vars.link_up) { in bxe_config_mf_bw()
7651 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); in bxe_config_mf_bw()
7664 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); in bxe_handle_eee_event()
7674 &sc->sp->drv_info_to_mcp.ether_stat; in bxe_drv_info_ether_stat()
7676 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, in bxe_drv_info_ether_stat()
7680 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, in bxe_drv_info_ether_stat()
7682 ether_stat->mac_local + MAC_PAD, in bxe_drv_info_ether_stat()
7685 ether_stat->mtu_size = sc->mtu; in bxe_drv_info_ether_stat()
7687 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; in bxe_drv_info_ether_stat()
7688 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { in bxe_drv_info_ether_stat()
7689 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; in bxe_drv_info_ether_stat()
7692 // XXX ether_stat->feature_flags |= ???; in bxe_drv_info_ether_stat()
7694 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; in bxe_drv_info_ether_stat()
7696 ether_stat->txq_size = sc->tx_ring_size; in bxe_drv_info_ether_stat()
7697 ether_stat->rxq_size = sc->rx_ring_size; in bxe_drv_info_ether_stat()
7706 /* if drv_info version supported by MFW doesn't match - send NACK */ in bxe_handle_drv_info_req()
7715 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); in bxe_handle_drv_info_req()
7724 /* if op code isn't supported - send NACK */ in bxe_handle_drv_info_req()
7750 * where the sc->flags can change so it is done without any in bxe_dcc_event()
7753 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { in bxe_dcc_event()
7755 sc->flags |= BXE_MF_FUNC_DIS; in bxe_dcc_event()
7759 sc->flags &= ~BXE_MF_FUNC_DIS; in bxe_dcc_event()
7783 sc->port.pmf = 1; in bxe_pmf_update()
7784 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); in bxe_pmf_update()
7788 * sc->port.pmf here and reading it from the bxe_periodic_task(). in bxe_pmf_update()
7799 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_pmf_update()
7924 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = in bxe_attn_int_deasserted3()
7937 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) in bxe_attn_int_deasserted3()
7943 if (sc->link_vars.periodic_flags & in bxe_attn_int_deasserted3()
7947 sc->link_vars.periodic_flags &= in bxe_attn_int_deasserted3()
7972 &sc->sp_err_timeout_task, hz/10); in bxe_attn_int_deasserted3()
7980 &sc->sp_err_timeout_task, hz/10); in bxe_attn_int_deasserted3()
7993 BLOGE(sc, "GRC time-out 0x%08x\n", val); in bxe_attn_int_deasserted3()
8025 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); in bxe_attn_int_deasserted2()
8034 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); in bxe_attn_int_deasserted2()
8053 * STS0 and STS1 - clear it in bxe_attn_int_deasserted2()
8101 &sc->sp_err_timeout_task, hz/10); in bxe_attn_int_deasserted2()
8141 &sc->sp_err_timeout_task, hz/10); in bxe_attn_int_deasserted1()
8165 elink_hw_reset_phy(&sc->link_params); in bxe_attn_int_deasserted0()
8169 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { in bxe_attn_int_deasserted0()
8171 elink_handle_module_detect_int(&sc->link_params); in bxe_attn_int_deasserted0()
8183 &sc->sp_err_timeout_task, hz/10); in bxe_attn_int_deasserted0()
8219 &sc->sp_err_timeout_task, hz/10); in bxe_attn_int_deasserted()
8239 group_mask = &sc->attn_group[index]; in bxe_attn_int_deasserted()
8243 group_mask->sig[0], group_mask->sig[1], in bxe_attn_int_deasserted()
8244 group_mask->sig[2], group_mask->sig[3], in bxe_attn_int_deasserted()
8245 group_mask->sig[4]); in bxe_attn_int_deasserted()
8247 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); in bxe_attn_int_deasserted()
8248 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); in bxe_attn_int_deasserted()
8249 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); in bxe_attn_int_deasserted()
8250 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); in bxe_attn_int_deasserted()
8251 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); in bxe_attn_int_deasserted()
8257 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_attn_int_deasserted()
8267 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); in bxe_attn_int_deasserted()
8270 if (~sc->attn_state & deasserted) { in bxe_attn_int_deasserted()
8289 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); in bxe_attn_int_deasserted()
8290 sc->attn_state &= ~deasserted; in bxe_attn_int_deasserted()
8291 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); in bxe_attn_int_deasserted()
8298 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); in bxe_attn_int()
8299 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); in bxe_attn_int()
8300 uint32_t attn_state = sc->attn_state; in bxe_attn_int()
8327 struct host_sp_status_block *def_sb = sc->def_sb; in bxe_update_dsb_idx()
8332 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { in bxe_update_dsb_idx()
8333 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; in bxe_update_dsb_idx()
8337 if (sc->def_idx != def_sb->sp_sb.running_index) { in bxe_update_dsb_idx()
8338 sc->def_idx = def_sb->sp_sb.running_index; in bxe_update_dsb_idx()
8352 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); in bxe_cid_to_q_obj()
8363 rparam.mcast_obj = &sc->mcast_obj; in bxe_handle_mcast_eqe()
8368 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); in bxe_handle_mcast_eqe()
8370 /* if there are pending mcast commands - send them */ in bxe_handle_mcast_eqe()
8371 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { in bxe_handle_mcast_eqe()
8388 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; in bxe_handle_classification_eqe()
8394 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { in bxe_handle_classification_eqe()
8397 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; in bxe_handle_classification_eqe()
8411 elem->message.data.eth_event.echo); in bxe_handle_classification_eqe()
8415 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); in bxe_handle_classification_eqe()
8428 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); in bxe_handle_rx_mode_eqe()
8432 &sc->sp_state)) { in bxe_handle_rx_mode_eqe()
8455 struct ecore_func_sp_obj *f_obj = &sc->func_obj; in bxe_eq_int()
8456 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; in bxe_eq_int()
8458 hw_cons = le16toh(*sc->eq_cons_sb); in bxe_eq_int()
8461 * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256. in bxe_eq_int()
8462 * when we get to the next-page we need to adjust so the loop in bxe_eq_int()
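/*
 * Illustrative sketch (assumed macro shape, not necessarily the
 * driver's exact definition): the consumer advance typically skips the
 * next-page descriptor at the end of each EQ page, e.g.
 *
 *   #define NEXT_EQ_IDX(x) \
 *       ((((x) & EQ_DESC_MAX_PAGE) == (EQ_DESC_MAX_PAGE - 1)) ? \
 *        ((x) + 2) : ((x) + 1))
 *
 * which is why hw_cons can reach 257 while sw_cons stops at 256.
 */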
8474 sw_cons = sc->eq_cons; in bxe_eq_int()
8475 sw_prod = sc->eq_prod; in bxe_eq_int()
8478 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); in bxe_eq_int()
8484 elem = &sc->eq[EQ_DESC(sw_cons)]; in bxe_eq_int()
8487 cid = SW_CID(elem->message.data.cfc_del_event.cid); in bxe_eq_int()
8488 opcode = elem->message.opcode; in bxe_eq_int()
8495 sc->stats_comp++); in bxe_eq_int()
8504 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { in bxe_eq_int()
8511 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { in bxe_eq_int()
8519 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { in bxe_eq_int()
8526 echo = elem->message.data.function_update_event.echo; in bxe_eq_int()
8529 if (f_obj->complete_cmd(sc, f_obj, in bxe_eq_int()
8542 if (q_obj->complete_cmd(sc, q_obj, in bxe_eq_int()
8550 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { in bxe_eq_int()
8557 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { in bxe_eq_int()
8563 switch (opcode | sc->state) { in bxe_eq_int()
8566 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; in bxe_eq_int()
8568 rss_raw->clear_pending(rss_raw); in bxe_eq_int()
8597 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", in bxe_eq_int()
8598 elem->message.opcode, sc->state); in bxe_eq_int()
8606 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); in bxe_eq_int()
8608 sc->eq_cons = sw_cons; in bxe_eq_int()
8609 sc->eq_prod = sw_prod; in bxe_eq_int()
8615 bxe_update_eq_prod(sc, sc->eq_prod); in bxe_eq_int()
8625 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); in bxe_handle_sp_tq()
8634 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); in bxe_handle_sp_tq()
8642 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); in bxe_handle_sp_tq()
8644 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, in bxe_handle_sp_tq()
8645 le16toh(sc->def_idx), IGU_INT_NOP, 1); in bxe_handle_sp_tq()
8655 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, in bxe_handle_sp_tq()
8656 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); in bxe_handle_sp_tq()
8673 struct bxe_softc *sc = fp->sc; in bxe_handle_fp_tq()
8677 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); in bxe_handle_fp_tq()
8686 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { in bxe_handle_fp_tq()
8695 /* fp->txdata[cos] */ in bxe_handle_fp_tq()
8708 taskqueue_enqueue(fp->tq, &fp->tq_task); in bxe_handle_fp_tq()
8712 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, in bxe_handle_fp_tq()
8713 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); in bxe_handle_fp_tq()
8719 struct bxe_softc *sc = fp->sc; in bxe_task_fp()
8723 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); in bxe_task_fp()
8729 /* fp->txdata[cos] */ in bxe_task_fp()
8742 taskqueue_enqueue(fp->tq, &fp->tq_task); in bxe_task_fp()
8753 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, in bxe_task_fp()
8754 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); in bxe_task_fp()
8772 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); in bxe_intr_legacy()
8776 * the bits returned from ack_int() are 0-15 in bxe_intr_legacy()
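/*
 * Illustrative decode (derived from the mask math below, assuming
 * CNIC_SUPPORT(sc) == 0): bit 0 covers the default/slowpath status
 * block and fastpath queue i maps to bit (1 + i):
 *
 *   fp[0] -> 0x2 << 0 = 0x2 (bit 1)
 *   fp[1] -> 0x2 << 1 = 0x4 (bit 2)
 *
 * so a returned status of 0x6 means "service fp[0] and fp[1]".
 */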
8794 fp = &sc->fp[i]; in bxe_intr_legacy()
8795 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); in bxe_intr_legacy()
8798 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bxe_intr_legacy()
8806 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bxe_intr_legacy()
8809 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); in bxe_intr_legacy()
8825 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); in bxe_intr_sp()
8828 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bxe_intr_sp()
8831 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); in bxe_intr_sp()
8839 struct bxe_softc *sc = fp->sc; in bxe_intr_fp()
8841 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); in bxe_intr_fp()
8844 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", in bxe_intr_fp()
8845 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); in bxe_intr_fp()
8848 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bxe_intr_fp()
8859 switch (sc->interrupt_mode) { in bxe_interrupt_free()
8862 if (sc->intr[0].resource != NULL) { in bxe_interrupt_free()
8863 bus_release_resource(sc->dev, in bxe_interrupt_free()
8865 sc->intr[0].rid, in bxe_interrupt_free()
8866 sc->intr[0].resource); in bxe_interrupt_free()
8870 for (i = 0; i < sc->intr_count; i++) { in bxe_interrupt_free()
8872 if (sc->intr[i].resource && sc->intr[i].rid) { in bxe_interrupt_free()
8873 bus_release_resource(sc->dev, in bxe_interrupt_free()
8875 sc->intr[i].rid, in bxe_interrupt_free()
8876 sc->intr[i].resource); in bxe_interrupt_free()
8879 pci_release_msi(sc->dev); in bxe_interrupt_free()
8882 for (i = 0; i < sc->intr_count; i++) { in bxe_interrupt_free()
8883 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); in bxe_interrupt_free()
8884 if (sc->intr[i].resource && sc->intr[i].rid) { in bxe_interrupt_free()
8885 bus_release_resource(sc->dev, in bxe_interrupt_free()
8887 sc->intr[i].rid, in bxe_interrupt_free()
8888 sc->intr[i].resource); in bxe_interrupt_free()
8891 pci_release_msi(sc->dev); in bxe_interrupt_free()
8926 /* get the number of available MSI/MSI-X interrupts from the OS */ in bxe_interrupt_alloc()
8927 if (sc->interrupt_mode > 0) { in bxe_interrupt_alloc()
8928 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { in bxe_interrupt_alloc()
8929 msix_count = pci_msix_count(sc->dev); in bxe_interrupt_alloc()
8932 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { in bxe_interrupt_alloc()
8933 msi_count = pci_msi_count(sc->dev); in bxe_interrupt_alloc()
8936 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", in bxe_interrupt_alloc()
8940 do { /* try allocating MSI-X interrupt resources (at least 2) */ in bxe_interrupt_alloc()
8941 if (sc->interrupt_mode != INTR_MODE_MSIX) { in bxe_interrupt_alloc()
8945 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || in bxe_interrupt_alloc()
8947 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ in bxe_interrupt_alloc()
8951 /* ask for the necessary number of MSI-X vectors */ in bxe_interrupt_alloc()
8952 num_requested = min((sc->num_queues + 1), msix_count); in bxe_interrupt_alloc()
8954 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); in bxe_interrupt_alloc()
8957 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { in bxe_interrupt_alloc()
8958 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); in bxe_interrupt_alloc()
8959 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ in bxe_interrupt_alloc()
8964 BLOGE(sc, "MSI-X allocation less than 2!\n"); in bxe_interrupt_alloc()
8965 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ in bxe_interrupt_alloc()
8966 pci_release_msi(sc->dev); in bxe_interrupt_alloc()
8970 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", in bxe_interrupt_alloc()
8974 sc->intr_count = num_allocated; in bxe_interrupt_alloc()
8975 sc->num_queues = num_allocated - 1; in bxe_interrupt_alloc()
8979 /* allocate the MSI-X vectors */ in bxe_interrupt_alloc()
8981 sc->intr[i].rid = (rid + i); in bxe_interrupt_alloc()
8983 if ((sc->intr[i].resource = in bxe_interrupt_alloc()
8984 bus_alloc_resource_any(sc->dev, in bxe_interrupt_alloc()
8986 &sc->intr[i].rid, in bxe_interrupt_alloc()
8988 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", in bxe_interrupt_alloc()
8991 for (j = (i - 1); j >= 0; j--) { in bxe_interrupt_alloc()
8992 bus_release_resource(sc->dev, in bxe_interrupt_alloc()
8994 sc->intr[j].rid, in bxe_interrupt_alloc()
8995 sc->intr[j].resource); in bxe_interrupt_alloc()
8998 sc->intr_count = 0; in bxe_interrupt_alloc()
8999 sc->num_queues = 0; in bxe_interrupt_alloc()
9000 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ in bxe_interrupt_alloc()
9001 pci_release_msi(sc->dev); in bxe_interrupt_alloc()
9005 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); in bxe_interrupt_alloc()
9010 if (sc->interrupt_mode != INTR_MODE_MSI) { in bxe_interrupt_alloc()
9014 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || in bxe_interrupt_alloc()
9016 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ in bxe_interrupt_alloc()
9026 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { in bxe_interrupt_alloc()
9028 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ in bxe_interrupt_alloc()
9034 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ in bxe_interrupt_alloc()
9035 pci_release_msi(sc->dev); in bxe_interrupt_alloc()
9043 sc->intr_count = num_allocated; in bxe_interrupt_alloc()
9044 sc->num_queues = num_allocated; in bxe_interrupt_alloc()
9048 sc->intr[0].rid = rid; in bxe_interrupt_alloc()
9050 if ((sc->intr[0].resource = in bxe_interrupt_alloc()
9051 bus_alloc_resource_any(sc->dev, in bxe_interrupt_alloc()
9053 &sc->intr[0].rid, in bxe_interrupt_alloc()
9056 sc->intr_count = 0; in bxe_interrupt_alloc()
9057 sc->num_queues = 0; in bxe_interrupt_alloc()
9058 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ in bxe_interrupt_alloc()
9059 pci_release_msi(sc->dev); in bxe_interrupt_alloc()
9067 if (sc->interrupt_mode != INTR_MODE_INTX) { in bxe_interrupt_alloc()
9074 sc->intr_count = 1; in bxe_interrupt_alloc()
9075 sc->num_queues = 1; in bxe_interrupt_alloc()
9079 sc->intr[0].rid = rid; in bxe_interrupt_alloc()
9081 if ((sc->intr[0].resource = in bxe_interrupt_alloc()
9082 bus_alloc_resource_any(sc->dev, in bxe_interrupt_alloc()
9084 &sc->intr[0].rid, in bxe_interrupt_alloc()
9087 sc->intr_count = 0; in bxe_interrupt_alloc()
9088 sc->num_queues = 0; in bxe_interrupt_alloc()
9089 sc->interrupt_mode = -1; /* Failed! */ in bxe_interrupt_alloc()
9096 if (sc->interrupt_mode == -1) { in bxe_interrupt_alloc()
9102 sc->interrupt_mode, sc->num_queues); in bxe_interrupt_alloc()
9116 for (i = 0; i < sc->intr_count; i++) { in bxe_interrupt_detach()
9117 if (sc->intr[i].resource && sc->intr[i].tag) { in bxe_interrupt_detach()
9119 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); in bxe_interrupt_detach()
9123 for (i = 0; i < sc->num_queues; i++) { in bxe_interrupt_detach()
9124 fp = &sc->fp[i]; in bxe_interrupt_detach()
9125 if (fp->tq) { in bxe_interrupt_detach()
9126 taskqueue_drain(fp->tq, &fp->tq_task); in bxe_interrupt_detach()
9127 taskqueue_drain(fp->tq, &fp->tx_task); in bxe_interrupt_detach()
9128 while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task, in bxe_interrupt_detach()
9130 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task); in bxe_interrupt_detach()
9133 for (i = 0; i < sc->num_queues; i++) { in bxe_interrupt_detach()
9134 fp = &sc->fp[i]; in bxe_interrupt_detach()
9135 if (fp->tq != NULL) { in bxe_interrupt_detach()
9136 taskqueue_free(fp->tq); in bxe_interrupt_detach()
9137 fp->tq = NULL; in bxe_interrupt_detach()
9142 if (sc->sp_tq) { in bxe_interrupt_detach()
9143 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); in bxe_interrupt_detach()
9144 taskqueue_free(sc->sp_tq); in bxe_interrupt_detach()
9145 sc->sp_tq = NULL; in bxe_interrupt_detach()
9152 * When using multiple MSI/MSI-X vectors the first vector
9155 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
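/*
 * Illustrative layout (derived from the attach code below): with N
 * queues in MSI-X mode the vectors end up assigned as
 *
 *   intr[0]    -> bxe_intr_sp() (slowpath: attentions, EQ, stats)
 *   intr[1..N] -> bxe_intr_fp() (fastpath: one RX/TX queue each)
 *
 * while in single-vector MSI/INTx mode one ISR demultiplexes both.
 */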
9165 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), in bxe_interrupt_attach()
9166 "bxe%d_sp_tq", sc->unit); in bxe_interrupt_attach()
9167 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); in bxe_interrupt_attach()
9168 sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT, in bxe_interrupt_attach()
9170 &sc->sp_tq); in bxe_interrupt_attach()
9171 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ in bxe_interrupt_attach()
9172 "%s", sc->sp_tq_name); in bxe_interrupt_attach()
9175 for (i = 0; i < sc->num_queues; i++) { in bxe_interrupt_attach()
9176 fp = &sc->fp[i]; in bxe_interrupt_attach()
9177 snprintf(fp->tq_name, sizeof(fp->tq_name), in bxe_interrupt_attach()
9178 "bxe%d_fp%d_tq", sc->unit, i); in bxe_interrupt_attach()
9179 NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); in bxe_interrupt_attach()
9180 TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp); in bxe_interrupt_attach()
9181 fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT, in bxe_interrupt_attach()
9183 &fp->tq); in bxe_interrupt_attach()
9184 TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0, in bxe_interrupt_attach()
9186 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ in bxe_interrupt_attach()
9187 "%s", fp->tq_name); in bxe_interrupt_attach()
9191 if (sc->interrupt_mode == INTR_MODE_MSIX) { in bxe_interrupt_attach()
9192 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); in bxe_interrupt_attach()
9198 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, in bxe_interrupt_attach()
9201 &sc->intr[0].tag)) != 0) { in bxe_interrupt_attach()
9202 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); in bxe_interrupt_attach()
9206 bus_describe_intr(sc->dev, sc->intr[0].resource, in bxe_interrupt_attach()
9207 sc->intr[0].tag, "sp"); in bxe_interrupt_attach()
9209 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ in bxe_interrupt_attach()
9212 for (i = 0; i < sc->num_queues; i++) { in bxe_interrupt_attach()
9213 fp = &sc->fp[i]; in bxe_interrupt_attach()
9214 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); in bxe_interrupt_attach()
9221 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, in bxe_interrupt_attach()
9224 &sc->intr[i + 1].tag)) != 0) { in bxe_interrupt_attach()
9225 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", in bxe_interrupt_attach()
9230 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, in bxe_interrupt_attach()
9231 sc->intr[i + 1].tag, "fp%02d", i); in bxe_interrupt_attach()
9234 if (sc->num_queues > 1) { in bxe_interrupt_attach()
9235 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); in bxe_interrupt_attach()
9238 fp->state = BXE_FP_STATE_IRQ; in bxe_interrupt_attach()
9240 } else if (sc->interrupt_mode == INTR_MODE_MSI) { in bxe_interrupt_attach()
9248 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, in bxe_interrupt_attach()
9251 &sc->intr[0].tag)) != 0) { in bxe_interrupt_attach()
9256 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ in bxe_interrupt_attach()
9264 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, in bxe_interrupt_attach()
9267 &sc->intr[0].tag)) != 0) { in bxe_interrupt_attach()
9311 sc->dmae_ready = 0; in bxe_init_func_obj()
9314 &sc->func_obj, in bxe_init_func_obj()
9332 func_params.f_obj = &sc->func_obj; in bxe_init_hw()
9365 /* writes FP SP data to FW - data_size in dwords */
9460 hc_sm->igu_sb_id = igu_sb_id; in bxe_setup_ndsb_state_machine()
9461 hc_sm->igu_seg_id = igu_seg_id; in bxe_setup_ndsb_state_machine()
9462 hc_sm->timer_value = 0xFF; in bxe_setup_ndsb_state_machine()
9463 hc_sm->time_to_expire = 0xFFFFFFFF; in bxe_setup_ndsb_state_machine()
9557 /* write indices to HW - PCI guarantees endianness of regpairs */ in bxe_init_sb()
9564 if (CHIP_IS_E1x(fp->sc)) { in bxe_fp_qzone_id()
9565 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); in bxe_fp_qzone_id()
9567 return (fp->cl_id); in bxe_fp_qzone_id()
9578 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); in bxe_rx_ustorm_prods_offset()
9580 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); in bxe_rx_ustorm_prods_offset()
9590 struct bxe_fastpath *fp = &sc->fp[idx]; in bxe_init_eth_fp()
9595 fp->sc = sc; in bxe_init_eth_fp()
9596 fp->index = idx; in bxe_init_eth_fp()
9598 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); in bxe_init_eth_fp()
9599 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); in bxe_init_eth_fp()
9601 fp->cl_id = (CHIP_IS_E1x(sc)) ? in bxe_init_eth_fp()
9603 /* want client ID same as IGU SB ID for non-E1 */ in bxe_init_eth_fp()
9604 fp->igu_sb_id; in bxe_init_eth_fp()
9605 fp->cl_qzone_id = bxe_fp_qzone_id(fp); in bxe_init_eth_fp()
9609 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; in bxe_init_eth_fp()
9610 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; in bxe_init_eth_fp()
9612 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; in bxe_init_eth_fp()
9613 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; in bxe_init_eth_fp()
9617 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); in bxe_init_eth_fp()
9619 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; in bxe_init_eth_fp()
9625 for (cos = 0; cos < sc->max_cos; cos++) { in bxe_init_eth_fp()
9628 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; in bxe_init_eth_fp()
9635 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, in bxe_init_eth_fp()
9636 fp->fw_sb_id, fp->igu_sb_id); in bxe_init_eth_fp()
9645 &sc->sp_objs[idx].q_obj, in bxe_init_eth_fp()
9646 fp->cl_id, in bxe_init_eth_fp()
9648 sc->max_cos, in bxe_init_eth_fp()
9656 &sc->sp_objs[idx].mac_obj, in bxe_init_eth_fp()
9657 fp->cl_id, in bxe_init_eth_fp()
9663 &sc->sp_state, in bxe_init_eth_fp()
9665 &sc->macs_pool); in bxe_init_eth_fp()
9668 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); in bxe_init_eth_fp()
9690 * This is only applicable for weak-ordered memory model archs such in bxe_update_rx_prod()
9691 * as IA-64. The following barrier is also mandatory since FW will in bxe_update_rx_prod()
9698 (fp->ustorm_rx_prods_offset + (i * 4)), in bxe_update_rx_prod()
9706 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod); in bxe_update_rx_prod()
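/*
 * Illustrative ordering sketch (not driver code): the invariant the
 * barrier above preserves is "descriptor memory is globally visible
 * before the producer index that advertises it", conceptually:
 *
 *   fill rx_bd / rcq / sge entries in host memory;
 *   wmb();                     // order the stores on weak-memory CPUs
 *   REG_WR(sc, prods_offset, producers);  // FW may DMA immediately
 */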
9715 for (i = 0; i < sc->num_queues; i++) { in bxe_init_rx_rings()
9716 fp = &sc->fp[i]; in bxe_init_rx_rings()
9718 fp->rx_bd_cons = 0; in bxe_init_rx_rings()
9726 fp->rx_bd_prod, in bxe_init_rx_rings()
9727 fp->rx_cq_prod, in bxe_init_rx_rings()
9728 fp->rx_sge_prod); in bxe_init_rx_rings()
9738 U64_LO(fp->rcq_dma.paddr)); in bxe_init_rx_rings()
9742 U64_HI(fp->rcq_dma.paddr)); in bxe_init_rx_rings()
9750 SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1); in bxe_init_tx_ring_one()
9751 fp->tx_db.data.zero_fill1 = 0; in bxe_init_tx_ring_one()
9752 fp->tx_db.data.prod = 0; in bxe_init_tx_ring_one()
9754 fp->tx_pkt_prod = 0; in bxe_init_tx_ring_one()
9755 fp->tx_pkt_cons = 0; in bxe_init_tx_ring_one()
9756 fp->tx_bd_prod = 0; in bxe_init_tx_ring_one()
9757 fp->tx_bd_cons = 0; in bxe_init_tx_ring_one()
9758 fp->eth_q_stats.tx_pkts = 0; in bxe_init_tx_ring_one()
9766 for (i = 0; i < sc->num_queues; i++) { in bxe_init_tx_rings()
9767 bxe_init_tx_ring_one(&sc->fp[i]); in bxe_init_tx_rings()
9774 struct host_sp_status_block *def_sb = sc->def_sb; in bxe_init_def_sb()
9775 bus_addr_t mapping = sc->def_sb_dma.paddr; in bxe_init_def_sb()
9791 igu_sp_sb_index = sc->igu_dsb_id; in bxe_init_def_sb()
9798 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; in bxe_init_def_sb()
9799 sc->attn_state = 0; in bxe_init_def_sb()
9811 sc->attn_group[index].sig[sindex] = in bxe_init_def_sb()
9821 sc->attn_group[index].sig[4] = in bxe_init_def_sb()
9824 sc->attn_group[index].sig[4] = 0; in bxe_init_def_sb()
9828 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_init_def_sb()
9856 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); in bxe_init_def_sb()
9862 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); in bxe_init_sp_ring()
9863 sc->spq_prod_idx = 0; in bxe_init_sp_ring()
9864 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; in bxe_init_sp_ring()
9865 sc->spq_prod_bd = sc->spq; in bxe_init_sp_ring()
9866 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); in bxe_init_sp_ring()
9876 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; in bxe_init_eq_ring()
9878 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + in bxe_init_eq_ring()
9881 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + in bxe_init_eq_ring()
9886 sc->eq_cons = 0; in bxe_init_eq_ring()
9887 sc->eq_prod = NUM_EQ_DESC; in bxe_init_eq_ring()
9888 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; in bxe_init_eq_ring()
9890 atomic_store_rel_long(&sc->eq_spq_left, in bxe_init_eq_ring()
9891 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), in bxe_init_eq_ring()
9892 NUM_EQ_DESC) - 1)); in bxe_init_eq_ring()
9961 storm_memset_func_cfg(sc, &tcfg, p->func_id); in bxe_func_init()
9965 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); in bxe_func_init()
9966 storm_memset_func_en(sc, p->func_id, 1); in bxe_func_init()
9969 if (p->func_flgs & FUNC_FLG_SPQ) { in bxe_func_init()
9970 storm_memset_spq_addr(sc, p->spq_map, p->func_id); in bxe_func_init()
9972 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), in bxe_func_init()
9973 p->spq_prod); in bxe_func_init()
9983 * 0 - if all the min_rates are 0.
9997 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; in bxe_calc_vn_min()
10005 /* If min rate is zero - set it to 100 */ in bxe_calc_vn_min()
10011 input->vnic_min_rate[vn] = vn_min_rate; in bxe_calc_vn_min()
10014 /* if ETS or all min rates are zeros - disable fairness */ in bxe_calc_vn_min()
10016 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; in bxe_calc_vn_min()
10019 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; in bxe_calc_vn_min()
10023 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; in bxe_calc_vn_min()
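/*
 * Worked example (illustrative values): with two active vnics whose
 * configured min rates are 25 and 75, fairness stays enabled and
 * vnic_min_rate[] becomes { 25, 75 }. A vnic configured with 0 is
 * bumped to 100 (see above), but the all-zero and ETS cases still
 * clear CMNG_FLAGS_PER_PORT_FAIRNESS_VN entirely.
 */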
10035 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); in bxe_extract_max_cfg()
10048 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; in bxe_calc_vn_max()
10058 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); in bxe_calc_vn_max()
10067 input->vnic_max_rate[vn] = vn_max_rate; in bxe_calc_vn_max()
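/*
 * Worked example (illustrative): max_cfg is a percentage of line rate,
 * so at 10G with max_cfg = 30:
 *
 *   vn_max_rate = (10000 * 30) / 100 = 3000 Mbps
 *
 * and a max_cfg of 0 is patched to 100 (full line rate) by
 * bxe_extract_max_cfg() above.
 */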
10080 input.port_rate = sc->link_vars.line_speed; in bxe_cmng_fns_init()
10092 if (sc->port.pmf) { in bxe_cmng_fns_init()
10101 ecore_init_cmng(&input, &sc->cmng); in bxe_cmng_fns_init()
10136 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port); in storm_memset_cmng()
10145 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); in storm_memset_cmng()
10151 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); in storm_memset_cmng()
10186 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0; in bxe_pf_init()
10191 func_init.spq_map = sc->spq_dma.paddr; in bxe_pf_init()
10192 func_init.spq_prod = sc->spq_prod_idx; in bxe_pf_init()
10196 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); in bxe_pf_init()
10202 * re-calculated according to the actual link rate. in bxe_pf_init()
10204 sc->link_vars.line_speed = SPEED_10000; in bxe_pf_init()
10208 if (sc->port.pmf) { in bxe_pf_init()
10209 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); in bxe_pf_init()
10212 /* init Event Queue - PCI bus guarantees correct endianness */ in bxe_pf_init()
10213 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); in bxe_pf_init()
10214 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); in bxe_pf_init()
10215 eq_data.producer = sc->eq_prod; in bxe_pf_init()
10227 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; in bxe_hc_int_enable()
10228 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && in bxe_hc_int_enable()
10229 (sc->intr_count == 1)) ? TRUE : FALSE; in bxe_hc_int_enable()
10230 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; in bxe_hc_int_enable()
10266 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); in bxe_hc_int_enable()
10277 if (sc->port.pmf) { in bxe_hc_int_enable()
10297 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; in bxe_igu_int_enable()
10298 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && in bxe_igu_int_enable()
10299 (sc->intr_count == 1)) ? TRUE : FALSE; in bxe_igu_int_enable()
10300 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; in bxe_igu_int_enable()
10324 /* clean previous status - need to configure igu prior to ack */ in bxe_igu_int_enable()
10333 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); in bxe_igu_int_enable()
10342 if (sc->port.pmf) { in bxe_igu_int_enable()
10360 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_int_enable()
10430 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_int_disable()
10443 for (i = 0; i < sc->num_queues; i++) { in bxe_nic_init()
10457 elink_init_mod_abs_int(sc, &sc->link_vars, in bxe_nic_init()
10458 sc->devinfo.chip_id, in bxe_nic_init()
10459 sc->devinfo.shmem_base, in bxe_nic_init()
10460 sc->devinfo.shmem2_base, in bxe_nic_init()
10489 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : in bxe_init_objs()
10493 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); in bxe_init_objs()
10497 &sc->mcast_obj, in bxe_init_objs()
10498 sc->fp[0].cl_id, in bxe_init_objs()
10499 sc->fp[0].index, in bxe_init_objs()
10505 &sc->sp_state, in bxe_init_objs()
10510 &sc->macs_pool, in bxe_init_objs()
10516 &sc->vlans_pool, in bxe_init_objs()
10523 &sc->rss_conf_obj, in bxe_init_objs()
10524 sc->fp[0].cl_id, in bxe_init_objs()
10525 sc->fp[0].index, in bxe_init_objs()
10531 &sc->sp_state, ECORE_OBJ_TYPE_RX); in bxe_init_objs()
10547 func_params.f_obj = &sc->func_obj; in bxe_func_start()
10551 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; in bxe_func_start()
10552 start_params->sd_vlan_tag = OVLAN(sc); in bxe_func_start()
10555 start_params->network_cos_mode = STATIC_COS; in bxe_func_start()
10557 start_params->network_cos_mode = FW_WRR; in bxe_func_start()
10560 //start_params->gre_tunnel_mode = 0; in bxe_func_start()
10561 //start_params->gre_tunnel_rss = 0; in bxe_func_start()
10573 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { in bxe_set_power_state()
10578 pmcsr = pci_read_config(sc->dev, in bxe_set_power_state()
10579 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), in bxe_set_power_state()
10584 pci_write_config(sc->dev, in bxe_set_power_state()
10585 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), in bxe_set_power_state()
10606 if (sc->wol) { in bxe_set_power_state()
10610 pci_write_config(sc->dev, in bxe_set_power_state()
10611 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), in bxe_set_power_state()
10623 return (-1); in bxe_set_power_state()
10653 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bxe_trylock_hw_lock()
10755 } while (--cnt > 0); in bxe_er_poll_igu_vq()
10759 return (-1); in bxe_er_poll_igu_vq()
10834 sc->devinfo.shmem_base = in bxe_init_shmem()
10835 sc->link_params.shmem_base = in bxe_init_shmem()
10838 if (sc->devinfo.shmem_base) { in bxe_init_shmem()
10850 return (-1); in bxe_init_shmem()
10879 * - PCIE core
10880 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10881 * - IGU
10882 * - MISC (including AEU)
10883 * - GRC
10884 * - RBCN, RBCP
10932 * - all xxMACs are handled by the elink code. in bxe_process_kill_chip_reset()
11014 } while (cnt-- > 0); in bxe_process_kill()
11023 return (-1); in bxe_process_kill()
11033 return (-1); in bxe_process_kill()
11046 * Wait for 1ms to empty GLUE and PCI-E core queues, in bxe_process_kill()
11072 return (-1); in bxe_process_kill()
11082 * re-enable attentions in bxe_process_kill()
11104 rc = -1; in bxe_leader_reset()
11111 rc = -1; in bxe_leader_reset()
11118 rc = -1; in bxe_leader_reset()
11126 rc = -1; in bxe_leader_reset()
11149 sc->is_leader = 0; in bxe_leader_reset()
11158 * - HC configuration
11159 * - Queue's CDU context
11169 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); in bxe_pf_q_prep_init()
11170 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); in bxe_pf_q_prep_init()
11172 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); in bxe_pf_q_prep_init()
11173 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); in bxe_pf_q_prep_init()
11176 init_params->rx.hc_rate = in bxe_pf_q_prep_init()
11177 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; in bxe_pf_q_prep_init()
11178 init_params->tx.hc_rate = in bxe_pf_q_prep_init()
11179 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0; in bxe_pf_q_prep_init()
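/*
 * Worked example (illustrative): per the 1000000/x conversion the
 * ticks values are coalescing intervals in microseconds, so the
 * firmware rate parameter is simply their inverse:
 *
 *   hc_rx_ticks = 25 -> hc_rate = 1000000 / 25 = 40000 updates/sec
 *   hc_rx_ticks = 0  -> hc_rate = 0 (coalescing effectively off)
 */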
11182 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; in bxe_pf_q_prep_init()
11185 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; in bxe_pf_q_prep_init()
11186 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; in bxe_pf_q_prep_init()
11189 init_params->max_cos = sc->max_cos; in bxe_pf_q_prep_init()
11192 fp->index, init_params->max_cos); in bxe_pf_q_prep_init()
11195 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { in bxe_pf_q_prep_init()
11197 /* fp->txdata[cos]->cid */ in bxe_pf_q_prep_init()
11198 cxt_index = fp->index / ILT_PAGE_CIDS; in bxe_pf_q_prep_init()
11199 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); in bxe_pf_q_prep_init()
11200 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; in bxe_pf_q_prep_init()
11204 /* set flags that are common for the Tx-only and not normal connections */
11227 * tx only connections can support tx-switching, though their in bxe_get_common_flags()
11228 * CoS-ness doesn't survive the loopback in bxe_get_common_flags()
11230 if (sc->flags & BXE_TX_SWITCHING) { in bxe_get_common_flags()
11250 if (if_getcapenable(sc->ifp) & IFCAP_LRO) { in bxe_get_q_flags()
11272 gen_init->stat_id = bxe_stats_id(fp); in bxe_pf_q_prep_general()
11273 gen_init->spcl_id = fp->cl_id; in bxe_pf_q_prep_general()
11274 gen_init->mtu = sc->mtu; in bxe_pf_q_prep_general()
11275 gen_init->cos = cos; in bxe_pf_q_prep_general()
11288 pause->sge_th_lo = SGE_TH_LO(sc); in bxe_pf_rx_q_prep()
11289 pause->sge_th_hi = SGE_TH_HI(sc); in bxe_pf_rx_q_prep()
11292 if (sc->dropless_fc && in bxe_pf_rx_q_prep()
11293 (pause->sge_th_hi + FW_PREFETCH_CNT) > in bxe_pf_rx_q_prep()
11299 tpa_agg_size = (2 * sc->mtu); in bxe_pf_rx_q_prep()
11300 if (tpa_agg_size < sc->max_aggregation_size) { in bxe_pf_rx_q_prep()
11301 tpa_agg_size = sc->max_aggregation_size; in bxe_pf_rx_q_prep()
11304 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; in bxe_pf_rx_q_prep()
11305 max_sge = ((max_sge + PAGES_PER_SGE - 1) & in bxe_pf_rx_q_prep()
11306 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; in bxe_pf_rx_q_prep()
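/*
 * Worked example (assuming 4KB SGE pages and PAGES_PER_SGE == 1): for
 * an MTU of 9000,
 *
 *   SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT = 12288 >> 12 = 3
 *
 * and the round-up to a PAGES_PER_SGE multiple leaves max_sge = 3,
 * i.e. at most three SGEs chained per aggregated frame.
 */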
11309 /* pause - not for e1 */ in bxe_pf_rx_q_prep()
11311 pause->bd_th_lo = BD_TH_LO(sc); in bxe_pf_rx_q_prep()
11312 pause->bd_th_hi = BD_TH_HI(sc); in bxe_pf_rx_q_prep()
11314 pause->rcq_th_lo = RCQ_TH_LO(sc); in bxe_pf_rx_q_prep()
11315 pause->rcq_th_hi = RCQ_TH_HI(sc); in bxe_pf_rx_q_prep()
11318 if (sc->dropless_fc && in bxe_pf_rx_q_prep()
11319 pause->bd_th_hi + FW_PREFETCH_CNT > in bxe_pf_rx_q_prep()
11320 sc->rx_ring_size) { in bxe_pf_rx_q_prep()
11324 if (sc->dropless_fc && in bxe_pf_rx_q_prep()
11325 pause->rcq_th_hi + FW_PREFETCH_CNT > in bxe_pf_rx_q_prep()
11330 pause->pri_map = 1; in bxe_pf_rx_q_prep()
11334 rxq_init->dscr_map = fp->rx_dma.paddr; in bxe_pf_rx_q_prep()
11335 rxq_init->sge_map = fp->rx_sge_dma.paddr; in bxe_pf_rx_q_prep()
11336 rxq_init->rcq_map = fp->rcq_dma.paddr; in bxe_pf_rx_q_prep()
11337 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE); in bxe_pf_rx_q_prep()
11343 rxq_init->buf_sz = (fp->rx_buf_size - in bxe_pf_rx_q_prep()
11346 rxq_init->cl_qzone_id = fp->cl_qzone_id; in bxe_pf_rx_q_prep()
11347 rxq_init->tpa_agg_sz = tpa_agg_size; in bxe_pf_rx_q_prep()
11348 rxq_init->sge_buf_sz = sge_sz; in bxe_pf_rx_q_prep()
11349 rxq_init->max_sges_pkt = max_sge; in bxe_pf_rx_q_prep()
11350 rxq_init->rss_engine_id = SC_FUNC(sc); in bxe_pf_rx_q_prep()
11351 rxq_init->mcast_engine_id = SC_FUNC(sc); in bxe_pf_rx_q_prep()
11358 rxq_init->max_tpa_queues = MAX_AGG_QS(sc); in bxe_pf_rx_q_prep()
11360 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT; in bxe_pf_rx_q_prep()
11361 rxq_init->fw_sb_id = fp->fw_sb_id; in bxe_pf_rx_q_prep()
11363 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; in bxe_pf_rx_q_prep()
11370 rxq_init->silent_removal_value = in bxe_pf_rx_q_prep()
11371 sc->devinfo.mf_info.afex_def_vlan_tag; in bxe_pf_rx_q_prep()
11372 rxq_init->silent_removal_mask = EVL_VLID_MASK; in bxe_pf_rx_q_prep()
11385 * fp->txdata[cos]->tx_dma.paddr; in bxe_pf_tx_q_prep()
11387 txq_init->dscr_map = fp->tx_dma.paddr; in bxe_pf_tx_q_prep()
11388 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; in bxe_pf_tx_q_prep()
11389 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; in bxe_pf_tx_q_prep()
11390 txq_init->fw_sb_id = fp->fw_sb_id; in bxe_pf_tx_q_prep()
11396 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); in bxe_pf_tx_q_prep()
11401 * 1) RESET->INIT
11402 * 2) INIT->SETUP
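/*
 * Illustrative flow (a sketch of this function; see the ecore code for
 * the authoritative state machine): the queue object is driven through
 * two commands back to back,
 *
 *   ECORE_Q_CMD_INIT  : RESET -> INIT
 *   ECORE_Q_CMD_SETUP : INIT  -> SETUP (queue setup ramrod to FW)
 *
 * and a failure of either step aborts bxe_setup_queue() with an error.
 */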
11414 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); in bxe_setup_queue()
11416 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); in bxe_setup_queue()
11432 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc); in bxe_setup_queue()
11442 setup_params->flags = bxe_get_q_flags(sc, fp, leading); in bxe_setup_queue()
11445 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, in bxe_setup_queue()
11449 &setup_params->pause_params, in bxe_setup_queue()
11450 &setup_params->rxq_params); in bxe_setup_queue()
11453 &setup_params->txq_params, in bxe_setup_queue()
11462 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc); in bxe_setup_queue()
11472 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); in bxe_setup_leading()
11499 if (rss_obj->udp_rss_v4) { in bxe_config_rss_pf()
11502 if (rss_obj->udp_rss_v6) { in bxe_config_rss_pf()
11509 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); in bxe_config_rss_pf()
11527 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); in bxe_config_rss_eth()
11540 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { in bxe_init_rss_pf()
11541 sc->rss_conf_obj.ind_table[i] = in bxe_init_rss_pf()
11542 (sc->fp->cl_id + (i % num_eth_queues)); in bxe_init_rss_pf()
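/*
 * Worked example (illustrative): with 4 ETH queues and a leading cl_id
 * of 16 the indirection table fills round-robin,
 *
 *   ind_table[] = 16, 17, 18, 19, 16, 17, 18, 19, ...
 *
 * spreading RSS hash buckets evenly across the RX queues' client IDs.
 */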
11545 if (sc->udp_rss) { in bxe_init_rss_pf()
11546 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; in bxe_init_rss_pf()
11551 * per-port, so if explicit configuration is needed, do it only in bxe_init_rss_pf()
11554 * For 57712 and newer it's a per-function configuration. in bxe_init_rss_pf()
11556 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); in bxe_init_rss_pf()
11611 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, in bxe_set_eth_mac()
11612 &sc->sp_objs->mac_obj, in bxe_set_eth_mac()
11621 if (sc->link_params.num_phys <= 1) { in bxe_get_cur_phy_idx()
11625 if (sc->link_vars.link_up) { in bxe_get_cur_phy_idx()
11628 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && in bxe_get_cur_phy_idx()
11629 (sc->link_params.phy[ELINK_EXT_PHY2].supported & in bxe_get_cur_phy_idx()
11633 switch (elink_phy_selection(&sc->link_params)) { in bxe_get_cur_phy_idx()
11660 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { in bxe_get_link_cfg_idx()
11678 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { in bxe_set_requested_fc()
11679 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; in bxe_set_requested_fc()
11681 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; in bxe_set_requested_fc()
11691 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | in bxe_calc_fc_adv()
11694 switch (sc->link_vars.ieee_fc & in bxe_calc_fc_adv()
11698 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | in bxe_calc_fc_adv()
11703 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; in bxe_calc_fc_adv()
11715 uint16_t line_speed = sc->link_vars.line_speed; in bxe_get_mf_speed()
11718 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); in bxe_get_mf_speed()
11744 data->line_speed = line_speed; in bxe_fill_report_data()
11747 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { in bxe_fill_report_data()
11748 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); in bxe_fill_report_data()
11752 if (sc->link_vars.duplex == DUPLEX_FULL) { in bxe_fill_report_data()
11753 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); in bxe_fill_report_data()
11757 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { in bxe_fill_report_data()
11758 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); in bxe_fill_report_data()
11762 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { in bxe_fill_report_data()
11763 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); in bxe_fill_report_data()
11782 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || in bxe_link_report_locked()
11784 &sc->last_reported_link.link_report_flags) && in bxe_link_report_locked()
11791 cur_data.link_report_flags, sc->last_reported_link.link_report_flags); in bxe_link_report_locked()
11792 sc->link_cnt++; in bxe_link_report_locked()
11794 ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt); in bxe_link_report_locked()
11796 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); in bxe_link_report_locked()
11800 if_link_state_change(sc->ifp, LINK_STATE_DOWN); in bxe_link_report_locked()
11824 flow = "ON - receive & transmit"; in bxe_link_report_locked()
11829 flow = "ON - receive"; in bxe_link_report_locked()
11834 flow = "ON - transmit"; in bxe_link_report_locked()
11842 if_link_state_change(sc->ifp, LINK_STATE_UP); in bxe_link_report_locked()
11859 if (sc->state != BXE_STATE_OPEN) { in bxe_link_status_update()
11864 elink_link_status_update(&sc->link_params, &sc->link_vars); in bxe_link_status_update()
11866 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | in bxe_link_status_update()
11878 sc->port.advertising[0] = sc->port.supported[0]; in bxe_link_status_update()
11880 sc->link_params.sc = sc; in bxe_link_status_update()
11881 sc->link_params.port = SC_PORT(sc); in bxe_link_status_update()
11882 sc->link_params.req_duplex[0] = DUPLEX_FULL; in bxe_link_status_update()
11883 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; in bxe_link_status_update()
11884 sc->link_params.req_line_speed[0] = SPEED_10000; in bxe_link_status_update()
11885 sc->link_params.speed_cap_mask[0] = 0x7f0000; in bxe_link_status_update()
11886 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; in bxe_link_status_update()
11889 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; in bxe_link_status_update()
11890 sc->link_vars.line_speed = ELINK_SPEED_1000; in bxe_link_status_update()
11891 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | in bxe_link_status_update()
11894 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; in bxe_link_status_update()
11895 sc->link_vars.line_speed = ELINK_SPEED_10000; in bxe_link_status_update()
11896 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | in bxe_link_status_update()
11900 sc->link_vars.link_up = 1; in bxe_link_status_update()
11902 sc->link_vars.duplex = DUPLEX_FULL; in bxe_link_status_update()
11903 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; in bxe_link_status_update()
11906 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); in bxe_link_status_update()
11913 if (sc->link_vars.link_up) { in bxe_link_status_update()
11930 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; in bxe_initial_phy_init()
11931 struct elink_params *lp = &sc->link_params; in bxe_initial_phy_init()
11960 sc->link_params.feature_config_flags |= feat; in bxe_initial_phy_init()
11966 lp->loopback_mode = ELINK_LOOPBACK_XGXS; in bxe_initial_phy_init()
11968 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { in bxe_initial_phy_init()
11969 if (lp->speed_cap_mask[cfg_idx] & in bxe_initial_phy_init()
11971 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; in bxe_initial_phy_init()
11973 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; in bxe_initial_phy_init()
11979 lp->loopback_mode = ELINK_LOOPBACK_EXT; in bxe_initial_phy_init()
11982 rc = elink_phy_init(&sc->link_params, &sc->link_vars); in bxe_initial_phy_init()
11988 if (sc->link_vars.link_up) { in bxe_initial_phy_init()
11997 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; in bxe_initial_phy_init()
12007 mc_mac->mac = (uint8_t *)LLADDR(sdl); in bxe_push_maddr()
12016 if_t ifp = sc->ifp; in bxe_init_mcast_macs_list()
12020 ECORE_LIST_INIT(&p->mcast_list); in bxe_init_mcast_macs_list()
12021 p->mcast_list_len = 0; in bxe_init_mcast_macs_list()
12034 return (-1); in bxe_init_mcast_macs_list()
12040 ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list); in bxe_init_mcast_macs_list()
12048 p->mcast_list_len = mc_count; in bxe_init_mcast_macs_list()
12057 ECORE_LIST_FIRST_ENTRY(&p->mcast_list, in bxe_free_mcast_macs_list()
12072 rparam.mcast_obj = &sc->mcast_obj; in bxe_set_mc_list()
12116 struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj; in bxe_set_addr()
12119 if (ctx->rc < 0) in bxe_set_addr()
12122 rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE, in bxe_set_addr()
12123 ECORE_UC_LIST_MAC, &ctx->ramrod_flags); in bxe_set_addr()
12126 if (rc == -EEXIST) in bxe_set_addr()
12127 BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); in bxe_set_addr()
12129 BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc); in bxe_set_addr()
12130 ctx->rc = rc; in bxe_set_addr()
12139 if_t ifp = sc->ifp; in bxe_set_uc_list()
12140 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; in bxe_set_uc_list()
12164 if_t ifp = sc->ifp; in bxe_set_rx_mode()
12167 if (sc->state != BXE_STATE_OPEN) { in bxe_set_rx_mode()
12168 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); in bxe_set_rx_mode()
12172 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); in bxe_set_rx_mode()
12192 sc->rx_mode = rx_mode; in bxe_set_rx_mode()
12195 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { in bxe_set_rx_mode()
12197 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); in bxe_set_rx_mode()
/* bxe_periodic_callout_func() (excerpt) */
    if ((sc->state == BXE_STATE_OPEN) &&
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
        /* ... */
        callout_reset(&sc->periodic_callout, hz, /* ... */
    /* ... */
    if ((sc->state != BXE_STATE_OPEN) ||
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
    /* ... */
    if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
        /* Ruh-Roh, chip was reset! */
    /* ... */
    /* ... to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and ... */
    /* ... */
    if (sc->port.pmf) {
        /* ... */
        elink_period_func(&sc->link_params, &sc->link_vars);
    /* ... */
    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
        /* ... */
        ++sc->fw_drv_pulse_wr_seq;
        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
        /* ... */
        drv_pulse = sc->fw_drv_pulse_wr_seq;
    /* ... */
    if ((sc->state == BXE_STATE_OPEN) &&
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
        /* ... */
        callout_reset(&sc->periodic_callout, hz, /* ... */
/* bxe_periodic_start() (excerpt) */
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);

/* bxe_periodic_stop() (excerpt) */
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
    callout_drain(&sc->periodic_callout);
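/*
 * Editor's sketch (not driver code): the periodic-tick pattern used by
 * bxe_periodic_{start,stop} and bxe_periodic_callout_func() above, reduced
 * to a standalone skeleton. A flag is written with release semantics before
 * the callout is armed, the handler re-arms itself only while the flag
 * still reads GO (acquire semantics), and the stop path uses callout_drain()
 * to wait out a handler that is already running. All demo_* names and the
 * DEMO_GO/DEMO_STOP values are hypothetical.
 */
#if 0   /* illustrative only */
static struct callout demo_co;
static volatile u_long demo_flags;      /* DEMO_GO or DEMO_STOP */

static void
demo_tick(void *arg)
{
    if (atomic_load_acq_long(&demo_flags) != DEMO_GO)
        return;                         /* stopped; do not re-arm */
    /* ... per-tick work ... */
    callout_reset(&demo_co, hz, demo_tick, arg);
}

static void
demo_start(void *arg)
{
    atomic_store_rel_long(&demo_flags, DEMO_GO);
    callout_reset(&demo_co, hz, demo_tick, arg);
}

static void
demo_stop(void)
{
    atomic_store_rel_long(&demo_flags, DEMO_STOP);
    callout_drain(&demo_co);            /* blocks until the handler returns */
}
#endif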
/* bxe_parity_recover() (excerpt) */
    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
        (sc->state == BXE_STATE_ERROR)) {
    /* ... */
        __func__, sc, sc->state, sc->recovery_state, sc->error_status);
    /* ... */
    switch (sc->recovery_state) {
    /* ... */
        (sc->error_status & BXE_ERR_MCP_ASSERT) ||
        (sc->error_status & BXE_ERR_GLOBAL)) {
        /* ... */
        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
        /* ... */
        sc->state = BXE_STATE_ERROR;
        sc->recovery_state = BXE_RECOVERY_FAILED;
        /* ... */
            sc->error_status);
        /* ... */
        sc->is_leader = 1;
        /* ... */
        /* If interface has been removed - break */
        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
        /* ... */
        sc->recovery_state = BXE_RECOVERY_WAIT;
        /* ... */
        if (sc->is_leader) {
        /* ... */
            &sc->sp_err_timeout_task, hz/10);
        /* ... */
        sc->recovery_state = BXE_RECOVERY_FAILED;
        sc->state = BXE_STATE_ERROR;
        /* ... */
        /* ... to continue as a non-leader. ... */
        /* ... */
    } else { /* non-leader */
        /* ... */
        sc->is_leader = 1;
        /* ... */
            &sc->sp_err_timeout_task, hz/10);
        /* ... */
            &sc->sp_err_timeout_task, hz/10);
        /* ... */
            sc->eth_stats.recoverable_error;
        /* ... */
            sc->eth_stats.unrecoverable_error;
        /* ... */
        sc->recovery_state = /* ... */
        /* ... */
        sc->recovery_state = BXE_RECOVERY_FAILED;
        sc->state = BXE_STATE_ERROR;
        /* ... */
            sc->state, sc->recovery_state, sc->error_status);
        sc->error_status = 0;
        /* ... */
        sc->recovery_state = /* ... */
        /* ... */
            " recovery_state=0x%x \n", sc->error_status,
            sc->state, sc->recovery_state);
        /* ... */
        sc->error_status = 0;
        /* ... */
        sc->eth_stats.recoverable_error = /* ... */
        sc->eth_stats.unrecoverable_error = /* ... */
/* bxe_handle_error() (excerpt) */
    if (sc->recovery_state == BXE_RECOVERY_WAIT) {
    /* ... */
    if (sc->error_status) {
        if (sc->state == BXE_STATE_OPEN) {
        /* ... */
        if (sc->link_vars.link_up) {
            if_link_state_change(sc->ifp, LINK_STATE_DOWN);
        /* ... */
        sc->recovery_state = BXE_RECOVERY_INIT;
        /* ... */
            sc->unit, sc->error_status, sc->recovery_state);

/* bxe_sp_err_timeout_task() (excerpt) */
        __func__, sc->state, sc->recovery_state, sc->error_status);
    /* ... */
    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
        (sc->state == BXE_STATE_ERROR)) {
    /* ... */
    if ((sc->error_status) && (sc->trigger_grcdump)) {
    /* ... */
    if (sc->recovery_state != BXE_RECOVERY_DONE) {
    /* ... */
    } else if (sc->error_status) {
/* bxe_nic_load() (excerpt; each BXE_STATE_CLOSED / BXE_STATE_ERROR
 * assignment below sits on a distinct elided error path) */
    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
    /* ... */
    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
    /* ... */
    sc->state = BXE_STATE_CLOSED;
    /* ... */
    sc->state = BXE_STATE_CLOSED;
    /* ... */
    sc->state = BXE_STATE_CLOSED;
    /* ... */
    sc->state = BXE_STATE_CLOSED;
    /* ... */
    sc->state = BXE_STATE_CLOSED;
    /* ... */
    sc->state = BXE_STATE_CLOSED;
    /* ... */
    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
    /* ... */
    sc->flags |= BXE_NO_PULSE;
    /* ... */
    sc->state = BXE_STATE_CLOSED;
    /* ... */
    /* Init per-function objects */
    /* ... */
    sc->devinfo.mf_info.afex_def_vlan_tag = -1;
    /* ... */
    sc->state = BXE_STATE_OPENING_WAITING_PORT;
    /* ... */
    sc->state = BXE_STATE_ERROR;
    /* ... */
    sc->state = BXE_STATE_ERROR;
    /* ... */
    sc->state = BXE_STATE_ERROR;
    /* ... */
    rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
    /* ... */
    sc->state = BXE_STATE_ERROR;
    /* ... */
    sc->state = BXE_STATE_ERROR;
    /* ... */
    sc->state = BXE_STATE_OPEN;
    /* ... */
    sc->state = BXE_STATE_ERROR;
    /* ... */
    if (sc->port.pmf) {
        /* ... */
        sc->state = BXE_STATE_ERROR;
    /* ... */
    sc->link_params.feature_config_flags &= /* ... */
    /* ... */
    sc->state = BXE_STATE_DIAG;
    /* ... */
    if (sc->port.pmf) {
    /* ... */
    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
    /* ... */
    sc->port.pmf = 0;
/* bxe_init_locked() (excerpt) */
    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
    /* ... */
    if ((sc->state == BXE_STATE_ERROR) &&
        (sc->recovery_state == BXE_RECOVERY_FAILED)) {
        /* ... */
        "Reboot/Power-cycle the system\n");
    /* ... */
    sc->recovery_state = BXE_RECOVERY_FAILED;
    /* ... */
    sc->recovery_state = BXE_RECOVERY_DONE;
    /* ... */
    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
/* bxe_init_ifnet() (excerpt) */
    ifmedia_init(&sc->ifmedia, IFM_IMASK, /* ... */
    /* ... */
    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
    /* ... */
    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
    BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
    /* ... */
    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
    /* ... */
    if_setmtu(ifp, sc->mtu);
    /* ... */
    if_setsendqlen(ifp, sc->tx_ring_size);
    /* ... */
    sc->ifp = ifp;
    /* ... */
    ether_ifattach(ifp, sc->link_params.mac_addr);
/* bxe_deallocate_bars() (excerpt) */
    if (sc->bar[i].resource != NULL) {
        bus_release_resource(sc->dev, /* ... */
                             sc->bar[i].rid,
                             sc->bar[i].resource);

/* bxe_allocate_bars() (excerpt) */
    memset(sc->bar, 0, sizeof(sc->bar));
    /* ... */
    /* Run `pciconf -lb` to see mappings */
    /* ... */
    sc->bar[i].rid = PCIR_BAR(i);
    /* ... */
    if ((sc->bar[i].resource =
         bus_alloc_resource_any(sc->dev, /* ... */
                                &sc->bar[i].rid, /* ... */
    /* ... */
    sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
    sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
    sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
    /* ... */
    BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
          /* ... */
          rman_get_start(sc->bar[i].resource),
          rman_get_end(sc->bar[i].resource),
          rman_get_size(sc->bar[i].resource),
          (uintmax_t)sc->bar[i].kva);
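/*
 * Editor's note: a hypothetical `pciconf -lb` excerpt showing the kind of
 * BAR layout the loop above attaches to. The device, addresses, and sizes
 * are illustrative only, not taken from real hardware:
 *
 *   bxe0@pci0:3:0:0: class=0x020000 card=0x168e14e4 chip=0x168e14e4
 *       bar   [10] = type Memory, range 64, base 0xd0000000, size 8388608, enabled
 *       bar   [18] = type Memory, range 64, base 0xd0800000, size 8388608, enabled
 *       bar   [20] = type Memory, range 64, base 0xd1000000, size 65536, enabled
 */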
/* bxe_get_function_num() (excerpt) */
    /*
     * ... holds the relative-function number and absolute-function number. The
     * absolute-function number appears only in E2 and above. Before that ...
     */
    sc->pfunc_rel = /* ... */
    sc->path_id = /* ... */
    /* ... */
    sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
    /* ... */
    sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
    /* ... */
        sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
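/*
 * Editor's worked example (hypothetical values) for the numbering above:
 * on an E2+ part in the two-path layout, pfunc_rel = 2 on path_id = 1
 * gives pfunc_abs = (2 << 1) | 1 = 5, whereas the pre-E2 formula simply
 * ORs the path bit in: (2 | 1) = 3.
 */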
/* bxe_get_shmem_mf_cfg_base() (excerpt) */
    if (sc->devinfo.shmem2_base != 0) {

/* bxe_pcie_capability_read() (excerpt) */
    /* ensure the PCIe capability is present */
    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
        /* ... */
        BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
        return (pci_read_config(sc->dev, (pcie_reg + reg), width));
    /* ... */
    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
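/*
 * Editor's sketch of how the helper above is typically used (a hedged
 * illustration, not new driver code): PCIER_LINK_STA is the standard
 * FreeBSD offset of the 16-bit link status register within the express
 * capability, which is how the link speed/width probe below obtains its
 * raw value.
 */
#if 0   /* illustrative only */
uint32_t link_status;

link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
#endif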
/* bxe_probe_pci_caps() (excerpt) */
    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
        /* ... */
        sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
        sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
    /* ... */
    /* handle PCIe 2.0 workarounds for the 57710 */
    /* ... */
    sc->devinfo.pcie_link_speed = /* ... */
    sc->devinfo.pcie_link_width = /* ... */
    if (sc->devinfo.pcie_link_speed > 1) {
        sc->devinfo.pcie_link_width = /* ... */
    /* ... */
    sc->devinfo.pcie_link_speed = /* ... */
    sc->devinfo.pcie_link_width = /* ... */
    /* ... */
    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
    /* ... */
    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
    /* ... */
    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
        /* ... */
        sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
        sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
    /* ... */
    /* check if the MSI-X capability is present */
    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
        /* ... */
        BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
        /* ... */
        sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
        sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
/* bxe_get_shmem_mf_cfg_info_sd() (excerpt) */
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    /* ... */
    /* get the outer vlan if we're in switch-dependent mode */
    /* ... */
    mf_info->ext_id = (uint16_t)val;
    /* ... */
    mf_info->multi_vnics_mode = 1;
    /* ... */
    if (!VALID_OVLAN(mf_info->ext_id)) {
        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
    /* ... */
    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
        /* ... */
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
        /* ... */
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
    /* ... */
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
    /* ... */
    mf_info->vnics_per_port = /* ... */

/* bxe_get_shmem_mf_cfg_info_si() (excerpt) */
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    /* ... */
    /*
     * There is no outer vlan if we're in switch-independent mode.
     * If the mac is valid then assume multi-function.
     */
    /* ... */
    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
    /* ... */
    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
    /* ... */
    mf_info->vnics_per_port = /* ... */

/* bxe_get_shmem_mf_cfg_info_niv() (excerpt) */
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    /* ... */
    mf_info->multi_vnics_mode = 1;
    /* ... */
    mf_info->ext_id = /* ... */
    mf_info->default_vlan = /* ... */
    mf_info->niv_allowed_priorities = /* ... */
    mf_info->niv_default_cos = /* ... */
    mf_info->afex_vlan_mode = /* ... */
    mf_info->niv_mba_enabled = /* ... */
    /* ... */
    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
    /* ... */
    mf_info->vnics_per_port = /* ... */
/* bxe_check_valid_mf_cfg() (excerpt) */
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    /* ... */
        mf_info->mf_config[SC_VN(sc)]);
    /* ... */
        mf_info->multi_vnics_mode);
    /* ... */
        mf_info->vnics_per_port);
    /* ... */
        mf_info->ext_id);
    /* ... */
        mf_info->min_bw[0], mf_info->min_bw[1],
        mf_info->min_bw[2], mf_info->min_bw[3]);
    /* ... */
        mf_info->max_bw[0], mf_info->max_bw[1],
        mf_info->max_bw[2], mf_info->max_bw[3]);
    /* ... */
        sc->mac_addr_str);
    /* ... */
    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
    /* ... */
    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
        /* ... */
            mf_info->vnics_per_port, mf_info->multi_vnics_mode);
    /* ... */
    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        /* vnic id > 0 must have a valid ovlan in switch-dependent mode */
        /* ... */
        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
            /* ... */
                mf_info->multi_vnics_mode, OVLAN(sc));
    /* ... */
    /*
     * ... sure that all non-hidden functions have a valid ovlan. If SF,
     * make sure that all non-hidden functions have an invalid ovlan. ...
     */
    /* ... */
        (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
         ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
        /* ... */
            i, mf_info->multi_vnics_mode, ovlan1);
/* bxe_get_mf_cfg_info() (excerpt) */
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    /* ... */
    mf_info->vnics_per_port = 1;
    mf_info->multi_vnics_mode = FALSE;
    mf_info->path_has_ovlan = FALSE;
    mf_info->mf_mode = SINGLE_FUNCTION;
    /* ... */
    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
    /* ... */
    /* get the MF mode (switch dependent / independent / single-function) */
    /* ... */
    mf_info->mf_mode = MULTI_FUNCTION_SI;
    /* ... */
    mf_info->mf_mode = MULTI_FUNCTION_SD;
    /* ... */
    /*
     * Mark MF mode as NIV if the MCP version includes NPAR-SD support ...
     */
    mf_info->mf_mode = MULTI_FUNCTION_AFEX;
    /* ... */
    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        mf_info->path_has_ovlan = TRUE;
    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /*
         * ... 4-port mode, this is good enough to check vnic-0 of the
         * other port ...
         */
        /* ... */
        mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
    /* ... */
    if (mf_info->mf_mode == SINGLE_FUNCTION) {
    /* ... */
    mf_info->mf_config[SC_VN(sc)] = /* ... */
    /* ... */
    switch (mf_info->mf_mode)
    /* ... */
        mf_info->mf_mode);
    /* ... */
    mf_info->min_bw[vnic] = /* ... */
    mf_info->max_bw[vnic] = /* ... */
/* bxe_get_shmem_info() (excerpt) */
    sc->link_params.sc = sc;
    sc->link_params.port = port;
    /* ... */
    sc->devinfo.hw_config = /* ... */
    sc->devinfo.hw_config2 = /* ... */
    /* ... */
    sc->link_params.hw_led_mode =
        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
         /* ... */
    /* ... */
    sc->port.config = /* ... */
    /* ... */
    sc->link_params.speed_cap_mask[0] = /* ... */
    sc->link_params.speed_cap_mask[1] = /* ... */
    /* ... */
    sc->link_params.lane_config = /* ... */
    /* ... */
    sc->port.link_config[ELINK_INT_PHY] = val;
    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
    sc->port.link_config[ELINK_EXT_PHY1] = /* ... */
    /* ... */
    sc->link_params.feature_config_flags |= /* ... */
    /* ... */
    sc->link_params.feature_config_flags &= /* ... */
    /* ... */
    sc->link_params.multi_phy_config = /* ... */
    /* ... */
    sc->port.ext_phy_config = /* ... */
    /* ... */
    *sc->mac_addr_str = 0;
    /* ... */
    sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
    sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
    sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
    sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
    sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
    sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
    snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
             /* ... */
             sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
             sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
             sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
    BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
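/*
 * Editor's worked example (hypothetical values) of the shmem MAC
 * reassembly above: mac_hi = 0x0000aabb and mac_lo = 0xccddeeff yield
 * mac_addr[] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }, printed as
 * "aa:bb:cc:dd:ee:ff".
 */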
/* bxe_get_tunable_params() (excerpt) */
    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
        /* ... */
        bxe_mrrs = -1;
    /* ... */
    sc->interrupt_mode = bxe_interrupt_mode;
    sc->max_rx_bufs = bxe_max_rx_bufs;
    sc->hc_rx_ticks = bxe_hc_rx_ticks;
    sc->hc_tx_ticks = bxe_hc_tx_ticks;
    sc->max_aggregation_size = bxe_max_aggregation_size;
    sc->mrrs = bxe_mrrs;
    sc->autogreeen = bxe_autogreeen;
    sc->udp_rss = bxe_udp_rss;
    /* ... */
    sc->num_queues = 1;
    /* ... */
    sc->num_queues = /* ... */
    /* ... */
    if (sc->num_queues > mp_ncpus) {
        sc->num_queues = mp_ncpus;
    /* ... */
        sc->interrupt_mode,
        sc->num_queues,
        sc->hc_rx_ticks,
        sc->hc_tx_ticks,
        /* ... */
        sc->max_aggregation_size,
        sc->mrrs,
        sc->autogreeen,
        sc->udp_rss);
/* bxe_media_detect() (excerpt) */
    switch (sc->link_params.phy[phy_idx].media_type) {
    /* ... */
        sc->media = IFM_10G_SR;
    /* ... */
        sc->media = IFM_1000_SX;
    /* ... */
        BLOGI(sc, "Found 10GBase-CX4 media.\n");
        sc->media = IFM_10G_CX4;
    /* ... */
        sc->media = IFM_10G_TWINAX;
    /* ... */
        if (sc->link_params.speed_cap_mask[0] &
            /* ... */
            BLOGI(sc, "Found 10GBase-T media.\n");
            sc->media = IFM_10G_T;
        /* ... */
            BLOGI(sc, "Found 1000Base-T media.\n");
            sc->media = IFM_1000_T;
    /* ... */
        sc->media = 0;
    /* ... */
        sc->media = 0;
/* bxe_get_igu_cam_info() (excerpt) */
    sc->igu_base_sb = 0xff;
    /* ... */
    igu_sb_cnt = sc->igu_sb_cnt;
    sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
                       /* ... */
    sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
                      /* ... */
    /* ... */
    /* IGU in normal mode - read CAM */
    /* ... */
    sc->igu_dsb_id = igu_sb_id;
    /* ... */
    if (sc->igu_base_sb == 0xff) {
        sc->igu_base_sb = igu_sb_id;
    /* ... */
    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
    /* ... */
    return (-1);
/* bxe_get_device_info() (excerpt) */
    sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
    sc->devinfo.device_id = pci_get_device(sc->dev);
    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
    /* ... */
    sc->devinfo.chip_id =
    sc->link_params.chip_id = /* ... */
    /* ... */
    sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
                           (sc->devinfo.chip_id & 0x0000ffff));
    /* ... */
    sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
                           (sc->devinfo.chip_id & 0x0000ffff));
    /* ... */
    sc->devinfo.chip_id |= 0x1;
    /* ... */
        sc->devinfo.chip_id,
        ((sc->devinfo.chip_id >> 16) & 0xffff),
        ((sc->devinfo.chip_id >> 12) & 0xf),
        ((sc->devinfo.chip_id >> 4) & 0xff),
        ((sc->devinfo.chip_id >> 0) & 0xf));
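/*
 * Editor's worked example (hypothetical value) of the chip_id decode
 * logged above: for chip_id = 0x168e1014,
 *   num   = (chip_id >> 16) & 0xffff = 0x168e
 *   rev   = (chip_id >> 12) & 0xf    = 0x1
 *   metal = (chip_id >>  4) & 0xff   = 0x01
 *   bond  = (chip_id >>  0) & 0xf    = 0x4
 */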
    /* ... */
    if ((sc->devinfo.chip_id & 0x1) ||
        /* ... */
        sc->flags |= BXE_ONE_PORT_FLAG;
    /* ... */
    sc->doorbell_size = (1 << BXE_DB_SHIFT);
    /* ... */
    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1H */
    /* ... */
    sc->devinfo.chip_port_mode = /* ... */
    /* ... */
    sc->devinfo.shmem_base =
    sc->link_params.shmem_base = /* ... */
    sc->devinfo.shmem2_base = /* ... */
    /* ... */
        sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
    /* ... */
    if (!sc->devinfo.shmem_base) {
        /* ... */
        sc->flags |= BXE_NO_MCP_FLAG;
    /* ... */
    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
    snprintf(sc->devinfo.bc_ver_str,
             sizeof(sc->devinfo.bc_ver_str),
             /* ... */
             ((sc->devinfo.bc_ver >> 24) & 0xff),
             ((sc->devinfo.bc_ver >> 16) & 0xff),
             ((sc->devinfo.bc_ver >> 8) & 0xff));
    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
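/*
 * Editor's worked example (hypothetical value) of the bootcode version
 * string built above: bc_ver = 0x070d0b00 decodes byte-wise to
 * (0x07, 0x0d, 0x0b), i.e. "7.13.11".
 */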
    /* ... */
    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
    /* ... */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
    /* ... */
    /*
     * Enable internal target-read (in case we are probed after PF ...)
     */
    /* ... */
    sc->devinfo.flash_size = /* ... */
    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
    /* ... */
    if (sc->devinfo.pcie_msix_cap_reg != 0) {
        val = pci_read_config(sc->dev,
                              (sc->devinfo.pcie_msix_cap_reg +
                               /* ... */
        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
    /* ... */
        sc->igu_sb_cnt = 1;
    /* ... */
    sc->igu_base_addr = BAR_IGU_INTMEM;
    /* ... */
    sc->devinfo.int_block = INT_BLOCK_HC;
    sc->igu_dsb_id = DEF_SB_IGU_ID;
    sc->igu_base_sb = 0;
    /* ... */
    sc->devinfo.int_block = INT_BLOCK_IGU;
    /* ... */
    tout--;
    /* ... */
    return (-1);
    /* ... */
    sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
    /* ... */
    /*
     * Get the base FW non-default (fast path) status block ID. This value
     * is ...
     */
    sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
    /* ... */
    /*
     * 57712+: we currently use one FW SB per IGU SB (Rx and Tx of ...)
     */
    sc->base_fw_ndsb = sc->igu_base_sb;
    /* ... */
        sc->igu_dsb_id, sc->igu_base_sb,
        sc->igu_sb_cnt, sc->base_fw_ndsb);
    /* ... */
    elink_phy_probe(&sc->link_params);
/* bxe_link_settings_supported() (excerpt) */
    sc->port.supported[0] = 0;
    sc->port.supported[1] = 0;
    /* ... */
    switch (sc->link_params.num_phys) {
    /* ... */
        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
    /* ... */
        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
    /* ... */
        if (sc->link_params.multi_phy_config &
            /* ... */
            sc->port.supported[1] =
                sc->link_params.phy[ELINK_EXT_PHY1].supported;
            sc->port.supported[0] =
                sc->link_params.phy[ELINK_EXT_PHY2].supported;
        /* ... */
            sc->port.supported[0] =
                sc->link_params.phy[ELINK_EXT_PHY1].supported;
            sc->port.supported[1] =
                sc->link_params.phy[ELINK_EXT_PHY2].supported;
    /* ... */
    if (!(sc->port.supported[0] || sc->port.supported[1])) {
    /* ... */
    sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
    /* ... */
    sc->port.phy_addr = /* ... */
    /* ... */
    sc->port.phy_addr = /* ... */
    /* ... */
        sc->port.link_config[0]);
    /* ... */
    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
    /* ... */
    if (!(sc->link_params.speed_cap_mask[idx] &
          /* ... */
        sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
    /* ... */
    if (!(sc->link_params.speed_cap_mask[idx] &
          /* ... */
        sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
    /* ... */
    if (!(sc->link_params.speed_cap_mask[idx] &
          /* ... */
        sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
    /* ... */
    if (!(sc->link_params.speed_cap_mask[idx] &
          /* ... */
        sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
    /* ... */
    if (!(sc->link_params.speed_cap_mask[idx] &
          /* ... */
        sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
    /* ... */
    if (!(sc->link_params.speed_cap_mask[idx] &
          /* ... */
        sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
    /* ... */
    if (!(sc->link_params.speed_cap_mask[idx] &
          /* ... */
        sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
    /* ... */
    if (!(sc->link_params.speed_cap_mask[idx] &
          /* ... */
        sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
    /* ... */
        sc->port.supported[0], sc->port.supported[1]);
    /* ... */
        sc->port.supported[0], sc->port.supported[1]);
/* bxe_link_settings_requested() (excerpt) */
    sc->port.advertising[0] = 0;
    sc->port.advertising[1] = 0;
    /* ... */
    switch (sc->link_params.num_phys) {
    /* ... */
    sc->link_params.req_duplex[idx] = DUPLEX_FULL;
    link_config = sc->port.link_config[idx];
    /* ... */
    if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
        sc->port.advertising[idx] |= sc->port.supported[idx];
        if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
            /* ... */
            sc->port.advertising[idx] |= /* ... */
    /* ... */
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
        sc->port.advertising[idx] |= /* ... */
    /* ... */
    if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
        sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
                                      /* ... */
    /* ... */
        link_config, sc->link_params.speed_cap_mask[idx]);
    /* ... */
    if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
        sc->link_params.req_duplex[idx] = DUPLEX_HALF;
        sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
                                      /* ... */
            sc->link_params.req_duplex[idx]);
    /* ... */
        link_config, sc->link_params.speed_cap_mask[idx]);
    /* ... */
    if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
        sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
                                      /* ... */
    /* ... */
        link_config, sc->link_params.speed_cap_mask[idx]);
    /* ... */
    if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
        sc->link_params.req_duplex[idx] = DUPLEX_HALF;
        sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
                                      /* ... */
    /* ... */
        link_config, sc->link_params.speed_cap_mask[idx]);
    /* ... */
    if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
        sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
                                      /* ... */
    /* ... */
        link_config, sc->link_params.speed_cap_mask[idx]);
    /* ... */
    if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
        sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
                                      /* ... */
    /* ... */
        link_config, sc->link_params.speed_cap_mask[idx]);
    /* ... */
    if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
        sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
        sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
                                      /* ... */
    /* ... */
        link_config, sc->link_params.speed_cap_mask[idx]);
    /* ... */
    sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
    /* ... */
        link_config, sc->link_params.speed_cap_mask[idx]);
    sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
    sc->port.advertising[idx] = sc->port.supported[idx];
    /* ... */
    sc->link_params.req_flow_ctrl[idx] = /* ... */
    /* ... */
    if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
        if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
            sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
    /* ... */
        sc->link_params.req_line_speed[idx],
        sc->link_params.req_duplex[idx],
        sc->link_params.req_flow_ctrl[idx],
        sc->port.advertising[idx]);
    /* ... */
        sc->link_params.req_line_speed[idx],
        sc->link_params.req_duplex[idx],
        sc->port.advertising[idx]);
/* bxe_get_phy_info() (excerpt) */
    uint32_t config = sc->port.config;
    /* ... */
        sc->link_params.lane_config,
        sc->link_params.speed_cap_mask[0],
        sc->port.link_config[0]);
    /* ... */
    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
    /* ... */
    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
        sc->link_params.feature_config_flags |= /* ... */
    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
        sc->link_params.feature_config_flags &= /* ... */
    /* ... */
        sc->link_params.feature_config_flags |= /* ... */
    /* ... */
    sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
                                /* ... */
    /* ... */
    sc->link_params.eee_mode = 0;
    /* ... */
    ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media);
/* bxe_get_params() (excerpt) */
    sc->tx_ring_size = TX_BD_USABLE;
    sc->rx_ring_size = RX_BD_USABLE;
    /* ... */
    sc->wol = 0;

/* bxe_set_modes_bitmap() (excerpt) */
    switch (sc->devinfo.mf_info.mf_mode) {
/* bxe_alloc_hsi_mem() (excerpt; each failure path below unwinds every
 * allocation made before it, in reverse order) */
    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
                            /* ... */
                            &sc->parent_dma_tag); /* returned dma tag */
    /* ... */
    if (bxe_dma_alloc(sc, /* ... */
                      &sc->def_sb_dma, "default status block") != 0) {
        /* ... */
        bus_dma_tag_destroy(sc->parent_dma_tag);
    /* ... */
    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
    /* ... */
    if (bxe_dma_alloc(sc, /* ... */
                      &sc->eq_dma, "event queue") != 0) {
        /* ... */
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
    /* ... */
    sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
    /* ... */
    if (bxe_dma_alloc(sc, /* ... */
                      &sc->sp_dma, "slow path") != 0) {
        /* ... */
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
    /* ... */
    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
    /* ... */
    if (bxe_dma_alloc(sc, /* ... */
                      &sc->spq_dma, "slow path queue") != 0) {
        /* ... */
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
    /* ... */
    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
    /* ... */
    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
                      /* ... */
        bxe_dma_free(sc, &sc->spq_dma);
        sc->spq = NULL;
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
    /* ... */
    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
    /* ... */
    if ((sc->gz_strm =
         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
        /* ... */
        bxe_dma_free(sc, &sc->gz_buf_dma);
        sc->gz_buf = NULL;
        bxe_dma_free(sc, &sc->spq_dma);
        sc->spq = NULL;
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
    /* ... */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        fp->sc = sc;
        fp->index = i;
        /* ... */
        if (bxe_dma_alloc(sc, /* ... */
                          &fp->sb_dma, buf) != 0) {
        /* ... */
        fp->status_block.e2_sb =
            (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
        /* ... */
        fp->status_block.e1x_sb =
            (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
        /* ... */
        if (bxe_dma_alloc(sc, /* ... */
                          &fp->tx_dma, buf) != 0) {
        /* ... */
        fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
        /* ... */
            &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
        /* ... */
        busaddr = (fp->tx_dma.paddr + /* ... */
        tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
        tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
        /* ... */
        if (bxe_dma_alloc(sc, /* ... */
                          &fp->rx_dma, buf) != 0) {
        /* ... */
        fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
        /* ... */
            &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
        /* ... */
        busaddr = (fp->rx_dma.paddr + /* ... */
        rx_bd->addr_hi = htole32(U64_HI(busaddr));
        rx_bd->addr_lo = htole32(U64_LO(busaddr));
        /* ... */
        if (bxe_dma_alloc(sc, /* ... */
                          &fp->rcq_dma, buf) != 0) {
        /* ... */
        fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
        /* ... */
            &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
        /* ... */
        busaddr = (fp->rcq_dma.paddr + /* ... */
        rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
        rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
        /* ... */
        if (bxe_dma_alloc(sc, /* ... */
                          &fp->rx_sge_dma, buf) != 0) {
        /* ... */
        fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
        /* ... */
            &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
        /* ... */
        busaddr = (fp->rx_sge_dma.paddr + /* ... */
        rx_sge->addr_hi = htole32(U64_HI(busaddr));
        rx_sge->addr_lo = htole32(U64_LO(busaddr));
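/*
 * Editor's sketch of the page-chaining pattern repeated for the tx, rx,
 * rcq and sge rings above (all names below are hypothetical): each ring
 * spans several BCM_PAGE_SIZE pages, and the final descriptor slot(s) of
 * page j are rewritten as a "next page" pointer holding the bus address
 * of page (j + 1) % NUM_PAGES, so the hardware can walk a ring larger
 * than one contiguous DMA allocation and wrap back to page 0.
 */
#if 0   /* illustrative only */
for (j = 0; j < NUM_PAGES; j++) {
    next = &ring[DESCS_PER_PAGE * (j + 1) - 1];     /* last slot of page j */
    busaddr = ring_paddr + (BCM_PAGE_SIZE * ((j + 1) % NUM_PAGES));
    next->addr_hi = htole32(U64_HI(busaddr));
    next->addr_lo = htole32(U64_LO(busaddr));
}
#endif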
        /* ... */
        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
        /* ... */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                /* ... */
                                &fp->tx_mbuf_tag); /* returned dma tag */
        /* ... */
        if (bus_dmamap_create(fp->tx_mbuf_tag, /* ... */
                              &fp->tx_mbuf_chain[j].m_map)) {
        /* ... */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                /* ... */
                                &fp->rx_mbuf_tag); /* returned dma tag */
        /* ... */
        if (bus_dmamap_create(fp->rx_mbuf_tag, /* ... */
                              &fp->rx_mbuf_chain[j].m_map)) {
        /* ... */
        if (bus_dmamap_create(fp->rx_mbuf_tag, /* ... */
                              &fp->rx_mbuf_spare_map)) {
        /* ... */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                /* ... */
                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
        /* ... */
        if (bus_dmamap_create(fp->rx_sge_mbuf_tag, /* ... */
                              &fp->rx_sge_mbuf_chain[j].m_map)) {
        /* ... */
        if (bus_dmamap_create(fp->rx_sge_mbuf_tag, /* ... */
                              &fp->rx_sge_mbuf_spare_map)) {
        /* ... */
        if (bus_dmamap_create(fp->rx_mbuf_tag, /* ... */
                              &fp->rx_tpa_info[j].bd.m_map)) {
        /* ... */
        if (bus_dmamap_create(fp->rx_mbuf_tag, /* ... */
                              &fp->rx_tpa_info_mbuf_spare_map)) {
/* bxe_free_hsi_mem() (excerpt) */
    if (sc->parent_dma_tag == NULL) {
    /* ... */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        /* ... */
        bxe_dma_free(sc, &fp->sb_dma);
        memset(&fp->status_block, 0, sizeof(fp->status_block));
        /* ... */
        bxe_dma_free(sc, &fp->tx_dma);
        fp->tx_chain = NULL;
        /* ... */
        bxe_dma_free(sc, &fp->rx_dma);
        fp->rx_chain = NULL;
        /* ... */
        bxe_dma_free(sc, &fp->rcq_dma);
        fp->rcq_chain = NULL;
        /* ... */
        bxe_dma_free(sc, &fp->rx_sge_dma);
        fp->rx_sge_chain = NULL;
        /* ... */
        if (fp->tx_mbuf_tag != NULL) {
            /* ... */
            if (fp->tx_mbuf_chain[j].m_map != NULL) {
                bus_dmamap_unload(fp->tx_mbuf_tag,
                                  fp->tx_mbuf_chain[j].m_map);
                bus_dmamap_destroy(fp->tx_mbuf_tag,
                                   fp->tx_mbuf_chain[j].m_map);
            /* ... */
            bus_dma_tag_destroy(fp->tx_mbuf_tag);
            fp->tx_mbuf_tag = NULL;
        /* ... */
        if (fp->rx_mbuf_tag != NULL) {
            /* ... */
            if (fp->rx_mbuf_chain[j].m_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag,
                                  fp->rx_mbuf_chain[j].m_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag,
                                   fp->rx_mbuf_chain[j].m_map);
            /* ... */
            if (fp->rx_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
            /* ... */
            if (fp->rx_tpa_info[j].bd.m_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag,
                                  fp->rx_tpa_info[j].bd.m_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag,
                                   fp->rx_tpa_info[j].bd.m_map);
            /* ... */
            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag,
                                  fp->rx_tpa_info_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag,
                                   fp->rx_tpa_info_mbuf_spare_map);
            /* ... */
            bus_dma_tag_destroy(fp->rx_mbuf_tag);
            fp->rx_mbuf_tag = NULL;
        /* ... */
        if (fp->rx_sge_mbuf_tag != NULL) {
            /* ... */
            if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                                  fp->rx_sge_mbuf_chain[j].m_map);
                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
                                   fp->rx_sge_mbuf_chain[j].m_map);
            /* ... */
            if (fp->rx_sge_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                                  fp->rx_sge_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
                                   fp->rx_sge_mbuf_spare_map);
            /* ... */
            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
            fp->rx_sge_mbuf_tag = NULL;
    /* ... */
    bxe_dma_free(sc, &sc->gz_buf_dma);
    sc->gz_buf = NULL;
    free(sc->gz_strm, M_DEVBUF);
    sc->gz_strm = NULL;
    /* ... */
    bxe_dma_free(sc, &sc->spq_dma);
    sc->spq = NULL;
    /* ... */
    bxe_dma_free(sc, &sc->sp_dma);
    sc->sp = NULL;
    /* ... */
    bxe_dma_free(sc, &sc->eq_dma);
    sc->eq = NULL;
    /* ... */
    bxe_dma_free(sc, &sc->def_sb_dma);
    sc->def_sb = NULL;
    /* ... */
    bus_dma_tag_destroy(sc->parent_dma_tag);
    sc->parent_dma_tag = NULL;
/* bxe_prev_interrupted_dmae() (excerpt) */
    /*
     * A DMAE transaction from a previous driver may have been left pending
     * when the pre-boot stage ended ... such an incomplete transaction
     * results in the was-error bit being set in the PCI block, causing all
     * hw-to-host PCIe transactions to time out. If this happened we want to
     * clear the interrupt which detected this from the pglueb and the
     * was-done bit ...
     */
    /* ... */
        "Clearing 'was-error' bit that was set in pglueb");
/* bxe_prev_mcp_done() (excerpt) */
    return (-1);

/* bxe_prev_path_get_entry() (excerpt) */
    if ((sc->pcie_bus == tmp->bus) &&
        (sc->pcie_device == tmp->slot) &&
        (SC_PATH(sc) == tmp->path)) {

/* bxe_prev_is_path_marked() (excerpt) */
    if (tmp->aer) {
        /* ... */
            sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
    /* ... */
            sc->pcie_bus, sc->pcie_device, SC_PATH(sc));

/* bxe_prev_mark_path() (excerpt) */
    if (!tmp->aer) {
        /* ... */
        "Re-marking AER in path %d/%d/%d\n",
        sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
    /* ... */
        sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
    tmp->aer = 0;
    /* ... */
    return (-1);
    /* ... */
    tmp->bus = sc->pcie_bus;
    tmp->slot = sc->pcie_device;
    tmp->path = SC_PATH(sc);
    tmp->aer = 0;
    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
    /* ... */
        "Marked path %d/%d/%d - finished previous unload\n",
        sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
/* bxe_do_flr() (excerpt) */
    return (-1);
    /* ... */
    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
        /* ... */
            sc->devinfo.bc_ver);
        return (-1);
    /* ... */
    DELAY(((1 << (i - 1)) * 100) * 1000);
    /* ... */
    BLOGE(sc, "PCIE transaction is not cleared, " /* ... */
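/*
 * Editor's note on the DELAY() in the FLR poll above: it backs off
 * exponentially. Counting iterations from i = 1, iteration i sleeps
 * (1 << (i - 1)) * 100 ms, i.e. 100 ms, 200 ms, 400 ms, 800 ms, ...
 */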
/* bxe_prev_unload_close_mac() (excerpt) */
    vals->bmac_addr = 0;
    vals->umac_addr = 0;
    vals->xmac_addr = 0;
    vals->emac_addr = 0;
    /* ... */
    vals->bmac_addr = base_addr + offset;
    vals->bmac_val[0] = wb_data[0];
    vals->bmac_val[1] = wb_data[1];
    /* ... */
    REG_WR(sc, vals->bmac_addr, wb_data[0]);
    REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
    /* ... */
    vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
    vals->emac_val = REG_RD(sc, vals->emac_addr);
    REG_WR(sc, vals->emac_addr, 0);
    /* ... */
    vals->xmac_addr = base_addr + XMAC_REG_CTRL;
    vals->xmac_val = REG_RD(sc, vals->xmac_addr);
    REG_WR(sc, vals->xmac_addr, 0);
    /* ... */
    vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
    vals->umac_val = REG_RD(sc, vals->umac_addr);
    REG_WR(sc, vals->umac_addr, 0);

/* bxe_prev_unload_undi_inc() (excerpt) */
        "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
/* bxe_prev_unload_common() (excerpt) */
    elink_set_rx_filter(&sc->link_params, 0);
    /* ... */
    timer_count--;

/* bxe_prev_unload() (excerpt) */
        (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
    /* ... */
    rc = -1;
    /* ... */
    /* a non-common reply from the MCP might require looping */
    /* ... */
    } while (--time_counter);
    /* ... */
    rc = -1;
/* bxe_dcbx_set_state() (excerpt) */
    sc->dcb_state = dcb_on;
    sc->dcbx_enabled = dcbx_enabled;
    /* ... */
    sc->dcb_state = FALSE;
    sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
    /* ... */
    (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
    (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
    /* ... */
    "on-chip with negotiation" : "invalid");

/* must be called after sriov-enable */
/* bxe_init_multi_cos() (excerpt) */
    if (cos < sc->max_cos) {
        sc->prio_to_cos[pri] = cos;
    /* ... */
            cos, pri, (sc->max_cos - 1));
        sc->prio_to_cos[pri] = 0;

/* bxe_sysctl_state() (excerpt) */
    if (error || !req->newptr) {

/* bxe_sysctl_eth_stat() (excerpt) */
    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
    /* ... */
    return (-1);
    /* ... */
    return (-1);

/* bxe_sysctl_eth_q_stat() (excerpt) */
    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
    /* ... */
    return (-1);
    /* ... */
    return (-1);

/* bxe_force_link_reset() (excerpt) */
    elink_link_reset(&sc->link_params, &sc->link_vars, 1);
/* bxe_sysctl_pauseparam() (excerpt) */
    error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
    /* ... */
    if (error || !req->newptr) {
    /* ... */
    if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
        BLOGW(sc, "invalid pause param (%d) - use integers between 0 & 8\n",
              sc->bxe_pause_param);
        sc->bxe_pause_param = 8;
    /* ... */
    result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
    /* ... */
    if ((result & 0x400) &&
        !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
        BLOGW(sc, "Does not support Autoneg pause_param %d\n",
              sc->bxe_pause_param);
        return (-EINVAL);
    /* ... */
    sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
    /* ... */
    sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
    /* ... */
    sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
    if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
        sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
    /* ... */
    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
        sc->link_params.req_flow_ctrl[cfg_idx] = /* ... */
        sc->link_params.req_fc_auto_adv = 0;
        /* ... */
        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
        /* ... */
        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
        if (!sc->link_params.req_fc_auto_adv)
            sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
    /* ... */
    if (sc->link_vars.link_up) {
        /* ... */
        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
            /* ... */
            rc = elink_phy_init(&sc->link_params, &sc->link_vars);
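/*
 * Editor's sketch of the sysctl(9) handler shape shared by the handlers
 * above (all demo_* names are hypothetical): sysctl_handle_int() copies
 * the current value out to userland and, on a write, copies the new value
 * in; req->newptr is NULL for a pure read, which is why the handlers bail
 * out early before validating or applying anything.
 */
#if 0   /* illustrative only */
static int
demo_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
    int error, val = *(int *)arg1;

    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || req->newptr == NULL)
        return (error);         /* pure read, or copy in/out failed */
    /* validate val here, then commit it */
    *(int *)arg1 = val;
    return (0);
}
#endif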
/* bxe_add_sysctls() (excerpt) */
    ctx = device_get_sysctl_ctx(sc->dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
    /* ... */
    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
             /* ... */
    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
             ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
              (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
              (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
              (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
              /* ... */
    /* ... */
        CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
        /* ... */
    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
             ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
              (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
              (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
              /* ... */
             sc->devinfo.pcie_link_width);
    /* ... */
    sc->debug = bxe_debug;
    /* ... */
        CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
    /* ... */
        CTLFLAG_RD, sc->fw_ver_str, 0,
    /* ... */
        CTLFLAG_RD, sc->mf_mode_str, 0,
    /* ... */
        CTLFLAG_RD, sc->mac_addr_str, 0,
    /* ... */
        CTLFLAG_RD, sc->pci_link_str, 0,
    /* ... */
        CTLFLAG_RW, &sc->debug,
    /* ... */
    sc->trigger_grcdump = 0;
    /* ... */
        CTLFLAG_RW, &sc->trigger_grcdump, 0,
    /* ... */
    sc->grcdump_started = 0;
    sc->grcdump_done = 0;
    /* ... */
        CTLFLAG_RD, &sc->grcdump_done, 0,
    /* ... */
    sc->rx_budget = bxe_rx_budget;
    /* ... */
        CTLFLAG_RW, &sc->rx_budget, 0,
    /* ... */
        "need pause frames - DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
    /* ... */
    for (i = 0; i < sc->num_queues; i++) {
/* bxe_alloc_buf_rings() (excerpt) */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        /* ... */
        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
                                   M_NOWAIT, &fp->tx_mtx);
        if (fp->tx_br == NULL)
            return (-1);

/* bxe_free_buf_rings() (excerpt) */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        /* ... */
        if (fp->tx_br) {
            buf_ring_free(fp->tx_br, M_DEVBUF);
            fp->tx_br = NULL;

/* bxe_init_fp_mutexs() (excerpt) */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        /* ... */
        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                 "bxe%d_fp%d_tx_lock", sc->unit, i);
        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
        /* ... */
        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
                 "bxe%d_fp%d_rx_lock", sc->unit, i);
        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16160 for (i = 0; i < sc->num_queues; i++) { in bxe_destroy_fp_mutexs()
16162 fp = &sc->fp[i]; in bxe_destroy_fp_mutexs()
16164 if (mtx_initialized(&fp->tx_mtx)) { in bxe_destroy_fp_mutexs()
16165 mtx_destroy(&fp->tx_mtx); in bxe_destroy_fp_mutexs()
16168 if (mtx_initialized(&fp->rx_mtx)) { in bxe_destroy_fp_mutexs()
16169 mtx_destroy(&fp->rx_mtx); in bxe_destroy_fp_mutexs()
16194 sc->state = BXE_STATE_CLOSED; in bxe_attach()
16196 sc->dev = dev; in bxe_attach()
16197 sc->unit = device_get_unit(dev); in bxe_attach()
16201 sc->pcie_bus = pci_get_bus(dev); in bxe_attach()
16202 sc->pcie_device = pci_get_slot(dev); in bxe_attach()
16203 sc->pcie_func = pci_get_function(dev); in bxe_attach()
16217 callout_init(&sc->periodic_callout, 1); in bxe_attach()
16220 sc->chip_tq_flags = CHIP_TQ_NONE; in bxe_attach()
16221 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), in bxe_attach()
16222 "bxe%d_chip_tq", sc->unit); in bxe_attach()
16223 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); in bxe_attach()
16224 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, in bxe_attach()
16226 &sc->chip_tq); in bxe_attach()
16227 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ in bxe_attach()
16228 "%s", sc->chip_tq_name); in bxe_attach()
16231 &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc); in bxe_attach()
16246 sc->mtu = ETHERMTU; in bxe_attach()
16262 if (sc->ifp != NULL) { in bxe_attach()
16263 ether_ifdetach(sc->ifp); in bxe_attach()
16265 ifmedia_removeall(&sc->ifmedia); in bxe_attach()
16275 if (sc->ifp != NULL) { in bxe_attach()
16276 ether_ifdetach(sc->ifp); in bxe_attach()
16278 ifmedia_removeall(&sc->ifmedia); in bxe_attach()
16291 if (sc->ifp != NULL) { in bxe_attach()
16292 ether_ifdetach(sc->ifp); in bxe_attach()
16294 ifmedia_removeall(&sc->ifmedia); in bxe_attach()
16306 if (sc->ifp != NULL) { in bxe_attach()
16307 ether_ifdetach(sc->ifp); in bxe_attach()
16309 ifmedia_removeall(&sc->ifmedia); in bxe_attach()
16322 if (sc->ifp != NULL) { in bxe_attach()
16323 ether_ifdetach(sc->ifp); in bxe_attach()
16325 ifmedia_removeall(&sc->ifmedia); in bxe_attach()
16335 sc->fw_seq = in bxe_attach()
16338 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); in bxe_attach()
16358 sc->qm_cid_count = bxe_set_qm_cid_count(sc); in bxe_attach()
16359 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); in bxe_attach()
16361 sc->max_cos = 1; in bxe_attach()
16387 ifp = sc->ifp; in bxe_detach()
16399 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); in bxe_detach()
16400 if (sc->chip_tq) { in bxe_detach()
16401 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); in bxe_detach()
16402 taskqueue_free(sc->chip_tq); in bxe_detach()
16403 sc->chip_tq = NULL; in bxe_detach()
16405 &sc->sp_err_timeout_task); in bxe_detach()
16409 if (sc->state != BXE_STATE_CLOSED) { in bxe_detach()
16412 sc->state = BXE_STATE_DISABLED; in bxe_detach()
16420 ifmedia_removeall(&sc->ifmedia); in bxe_detach()
16440 /* Release the PCIe BAR mapped memory */ in bxe_detach()
16444 if (sc->ifp != NULL) { in bxe_detach()
16445 if_free(sc->ifp); in bxe_detach()
16473 if (sc->state != BXE_STATE_CLOSED) { in bxe_shutdown()
16490 uint32_t igu_addr = sc->igu_base_addr; in bxe_igu_ack_sb()
16527 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, in bxe_igu_clear_sb_gen()
16535 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, in bxe_igu_clear_sb_gen()
16540 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { in bxe_igu_clear_sb_gen()
16598 shmem_base[0] = sc->devinfo.shmem_base; in bxe_common_init_phy()
16599 shmem2_base[0] = sc->devinfo.shmem2_base; in bxe_common_init_phy()
16608 sc->devinfo.chip_id, 0); in bxe_common_init_phy()
16636 if (sc->mrrs == -1) { in bxe_init_pxp()
16639 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); in bxe_init_pxp()
16640 r_order = sc->mrrs; in bxe_init_pxp()
16650 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); in bxe_get_pretend_reg()
16657 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16667 return (-1); in bxe_pretend_func()
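
The comment above says the pretend value packs PF-num, a VF-valid flag, and the absolute VF id into one field. The sketch below shows that style of packing; the shift positions and field widths are assumptions for illustration, not the documented PXP2 layout:

/* Hypothetical packing for illustration only; the real bit layout
 * belongs to the PXP2 pretend register and may differ. */
#include <stdint.h>

static inline uint16_t
pack_pretend_val(uint8_t pf_num, int vf_valid, uint8_t abs_vfid)
{
    /* assumed layout: [ABS-VFID | VF-valid | PF-num], low bits first */
    return ((uint16_t)abs_vfid << 4) |
           ((uint16_t)(vf_valid ? 1 : 0) << 3) |
           (uint16_t)(pf_num & 0x7);
}
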
16701 /* NON-IP protocol */ in bxe_lb_pckt()
16750 count--; in bxe_int_mem_test()
16755 return (-1); in bxe_int_mem_test()
16767 count--; in bxe_int_mem_test()
16772 return (-2); in bxe_int_mem_test()
16807 count--; in bxe_int_mem_test()
16812 return (-3); in bxe_int_mem_test()
16841 return (-4); in bxe_int_mem_test()
16887 sc->devinfo.shmem_base, in bxe_setup_fan_failure_detection()
16888 sc->devinfo.shmem2_base, in bxe_setup_fan_failure_detection()
16978 * bxe_init_hw_common - initialize the HW at the COMMON phase.
17016 * 4-port mode or 2-port mode we need to turn off master-enable for in bxe_init_hw_common()
17018 * multi-function, and always disable all functions on the given path, in bxe_init_hw_common()
17083 return (-1); in bxe_init_hw_common()
17088 return (-1); in bxe_init_hw_common()
17101 * (i.e. vnic3) to start even if it is marked as "scan-off". in bxe_init_hw_common()
17103 * as "scan-off". Real-life scenario for example: if a driver is being in bxe_init_hw_common()
17104 * load-unloaded while func6,7 are down. This will cause the timer to access in bxe_init_hw_common()
17119 * dmae-operations (writing to pram for example.) in bxe_init_hw_common()
17129 * b. Wait 20msec. - note that this timeout is needed to make in bxe_init_hw_common()
17160 * PF-s might be dynamic. in bxe_init_hw_common()
17170 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; in bxe_init_hw_common()
17209 } while (factor-- && (val != 1)); in bxe_init_hw_common()
17213 return (-1); in bxe_init_hw_common()
17224 sc->dmae_ready = 1; in bxe_init_hw_common()
17243 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); in bxe_init_hw_common()
17265 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); in bxe_init_hw_common()
17281 * Bit-map indicating which L2 hdrs may appear in bxe_init_hw_common()
17285 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); in bxe_init_hw_common()
17334 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); in bxe_init_hw_common()
17383 /* Reset PCIE errors for debug */ in bxe_init_hw_common()
17404 /* in E3 this done in per-port section */ in bxe_init_hw_common()
17422 return (-1); in bxe_init_hw_common()
17427 return (-1); in bxe_init_hw_common()
17432 return (-1); in bxe_init_hw_common()
17444 return (-1); in bxe_init_hw_common()
17469 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17483 /* In E2 2-PORT mode, same ext phy is used for the two paths */ in bxe_init_hw_common_chip()
17510 * attempted. Therefore we manually added the enable-master to the in bxe_init_hw_port()
17528 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); in bxe_init_hw_port()
17543 } else if (sc->mtu > 4096) { in bxe_init_hw_port()
17547 val = sc->mtu; in bxe_init_hw_port()
17579 /* Ovlan exists only if we are in multi-function + in bxe_init_hw_port()
17580 * switch-dependent mode, in switch-independent there in bxe_init_hw_port()
17586 (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6)); in bxe_init_hw_port()
17612 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); in bxe_init_hw_port()
17637 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use in bxe_init_hw_port()
17638 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF in bxe_init_hw_port()
17639 * bits 4-7 are used for "per vn group attention" */ in bxe_init_hw_port()
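
Given the masking rules in the comment above, the attention mask reduces to two constants; a sketch assuming exactly those bit assignments:

/* Sketch: SF mode exposes only bits 0-2 (0x07); MF mode also opens
 * bits 4-7 for per-vn group attentions while keeping bit 3 masked
 * (0xF7). */
#include <stdint.h>

static inline uint32_t
aeu_attn_mask(int is_mf)
{
    return (is_mf ? 0xF7 : 0x07);
}
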
17648 /* Bit-map indicating which L2 hdrs may appear after the in bxe_init_hw_port()
17679 switch (sc->devinfo.mf_info.mf_mode) { in bxe_init_hw_port()
17719 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { in bxe_flr_clnup_reg_poll()
17762 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ in bxe_poll_hw_usage_counters()
17770 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ in bxe_poll_hw_usage_counters()
17778 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ in bxe_poll_hw_usage_counters()
17786 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ in bxe_poll_hw_usage_counters()
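
Each wait above is an instance of the poll loop used by bxe_flr_clnup_reg_poll(): keep re-reading a usage-counter register until it reaches the expected value or the poll budget runs out. A generic sketch of that pattern; reg_read() and delay_us() are hypothetical stand-ins for the driver's REG_RD() and DELAY():

/* Hedged sketch of the poll-until-expected pattern. */
#include <stdint.h>

extern uint32_t reg_read(uint32_t addr);
extern void     delay_us(int usec);

static uint32_t
poll_reg(uint32_t addr, uint32_t expected, int poll_count, int interval_us)
{
    uint32_t val = reg_read(addr);

    while (val != expected && --poll_count > 0) {
        delay_us(interval_us);
        val = reg_read(addr);
    }
    return (val); /* caller treats val != expected as a timeout */
}
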
17864 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); in bxe_pbf_pN_buf_flushed()
17865 crd = crd_start = REG_RD(sc, regs->crd); in bxe_pbf_pN_buf_flushed()
17866 init_crd = REG_RD(sc, regs->init_crd); in bxe_pbf_pN_buf_flushed()
17868 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); in bxe_pbf_pN_buf_flushed()
17869 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); in bxe_pbf_pN_buf_flushed()
17870 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); in bxe_pbf_pN_buf_flushed()
17873 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < in bxe_pbf_pN_buf_flushed()
17874 (init_crd - crd_start))) { in bxe_pbf_pN_buf_flushed()
17875 if (cur_cnt--) { in bxe_pbf_pN_buf_flushed()
17877 crd = REG_RD(sc, regs->crd); in bxe_pbf_pN_buf_flushed()
17878 crd_freed = REG_RD(sc, regs->crd_freed); in bxe_pbf_pN_buf_flushed()
17880 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); in bxe_pbf_pN_buf_flushed()
17881 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); in bxe_pbf_pN_buf_flushed()
17882 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); in bxe_pbf_pN_buf_flushed()
17888 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); in bxe_pbf_pN_buf_flushed()
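
Note how the loop above measures progress with a signed difference cast back to unsigned: the credit-freed counter is free-running, so a plain magnitude comparison would break when it wraps. The same idiom in isolation; plain unsigned subtraction, as below, is the same computation without the signed intermediate:

/* Sketch: wrap-safe progress test for a free-running 32-bit counter.
 * True once at least 'needed' units elapsed since 'start', even if
 * the counter wrapped past 2^32 in between. */
#include <stdint.h>

static inline int
counter_progressed(uint32_t start, uint32_t now, uint32_t needed)
{
    /* unsigned subtraction wraps cleanly across rollover */
    return ((now - start) >= needed);
}
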
17899 occup = to_free = REG_RD(sc, regs->lines_occup); in bxe_pbf_pN_cmd_flushed()
17900 freed = freed_start = REG_RD(sc, regs->lines_freed); in bxe_pbf_pN_cmd_flushed()
17902 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); in bxe_pbf_pN_cmd_flushed()
17903 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); in bxe_pbf_pN_cmd_flushed()
17906 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { in bxe_pbf_pN_cmd_flushed()
17907 if (cur_cnt--) { in bxe_pbf_pN_cmd_flushed()
17909 occup = REG_RD(sc, regs->lines_occup); in bxe_pbf_pN_cmd_flushed()
17910 freed = REG_RD(sc, regs->lines_freed); in bxe_pbf_pN_cmd_flushed()
17912 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); in bxe_pbf_pN_cmd_flushed()
17913 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); in bxe_pbf_pN_cmd_flushed()
17914 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); in bxe_pbf_pN_cmd_flushed()
17920 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); in bxe_pbf_pN_cmd_flushed()
18027 /* Re-enable PF target read access */ in bxe_pf_flr_clnup()
18033 return (-1); in bxe_pf_flr_clnup()
18040 return (-1); in bxe_pf_flr_clnup()
18053 BLOGE(sc, "PCIE Transactions still pending\n"); in bxe_pf_flr_clnup()
18060 * Master enable - Due to WB DMAE writes performed before this in bxe_pf_flr_clnup()
18061 * register is re-initialized as part of the regular function init in bxe_pf_flr_clnup()
18074 struct ecore_ilt *ilt = sc->ilt; in bxe_init_hw_func()
18094 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_init_hw_func()
18104 ilt = sc->ilt; in bxe_init_hw_func()
18105 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; in bxe_init_hw_func()
18108 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; in bxe_init_hw_func()
18109 ilt->lines[cdu_ilt_start + i].page_mapping = in bxe_init_hw_func()
18110 sc->context[i].vcxt_dma.paddr; in bxe_init_hw_func()
18111 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; in bxe_init_hw_func()
18125 if (sc->interrupt_mode != INTR_MODE_MSIX) { in bxe_init_hw_func()
18138 * Master enable - Due to WB DMAE writes performed before this in bxe_init_hw_func()
18139 * register is re-initialized as part of the regular function in bxe_init_hw_func()
18147 sc->dmae_ready = 1; in bxe_init_hw_func()
18211 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_init_hw_func()
18236 * E2 mode: address 0-135 match to the mapping memory; in bxe_init_hw_func()
18237 * 136 - PF0 default prod; 137 - PF1 default prod; in bxe_init_hw_func()
18238 * 138 - PF2 default prod; 139 - PF3 default prod; in bxe_init_hw_func()
18239 * 140 - PF0 attn prod; 141 - PF1 attn prod; in bxe_init_hw_func()
18240 * 142 - PF2 attn prod; 143 - PF3 attn prod; in bxe_init_hw_func()
18241 * 144-147 reserved. in bxe_init_hw_func()
18243 * E1.5 mode - In backward compatible mode; in bxe_init_hw_func()
18247 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 in bxe_init_hw_func()
18250 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; in bxe_init_hw_func()
18251 * 132-135 C prods; 136-139 X prods; 140-143 T prods; in bxe_init_hw_func()
18252 * 144-147 attn prods; in bxe_init_hw_func()
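
From the E2 address map above, the per-PF special producers are pure arithmetic: default producers start at 136 and attention producers at 140. A small sketch of just that arithmetic:

/* Sketch derived from the E2 map above: addresses 0-135 follow the
 * mapping memory; 136+pf is PF's default prod, 140+pf its attn prod. */
#define IGU_E2_DEF_PROD_BASE  136
#define IGU_E2_ATTN_PROD_BASE 140

static inline int igu_e2_def_prod(int pf)  { return (IGU_E2_DEF_PROD_BASE + pf); }
static inline int igu_e2_attn_prod(int pf) { return (IGU_E2_ATTN_PROD_BASE + pf); }
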
18254 /* non-default-status-blocks */ in bxe_init_hw_func()
18257 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { in bxe_init_hw_func()
18258 prod_offset = (sc->igu_base_sb + sb_idx) * in bxe_init_hw_func()
18267 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, in bxe_init_hw_func()
18269 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); in bxe_init_hw_func()
18272 /* default-status-blocks */ in bxe_init_hw_func()
18286 * igu prods come in chunks of E1HVN_MAX (4) - in bxe_init_hw_func()
18297 bxe_ack_sb(sc, sc->igu_dsb_id, in bxe_init_hw_func()
18299 bxe_ack_sb(sc, sc->igu_dsb_id, in bxe_init_hw_func()
18301 bxe_ack_sb(sc, sc->igu_dsb_id, in bxe_init_hw_func()
18303 bxe_ack_sb(sc, sc->igu_dsb_id, in bxe_init_hw_func()
18305 bxe_ack_sb(sc, sc->igu_dsb_id, in bxe_init_hw_func()
18308 bxe_ack_sb(sc, sc->igu_dsb_id, in bxe_init_hw_func()
18310 bxe_ack_sb(sc, sc->igu_dsb_id, in bxe_init_hw_func()
18313 bxe_igu_clear_sb(sc, sc->igu_dsb_id); in bxe_init_hw_func()
18316 rf-tool supports split-68 const */ in bxe_init_hw_func()
18326 /* Reset PCIE errors for debug */ in bxe_init_hw_func()
18344 /* Clear "false" parity errors in MSI-X table */ in bxe_init_hw_func()
18368 elink_phy_probe(&sc->link_params); in bxe_init_hw_func()
18378 elink_lfa_reset(&sc->link_params, &sc->link_vars); in bxe_link_reset()
18382 BLOGW(sc, "Bootcode is missing - cannot reset link\n"); in bxe_link_reset()
18465 fp = &sc->fp[i]; in bxe_reset_func()
18467 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), in bxe_reset_func()
18481 if (sc->devinfo.int_block == INT_BLOCK_HC) { in bxe_reset_func()
18507 * Timers workaround bug for E2: if this is vnic-3, in bxe_reset_func()
18515 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; in bxe_reset_func()
18526 sc->dmae_ready = 0; in bxe_reset_func()
18546 sc->iro_array = e1_iro_arr; in bxe_init_firmware()
18549 sc->iro_array = e1h_iro_arr; in bxe_init_firmware()
18552 sc->iro_array = e2_iro_arr; in bxe_init_firmware()
18555 return (-1); in bxe_init_firmware()
18609 * character device - ioctl interface definitions
18626 #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18638 ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18645 return dump_num_registers[0][preset-1]; in bxe_get_preset_regs_len()
18647 return dump_num_registers[1][preset-1]; in bxe_get_preset_regs_len()
18649 return dump_num_registers[2][preset-1]; in bxe_get_preset_regs_len()
18651 return dump_num_registers[3][preset-1]; in bxe_get_preset_regs_len()
18653 return dump_num_registers[4][preset-1]; in bxe_get_preset_regs_len()
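
The IS_REG_IN_PRESET test used by this code is a 1-based bit check, and the per-chip returns above select a row of dump_num_registers in E1/E1H/E2/E3A0/E3B0 order. The bit test written out as a sketch:

/* Sketch: preset idx is 1-based, so it tests bit (idx-1) of the
 * presets mask; a single-bit mask makes '!= 0' equivalent to the
 * '== (1 << (idx-1))' form used by the macro. */
#include <stdint.h>

static inline int
reg_in_preset(uint32_t presets, int idx)
{
    return ((presets & (1U << (idx - 1))) != 0);
}
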
18743 return IS_E1_REG(reg_info->chips); in bxe_is_reg_in_chip()
18745 return IS_E1H_REG(reg_info->chips); in bxe_is_reg_in_chip()
18747 return IS_E2_REG(reg_info->chips); in bxe_is_reg_in_chip()
18749 return IS_E3A0_REG(reg_info->chips); in bxe_is_reg_in_chip()
18751 return IS_E3B0_REG(reg_info->chips); in bxe_is_reg_in_chip()
18760 return IS_E1_REG(wreg_info->chips); in bxe_is_wreg_in_chip()
18762 return IS_E1H_REG(wreg_info->chips); in bxe_is_wreg_in_chip()
18764 return IS_E2_REG(wreg_info->chips); in bxe_is_wreg_in_chip()
18766 return IS_E3A0_REG(wreg_info->chips); in bxe_is_wreg_in_chip()
18768 return IS_E3B0_REG(wreg_info->chips); in bxe_is_wreg_in_chip()
18774 * bxe_read_pages_regs - read "paged" registers
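
Paged registers are read through a window: write a page value to the page-select register, then read the registers the window now exposes, once per page. A hedged sketch of that walk; reg_write()/reg_read() stand in for REG_WR()/REG_RD() and the address parameters are illustrative:

/* Hedged sketch of a paged-register walk. */
#include <stdint.h>

extern void     reg_write(uint32_t addr, uint32_t val);
extern uint32_t reg_read(uint32_t addr);

static void
read_paged_regs(const uint32_t *page_vals, int num_pages,
    uint32_t page_sel_addr, uint32_t window_addr, int dwords, uint32_t *out)
{
    int page, i;

    for (page = 0; page < num_pages; page++) {
        reg_write(page_sel_addr, page_vals[page]); /* select the page */
        for (i = 0; i < dwords; i++)
            *out++ = reg_read(window_addr + i * 4);
    }
}
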
18839 return (-1); in bxe_get_preset_regs()
18861 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { in bxe_get_preset_regs()
18862 for (i = 0; i < wreg_addr_p->size; i++) { in bxe_get_preset_regs()
18863 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); in bxe_get_preset_regs()
18868 for (j = 0; j < wreg_addr_p->read_regs_count; j++) { in bxe_get_preset_regs()
18869 addr = *(wreg_addr_p->read_regs); in bxe_get_preset_regs()
18902 if (sc->grcdump_done || sc->grcdump_started) in bxe_grc_dump()
18905 sc->grcdump_started = 1; in bxe_grc_dump()
18911 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); in bxe_grc_dump()
18913 if (sc->grc_dump == NULL) { in bxe_grc_dump()
18922 * will re-enable parity attentions right after the dump. in bxe_grc_dump()
18937 buf = sc->grc_dump; in bxe_grc_dump()
18938 d_hdr = sc->grc_dump; in bxe_grc_dump()
18940 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; in bxe_grc_dump()
18941 d_hdr->version = BNX2X_DUMP_VERSION; in bxe_grc_dump()
18942 d_hdr->preset = DUMP_ALL_PRESETS; in bxe_grc_dump()
18945 d_hdr->dump_meta_data = DUMP_CHIP_E1; in bxe_grc_dump()
18947 d_hdr->dump_meta_data = DUMP_CHIP_E1H; in bxe_grc_dump()
18949 d_hdr->dump_meta_data = DUMP_CHIP_E2 | in bxe_grc_dump()
18952 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | in bxe_grc_dump()
18955 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | in bxe_grc_dump()
18991 if (sc->state == BXE_STATE_OPEN) { in bxe_grc_dump()
18992 if (sc->fw_stats_req != NULL) { in bxe_grc_dump()
18994 (uintmax_t)sc->fw_stats_req_mapping, in bxe_grc_dump()
18995 (uintmax_t)sc->fw_stats_data_mapping, in bxe_grc_dump()
18996 sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size)); in bxe_grc_dump()
18998 if (sc->def_sb != NULL) { in bxe_grc_dump()
19000 (void *)sc->def_sb_dma.paddr, sc->def_sb, in bxe_grc_dump()
19003 if (sc->eq_dma.vaddr != NULL) { in bxe_grc_dump()
19005 (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE); in bxe_grc_dump()
19007 if (sc->sp_dma.vaddr != NULL) { in bxe_grc_dump()
19009 (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr, in bxe_grc_dump()
19012 if (sc->spq_dma.vaddr != NULL) { in bxe_grc_dump()
19014 (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE); in bxe_grc_dump()
19016 if (sc->gz_buf_dma.vaddr != NULL) { in bxe_grc_dump()
19018 (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, in bxe_grc_dump()
19021 for (i = 0; i < sc->num_queues; i++) { in bxe_grc_dump()
19022 fp = &sc->fp[i]; in bxe_grc_dump()
19023 if (fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL && in bxe_grc_dump()
19024 fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL && in bxe_grc_dump()
19025 fp->rx_sge_dma.vaddr != NULL) { in bxe_grc_dump()
19028 (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr, in bxe_grc_dump()
19031 (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr, in bxe_grc_dump()
19034 (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr, in bxe_grc_dump()
19037 (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr, in bxe_grc_dump()
19040 (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr, in bxe_grc_dump()
19045 ilt_cli = &ilt->clients[1]; in bxe_grc_dump()
19046 if (ilt->lines != NULL) { in bxe_grc_dump()
19047 for (i = ilt_cli->start; i <= ilt_cli->end; i++) { in bxe_grc_dump()
19049 (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr), in bxe_grc_dump()
19050 ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE); in bxe_grc_dump()
19066 sc->grcdump_done = 1; in bxe_grc_dump()
19073 sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT); in bxe_add_cdev()
19075 if (sc->eeprom == NULL) { in bxe_add_cdev()
19077 return (-1); in bxe_add_cdev()
19080 sc->ioctl_dev = make_dev(&bxe_cdevsw, in bxe_add_cdev()
19081 if_getdunit(sc->ifp), in bxe_add_cdev()
19086 if_name(sc->ifp)); in bxe_add_cdev()
19088 if (sc->ioctl_dev == NULL) { in bxe_add_cdev()
19089 free(sc->eeprom, M_DEVBUF); in bxe_add_cdev()
19090 sc->eeprom = NULL; in bxe_add_cdev()
19091 return (-1); in bxe_add_cdev()
19094 sc->ioctl_dev->si_drv1 = sc; in bxe_add_cdev()
19102 if (sc->ioctl_dev != NULL) in bxe_del_cdev()
19103 destroy_dev(sc->ioctl_dev); in bxe_del_cdev()
19105 if (sc->eeprom != NULL) { in bxe_del_cdev()
19106 free(sc->eeprom, M_DEVBUF); in bxe_del_cdev()
19107 sc->eeprom = NULL; in bxe_del_cdev()
19109 sc->ioctl_dev = NULL; in bxe_del_cdev()
19117 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) in bxe_is_nvram_accessible()
19131 return (-EAGAIN); in bxe_wr_eeprom()
19146 return (-EAGAIN); in bxe_rd_eeprom()
19158 switch (eeprom->eeprom_cmd) { in bxe_eeprom_rd_wr()
19162 rval = copyin(eeprom->eeprom_data, sc->eeprom, in bxe_eeprom_rd_wr()
19163 eeprom->eeprom_data_len); in bxe_eeprom_rd_wr()
19168 rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, in bxe_eeprom_rd_wr()
19169 eeprom->eeprom_data_len); in bxe_eeprom_rd_wr()
19174 rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, in bxe_eeprom_rd_wr()
19175 eeprom->eeprom_data_len); in bxe_eeprom_rd_wr()
19181 rval = copyout(sc->eeprom, eeprom->eeprom_data, in bxe_eeprom_rd_wr()
19182 eeprom->eeprom_data_len); in bxe_eeprom_rd_wr()
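
The switch above is a copyin -> NVRAM write, or NVRAM read -> copyout, sequence keyed on eeprom_cmd. From userland the call would look roughly like the sketch below; the device path, request code, and struct name are assumptions, while the eeprom_* fields mirror the ones used above:

/* Hedged userland sketch. The ioctl request code and struct shape are
 * hypothetical; the real definitions live in the driver's ioctl header. */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

struct eeprom_req {                /* assumed shape */
    int      eeprom_cmd;
    uint32_t eeprom_offset;
    uint32_t eeprom_data_len;
    void    *eeprom_data;
};

static int
eeprom_xfer(unsigned long req_code, int cmd, void *buf,
    uint32_t off, uint32_t len)
{
    struct eeprom_req req = {
        .eeprom_cmd = cmd, .eeprom_offset = off,
        .eeprom_data_len = len, .eeprom_data = buf,
    };
    int fd, rc;

    fd = open("/dev/bxe0", O_RDWR);   /* assumed node name */
    if (fd < 0)
        return (-1);
    rc = ioctl(fd, req_code, &req);   /* caller supplies the macro */
    close(fd);
    return (rc);
}
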
19191 BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval); in bxe_eeprom_rd_wr()
19204 dev_p->supported = sc->port.supported[cfg_idx] | in bxe_get_settings()
19205 (sc->port.supported[cfg_idx ^ 1] & in bxe_get_settings()
19207 dev_p->advertising = sc->port.advertising[cfg_idx]; in bxe_get_settings()
19208 if (sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type == in bxe_get_settings()
19210 dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full); in bxe_get_settings()
19211 dev_p->advertising &= ~(ADVERTISED_10000baseT_Full); in bxe_get_settings()
19213 if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up && in bxe_get_settings()
19214 !(sc->flags & BXE_MF_FUNC_DIS)) { in bxe_get_settings()
19215 dev_p->duplex = sc->link_vars.duplex; in bxe_get_settings()
19217 dev_p->speed = bxe_get_mf_speed(sc); in bxe_get_settings()
19219 dev_p->speed = sc->link_vars.line_speed; in bxe_get_settings()
19221 dev_p->duplex = DUPLEX_UNKNOWN; in bxe_get_settings()
19222 dev_p->speed = SPEED_UNKNOWN; in bxe_get_settings()
19225 dev_p->port = bxe_media_detect(sc); in bxe_get_settings()
19231 dev_p->phy_address = sc->port.phy_addr; in bxe_get_settings()
19236 dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); in bxe_get_settings()
19238 dev_p->phy_address = 0; in bxe_get_settings()
19240 if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) in bxe_get_settings()
19241 dev_p->autoneg = AUTONEG_ENABLE; in bxe_get_settings()
19243 dev_p->autoneg = AUTONEG_DISABLE; in bxe_get_settings()
19266 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) in bxe_eioctl()
19274 dump->pci_func = sc->pcie_func; in bxe_eioctl()
19275 dump->grcdump_size = in bxe_eioctl()
19284 if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) || in bxe_eioctl()
19285 (dump->grcdump_size < grc_dump_size)) { in bxe_eioctl()
19290 if ((sc->trigger_grcdump) && (!sc->grcdump_done) && in bxe_eioctl()
19291 (!sc->grcdump_started)) { in bxe_eioctl()
19295 if ((!rval) && (sc->grcdump_done) && (sc->grcdump_started) && in bxe_eioctl()
19296 (sc->grc_dump != NULL)) { in bxe_eioctl()
19297 dump->grcdump_dwords = grc_dump_size >> 2; in bxe_eioctl()
19298 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); in bxe_eioctl()
19299 free(sc->grc_dump, M_DEVBUF); in bxe_eioctl()
19300 sc->grc_dump = NULL; in bxe_eioctl()
19301 sc->grcdump_started = 0; in bxe_eioctl()
19302 sc->grcdump_done = 0; in bxe_eioctl()
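
Taken together, the branch above defines a handshake: a first call fills grcdump_size, the dev.bxe.N.trigger_grcdump sysctl arms the capture, and a second call copies out grcdump_dwords of data and resets the state flags. An outline of the userland side, with the ioctl abstracted behind a caller-supplied function since the request macro is not shown here:

/* Hedged outline of the GRC dump handshake; struct fields mirror the
 * pci_func/grcdump_size/grcdump/grcdump_dwords uses above. */
#include <stdint.h>
#include <stdlib.h>

struct grcdump_req {               /* assumed shape */
    int       pci_func;
    uint32_t  grcdump_size;        /* out on query, in on fetch */
    void     *grcdump;             /* NULL to query the size */
    uint32_t  grcdump_dwords;      /* out: dwords copied */
};

static void *
fetch_grc_dump(int fd, int (*do_ioctl)(int, struct grcdump_req *))
{
    struct grcdump_req req = { .grcdump = NULL };

    if (do_ioctl(fd, &req) != 0 || req.grcdump_size == 0)
        return (NULL);             /* 1) size query failed */
    /* 2) arm the capture: sysctl dev.bxe.N.trigger_grcdump=1 */
    req.grcdump = malloc(req.grcdump_size);
    if (req.grcdump != NULL && do_ioctl(fd, &req) != 0) {
        free(req.grcdump);         /* 3) fetch failed */
        return (NULL);
    }
    return (req.grcdump);
}
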
19309 snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe"); in bxe_eioctl()
19310 snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s", in bxe_eioctl()
19312 snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s", in bxe_eioctl()
19313 sc->devinfo.bc_ver_str); in bxe_eioctl()
19314 snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH, in bxe_eioctl()
19315 "%s", sc->fw_ver_str); in bxe_eioctl()
19316 drv_infop->eeprom_dump_len = sc->devinfo.flash_size; in bxe_eioctl()
19317 drv_infop->reg_dump_len = in bxe_eioctl()
19320 snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d", in bxe_eioctl()
19321 sc->pcie_bus, sc->pcie_device, sc->pcie_func); in bxe_eioctl()
19327 dev_p->supported = dev_set.supported; in bxe_eioctl()
19328 dev_p->advertising = dev_set.advertising; in bxe_eioctl()
19329 dev_p->speed = dev_set.speed; in bxe_eioctl()
19330 dev_p->duplex = dev_set.duplex; in bxe_eioctl()
19331 dev_p->port = dev_set.port; in bxe_eioctl()
19332 dev_p->phy_address = dev_set.phy_address; in bxe_eioctl()
19333 dev_p->autoneg = dev_set.autoneg; in bxe_eioctl()
19340 grc_dump_size = reg_p->reg_buf_len; in bxe_eioctl()
19342 if ((!sc->grcdump_done) && (!sc->grcdump_started)) { in bxe_eioctl()
19345 if ((sc->grcdump_done) && (sc->grcdump_started) && in bxe_eioctl()
19346 (sc->grc_dump != NULL)) { in bxe_eioctl()
19347 rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size); in bxe_eioctl()
19348 free(sc->grc_dump, M_DEVBUF); in bxe_eioctl()
19349 sc->grc_dump = NULL; in bxe_eioctl()
19350 sc->grcdump_started = 0; in bxe_eioctl()
19351 sc->grcdump_done = 0; in bxe_eioctl()
19358 if ((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) && in bxe_eioctl()
19359 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) in bxe_eioctl()
19360 reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id); in bxe_eioctl()
19362 if ((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) && in bxe_eioctl()
19363 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) in bxe_eioctl()
19364 REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val); in bxe_eioctl()
19370 if (cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) { in bxe_eioctl()
19372 cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id, in bxe_eioctl()
19373 cfg_rdw_p->cfg_width); in bxe_eioctl()
19375 } else if (cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) { in bxe_eioctl()
19376 pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val, in bxe_eioctl()
19377 cfg_rdw_p->cfg_width); in bxe_eioctl()
19385 snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s", in bxe_eioctl()
19386 sc->mac_addr_str); in bxe_eioctl()
19409 *nrxr = sc->num_queues; in bxe_debugnet_init()
19411 *clsize = sc->fp[0].mbuf_alloc_size; in bxe_debugnet_init()
19428 IFF_DRV_RUNNING || !sc->link_vars.link_up) in bxe_debugnet_transmit()
19431 error = bxe_tx_encap(&sc->fp[0], &m); in bxe_debugnet_transmit()
19445 !sc->link_vars.link_up) in bxe_debugnet_poll()
19448 for (i = 0; i < sc->num_queues; i++) in bxe_debugnet_poll()
19449 (void)bxe_rxeof(sc, &sc->fp[i]); in bxe_debugnet_poll()
19450 (void)bxe_txeof(sc, &sc->fp[0]); in bxe_debugnet_poll()