Lines Matching full:bp

36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) in bnx2x_add_all_napi_cnic() argument
46 for_each_rx_queue_cnic(bp, i) { in bnx2x_add_all_napi_cnic()
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll); in bnx2x_add_all_napi_cnic()
51 static void bnx2x_add_all_napi(struct bnx2x *bp) in bnx2x_add_all_napi() argument
56 for_each_eth_queue(bp, i) { in bnx2x_add_all_napi()
57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll); in bnx2x_add_all_napi()
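Both helpers above register one NAPI context per queue, all driven by the same poll handler; bnx2x_fp(bp, i, napi) is the driver's accessor for the i-th fastpath's napi member. A minimal sketch of the same pattern (names as in the listing, body simplified):

static void add_all_napi_sketch(struct bnx2x *bp)
{
	int i;

	/* One napi_struct per ethernet queue, shared poll callback. */
	for_each_eth_queue(bp, i)
		netif_napi_add(bp->dev, &bp->fp[i].napi, bnx2x_poll);
}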
61 static int bnx2x_calc_num_queues(struct bnx2x *bp) in bnx2x_calc_num_queues() argument
69 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); in bnx2x_calc_num_queues()
76 * @bp: driver handle
80 * Makes sure the contents of bp->fp[to].napi are kept intact.
86 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) in bnx2x_move_fp() argument
88 struct bnx2x_fastpath *from_fp = &bp->fp[from]; in bnx2x_move_fp()
89 struct bnx2x_fastpath *to_fp = &bp->fp[to]; in bnx2x_move_fp()
90 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; in bnx2x_move_fp()
91 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; in bnx2x_move_fp()
92 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; in bnx2x_move_fp()
93 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; in bnx2x_move_fp()
121 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; in bnx2x_move_fp()
122 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * in bnx2x_move_fp()
123 (bp)->max_cos; in bnx2x_move_fp()
124 if (from == FCOE_IDX(bp)) { in bnx2x_move_fp()
129 memcpy(&bp->bnx2x_txq[new_txdata_index], in bnx2x_move_fp()
130 &bp->bnx2x_txq[old_txdata_index], in bnx2x_move_fp()
132 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; in bnx2x_move_fp()
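Per the kernel-doc above, bnx2x_move_fp() must preserve the destination slot's already-initialized napi struct while copying everything else. A hedged sketch of that core step (the sp_objs/fp_stats/txdata repointing shown above is omitted):

	/* Keep to_fp->napi intact across the wholesale copy. */
	struct napi_struct orig_napi = to_fp->napi;

	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->napi = orig_napi;
	to_fp->index = to;	/* restore the slot's own identity */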
138 * @bp: driver handle
143 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) in bnx2x_fill_fw_str() argument
145 if (IS_PF(bp)) { in bnx2x_fill_fw_str()
149 bnx2x_get_ext_phy_fw_version(&bp->link_params, in bnx2x_fill_fw_str()
154 bp->fw_ver, in bnx2x_fill_fw_str()
155 (bp->common.bc_ver & 0xff0000) >> 16, in bnx2x_fill_fw_str()
156 (bp->common.bc_ver & 0xff00) >> 8, in bnx2x_fill_fw_str()
157 (bp->common.bc_ver & 0xff), in bnx2x_fill_fw_str()
160 bnx2x_vf_fill_fw_str(bp, buf, buf_len); in bnx2x_fill_fw_str()
167 * @bp: driver handle
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) in bnx2x_shrink_eth_fp() argument
172 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_shrink_eth_fp()
177 for (cos = 1; cos < bp->max_cos; cos++) { in bnx2x_shrink_eth_fp()
179 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_shrink_eth_fp()
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], in bnx2x_shrink_eth_fp()
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; in bnx2x_shrink_eth_fp()
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, in bnx2x_free_tx_pkt() argument
245 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), in bnx2x_free_tx_pkt()
253 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in bnx2x_free_tx_pkt()
273 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) in bnx2x_tx_int() argument
280 if (unlikely(bp->panic)) in bnx2x_tx_int()
284 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); in bnx2x_tx_int()
300 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, in bnx2x_tx_int()
336 (bp->state == BNX2X_STATE_OPEN) && in bnx2x_tx_int()
337 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)) in bnx2x_tx_int()
358 struct bnx2x *bp = fp->bp; in bnx2x_update_sge_prod() local
410 static u32 bnx2x_get_rxhash(const struct bnx2x *bp, in bnx2x_get_rxhash() argument
415 if ((bp->dev->features & NETIF_F_RXHASH) && in bnx2x_get_rxhash()
434 struct bnx2x *bp = fp->bp; in bnx2x_tpa_start() local
447 mapping = dma_map_single(&bp->pdev->dev, in bnx2x_tpa_start()
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_tpa_start()
480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); in bnx2x_tpa_start()
547 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_sge() argument
563 mapping = dma_map_page(&bp->pdev->dev, pool->page, in bnx2x_alloc_rx_sge()
565 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_sge()
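A pattern worth noting across the RX allocation paths here: every dma_map_page()/dma_map_single() result is validated with dma_mapping_error() before the buffer is published to hardware. A minimal sketch, assuming a freshly allocated struct page *page:

	dma_addr_t mapping;

	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping)))
		return -ENOMEM;	/* caller recycles or frees the page */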
586 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_fill_frag_skb() argument
638 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); in bnx2x_fill_frag_skb()
640 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_fill_frag_skb()
644 dma_unmap_page(&bp->pdev->dev, in bnx2x_fill_frag_skb()
710 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_gro_ip_csum() argument
722 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_gro_ipv6_csum() argument
734 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_gro_csum() argument
738 gro_func(bp, skb); in bnx2x_gro_csum()
743 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_gro_receive() argument
750 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum); in bnx2x_gro_receive()
753 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); in bnx2x_gro_receive()
756 netdev_WARN_ONCE(bp->dev, in bnx2x_gro_receive()
766 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_tpa_stop() argument
792 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), in bnx2x_tpa_stop()
812 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_tpa_stop()
815 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, in bnx2x_tpa_stop()
819 bnx2x_gro_receive(bp, fp, skb); in bnx2x_tpa_stop()
837 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; in bnx2x_tpa_stop()
840 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_data() argument
852 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, in bnx2x_alloc_rx_data()
855 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_data()
896 struct bnx2x *bp = fp->bp; in bnx2x_rx_int() local
904 if (unlikely(bp->panic)) in bnx2x_rx_int()
934 if (unlikely(bp->panic)) in bnx2x_rx_int()
1013 bnx2x_tpa_stop(bp, fp, tpa_info, pages, in bnx2x_rx_int()
1016 if (bp->panic) in bnx2x_rx_int()
1026 dma_sync_single_for_cpu(&bp->pdev->dev, in bnx2x_rx_int()
1037 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; in bnx2x_rx_int()
1044 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && in bnx2x_rx_int()
1050 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1056 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, in bnx2x_rx_int()
1058 dma_unmap_single(&bp->pdev->dev, in bnx2x_rx_int()
1065 bnx2x_fp_qstats(bp, fp)-> in bnx2x_rx_int()
1073 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1081 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_rx_int()
1084 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); in bnx2x_rx_int()
1089 if (bp->dev->features & NETIF_F_RXCSUM) in bnx2x_rx_int()
1091 bnx2x_fp_qstats(bp, fp)); in bnx2x_rx_int()
1098 bnx2x_set_rx_ts(bp, skb); in bnx2x_rx_int()
1134 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, in bnx2x_rx_int()
1143 struct bnx2x *bp = fp->bp; in bnx2x_msix_fp_int() local
1150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bnx2x_msix_fp_int()
1153 if (unlikely(bp->panic)) in bnx2x_msix_fp_int()
1162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_msix_fp_int()
1168 void bnx2x_acquire_phy_lock(struct bnx2x *bp) in bnx2x_acquire_phy_lock() argument
1170 mutex_lock(&bp->port.phy_mutex); in bnx2x_acquire_phy_lock()
1172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); in bnx2x_acquire_phy_lock()
1175 void bnx2x_release_phy_lock(struct bnx2x *bp) in bnx2x_release_phy_lock() argument
1177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); in bnx2x_release_phy_lock()
1179 mutex_unlock(&bp->port.phy_mutex); in bnx2x_release_phy_lock()
1183 u16 bnx2x_get_mf_speed(struct bnx2x *bp) in bnx2x_get_mf_speed() argument
1185 u16 line_speed = bp->link_vars.line_speed; in bnx2x_get_mf_speed()
1186 if (IS_MF(bp)) { in bnx2x_get_mf_speed()
1187 u16 maxCfg = bnx2x_extract_max_cfg(bp, in bnx2x_get_mf_speed()
1188 bp->mf_config[BP_VN(bp)]); in bnx2x_get_mf_speed()
1193 if (IS_MF_PERCENT_BW(bp)) in bnx2x_get_mf_speed()
1209 * @bp: driver handle
1214 static void bnx2x_fill_report_data(struct bnx2x *bp, in bnx2x_fill_report_data() argument
1219 if (IS_PF(bp)) { in bnx2x_fill_report_data()
1221 data->line_speed = bnx2x_get_mf_speed(bp); in bnx2x_fill_report_data()
1224 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) in bnx2x_fill_report_data()
1228 if (!BNX2X_NUM_ETH_QUEUES(bp)) in bnx2x_fill_report_data()
1233 if (bp->link_vars.duplex == DUPLEX_FULL) in bnx2x_fill_report_data()
1238 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) in bnx2x_fill_report_data()
1243 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) in bnx2x_fill_report_data()
1247 *data = bp->vf_link_vars; in bnx2x_fill_report_data()
1254 * @bp: driver handle
1261 void bnx2x_link_report(struct bnx2x *bp) in bnx2x_link_report() argument
1263 bnx2x_acquire_phy_lock(bp); in bnx2x_link_report()
1264 __bnx2x_link_report(bp); in bnx2x_link_report()
1265 bnx2x_release_phy_lock(bp); in bnx2x_link_report()
1271 * @bp: driver handle
1276 void __bnx2x_link_report(struct bnx2x *bp) in __bnx2x_link_report() argument
1280 if (bp->force_link_down) { in __bnx2x_link_report()
1281 bp->link_vars.link_up = 0; in __bnx2x_link_report()
1286 if (IS_PF(bp) && !CHIP_IS_E1(bp)) in __bnx2x_link_report()
1287 bnx2x_read_mf_cfg(bp); in __bnx2x_link_report()
1290 bnx2x_fill_report_data(bp, &cur_data); in __bnx2x_link_report()
1293 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || in __bnx2x_link_report()
1295 &bp->last_reported_link.link_report_flags) && in __bnx2x_link_report()
1300 bp->link_cnt++; in __bnx2x_link_report()
1305 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); in __bnx2x_link_report()
1308 if (IS_PF(bp)) in __bnx2x_link_report()
1309 bnx2x_iov_link_update(bp); in __bnx2x_link_report()
1313 netif_carrier_off(bp->dev); in __bnx2x_link_report()
1314 netdev_err(bp->dev, "NIC Link is Down\n"); in __bnx2x_link_report()
1320 netif_carrier_on(bp->dev); in __bnx2x_link_report()
1346 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", in __bnx2x_link_report()
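__bnx2x_link_report() only logs when the link state actually changed: it rebuilds the report data, memcmp()s it against the last reported copy, and bails out early on a match. A sketch of that dedup step:

	struct bnx2x_link_report_data cur_data;

	bnx2x_fill_report_data(bp, &cur_data);
	/* Nothing changed since the last report: stay quiet. */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)))
		return;

	bp->link_cnt++;
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));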
1369 static void bnx2x_free_tpa_pool(struct bnx2x *bp, in bnx2x_free_tpa_pool() argument
1384 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_tpa_pool()
1392 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp) in bnx2x_init_rx_rings_cnic() argument
1396 for_each_rx_queue_cnic(bp, j) { in bnx2x_init_rx_rings_cnic()
1397 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings_cnic()
1406 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings_cnic()
1411 void bnx2x_init_rx_rings(struct bnx2x *bp) in bnx2x_init_rx_rings() argument
1413 int func = BP_FUNC(bp); in bnx2x_init_rx_rings()
1418 for_each_eth_queue(bp, j) { in bnx2x_init_rx_rings()
1419 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1422 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); in bnx2x_init_rx_rings()
1426 for (i = 0; i < MAX_AGG_QS(bp); i++) { in bnx2x_init_rx_rings()
1437 bnx2x_free_tpa_pool(bp, fp, i); in bnx2x_init_rx_rings()
1455 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, in bnx2x_init_rx_rings()
1462 bnx2x_free_rx_sge_range(bp, fp, in bnx2x_init_rx_rings()
1464 bnx2x_free_tpa_pool(bp, fp, in bnx2x_init_rx_rings()
1465 MAX_AGG_QS(bp)); in bnx2x_init_rx_rings()
1477 for_each_eth_queue(bp, j) { in bnx2x_init_rx_rings()
1478 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1487 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings()
1493 if (CHIP_IS_E1(bp)) { in bnx2x_init_rx_rings()
1494 REG_WR(bp, BAR_USTRORM_INTMEM + in bnx2x_init_rx_rings()
1497 REG_WR(bp, BAR_USTRORM_INTMEM + in bnx2x_init_rx_rings()
1507 struct bnx2x *bp = fp->bp; in bnx2x_free_tx_skbs_queue() local
1517 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), in bnx2x_free_tx_skbs_queue()
1523 netdev_get_tx_queue(bp->dev, in bnx2x_free_tx_skbs_queue()
1528 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp) in bnx2x_free_tx_skbs_cnic() argument
1532 for_each_tx_queue_cnic(bp, i) { in bnx2x_free_tx_skbs_cnic()
1533 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs_cnic()
1537 static void bnx2x_free_tx_skbs(struct bnx2x *bp) in bnx2x_free_tx_skbs() argument
1541 for_each_eth_queue(bp, i) { in bnx2x_free_tx_skbs()
1542 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs()
1548 struct bnx2x *bp = fp->bp; in bnx2x_free_rx_bds() local
1561 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_rx_bds()
1570 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp) in bnx2x_free_rx_skbs_cnic() argument
1574 for_each_rx_queue_cnic(bp, j) { in bnx2x_free_rx_skbs_cnic()
1575 bnx2x_free_rx_bds(&bp->fp[j]); in bnx2x_free_rx_skbs_cnic()
1579 static void bnx2x_free_rx_skbs(struct bnx2x *bp) in bnx2x_free_rx_skbs() argument
1583 for_each_eth_queue(bp, j) { in bnx2x_free_rx_skbs()
1584 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_free_rx_skbs()
1589 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); in bnx2x_free_rx_skbs()
1593 static void bnx2x_free_skbs_cnic(struct bnx2x *bp) in bnx2x_free_skbs_cnic() argument
1595 bnx2x_free_tx_skbs_cnic(bp); in bnx2x_free_skbs_cnic()
1596 bnx2x_free_rx_skbs_cnic(bp); in bnx2x_free_skbs_cnic()
1599 void bnx2x_free_skbs(struct bnx2x *bp) in bnx2x_free_skbs() argument
1601 bnx2x_free_tx_skbs(bp); in bnx2x_free_skbs()
1602 bnx2x_free_rx_skbs(bp); in bnx2x_free_skbs()
1605 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) in bnx2x_update_max_mf_config() argument
1608 u32 mf_cfg = bp->mf_config[BP_VN(bp)]; in bnx2x_update_max_mf_config()
1610 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { in bnx2x_update_max_mf_config()
1618 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); in bnx2x_update_max_mf_config()
1625 * @bp: driver handle
1628 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) in bnx2x_free_msix_irqs() argument
1636 if (IS_PF(bp)) { in bnx2x_free_msix_irqs()
1637 free_irq(bp->msix_table[offset].vector, bp->dev); in bnx2x_free_msix_irqs()
1639 bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1643 if (CNIC_SUPPORT(bp)) { in bnx2x_free_msix_irqs()
1649 for_each_eth_queue(bp, i) { in bnx2x_free_msix_irqs()
1653 i, bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1655 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); in bnx2x_free_msix_irqs()
1659 void bnx2x_free_irq(struct bnx2x *bp) in bnx2x_free_irq() argument
1661 if (bp->flags & USING_MSIX_FLAG && in bnx2x_free_irq()
1662 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_free_irq()
1663 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); in bnx2x_free_irq()
1666 if (IS_PF(bp)) in bnx2x_free_irq()
1669 bnx2x_free_msix_irqs(bp, nvecs); in bnx2x_free_irq()
1671 free_irq(bp->dev->irq, bp->dev); in bnx2x_free_irq()
1675 int bnx2x_enable_msix(struct bnx2x *bp) in bnx2x_enable_msix() argument
1680 if (IS_PF(bp)) { in bnx2x_enable_msix()
1681 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1683 bp->msix_table[0].entry); in bnx2x_enable_msix()
1688 if (CNIC_SUPPORT(bp)) { in bnx2x_enable_msix()
1689 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1691 msix_vec, bp->msix_table[msix_vec].entry); in bnx2x_enable_msix()
1696 for_each_eth_queue(bp, i) { in bnx2x_enable_msix()
1697 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1706 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], in bnx2x_enable_msix()
1707 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec); in bnx2x_enable_msix()
1714 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); in bnx2x_enable_msix()
1722 bp->flags |= USING_SINGLE_MSIX_FLAG; in bnx2x_enable_msix()
1725 bp->num_ethernet_queues = 1; in bnx2x_enable_msix()
1726 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1739 bp->num_ethernet_queues -= diff; in bnx2x_enable_msix()
1740 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1743 bp->num_queues); in bnx2x_enable_msix()
1746 bp->flags |= USING_MSIX_FLAG; in bnx2x_enable_msix()
1753 bp->flags |= DISABLE_MSI_FLAG; in bnx2x_enable_msix()
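bnx2x_enable_msix() follows the standard pci_enable_msix_range() negotiation: request [min, wanted] vectors, shrink the queue count if fewer were granted, and retry with a single vector before falling back to MSI/INTx. A simplified sketch of the control flow (error handling condensed):

	rc = pci_enable_msix_range(bp->pdev, bp->msix_table,
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	if (rc < 0) {
		/* Retry with exactly one vector before giving up on MSI-X. */
		rc = pci_enable_msix_range(bp->pdev, bp->msix_table, 1, 1);
		if (rc < 0)
			return rc;	/* caller falls back to MSI/INTx */
		bp->flags |= USING_SINGLE_MSIX_FLAG;
	} else if (rc < msix_vec) {
		int diff = msix_vec - rc;	/* granted less than asked */

		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	}
	bp->flags |= USING_MSIX_FLAG;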
1758 static int bnx2x_req_msix_irqs(struct bnx2x *bp) in bnx2x_req_msix_irqs() argument
1763 if (IS_PF(bp)) { in bnx2x_req_msix_irqs()
1764 rc = request_irq(bp->msix_table[offset++].vector, in bnx2x_req_msix_irqs()
1766 bp->dev->name, bp->dev); in bnx2x_req_msix_irqs()
1773 if (CNIC_SUPPORT(bp)) in bnx2x_req_msix_irqs()
1776 for_each_eth_queue(bp, i) { in bnx2x_req_msix_irqs()
1777 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_req_msix_irqs()
1779 bp->dev->name, i); in bnx2x_req_msix_irqs()
1781 rc = request_irq(bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1785 bp->msix_table[offset].vector, rc); in bnx2x_req_msix_irqs()
1786 bnx2x_free_msix_irqs(bp, offset); in bnx2x_req_msix_irqs()
1793 i = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_req_msix_irqs()
1794 if (IS_PF(bp)) { in bnx2x_req_msix_irqs()
1795 offset = 1 + CNIC_SUPPORT(bp); in bnx2x_req_msix_irqs()
1796 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1798 bp->msix_table[0].vector, in bnx2x_req_msix_irqs()
1799 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1800 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1802 offset = CNIC_SUPPORT(bp); in bnx2x_req_msix_irqs()
1803 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1805 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1806 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1811 int bnx2x_enable_msi(struct bnx2x *bp) in bnx2x_enable_msi() argument
1815 rc = pci_enable_msi(bp->pdev); in bnx2x_enable_msi()
1820 bp->flags |= USING_MSI_FLAG; in bnx2x_enable_msi()
1825 static int bnx2x_req_irq(struct bnx2x *bp) in bnx2x_req_irq() argument
1830 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) in bnx2x_req_irq()
1835 if (bp->flags & USING_MSIX_FLAG) in bnx2x_req_irq()
1836 irq = bp->msix_table[0].vector; in bnx2x_req_irq()
1838 irq = bp->pdev->irq; in bnx2x_req_irq()
1840 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); in bnx2x_req_irq()
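The flags value passed to request_irq() above depends on the interrupt mode: message-signaled vectors are exclusive, while a legacy INTx line may be shared. The non-matching lines around it most likely select the flags along these lines (reconstruction, not verbatim):

	unsigned long flags;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;		/* MSI/MSI-X: never shared */
	else
		flags = IRQF_SHARED;	/* legacy INTx may be shared */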
1843 static int bnx2x_setup_irqs(struct bnx2x *bp) in bnx2x_setup_irqs() argument
1846 if (bp->flags & USING_MSIX_FLAG && in bnx2x_setup_irqs()
1847 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_setup_irqs()
1848 rc = bnx2x_req_msix_irqs(bp); in bnx2x_setup_irqs()
1852 rc = bnx2x_req_irq(bp); in bnx2x_setup_irqs()
1857 if (bp->flags & USING_MSI_FLAG) { in bnx2x_setup_irqs()
1858 bp->dev->irq = bp->pdev->irq; in bnx2x_setup_irqs()
1859 netdev_info(bp->dev, "using MSI IRQ %d\n", in bnx2x_setup_irqs()
1860 bp->dev->irq); in bnx2x_setup_irqs()
1862 if (bp->flags & USING_MSIX_FLAG) { in bnx2x_setup_irqs()
1863 bp->dev->irq = bp->msix_table[0].vector; in bnx2x_setup_irqs()
1864 netdev_info(bp->dev, "using MSIX IRQ %d\n", in bnx2x_setup_irqs()
1865 bp->dev->irq); in bnx2x_setup_irqs()
1872 static void bnx2x_napi_enable_cnic(struct bnx2x *bp) in bnx2x_napi_enable_cnic() argument
1876 for_each_rx_queue_cnic(bp, i) { in bnx2x_napi_enable_cnic()
1877 napi_enable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_enable_cnic()
1881 static void bnx2x_napi_enable(struct bnx2x *bp) in bnx2x_napi_enable() argument
1885 for_each_eth_queue(bp, i) { in bnx2x_napi_enable()
1886 napi_enable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_enable()
1890 static void bnx2x_napi_disable_cnic(struct bnx2x *bp) in bnx2x_napi_disable_cnic() argument
1894 for_each_rx_queue_cnic(bp, i) { in bnx2x_napi_disable_cnic()
1895 napi_disable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_disable_cnic()
1899 static void bnx2x_napi_disable(struct bnx2x *bp) in bnx2x_napi_disable() argument
1903 for_each_eth_queue(bp, i) { in bnx2x_napi_disable()
1904 napi_disable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_disable()
1908 void bnx2x_netif_start(struct bnx2x *bp) in bnx2x_netif_start() argument
1910 if (netif_running(bp->dev)) { in bnx2x_netif_start()
1911 bnx2x_napi_enable(bp); in bnx2x_netif_start()
1912 if (CNIC_LOADED(bp)) in bnx2x_netif_start()
1913 bnx2x_napi_enable_cnic(bp); in bnx2x_netif_start()
1914 bnx2x_int_enable(bp); in bnx2x_netif_start()
1915 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_netif_start()
1916 netif_tx_wake_all_queues(bp->dev); in bnx2x_netif_start()
1920 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) in bnx2x_netif_stop() argument
1922 bnx2x_int_disable_sync(bp, disable_hw); in bnx2x_netif_stop()
1923 bnx2x_napi_disable(bp); in bnx2x_netif_stop()
1924 if (CNIC_LOADED(bp)) in bnx2x_netif_stop()
1925 bnx2x_napi_disable_cnic(bp); in bnx2x_netif_stop()
1931 struct bnx2x *bp = netdev_priv(dev); in bnx2x_select_queue() local
1933 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { in bnx2x_select_queue()
1946 return bnx2x_fcoe_tx(bp, txq_index); in bnx2x_select_queue()
1951 (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); in bnx2x_select_queue()
1954 void bnx2x_set_num_queues(struct bnx2x *bp) in bnx2x_set_num_queues() argument
1957 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); in bnx2x_set_num_queues()
1960 if (IS_MF_STORAGE_ONLY(bp)) in bnx2x_set_num_queues()
1961 bp->num_ethernet_queues = 1; in bnx2x_set_num_queues()
1964 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ in bnx2x_set_num_queues()
1965 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_set_num_queues()
1967 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); in bnx2x_set_num_queues()
1973 * @bp: Driver handle
1978 * bp->max_cos.
1993 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) in bnx2x_set_real_num_queues() argument
1997 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; in bnx2x_set_real_num_queues()
1998 rx = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_set_real_num_queues()
2001 if (include_cnic && !NO_FCOE(bp)) { in bnx2x_set_real_num_queues()
2006 rc = netif_set_real_num_tx_queues(bp->dev, tx); in bnx2x_set_real_num_queues()
2011 rc = netif_set_real_num_rx_queues(bp->dev, rx); in bnx2x_set_real_num_queues()
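The counts computed above feed the standard netdev queue-count setters: TX gets one ring per (RSS queue, CoS) pair, RX one per RSS queue. Sketch:

	int tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	int rx = BNX2X_NUM_ETH_QUEUES(bp);

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(bp->dev, rx);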
2023 static void bnx2x_set_rx_buf_size(struct bnx2x *bp) in bnx2x_set_rx_buf_size() argument
2027 for_each_queue(bp, i) { in bnx2x_set_rx_buf_size()
2028 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_rx_buf_size()
2041 mtu = bp->dev->mtu; in bnx2x_set_rx_buf_size()
2056 static int bnx2x_init_rss(struct bnx2x *bp) in bnx2x_init_rss() argument
2059 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_init_rss()
2064 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) in bnx2x_init_rss()
2065 bp->rss_conf_obj.ind_table[i] = in bnx2x_init_rss()
2066 bp->fp->cl_id + in bnx2x_init_rss()
2077 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); in bnx2x_init_rss()
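bnx2x_init_rss() spreads client IDs round-robin across the indirection table. The modulo form below is an equivalent sketch of the fill loop above (the driver may spell the second term as ethtool_rxfh_indir_default(i, num_eth_queues), which reduces to the same thing):

	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
		bp->rss_conf_obj.ind_table[i] =
			bp->fp->cl_id + (i % num_eth_queues);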
2080 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, in bnx2x_rss() argument
2088 * if (!is_eth_multi(bp)) in bnx2x_rss()
2089 * bp->multi_mode = ETH_RSS_MODE_DISABLED; in bnx2x_rss()
2109 if (!CHIP_IS_E1x(bp)) { in bnx2x_rss()
2132 if (IS_PF(bp)) in bnx2x_rss()
2133 return bnx2x_config_rss(bp, &params); in bnx2x_rss()
2135 return bnx2x_vfpf_config_rss(bp, &params); in bnx2x_rss()
2138 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) in bnx2x_init_hw() argument
2145 func_params.f_obj = &bp->func_obj; in bnx2x_init_hw()
2150 return bnx2x_func_state_change(bp, &func_params); in bnx2x_init_hw()
2157 void bnx2x_squeeze_objects(struct bnx2x *bp) in bnx2x_squeeze_objects() argument
2162 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; in bnx2x_squeeze_objects()
2173 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2181 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2187 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_squeeze_objects()
2194 netif_addr_lock_bh(bp->dev); in bnx2x_squeeze_objects()
2195 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); in bnx2x_squeeze_objects()
2201 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_squeeze_objects()
2206 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2210 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_squeeze_objects()
2212 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2216 #define LOAD_ERROR_EXIT(bp, label) \ argument
2218 (bp)->state = BNX2X_STATE_ERROR; \
2222 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ argument
2224 bp->cnic_loaded = false; \
2228 #define LOAD_ERROR_EXIT(bp, label) \ argument
2230 (bp)->state = BNX2X_STATE_ERROR; \
2231 (bp)->panic = 1; \
2234 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ argument
2236 bp->cnic_loaded = false; \
2237 (bp)->panic = 1; \
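The two LOAD_ERROR_EXIT variants differ only in that the BNX2X_STOP_ON_ERROR build additionally sets bp->panic. Typical usage, as seen later in bnx2x_nic_load() (error message illustrative):

	rc = bnx2x_alloc_fp_mem(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for fps\n");
		LOAD_ERROR_EXIT(bp, load_error0); /* set state, jump to unwind */
	}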
2242 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) in bnx2x_free_fw_stats_mem() argument
2244 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, in bnx2x_free_fw_stats_mem()
2245 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_free_fw_stats_mem()
2249 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) in bnx2x_alloc_fw_stats_mem() argument
2252 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; in bnx2x_alloc_fw_stats_mem()
2255 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; in bnx2x_alloc_fw_stats_mem()
2262 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; in bnx2x_alloc_fw_stats_mem()
2265 * the VFs themselves. We don't include them in the bp->fw_stats_num as in bnx2x_alloc_fw_stats_mem()
2269 if (IS_SRIOV(bp)) in bnx2x_alloc_fw_stats_mem()
2270 vf_headroom = bnx2x_vf_headroom(bp); in bnx2x_alloc_fw_stats_mem()
2278 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + in bnx2x_alloc_fw_stats_mem()
2279 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? in bnx2x_alloc_fw_stats_mem()
2283 bp->fw_stats_num, vf_headroom, num_groups); in bnx2x_alloc_fw_stats_mem()
2284 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + in bnx2x_alloc_fw_stats_mem()
2295 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + in bnx2x_alloc_fw_stats_mem()
2301 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, in bnx2x_alloc_fw_stats_mem()
2302 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2303 if (!bp->fw_stats) in bnx2x_alloc_fw_stats_mem()
2307 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; in bnx2x_alloc_fw_stats_mem()
2308 bp->fw_stats_req_mapping = bp->fw_stats_mapping; in bnx2x_alloc_fw_stats_mem()
2309 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) in bnx2x_alloc_fw_stats_mem()
2310 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2311 bp->fw_stats_data_mapping = bp->fw_stats_mapping + in bnx2x_alloc_fw_stats_mem()
2312 bp->fw_stats_req_sz; in bnx2x_alloc_fw_stats_mem()
2315 U64_HI(bp->fw_stats_req_mapping), in bnx2x_alloc_fw_stats_mem()
2316 U64_LO(bp->fw_stats_req_mapping)); in bnx2x_alloc_fw_stats_mem()
2318 U64_HI(bp->fw_stats_data_mapping), in bnx2x_alloc_fw_stats_mem()
2319 U64_LO(bp->fw_stats_data_mapping)); in bnx2x_alloc_fw_stats_mem()
2323 bnx2x_free_fw_stats_mem(bp); in bnx2x_alloc_fw_stats_mem()
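bnx2x_alloc_fw_stats_mem() carves a single DMA-coherent allocation into two regions, the FW stats request followed by the stats data, which is why the free path above releases req_sz + data_sz in one call. A sketch matching the lines above:

	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	if (!bp->fw_stats)
		goto alloc_mem_err;

	/* Request block first, data block immediately after it. */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
			    ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping + bp->fw_stats_req_sz;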
2329 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) in bnx2x_nic_load_request() argument
2334 bp->fw_seq = in bnx2x_nic_load_request()
2335 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & in bnx2x_nic_load_request()
2337 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); in bnx2x_nic_load_request()
2340 bp->fw_drv_pulse_wr_seq = in bnx2x_nic_load_request()
2341 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & in bnx2x_nic_load_request()
2343 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); in bnx2x_nic_load_request()
2347 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) in bnx2x_nic_load_request()
2351 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); in bnx2x_nic_load_request()
2373 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) in bnx2x_compare_fw_ver() argument
2382 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); in bnx2x_compare_fw_ver()
2409 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) in bnx2x_nic_load_no_mcp() argument
2411 int path = BP_PATH(bp); in bnx2x_nic_load_no_mcp()
2430 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) in bnx2x_nic_load_pmf() argument
2435 bp->port.pmf = 1; in bnx2x_nic_load_pmf()
2437 * writing to bp->port.pmf here and reading it from the in bnx2x_nic_load_pmf()
2442 bp->port.pmf = 0; in bnx2x_nic_load_pmf()
2445 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); in bnx2x_nic_load_pmf()
2448 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) in bnx2x_nic_load_afex_dcc() argument
2452 (bp->common.shmem2_base)) { in bnx2x_nic_load_afex_dcc()
2453 if (SHMEM2_HAS(bp, dcc_support)) in bnx2x_nic_load_afex_dcc()
2454 SHMEM2_WR(bp, dcc_support, in bnx2x_nic_load_afex_dcc()
2457 if (SHMEM2_HAS(bp, afex_driver_support)) in bnx2x_nic_load_afex_dcc()
2458 SHMEM2_WR(bp, afex_driver_support, in bnx2x_nic_load_afex_dcc()
2463 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load_afex_dcc()
2469 * @bp: driver handle
2472 * Makes sure the contents of the bp->fp[index].napi is kept
2475 static void bnx2x_bz_fp(struct bnx2x *bp, int index) in bnx2x_bz_fp() argument
2477 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_bz_fp()
2491 fp->bp = bp; in bnx2x_bz_fp()
2494 fp->max_cos = bp->max_cos; in bnx2x_bz_fp()
2501 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; in bnx2x_bz_fp()
2504 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * in bnx2x_bz_fp()
2505 BNX2X_NUM_ETH_QUEUES(bp) + index]; in bnx2x_bz_fp()
2510 if (bp->dev->features & NETIF_F_LRO) in bnx2x_bz_fp()
2512 else if (bp->dev->features & NETIF_F_GRO_HW) in bnx2x_bz_fp()
2517 /* We don't want TPA if it's disabled in bp in bnx2x_bz_fp()
2520 if (bp->disable_tpa || IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2524 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) in bnx2x_set_os_driver_state() argument
2528 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) in bnx2x_set_os_driver_state()
2531 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); in bnx2x_set_os_driver_state()
2535 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); in bnx2x_set_os_driver_state()
2538 int bnx2x_load_cnic(struct bnx2x *bp) in bnx2x_load_cnic() argument
2540 int i, rc, port = BP_PORT(bp); in bnx2x_load_cnic()
2544 mutex_init(&bp->cnic_mutex); in bnx2x_load_cnic()
2546 if (IS_PF(bp)) { in bnx2x_load_cnic()
2547 rc = bnx2x_alloc_mem_cnic(bp); in bnx2x_load_cnic()
2549 BNX2X_ERR("Unable to allocate bp memory for cnic\n"); in bnx2x_load_cnic()
2550 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2554 rc = bnx2x_alloc_fp_mem_cnic(bp); in bnx2x_load_cnic()
2557 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2561 rc = bnx2x_set_real_num_queues(bp, 1); in bnx2x_load_cnic()
2564 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2568 bnx2x_add_all_napi_cnic(bp); in bnx2x_load_cnic()
2570 bnx2x_napi_enable_cnic(bp); in bnx2x_load_cnic()
2572 rc = bnx2x_init_hw_func_cnic(bp); in bnx2x_load_cnic()
2574 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); in bnx2x_load_cnic()
2576 bnx2x_nic_init_cnic(bp); in bnx2x_load_cnic()
2578 if (IS_PF(bp)) { in bnx2x_load_cnic()
2580 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); in bnx2x_load_cnic()
2583 for_each_cnic_queue(bp, i) { in bnx2x_load_cnic()
2584 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); in bnx2x_load_cnic()
2587 LOAD_ERROR_EXIT(bp, load_error_cnic2); in bnx2x_load_cnic()
2593 bnx2x_set_rx_mode_inner(bp); in bnx2x_load_cnic()
2596 bnx2x_get_iscsi_info(bp); in bnx2x_load_cnic()
2597 bnx2x_setup_cnic_irq_info(bp); in bnx2x_load_cnic()
2598 bnx2x_setup_cnic_info(bp); in bnx2x_load_cnic()
2599 bp->cnic_loaded = true; in bnx2x_load_cnic()
2600 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_load_cnic()
2601 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); in bnx2x_load_cnic()
2610 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); in bnx2x_load_cnic()
2613 bnx2x_napi_disable_cnic(bp); in bnx2x_load_cnic()
2615 if (bnx2x_set_real_num_queues(bp, 0)) in bnx2x_load_cnic()
2619 bnx2x_free_fp_mem_cnic(bp); in bnx2x_load_cnic()
2620 bnx2x_free_mem_cnic(bp); in bnx2x_load_cnic()
2626 int bnx2x_nic_load(struct bnx2x *bp, int load_mode) in bnx2x_nic_load() argument
2628 int port = BP_PORT(bp); in bnx2x_nic_load()
2633 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled"); in bnx2x_nic_load()
2636 if (unlikely(bp->panic)) { in bnx2x_nic_load()
2642 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; in bnx2x_nic_load()
2645 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); in bnx2x_nic_load()
2647 &bp->last_reported_link.link_report_flags); in bnx2x_nic_load()
2649 if (IS_PF(bp)) in bnx2x_nic_load()
2651 bnx2x_ilt_set_info(bp); in bnx2x_nic_load()
2655 * allocated only once, fp index, max_cos, bp pointer. in bnx2x_nic_load()
2658 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); in bnx2x_nic_load()
2659 for_each_queue(bp, i) in bnx2x_nic_load()
2660 bnx2x_bz_fp(bp, i); in bnx2x_nic_load()
2661 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + in bnx2x_nic_load()
2662 bp->num_cnic_queues) * in bnx2x_nic_load()
2665 bp->fcoe_init = false; in bnx2x_nic_load()
2668 bnx2x_set_rx_buf_size(bp); in bnx2x_nic_load()
2670 if (IS_PF(bp)) { in bnx2x_nic_load()
2671 rc = bnx2x_alloc_mem(bp); in bnx2x_nic_load()
2673 BNX2X_ERR("Unable to allocate bp memory\n"); in bnx2x_nic_load()
2681 rc = bnx2x_alloc_fp_mem(bp); in bnx2x_nic_load()
2684 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2688 rc = bnx2x_alloc_fw_stats_mem(bp); in bnx2x_nic_load()
2690 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2693 if (IS_VF(bp)) { in bnx2x_nic_load()
2694 rc = bnx2x_vfpf_init(bp); in bnx2x_nic_load()
2696 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2700 * bp->num_queues, bnx2x_set_real_num_queues() should always in bnx2x_nic_load()
2703 rc = bnx2x_set_real_num_queues(bp, 0); in bnx2x_nic_load()
2706 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2713 bnx2x_setup_tc(bp->dev, bp->max_cos); in bnx2x_nic_load()
2716 bnx2x_add_all_napi(bp); in bnx2x_nic_load()
2718 bnx2x_napi_enable(bp); in bnx2x_nic_load()
2719 bp->nic_stopped = false; in bnx2x_nic_load()
2721 if (IS_PF(bp)) { in bnx2x_nic_load()
2723 bnx2x_set_pf_load(bp); in bnx2x_nic_load()
2726 if (!BP_NOMCP(bp)) { in bnx2x_nic_load()
2728 rc = bnx2x_nic_load_request(bp, &load_code); in bnx2x_nic_load()
2730 LOAD_ERROR_EXIT(bp, load_error1); in bnx2x_nic_load()
2733 rc = bnx2x_compare_fw_ver(bp, load_code, true); in bnx2x_nic_load()
2735 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2736 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2739 load_code = bnx2x_nic_load_no_mcp(bp, port); in bnx2x_nic_load()
2743 bnx2x_nic_load_pmf(bp, load_code); in bnx2x_nic_load()
2746 bnx2x__init_func_obj(bp); in bnx2x_nic_load()
2749 rc = bnx2x_init_hw(bp, load_code); in bnx2x_nic_load()
2752 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2753 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2757 bnx2x_pre_irq_nic_init(bp); in bnx2x_nic_load()
2760 rc = bnx2x_setup_irqs(bp); in bnx2x_nic_load()
2763 if (IS_PF(bp)) in bnx2x_nic_load()
2764 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2765 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2769 if (IS_PF(bp)) { in bnx2x_nic_load()
2771 bnx2x_post_irq_nic_init(bp, load_code); in bnx2x_nic_load()
2773 bnx2x_init_bp_objs(bp); in bnx2x_nic_load()
2774 bnx2x_iov_nic_init(bp); in bnx2x_nic_load()
2777 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load()
2778 bnx2x_nic_load_afex_dcc(bp, load_code); in bnx2x_nic_load()
2779 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; in bnx2x_nic_load()
2780 rc = bnx2x_func_start(bp); in bnx2x_nic_load()
2783 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2785 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2789 if (!BP_NOMCP(bp)) { in bnx2x_nic_load()
2790 load_code = bnx2x_fw_command(bp, in bnx2x_nic_load()
2795 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2800 bnx2x_update_coalesce(bp); in bnx2x_nic_load()
2804 rc = bnx2x_setup_leading(bp); in bnx2x_nic_load()
2807 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2811 for_each_nondefault_eth_queue(bp, i) { in bnx2x_nic_load()
2812 if (IS_PF(bp)) in bnx2x_nic_load()
2813 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); in bnx2x_nic_load()
2815 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); in bnx2x_nic_load()
2818 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2823 rc = bnx2x_init_rss(bp); in bnx2x_nic_load()
2826 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2830 bp->state = BNX2X_STATE_OPEN; in bnx2x_nic_load()
2833 if (IS_PF(bp)) in bnx2x_nic_load()
2834 rc = bnx2x_set_eth_mac(bp, true); in bnx2x_nic_load()
2836 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, in bnx2x_nic_load()
2840 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2843 if (IS_PF(bp) && bp->pending_max) { in bnx2x_nic_load()
2844 bnx2x_update_max_mf_config(bp, bp->pending_max); in bnx2x_nic_load()
2845 bp->pending_max = 0; in bnx2x_nic_load()
2848 bp->force_link_down = false; in bnx2x_nic_load()
2849 if (bp->port.pmf) { in bnx2x_nic_load()
2850 rc = bnx2x_initial_phy_init(bp, load_mode); in bnx2x_nic_load()
2852 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2854 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; in bnx2x_nic_load()
2859 rc = bnx2x_vlan_reconfigure_vid(bp); in bnx2x_nic_load()
2861 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2864 bnx2x_set_rx_mode_inner(bp); in bnx2x_nic_load()
2866 if (bp->flags & PTP_SUPPORTED) { in bnx2x_nic_load()
2867 bnx2x_register_phc(bp); in bnx2x_nic_load()
2868 bnx2x_init_ptp(bp); in bnx2x_nic_load()
2869 bnx2x_configure_ptp_filters(bp); in bnx2x_nic_load()
2875 netif_tx_wake_all_queues(bp->dev); in bnx2x_nic_load()
2879 netif_tx_start_all_queues(bp->dev); in bnx2x_nic_load()
2885 bp->state = BNX2X_STATE_DIAG; in bnx2x_nic_load()
2892 if (bp->port.pmf) in bnx2x_nic_load()
2893 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0); in bnx2x_nic_load()
2895 bnx2x__link_status_update(bp); in bnx2x_nic_load()
2898 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnx2x_nic_load()
2900 if (CNIC_ENABLED(bp)) in bnx2x_nic_load()
2901 bnx2x_load_cnic(bp); in bnx2x_nic_load()
2903 if (IS_PF(bp)) in bnx2x_nic_load()
2904 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); in bnx2x_nic_load()
2906 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { in bnx2x_nic_load()
2909 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); in bnx2x_nic_load()
2911 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); in bnx2x_nic_load()
2912 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], in bnx2x_nic_load()
2918 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { in bnx2x_nic_load()
2920 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); in bnx2x_nic_load()
2925 if (IS_PF(bp)) in bnx2x_nic_load()
2926 bnx2x_update_mfw_dump(bp); in bnx2x_nic_load()
2929 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) in bnx2x_nic_load()
2930 bnx2x_dcbx_init(bp, false); in bnx2x_nic_load()
2932 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) in bnx2x_nic_load()
2933 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); in bnx2x_nic_load()
2941 if (IS_PF(bp)) { in bnx2x_nic_load()
2942 bnx2x_int_disable_sync(bp, 1); in bnx2x_nic_load()
2945 bnx2x_squeeze_objects(bp); in bnx2x_nic_load()
2949 bnx2x_free_skbs(bp); in bnx2x_nic_load()
2950 for_each_rx_queue(bp, i) in bnx2x_nic_load()
2951 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_load()
2954 bnx2x_free_irq(bp); in bnx2x_nic_load()
2956 if (IS_PF(bp) && !BP_NOMCP(bp)) { in bnx2x_nic_load()
2957 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); in bnx2x_nic_load()
2958 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); in bnx2x_nic_load()
2961 bp->port.pmf = 0; in bnx2x_nic_load()
2963 bnx2x_napi_disable(bp); in bnx2x_nic_load()
2964 bnx2x_del_all_napi(bp); in bnx2x_nic_load()
2965 bp->nic_stopped = true; in bnx2x_nic_load()
2968 if (IS_PF(bp)) in bnx2x_nic_load()
2969 bnx2x_clear_pf_load(bp); in bnx2x_nic_load()
2971 bnx2x_free_fw_stats_mem(bp); in bnx2x_nic_load()
2972 bnx2x_free_fp_mem(bp); in bnx2x_nic_load()
2973 bnx2x_free_mem(bp); in bnx2x_nic_load()
2979 int bnx2x_drain_tx_queues(struct bnx2x *bp) in bnx2x_drain_tx_queues() argument
2984 for_each_tx_queue(bp, i) { in bnx2x_drain_tx_queues()
2985 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_drain_tx_queues()
2988 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_drain_tx_queues()
2996 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) in bnx2x_nic_unload() argument
3003 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) in bnx2x_nic_unload()
3004 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); in bnx2x_nic_unload()
3007 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { in bnx2x_nic_unload()
3009 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); in bnx2x_nic_unload()
3010 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], in bnx2x_nic_unload()
3014 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && in bnx2x_nic_unload()
3015 (bp->state == BNX2X_STATE_CLOSED || in bnx2x_nic_unload()
3016 bp->state == BNX2X_STATE_ERROR)) { in bnx2x_nic_unload()
3024 bp->recovery_state = BNX2X_RECOVERY_DONE; in bnx2x_nic_unload()
3025 bp->is_leader = 0; in bnx2x_nic_unload()
3026 bnx2x_release_leader_lock(bp); in bnx2x_nic_unload()
3040 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) in bnx2x_nic_unload()
3043 /* It's important to set the bp->state to the value different from in bnx2x_nic_unload()
3047 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; in bnx2x_nic_unload()
3051 bnx2x_iov_channel_down(bp); in bnx2x_nic_unload()
3053 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3054 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); in bnx2x_nic_unload()
3057 bnx2x_tx_disable(bp); in bnx2x_nic_unload()
3058 netdev_reset_tc(bp->dev); in bnx2x_nic_unload()
3060 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_nic_unload()
3062 timer_delete_sync(&bp->timer); in bnx2x_nic_unload()
3064 if (IS_PF(bp) && !BP_NOMCP(bp)) { in bnx2x_nic_unload()
3066 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; in bnx2x_nic_unload()
3067 bnx2x_drv_pulse(bp); in bnx2x_nic_unload()
3068 bnx2x_stats_handle(bp, STATS_EVENT_STOP); in bnx2x_nic_unload()
3069 bnx2x_save_statistics(bp); in bnx2x_nic_unload()
3077 bnx2x_drain_tx_queues(bp); in bnx2x_nic_unload()
3082 if (IS_VF(bp)) { in bnx2x_nic_unload()
3083 bnx2x_clear_vlan_info(bp); in bnx2x_nic_unload()
3084 bnx2x_vfpf_close_vf(bp); in bnx2x_nic_unload()
3087 bnx2x_chip_cleanup(bp, unload_mode, keep_link); in bnx2x_nic_unload()
3090 bnx2x_send_unload_req(bp, unload_mode); in bnx2x_nic_unload()
3098 if (!CHIP_IS_E1x(bp)) in bnx2x_nic_unload()
3099 bnx2x_pf_disable(bp); in bnx2x_nic_unload()
3101 if (!bp->nic_stopped) { in bnx2x_nic_unload()
3103 bnx2x_netif_stop(bp, 1); in bnx2x_nic_unload()
3105 bnx2x_del_all_napi(bp); in bnx2x_nic_unload()
3106 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3107 bnx2x_del_all_napi_cnic(bp); in bnx2x_nic_unload()
3109 bnx2x_free_irq(bp); in bnx2x_nic_unload()
3110 bp->nic_stopped = true; in bnx2x_nic_unload()
3114 bnx2x_send_unload_done(bp, false); in bnx2x_nic_unload()
3121 if (IS_PF(bp)) in bnx2x_nic_unload()
3122 bnx2x_squeeze_objects(bp); in bnx2x_nic_unload()
3125 bp->sp_state = 0; in bnx2x_nic_unload()
3127 bp->port.pmf = 0; in bnx2x_nic_unload()
3130 bp->sp_rtnl_state = 0; in bnx2x_nic_unload()
3134 bnx2x_free_skbs(bp); in bnx2x_nic_unload()
3135 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3136 bnx2x_free_skbs_cnic(bp); in bnx2x_nic_unload()
3137 for_each_rx_queue(bp, i) in bnx2x_nic_unload()
3138 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_unload()
3140 bnx2x_free_fp_mem(bp); in bnx2x_nic_unload()
3141 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3142 bnx2x_free_fp_mem_cnic(bp); in bnx2x_nic_unload()
3144 if (IS_PF(bp)) { in bnx2x_nic_unload()
3145 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3146 bnx2x_free_mem_cnic(bp); in bnx2x_nic_unload()
3148 bnx2x_free_mem(bp); in bnx2x_nic_unload()
3150 bp->state = BNX2X_STATE_CLOSED; in bnx2x_nic_unload()
3151 bp->cnic_loaded = false; in bnx2x_nic_unload()
3154 if (IS_PF(bp) && !BP_NOMCP(bp)) in bnx2x_nic_unload()
3155 bnx2x_update_mng_version(bp); in bnx2x_nic_unload()
3160 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { in bnx2x_nic_unload()
3161 bnx2x_set_reset_in_progress(bp); in bnx2x_nic_unload()
3165 bnx2x_set_reset_global(bp); in bnx2x_nic_unload()
3171 if (IS_PF(bp) && in bnx2x_nic_unload()
3172 !bnx2x_clear_pf_load(bp) && in bnx2x_nic_unload()
3173 bnx2x_reset_is_done(bp, BP_PATH(bp))) in bnx2x_nic_unload()
3174 bnx2x_disable_close_the_gate(bp); in bnx2x_nic_unload()
3181 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) in bnx2x_set_power_state() argument
3186 if (!bp->pdev->pm_cap) { in bnx2x_set_power_state()
3191 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); in bnx2x_set_power_state()
3195 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3207 if (atomic_read(&bp->pdev->enable_cnt) != 1) in bnx2x_set_power_state()
3210 if (CHIP_REV_IS_SLOW(bp)) in bnx2x_set_power_state()
3216 if (bp->wol) in bnx2x_set_power_state()
3219 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3228 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); in bnx2x_set_power_state()
3241 struct bnx2x *bp = fp->bp; in bnx2x_poll() local
3246 if (unlikely(bp->panic)) { in bnx2x_poll()
3253 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); in bnx2x_poll()
3286 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, in bnx2x_poll()
3303 static u16 bnx2x_tx_split(struct bnx2x *bp, in bnx2x_tx_split() argument
3362 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_xmit_type() argument
3380 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { in bnx2x_xmit_type()
3419 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_pkt_req_lin() argument
3530 * @bp: driver handle
3537 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum_enc() argument
3562 * @bp: driver handle
3569 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum_e2() argument
3591 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_sbd_csum() argument
3607 * @bp: driver handle
3612 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum() argument
3753 struct bnx2x *bp = netdev_priv(dev); in bnx2x_start_xmit() local
3767 u32 xmit_type = bnx2x_xmit_type(bp, skb); in bnx2x_start_xmit()
3775 if (unlikely(bp->panic)) in bnx2x_start_xmit()
3782 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); in bnx2x_start_xmit()
3784 txdata = &bp->bnx2x_txq[txq_index]; in bnx2x_start_xmit()
3795 if (unlikely(bnx2x_tx_avail(bp, txdata) < in bnx2x_start_xmit()
3802 bnx2x_fp_qstats(bp, txdata->parent_fp); in bnx2x_start_xmit()
3807 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
3834 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { in bnx2x_start_xmit()
3836 bp->lin_cnt++; in bnx2x_start_xmit()
3846 mapping = dma_map_single(&bp->pdev->dev, skb->data, in bnx2x_start_xmit()
3848 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
3880 if (!(bp->flags & TX_TIMESTAMPING_EN)) { in bnx2x_start_xmit()
3881 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_start_xmit()
3883 } else if (bp->ptp_tx_skb) { in bnx2x_start_xmit()
3884 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_start_xmit()
3885 netdev_err_once(bp->dev, in bnx2x_start_xmit()
3890 bp->ptp_tx_skb = skb_get(skb); in bnx2x_start_xmit()
3891 bp->ptp_tx_start = jiffies; in bnx2x_start_xmit()
3892 schedule_work(&bp->ptp_task); in bnx2x_start_xmit()
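The TX-timestamp lines above enforce a single outstanding PTP request: if timestamping is not armed, or one skb is already pending, the request is counted as skipped; otherwise the skb is pinned with skb_get() and the PTP worker is scheduled. A hedged reconstruction of the branch:

	if (!(bp->flags & TX_TIMESTAMPING_EN)) {
		bp->eth_stats.ptp_skip_tx_ts++;	/* feature not armed */
	} else if (bp->ptp_tx_skb) {
		/* One request already in flight; driver warns via
		 * netdev_err_once() (message elided here). */
		bp->eth_stats.ptp_skip_tx_ts++;
	} else {
		bp->ptp_tx_skb = skb_get(skb);	/* ref held for the worker */
		bp->ptp_tx_start = jiffies;	/* lets the worker time out */
		schedule_work(&bp->ptp_task);
	}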
3919 if (IS_VF(bp)) { in bnx2x_start_xmit()
3946 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); in bnx2x_start_xmit()
3948 if (!CHIP_IS_E1x(bp)) { in bnx2x_start_xmit()
3956 hlen = bnx2x_set_pbd_csum_enc(bp, skb, in bnx2x_start_xmit()
3990 hlen = bnx2x_set_pbd_csum_e2(bp, skb, in bnx2x_start_xmit()
3999 if (IS_VF(bp)) { in bnx2x_start_xmit()
4011 if (bp->flags & TX_SWITCHING) in bnx2x_start_xmit()
4036 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); in bnx2x_start_xmit()
4067 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, in bnx2x_start_xmit()
4071 if (!CHIP_IS_E1x(bp)) in bnx2x_start_xmit()
4092 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, in bnx2x_start_xmit()
4094 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
4106 bnx2x_free_tx_pkt(bp, txdata, in bnx2x_start_xmit()
4190 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); in bnx2x_start_xmit()
4194 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { in bnx2x_start_xmit()
4202 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
4203 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) in bnx2x_start_xmit()
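The final availability check above is the classic stop/wake race pattern: stop the queue when it is nearly full, order the stop against the consumer with a barrier, then re-check, because a concurrent completion in bnx2x_tx_int() may have just freed descriptors. Sketch (barrier placement assumed from the standard idiom):

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);
		smp_mb();	/* pairs with the wake check in bnx2x_tx_int() */

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}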
4211 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) in bnx2x_get_c2s_mapping() argument
4213 int mfw_vn = BP_FW_MB_IDX(bp); in bnx2x_get_c2s_mapping()
4217 if (!IS_MF_BD(bp)) { in bnx2x_get_c2s_mapping()
4227 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); in bnx2x_get_c2s_mapping()
4234 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); in bnx2x_get_c2s_mapping()
4241 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); in bnx2x_get_c2s_mapping()
4256 struct bnx2x *bp = netdev_priv(dev); in bnx2x_setup_tc() local
4270 if (num_tc > bp->max_cos) { in bnx2x_setup_tc()
4272 num_tc, bp->max_cos); in bnx2x_setup_tc()
4282 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def); in bnx2x_setup_tc()
4288 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); in bnx2x_setup_tc()
4291 outer_prio, bp->prio_to_cos[outer_prio]); in bnx2x_setup_tc()
4305 for (cos = 0; cos < bp->max_cos; cos++) { in bnx2x_setup_tc()
4306 count = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_setup_tc()
4307 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); in bnx2x_setup_tc()
4334 struct bnx2x *bp = netdev_priv(dev); in bnx2x_change_mac_addr() local
4342 if (IS_MF_STORAGE_ONLY(bp)) { in bnx2x_change_mac_addr()
4348 rc = bnx2x_set_eth_mac(bp, false); in bnx2x_change_mac_addr()
4356 rc = bnx2x_set_eth_mac(bp, true); in bnx2x_change_mac_addr()
4358 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) in bnx2x_change_mac_addr()
4359 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); in bnx2x_change_mac_addr()
4364 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) in bnx2x_free_fp_mem_at() argument
4366 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); in bnx2x_free_fp_mem_at()
4367 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; in bnx2x_free_fp_mem_at()
4377 if (!CHIP_IS_E1x(bp)) in bnx2x_free_fp_mem_at()
4379 bnx2x_fp(bp, fp_index, in bnx2x_free_fp_mem_at()
4384 bnx2x_fp(bp, fp_index, in bnx2x_free_fp_mem_at()
4390 if (!skip_rx_queue(bp, fp_index)) { in bnx2x_free_fp_mem_at()
4394 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); in bnx2x_free_fp_mem_at()
4395 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), in bnx2x_free_fp_mem_at()
4396 bnx2x_fp(bp, fp_index, rx_desc_mapping), in bnx2x_free_fp_mem_at()
4399 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), in bnx2x_free_fp_mem_at()
4400 bnx2x_fp(bp, fp_index, rx_comp_mapping), in bnx2x_free_fp_mem_at()
4405 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); in bnx2x_free_fp_mem_at()
4406 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), in bnx2x_free_fp_mem_at()
4407 bnx2x_fp(bp, fp_index, rx_sge_mapping), in bnx2x_free_fp_mem_at()
4412 if (!skip_tx_queue(bp, fp_index)) { in bnx2x_free_fp_mem_at()
4430 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) in bnx2x_free_fp_mem_cnic() argument
4433 for_each_cnic_queue(bp, i) in bnx2x_free_fp_mem_cnic()
4434 bnx2x_free_fp_mem_at(bp, i); in bnx2x_free_fp_mem_cnic()
4437 void bnx2x_free_fp_mem(struct bnx2x *bp) in bnx2x_free_fp_mem() argument
4440 for_each_eth_queue(bp, i) in bnx2x_free_fp_mem()
4441 bnx2x_free_fp_mem_at(bp, i); in bnx2x_free_fp_mem()
4444 static void set_sb_shortcuts(struct bnx2x *bp, int index) in set_sb_shortcuts() argument
4446 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); in set_sb_shortcuts()
4447 if (!CHIP_IS_E1x(bp)) { in set_sb_shortcuts()
4448 bnx2x_fp(bp, index, sb_index_values) = in set_sb_shortcuts()
4450 bnx2x_fp(bp, index, sb_running_index) = in set_sb_shortcuts()
4453 bnx2x_fp(bp, index, sb_index_values) = in set_sb_shortcuts()
4455 bnx2x_fp(bp, index, sb_running_index) = in set_sb_shortcuts()
4464 struct bnx2x *bp = fp->bp; in bnx2x_alloc_rx_bds() local
4475 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { in bnx2x_alloc_rx_bds()
4493 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; in bnx2x_alloc_rx_bds()
4516 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) in bnx2x_alloc_fp_mem_at() argument
4519 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_alloc_fp_mem_at()
4524 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { in bnx2x_alloc_fp_mem_at()
4526 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4527 } else if (!bp->rx_ring_size) { in bnx2x_alloc_fp_mem_at()
4528 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); in bnx2x_alloc_fp_mem_at()
4530 if (CHIP_IS_E3(bp)) { in bnx2x_alloc_fp_mem_at()
4531 u32 cfg = SHMEM_RD(bp, in bnx2x_alloc_fp_mem_at()
4532 dev_info.port_hw_config[BP_PORT(bp)]. in bnx2x_alloc_fp_mem_at()
4542 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : in bnx2x_alloc_fp_mem_at()
4545 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4547 rx_ring_size = bp->rx_ring_size; in bnx2x_alloc_fp_mem_at()
4552 sb = &bnx2x_fp(bp, index, status_blk); in bnx2x_alloc_fp_mem_at()
4556 if (!CHIP_IS_E1x(bp)) { in bnx2x_alloc_fp_mem_at()
4557 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4562 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4573 set_sb_shortcuts(bp, index); in bnx2x_alloc_fp_mem_at()
4576 if (!skip_tx_queue(bp, index)) { in bnx2x_alloc_fp_mem_at()
4598 if (!skip_rx_queue(bp, index)) { in bnx2x_alloc_fp_mem_at()
4600 bnx2x_fp(bp, index, rx_buf_ring) = in bnx2x_alloc_fp_mem_at()
4602 if (!bnx2x_fp(bp, index, rx_buf_ring)) in bnx2x_alloc_fp_mem_at()
4604 bnx2x_fp(bp, index, rx_desc_ring) = in bnx2x_alloc_fp_mem_at()
4605 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), in bnx2x_alloc_fp_mem_at()
4607 if (!bnx2x_fp(bp, index, rx_desc_ring)) in bnx2x_alloc_fp_mem_at()
4611 bnx2x_fp(bp, index, rx_comp_ring) = in bnx2x_alloc_fp_mem_at()
4612 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), in bnx2x_alloc_fp_mem_at()
4614 if (!bnx2x_fp(bp, index, rx_comp_ring)) in bnx2x_alloc_fp_mem_at()
4618 bnx2x_fp(bp, index, rx_page_ring) = in bnx2x_alloc_fp_mem_at()
4621 if (!bnx2x_fp(bp, index, rx_page_ring)) in bnx2x_alloc_fp_mem_at()
4623 bnx2x_fp(bp, index, rx_sge_ring) = in bnx2x_alloc_fp_mem_at()
4624 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), in bnx2x_alloc_fp_mem_at()
4626 if (!bnx2x_fp(bp, index, rx_sge_ring)) in bnx2x_alloc_fp_mem_at()
4653 bnx2x_free_fp_mem_at(bp, index); in bnx2x_alloc_fp_mem_at()
4659 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) in bnx2x_alloc_fp_mem_cnic() argument
4661 if (!NO_FCOE(bp)) in bnx2x_alloc_fp_mem_cnic()
4663 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) in bnx2x_alloc_fp_mem_cnic()
4672 static int bnx2x_alloc_fp_mem(struct bnx2x *bp) in bnx2x_alloc_fp_mem() argument
4681 if (bnx2x_alloc_fp_mem_at(bp, 0)) in bnx2x_alloc_fp_mem()
4685 for_each_nondefault_eth_queue(bp, i) in bnx2x_alloc_fp_mem()
4686 if (bnx2x_alloc_fp_mem_at(bp, i)) in bnx2x_alloc_fp_mem()
4690 if (i != BNX2X_NUM_ETH_QUEUES(bp)) { in bnx2x_alloc_fp_mem()
4691 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; in bnx2x_alloc_fp_mem()
4694 bnx2x_shrink_eth_fp(bp, delta); in bnx2x_alloc_fp_mem()
4695 if (CNIC_SUPPORT(bp)) in bnx2x_alloc_fp_mem()
4702 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); in bnx2x_alloc_fp_mem()
4703 bp->num_ethernet_queues -= delta; in bnx2x_alloc_fp_mem()
4704 bp->num_queues = bp->num_ethernet_queues + in bnx2x_alloc_fp_mem()
4705 bp->num_cnic_queues; in bnx2x_alloc_fp_mem()
4707 bp->num_queues + delta, bp->num_queues); in bnx2x_alloc_fp_mem()
4713 void bnx2x_free_mem_bp(struct bnx2x *bp) in bnx2x_free_mem_bp() argument
4717 for (i = 0; i < bp->fp_array_size; i++) in bnx2x_free_mem_bp()
4718 kfree(bp->fp[i].tpa_info); in bnx2x_free_mem_bp()
4719 kfree(bp->fp); in bnx2x_free_mem_bp()
4720 kfree(bp->sp_objs); in bnx2x_free_mem_bp()
4721 kfree(bp->fp_stats); in bnx2x_free_mem_bp()
4722 kfree(bp->bnx2x_txq); in bnx2x_free_mem_bp()
4723 kfree(bp->msix_table); in bnx2x_free_mem_bp()
4724 kfree(bp->ilt); in bnx2x_free_mem_bp()
4727 int bnx2x_alloc_mem_bp(struct bnx2x *bp) in bnx2x_alloc_mem_bp() argument
4740 msix_table_size = bp->igu_sb_cnt; in bnx2x_alloc_mem_bp()
4741 if (IS_PF(bp)) in bnx2x_alloc_mem_bp()
4746 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); in bnx2x_alloc_mem_bp()
4747 bp->fp_array_size = fp_array_size; in bnx2x_alloc_mem_bp()
4748 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); in bnx2x_alloc_mem_bp()
4750 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); in bnx2x_alloc_mem_bp()
4753 for (i = 0; i < bp->fp_array_size; i++) { in bnx2x_alloc_mem_bp()
4761 bp->fp = fp; in bnx2x_alloc_mem_bp()
4764 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), in bnx2x_alloc_mem_bp()
4766 if (!bp->sp_objs) in bnx2x_alloc_mem_bp()
4770 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), in bnx2x_alloc_mem_bp()
4772 if (!bp->fp_stats) in bnx2x_alloc_mem_bp()
4777 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); in bnx2x_alloc_mem_bp()
4780 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), in bnx2x_alloc_mem_bp()
4782 if (!bp->bnx2x_txq) in bnx2x_alloc_mem_bp()
4789 bp->msix_table = tbl; in bnx2x_alloc_mem_bp()
4795 bp->ilt = ilt; in bnx2x_alloc_mem_bp()
4799 bnx2x_free_mem_bp(bp); in bnx2x_alloc_mem_bp()
4805 struct bnx2x *bp = netdev_priv(dev); in bnx2x_reload_if_running() local
4810 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); in bnx2x_reload_if_running()
4811 return bnx2x_nic_load(bp, LOAD_NORMAL); in bnx2x_reload_if_running()
4814 int bnx2x_get_cur_phy_idx(struct bnx2x *bp) in bnx2x_get_cur_phy_idx() argument
4817 if (bp->link_params.num_phys <= 1) in bnx2x_get_cur_phy_idx()
4820 if (bp->link_vars.link_up) { in bnx2x_get_cur_phy_idx()
4823 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && in bnx2x_get_cur_phy_idx()
4824 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) in bnx2x_get_cur_phy_idx()
4828 switch (bnx2x_phy_selection(&bp->link_params)) { in bnx2x_get_cur_phy_idx()
4843 int bnx2x_get_link_cfg_idx(struct bnx2x *bp) in bnx2x_get_link_cfg_idx() argument
4845 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); in bnx2x_get_link_cfg_idx()
4852 if (bp->link_params.multi_phy_config & in bnx2x_get_link_cfg_idx()
4865 struct bnx2x *bp = netdev_priv(dev); in bnx2x_fcoe_get_wwn() local
4866 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_fcoe_get_wwn()
4889 struct bnx2x *bp = netdev_priv(dev); in bnx2x_change_mtu() local
4891 if (pci_num_vf(bp->pdev)) { in bnx2x_change_mtu()
4896 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_change_mtu()
4910 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) in bnx2x_change_mtu()
4911 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); in bnx2x_change_mtu()
4919 struct bnx2x *bp = netdev_priv(dev); in bnx2x_fix_features() local
4921 if (pci_num_vf(bp->pdev)) { in bnx2x_fix_features()
4927 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { in bnx2x_fix_features()
4952 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_features() local
4958 if (!pci_num_vf(bp->pdev)) { in bnx2x_set_features()
4960 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { in bnx2x_set_features()
4961 bp->link_params.loopback_mode = LOOPBACK_BMAC; in bnx2x_set_features()
4965 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { in bnx2x_set_features()
4966 bp->link_params.loopback_mode = LOOPBACK_NONE; in bnx2x_set_features()
4979 if (bp->recovery_state == BNX2X_RECOVERY_DONE) { in bnx2x_set_features()
4992 struct bnx2x *bp = netdev_priv(dev); in bnx2x_tx_timeout() local
4997 if (!bp->panic) in bnx2x_tx_timeout()
4999 bnx2x_panic_dump(bp, false); in bnx2x_tx_timeout()
5005 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); in bnx2x_tx_timeout()
5012 struct bnx2x *bp; in bnx2x_suspend() local
5018 bp = netdev_priv(dev); in bnx2x_suspend()
5029 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); in bnx2x_suspend()
5040 struct bnx2x *bp; in bnx2x_resume() local
5047 bp = netdev_priv(dev); in bnx2x_resume()
5049 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_resume()
5063 rc = bnx2x_nic_load(bp, LOAD_OPEN); in bnx2x_resume()
5072 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, in bnx2x_set_ctx_validation() argument
5082 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), in bnx2x_set_ctx_validation()
5086 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), in bnx2x_set_ctx_validation()
5090 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, in storm_memset_hc_timeout() argument
5096 REG_WR8(bp, addr, ticks); in storm_memset_hc_timeout()
5102 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, in storm_memset_hc_disable() argument
5109 u8 flags = REG_RD8(bp, addr); in storm_memset_hc_disable()
5113 REG_WR8(bp, addr, flags); in storm_memset_hc_disable()
5119 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, in bnx2x_update_coalesce_sb_index() argument
5122 int port = BP_PORT(bp); in bnx2x_update_coalesce_sb_index()
5125 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); in bnx2x_update_coalesce_sb_index()
5128 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); in bnx2x_update_coalesce_sb_index()
5131 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, in bnx2x_schedule_sp_rtnl() argument
5135 set_bit(flag, &bp->sp_rtnl_state); in bnx2x_schedule_sp_rtnl()
5139 schedule_delayed_work(&bp->sp_rtnl_task, 0); in bnx2x_schedule_sp_rtnl()
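bnx2x_schedule_sp_rtnl() is the driver's funnel for deferred rtnl-context work: record the request in an atomic bitmask, then kick the worker. Sketch (barriers assumed from the usual set_bit/worker idiom):

	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();	/* publish the flag before queueing */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);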