Lines matching full:bp (references to the struct bnxt *bp adapter context throughout the bnxt Ethernet driver)
328 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
332 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
339 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
342 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) in bnxt_db_nq() argument
344 if (bp->flags & BNXT_FLAG_CHIP_P7) in bnxt_db_nq()
346 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_db_nq()
352 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) in bnxt_db_nq_arm() argument
354 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_db_nq_arm()
360 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) in bnxt_db_cq() argument
362 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_db_cq()
363 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | in bnxt_db_cq()
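The NQ/CQ doorbell helpers above are gated on chip generation: P7 parts use the 64-bit masked NQ format (DBR_TYPE_NQ_MASK), P5-class parts the plain 64-bit DBR_TYPE_NQ / DBR_TYPE_CQ_ARMALL formats, and older chips fall back to legacy 32-bit writes whose lines do not appear in this listing because they do not reference bp. A minimal sketch of the dispatch, with BNXT_DB_NQ_P7(), BNXT_DB_NQ_P5() and BNXT_DB_CQ() as assumed names for the three write paths:

    static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
    {
            if (bp->flags & BNXT_FLAG_CHIP_P7)
                    BNXT_DB_NQ_P7(db, idx);         /* 64-bit masked NQ doorbell   */
            else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                    BNXT_DB_NQ_P5(db, idx);         /* 64-bit DBR_TYPE_NQ doorbell */
            else
                    BNXT_DB_CQ(db, idx);            /* legacy 32-bit doorbell      */
    }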
369 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) in bnxt_queue_fw_reset_work() argument
371 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) in bnxt_queue_fw_reset_work()
374 if (BNXT_PF(bp)) in bnxt_queue_fw_reset_work()
375 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); in bnxt_queue_fw_reset_work()
377 schedule_delayed_work(&bp->fw_reset_task, delay); in bnxt_queue_fw_reset_work()
380 static void __bnxt_queue_sp_work(struct bnxt *bp) in __bnxt_queue_sp_work() argument
382 if (BNXT_PF(bp)) in __bnxt_queue_sp_work()
383 queue_work(bnxt_pf_wq, &bp->sp_task); in __bnxt_queue_sp_work()
385 schedule_work(&bp->sp_task); in __bnxt_queue_sp_work()
388 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event) in bnxt_queue_sp_work() argument
390 set_bit(event, &bp->sp_event); in bnxt_queue_sp_work()
391 __bnxt_queue_sp_work(bp); in bnxt_queue_sp_work()
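bnxt_queue_sp_work() records the event in a shared bitmask before queueing the single sp_task work item, so the handler cannot miss an event even when the work was already pending; a PF uses the driver's dedicated bnxt_pf_wq workqueue while a VF falls back to the system workqueue. Inlining the __bnxt_queue_sp_work() helper gives the whole pattern in one place:

    static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
    {
            /* publish the event bit first; queue_work() is a no-op if the
             * work is already pending, but the new bit still gets seen */
            set_bit(event, &bp->sp_event);
            if (BNXT_PF(bp))
                    queue_work(bnxt_pf_wq, &bp->sp_task);
            else
                    schedule_work(&bp->sp_task);
    }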
394 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) in bnxt_sched_reset_rxr() argument
398 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_sched_reset_rxr()
399 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); in bnxt_sched_reset_rxr()
401 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); in bnxt_sched_reset_rxr()
402 __bnxt_queue_sp_work(bp); in bnxt_sched_reset_rxr()
407 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, in bnxt_sched_reset_txr() argument
415 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)", in bnxt_sched_reset_txr()
420 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); in bnxt_sched_reset_txr()
455 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, in bnxt_txr_db_kick() argument
460 bnxt_db_write(bp, &txr->tx_db, prod); in bnxt_txr_db_kick()
466 struct bnxt *bp = netdev_priv(dev); in bnxt_start_xmit() local
474 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_start_xmit()
475 struct pci_dev *pdev = bp->pdev; in bnxt_start_xmit()
483 if (unlikely(i >= bp->tx_nr_rings)) { in bnxt_start_xmit()
490 txr = &bp->tx_ring[bp->tx_ring_map[i]]; in bnxt_start_xmit()
504 free_size = bnxt_tx_avail(bp, txr); in bnxt_start_xmit()
508 netif_warn(bp, tx_err, dev, in bnxt_start_xmit()
510 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), in bnxt_start_xmit()
511 bp->tx_wake_thresh)) in bnxt_start_xmit()
522 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_start_xmit()
524 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_start_xmit()
542 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) { in bnxt_start_xmit()
566 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && in bnxt_start_xmit()
615 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); in bnxt_start_xmit()
618 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_start_xmit()
661 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); in bnxt_start_xmit()
665 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_start_xmit()
714 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_start_xmit()
723 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_start_xmit()
746 bnxt_txr_db_kick(bp, txr, prod); in bnxt_start_xmit()
748 if (free_size >= bp->tx_wake_thresh) in bnxt_start_xmit()
756 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { in bnxt_start_xmit()
760 bnxt_txr_db_kick(bp, txr, prod); in bnxt_start_xmit()
763 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), in bnxt_start_xmit()
764 bp->tx_wake_thresh); in bnxt_start_xmit()
773 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_start_xmit()
781 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_start_xmit()
793 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0; in bnxt_start_xmit()
794 atomic64_inc(&bp->ptp_cfg->stats.ts_err); in bnxt_start_xmit()
795 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) in bnxt_start_xmit()
800 bnxt_txr_db_kick(bp, txr, txr->tx_prod); in bnxt_start_xmit()
801 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL; in bnxt_start_xmit()
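bnxt_start_xmit() polices ring occupancy twice: up front (lines 504-511), where a full ring stops the queue before any descriptor is built, and after the doorbell kick (lines 756-764), where the queue is stopped once fewer than MAX_SKB_FRAGS + 1 descriptors remain so the next skb is guaranteed to fit. netif_txq_try_stop() re-reads availability after stopping and restarts the queue itself if the completion path freed entries in between, closing the classic stop/wake race. A sketch of the post-transmit check, assuming bnxt_tx_avail() returns the free descriptor count:

    if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
            bnxt_txr_db_kick(bp, txr, prod);        /* flush posted BDs first      */
            netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
                               bp->tx_wake_thresh); /* stop, re-check, maybe restart */
    }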
807 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, in __bnxt_tx_int() argument
810 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); in __bnxt_tx_int()
811 struct pci_dev *pdev = bp->pdev; in __bnxt_tx_int()
819 while (RING_TX(bp, cons) != hw_cons) { in __bnxt_tx_int()
825 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; in __bnxt_tx_int()
829 bnxt_sched_reset_txr(bp, txr, cons); in __bnxt_tx_int()
834 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) { in __bnxt_tx_int()
857 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; in __bnxt_tx_int()
865 if (BNXT_CHIP_P5(bp)) { in __bnxt_tx_int()
867 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod); in __bnxt_tx_int()
881 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, in __bnxt_tx_int()
887 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) in bnxt_tx_int() argument
894 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) in bnxt_tx_int()
895 more |= __bnxt_tx_int(bp, txr, budget); in bnxt_tx_int()
906 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, in __bnxt_alloc_rx_page() argument
927 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping, in __bnxt_alloc_rx_netmem() argument
947 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, in __bnxt_alloc_rx_frag() argument
955 bp->rx_buf_size, gfp); in __bnxt_alloc_rx_frag()
959 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset; in __bnxt_alloc_rx_frag()
963 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, in bnxt_alloc_rx_data() argument
966 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; in bnxt_alloc_rx_data()
967 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; in bnxt_alloc_rx_data()
970 if (BNXT_RX_PAGE_MODE(bp)) { in bnxt_alloc_rx_data()
973 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); in bnxt_alloc_rx_data()
978 mapping += bp->rx_dma_offset; in bnxt_alloc_rx_data()
980 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; in bnxt_alloc_rx_data()
982 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp); in bnxt_alloc_rx_data()
988 rx_buf->data_ptr = data + bp->rx_offset; in bnxt_alloc_rx_data()
1000 struct bnxt *bp = rxr->bnapi->bp; in bnxt_reuse_rx_data() local
1003 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; in bnxt_reuse_rx_data()
1011 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; in bnxt_reuse_rx_data()
1012 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; in bnxt_reuse_rx_data()
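The lookups in bnxt_alloc_rx_data() and bnxt_reuse_rx_data() show the indexing scheme used throughout this file: hardware descriptor rings are arrays of pages, so an index is split into a page selector (RX_RING()/TX_RING()) and a slot within the page (RX_IDX()/TX_IDX()), while the parallel software buffer rings are flat and use the masked RING_RX()/RING_TX() value. Roughly, assuming power-of-two descriptor counts per page (the real macro definitions live in the driver header):

    rxbd   = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
    rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];

    /* conceptually: RING_RX(bp, p) ~ (p) & bp->rx_ring_mask        flat sw index
     *               RX_IDX(p)      ~ (p) & (RX_DESC_CNT - 1)       slot in page
     *               RX_RING(bp, p) ~ RING_RX(bp, p) / RX_DESC_CNT  page number  */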
1027 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, in bnxt_alloc_rx_netmem() argument
1031 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; in bnxt_alloc_rx_netmem()
1038 netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp); in bnxt_alloc_rx_netmem()
1047 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); in bnxt_alloc_rx_netmem()
1057 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, in bnxt_get_agg() argument
1069 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, in bnxt_get_tpa_agg_p5() argument
1082 struct bnxt *bp = bnapi->bp; in bnxt_reuse_rx_agg_bufs() local
1089 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) in bnxt_reuse_rx_agg_bufs()
1100 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); in bnxt_reuse_rx_agg_bufs()
1102 agg = bnxt_get_agg(bp, cpr, idx, start + i); in bnxt_reuse_rx_agg_bufs()
1123 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; in bnxt_reuse_rx_agg_bufs()
1129 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); in bnxt_reuse_rx_agg_bufs()
1135 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, in bnxt_rx_multi_page_skb() argument
1147 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); in bnxt_rx_multi_page_skb()
1152 dma_addr -= bp->rx_dma_offset; in bnxt_rx_multi_page_skb()
1153 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, in bnxt_rx_multi_page_skb()
1154 bp->rx_dir); in bnxt_rx_multi_page_skb()
1155 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); in bnxt_rx_multi_page_skb()
1161 skb_reserve(skb, bp->rx_offset); in bnxt_rx_multi_page_skb()
1167 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, in bnxt_rx_page_skb() argument
1181 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); in bnxt_rx_page_skb()
1186 dma_addr -= bp->rx_dma_offset; in bnxt_rx_page_skb()
1187 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, in bnxt_rx_page_skb()
1188 bp->rx_dir); in bnxt_rx_page_skb()
1191 payload = eth_get_headlen(bp->dev, data_ptr, len); in bnxt_rx_page_skb()
1214 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, in bnxt_rx_skb() argument
1224 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); in bnxt_rx_skb()
1230 skb = napi_build_skb(data, bp->rx_buf_size); in bnxt_rx_skb()
1231 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, in bnxt_rx_skb()
1232 bp->rx_dir); in bnxt_rx_skb()
1239 skb_reserve(skb, bp->rx_offset); in bnxt_rx_skb()
1244 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp, in __bnxt_rx_agg_netmems() argument
1260 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) in __bnxt_rx_agg_netmems()
1275 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); in __bnxt_rx_agg_netmems()
1277 agg = bnxt_get_agg(bp, cpr, idx, i); in __bnxt_rx_agg_netmems()
1307 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) { in __bnxt_rx_agg_netmems()
1335 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp, in bnxt_rx_agg_netmems_skb() argument
1342 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa, in bnxt_rx_agg_netmems_skb()
1353 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp, in bnxt_rx_agg_netmems_xdp() argument
1364 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa, in bnxt_rx_agg_netmems_xdp()
1374 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, in bnxt_agg_bufs_valid() argument
1391 struct bnxt *bp = bnapi->bp; in bnxt_copy_data() local
1392 struct pci_dev *pdev = bp->pdev; in bnxt_copy_data()
1399 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak, in bnxt_copy_data()
1400 bp->rx_dir); in bnxt_copy_data()
1405 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak, in bnxt_copy_data()
1406 bp->rx_dir); in bnxt_copy_data()
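bnxt_copy_data() is the rx_copybreak path also tested at lines 1898 and 2243: a packet no longer than bp->rx_copybreak is copied into a small new skb so the original DMA buffer is never unmapped and can be reposted to the ring unchanged. Only the copybreak window is synced to the CPU, and the buffer is synced straight back to the device afterwards. A sketch, assuming the caller already allocated the destination skb:

    dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak, bp->rx_dir);
    memcpy(skb->data, data, len);           /* small packet, copy is cheap */
    dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak, bp->rx_dir);
    /* the original buffer stays mapped and is recycled in place,
     * e.g. via bnxt_reuse_rx_data() (lines 1000-1012 above) */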
1445 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, in bnxt_discard_rx() argument
1461 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_discard_rx()
1468 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) in bnxt_discard_rx()
1533 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, in bnxt_tpa_start() argument
1543 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_tpa_start()
1552 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; in bnxt_tpa_start()
1557 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", in bnxt_tpa_start()
1560 bnxt_sched_reset_rxr(bp, rxr); in bnxt_tpa_start()
1569 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; in bnxt_tpa_start()
1587 else if (!BNXT_CHIP_P4_PLUS(bp) && in bnxt_tpa_start()
1595 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); in bnxt_tpa_start()
1606 cons = RING_RX(bp, NEXT_RX(cons)); in bnxt_tpa_start()
1607 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); in bnxt_tpa_start()
1788 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, in bnxt_gro_skb() argument
1806 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_gro_skb()
1810 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); in bnxt_gro_skb()
1820 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) in bnxt_get_pkt_dev() argument
1822 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); in bnxt_get_pkt_dev()
1825 return dev ? dev : bp->dev; in bnxt_get_pkt_dev()
1828 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, in bnxt_tpa_end() argument
1837 struct net_device *dev = bp->dev; in bnxt_tpa_end()
1848 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); in bnxt_tpa_end()
1855 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_tpa_end()
1861 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", in bnxt_tpa_end()
1869 gro = !!(bp->flags & BNXT_FLAG_GRO); in bnxt_tpa_end()
1876 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) in bnxt_tpa_end()
1893 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", in bnxt_tpa_end()
1898 if (len <= bp->rx_copybreak) { in bnxt_tpa_end()
1909 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr, in bnxt_tpa_end()
1918 tpa_info->data_ptr = new_data + bp->rx_offset; in bnxt_tpa_end()
1921 skb = napi_build_skb(data, bp->rx_buf_size); in bnxt_tpa_end()
1922 dma_sync_single_for_cpu(&bp->pdev->dev, mapping, in bnxt_tpa_end()
1923 bp->rx_buf_use_size, bp->rx_dir); in bnxt_tpa_end()
1932 skb_reserve(skb, bp->rx_offset); in bnxt_tpa_end()
1937 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs, in bnxt_tpa_end()
1947 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); in bnxt_tpa_end()
1975 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); in bnxt_tpa_end()
1980 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, in bnxt_tpa_agg() argument
1992 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, in bnxt_deliver_skb() argument
1997 if (skb->dev != bp->dev) { in bnxt_deliver_skb()
1999 bnxt_vf_rep_rx(bp, skb); in bnxt_deliver_skb()
2006 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, in bnxt_rx_ts_valid() argument
2013 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) in bnxt_rx_ts_valid()
2063 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, in bnxt_rss_ext_op() argument
2068 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); in bnxt_rss_ext_op()
2087 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, in bnxt_rx_pkt() argument
2092 struct net_device *dev = bp->dev; in bnxt_rx_pkt()
2116 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); in bnxt_rx_pkt()
2136 bnxt_tpa_start(bp, rxr, cmp_type, in bnxt_rx_pkt()
2144 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, in bnxt_rx_pkt()
2153 bnxt_deliver_skb(bp, bnapi, skb); in bnxt_rx_pkt()
2162 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); in bnxt_rx_pkt()
2166 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", in bnxt_rx_pkt()
2168 bnxt_sched_reset_rxr(bp, rxr); in bnxt_rx_pkt()
2182 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) in bnxt_rx_pkt()
2202 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_rx_pkt()
2203 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { in bnxt_rx_pkt()
2204 netdev_warn_once(bp->dev, "RX buffer error %x\n", in bnxt_rx_pkt()
2206 bnxt_sched_reset_rxr(bp, rxr); in bnxt_rx_pkt()
2216 if (bnxt_xdp_attached(bp, rxr)) { in bnxt_rx_pkt()
2217 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); in bnxt_rx_pkt()
2219 u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp, in bnxt_rx_pkt()
2231 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { in bnxt_rx_pkt()
2243 if (len <= bp->rx_copybreak) { in bnxt_rx_pkt()
2266 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, in bnxt_rx_pkt()
2274 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons, in bnxt_rx_pkt()
2279 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, in bnxt_rx_pkt()
2293 type = bnxt_rss_ext_op(bp, rxcmp); in bnxt_rx_pkt()
2307 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); in bnxt_rx_pkt()
2329 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { in bnxt_rx_pkt()
2330 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_rx_pkt()
2333 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { in bnxt_rx_pkt()
2334 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_rx_pkt()
2343 bnxt_deliver_skb(bp, bnapi, skb); in bnxt_rx_pkt()
2352 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); in bnxt_rx_pkt()
2368 static int bnxt_force_rx_discard(struct bnxt *bp, in bnxt_force_rx_discard() argument
2407 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); in bnxt_force_rx_discard()
2413 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) in bnxt_fw_health_readl() argument
2415 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_fw_health_readl()
2423 pci_read_config_dword(bp->pdev, reg_off, &val); in bnxt_fw_health_readl()
2429 val = readl(bp->bar0 + reg_off); in bnxt_fw_health_readl()
2432 val = readl(bp->bar1 + reg_off); in bnxt_fw_health_readl()
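bnxt_fw_health_readl() hides where a given health/recovery register lives: firmware may place the same logical register in PCI config space or memory-map it behind BAR0 or BAR1, so the helper dispatches on the encoded address-space type. The case labels below are assumed names for that encoding; only the three access forms come from the matched lines:

    switch (reg_type) {                     /* reg_type/reg_off decoding assumed */
    case BNXT_FW_HEALTH_REG_TYPE_CFG:
            pci_read_config_dword(bp->pdev, reg_off, &val);
            break;
    case BNXT_FW_HEALTH_REG_TYPE_BAR0:
            val = readl(bp->bar0 + reg_off);
            break;
    case BNXT_FW_HEALTH_REG_TYPE_BAR1:
            val = readl(bp->bar1 + reg_off);
            break;
    }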
2440 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) in bnxt_agg_ring_id_to_grp_idx() argument
2444 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_agg_ring_id_to_grp_idx()
2445 u16 grp_idx = bp->rx_ring[i].bnapi->index; in bnxt_agg_ring_id_to_grp_idx()
2448 grp_info = &bp->grp_info[grp_idx]; in bnxt_agg_ring_id_to_grp_idx()
2457 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); in bnxt_get_force_speed() local
2459 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) in bnxt_get_force_speed()
2468 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); in bnxt_set_force_speed() local
2470 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_set_force_speed()
2500 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); in bnxt_set_auto_speed() local
2502 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_set_auto_speed()
2512 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); in bnxt_force_speed_updated() local
2514 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_force_speed_updated()
2530 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); in bnxt_auto_speed_updated() local
2532 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_auto_speed_updated()
2543 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type) in bnxt_bs_trace_avail() argument
2545 u32 flags = bp->ctx->ctx_arr[type].flags; in bnxt_bs_trace_avail()
2552 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm) in bnxt_bs_trace_init() argument
2572 bs_trace = &bp->bs_trace[trace_type]; in bnxt_bs_trace_init()
2615 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) in bnxt_event_error_report() argument
2621 …netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix … in bnxt_event_error_report()
2625 netdev_warn(bp->dev, "Pause Storm detected!\n"); in bnxt_event_error_report()
2628 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); in bnxt_event_error_report()
2650 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); in bnxt_event_error_report()
2659 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", in bnxt_event_error_report()
2661 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", in bnxt_event_error_report()
2665 bp->thermal_threshold_type = type; in bnxt_event_error_report()
2666 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); in bnxt_event_error_report()
2672 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); in bnxt_event_error_report()
2675 netdev_err(bp->dev, "FW reported unknown error type %u\n", in bnxt_event_error_report()
2704 static int bnxt_async_event_process(struct bnxt *bp, in bnxt_async_event_process() argument
2711 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", in bnxt_async_event_process()
2717 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_async_event_process()
2719 if (BNXT_VF(bp)) in bnxt_async_event_process()
2729 netdev_warn(bp->dev, "Link speed %d no longer supported\n", in bnxt_async_event_process()
2732 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2737 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2740 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2743 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2748 if (BNXT_VF(bp)) in bnxt_async_event_process()
2751 if (bp->pf.port_id != port_id) in bnxt_async_event_process()
2754 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2758 if (BNXT_PF(bp)) in bnxt_async_event_process()
2760 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2765 if (!bp->fw_health) in bnxt_async_event_process()
2768 bp->fw_reset_timestamp = jiffies; in bnxt_async_event_process()
2769 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; in bnxt_async_event_process()
2770 if (!bp->fw_reset_min_dsecs) in bnxt_async_event_process()
2771 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; in bnxt_async_event_process()
2772 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); in bnxt_async_event_process()
2773 if (!bp->fw_reset_max_dsecs) in bnxt_async_event_process()
2774 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; in bnxt_async_event_process()
2776 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); in bnxt_async_event_process()
2779 bp->fw_health->fatalities++; in bnxt_async_event_process()
2780 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); in bnxt_async_event_process()
2784 bp->fw_health->survivals++; in bnxt_async_event_process()
2785 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); in bnxt_async_event_process()
2787 netif_warn(bp, hw, bp->dev, in bnxt_async_event_process()
2790 bp->fw_reset_min_dsecs * 100, in bnxt_async_event_process()
2791 bp->fw_reset_max_dsecs * 100); in bnxt_async_event_process()
2792 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2796 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_async_event_process()
2805 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); in bnxt_async_event_process()
2811 bp->current_interval * 10); in bnxt_async_event_process()
2815 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); in bnxt_async_event_process()
2817 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); in bnxt_async_event_process()
2818 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); in bnxt_async_event_process()
2821 netif_info(bp, drv, bp->dev, in bnxt_async_event_process()
2835 netif_notice(bp, hw, bp->dev, in bnxt_async_event_process()
2843 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_async_event_process()
2846 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", in bnxt_async_event_process()
2851 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); in bnxt_async_event_process()
2853 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", in bnxt_async_event_process()
2857 rxr = bp->bnapi[grp_idx]->rx_ring; in bnxt_async_event_process()
2858 bnxt_sched_reset_rxr(bp, rxr); in bnxt_async_event_process()
2862 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_async_event_process()
2864 netif_notice(bp, hw, bp->dev, in bnxt_async_event_process()
2870 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2876 bnxt_ptp_pps_event(bp, data1, data2); in bnxt_async_event_process()
2880 if (bnxt_event_error_report(bp, data1, data2)) in bnxt_async_event_process()
2887 if (BNXT_PTP_USE_RTC(bp)) { in bnxt_async_event_process()
2888 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_async_event_process()
2895 bnxt_ptp_update_current_time(bp); in bnxt_async_event_process()
2909 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); in bnxt_async_event_process()
2916 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset); in bnxt_async_event_process()
2922 __bnxt_queue_sp_work(bp); in bnxt_async_event_process()
2924 bnxt_ulp_async_events(bp, cmpl); in bnxt_async_event_process()
2928 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) in bnxt_hwrm_handler() argument
2938 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); in bnxt_hwrm_handler()
2944 if ((vf_id < bp->pf.first_vf_id) || in bnxt_hwrm_handler()
2945 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { in bnxt_hwrm_handler()
2946 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", in bnxt_hwrm_handler()
2951 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); in bnxt_hwrm_handler()
2952 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); in bnxt_hwrm_handler()
2956 bnxt_async_event_process(bp, in bnxt_hwrm_handler()
2967 static bool bnxt_vnic_is_active(struct bnxt *bp) in bnxt_vnic_is_active() argument
2969 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_vnic_is_active()
2977 struct bnxt *bp = bnapi->bp; in bnxt_msix() local
2987 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) in bnxt_has_work() argument
2998 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, in __bnxt_poll_work() argument
3037 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); in __bnxt_poll_work()
3039 bp->tx_ring_mask; in __bnxt_poll_work()
3041 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { in __bnxt_poll_work()
3049 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp); in __bnxt_poll_work()
3053 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); in __bnxt_poll_work()
3055 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, in __bnxt_poll_work()
3073 bnxt_hwrm_handler(bp, txcmp); in __bnxt_poll_work()
3095 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); in __bnxt_poll_work()
3104 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, in __bnxt_poll_work_done() argument
3108 bnapi->tx_int(bp, bnapi, budget); in __bnxt_poll_work_done()
3113 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in __bnxt_poll_work_done()
3119 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in __bnxt_poll_work_done()
3124 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, in bnxt_poll_work() argument
3130 rx_pkts = __bnxt_poll_work(bp, cpr, budget); in bnxt_poll_work()
3136 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_poll_work()
3138 __bnxt_poll_work_done(bp, bnapi, budget); in bnxt_poll_work()
3145 struct bnxt *bp = bnapi->bp; in bnxt_poll_nitroa0() local
3182 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); in bnxt_poll_nitroa0()
3191 bnxt_hwrm_handler(bp, txcmp); in bnxt_poll_nitroa0()
3193 netdev_err(bp->dev, in bnxt_poll_nitroa0()
3204 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_poll_nitroa0()
3207 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in bnxt_poll_nitroa0()
3211 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { in bnxt_poll_nitroa0()
3221 struct bnxt *bp = bnapi->bp; in bnxt_poll() local
3225 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { in bnxt_poll()
3230 work_done += bnxt_poll_work(bp, cpr, budget - work_done); in bnxt_poll()
3238 if (!bnxt_has_work(bp, cpr)) { in bnxt_poll()
3244 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { in bnxt_poll()
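bnxt_poll() follows the standard NAPI contract seen at lines 3230-3244: keep calling the work function until either the budget is exhausted (NAPI stays scheduled) or no completions remain, and only then complete the poll; bnxt_db_cq() at line 3136 acks the consumed entries and, on P5+ chips, re-arms the interrupt with DBR_TYPE_CQ_ARMALL. The DIM check at line 3244 additionally feeds the sample to dynamic interrupt moderation only while a VNIC is active. In outline, with the re-arm doorbell write omitted:

    while (work_done < budget) {
            work_done += bnxt_poll_work(bp, cpr, budget - work_done);
            if (!bnxt_has_work(bp, cpr)) {
                    /* under budget and idle: safe to complete */
                    napi_complete_done(napi, work_done);
                    break;
            }
    }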
3256 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) in __bnxt_poll_cqs() argument
3265 work_done += __bnxt_poll_work(bp, cpr2, in __bnxt_poll_cqs()
3273 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, in __bnxt_poll_cqs_done() argument
3291 bnxt_writeq(bp, in __bnxt_poll_cqs_done()
3298 __bnxt_poll_work_done(bp, bnapi, budget); in __bnxt_poll_cqs_done()
3307 struct bnxt *bp = bnapi->bp; in bnxt_poll_p5() local
3312 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { in bnxt_poll_p5()
3318 work_done = __bnxt_poll_cqs(bp, bnapi, budget); in bnxt_poll_p5()
3330 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, in bnxt_poll_p5()
3359 work_done += __bnxt_poll_work(bp, cpr2, in bnxt_poll_p5()
3363 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); in bnxt_poll_p5()
3367 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); in bnxt_poll_p5()
3375 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { in bnxt_poll_p5()
3387 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp, in bnxt_free_one_tx_ring_skbs() argument
3391 struct pci_dev *pdev = bp->pdev; in bnxt_free_one_tx_ring_skbs()
3393 max_idx = bp->tx_nr_pages * TX_DESC_CNT; in bnxt_free_one_tx_ring_skbs()
3400 if (idx < bp->tx_nr_rings_xdp && in bnxt_free_one_tx_ring_skbs()
3435 int ring_idx = i & bp->tx_ring_mask; in bnxt_free_one_tx_ring_skbs()
3447 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx)); in bnxt_free_one_tx_ring_skbs()
3450 static void bnxt_free_tx_skbs(struct bnxt *bp) in bnxt_free_tx_skbs() argument
3454 if (!bp->tx_ring) in bnxt_free_tx_skbs()
3457 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_free_tx_skbs()
3458 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; in bnxt_free_tx_skbs()
3463 bnxt_free_one_tx_ring_skbs(bp, txr, i); in bnxt_free_tx_skbs()
3466 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) in bnxt_free_tx_skbs()
3467 bnxt_ptp_free_txts_skbs(bp->ptp_cfg); in bnxt_free_tx_skbs()
3470 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) in bnxt_free_one_rx_ring() argument
3474 max_idx = bp->rx_nr_pages * RX_DESC_CNT; in bnxt_free_one_rx_ring()
3484 if (BNXT_RX_PAGE_MODE(bp)) in bnxt_free_one_rx_ring()
3491 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) in bnxt_free_one_rx_agg_ring() argument
3495 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; in bnxt_free_one_rx_agg_ring()
3511 static void bnxt_free_one_tpa_info_data(struct bnxt *bp, in bnxt_free_one_tpa_info_data() argument
3516 for (i = 0; i < bp->max_tpa; i++) { in bnxt_free_one_tpa_info_data()
3528 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, in bnxt_free_one_rx_ring_skbs() argument
3536 bnxt_free_one_tpa_info_data(bp, rxr); in bnxt_free_one_rx_ring_skbs()
3542 bnxt_free_one_rx_ring(bp, rxr); in bnxt_free_one_rx_ring_skbs()
3548 bnxt_free_one_rx_agg_ring(bp, rxr); in bnxt_free_one_rx_ring_skbs()
3556 static void bnxt_free_rx_skbs(struct bnxt *bp) in bnxt_free_rx_skbs() argument
3560 if (!bp->rx_ring) in bnxt_free_rx_skbs()
3563 for (i = 0; i < bp->rx_nr_rings; i++) in bnxt_free_rx_skbs()
3564 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]); in bnxt_free_rx_skbs()
3567 static void bnxt_free_skbs(struct bnxt *bp) in bnxt_free_skbs() argument
3569 bnxt_free_tx_skbs(bp); in bnxt_free_skbs()
3570 bnxt_free_rx_skbs(bp); in bnxt_free_skbs()
3590 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem, in __bnxt_copy_ring() argument
3619 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) in bnxt_free_ring() argument
3621 struct pci_dev *pdev = bp->pdev; in bnxt_free_ring()
3652 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) in bnxt_alloc_ring() argument
3654 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_ring()
3705 static void bnxt_free_one_tpa_info(struct bnxt *bp, in bnxt_free_one_tpa_info() argument
3713 for (i = 0; i < bp->max_tpa; i++) { in bnxt_free_one_tpa_info()
3722 static void bnxt_free_tpa_info(struct bnxt *bp) in bnxt_free_tpa_info() argument
3726 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_free_tpa_info()
3727 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_free_tpa_info()
3729 bnxt_free_one_tpa_info(bp, rxr); in bnxt_free_tpa_info()
3733 static int bnxt_alloc_one_tpa_info(struct bnxt *bp, in bnxt_alloc_one_tpa_info() argument
3739 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), in bnxt_alloc_one_tpa_info()
3744 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_alloc_one_tpa_info()
3746 for (i = 0; i < bp->max_tpa; i++) { in bnxt_alloc_one_tpa_info()
3760 static int bnxt_alloc_tpa_info(struct bnxt *bp) in bnxt_alloc_tpa_info() argument
3764 bp->max_tpa = MAX_TPA; in bnxt_alloc_tpa_info()
3765 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_alloc_tpa_info()
3766 if (!bp->max_tpa_v2) in bnxt_alloc_tpa_info()
3768 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); in bnxt_alloc_tpa_info()
3771 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_tpa_info()
3772 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_alloc_tpa_info()
3774 rc = bnxt_alloc_one_tpa_info(bp, rxr); in bnxt_alloc_tpa_info()
3781 static void bnxt_free_rx_rings(struct bnxt *bp) in bnxt_free_rx_rings() argument
3785 if (!bp->rx_ring) in bnxt_free_rx_rings()
3788 bnxt_free_tpa_info(bp); in bnxt_free_rx_rings()
3789 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_free_rx_rings()
3790 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_free_rx_rings()
3808 bnxt_free_ring(bp, &ring->ring_mem); in bnxt_free_rx_rings()
3811 bnxt_free_ring(bp, &ring->ring_mem); in bnxt_free_rx_rings()
3815 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, in bnxt_alloc_rx_page_pool() argument
3824 pp.pool_size = bp->rx_agg_ring_size / agg_size_fac; in bnxt_alloc_rx_page_pool()
3825 if (BNXT_RX_PAGE_MODE(bp)) in bnxt_alloc_rx_page_pool()
3826 pp.pool_size += bp->rx_ring_size / rx_size_fac; in bnxt_alloc_rx_page_pool()
3828 pp.netdev = bp->dev; in bnxt_alloc_rx_page_pool()
3829 pp.dev = &bp->pdev->dev; in bnxt_alloc_rx_page_pool()
3830 pp.dma_dir = bp->rx_dir; in bnxt_alloc_rx_page_pool()
3843 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024); in bnxt_alloc_rx_page_pool()
3865 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) in bnxt_alloc_rx_agg_bmap() argument
3869 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; in bnxt_alloc_rx_agg_bmap()
3878 static int bnxt_alloc_rx_rings(struct bnxt *bp) in bnxt_alloc_rx_rings() argument
3880 int numa_node = dev_to_node(&bp->pdev->dev); in bnxt_alloc_rx_rings()
3883 if (!bp->rx_ring) in bnxt_alloc_rx_rings()
3886 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_alloc_rx_rings()
3889 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rx_rings()
3890 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_alloc_rx_rings()
3898 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", in bnxt_alloc_rx_rings()
3900 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); in bnxt_alloc_rx_rings()
3905 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); in bnxt_alloc_rx_rings()
3917 rc = bnxt_alloc_ring(bp, &ring->ring_mem); in bnxt_alloc_rx_rings()
3924 rc = bnxt_alloc_ring(bp, &ring->ring_mem); in bnxt_alloc_rx_rings()
3929 rc = bnxt_alloc_rx_agg_bmap(bp, rxr); in bnxt_alloc_rx_rings()
3934 if (bp->flags & BNXT_FLAG_TPA) in bnxt_alloc_rx_rings()
3935 rc = bnxt_alloc_tpa_info(bp); in bnxt_alloc_rx_rings()
3939 static void bnxt_free_tx_rings(struct bnxt *bp) in bnxt_free_tx_rings() argument
3942 struct pci_dev *pdev = bp->pdev; in bnxt_free_tx_rings()
3944 if (!bp->tx_ring) in bnxt_free_tx_rings()
3947 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_free_tx_rings()
3948 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; in bnxt_free_tx_rings()
3952 dma_free_coherent(&pdev->dev, bp->tx_push_size, in bnxt_free_tx_rings()
3959 bnxt_free_ring(bp, &ring->ring_mem); in bnxt_free_tx_rings()
3963 #define BNXT_TC_TO_RING_BASE(bp, tc) \ argument
3964 ((tc) * (bp)->tx_nr_rings_per_tc)
3966 #define BNXT_RING_TO_TC_OFF(bp, tx) \ argument
3967 ((tx) % (bp)->tx_nr_rings_per_tc)
3969 #define BNXT_RING_TO_TC(bp, tx) \ argument
3970 ((tx) / (bp)->tx_nr_rings_per_tc)
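The three macros above are plain integer splits of a global TX ring index into a (traffic class, offset-within-class) pair. For example, with bp->tx_nr_rings_per_tc == 4:

    /* BNXT_TC_TO_RING_BASE(bp, 2) == 8 : TC 2 owns rings 8..11            */
    /* BNXT_RING_TO_TC(bp, 9)      == 2 : ring 9 belongs to TC 2           */
    /* BNXT_RING_TO_TC_OFF(bp, 9)  == 1 : it is that TC's ring at offset 1 */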
3972 static int bnxt_alloc_tx_rings(struct bnxt *bp) in bnxt_alloc_tx_rings() argument
3975 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_tx_rings()
3977 bp->tx_push_size = 0; in bnxt_alloc_tx_rings()
3978 if (bp->tx_push_thresh) { in bnxt_alloc_tx_rings()
3982 bp->tx_push_thresh); in bnxt_alloc_tx_rings()
3986 bp->tx_push_thresh = 0; in bnxt_alloc_tx_rings()
3989 bp->tx_push_size = push_size; in bnxt_alloc_tx_rings()
3992 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { in bnxt_alloc_tx_rings()
3993 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; in bnxt_alloc_tx_rings()
3999 rc = bnxt_alloc_ring(bp, &ring->ring_mem); in bnxt_alloc_tx_rings()
4004 if (bp->tx_push_size) { in bnxt_alloc_tx_rings()
4011 bp->tx_push_size, in bnxt_alloc_tx_rings()
4022 qidx = bp->tc_to_qidx[j]; in bnxt_alloc_tx_rings()
4023 ring->queue_id = bp->q_info[qidx].queue_id; in bnxt_alloc_tx_rings()
4025 if (i < bp->tx_nr_rings_xdp) in bnxt_alloc_tx_rings()
4027 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) in bnxt_alloc_tx_rings()
4057 static void bnxt_free_all_cp_arrays(struct bnxt *bp) in bnxt_free_all_cp_arrays() argument
4061 if (!bp->bnapi) in bnxt_free_all_cp_arrays()
4063 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_all_cp_arrays()
4064 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_all_cp_arrays()
4072 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) in bnxt_alloc_all_cp_arrays() argument
4074 int i, n = bp->cp_nr_pages; in bnxt_alloc_all_cp_arrays()
4076 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_all_cp_arrays()
4077 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_alloc_all_cp_arrays()
4089 static void bnxt_free_cp_rings(struct bnxt *bp) in bnxt_free_cp_rings() argument
4093 if (!bp->bnapi) in bnxt_free_cp_rings()
4096 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_cp_rings()
4097 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_cp_rings()
4108 bnxt_free_ring(bp, &ring->ring_mem); in bnxt_free_cp_rings()
4117 bnxt_free_ring(bp, &ring->ring_mem); in bnxt_free_cp_rings()
4126 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, in bnxt_alloc_cp_sub_ring() argument
4133 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); in bnxt_alloc_cp_sub_ring()
4140 rmem->nr_pages = bp->cp_nr_pages; in bnxt_alloc_cp_sub_ring()
4145 rc = bnxt_alloc_ring(bp, rmem); in bnxt_alloc_cp_sub_ring()
4147 bnxt_free_ring(bp, rmem); in bnxt_alloc_cp_sub_ring()
4153 static int bnxt_alloc_cp_rings(struct bnxt *bp) in bnxt_alloc_cp_rings() argument
4155 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); in bnxt_alloc_cp_rings()
4157 int tcs = bp->num_tc; in bnxt_alloc_cp_rings()
4161 ulp_msix = bnxt_get_ulp_msix_num(bp); in bnxt_alloc_cp_rings()
4162 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_cp_rings()
4163 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_alloc_cp_rings()
4176 rc = bnxt_alloc_ring(bp, &ring->ring_mem); in bnxt_alloc_cp_rings()
4182 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_alloc_cp_rings()
4185 if (i < bp->rx_nr_rings) { in bnxt_alloc_cp_rings()
4189 if (i < bp->tx_nr_rings_xdp) { in bnxt_alloc_cp_rings()
4192 } else if ((sh && i < bp->tx_nr_rings) || in bnxt_alloc_cp_rings()
4193 (!sh && i >= bp->rx_nr_rings)) { in bnxt_alloc_cp_rings()
4206 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); in bnxt_alloc_cp_rings()
4213 bp->rx_ring[i].rx_cpr = cpr2; in bnxt_alloc_cp_rings()
4218 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; in bnxt_alloc_cp_rings()
4219 bp->tx_ring[n].tx_cpr = cpr2; in bnxt_alloc_cp_rings()
4229 static void bnxt_init_rx_ring_struct(struct bnxt *bp, in bnxt_init_rx_ring_struct() argument
4237 rmem->nr_pages = bp->rx_nr_pages; in bnxt_init_rx_ring_struct()
4241 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; in bnxt_init_rx_ring_struct()
4246 rmem->nr_pages = bp->rx_agg_nr_pages; in bnxt_init_rx_ring_struct()
4250 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; in bnxt_init_rx_ring_struct()
4254 static void bnxt_reset_rx_ring_struct(struct bnxt *bp, in bnxt_reset_rx_ring_struct() argument
4288 static void bnxt_init_ring_struct(struct bnxt *bp) in bnxt_init_ring_struct() argument
4292 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_ring_struct()
4293 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_init_ring_struct()
4306 rmem->nr_pages = bp->cp_nr_pages; in bnxt_init_ring_struct()
4318 rmem->nr_pages = bp->rx_nr_pages; in bnxt_init_ring_struct()
4322 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; in bnxt_init_ring_struct()
4327 rmem->nr_pages = bp->rx_agg_nr_pages; in bnxt_init_ring_struct()
4331 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; in bnxt_init_ring_struct()
4338 rmem->nr_pages = bp->tx_nr_pages; in bnxt_init_ring_struct()
4342 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; in bnxt_init_ring_struct()
4370 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp, in bnxt_alloc_one_rx_ring_skb() argument
4378 for (i = 0; i < bp->rx_ring_size; i++) { in bnxt_alloc_one_rx_ring_skb()
4379 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { in bnxt_alloc_one_rx_ring_skb()
4380 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", in bnxt_alloc_one_rx_ring_skb()
4381 ring_nr, i, bp->rx_ring_size); in bnxt_alloc_one_rx_ring_skb()
4389 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp, in bnxt_alloc_one_rx_ring_netmem() argument
4397 for (i = 0; i < bp->rx_agg_ring_size; i++) { in bnxt_alloc_one_rx_ring_netmem()
4398 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) { in bnxt_alloc_one_rx_ring_netmem()
4399 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n", in bnxt_alloc_one_rx_ring_netmem()
4400 ring_nr, i, bp->rx_agg_ring_size); in bnxt_alloc_one_rx_ring_netmem()
4408 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp, in bnxt_alloc_one_tpa_info_data() argument
4415 for (i = 0; i < bp->max_tpa; i++) { in bnxt_alloc_one_tpa_info_data()
4416 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, in bnxt_alloc_one_tpa_info_data()
4422 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; in bnxt_alloc_one_tpa_info_data()
4429 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) in bnxt_alloc_one_rx_ring() argument
4431 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; in bnxt_alloc_one_rx_ring()
4434 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr); in bnxt_alloc_one_rx_ring()
4436 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) in bnxt_alloc_one_rx_ring()
4439 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr); in bnxt_alloc_one_rx_ring()
4442 rc = bnxt_alloc_one_tpa_info_data(bp, rxr); in bnxt_alloc_one_rx_ring()
4449 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp, in bnxt_init_one_rx_ring_rxbd() argument
4455 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | in bnxt_init_one_rx_ring_rxbd()
4466 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp, in bnxt_init_one_rx_agg_ring_rxbd() argument
4474 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { in bnxt_init_one_rx_agg_ring_rxbd()
4482 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) in bnxt_init_one_rx_ring() argument
4486 rxr = &bp->rx_ring[ring_nr]; in bnxt_init_one_rx_ring()
4487 bnxt_init_one_rx_ring_rxbd(bp, rxr); in bnxt_init_one_rx_ring()
4489 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, in bnxt_init_one_rx_ring()
4492 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { in bnxt_init_one_rx_ring()
4493 bpf_prog_add(bp->xdp_prog, 1); in bnxt_init_one_rx_ring()
4494 rxr->xdp_prog = bp->xdp_prog; in bnxt_init_one_rx_ring()
4497 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr); in bnxt_init_one_rx_ring()
4499 return bnxt_alloc_one_rx_ring(bp, ring_nr); in bnxt_init_one_rx_ring()
4502 static void bnxt_init_cp_rings(struct bnxt *bp) in bnxt_init_cp_rings() argument
4506 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_cp_rings()
4507 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; in bnxt_init_cp_rings()
4511 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; in bnxt_init_cp_rings()
4512 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; in bnxt_init_cp_rings()
4520 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; in bnxt_init_cp_rings()
4521 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; in bnxt_init_cp_rings()
4526 static int bnxt_init_rx_rings(struct bnxt *bp) in bnxt_init_rx_rings() argument
4530 if (BNXT_RX_PAGE_MODE(bp)) { in bnxt_init_rx_rings()
4531 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; in bnxt_init_rx_rings()
4532 bp->rx_dma_offset = XDP_PACKET_HEADROOM; in bnxt_init_rx_rings()
4534 bp->rx_offset = BNXT_RX_OFFSET; in bnxt_init_rx_rings()
4535 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; in bnxt_init_rx_rings()
4538 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_init_rx_rings()
4539 rc = bnxt_init_one_rx_ring(bp, i); in bnxt_init_rx_rings()
4547 static int bnxt_init_tx_rings(struct bnxt *bp) in bnxt_init_tx_rings() argument
4551 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, in bnxt_init_tx_rings()
4554 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_init_tx_rings()
4555 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; in bnxt_init_tx_rings()
4560 if (i >= bp->tx_nr_rings_xdp) in bnxt_init_tx_rings()
4561 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp, in bnxt_init_tx_rings()
4569 static void bnxt_free_ring_grps(struct bnxt *bp) in bnxt_free_ring_grps() argument
4571 kfree(bp->grp_info); in bnxt_free_ring_grps()
4572 bp->grp_info = NULL; in bnxt_free_ring_grps()
4575 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) in bnxt_init_ring_grps() argument
4580 bp->grp_info = kcalloc(bp->cp_nr_rings, in bnxt_init_ring_grps()
4583 if (!bp->grp_info) in bnxt_init_ring_grps()
4586 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_ring_grps()
4588 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
4589 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
4590 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
4591 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
4592 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
4597 static void bnxt_free_vnics(struct bnxt *bp) in bnxt_free_vnics() argument
4599 kfree(bp->vnic_info); in bnxt_free_vnics()
4600 bp->vnic_info = NULL; in bnxt_free_vnics()
4601 bp->nr_vnics = 0; in bnxt_free_vnics()
4604 static int bnxt_alloc_vnics(struct bnxt *bp) in bnxt_alloc_vnics() argument
4609 if (bp->flags & BNXT_FLAG_RFS) { in bnxt_alloc_vnics()
4610 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) in bnxt_alloc_vnics()
4612 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_alloc_vnics()
4613 num_vnics += bp->rx_nr_rings; in bnxt_alloc_vnics()
4617 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_alloc_vnics()
4620 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), in bnxt_alloc_vnics()
4622 if (!bp->vnic_info) in bnxt_alloc_vnics()
4625 bp->nr_vnics = num_vnics; in bnxt_alloc_vnics()
4629 static void bnxt_init_vnics(struct bnxt *bp) in bnxt_init_vnics() argument
4631 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_init_vnics()
4634 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_init_vnics()
4635 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; in bnxt_init_vnics()
4645 if (bp->vnic_info[i].rss_hash_key) { in bnxt_init_vnics()
4650 if (!bp->rss_hash_key_valid && in bnxt_init_vnics()
4651 !bp->rss_hash_key_updated) { in bnxt_init_vnics()
4652 get_random_bytes(bp->rss_hash_key, in bnxt_init_vnics()
4654 bp->rss_hash_key_updated = true; in bnxt_init_vnics()
4657 memcpy(vnic->rss_hash_key, bp->rss_hash_key, in bnxt_init_vnics()
4660 if (!bp->rss_hash_key_updated) in bnxt_init_vnics()
4663 bp->rss_hash_key_updated = false; in bnxt_init_vnics()
4664 bp->rss_hash_key_valid = true; in bnxt_init_vnics()
4666 bp->toeplitz_prefix = 0; in bnxt_init_vnics()
4668 bp->toeplitz_prefix <<= 8; in bnxt_init_vnics()
4669 bp->toeplitz_prefix |= key[k]; in bnxt_init_vnics()
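bnxt_init_vnics() keeps one Toeplitz RSS key per device rather than per VNIC: the key is drawn from get_random_bytes() only once, guarded by rss_hash_key_valid/rss_hash_key_updated, and then copied into every VNIC so flow placement stays stable across VNIC rebuilds. The tail of the loop folds the leading key bytes into bp->toeplitz_prefix, most significant byte first; the loop bound is not visible in the matches, so sizeof() below is an assumption:

    bp->toeplitz_prefix = 0;
    for (k = 0; k < sizeof(bp->toeplitz_prefix); k++) {
            bp->toeplitz_prefix <<= 8;      /* shift earlier bytes up */
            bp->toeplitz_prefix |= key[k];  /* fold in next key byte  */
    }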
4696 void bnxt_set_tpa_flags(struct bnxt *bp) in bnxt_set_tpa_flags() argument
4698 bp->flags &= ~BNXT_FLAG_TPA; in bnxt_set_tpa_flags()
4699 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) in bnxt_set_tpa_flags()
4701 if (bp->dev->features & NETIF_F_LRO) in bnxt_set_tpa_flags()
4702 bp->flags |= BNXT_FLAG_LRO; in bnxt_set_tpa_flags()
4703 else if (bp->dev->features & NETIF_F_GRO_HW) in bnxt_set_tpa_flags()
4704 bp->flags |= BNXT_FLAG_GRO; in bnxt_set_tpa_flags()
4707 static void bnxt_init_ring_params(struct bnxt *bp) in bnxt_init_ring_params() argument
4711 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK; in bnxt_init_ring_params()
4715 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size); in bnxt_init_ring_params()
4718 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4721 void bnxt_set_ring_params(struct bnxt *bp) in bnxt_set_ring_params() argument
4727 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); in bnxt_set_ring_params()
4732 ring_size = bp->rx_ring_size; in bnxt_set_ring_params()
4733 bp->rx_agg_ring_size = 0; in bnxt_set_ring_params()
4734 bp->rx_agg_nr_pages = 0; in bnxt_set_ring_params()
4736 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS) in bnxt_set_ring_params()
4739 bp->flags &= ~BNXT_FLAG_JUMBO; in bnxt_set_ring_params()
4740 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { in bnxt_set_ring_params()
4743 bp->flags |= BNXT_FLAG_JUMBO; in bnxt_set_ring_params()
4744 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; in bnxt_set_ring_params()
4751 … netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", in bnxt_set_ring_params()
4752 bp->rx_ring_size, ring_size); in bnxt_set_ring_params()
4753 bp->rx_ring_size = ring_size; in bnxt_set_ring_params()
4757 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, in bnxt_set_ring_params()
4759 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { in bnxt_set_ring_params()
4762 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; in bnxt_set_ring_params()
4764 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", in bnxt_set_ring_params()
4767 bp->rx_agg_ring_size = agg_ring_size; in bnxt_set_ring_params()
4768 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; in bnxt_set_ring_params()
4770 if (BNXT_RX_PAGE_MODE(bp)) { in bnxt_set_ring_params()
4777 bp->rx_copybreak, in bnxt_set_ring_params()
4778 bp->dev->cfg_pending->hds_thresh); in bnxt_set_ring_params()
4785 bp->rx_buf_use_size = rx_size; in bnxt_set_ring_params()
4786 bp->rx_buf_size = rx_space; in bnxt_set_ring_params()
4788 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); in bnxt_set_ring_params()
4789 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; in bnxt_set_ring_params()
4791 ring_size = bp->tx_ring_size; in bnxt_set_ring_params()
4792 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); in bnxt_set_ring_params()
4793 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; in bnxt_set_ring_params()
4795 max_rx_cmpl = bp->rx_ring_size; in bnxt_set_ring_params()
4800 if (bp->flags & BNXT_FLAG_TPA) in bnxt_set_ring_params()
4801 max_rx_cmpl += bp->max_tpa; in bnxt_set_ring_params()
4803 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; in bnxt_set_ring_params()
4804 bp->cp_ring_size = ring_size; in bnxt_set_ring_params()
4806 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); in bnxt_set_ring_params()
4807 if (bp->cp_nr_pages > MAX_CP_PAGES) { in bnxt_set_ring_params()
4808 bp->cp_nr_pages = MAX_CP_PAGES; in bnxt_set_ring_params()
4809 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; in bnxt_set_ring_params()
4810 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", in bnxt_set_ring_params()
4811 ring_size, bp->cp_ring_size); in bnxt_set_ring_params()
4813 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; in bnxt_set_ring_params()
4814 bp->cp_ring_mask = bp->cp_bit - 1; in bnxt_set_ring_params()
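bnxt_set_ring_params() sizes the completion ring for the worst case: each RX packet can consume two completion entries (hence max_rx_cmpl * 2, with max_tpa added first when TPA is on), plus one entry per aggregation buffer and per TX packet, and the result is split into pages and clamped to MAX_CP_PAGES. A worked example with hypothetical sizes, assuming bnxt_calc_nr_ring_pages() is a ceiling division that keeps cp_nr_pages * CP_DESC_CNT a power of two (line 4814 derives cp_ring_mask from it):

    max_rx_cmpl = 2047 + 256;                    /* rx_ring_size + max_tpa    */
    ring_size   = max_rx_cmpl * 2 + 4094 + 511;  /* + agg_ring_size + tx size */
                                                 /* = 9211 completion entries */
    cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);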
4820 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) in __bnxt_set_rx_skb_mode() argument
4822 struct net_device *dev = bp->dev; in __bnxt_set_rx_skb_mode()
4825 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS); in __bnxt_set_rx_skb_mode()
4826 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; in __bnxt_set_rx_skb_mode()
4828 if (bp->xdp_prog->aux->xdp_has_frags) in __bnxt_set_rx_skb_mode()
4829 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); in __bnxt_set_rx_skb_mode()
4832 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); in __bnxt_set_rx_skb_mode()
4834 bp->flags |= BNXT_FLAG_JUMBO; in __bnxt_set_rx_skb_mode()
4835 bp->rx_skb_func = bnxt_rx_multi_page_skb; in __bnxt_set_rx_skb_mode()
4837 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; in __bnxt_set_rx_skb_mode()
4838 bp->rx_skb_func = bnxt_rx_page_skb; in __bnxt_set_rx_skb_mode()
4840 bp->rx_dir = DMA_BIDIRECTIONAL; in __bnxt_set_rx_skb_mode()
4842 dev->max_mtu = bp->max_mtu; in __bnxt_set_rx_skb_mode()
4843 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; in __bnxt_set_rx_skb_mode()
4844 bp->rx_dir = DMA_FROM_DEVICE; in __bnxt_set_rx_skb_mode()
4845 bp->rx_skb_func = bnxt_rx_skb; in __bnxt_set_rx_skb_mode()
4849 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) in bnxt_set_rx_skb_mode() argument
4851 __bnxt_set_rx_skb_mode(bp, page_mode); in bnxt_set_rx_skb_mode()
4856 bnxt_get_max_rings(bp, &rx, &tx, true); in bnxt_set_rx_skb_mode()
4858 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; in bnxt_set_rx_skb_mode()
4859 bp->dev->hw_features |= NETIF_F_LRO; in bnxt_set_rx_skb_mode()
4864 netdev_update_features(bp->dev); in bnxt_set_rx_skb_mode()
4867 static void bnxt_free_vnic_attributes(struct bnxt *bp) in bnxt_free_vnic_attributes() argument
4871 struct pci_dev *pdev = bp->pdev; in bnxt_free_vnic_attributes()
4873 if (!bp->vnic_info) in bnxt_free_vnic_attributes()
4876 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_free_vnic_attributes()
4877 vnic = &bp->vnic_info[i]; in bnxt_free_vnic_attributes()
4903 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) in bnxt_alloc_vnic_attributes() argument
4907 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_vnic_attributes()
4910 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_alloc_vnic_attributes()
4911 vnic = &bp->vnic_info[i]; in bnxt_alloc_vnic_attributes()
4938 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_alloc_vnic_attributes()
4942 max_rings = bp->rx_nr_rings; in bnxt_alloc_vnic_attributes()
4952 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && in bnxt_alloc_vnic_attributes()
4958 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_alloc_vnic_attributes()
4980 static void bnxt_free_hwrm_resources(struct bnxt *bp) in bnxt_free_hwrm_resources() argument
4984 dma_pool_destroy(bp->hwrm_dma_pool); in bnxt_free_hwrm_resources()
4985 bp->hwrm_dma_pool = NULL; in bnxt_free_hwrm_resources()
4988 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) in bnxt_free_hwrm_resources()
4993 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) in bnxt_alloc_hwrm_resources() argument
4995 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, in bnxt_alloc_hwrm_resources()
4998 if (!bp->hwrm_dma_pool) in bnxt_alloc_hwrm_resources()
5001 INIT_HLIST_HEAD(&bp->hwrm_pending_list); in bnxt_alloc_hwrm_resources()
5006 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) in bnxt_free_stats_mem() argument
5013 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, in bnxt_free_stats_mem()
5019 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, in bnxt_alloc_stats_mem() argument
5022 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, in bnxt_alloc_stats_mem()
5039 bnxt_free_stats_mem(bp, stats); in bnxt_alloc_stats_mem()
5059 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, in bnxt_hwrm_func_qstat_ext() argument
5067 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || in bnxt_hwrm_func_qstat_ext()
5068 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_func_qstat_ext()
5071 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); in bnxt_hwrm_func_qstat_ext()
5078 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_func_qstat_ext()
5079 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_qstat_ext()
5084 hwrm_req_drop(bp, req); in bnxt_hwrm_func_qstat_ext()
5088 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5089 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5091 static void bnxt_init_stats(struct bnxt *bp) in bnxt_init_stats() argument
5093 struct bnxt_napi *bnapi = bp->bnapi[0]; in bnxt_init_stats()
5104 rc = bnxt_hwrm_func_qstat_ext(bp, stats); in bnxt_init_stats()
5106 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_init_stats()
5112 if (bp->flags & BNXT_FLAG_PORT_STATS) { in bnxt_init_stats()
5113 stats = &bp->port_stats; in bnxt_init_stats()
5122 rc = bnxt_hwrm_port_qstats(bp, flags); in bnxt_init_stats()
5131 bnxt_hwrm_port_qstats(bp, 0); in bnxt_init_stats()
5134 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { in bnxt_init_stats()
5135 stats = &bp->rx_port_stats_ext; in bnxt_init_stats()
5139 stats = &bp->tx_port_stats_ext; in bnxt_init_stats()
5145 rc = bnxt_hwrm_port_qstats_ext(bp, flags); in bnxt_init_stats()
5157 bnxt_hwrm_port_qstats_ext(bp, 0); in bnxt_init_stats()
5162 static void bnxt_free_port_stats(struct bnxt *bp) in bnxt_free_port_stats() argument
5164 bp->flags &= ~BNXT_FLAG_PORT_STATS; in bnxt_free_port_stats()
5165 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; in bnxt_free_port_stats()
5167 bnxt_free_stats_mem(bp, &bp->port_stats); in bnxt_free_port_stats()
5168 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); in bnxt_free_port_stats()
5169 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); in bnxt_free_port_stats()
5172 static void bnxt_free_ring_stats(struct bnxt *bp) in bnxt_free_ring_stats() argument
5176 if (!bp->bnapi) in bnxt_free_ring_stats()
5179 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_ring_stats()
5180 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_ring_stats()
5183 bnxt_free_stats_mem(bp, &cpr->stats); in bnxt_free_ring_stats()
5190 static int bnxt_alloc_stats(struct bnxt *bp) in bnxt_alloc_stats() argument
5195 size = bp->hw_ring_stats_size; in bnxt_alloc_stats()
5197 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_stats()
5198 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_alloc_stats()
5206 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); in bnxt_alloc_stats()
5213 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) in bnxt_alloc_stats()
5216 if (bp->port_stats.hw_stats) in bnxt_alloc_stats()
5219 bp->port_stats.len = BNXT_PORT_STATS_SIZE; in bnxt_alloc_stats()
5220 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); in bnxt_alloc_stats()
5224 bp->flags |= BNXT_FLAG_PORT_STATS; in bnxt_alloc_stats()
5228 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) in bnxt_alloc_stats()
5229 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) in bnxt_alloc_stats()
5232 if (bp->rx_port_stats_ext.hw_stats) in bnxt_alloc_stats()
5235 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); in bnxt_alloc_stats()
5236 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); in bnxt_alloc_stats()
5242 if (bp->tx_port_stats_ext.hw_stats) in bnxt_alloc_stats()
5245 if (bp->hwrm_spec_code >= 0x10902 || in bnxt_alloc_stats()
5246 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { in bnxt_alloc_stats()
5247 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); in bnxt_alloc_stats()
5248 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); in bnxt_alloc_stats()
5253 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; in bnxt_alloc_stats()
5257 static void bnxt_clear_ring_indices(struct bnxt *bp) in bnxt_clear_ring_indices() argument
5261 if (!bp->bnapi) in bnxt_clear_ring_indices()
5264 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_clear_ring_indices()
5265 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_clear_ring_indices()
5293 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) in bnxt_insert_usr_fltr() argument
5300 list_add_tail(&fltr->list, &bp->usr_fltr_list); in bnxt_insert_usr_fltr()
5303 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) in bnxt_del_one_usr_fltr() argument
5309 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) in bnxt_clear_usr_fltrs() argument
5313 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { in bnxt_clear_usr_fltrs()
5316 bnxt_del_one_usr_fltr(bp, usr_fltr); in bnxt_clear_usr_fltrs()
5320 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) in bnxt_del_fltr() argument
5323 bnxt_del_one_usr_fltr(bp, fltr); in bnxt_del_fltr()
5325 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); in bnxt_del_fltr()
5326 bp->ntp_fltr_count--; in bnxt_del_fltr()
5331 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) in bnxt_free_ntp_fltrs() argument
5335 netdev_assert_locked_or_invisible(bp->dev); in bnxt_free_ntp_fltrs()
5345 head = &bp->ntp_fltr_hash_tbl[i]; in bnxt_free_ntp_fltrs()
5347 bnxt_del_l2_filter(bp, fltr->l2_fltr); in bnxt_free_ntp_fltrs()
5351 bnxt_del_fltr(bp, &fltr->base); in bnxt_free_ntp_fltrs()
5357 bitmap_free(bp->ntp_fltr_bmap); in bnxt_free_ntp_fltrs()
5358 bp->ntp_fltr_bmap = NULL; in bnxt_free_ntp_fltrs()
5359 bp->ntp_fltr_count = 0; in bnxt_free_ntp_fltrs()
5362 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) in bnxt_alloc_ntp_fltrs() argument
5366 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) in bnxt_alloc_ntp_fltrs()
5370 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); in bnxt_alloc_ntp_fltrs()
5372 bp->ntp_fltr_count = 0; in bnxt_alloc_ntp_fltrs()
5373 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL); in bnxt_alloc_ntp_fltrs()
5375 if (!bp->ntp_fltr_bmap) in bnxt_alloc_ntp_fltrs()
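The ntuple filter table above pairs hash buckets with a bitmap of software IDs: bitmap_zalloc() sized to bp->max_fltr, IDs claimed with bitmap_find_free_region() (see 6016 below) and returned with clear_bit(). A minimal userspace version of that ID allocator, with an assumed capacity of 256:

#include <limits.h>

#define MAX_FLTR  256
#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long fltr_bmap[(MAX_FLTR + WORD_BITS - 1) / WORD_BITS];

/* Find, claim and return the lowest free ID, or -1 if the table is full. */
static int fltr_id_alloc(void)
{
	for (int id = 0; id < MAX_FLTR; id++) {
		unsigned long *w = &fltr_bmap[id / WORD_BITS];
		unsigned long bit = 1UL << (id % WORD_BITS);

		if (!(*w & bit)) {
			*w |= bit;
			return id;
		}
	}
	return -1;
}

static void fltr_id_free(int id)
{
	fltr_bmap[id / WORD_BITS] &= ~(1UL << (id % WORD_BITS));
}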
5381 static void bnxt_free_l2_filters(struct bnxt *bp, bool all) in bnxt_free_l2_filters() argument
5390 head = &bp->l2_fltr_hash_tbl[i]; in bnxt_free_l2_filters()
5395 bnxt_del_fltr(bp, &fltr->base); in bnxt_free_l2_filters()
5400 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) in bnxt_init_l2_fltr_tbl() argument
5405 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); in bnxt_init_l2_fltr_tbl()
5406 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); in bnxt_init_l2_fltr_tbl()
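bnxt_init_l2_fltr_tbl() seeds the filter hash with random bytes so bucket placement differs across boots and is harder to attack. Sketch of the same idea in userspace — srand()/rand() and the FNV-1a mixer are weak placeholders for the kernel's get_random_bytes() and jhash2(), and the table size is assumed:

#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define L2_HASH_BUCKETS 64    /* assumed table size, must be a power of 2 */

static uint32_t hash_seed;

static void l2_tbl_init(void)
{
	srand((unsigned)time(NULL));     /* get_random_bytes() in the kernel */
	hash_seed = (uint32_t)rand();
}

static unsigned int l2_bucket(const uint8_t mac[6])
{
	uint32_t h = hash_seed;

	for (int i = 0; i < 6; i++)
		h = (h ^ mac[i]) * 0x01000193u;  /* FNV-1a step as the mixer */
	return h & (L2_HASH_BUCKETS - 1);
}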
5409 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) in bnxt_free_mem() argument
5411 bnxt_free_vnic_attributes(bp); in bnxt_free_mem()
5412 bnxt_free_tx_rings(bp); in bnxt_free_mem()
5413 bnxt_free_rx_rings(bp); in bnxt_free_mem()
5414 bnxt_free_cp_rings(bp); in bnxt_free_mem()
5415 bnxt_free_all_cp_arrays(bp); in bnxt_free_mem()
5416 bnxt_free_ntp_fltrs(bp, false); in bnxt_free_mem()
5417 bnxt_free_l2_filters(bp, false); in bnxt_free_mem()
5419 bnxt_free_ring_stats(bp); in bnxt_free_mem()
5420 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || in bnxt_free_mem()
5421 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_free_mem()
5422 bnxt_free_port_stats(bp); in bnxt_free_mem()
5423 bnxt_free_ring_grps(bp); in bnxt_free_mem()
5424 bnxt_free_vnics(bp); in bnxt_free_mem()
5425 kfree(bp->tx_ring_map); in bnxt_free_mem()
5426 bp->tx_ring_map = NULL; in bnxt_free_mem()
5427 kfree(bp->tx_ring); in bnxt_free_mem()
5428 bp->tx_ring = NULL; in bnxt_free_mem()
5429 kfree(bp->rx_ring); in bnxt_free_mem()
5430 bp->rx_ring = NULL; in bnxt_free_mem()
5431 kfree(bp->bnapi); in bnxt_free_mem()
5432 bp->bnapi = NULL; in bnxt_free_mem()
5434 bnxt_clear_ring_indices(bp); in bnxt_free_mem()
5438 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) in bnxt_alloc_mem() argument
5448 bp->cp_nr_rings); in bnxt_alloc_mem()
5450 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); in bnxt_alloc_mem()
5454 bp->bnapi = bnapi; in bnxt_alloc_mem()
5456 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { in bnxt_alloc_mem()
5457 bp->bnapi[i] = bnapi; in bnxt_alloc_mem()
5458 bp->bnapi[i]->index = i; in bnxt_alloc_mem()
5459 bp->bnapi[i]->bp = bp; in bnxt_alloc_mem()
5460 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_alloc_mem()
5462 &bp->bnapi[i]->cp_ring; in bnxt_alloc_mem()
5469 bp->rx_ring = kcalloc(bp->rx_nr_rings, in bnxt_alloc_mem()
5472 if (!bp->rx_ring) in bnxt_alloc_mem()
5475 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_mem()
5476 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_alloc_mem()
5478 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_alloc_mem()
5484 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; in bnxt_alloc_mem()
5486 rxr->bnapi = bp->bnapi[i]; in bnxt_alloc_mem()
5487 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; in bnxt_alloc_mem()
5490 bp->tx_ring = kcalloc(bp->tx_nr_rings, in bnxt_alloc_mem()
5493 if (!bp->tx_ring) in bnxt_alloc_mem()
5496 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), in bnxt_alloc_mem()
5499 if (!bp->tx_ring_map) in bnxt_alloc_mem()
5502 if (bp->flags & BNXT_FLAG_SHARED_RINGS) in bnxt_alloc_mem()
5505 j = bp->rx_nr_rings; in bnxt_alloc_mem()
5507 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_alloc_mem()
5508 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; in bnxt_alloc_mem()
5511 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_alloc_mem()
5514 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; in bnxt_alloc_mem()
5515 if (i >= bp->tx_nr_rings_xdp) { in bnxt_alloc_mem()
5516 int k = j + BNXT_RING_TO_TC_OFF(bp, i); in bnxt_alloc_mem()
5518 bnapi2 = bp->bnapi[k]; in bnxt_alloc_mem()
5519 txr->txq_index = i - bp->tx_nr_rings_xdp; in bnxt_alloc_mem()
5521 BNXT_RING_TO_TC(bp, txr->txq_index); in bnxt_alloc_mem()
5525 bnapi2 = bp->bnapi[j]; in bnxt_alloc_mem()
5532 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_alloc_mem()
5536 rc = bnxt_alloc_stats(bp); in bnxt_alloc_mem()
5539 bnxt_init_stats(bp); in bnxt_alloc_mem()
5541 rc = bnxt_alloc_ntp_fltrs(bp); in bnxt_alloc_mem()
5545 rc = bnxt_alloc_vnics(bp); in bnxt_alloc_mem()
5550 rc = bnxt_alloc_all_cp_arrays(bp); in bnxt_alloc_mem()
5554 bnxt_init_ring_struct(bp); in bnxt_alloc_mem()
5556 rc = bnxt_alloc_rx_rings(bp); in bnxt_alloc_mem()
5560 rc = bnxt_alloc_tx_rings(bp); in bnxt_alloc_mem()
5564 rc = bnxt_alloc_cp_rings(bp); in bnxt_alloc_mem()
5568 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | in bnxt_alloc_mem()
5571 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) in bnxt_alloc_mem()
5572 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= in bnxt_alloc_mem()
5575 rc = bnxt_alloc_vnic_attributes(bp); in bnxt_alloc_mem()
5581 bnxt_free_mem(bp, true); in bnxt_alloc_mem()
5585 static void bnxt_disable_int(struct bnxt *bp) in bnxt_disable_int() argument
5589 if (!bp->bnapi) in bnxt_disable_int()
5592 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_int()
5593 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_disable_int()
5598 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_disable_int()
5602 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) in bnxt_cp_num_to_irq_num() argument
5604 struct bnxt_napi *bnapi = bp->bnapi[n]; in bnxt_cp_num_to_irq_num()
5611 static void bnxt_disable_int_sync(struct bnxt *bp) in bnxt_disable_int_sync() argument
5615 if (!bp->irq_tbl) in bnxt_disable_int_sync()
5618 atomic_inc(&bp->intr_sem); in bnxt_disable_int_sync()
5620 bnxt_disable_int(bp); in bnxt_disable_int_sync()
5621 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_int_sync()
5622 int map_idx = bnxt_cp_num_to_irq_num(bp, i); in bnxt_disable_int_sync()
5624 synchronize_irq(bp->irq_tbl[map_idx].vector); in bnxt_disable_int_sync()
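bnxt_disable_int_sync() is a two-step quiesce: first block new interrupt work (atomic_inc(&bp->intr_sem) at 5618), then synchronize_irq() per vector so any handler already running drains before teardown continues. A userspace analogue with C11 atomics, where the in_handler counter plays the role of synchronize_irq():

#include <stdatomic.h>
#include <sched.h>

static atomic_int intr_sem;    /* nonzero: handlers must bail out early */
static atomic_int in_handler;  /* how many handlers are mid-flight */

static void fake_irq_handler(void)
{
	atomic_fetch_add(&in_handler, 1);
	if (!atomic_load(&intr_sem)) {
		/* normal completion processing would run here */
	}
	atomic_fetch_sub(&in_handler, 1);
}

static void disable_int_sync(void)
{
	atomic_fetch_add(&intr_sem, 1);   /* step 1: block new work */
	while (atomic_load(&in_handler))  /* step 2: drain in-flight handlers */
		sched_yield();
}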
5628 static void bnxt_enable_int(struct bnxt *bp) in bnxt_enable_int() argument
5632 atomic_set(&bp->intr_sem, 0); in bnxt_enable_int()
5633 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_int()
5634 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_enable_int()
5637 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_enable_int()
5641 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, in bnxt_hwrm_func_drv_rgtr() argument
5651 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); in bnxt_hwrm_func_drv_rgtr()
5661 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) in bnxt_hwrm_func_drv_rgtr()
5663 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) in bnxt_hwrm_func_drv_rgtr()
5666 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2) in bnxt_hwrm_func_drv_rgtr()
5676 if (BNXT_PF(bp)) { in bnxt_hwrm_func_drv_rgtr()
5697 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) in bnxt_hwrm_func_drv_rgtr()
5706 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) in bnxt_hwrm_func_drv_rgtr()
5709 !bp->ptp_cfg) in bnxt_hwrm_func_drv_rgtr()
5726 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_func_drv_rgtr()
5727 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_drv_rgtr()
5729 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); in bnxt_hwrm_func_drv_rgtr()
5732 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; in bnxt_hwrm_func_drv_rgtr()
5734 hwrm_req_drop(bp, req); in bnxt_hwrm_func_drv_rgtr()
5738 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) in bnxt_hwrm_func_drv_unrgtr() argument
5743 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) in bnxt_hwrm_func_drv_unrgtr()
5746 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); in bnxt_hwrm_func_drv_unrgtr()
5749 return hwrm_req_send(bp, req); in bnxt_hwrm_func_drv_unrgtr()
5752 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5754 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) in bnxt_hwrm_tunnel_dst_port_free() argument
5760 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) in bnxt_hwrm_tunnel_dst_port_free()
5763 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) in bnxt_hwrm_tunnel_dst_port_free()
5766 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); in bnxt_hwrm_tunnel_dst_port_free()
5774 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); in bnxt_hwrm_tunnel_dst_port_free()
5775 bp->vxlan_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5776 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; in bnxt_hwrm_tunnel_dst_port_free()
5779 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); in bnxt_hwrm_tunnel_dst_port_free()
5780 bp->nge_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5781 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; in bnxt_hwrm_tunnel_dst_port_free()
5784 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); in bnxt_hwrm_tunnel_dst_port_free()
5785 bp->vxlan_gpe_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5786 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID; in bnxt_hwrm_tunnel_dst_port_free()
5792 rc = hwrm_req_send(bp, req); in bnxt_hwrm_tunnel_dst_port_free()
5794 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", in bnxt_hwrm_tunnel_dst_port_free()
5796 if (bp->flags & BNXT_FLAG_TPA) in bnxt_hwrm_tunnel_dst_port_free()
5797 bnxt_set_tpa(bp, true); in bnxt_hwrm_tunnel_dst_port_free()
5801 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, in bnxt_hwrm_tunnel_dst_port_alloc() argument
5808 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); in bnxt_hwrm_tunnel_dst_port_alloc()
5815 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_tunnel_dst_port_alloc()
5816 rc = hwrm_req_send(bp, req); in bnxt_hwrm_tunnel_dst_port_alloc()
5818 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", in bnxt_hwrm_tunnel_dst_port_alloc()
5825 bp->vxlan_port = port; in bnxt_hwrm_tunnel_dst_port_alloc()
5826 bp->vxlan_fw_dst_port_id = in bnxt_hwrm_tunnel_dst_port_alloc()
5830 bp->nge_port = port; in bnxt_hwrm_tunnel_dst_port_alloc()
5831 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); in bnxt_hwrm_tunnel_dst_port_alloc()
5834 bp->vxlan_gpe_port = port; in bnxt_hwrm_tunnel_dst_port_alloc()
5835 bp->vxlan_gpe_fw_dst_port_id = in bnxt_hwrm_tunnel_dst_port_alloc()
5841 if (bp->flags & BNXT_FLAG_TPA) in bnxt_hwrm_tunnel_dst_port_alloc()
5842 bnxt_set_tpa(bp, true); in bnxt_hwrm_tunnel_dst_port_alloc()
5845 hwrm_req_drop(bp, req); in bnxt_hwrm_tunnel_dst_port_alloc()
5849 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) in bnxt_hwrm_cfa_l2_set_rx_mask() argument
5852 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_cfa_l2_set_rx_mask()
5855 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); in bnxt_hwrm_cfa_l2_set_rx_mask()
5865 return hwrm_req_send_silent(bp, req); in bnxt_hwrm_cfa_l2_set_rx_mask()
5868 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) in bnxt_del_l2_filter() argument
5872 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_del_l2_filter()
5874 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_del_l2_filter()
5878 bnxt_del_one_usr_fltr(bp, &fltr->base); in bnxt_del_l2_filter()
5880 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); in bnxt_del_l2_filter()
5881 bp->ntp_fltr_count--; in bnxt_del_l2_filter()
5883 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_del_l2_filter()
5887 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, in __bnxt_lookup_l2_filter() argument
5891 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; in __bnxt_lookup_l2_filter()
5904 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, in bnxt_lookup_l2_filter() argument
5911 fltr = __bnxt_lookup_l2_filter(bp, key, idx); in bnxt_lookup_l2_filter()
5918 #define BNXT_IPV4_4TUPLE(bp, fkeys) \ argument
5920 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5922 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5924 #define BNXT_IPV6_4TUPLE(bp, fkeys) \ argument
5926 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5928 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5930 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys) in bnxt_get_rss_flow_tuple_len() argument
5933 if (BNXT_IPV4_4TUPLE(bp, fkeys)) in bnxt_get_rss_flow_tuple_len()
5937 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) in bnxt_get_rss_flow_tuple_len()
5942 if (BNXT_IPV6_4TUPLE(bp, fkeys)) in bnxt_get_rss_flow_tuple_len()
5946 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) in bnxt_get_rss_flow_tuple_len()
5953 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, in bnxt_toeplitz() argument
5956 u64 prefix = bp->toeplitz_prefix, hash = 0; in bnxt_toeplitz()
5962 len = bnxt_get_rss_flow_tuple_len(bp, fkeys); in bnxt_toeplitz()
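bnxt_toeplitz() computes the standard Toeplitz RSS hash over the flow tuple: for every set bit of the input, XOR in the 32-bit window of the secret key aligned at that bit position. A generic userspace version of the algorithm (bit order is MSB-first for both key and data, as in the RSS spec; the key must be at least len + 4 bytes, which is why 40-byte RSS keys cover the 36-byte IPv6 four-tuple):

#include <stdint.h>
#include <stddef.h>

static uint32_t toeplitz(const uint8_t *key, const uint8_t *data, size_t len)
{
	/* 32-bit sliding window over the key, starting at its first byte */
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8)  |  (uint32_t)key[3];
	uint32_t hash = 0;

	for (size_t i = 0; i < len; i++) {
		for (int bit = 7; bit >= 0; bit--) {
			if (data[i] & (1u << bit))
				hash ^= window;
			/* slide the window one key bit to the left */
			window = (window << 1) | ((key[i + 4] >> bit) & 1);
		}
	}
	return hash;
}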
5993 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key) in bnxt_lookup_l2_filter_from_key() argument
5998 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & in bnxt_lookup_l2_filter_from_key()
6000 fltr = bnxt_lookup_l2_filter(bp, key, idx); in bnxt_lookup_l2_filter_from_key()
6005 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, in bnxt_init_l2_filter() argument
6016 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, in bnxt_init_l2_filter()
6017 bp->max_fltr, 0); in bnxt_init_l2_filter()
6021 bp->ntp_fltr_count++; in bnxt_init_l2_filter()
6023 head = &bp->l2_fltr_hash_tbl[idx]; in bnxt_init_l2_filter()
6025 bnxt_insert_usr_fltr(bp, &fltr->base); in bnxt_init_l2_filter()
6031 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, in bnxt_alloc_l2_filter() argument
6039 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & in bnxt_alloc_l2_filter()
6041 fltr = bnxt_lookup_l2_filter(bp, key, idx); in bnxt_alloc_l2_filter()
6048 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_alloc_l2_filter()
6049 rc = bnxt_init_l2_filter(bp, fltr, key, idx); in bnxt_alloc_l2_filter()
6050 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_alloc_l2_filter()
6052 bnxt_del_l2_filter(bp, fltr); in bnxt_alloc_l2_filter()
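bnxt_alloc_l2_filter() is a classic "lookup or insert" on a hash bucket: hash the key, return an existing entry with its refcount raised, otherwise allocate and link a new one under the table lock. A simplified single-bucket sketch, with a pthread mutex standing in for the driver's ntp_fltr_lock spinlock and the struct fields invented for illustration:

#include <stdint.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct l2_fltr {
	struct l2_fltr *next;
	uint8_t mac[6];
	int refcount;
};

static struct l2_fltr *bucket_head;
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static struct l2_fltr *l2_fltr_get(const uint8_t mac[6])
{
	struct l2_fltr *f;

	pthread_mutex_lock(&bucket_lock);
	for (f = bucket_head; f; f = f->next) {
		if (!memcmp(f->mac, mac, 6)) {
			f->refcount++;            /* found: take a reference */
			goto out;
		}
	}
	f = calloc(1, sizeof(*f));                /* miss: insert a new entry */
	if (f) {
		memcpy(f->mac, mac, 6);
		f->refcount = 1;
		f->next = bucket_head;
		bucket_head = f;
	}
out:
	pthread_mutex_unlock(&bucket_lock);
	return f;
}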
6058 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, in bnxt_alloc_new_l2_filter() argument
6066 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & in bnxt_alloc_new_l2_filter()
6068 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_alloc_new_l2_filter()
6069 fltr = __bnxt_lookup_l2_filter(bp, key, idx); in bnxt_alloc_new_l2_filter()
6080 rc = bnxt_init_l2_filter(bp, fltr, key, idx); in bnxt_alloc_new_l2_filter()
6082 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_alloc_new_l2_filter()
6083 bnxt_del_l2_filter(bp, fltr); in bnxt_alloc_new_l2_filter()
6088 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_alloc_new_l2_filter()
6103 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) in bnxt_hwrm_l2_filter_free() argument
6110 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_l2_filter_free()
6120 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); in bnxt_hwrm_l2_filter_free()
6126 return hwrm_req_send(bp, req); in bnxt_hwrm_l2_filter_free()
6129 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) in bnxt_hwrm_l2_filter_alloc() argument
6137 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_l2_filter_alloc()
6144 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); in bnxt_hwrm_l2_filter_alloc()
6151 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_hwrm_l2_filter_alloc()
6172 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_l2_filter_alloc()
6173 rc = hwrm_req_send(bp, req); in bnxt_hwrm_l2_filter_alloc()
6178 hwrm_req_drop(bp, req); in bnxt_hwrm_l2_filter_alloc()
6182 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, in bnxt_hwrm_cfa_ntuple_filter_free() argument
6189 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); in bnxt_hwrm_cfa_ntuple_filter_free()
6194 return hwrm_req_send(bp, req); in bnxt_hwrm_cfa_ntuple_filter_free()
6224 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, in bnxt_cfg_rfs_ring_tbl_idx() argument
6235 ctx = xa_load(&bp->dev->ethtool->rss_ctx, in bnxt_cfg_rfs_ring_tbl_idx()
6245 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { in bnxt_cfg_rfs_ring_tbl_idx()
6249 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; in bnxt_cfg_rfs_ring_tbl_idx()
6263 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, in bnxt_hwrm_cfa_ntuple_filter_alloc() argument
6274 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); in bnxt_hwrm_cfa_ntuple_filter_alloc()
6284 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
6285 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); in bnxt_hwrm_cfa_ntuple_filter_alloc()
6287 vnic = &bp->vnic_info[fltr->base.rxq + 1]; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6321 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_cfa_ntuple_filter_alloc()
6322 rc = hwrm_req_send(bp, req); in bnxt_hwrm_cfa_ntuple_filter_alloc()
6325 hwrm_req_drop(bp, req); in bnxt_hwrm_cfa_ntuple_filter_alloc()
6329 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, in bnxt_hwrm_set_vnic_filter() argument
6338 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); in bnxt_hwrm_set_vnic_filter()
6342 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; in bnxt_hwrm_set_vnic_filter()
6343 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); in bnxt_hwrm_set_vnic_filter()
6345 bnxt_del_l2_filter(bp, fltr); in bnxt_hwrm_set_vnic_filter()
6347 bp->vnic_info[vnic_id].l2_filters[idx] = fltr; in bnxt_hwrm_set_vnic_filter()
6351 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) in bnxt_hwrm_clear_vnic_filter() argument
6357 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; in bnxt_hwrm_clear_vnic_filter()
6362 bnxt_hwrm_l2_filter_free(bp, fltr); in bnxt_hwrm_clear_vnic_filter()
6363 bnxt_del_l2_filter(bp, fltr); in bnxt_hwrm_clear_vnic_filter()
6374 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, in bnxt_hwrm_vnic_update_tunl_tpa() argument
6379 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) in bnxt_hwrm_vnic_update_tunl_tpa()
6382 if (bp->vxlan_port) in bnxt_hwrm_vnic_update_tunl_tpa()
6384 if (bp->vxlan_gpe_port) in bnxt_hwrm_vnic_update_tunl_tpa()
6386 if (bp->nge_port) in bnxt_hwrm_vnic_update_tunl_tpa()
6393 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, in bnxt_hwrm_vnic_set_tpa() argument
6403 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); in bnxt_hwrm_vnic_set_tpa()
6408 u16 mss = bp->dev->mtu - 40; in bnxt_hwrm_vnic_set_tpa()
6439 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_vnic_set_tpa()
6441 max_aggs = bp->max_tpa; in bnxt_hwrm_vnic_set_tpa()
6449 bnxt_hwrm_vnic_update_tunl_tpa(bp, req); in bnxt_hwrm_vnic_set_tpa()
6453 return hwrm_req_send(bp, req); in bnxt_hwrm_vnic_set_tpa()
6456 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) in bnxt_cp_ring_from_grp() argument
6460 grp_info = &bp->grp_info[ring->grp_idx]; in bnxt_cp_ring_from_grp()
6464 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) in bnxt_cp_ring_for_rx() argument
6466 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_cp_ring_for_rx()
6469 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); in bnxt_cp_ring_for_rx()
6472 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) in bnxt_cp_ring_for_tx() argument
6474 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_cp_ring_for_tx()
6477 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); in bnxt_cp_ring_for_tx()
6480 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) in bnxt_alloc_rss_indir_tbl() argument
6484 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_alloc_rss_indir_tbl()
6489 bp->rss_indir_tbl_entries = entries; in bnxt_alloc_rss_indir_tbl()
6490 bp->rss_indir_tbl = in bnxt_alloc_rss_indir_tbl()
6491 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); in bnxt_alloc_rss_indir_tbl()
6492 if (!bp->rss_indir_tbl) in bnxt_alloc_rss_indir_tbl()
6498 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, in bnxt_set_dflt_rss_indir_tbl() argument
6504 if (!bp->rx_nr_rings) in bnxt_set_dflt_rss_indir_tbl()
6507 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_set_dflt_rss_indir_tbl()
6508 max_rings = bp->rx_nr_rings - 1; in bnxt_set_dflt_rss_indir_tbl()
6510 max_rings = bp->rx_nr_rings; in bnxt_set_dflt_rss_indir_tbl()
6512 max_entries = bnxt_get_rxfh_indir_size(bp->dev); in bnxt_set_dflt_rss_indir_tbl()
6516 rss_indir_tbl = &bp->rss_indir_tbl[0]; in bnxt_set_dflt_rss_indir_tbl()
6521 pad = bp->rss_indir_tbl_entries - max_entries; in bnxt_set_dflt_rss_indir_tbl()
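The default RSS indirection table above stripes entries round-robin across the active RX rings (ethtool_rxfh_indir_default() is essentially i % n_rings), then zero-pads any tail entries so a stale slot can never point past the ring count. A sketch of that fill, assuming the same semantics:

#include <stdint.h>
#include <string.h>

static void fill_dflt_indir_tbl(uint16_t *tbl, int tbl_entries,
				int used_entries, int max_rings)
{
	for (int i = 0; i < used_entries; i++)
		tbl[i] = i % max_rings;            /* round-robin striping */

	if (tbl_entries > used_entries)            /* pad the tail with ring 0 */
		memset(&tbl[used_entries], 0,
		       (tbl_entries - used_entries) * sizeof(tbl[0]));
}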
6526 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) in bnxt_get_max_rss_ring() argument
6530 if (!bp->rss_indir_tbl) in bnxt_get_max_rss_ring()
6533 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); in bnxt_get_max_rss_ring()
6535 max_ring = max(max_ring, bp->rss_indir_tbl[i]); in bnxt_get_max_rss_ring()
6539 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) in bnxt_get_nr_rss_ctxs() argument
6541 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_get_nr_rss_ctxs()
6547 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_get_nr_rss_ctxs()
6552 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) in bnxt_fill_hw_rss_tbl() argument
6560 j = bp->rss_indir_tbl[i]; in bnxt_fill_hw_rss_tbl()
6565 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, in bnxt_fill_hw_rss_tbl_p5() argument
6572 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); in bnxt_fill_hw_rss_tbl_p5()
6578 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); in bnxt_fill_hw_rss_tbl_p5()
6582 j = bp->rss_indir_tbl[i]; in bnxt_fill_hw_rss_tbl_p5()
6583 rxr = &bp->rx_ring[j]; in bnxt_fill_hw_rss_tbl_p5()
6587 ring_id = bnxt_cp_ring_for_rx(bp, rxr); in bnxt_fill_hw_rss_tbl_p5()
6593 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, in __bnxt_hwrm_vnic_set_rss() argument
6596 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_vnic_set_rss()
6597 bnxt_fill_hw_rss_tbl_p5(bp, vnic); in __bnxt_hwrm_vnic_set_rss()
6598 if (bp->flags & BNXT_FLAG_CHIP_P7) in __bnxt_hwrm_vnic_set_rss()
6601 bnxt_fill_hw_rss_tbl(bp, vnic); in __bnxt_hwrm_vnic_set_rss()
6604 if (bp->rss_hash_delta) { in __bnxt_hwrm_vnic_set_rss()
6605 req->hash_type = cpu_to_le32(bp->rss_hash_delta); in __bnxt_hwrm_vnic_set_rss()
6606 if (bp->rss_hash_cfg & bp->rss_hash_delta) in __bnxt_hwrm_vnic_set_rss()
6611 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); in __bnxt_hwrm_vnic_set_rss()
6618 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, in bnxt_hwrm_vnic_set_rss() argument
6624 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || in bnxt_hwrm_vnic_set_rss()
6628 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); in bnxt_hwrm_vnic_set_rss()
6633 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); in bnxt_hwrm_vnic_set_rss()
6635 return hwrm_req_send(bp, req); in bnxt_hwrm_vnic_set_rss()
6638 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, in bnxt_hwrm_vnic_set_rss_p5() argument
6646 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); in bnxt_hwrm_vnic_set_rss_p5()
6652 return hwrm_req_send(bp, req); in bnxt_hwrm_vnic_set_rss_p5()
6654 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); in bnxt_hwrm_vnic_set_rss_p5()
6656 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); in bnxt_hwrm_vnic_set_rss_p5()
6658 hwrm_req_hold(bp, req); in bnxt_hwrm_vnic_set_rss_p5()
6663 rc = hwrm_req_send(bp, req); in bnxt_hwrm_vnic_set_rss_p5()
6669 hwrm_req_drop(bp, req); in bnxt_hwrm_vnic_set_rss_p5()
6673 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) in bnxt_hwrm_update_rss_hash_cfg() argument
6675 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_hwrm_update_rss_hash_cfg()
6679 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) in bnxt_hwrm_update_rss_hash_cfg()
6685 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_update_rss_hash_cfg()
6686 if (!hwrm_req_send(bp, req)) { in bnxt_hwrm_update_rss_hash_cfg()
6687 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; in bnxt_hwrm_update_rss_hash_cfg()
6688 bp->rss_hash_delta = 0; in bnxt_hwrm_update_rss_hash_cfg()
6690 hwrm_req_drop(bp, req); in bnxt_hwrm_update_rss_hash_cfg()
6693 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) in bnxt_hwrm_vnic_set_hds() argument
6695 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh; in bnxt_hwrm_vnic_set_hds()
6699 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); in bnxt_hwrm_vnic_set_hds()
6705 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); in bnxt_hwrm_vnic_set_hds()
6707 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) { in bnxt_hwrm_vnic_set_hds()
6715 return hwrm_req_send(bp, req); in bnxt_hwrm_vnic_set_hds()
6718 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, in bnxt_hwrm_vnic_ctx_free_one() argument
6724 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) in bnxt_hwrm_vnic_ctx_free_one()
6730 hwrm_req_send(bp, req); in bnxt_hwrm_vnic_ctx_free_one()
6734 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) in bnxt_hwrm_vnic_ctx_free() argument
6738 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_hwrm_vnic_ctx_free()
6739 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; in bnxt_hwrm_vnic_ctx_free()
6743 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); in bnxt_hwrm_vnic_ctx_free()
6746 bp->rsscos_nr_ctxs = 0; in bnxt_hwrm_vnic_ctx_free()
6749 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, in bnxt_hwrm_vnic_ctx_alloc() argument
6756 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); in bnxt_hwrm_vnic_ctx_alloc()
6760 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_vnic_ctx_alloc()
6761 rc = hwrm_req_send(bp, req); in bnxt_hwrm_vnic_ctx_alloc()
6765 hwrm_req_drop(bp, req); in bnxt_hwrm_vnic_ctx_alloc()
6770 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) in bnxt_get_roce_vnic_mode() argument
6772 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) in bnxt_get_roce_vnic_mode()
6777 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) in bnxt_hwrm_vnic_cfg() argument
6779 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_hwrm_vnic_cfg()
6785 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); in bnxt_hwrm_vnic_cfg()
6789 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_vnic_cfg()
6790 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; in bnxt_hwrm_vnic_cfg()
6795 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); in bnxt_hwrm_vnic_cfg()
6816 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && in bnxt_hwrm_vnic_cfg()
6828 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_hwrm_vnic_cfg()
6829 ring = bp->rx_nr_rings - 1; in bnxt_hwrm_vnic_cfg()
6831 grp_idx = bp->rx_ring[ring].bnapi->index; in bnxt_hwrm_vnic_cfg()
6832 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); in bnxt_hwrm_vnic_cfg()
6835 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; in bnxt_hwrm_vnic_cfg()
6840 if (BNXT_VF(bp)) in bnxt_hwrm_vnic_cfg()
6841 def_vlan = bp->vf.vlan; in bnxt_hwrm_vnic_cfg()
6843 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) in bnxt_hwrm_vnic_cfg()
6845 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) in bnxt_hwrm_vnic_cfg()
6846 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); in bnxt_hwrm_vnic_cfg()
6848 return hwrm_req_send(bp, req); in bnxt_hwrm_vnic_cfg()
6851 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, in bnxt_hwrm_vnic_free_one() argument
6857 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) in bnxt_hwrm_vnic_free_one()
6862 hwrm_req_send(bp, req); in bnxt_hwrm_vnic_free_one()
6867 static void bnxt_hwrm_vnic_free(struct bnxt *bp) in bnxt_hwrm_vnic_free() argument
6871 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_vnic_free()
6872 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); in bnxt_hwrm_vnic_free()
6875 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, in bnxt_hwrm_vnic_alloc() argument
6884 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); in bnxt_hwrm_vnic_alloc()
6888 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_vnic_alloc()
6893 grp_idx = bp->rx_ring[i].bnapi->index; in bnxt_hwrm_vnic_alloc()
6894 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { in bnxt_hwrm_vnic_alloc()
6895 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", in bnxt_hwrm_vnic_alloc()
6899 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; in bnxt_hwrm_vnic_alloc()
6908 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_vnic_alloc()
6909 rc = hwrm_req_send(bp, req); in bnxt_hwrm_vnic_alloc()
6912 hwrm_req_drop(bp, req); in bnxt_hwrm_vnic_alloc()
6916 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) in bnxt_hwrm_vnic_qcaps() argument
6922 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); in bnxt_hwrm_vnic_qcaps()
6923 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; in bnxt_hwrm_vnic_qcaps()
6924 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; in bnxt_hwrm_vnic_qcaps()
6925 if (bp->hwrm_spec_code < 0x10600) in bnxt_hwrm_vnic_qcaps()
6928 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); in bnxt_hwrm_vnic_qcaps()
6932 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_vnic_qcaps()
6933 rc = hwrm_req_send(bp, req); in bnxt_hwrm_vnic_qcaps()
6937 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_hwrm_vnic_qcaps()
6939 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; in bnxt_hwrm_vnic_qcaps()
6942 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; in bnxt_hwrm_vnic_qcaps()
6948 (BNXT_CHIP_P5(bp) && in bnxt_hwrm_vnic_qcaps()
6949 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) in bnxt_hwrm_vnic_qcaps()
6950 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; in bnxt_hwrm_vnic_qcaps()
6952 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; in bnxt_hwrm_vnic_qcaps()
6954 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; in bnxt_hwrm_vnic_qcaps()
6955 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); in bnxt_hwrm_vnic_qcaps()
6956 if (bp->max_tpa_v2) { in bnxt_hwrm_vnic_qcaps()
6957 if (BNXT_CHIP_P5(bp)) in bnxt_hwrm_vnic_qcaps()
6958 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; in bnxt_hwrm_vnic_qcaps()
6960 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; in bnxt_hwrm_vnic_qcaps()
6963 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; in bnxt_hwrm_vnic_qcaps()
6965 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; in bnxt_hwrm_vnic_qcaps()
6967 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; in bnxt_hwrm_vnic_qcaps()
6969 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; in bnxt_hwrm_vnic_qcaps()
6971 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; in bnxt_hwrm_vnic_qcaps()
6973 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH; in bnxt_hwrm_vnic_qcaps()
6975 hwrm_req_drop(bp, req); in bnxt_hwrm_vnic_qcaps()
6979 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) in bnxt_hwrm_ring_grp_alloc() argument
6986 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_ring_grp_alloc()
6989 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); in bnxt_hwrm_ring_grp_alloc()
6993 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_ring_grp_alloc()
6994 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_grp_alloc()
6995 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; in bnxt_hwrm_ring_grp_alloc()
6997 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
6998 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
6999 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
7000 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); in bnxt_hwrm_ring_grp_alloc()
7002 rc = hwrm_req_send(bp, req); in bnxt_hwrm_ring_grp_alloc()
7007 bp->grp_info[grp_idx].fw_grp_id = in bnxt_hwrm_ring_grp_alloc()
7010 hwrm_req_drop(bp, req); in bnxt_hwrm_ring_grp_alloc()
7014 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) in bnxt_hwrm_ring_grp_free() argument
7019 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_ring_grp_free()
7022 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) in bnxt_hwrm_ring_grp_free()
7025 hwrm_req_hold(bp, req); in bnxt_hwrm_ring_grp_free()
7026 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_grp_free()
7027 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) in bnxt_hwrm_ring_grp_free()
7030 cpu_to_le32(bp->grp_info[i].fw_grp_id); in bnxt_hwrm_ring_grp_free()
7032 hwrm_req_send(bp, req); in bnxt_hwrm_ring_grp_free()
7033 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_grp_free()
7035 hwrm_req_drop(bp, req); in bnxt_hwrm_ring_grp_free()
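bnxt_hwrm_ring_grp_free() shows another recurring HWRM idiom: hold one request across the whole loop, rewrite the group ID and resend per iteration, and drop once at the end, skipping slots already marked INVALID_HW_RING_ID. The shape of that pattern with stubbed transport calls (local stand-ins, not the driver's API):

struct grp_free_req { unsigned int ring_group_id; };

static void req_hold(struct grp_free_req *r) { (void)r; }
static void req_send(struct grp_free_req *r) { (void)r; }
static void req_drop(struct grp_free_req *r) { (void)r; }

#define INVALID_ID 0xffffu

static void free_all_ring_grps(unsigned int *grp_ids, int n)
{
	struct grp_free_req req;

	req_hold(&req);                      /* keep the buffer across sends */
	for (int i = 0; i < n; i++) {
		if (grp_ids[i] == INVALID_ID)
			continue;            /* never allocated, nothing to free */
		req.ring_group_id = grp_ids[i];
		req_send(&req);
		grp_ids[i] = INVALID_ID;     /* mark freed so retries are safe */
	}
	req_drop(&req);
}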
7038 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type, in bnxt_set_rx_ring_params_p5() argument
7042 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx]; in bnxt_set_rx_ring_params_p5()
7052 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); in bnxt_set_rx_ring_params_p5()
7062 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, in hwrm_ring_alloc_send_msg() argument
7073 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); in hwrm_ring_alloc_send_msg()
7099 grp_info = &bp->grp_info[ring->grp_idx]; in hwrm_ring_alloc_send_msg()
7100 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); in hwrm_ring_alloc_send_msg()
7101 req->length = cpu_to_le32(bp->tx_ring_mask + 1); in hwrm_ring_alloc_send_msg()
7104 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) in hwrm_ring_alloc_send_msg()
7107 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg) in hwrm_ring_alloc_send_msg()
7116 cpu_to_le32(bp->rx_ring_mask + 1) : in hwrm_ring_alloc_send_msg()
7117 cpu_to_le32(bp->rx_agg_ring_mask + 1); in hwrm_ring_alloc_send_msg()
7118 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in hwrm_ring_alloc_send_msg()
7119 bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring); in hwrm_ring_alloc_send_msg()
7123 req->length = cpu_to_le32(bp->cp_ring_mask + 1); in hwrm_ring_alloc_send_msg()
7124 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in hwrm_ring_alloc_send_msg()
7126 grp_info = &bp->grp_info[map_index]; in hwrm_ring_alloc_send_msg()
7137 req->length = cpu_to_le32(bp->cp_ring_mask + 1); in hwrm_ring_alloc_send_msg()
7141 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", in hwrm_ring_alloc_send_msg()
7146 resp = hwrm_req_hold(bp, req); in hwrm_ring_alloc_send_msg()
7147 rc = hwrm_req_send(bp, req); in hwrm_ring_alloc_send_msg()
7150 hwrm_req_drop(bp, req); in hwrm_ring_alloc_send_msg()
7154 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", in hwrm_ring_alloc_send_msg()
7162 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) in bnxt_hwrm_set_async_event_cr() argument
7166 if (BNXT_PF(bp)) { in bnxt_hwrm_set_async_event_cr()
7169 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); in bnxt_hwrm_set_async_event_cr()
7176 return hwrm_req_send(bp, req); in bnxt_hwrm_set_async_event_cr()
7180 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); in bnxt_hwrm_set_async_event_cr()
7187 return hwrm_req_send(bp, req); in bnxt_hwrm_set_async_event_cr()
7191 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, in bnxt_set_db_mask() argument
7196 db->db_ring_mask = bp->tx_ring_mask; in bnxt_set_db_mask()
7199 db->db_ring_mask = bp->rx_ring_mask; in bnxt_set_db_mask()
7202 db->db_ring_mask = bp->rx_agg_ring_mask; in bnxt_set_db_mask()
7206 db->db_ring_mask = bp->cp_ring_mask; in bnxt_set_db_mask()
7209 if (bp->flags & BNXT_FLAG_CHIP_P7) { in bnxt_set_db_mask()
7215 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, in bnxt_set_db() argument
7218 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_set_db()
7236 if (bp->flags & BNXT_FLAG_CHIP_P7) in bnxt_set_db()
7239 db->doorbell = bp->bar1 + bp->db_offset; in bnxt_set_db()
7241 db->doorbell = bp->bar1 + map_idx * 0x80; in bnxt_set_db()
7255 bnxt_set_db_mask(bp, db, ring_type); in bnxt_set_db()
7258 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, in bnxt_hwrm_rx_ring_alloc() argument
7267 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); in bnxt_hwrm_rx_ring_alloc()
7271 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); in bnxt_hwrm_rx_ring_alloc()
7272 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_rx_ring_alloc()
7277 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp, in bnxt_hwrm_rx_agg_ring_alloc() argument
7286 map_idx = grp_idx + bp->rx_nr_rings; in bnxt_hwrm_rx_agg_ring_alloc()
7287 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); in bnxt_hwrm_rx_agg_ring_alloc()
7291 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, in bnxt_hwrm_rx_agg_ring_alloc()
7293 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in bnxt_hwrm_rx_agg_ring_alloc()
7294 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_hwrm_rx_agg_ring_alloc()
7295 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_rx_agg_ring_alloc()
7300 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp, in bnxt_hwrm_cp_ring_alloc_p5() argument
7311 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); in bnxt_hwrm_cp_ring_alloc_p5()
7314 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); in bnxt_hwrm_cp_ring_alloc_p5()
7315 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_hwrm_cp_ring_alloc_p5()
7319 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp, in bnxt_hwrm_tx_ring_alloc() argument
7326 rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx); in bnxt_hwrm_tx_ring_alloc()
7329 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id); in bnxt_hwrm_tx_ring_alloc()
7333 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) in bnxt_hwrm_ring_alloc() argument
7335 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); in bnxt_hwrm_ring_alloc()
7339 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_ring_alloc()
7343 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7344 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_alloc()
7350 vector = bp->irq_tbl[map_idx].vector; in bnxt_hwrm_ring_alloc()
7352 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); in bnxt_hwrm_ring_alloc()
7357 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
7358 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_hwrm_ring_alloc()
7360 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_ring_alloc()
7363 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
7365 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); in bnxt_hwrm_ring_alloc()
7369 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7370 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; in bnxt_hwrm_ring_alloc()
7372 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_ring_alloc()
7373 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); in bnxt_hwrm_ring_alloc()
7377 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i); in bnxt_hwrm_ring_alloc()
7382 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7383 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_hwrm_ring_alloc()
7385 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); in bnxt_hwrm_ring_alloc()
7390 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_hwrm_ring_alloc()
7391 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_ring_alloc()
7392 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); in bnxt_hwrm_ring_alloc()
7399 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7400 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]); in bnxt_hwrm_ring_alloc()
7409 static void bnxt_cancel_dim(struct bnxt *bp) in bnxt_cancel_dim() argument
7416 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) in bnxt_cancel_dim()
7421 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_cancel_dim()
7422 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_cancel_dim()
7429 static int hwrm_ring_free_send_msg(struct bnxt *bp, in hwrm_ring_free_send_msg() argument
7438 if (BNXT_NO_FW_ACCESS(bp)) in hwrm_ring_free_send_msg()
7441 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); in hwrm_ring_free_send_msg()
7449 resp = hwrm_req_hold(bp, req); in hwrm_ring_free_send_msg()
7450 rc = hwrm_req_send(bp, req); in hwrm_ring_free_send_msg()
7452 hwrm_req_drop(bp, req); in hwrm_ring_free_send_msg()
7455 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", in hwrm_ring_free_send_msg()
7462 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, in bnxt_hwrm_tx_ring_free() argument
7472 cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) : in bnxt_hwrm_tx_ring_free()
7474 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, in bnxt_hwrm_tx_ring_free()
7479 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp, in bnxt_hwrm_rx_ring_free() argument
7490 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); in bnxt_hwrm_rx_ring_free()
7491 hwrm_ring_free_send_msg(bp, ring, in bnxt_hwrm_rx_ring_free()
7496 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_rx_ring_free()
7499 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp, in bnxt_hwrm_rx_agg_ring_free() argument
7507 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_rx_agg_ring_free()
7515 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); in bnxt_hwrm_rx_agg_ring_free()
7516 hwrm_ring_free_send_msg(bp, ring, type, in bnxt_hwrm_rx_agg_ring_free()
7520 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_rx_agg_ring_free()
7523 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp, in bnxt_hwrm_cp_ring_free() argument
7532 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL, in bnxt_hwrm_cp_ring_free()
7537 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) in bnxt_clear_one_cp_ring() argument
7545 for (i = 0; i < bp->cp_nr_pages; i++) in bnxt_clear_one_cp_ring()
7550 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) in bnxt_hwrm_ring_free() argument
7555 if (!bp->bnapi) in bnxt_hwrm_ring_free()
7558 for (i = 0; i < bp->tx_nr_rings; i++) in bnxt_hwrm_ring_free()
7559 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path); in bnxt_hwrm_ring_free()
7561 bnxt_cancel_dim(bp); in bnxt_hwrm_ring_free()
7562 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_free()
7563 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); in bnxt_hwrm_ring_free()
7564 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path); in bnxt_hwrm_ring_free()
7571 bnxt_disable_int_sync(bp); in bnxt_hwrm_ring_free()
7573 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_ring_free()
7577 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_free()
7578 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_free()
7584 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]); in bnxt_hwrm_ring_free()
7588 hwrm_ring_free_send_msg(bp, ring, type, in bnxt_hwrm_ring_free()
7591 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_free()
7596 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7598 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7601 static int bnxt_hwrm_get_rings(struct bnxt *bp) in bnxt_hwrm_get_rings() argument
7603 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_hwrm_get_rings()
7608 if (bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_get_rings()
7611 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); in bnxt_hwrm_get_rings()
7616 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_get_rings()
7617 rc = hwrm_req_send(bp, req); in bnxt_hwrm_get_rings()
7619 hwrm_req_drop(bp, req); in bnxt_hwrm_get_rings()
7624 if (BNXT_NEW_RM(bp)) { in bnxt_hwrm_get_rings()
7635 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_get_rings()
7639 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_hwrm_get_rings()
7642 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); in bnxt_hwrm_get_rings()
7645 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_hwrm_get_rings()
7657 hwrm_req_drop(bp, req); in bnxt_hwrm_get_rings()
7661 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) in __bnxt_hwrm_get_tx_rings() argument
7667 if (bp->hwrm_spec_code < 0x10601) in __bnxt_hwrm_get_tx_rings()
7670 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); in __bnxt_hwrm_get_tx_rings()
7675 resp = hwrm_req_hold(bp, req); in __bnxt_hwrm_get_tx_rings()
7676 rc = hwrm_req_send(bp, req); in __bnxt_hwrm_get_tx_rings()
7680 hwrm_req_drop(bp, req); in __bnxt_hwrm_get_tx_rings()
7684 static bool bnxt_rfs_supported(struct bnxt *bp);
7687 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in __bnxt_hwrm_reserve_pf_rings() argument
7692 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) in __bnxt_hwrm_reserve_pf_rings()
7698 if (BNXT_NEW_RM(bp)) { in __bnxt_hwrm_reserve_pf_rings()
7701 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_reserve_pf_rings()
7716 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_reserve_pf_rings()
7731 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in __bnxt_hwrm_reserve_vf_rings() argument
7736 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) in __bnxt_hwrm_reserve_vf_rings()
7744 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_reserve_vf_rings()
7759 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_reserve_vf_rings()
7773 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_hwrm_reserve_pf_rings() argument
7778 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); in bnxt_hwrm_reserve_pf_rings()
7783 hwrm_req_drop(bp, req); in bnxt_hwrm_reserve_pf_rings()
7787 rc = hwrm_req_send(bp, req); in bnxt_hwrm_reserve_pf_rings()
7791 if (bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_reserve_pf_rings()
7792 bp->hw_resc.resv_tx_rings = hwr->tx; in bnxt_hwrm_reserve_pf_rings()
7794 return bnxt_hwrm_get_rings(bp); in bnxt_hwrm_reserve_pf_rings()
7798 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_hwrm_reserve_vf_rings() argument
7803 if (!BNXT_NEW_RM(bp)) { in bnxt_hwrm_reserve_vf_rings()
7804 bp->hw_resc.resv_tx_rings = hwr->tx; in bnxt_hwrm_reserve_vf_rings()
7808 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); in bnxt_hwrm_reserve_vf_rings()
7812 rc = hwrm_req_send(bp, req); in bnxt_hwrm_reserve_vf_rings()
7816 return bnxt_hwrm_get_rings(bp); in bnxt_hwrm_reserve_vf_rings()
7819 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_hwrm_reserve_rings() argument
7821 if (BNXT_PF(bp)) in bnxt_hwrm_reserve_rings()
7822 return bnxt_hwrm_reserve_pf_rings(bp, hwr); in bnxt_hwrm_reserve_rings()
7824 return bnxt_hwrm_reserve_vf_rings(bp, hwr); in bnxt_hwrm_reserve_rings()
7827 int bnxt_nq_rings_in_use(struct bnxt *bp) in bnxt_nq_rings_in_use() argument
7829 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); in bnxt_nq_rings_in_use()
7832 static int bnxt_cp_rings_in_use(struct bnxt *bp) in bnxt_cp_rings_in_use() argument
7836 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_cp_rings_in_use()
7837 return bnxt_nq_rings_in_use(bp); in bnxt_cp_rings_in_use()
7839 cp = bp->tx_nr_rings + bp->rx_nr_rings; in bnxt_cp_rings_in_use()
7843 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) in bnxt_get_func_stat_ctxs() argument
7845 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); in bnxt_get_func_stat_ctxs()
7848 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_get_total_rss_ctxs() argument
7852 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_get_total_rss_ctxs()
7853 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); in bnxt_get_total_rss_ctxs()
7855 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) in bnxt_get_total_rss_ctxs()
7859 if (BNXT_VF(bp)) in bnxt_get_total_rss_ctxs()
7861 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) in bnxt_get_total_rss_ctxs()
7869 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) in bnxt_check_rss_tbl_no_rmgr() argument
7871 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_check_rss_tbl_no_rmgr()
7874 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { in bnxt_check_rss_tbl_no_rmgr()
7875 hw_resc->resv_rx_rings = bp->rx_nr_rings; in bnxt_check_rss_tbl_no_rmgr()
7876 if (!netif_is_rxfh_configured(bp->dev)) in bnxt_check_rss_tbl_no_rmgr()
7877 bnxt_set_dflt_rss_indir_tbl(bp, NULL); in bnxt_check_rss_tbl_no_rmgr()
7881 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) in bnxt_get_total_vnics() argument
7883 if (bp->flags & BNXT_FLAG_RFS) { in bnxt_get_total_vnics()
7884 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) in bnxt_get_total_vnics()
7885 return 2 + bp->num_rss_ctx; in bnxt_get_total_vnics()
7886 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_get_total_vnics()
7892 static bool bnxt_need_reserve_rings(struct bnxt *bp) in bnxt_need_reserve_rings() argument
7894 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_need_reserve_rings()
7895 int cp = bnxt_cp_rings_in_use(bp); in bnxt_need_reserve_rings()
7896 int nq = bnxt_nq_rings_in_use(bp); in bnxt_need_reserve_rings()
7897 int rx = bp->rx_nr_rings, stat; in bnxt_need_reserve_rings()
7905 if (!BNXT_NEW_RM(bp)) in bnxt_need_reserve_rings()
7906 bnxt_check_rss_tbl_no_rmgr(bp); in bnxt_need_reserve_rings()
7908 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && in bnxt_need_reserve_rings()
7909 bp->hwrm_spec_code >= 0x10601) in bnxt_need_reserve_rings()
7912 if (!BNXT_NEW_RM(bp)) in bnxt_need_reserve_rings()
7915 vnic = bnxt_get_total_vnics(bp, rx); in bnxt_need_reserve_rings()
7917 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_need_reserve_rings()
7919 stat = bnxt_get_func_stat_ctxs(bp); in bnxt_need_reserve_rings()
7923 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) in bnxt_need_reserve_rings()
7925 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && in bnxt_need_reserve_rings()
7931 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_copy_reserved_rings() argument
7933 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_copy_reserved_rings()
7936 if (BNXT_NEW_RM(bp)) { in bnxt_copy_reserved_rings()
7939 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_copy_reserved_rings()
7948 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_rings_ok() argument
7951 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); in bnxt_rings_ok()
7954 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7956 static int __bnxt_reserve_rings(struct bnxt *bp) in __bnxt_reserve_rings() argument
7960 int cp = bp->cp_nr_rings; in __bnxt_reserve_rings()
7965 if (!bnxt_need_reserve_rings(bp)) in __bnxt_reserve_rings()
7968 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { in __bnxt_reserve_rings()
7969 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); in __bnxt_reserve_rings()
7971 bnxt_set_ulp_stat_ctxs(bp, 0); in __bnxt_reserve_rings()
7973 if (ulp_msix > bp->ulp_num_msix_want) in __bnxt_reserve_rings()
7974 ulp_msix = bp->ulp_num_msix_want; in __bnxt_reserve_rings()
7977 hwr.cp = bnxt_nq_rings_in_use(bp); in __bnxt_reserve_rings()
7980 hwr.tx = bp->tx_nr_rings; in __bnxt_reserve_rings()
7981 hwr.rx = bp->rx_nr_rings; in __bnxt_reserve_rings()
7982 if (bp->flags & BNXT_FLAG_SHARED_RINGS) in __bnxt_reserve_rings()
7984 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in __bnxt_reserve_rings()
7987 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); in __bnxt_reserve_rings()
7989 if (bp->flags & BNXT_FLAG_AGG_RINGS) in __bnxt_reserve_rings()
7991 hwr.grp = bp->rx_nr_rings; in __bnxt_reserve_rings()
7992 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); in __bnxt_reserve_rings()
7993 hwr.stat = bnxt_get_func_stat_ctxs(bp); in __bnxt_reserve_rings()
7994 old_rx_rings = bp->hw_resc.resv_rx_rings; in __bnxt_reserve_rings()
7996 rc = bnxt_hwrm_reserve_rings(bp, &hwr); in __bnxt_reserve_rings()
8000 bnxt_copy_reserved_rings(bp, &hwr); in __bnxt_reserve_rings()
8003 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in __bnxt_reserve_rings()
8007 if (netif_running(bp->dev)) in __bnxt_reserve_rings()
8010 bp->flags &= ~BNXT_FLAG_AGG_RINGS; in __bnxt_reserve_rings()
8011 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; in __bnxt_reserve_rings()
8012 bp->dev->hw_features &= ~NETIF_F_LRO; in __bnxt_reserve_rings()
8013 bp->dev->features &= ~NETIF_F_LRO; in __bnxt_reserve_rings()
8014 bnxt_set_ring_params(bp); in __bnxt_reserve_rings()
8018 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); in __bnxt_reserve_rings()
8019 if (bnxt_ulp_registered(bp->edev) && in __bnxt_reserve_rings()
8020 hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) in __bnxt_reserve_rings()
8021 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); in __bnxt_reserve_rings()
8023 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); in __bnxt_reserve_rings()
8024 if (bp->flags & BNXT_FLAG_AGG_RINGS) in __bnxt_reserve_rings()
8026 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); in __bnxt_reserve_rings()
8028 if (hwr.tx != bp->tx_nr_rings) { in __bnxt_reserve_rings()
8029 netdev_warn(bp->dev, in __bnxt_reserve_rings()
8031 hwr.tx, bp->tx_nr_rings); in __bnxt_reserve_rings()
8033 bp->tx_nr_rings = hwr.tx; in __bnxt_reserve_rings()
8038 if (rx_rings != bp->rx_nr_rings) { in __bnxt_reserve_rings()
8039 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", in __bnxt_reserve_rings()
8040 rx_rings, bp->rx_nr_rings); in __bnxt_reserve_rings()
8041 if (netif_is_rxfh_configured(bp->dev) && in __bnxt_reserve_rings()
8042 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != in __bnxt_reserve_rings()
8043 bnxt_get_nr_rss_ctxs(bp, rx_rings) || in __bnxt_reserve_rings()
8044 bnxt_get_max_rss_ring(bp) >= rx_rings)) { in __bnxt_reserve_rings()
8045 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); in __bnxt_reserve_rings()
8046 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; in __bnxt_reserve_rings()
8049 bp->rx_nr_rings = rx_rings; in __bnxt_reserve_rings()
8050 bp->cp_nr_rings = hwr.cp; in __bnxt_reserve_rings()
8052 if (!bnxt_rings_ok(bp, &hwr)) in __bnxt_reserve_rings()
8055 if (old_rx_rings != bp->hw_resc.resv_rx_rings && in __bnxt_reserve_rings()
8056 !netif_is_rxfh_configured(bp->dev)) in __bnxt_reserve_rings()
8057 bnxt_set_dflt_rss_indir_tbl(bp, NULL); in __bnxt_reserve_rings()
8059 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { in __bnxt_reserve_rings()
8063 hw_resc = &bp->hw_resc; in __bnxt_reserve_rings()
8064 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; in __bnxt_reserve_rings()
8066 bnxt_set_ulp_msix_num(bp, ulp_msix); in __bnxt_reserve_rings()
8067 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; in __bnxt_reserve_rings()
8068 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); in __bnxt_reserve_rings()
8069 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); in __bnxt_reserve_rings()
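The function above follows the driver's reserve-then-trim pattern: request ring counts from firmware, then clamp the driver's own counts to whatever was actually granted, warning when the grant falls short. A minimal stand-alone model of that clamp step (all names hypothetical, not driver code):

	/* Hypothetical model of the "reserve, then trim to the grant"
	 * step in __bnxt_reserve_rings(); not driver code. */
	#include <stdio.h>

	struct rings { int requested, granted; };

	static int clamp_to_grant(struct rings *r, const char *name)
	{
		if (r->granted < r->requested)
			fprintf(stderr,
				"Able to reserve only %d out of %d requested %s rings\n",
				r->granted, r->requested, name);
		return r->granted;	/* driver state adopts the grant */
	}

	int main(void)
	{
		struct rings tx = { .requested = 8, .granted = 6 };

		printf("tx_nr_rings = %d\n", clamp_to_grant(&tx, "TX"));
		return 0;
	}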
8075 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_hwrm_check_vf_rings() argument
8080 if (!BNXT_NEW_RM(bp)) in bnxt_hwrm_check_vf_rings()
8083 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); in bnxt_hwrm_check_vf_rings()
8090 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_check_vf_rings()
8094 return hwrm_req_send_silent(bp, req); in bnxt_hwrm_check_vf_rings()
8097 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_hwrm_check_pf_rings() argument
8102 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); in bnxt_hwrm_check_pf_rings()
8104 if (BNXT_NEW_RM(bp)) { in bnxt_hwrm_check_pf_rings()
8109 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_check_pf_rings()
8117 return hwrm_req_send_silent(bp, req); in bnxt_hwrm_check_pf_rings()
8120 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) in bnxt_hwrm_check_rings() argument
8122 if (bp->hwrm_spec_code < 0x10801) in bnxt_hwrm_check_rings()
8125 if (BNXT_PF(bp)) in bnxt_hwrm_check_rings()
8126 return bnxt_hwrm_check_pf_rings(bp, hwr); in bnxt_hwrm_check_rings()
8128 return bnxt_hwrm_check_vf_rings(bp, hwr); in bnxt_hwrm_check_rings()
8131 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) in bnxt_hwrm_coal_params_qcaps() argument
8133 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in bnxt_hwrm_coal_params_qcaps()
8148 if (bp->hwrm_spec_code < 0x10902) in bnxt_hwrm_coal_params_qcaps()
8151 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) in bnxt_hwrm_coal_params_qcaps()
8154 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_coal_params_qcaps()
8155 rc = hwrm_req_send_silent(bp, req); in bnxt_hwrm_coal_params_qcaps()
8175 hwrm_req_drop(bp, req); in bnxt_hwrm_coal_params_qcaps()
8178 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) in bnxt_usec_to_coal_tmr() argument
8180 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in bnxt_usec_to_coal_tmr()
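The matcher elides the conversion body, but the idea is a straight scale from microseconds into the device timer units reported by HWRM_RING_AGGINT_QCAPS. A runnable sketch of that arithmetic, assuming a nanosecond-granularity timer_units such as the commonly seen 80 ns default:

	/* Sketch of the usec -> device-timer-unit scaling behind
	 * bnxt_usec_to_coal_tmr(); the 80 ns unit is an assumed
	 * QCAPS default, used here only for illustration. */
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t usec_to_coal_tmr(uint16_t usec, uint16_t timer_units_ns)
	{
		return (uint16_t)((uint32_t)usec * 1000 / timer_units_ns);
	}

	int main(void)
	{
		/* 25 us of coalescing at 80 ns per tick -> 312 device ticks */
		printf("%u\n", usec_to_coal_tmr(25, 80));
		return 0;
	}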
8185 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, in bnxt_hwrm_set_coal_params() argument
8189 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in bnxt_hwrm_set_coal_params()
8208 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); in bnxt_hwrm_set_coal_params()
8226 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); in bnxt_hwrm_set_coal_params()
8241 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, in __bnxt_hwrm_set_coal_nq() argument
8246 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in __bnxt_hwrm_set_coal_nq()
8254 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); in __bnxt_hwrm_set_coal_nq()
8262 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; in __bnxt_hwrm_set_coal_nq()
8266 return hwrm_req_send(bp, req); in __bnxt_hwrm_set_coal_nq()
8269 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) in bnxt_hwrm_set_ring_coal() argument
8279 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); in bnxt_hwrm_set_ring_coal()
8287 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); in bnxt_hwrm_set_ring_coal()
8291 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); in bnxt_hwrm_set_ring_coal()
8293 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); in bnxt_hwrm_set_ring_coal()
8295 return hwrm_req_send(bp, req_rx); in bnxt_hwrm_set_ring_coal()
8299 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, in bnxt_hwrm_set_rx_coal() argument
8302 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); in bnxt_hwrm_set_rx_coal()
8305 return hwrm_req_send(bp, req); in bnxt_hwrm_set_rx_coal()
8309 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, in bnxt_hwrm_set_tx_coal() argument
8318 ring_id = bnxt_cp_ring_for_tx(bp, txr); in bnxt_hwrm_set_tx_coal()
8320 rc = hwrm_req_send(bp, req); in bnxt_hwrm_set_tx_coal()
8323 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_set_tx_coal()
8329 int bnxt_hwrm_set_coal(struct bnxt *bp) in bnxt_hwrm_set_coal() argument
8334 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); in bnxt_hwrm_set_coal()
8338 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); in bnxt_hwrm_set_coal()
8340 hwrm_req_drop(bp, req_rx); in bnxt_hwrm_set_coal()
8344 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); in bnxt_hwrm_set_coal()
8345 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); in bnxt_hwrm_set_coal()
8347 hwrm_req_hold(bp, req_rx); in bnxt_hwrm_set_coal()
8348 hwrm_req_hold(bp, req_tx); in bnxt_hwrm_set_coal()
8349 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_set_coal()
8350 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_set_coal()
8354 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); in bnxt_hwrm_set_coal()
8356 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); in bnxt_hwrm_set_coal()
8360 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_set_coal()
8364 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); in bnxt_hwrm_set_coal()
8369 hw_coal = &bp->rx_coal; in bnxt_hwrm_set_coal()
8371 hw_coal = &bp->tx_coal; in bnxt_hwrm_set_coal()
8372 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); in bnxt_hwrm_set_coal()
8374 hwrm_req_drop(bp, req_rx); in bnxt_hwrm_set_coal()
8375 hwrm_req_drop(bp, req_tx); in bnxt_hwrm_set_coal()
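bnxt_hwrm_set_coal() shows the driver's HWRM request idiom: init a request once, hold it so the buffer stays valid across multiple sends, tweak the ring id per iteration, and drop it at the end. A stand-alone model of that lifecycle; req_hold/req_send/req_drop are hypothetical stand-ins for the hwrm_req_* helpers:

	/* Stand-alone model of the init/hold/send-in-a-loop/drop HWRM
	 * idiom seen in bnxt_hwrm_set_coal(); helpers are hypothetical. */
	#include <stdio.h>

	struct req { int ring_id; int refs; };

	static void req_hold(struct req *r) { r->refs++; }	/* keep buffer valid across sends */
	static int  req_send(struct req *r) { printf("send ring %d\n", r->ring_id); return 0; }
	static void req_drop(struct req *r) { r->refs--; }	/* release when done */

	int main(void)
	{
		struct req coal = { 0 };
		int i, rc = 0;

		req_hold(&coal);
		for (i = 0; i < 4 && !rc; i++) {	/* one send per completion ring */
			coal.ring_id = i;
			rc = req_send(&coal);
		}
		req_drop(&coal);
		return rc;
	}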
8379 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) in bnxt_hwrm_stat_ctx_free() argument
8385 if (!bp->bnapi) in bnxt_hwrm_stat_ctx_free()
8388 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_hwrm_stat_ctx_free()
8391 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) in bnxt_hwrm_stat_ctx_free()
8393 if (BNXT_FW_MAJ(bp) <= 20) { in bnxt_hwrm_stat_ctx_free()
8394 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { in bnxt_hwrm_stat_ctx_free()
8395 hwrm_req_drop(bp, req); in bnxt_hwrm_stat_ctx_free()
8398 hwrm_req_hold(bp, req0); in bnxt_hwrm_stat_ctx_free()
8400 hwrm_req_hold(bp, req); in bnxt_hwrm_stat_ctx_free()
8401 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_free()
8402 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_stat_ctx_free()
8409 hwrm_req_send(bp, req0); in bnxt_hwrm_stat_ctx_free()
8411 hwrm_req_send(bp, req); in bnxt_hwrm_stat_ctx_free()
8416 hwrm_req_drop(bp, req); in bnxt_hwrm_stat_ctx_free()
8418 hwrm_req_drop(bp, req0); in bnxt_hwrm_stat_ctx_free()
8421 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) in bnxt_hwrm_stat_ctx_alloc() argument
8427 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_hwrm_stat_ctx_alloc()
8430 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); in bnxt_hwrm_stat_ctx_alloc()
8434 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); in bnxt_hwrm_stat_ctx_alloc()
8435 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); in bnxt_hwrm_stat_ctx_alloc()
8437 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_stat_ctx_alloc()
8438 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_alloc()
8439 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_stat_ctx_alloc()
8444 rc = hwrm_req_send(bp, req); in bnxt_hwrm_stat_ctx_alloc()
8450 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; in bnxt_hwrm_stat_ctx_alloc()
8452 hwrm_req_drop(bp, req); in bnxt_hwrm_stat_ctx_alloc()
8456 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) in bnxt_hwrm_func_qcfg() argument
8463 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); in bnxt_hwrm_func_qcfg()
8468 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_func_qcfg()
8469 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_qcfg()
8475 if (BNXT_VF(bp)) { in bnxt_hwrm_func_qcfg()
8476 struct bnxt_vf_info *vf = &bp->vf; in bnxt_hwrm_func_qcfg()
8484 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); in bnxt_hwrm_func_qcfg()
8489 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; in bnxt_hwrm_func_qcfg()
8491 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; in bnxt_hwrm_func_qcfg()
8493 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) in bnxt_hwrm_func_qcfg()
8494 bp->flags |= BNXT_FLAG_MULTI_HOST; in bnxt_hwrm_func_qcfg()
8497 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; in bnxt_hwrm_func_qcfg()
8500 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV; in bnxt_hwrm_func_qcfg()
8507 bp->port_partition_type = resp->port_partition_type; in bnxt_hwrm_func_qcfg()
8510 if (bp->hwrm_spec_code < 0x10707 || in bnxt_hwrm_func_qcfg()
8512 bp->br_mode = BRIDGE_MODE_VEB; in bnxt_hwrm_func_qcfg()
8514 bp->br_mode = BRIDGE_MODE_VEPA; in bnxt_hwrm_func_qcfg()
8516 bp->br_mode = BRIDGE_MODE_UNDEF; in bnxt_hwrm_func_qcfg()
8518 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); in bnxt_hwrm_func_qcfg()
8519 if (!bp->max_mtu) in bnxt_hwrm_func_qcfg()
8520 bp->max_mtu = BNXT_MAX_MTU; in bnxt_hwrm_func_qcfg()
8522 if (bp->db_size) in bnxt_hwrm_func_qcfg()
8525 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; in bnxt_hwrm_func_qcfg()
8526 if (BNXT_CHIP_P5(bp)) { in bnxt_hwrm_func_qcfg()
8527 if (BNXT_PF(bp)) in bnxt_hwrm_func_qcfg()
8528 bp->db_offset = DB_PF_OFFSET_P5; in bnxt_hwrm_func_qcfg()
8530 bp->db_offset = DB_VF_OFFSET_P5; in bnxt_hwrm_func_qcfg()
8532 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * in bnxt_hwrm_func_qcfg()
8534 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || in bnxt_hwrm_func_qcfg()
8535 bp->db_size <= bp->db_offset) in bnxt_hwrm_func_qcfg()
8536 bp->db_size = pci_resource_len(bp->pdev, 2); in bnxt_hwrm_func_qcfg()
8539 hwrm_req_drop(bp, req); in bnxt_hwrm_func_qcfg()
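The tail of bnxt_hwrm_func_qcfg() distrusts the firmware-reported doorbell size when it is zero, larger than the BAR, or no bigger than the doorbell offset, and falls back to the full BAR length. A sketch of that sanity check, with made-up sizes:

	/* Sketch of the doorbell-BAR fallback at the end of
	 * bnxt_hwrm_func_qcfg(); values are hypothetical. */
	#include <stdio.h>

	static unsigned long pick_db_size(unsigned long fw_db_size,
					  unsigned long db_offset,
					  unsigned long bar_len)
	{
		if (!fw_db_size || fw_db_size > bar_len || fw_db_size <= db_offset)
			return bar_len;	/* untrustworthy -> use the whole BAR */
		return fw_db_size;
	}

	int main(void)
	{
		printf("%lu\n", pick_db_size(0, 4096, 1 << 20));	/* 1048576 */
		printf("%lu\n", pick_db_size(65536, 4096, 1 << 20));	/* 65536 */
		return 0;
	}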
8555 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) in bnxt_alloc_all_ctx_pg_info() argument
8557 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_alloc_all_ctx_pg_info()
8576 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8583 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) in bnxt_hwrm_func_backing_store_qcaps_v2() argument
8587 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_hwrm_func_backing_store_qcaps_v2()
8591 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); in bnxt_hwrm_func_backing_store_qcaps_v2()
8599 bp->ctx = ctx; in bnxt_hwrm_func_backing_store_qcaps_v2()
8602 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_func_backing_store_qcaps_v2()
8613 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_backing_store_qcaps_v2()
8619 bnxt_free_one_ctx_mem(bp, ctxm, true); in bnxt_hwrm_func_backing_store_qcaps_v2()
8628 bnxt_free_one_ctx_mem(bp, ctxm, true); in bnxt_hwrm_func_backing_store_qcaps_v2()
8649 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); in bnxt_hwrm_func_backing_store_qcaps_v2()
8652 hwrm_req_drop(bp, req); in bnxt_hwrm_func_backing_store_qcaps_v2()
8656 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) in bnxt_hwrm_func_backing_store_qcaps() argument
8662 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || in bnxt_hwrm_func_backing_store_qcaps()
8663 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED)) in bnxt_hwrm_func_backing_store_qcaps()
8666 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) in bnxt_hwrm_func_backing_store_qcaps()
8667 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); in bnxt_hwrm_func_backing_store_qcaps()
8669 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); in bnxt_hwrm_func_backing_store_qcaps()
8673 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_func_backing_store_qcaps()
8674 rc = hwrm_req_send_silent(bp, req); in bnxt_hwrm_func_backing_store_qcaps()
8681 ctx = bp->ctx; in bnxt_hwrm_func_backing_store_qcaps()
8688 bp->ctx = ctx; in bnxt_hwrm_func_backing_store_qcaps()
8757 ctx->tqm_fp_rings_count = bp->max_q; in bnxt_hwrm_func_backing_store_qcaps()
8765 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); in bnxt_hwrm_func_backing_store_qcaps()
8770 hwrm_req_drop(bp, req); in bnxt_hwrm_func_backing_store_qcaps()
8799 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) in bnxt_hwrm_func_backing_store_cfg() argument
8802 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_hwrm_func_backing_store_cfg()
8818 if (req_len > bp->hwrm_max_ext_req_len) in bnxt_hwrm_func_backing_store_cfg()
8820 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); in bnxt_hwrm_func_backing_store_cfg()
8926 return hwrm_req_send(bp, req); in bnxt_hwrm_func_backing_store_cfg()
8929 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, in bnxt_alloc_ctx_mem_blk() argument
8940 return bnxt_alloc_ring(bp, rmem); in bnxt_alloc_ctx_mem_blk()
8943 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, in bnxt_alloc_ctx_pg_tbls() argument
8968 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); in bnxt_alloc_ctx_pg_tbls()
8990 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); in bnxt_alloc_ctx_pg_tbls()
8999 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); in bnxt_alloc_ctx_pg_tbls()
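bnxt_alloc_ctx_pg_tbls() adds a page-table level of indirection once a context type needs more backing pages than a single table page can point at. A stand-alone sketch of that sizing decision; the 4 KB page size and 8-byte PTE are illustrative assumptions, not the driver's constants:

	/* Sketch of the sizing decision in bnxt_alloc_ctx_pg_tbls():
	 * how many backing pages a context type needs, and whether a
	 * page-table level is required. Constants are illustrative. */
	#include <stdio.h>

	#define PAGE_SZ        4096UL
	#define PTES_PER_PAGE  (PAGE_SZ / 8)	/* 64-bit DMA addresses per table page */

	int main(void)
	{
		unsigned long mem_size = 9UL << 20;	/* 9 MB of context memory */
		unsigned long nr_pages = (mem_size + PAGE_SZ - 1) / PAGE_SZ;
		int indirect = nr_pages > PTES_PER_PAGE;

		printf("pages=%lu, needs page table: %s\n",
		       nr_pages, indirect ? "yes" : "no");
		return 0;
	}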
9004 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp, in bnxt_copy_ctx_pg_tbls() argument
9024 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail); in bnxt_copy_ctx_pg_tbls()
9034 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, in bnxt_free_ctx_pg_tbls() argument
9051 bnxt_free_ring(bp, rmem2); in bnxt_free_ctx_pg_tbls()
9059 bnxt_free_ring(bp, rmem); in bnxt_free_ctx_pg_tbls()
9063 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, in bnxt_setup_ctxm_pg_tbls() argument
9081 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, in bnxt_setup_ctxm_pg_tbls()
9089 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, in bnxt_hwrm_func_backing_store_cfg_v2() argument
9106 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); in bnxt_hwrm_func_backing_store_cfg_v2()
9109 hwrm_req_hold(bp, req); in bnxt_hwrm_func_backing_store_cfg_v2()
9113 bnxt_bs_trace_avail(bp, ctxm->type)) { in bnxt_hwrm_func_backing_store_cfg_v2()
9119 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]]; in bnxt_hwrm_func_backing_store_cfg_v2()
9141 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_backing_store_cfg_v2()
9143 hwrm_req_drop(bp, req); in bnxt_hwrm_func_backing_store_cfg_v2()
9147 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) in bnxt_backing_store_cfg_v2() argument
9149 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_backing_store_cfg_v2()
9157 if (!bnxt_bs_trace_avail(bp, type)) in bnxt_backing_store_cfg_v2()
9160 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, in bnxt_backing_store_cfg_v2()
9163 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n", in bnxt_backing_store_cfg_v2()
9167 bnxt_bs_trace_init(bp, ctxm); in bnxt_backing_store_cfg_v2()
9187 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); in bnxt_backing_store_cfg_v2()
9196 * @bp: The driver context
9208 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp, in __bnxt_copy_ctx_mem() argument
9222 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head, in __bnxt_copy_ctx_mem()
9230 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm, in bnxt_copy_ctx_mem() argument
9235 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail); in bnxt_copy_ctx_mem()
9238 static void bnxt_free_one_ctx_mem(struct bnxt *bp, in bnxt_free_one_ctx_mem() argument
9254 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); in bnxt_free_one_ctx_mem()
9263 void bnxt_free_ctx_mem(struct bnxt *bp, bool force) in bnxt_free_ctx_mem() argument
9265 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_free_ctx_mem()
9272 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force); in bnxt_free_ctx_mem()
9277 bp->ctx = NULL; in bnxt_free_ctx_mem()
9281 static int bnxt_alloc_ctx_mem(struct bnxt *bp) in bnxt_alloc_ctx_mem() argument
9295 rc = bnxt_hwrm_func_backing_store_qcaps(bp); in bnxt_alloc_ctx_mem()
9297 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", in bnxt_alloc_ctx_mem()
9301 ctx = bp->ctx; in bnxt_alloc_ctx_mem()
9314 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { in bnxt_alloc_ctx_mem()
9316 if (BNXT_SW_RES_LMT(bp)) { in bnxt_alloc_ctx_mem()
9333 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, in bnxt_alloc_ctx_mem()
9339 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); in bnxt_alloc_ctx_mem()
9344 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + in bnxt_alloc_ctx_mem()
9350 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); in bnxt_alloc_ctx_mem()
9355 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); in bnxt_alloc_ctx_mem()
9359 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) in bnxt_alloc_ctx_mem()
9363 if (BNXT_SW_RES_LMT(bp) && in bnxt_alloc_ctx_mem()
9378 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); in bnxt_alloc_ctx_mem()
9384 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); in bnxt_alloc_ctx_mem()
9394 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); in bnxt_alloc_ctx_mem()
9400 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); in bnxt_alloc_ctx_mem()
9407 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) in bnxt_alloc_ctx_mem()
9408 rc = bnxt_backing_store_cfg_v2(bp, ena); in bnxt_alloc_ctx_mem()
9410 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); in bnxt_alloc_ctx_mem()
9412 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", in bnxt_alloc_ctx_mem()
9420 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp) in bnxt_hwrm_crash_dump_mem_cfg() argument
9426 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) in bnxt_hwrm_crash_dump_mem_cfg()
9429 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG); in bnxt_hwrm_crash_dump_mem_cfg()
9440 bp->fw_crash_mem->ring_mem.depth); in bnxt_hwrm_crash_dump_mem_cfg()
9441 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map); in bnxt_hwrm_crash_dump_mem_cfg()
9442 req->size = cpu_to_le32(bp->fw_crash_len); in bnxt_hwrm_crash_dump_mem_cfg()
9444 return hwrm_req_send(bp, req); in bnxt_hwrm_crash_dump_mem_cfg()
9447 static void bnxt_free_crash_dump_mem(struct bnxt *bp) in bnxt_free_crash_dump_mem() argument
9449 if (bp->fw_crash_mem) { in bnxt_free_crash_dump_mem()
9450 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); in bnxt_free_crash_dump_mem()
9451 kfree(bp->fw_crash_mem); in bnxt_free_crash_dump_mem()
9452 bp->fw_crash_mem = NULL; in bnxt_free_crash_dump_mem()
9456 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp) in bnxt_alloc_crash_dump_mem() argument
9461 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) in bnxt_alloc_crash_dump_mem()
9464 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size); in bnxt_alloc_crash_dump_mem()
9471 if (bp->fw_crash_mem && in bnxt_alloc_crash_dump_mem()
9472 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE) in bnxt_alloc_crash_dump_mem()
9475 if (bp->fw_crash_mem) in bnxt_alloc_crash_dump_mem()
9476 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); in bnxt_alloc_crash_dump_mem()
9478 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem), in bnxt_alloc_crash_dump_mem()
9480 if (!bp->fw_crash_mem) in bnxt_alloc_crash_dump_mem()
9483 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL); in bnxt_alloc_crash_dump_mem()
9485 bnxt_free_crash_dump_mem(bp); in bnxt_alloc_crash_dump_mem()
9490 bp->fw_crash_len = mem_size; in bnxt_alloc_crash_dump_mem()
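bnxt_alloc_crash_dump_mem() keeps the existing crash buffer when the newly computed dump length still fits in the pages already allocated, and only frees and reallocates otherwise. A tiny model of that reuse check (constants hypothetical):

	/* Model of the reuse-if-it-still-fits check in
	 * bnxt_alloc_crash_dump_mem(); not driver code. */
	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SZ 4096UL

	static bool can_reuse(unsigned long have_pages, unsigned long need_bytes)
	{
		return have_pages && need_bytes <= have_pages * PAGE_SZ;
	}

	int main(void)
	{
		printf("%d\n", can_reuse(16, 60000));	/* 1: 60000 <= 65536 */
		printf("%d\n", can_reuse(16, 70000));	/* 0: must reallocate */
		return 0;
	}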
9494 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) in bnxt_hwrm_func_resc_qcaps() argument
9498 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_hwrm_func_resc_qcaps()
9501 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); in bnxt_hwrm_func_resc_qcaps()
9506 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_func_resc_qcaps()
9507 rc = hwrm_req_send_silent(bp, req); in bnxt_hwrm_func_resc_qcaps()
9532 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_func_resc_qcaps()
9539 if (BNXT_PF(bp)) { in bnxt_hwrm_func_resc_qcaps()
9540 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_func_resc_qcaps()
9548 hwrm_req_drop(bp, req); in bnxt_hwrm_func_resc_qcaps()
9552 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) in __bnxt_hwrm_ptp_qcfg() argument
9556 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in __bnxt_hwrm_ptp_qcfg()
9560 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { in __bnxt_hwrm_ptp_qcfg()
9565 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); in __bnxt_hwrm_ptp_qcfg()
9569 req->port_id = cpu_to_le16(bp->pf.port_id); in __bnxt_hwrm_ptp_qcfg()
9570 resp = hwrm_req_hold(bp, req); in __bnxt_hwrm_ptp_qcfg()
9571 rc = hwrm_req_send(bp, req); in __bnxt_hwrm_ptp_qcfg()
9576 if (BNXT_CHIP_P5_AND_MINUS(bp) && in __bnxt_hwrm_ptp_qcfg()
9587 ptp->bp = bp; in __bnxt_hwrm_ptp_qcfg()
9588 bp->ptp_cfg = ptp; in __bnxt_hwrm_ptp_qcfg()
9596 } else if (BNXT_CHIP_P5(bp)) { in __bnxt_hwrm_ptp_qcfg()
9605 rc = bnxt_ptp_init(bp); in __bnxt_hwrm_ptp_qcfg()
9607 netdev_warn(bp->dev, "PTP initialization failed.\n"); in __bnxt_hwrm_ptp_qcfg()
9609 hwrm_req_drop(bp, req); in __bnxt_hwrm_ptp_qcfg()
9614 bnxt_ptp_clear(bp); in __bnxt_hwrm_ptp_qcfg()
9616 bp->ptp_cfg = NULL; in __bnxt_hwrm_ptp_qcfg()
9620 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) in __bnxt_hwrm_func_qcaps() argument
9624 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in __bnxt_hwrm_func_qcaps()
9628 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); in __bnxt_hwrm_func_qcaps()
9633 resp = hwrm_req_hold(bp, req); in __bnxt_hwrm_func_qcaps()
9634 rc = hwrm_req_send(bp, req); in __bnxt_hwrm_func_qcaps()
9640 bp->flags |= BNXT_FLAG_ROCEV1_CAP; in __bnxt_hwrm_func_qcaps()
9642 bp->flags |= BNXT_FLAG_ROCEV2_CAP; in __bnxt_hwrm_func_qcaps()
9644 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; in __bnxt_hwrm_func_qcaps()
9646 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; in __bnxt_hwrm_func_qcaps()
9648 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; in __bnxt_hwrm_func_qcaps()
9650 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; in __bnxt_hwrm_func_qcaps()
9652 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; in __bnxt_hwrm_func_qcaps()
9654 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; in __bnxt_hwrm_func_qcaps()
9656 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; in __bnxt_hwrm_func_qcaps()
9660 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; in __bnxt_hwrm_func_qcaps()
9661 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) in __bnxt_hwrm_func_qcaps()
9662 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; in __bnxt_hwrm_func_qcaps()
9664 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; in __bnxt_hwrm_func_qcaps()
9665 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) in __bnxt_hwrm_func_qcaps()
9666 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; in __bnxt_hwrm_func_qcaps()
9667 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) in __bnxt_hwrm_func_qcaps()
9668 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; in __bnxt_hwrm_func_qcaps()
9670 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2; in __bnxt_hwrm_func_qcaps()
9671 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED)) in __bnxt_hwrm_func_qcaps()
9672 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP; in __bnxt_hwrm_func_qcaps()
9674 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; in __bnxt_hwrm_func_qcaps()
9676 bp->flags |= BNXT_FLAG_TX_COAL_CMPL; in __bnxt_hwrm_func_qcaps()
9680 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; in __bnxt_hwrm_func_qcaps()
9682 bp->flags |= BNXT_FLAG_UDP_GSO_CAP; in __bnxt_hwrm_func_qcaps()
9684 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; in __bnxt_hwrm_func_qcaps()
9687 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS; in __bnxt_hwrm_func_qcaps()
9688 if (BNXT_PF(bp) && in __bnxt_hwrm_func_qcaps()
9690 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED; in __bnxt_hwrm_func_qcaps()
9692 bp->tx_push_thresh = 0; in __bnxt_hwrm_func_qcaps()
9694 BNXT_FW_MAJ(bp) > 217) in __bnxt_hwrm_func_qcaps()
9695 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; in __bnxt_hwrm_func_qcaps()
9715 if (BNXT_PF(bp)) { in __bnxt_hwrm_func_qcaps()
9716 struct bnxt_pf_info *pf = &bp->pf; in __bnxt_hwrm_func_qcaps()
9723 bp->flags &= ~BNXT_FLAG_WOL_CAP; in __bnxt_hwrm_func_qcaps()
9725 bp->flags |= BNXT_FLAG_WOL_CAP; in __bnxt_hwrm_func_qcaps()
9727 bp->fw_cap |= BNXT_FW_CAP_PTP; in __bnxt_hwrm_func_qcaps()
9729 bnxt_ptp_clear(bp); in __bnxt_hwrm_func_qcaps()
9730 kfree(bp->ptp_cfg); in __bnxt_hwrm_func_qcaps()
9731 bp->ptp_cfg = NULL; in __bnxt_hwrm_func_qcaps()
9735 struct bnxt_vf_info *vf = &bp->vf; in __bnxt_hwrm_func_qcaps()
9741 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs); in __bnxt_hwrm_func_qcaps()
9744 hwrm_req_drop(bp, req); in __bnxt_hwrm_func_qcaps()
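__bnxt_hwrm_func_qcaps() is mostly a translation table: each response flag bit is remapped onto the driver's own fw_cap bitmask, so the rest of the driver never tests wire-format bits directly. A compact model of that decoupling (bit values invented for illustration):

	/* Model of the flag translation in __bnxt_hwrm_func_qcaps();
	 * bit positions here are made up, not the real definitions. */
	#include <stdint.h>
	#include <stdio.h>

	#define RESP_HOT_RESET   (1u << 0)	/* wire-format bits */
	#define RESP_EXT_STATS   (1u << 1)
	#define CAP_HOT_RESET    (1u << 4)	/* driver-internal bits */
	#define CAP_EXT_STATS    (1u << 9)

	int main(void)
	{
		uint32_t resp_flags = RESP_HOT_RESET | RESP_EXT_STATS;
		uint32_t fw_cap = 0;

		if (resp_flags & RESP_HOT_RESET)
			fw_cap |= CAP_HOT_RESET;
		if (resp_flags & RESP_EXT_STATS)
			fw_cap |= CAP_EXT_STATS;
		printf("fw_cap = 0x%x\n", fw_cap);
		return 0;
	}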
9748 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) in bnxt_hwrm_dbg_qcaps() argument
9754 bp->fw_dbg_cap = 0; in bnxt_hwrm_dbg_qcaps()
9755 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) in bnxt_hwrm_dbg_qcaps()
9758 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); in bnxt_hwrm_dbg_qcaps()
9763 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_dbg_qcaps()
9764 rc = hwrm_req_send(bp, req); in bnxt_hwrm_dbg_qcaps()
9768 bp->fw_dbg_cap = le32_to_cpu(resp->flags); in bnxt_hwrm_dbg_qcaps()
9771 hwrm_req_drop(bp, req); in bnxt_hwrm_dbg_qcaps()
9774 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9776 int bnxt_hwrm_func_qcaps(struct bnxt *bp) in bnxt_hwrm_func_qcaps() argument
9780 rc = __bnxt_hwrm_func_qcaps(bp); in bnxt_hwrm_func_qcaps()
9784 bnxt_hwrm_dbg_qcaps(bp); in bnxt_hwrm_func_qcaps()
9786 rc = bnxt_hwrm_queue_qportcfg(bp); in bnxt_hwrm_func_qcaps()
9788 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); in bnxt_hwrm_func_qcaps()
9791 if (bp->hwrm_spec_code >= 0x10803) { in bnxt_hwrm_func_qcaps()
9792 rc = bnxt_alloc_ctx_mem(bp); in bnxt_hwrm_func_qcaps()
9795 rc = bnxt_hwrm_func_resc_qcaps(bp, true); in bnxt_hwrm_func_qcaps()
9797 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; in bnxt_hwrm_func_qcaps()
9802 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps() argument
9809 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9812 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9816 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9817 rc = hwrm_req_send(bp, req); in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9824 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9828 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9832 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9835 hwrm_req_drop(bp, req); in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9839 static int __bnxt_alloc_fw_health(struct bnxt *bp) in __bnxt_alloc_fw_health() argument
9841 if (bp->fw_health) in __bnxt_alloc_fw_health()
9844 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); in __bnxt_alloc_fw_health()
9845 if (!bp->fw_health) in __bnxt_alloc_fw_health()
9848 mutex_init(&bp->fw_health->lock); in __bnxt_alloc_fw_health()
9852 static int bnxt_alloc_fw_health(struct bnxt *bp) in bnxt_alloc_fw_health() argument
9856 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && in bnxt_alloc_fw_health()
9857 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) in bnxt_alloc_fw_health()
9860 rc = __bnxt_alloc_fw_health(bp); in bnxt_alloc_fw_health()
9862 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; in bnxt_alloc_fw_health()
9863 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; in bnxt_alloc_fw_health()
9870 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) in __bnxt_map_fw_health_reg() argument
9872 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + in __bnxt_map_fw_health_reg()
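__bnxt_map_fw_health_reg() programs a sliding BAR0 window: the register's page-aligned base is written into the window, and its low bits become an offset inside that window. A sketch of the arithmetic; the mask and window offset below are assumptions for illustration, not the driver's actual constants:

	/* Sketch of the window arithmetic behind __bnxt_map_fw_health_reg();
	 * GRC_BASE_MASK and WIN_BASE are assumed values. */
	#include <stdio.h>

	#define GRC_BASE_MASK  0xfffff000u	/* 4 KB-aligned window target */
	#define WIN_BASE       0x3000u		/* hypothetical window offset in BAR0 */

	int main(void)
	{
		unsigned int reg = 0x31001050;	/* arbitrary GRC register address */
		unsigned int win_target = reg & GRC_BASE_MASK;		 /* programmed into the window */
		unsigned int bar_off = WIN_BASE + (reg & ~GRC_BASE_MASK); /* where to read */

		printf("window -> 0x%x, read at BAR0+0x%x\n", win_target, bar_off);
		return 0;
	}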
9877 static void bnxt_inv_fw_health_reg(struct bnxt *bp) in bnxt_inv_fw_health_reg() argument
9879 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_inv_fw_health_reg()
9894 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) in bnxt_try_map_fw_health_reg() argument
9901 if (bp->fw_health) in bnxt_try_map_fw_health_reg()
9902 bp->fw_health->status_reliable = false; in bnxt_try_map_fw_health_reg()
9904 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); in bnxt_try_map_fw_health_reg()
9905 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); in bnxt_try_map_fw_health_reg()
9909 if (!bp->chip_num) { in bnxt_try_map_fw_health_reg()
9910 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); in bnxt_try_map_fw_health_reg()
9911 bp->chip_num = readl(bp->bar0 + in bnxt_try_map_fw_health_reg()
9915 if (!BNXT_CHIP_P5_PLUS(bp)) in bnxt_try_map_fw_health_reg()
9925 if (__bnxt_alloc_fw_health(bp)) { in bnxt_try_map_fw_health_reg()
9926 netdev_warn(bp->dev, "no memory for firmware status checks\n"); in bnxt_try_map_fw_health_reg()
9930 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; in bnxt_try_map_fw_health_reg()
9933 __bnxt_map_fw_health_reg(bp, status_loc); in bnxt_try_map_fw_health_reg()
9934 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = in bnxt_try_map_fw_health_reg()
9938 bp->fw_health->status_reliable = true; in bnxt_try_map_fw_health_reg()
9941 static int bnxt_map_fw_health_regs(struct bnxt *bp) in bnxt_map_fw_health_regs() argument
9943 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_map_fw_health_regs()
9947 bp->fw_health->status_reliable = false; in bnxt_map_fw_health_regs()
9948 bp->fw_health->resets_reliable = false; in bnxt_map_fw_health_regs()
9961 bp->fw_health->status_reliable = true; in bnxt_map_fw_health_regs()
9962 bp->fw_health->resets_reliable = true; in bnxt_map_fw_health_regs()
9966 __bnxt_map_fw_health_reg(bp, reg_base); in bnxt_map_fw_health_regs()
9970 static void bnxt_remap_fw_health_regs(struct bnxt *bp) in bnxt_remap_fw_health_regs() argument
9972 if (!bp->fw_health) in bnxt_remap_fw_health_regs()
9975 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { in bnxt_remap_fw_health_regs()
9976 bp->fw_health->status_reliable = true; in bnxt_remap_fw_health_regs()
9977 bp->fw_health->resets_reliable = true; in bnxt_remap_fw_health_regs()
9979 bnxt_try_map_fw_health_reg(bp); in bnxt_remap_fw_health_regs()
9983 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) in bnxt_hwrm_error_recovery_qcfg() argument
9985 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_hwrm_error_recovery_qcfg()
9990 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) in bnxt_hwrm_error_recovery_qcfg()
9993 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); in bnxt_hwrm_error_recovery_qcfg()
9997 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_error_recovery_qcfg()
9998 rc = hwrm_req_send(bp, req); in bnxt_hwrm_error_recovery_qcfg()
10003 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { in bnxt_hwrm_error_recovery_qcfg()
10040 hwrm_req_drop(bp, req); in bnxt_hwrm_error_recovery_qcfg()
10042 rc = bnxt_map_fw_health_regs(bp); in bnxt_hwrm_error_recovery_qcfg()
10044 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; in bnxt_hwrm_error_recovery_qcfg()
10048 static int bnxt_hwrm_func_reset(struct bnxt *bp) in bnxt_hwrm_func_reset() argument
10053 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); in bnxt_hwrm_func_reset()
10058 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); in bnxt_hwrm_func_reset()
10059 return hwrm_req_send(bp, req); in bnxt_hwrm_func_reset()
10062 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) in bnxt_nvm_cfg_ver_get() argument
10066 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) in bnxt_nvm_cfg_ver_get()
10067 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", in bnxt_nvm_cfg_ver_get()
10072 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) in bnxt_hwrm_queue_qportcfg() argument
10080 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); in bnxt_hwrm_queue_qportcfg()
10084 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_queue_qportcfg()
10085 rc = hwrm_req_send(bp, req); in bnxt_hwrm_queue_qportcfg()
10093 bp->max_tc = resp->max_configurable_queues; in bnxt_hwrm_queue_qportcfg()
10094 bp->max_lltc = resp->max_configurable_lossless_queues; in bnxt_hwrm_queue_qportcfg()
10095 if (bp->max_tc > BNXT_MAX_QUEUE) in bnxt_hwrm_queue_qportcfg()
10096 bp->max_tc = BNXT_MAX_QUEUE; in bnxt_hwrm_queue_qportcfg()
10098 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); in bnxt_hwrm_queue_qportcfg()
10100 for (i = 0, j = 0; i < bp->max_tc; i++) { in bnxt_hwrm_queue_qportcfg()
10101 bp->q_info[j].queue_id = *qptr; in bnxt_hwrm_queue_qportcfg()
10102 bp->q_ids[i] = *qptr++; in bnxt_hwrm_queue_qportcfg()
10103 bp->q_info[j].queue_profile = *qptr++; in bnxt_hwrm_queue_qportcfg()
10104 bp->tc_to_qidx[j] = j; in bnxt_hwrm_queue_qportcfg()
10105 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || in bnxt_hwrm_queue_qportcfg()
10106 (no_rdma && BNXT_PF(bp))) in bnxt_hwrm_queue_qportcfg()
10109 bp->max_q = bp->max_tc; in bnxt_hwrm_queue_qportcfg()
10110 bp->max_tc = max_t(u8, j, 1); in bnxt_hwrm_queue_qportcfg()
10113 bp->max_tc = 1; in bnxt_hwrm_queue_qportcfg()
10115 if (bp->max_lltc > bp->max_tc) in bnxt_hwrm_queue_qportcfg()
10116 bp->max_lltc = bp->max_tc; in bnxt_hwrm_queue_qportcfg()
10119 hwrm_req_drop(bp, req); in bnxt_hwrm_queue_qportcfg()
10123 static int bnxt_hwrm_poll(struct bnxt *bp) in bnxt_hwrm_poll() argument
10128 rc = hwrm_req_init(bp, req, HWRM_VER_GET); in bnxt_hwrm_poll()
10136 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); in bnxt_hwrm_poll()
10137 rc = hwrm_req_send(bp, req); in bnxt_hwrm_poll()
10141 static int bnxt_hwrm_ver_get(struct bnxt *bp) in bnxt_hwrm_ver_get() argument
10149 rc = hwrm_req_init(bp, req, HWRM_VER_GET); in bnxt_hwrm_ver_get()
10153 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); in bnxt_hwrm_ver_get()
10154 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; in bnxt_hwrm_ver_get()
10159 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_ver_get()
10160 rc = hwrm_req_send(bp, req); in bnxt_hwrm_ver_get()
10164 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); in bnxt_hwrm_ver_get()
10166 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | in bnxt_hwrm_ver_get()
10170 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", in bnxt_hwrm_ver_get()
10173 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); in bnxt_hwrm_ver_get()
10179 if (bp->hwrm_spec_code > hwrm_ver) in bnxt_hwrm_ver_get()
10180 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", in bnxt_hwrm_ver_get()
10184 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", in bnxt_hwrm_ver_get()
10189 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { in bnxt_hwrm_ver_get()
10201 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); in bnxt_hwrm_ver_get()
10202 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, in bnxt_hwrm_ver_get()
10206 int fw_ver_len = strlen(bp->fw_ver_str); in bnxt_hwrm_ver_get()
10208 snprintf(bp->fw_ver_str + fw_ver_len, in bnxt_hwrm_ver_get()
10211 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; in bnxt_hwrm_ver_get()
10214 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); in bnxt_hwrm_ver_get()
10215 if (!bp->hwrm_cmd_timeout) in bnxt_hwrm_ver_get()
10216 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; in bnxt_hwrm_ver_get()
10217 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; in bnxt_hwrm_ver_get()
10218 if (!bp->hwrm_cmd_max_timeout) in bnxt_hwrm_ver_get()
10219 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; in bnxt_hwrm_ver_get()
10220 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000; in bnxt_hwrm_ver_get()
10222 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT || in bnxt_hwrm_ver_get()
10224 …netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (k… in bnxt_hwrm_ver_get()
10230 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); in bnxt_hwrm_ver_get()
10231 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); in bnxt_hwrm_ver_get()
10233 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) in bnxt_hwrm_ver_get()
10234 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; in bnxt_hwrm_ver_get()
10236 bp->chip_num = le16_to_cpu(resp->chip_num); in bnxt_hwrm_ver_get()
10237 bp->chip_rev = resp->chip_rev; in bnxt_hwrm_ver_get()
10238 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && in bnxt_hwrm_ver_get()
10240 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; in bnxt_hwrm_ver_get()
10245 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; in bnxt_hwrm_ver_get()
10248 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; in bnxt_hwrm_ver_get()
10252 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; in bnxt_hwrm_ver_get()
10256 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; in bnxt_hwrm_ver_get()
10260 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; in bnxt_hwrm_ver_get()
10263 hwrm_req_drop(bp, req); in bnxt_hwrm_ver_get()
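bnxt_hwrm_ver_get() packs major/minor/update into one integer so the spec-level gates elsewhere in the file, such as bp->hwrm_spec_code < 0x10801, reduce to a single compare. A runnable sketch of that packing:

	/* Sketch of the version packing seen in bnxt_hwrm_ver_get():
	 * maj << 16 | min << 8 | upd. */
	#include <stdio.h>

	static unsigned int spec_code(unsigned int maj, unsigned int min,
				      unsigned int upd)
	{
		return maj << 16 | min << 8 | upd;
	}

	int main(void)
	{
		printf("1.8.1 -> 0x%x\n", spec_code(1, 8, 1));	/* 0x10801 */
		printf("older? %d\n", spec_code(1, 7, 0) < spec_code(1, 8, 1));
		return 0;
	}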
10267 int bnxt_hwrm_fw_set_time(struct bnxt *bp) in bnxt_hwrm_fw_set_time() argument
10274 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || in bnxt_hwrm_fw_set_time()
10275 bp->hwrm_spec_code < 0x10400) in bnxt_hwrm_fw_set_time()
10279 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); in bnxt_hwrm_fw_set_time()
10289 return hwrm_req_send(bp, req); in bnxt_hwrm_fw_set_time()
10330 static void bnxt_accumulate_all_stats(struct bnxt *bp) in bnxt_accumulate_all_stats() argument
10337 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_accumulate_all_stats()
10340 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_accumulate_all_stats()
10341 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_accumulate_all_stats()
10353 if (bp->flags & BNXT_FLAG_PORT_STATS) { in bnxt_accumulate_all_stats()
10354 struct bnxt_stats_mem *stats = &bp->port_stats; in bnxt_accumulate_all_stats()
10369 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { in bnxt_accumulate_all_stats()
10370 bnxt_accumulate_stats(&bp->rx_port_stats_ext); in bnxt_accumulate_all_stats()
10371 bnxt_accumulate_stats(&bp->tx_port_stats_ext); in bnxt_accumulate_all_stats()
10375 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) in bnxt_hwrm_port_qstats() argument
10378 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_port_qstats()
10381 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) in bnxt_hwrm_port_qstats()
10384 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) in bnxt_hwrm_port_qstats()
10387 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); in bnxt_hwrm_port_qstats()
10393 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + in bnxt_hwrm_port_qstats()
10395 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); in bnxt_hwrm_port_qstats()
10396 return hwrm_req_send(bp, req); in bnxt_hwrm_port_qstats()
10399 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) in bnxt_hwrm_port_qstats_ext() argument
10405 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_port_qstats_ext()
10409 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) in bnxt_hwrm_port_qstats_ext()
10412 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) in bnxt_hwrm_port_qstats_ext()
10415 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); in bnxt_hwrm_port_qstats_ext()
10422 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); in bnxt_hwrm_port_qstats_ext()
10423 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? in bnxt_hwrm_port_qstats_ext()
10426 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); in bnxt_hwrm_port_qstats_ext()
10427 resp_qs = hwrm_req_hold(bp, req_qs); in bnxt_hwrm_port_qstats_ext()
10428 rc = hwrm_req_send(bp, req_qs); in bnxt_hwrm_port_qstats_ext()
10430 bp->fw_rx_stats_ext_size = in bnxt_hwrm_port_qstats_ext()
10432 if (BNXT_FW_MAJ(bp) < 220 && in bnxt_hwrm_port_qstats_ext()
10433 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) in bnxt_hwrm_port_qstats_ext()
10434 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; in bnxt_hwrm_port_qstats_ext()
10436 bp->fw_tx_stats_ext_size = tx_stat_size ? in bnxt_hwrm_port_qstats_ext()
10439 bp->fw_rx_stats_ext_size = 0; in bnxt_hwrm_port_qstats_ext()
10440 bp->fw_tx_stats_ext_size = 0; in bnxt_hwrm_port_qstats_ext()
10442 hwrm_req_drop(bp, req_qs); in bnxt_hwrm_port_qstats_ext()
10447 if (bp->fw_tx_stats_ext_size <= in bnxt_hwrm_port_qstats_ext()
10449 bp->pri2cos_valid = 0; in bnxt_hwrm_port_qstats_ext()
10453 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); in bnxt_hwrm_port_qstats_ext()
10459 resp_qc = hwrm_req_hold(bp, req_qc); in bnxt_hwrm_port_qstats_ext()
10460 rc = hwrm_req_send(bp, req_qc); in bnxt_hwrm_port_qstats_ext()
10473 bp->pri2cos_valid = false; in bnxt_hwrm_port_qstats_ext()
10474 hwrm_req_drop(bp, req_qc); in bnxt_hwrm_port_qstats_ext()
10477 for (j = 0; j < bp->max_q; j++) { in bnxt_hwrm_port_qstats_ext()
10478 if (bp->q_ids[j] == queue_id) in bnxt_hwrm_port_qstats_ext()
10479 bp->pri2cos_idx[i] = queue_idx; in bnxt_hwrm_port_qstats_ext()
10482 bp->pri2cos_valid = true; in bnxt_hwrm_port_qstats_ext()
10484 hwrm_req_drop(bp, req_qc); in bnxt_hwrm_port_qstats_ext()
10489 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) in bnxt_hwrm_free_tunnel_ports() argument
10491 bnxt_hwrm_tunnel_dst_port_free(bp, in bnxt_hwrm_free_tunnel_ports()
10493 bnxt_hwrm_tunnel_dst_port_free(bp, in bnxt_hwrm_free_tunnel_ports()
10497 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) in bnxt_set_tpa() argument
10503 tpa_flags = bp->flags & BNXT_FLAG_TPA; in bnxt_set_tpa()
10504 else if (BNXT_NO_FW_ACCESS(bp)) in bnxt_set_tpa()
10506 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_set_tpa()
10507 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); in bnxt_set_tpa()
10509 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", in bnxt_set_tpa()
10517 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) in bnxt_hwrm_clear_vnic_rss() argument
10521 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_clear_vnic_rss()
10522 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); in bnxt_hwrm_clear_vnic_rss()
10525 static void bnxt_clear_vnic(struct bnxt *bp) in bnxt_clear_vnic() argument
10527 if (!bp->vnic_info) in bnxt_clear_vnic()
10530 bnxt_hwrm_clear_vnic_filter(bp); in bnxt_clear_vnic()
10531 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { in bnxt_clear_vnic()
10533 bnxt_hwrm_clear_vnic_rss(bp); in bnxt_clear_vnic()
10534 bnxt_hwrm_vnic_ctx_free(bp); in bnxt_clear_vnic()
10537 if (bp->flags & BNXT_FLAG_TPA) in bnxt_clear_vnic()
10538 bnxt_set_tpa(bp, false); in bnxt_clear_vnic()
10539 bnxt_hwrm_vnic_free(bp); in bnxt_clear_vnic()
10540 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_clear_vnic()
10541 bnxt_hwrm_vnic_ctx_free(bp); in bnxt_clear_vnic()
10544 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, in bnxt_hwrm_resource_free() argument
10547 bnxt_clear_vnic(bp); in bnxt_hwrm_resource_free()
10548 bnxt_hwrm_ring_free(bp, close_path); in bnxt_hwrm_resource_free()
10549 bnxt_hwrm_ring_grp_free(bp); in bnxt_hwrm_resource_free()
10551 bnxt_hwrm_stat_ctx_free(bp); in bnxt_hwrm_resource_free()
10552 bnxt_hwrm_free_tunnel_ports(bp); in bnxt_hwrm_resource_free()
10556 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) in bnxt_hwrm_set_br_mode() argument
10569 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); in bnxt_hwrm_set_br_mode()
10576 return hwrm_req_send(bp, req); in bnxt_hwrm_set_br_mode()
10579 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) in bnxt_hwrm_set_cache_line_size() argument
10584 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) in bnxt_hwrm_set_cache_line_size()
10587 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); in bnxt_hwrm_set_cache_line_size()
10597 return hwrm_req_send(bp, req); in bnxt_hwrm_set_cache_line_size()
10600 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) in __bnxt_setup_vnic() argument
10608 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); in __bnxt_setup_vnic()
10610 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", in __bnxt_setup_vnic()
10614 bp->rsscos_nr_ctxs++; in __bnxt_setup_vnic()
10616 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { in __bnxt_setup_vnic()
10617 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); in __bnxt_setup_vnic()
10619 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", in __bnxt_setup_vnic()
10623 bp->rsscos_nr_ctxs++; in __bnxt_setup_vnic()
10628 rc = bnxt_hwrm_vnic_cfg(bp, vnic); in __bnxt_setup_vnic()
10630 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", in __bnxt_setup_vnic()
10636 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); in __bnxt_setup_vnic()
10638 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", in __bnxt_setup_vnic()
10643 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in __bnxt_setup_vnic()
10644 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); in __bnxt_setup_vnic()
10646 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", in __bnxt_setup_vnic()
10655 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, in bnxt_hwrm_vnic_update() argument
10661 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE); in bnxt_hwrm_vnic_update()
10672 return hwrm_req_send(bp, req); in bnxt_hwrm_vnic_update()
10675 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) in bnxt_hwrm_vnic_rss_cfg_p5() argument
10679 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); in bnxt_hwrm_vnic_rss_cfg_p5()
10681 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", in bnxt_hwrm_vnic_rss_cfg_p5()
10685 rc = bnxt_hwrm_vnic_cfg(bp, vnic); in bnxt_hwrm_vnic_rss_cfg_p5()
10687 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", in bnxt_hwrm_vnic_rss_cfg_p5()
10692 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) in __bnxt_setup_vnic_p5() argument
10696 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); in __bnxt_setup_vnic_p5()
10698 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); in __bnxt_setup_vnic_p5()
10700 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", in __bnxt_setup_vnic_p5()
10704 bp->rsscos_nr_ctxs++; in __bnxt_setup_vnic_p5()
10709 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); in __bnxt_setup_vnic_p5()
10713 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in __bnxt_setup_vnic_p5()
10714 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); in __bnxt_setup_vnic_p5()
10716 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", in __bnxt_setup_vnic_p5()
10723 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) in bnxt_setup_vnic() argument
10725 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_setup_vnic()
10726 return __bnxt_setup_vnic_p5(bp, vnic); in bnxt_setup_vnic()
10728 return __bnxt_setup_vnic(bp, vnic); in bnxt_setup_vnic()
10731 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, in bnxt_alloc_and_setup_vnic() argument
10737 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); in bnxt_alloc_and_setup_vnic()
10739 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", in bnxt_alloc_and_setup_vnic()
10743 return bnxt_setup_vnic(bp, vnic); in bnxt_alloc_and_setup_vnic()
10746 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) in bnxt_alloc_rfs_vnics() argument
10751 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { in bnxt_alloc_rfs_vnics()
10752 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; in bnxt_alloc_rfs_vnics()
10753 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); in bnxt_alloc_rfs_vnics()
10756 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_alloc_rfs_vnics()
10759 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rfs_vnics()
10763 if (vnic_id >= bp->nr_vnics) in bnxt_alloc_rfs_vnics()
10766 vnic = &bp->vnic_info[vnic_id]; in bnxt_alloc_rfs_vnics()
10768 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) in bnxt_alloc_rfs_vnics()
10770 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) in bnxt_alloc_rfs_vnics()
10776 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, in bnxt_del_one_rss_ctx() argument
10784 if (netif_running(bp->dev)) { in bnxt_del_one_rss_ctx()
10785 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); in bnxt_del_one_rss_ctx()
10788 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); in bnxt_del_one_rss_ctx()
10794 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { in bnxt_del_one_rss_ctx()
10800 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); in bnxt_del_one_rss_ctx()
10801 bnxt_del_ntp_filter(bp, ntp_fltr); in bnxt_del_one_rss_ctx()
10802 bnxt_del_one_usr_fltr(bp, usr_fltr); in bnxt_del_one_rss_ctx()
10807 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, in bnxt_del_one_rss_ctx()
10810 bp->num_rss_ctx--; in bnxt_del_one_rss_ctx()
10813 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic, in bnxt_vnic_has_rx_ring() argument
10816 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); in bnxt_vnic_has_rx_ring()
10829 vnic_rx = bp->rss_indir_tbl[i]; in bnxt_vnic_has_rx_ring()
10838 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic, in bnxt_set_vnic_mru_p5() argument
10843 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id)) in bnxt_set_vnic_mru_p5()
10847 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); in bnxt_set_vnic_mru_p5()
10849 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", in bnxt_set_vnic_mru_p5()
10855 bnxt_hwrm_vnic_update(bp, vnic, in bnxt_set_vnic_mru_p5()
10861 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id) in bnxt_set_rss_ctx_vnic_mru() argument
10867 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { in bnxt_set_rss_ctx_vnic_mru()
10871 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id); in bnxt_set_rss_ctx_vnic_mru()
10879 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) in bnxt_hwrm_realloc_rss_ctx_vnic() argument
10881 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); in bnxt_hwrm_realloc_rss_ctx_vnic()
10885 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { in bnxt_hwrm_realloc_rss_ctx_vnic()
10889 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || in bnxt_hwrm_realloc_rss_ctx_vnic()
10890 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || in bnxt_hwrm_realloc_rss_ctx_vnic()
10891 __bnxt_setup_vnic_p5(bp, vnic)) { in bnxt_hwrm_realloc_rss_ctx_vnic()
10892 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", in bnxt_hwrm_realloc_rss_ctx_vnic()
10894 bnxt_del_one_rss_ctx(bp, rss_ctx, true); in bnxt_hwrm_realloc_rss_ctx_vnic()
10895 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index); in bnxt_hwrm_realloc_rss_ctx_vnic()
10900 static void bnxt_clear_rss_ctxs(struct bnxt *bp) in bnxt_clear_rss_ctxs() argument
10905 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { in bnxt_clear_rss_ctxs()
10908 bnxt_del_one_rss_ctx(bp, rss_ctx, false); in bnxt_clear_rss_ctxs()
10913 static bool bnxt_promisc_ok(struct bnxt *bp) in bnxt_promisc_ok() argument
10916 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) in bnxt_promisc_ok()
10922 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) in bnxt_setup_nitroa0_vnic() argument
10924 struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; in bnxt_setup_nitroa0_vnic()
10927 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); in bnxt_setup_nitroa0_vnic()
10929 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", in bnxt_setup_nitroa0_vnic()
10934 rc = bnxt_hwrm_vnic_cfg(bp, vnic); in bnxt_setup_nitroa0_vnic()
10936 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", in bnxt_setup_nitroa0_vnic()
10946 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) in bnxt_init_chip() argument
10948 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_init_chip()
10950 unsigned int rx_nr_rings = bp->rx_nr_rings; in bnxt_init_chip()
10953 rc = bnxt_hwrm_stat_ctx_alloc(bp); in bnxt_init_chip()
10955 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", in bnxt_init_chip()
10961 rc = bnxt_hwrm_ring_alloc(bp); in bnxt_init_chip()
10963 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); in bnxt_init_chip()
10967 rc = bnxt_hwrm_ring_grp_alloc(bp); in bnxt_init_chip()
10969 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); in bnxt_init_chip()
10973 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_init_chip()
10977 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); in bnxt_init_chip()
10979 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); in bnxt_init_chip()
10983 if (BNXT_VF(bp)) in bnxt_init_chip()
10984 bnxt_hwrm_func_qcfg(bp); in bnxt_init_chip()
10986 rc = bnxt_setup_vnic(bp, vnic); in bnxt_init_chip()
10989 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) in bnxt_init_chip()
10990 bnxt_hwrm_update_rss_hash_cfg(bp); in bnxt_init_chip()
10992 if (bp->flags & BNXT_FLAG_RFS) { in bnxt_init_chip()
10993 rc = bnxt_alloc_rfs_vnics(bp); in bnxt_init_chip()
10998 if (bp->flags & BNXT_FLAG_TPA) { in bnxt_init_chip()
10999 rc = bnxt_set_tpa(bp, true); in bnxt_init_chip()
11004 if (BNXT_VF(bp)) in bnxt_init_chip()
11005 bnxt_update_vf_mac(bp); in bnxt_init_chip()
11008 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); in bnxt_init_chip()
11010 if (BNXT_VF(bp) && rc == -ENODEV) in bnxt_init_chip()
11011 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); in bnxt_init_chip()
11013 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); in bnxt_init_chip()
11019 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) in bnxt_init_chip()
11022 if (bp->dev->flags & IFF_BROADCAST) in bnxt_init_chip()
11025 if (bp->dev->flags & IFF_PROMISC) in bnxt_init_chip()
11028 if (bp->dev->flags & IFF_ALLMULTI) { in bnxt_init_chip()
11031 } else if (bp->dev->flags & IFF_MULTICAST) { in bnxt_init_chip()
11034 bnxt_mc_list_updated(bp, &mask); in bnxt_init_chip()
11038 rc = bnxt_cfg_rx_mode(bp); in bnxt_init_chip()
11043 rc = bnxt_hwrm_set_coal(bp); in bnxt_init_chip()
11045 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", in bnxt_init_chip()
11048 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { in bnxt_init_chip()
11049 rc = bnxt_setup_nitroa0_vnic(bp); in bnxt_init_chip()
11051 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", in bnxt_init_chip()
11055 if (BNXT_VF(bp)) { in bnxt_init_chip()
11056 bnxt_hwrm_func_qcfg(bp); in bnxt_init_chip()
11057 netdev_update_features(bp->dev); in bnxt_init_chip()
11063 bnxt_hwrm_resource_free(bp, 0, true); in bnxt_init_chip()
11068 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) in bnxt_shutdown_nic() argument
11070 bnxt_hwrm_resource_free(bp, 1, irq_re_init); in bnxt_shutdown_nic()
11074 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) in bnxt_init_nic() argument
11076 bnxt_init_cp_rings(bp); in bnxt_init_nic()
11077 bnxt_init_rx_rings(bp); in bnxt_init_nic()
11078 bnxt_init_tx_rings(bp); in bnxt_init_nic()
11079 bnxt_init_ring_grps(bp, irq_re_init); in bnxt_init_nic()
11080 bnxt_init_vnics(bp); in bnxt_init_nic()
11082 return bnxt_init_chip(bp, irq_re_init); in bnxt_init_nic()
11085 static int bnxt_set_real_num_queues(struct bnxt *bp) in bnxt_set_real_num_queues() argument
11088 struct net_device *dev = bp->dev; in bnxt_set_real_num_queues()
11090 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - in bnxt_set_real_num_queues()
11091 bp->tx_nr_rings_xdp); in bnxt_set_real_num_queues()
11095 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); in bnxt_set_real_num_queues()
11100 if (bp->flags & BNXT_FLAG_RFS) in bnxt_set_real_num_queues()
11101 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); in bnxt_set_real_num_queues()
11107 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, in __bnxt_trim_rings() argument
11131 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) in __bnxt_num_tx_to_cp() argument
11136 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) in bnxt_num_tx_to_cp() argument
11138 int tcs = bp->num_tc; in bnxt_num_tx_to_cp()
11142 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); in bnxt_num_tx_to_cp()
11145 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) in bnxt_num_cp_to_tx() argument
11147 int tcs = bp->num_tc; in bnxt_num_cp_to_tx()
11149 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + in bnxt_num_cp_to_tx()
11150 bp->tx_nr_rings_xdp; in bnxt_num_cp_to_tx()
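
The two helpers above convert between completion-ring counts and TX-ring counts when multiple traffic classes share completion rings. A standalone sketch of that arithmetic follows; the forward mapping is an assumption (the body of __bnxt_num_tx_to_cp() is not among the matched lines), chosen as the exact inverse of the visible bnxt_num_cp_to_tx() at line 11149, with hypothetical ring counts:

    #include <assert.h>
    #include <stdio.h>

    /* Assumed forward mapping: beyond the XDP rings, one completion ring
     * backs one TX ring per traffic class (tcs).
     */
    static int num_tx_to_cp(int tx, int tcs, int tx_xdp)
    {
            return (tx - tx_xdp) / tcs + tx_xdp;
    }

    static int num_cp_to_tx(int tx_cp, int tcs, int tx_xdp)
    {
            return (tx_cp - tx_xdp) * tcs + tx_xdp;   /* as at line 11149 */
    }

    int main(void)
    {
            int tcs = 2, tx_xdp = 4, tx = 20;         /* hypothetical */
            int tx_cp = num_tx_to_cp(tx, tcs, tx_xdp);

            printf("tx=%d -> tx_cp=%d\n", tx, tx_cp); /* 20 -> 12 */
            assert(num_cp_to_tx(tx_cp, tcs, tx_xdp) == tx);
            return 0;
    }
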
11153 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, in bnxt_trim_rings() argument
11156 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); in bnxt_trim_rings()
11161 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); in bnxt_trim_rings()
11165 *tx = bnxt_num_cp_to_tx(bp, tx_cp); in bnxt_trim_rings()
11168 return __bnxt_trim_rings(bp, rx, tx, max, sh); in bnxt_trim_rings()
11171 static void bnxt_setup_msix(struct bnxt *bp) in bnxt_setup_msix() argument
11173 const int len = sizeof(bp->irq_tbl[0].name); in bnxt_setup_msix()
11174 struct net_device *dev = bp->dev; in bnxt_setup_msix()
11177 tcs = bp->num_tc; in bnxt_setup_msix()
11182 count = bp->tx_nr_rings_per_tc; in bnxt_setup_msix()
11183 off = BNXT_TC_TO_RING_BASE(bp, i); in bnxt_setup_msix()
11188 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_setup_msix()
11189 int map_idx = bnxt_cp_num_to_irq_num(bp, i); in bnxt_setup_msix()
11192 if (bp->flags & BNXT_FLAG_SHARED_RINGS) in bnxt_setup_msix()
11194 else if (i < bp->rx_nr_rings) in bnxt_setup_msix()
11199 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, in bnxt_setup_msix()
11201 bp->irq_tbl[map_idx].handler = bnxt_msix; in bnxt_setup_msix()
11205 static int bnxt_init_int_mode(struct bnxt *bp);
11207 static int bnxt_change_msix(struct bnxt *bp, int total) in bnxt_change_msix() argument
11213 for (i = bp->total_irqs; i < total; i++) { in bnxt_change_msix()
11214 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL); in bnxt_change_msix()
11216 return bp->total_irqs; in bnxt_change_msix()
11217 bp->irq_tbl[i].vector = map.virq; in bnxt_change_msix()
11218 bp->total_irqs++; in bnxt_change_msix()
11222 for (i = bp->total_irqs; i > total; i--) { in bnxt_change_msix()
11224 map.virq = bp->irq_tbl[i - 1].vector; in bnxt_change_msix()
11225 pci_msix_free_irq(bp->pdev, map); in bnxt_change_msix()
11226 bp->total_irqs--; in bnxt_change_msix()
11228 return bp->total_irqs; in bnxt_change_msix()
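
bnxt_change_msix() above resizes the MSI-X set one vector at a time with the dynamic MSI-X API, recording each new Linux IRQ number in the driver's IRQ table. A condensed kernel-style sketch of the same grow/shrink pattern (the irq_ent table type is a stand-in; this path assumes pci_msix_can_alloc_dyn() returned true):

    #include <linux/msi_api.h>
    #include <linux/pci.h>

    struct irq_ent { int vector; };  /* stand-in for the driver's irq table entry */

    static int resize_msix(struct pci_dev *pdev, struct irq_ent *tbl,
                           int nvecs, int want)
    {
            struct msi_map map;
            int i;

            for (i = nvecs; i < want; i++) {          /* grow */
                    map = pci_msix_alloc_irq_at(pdev, i, NULL);
                    if (map.virq < 0)
                            return nvecs;             /* caller sees partial growth */
                    tbl[i].vector = map.virq;
                    nvecs++;
            }
            for (i = nvecs; i > want; i--) {          /* shrink */
                    map.index = i - 1;
                    map.virq = tbl[i - 1].vector;
                    pci_msix_free_irq(pdev, map);
                    nvecs--;
            }
            return nvecs;
    }
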
11231 static int bnxt_setup_int_mode(struct bnxt *bp) in bnxt_setup_int_mode() argument
11235 if (!bp->irq_tbl) { in bnxt_setup_int_mode()
11236 rc = bnxt_init_int_mode(bp); in bnxt_setup_int_mode()
11237 if (rc || !bp->irq_tbl) in bnxt_setup_int_mode()
11241 bnxt_setup_msix(bp); in bnxt_setup_int_mode()
11243 rc = bnxt_set_real_num_queues(bp); in bnxt_setup_int_mode()
11247 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) in bnxt_get_max_func_rss_ctxs() argument
11249 return bp->hw_resc.max_rsscos_ctxs; in bnxt_get_max_func_rss_ctxs()
11252 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) in bnxt_get_max_func_vnics() argument
11254 return bp->hw_resc.max_vnics; in bnxt_get_max_func_vnics()
11257 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) in bnxt_get_max_func_stat_ctxs() argument
11259 return bp->hw_resc.max_stat_ctxs; in bnxt_get_max_func_stat_ctxs()
11262 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) in bnxt_get_max_func_cp_rings() argument
11264 return bp->hw_resc.max_cp_rings; in bnxt_get_max_func_cp_rings()
11267 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) in bnxt_get_max_func_cp_rings_for_en() argument
11269 unsigned int cp = bp->hw_resc.max_cp_rings; in bnxt_get_max_func_cp_rings_for_en()
11271 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_get_max_func_cp_rings_for_en()
11272 cp -= bnxt_get_ulp_msix_num(bp); in bnxt_get_max_func_cp_rings_for_en()
11277 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) in bnxt_get_max_func_irqs() argument
11279 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_get_max_func_irqs()
11281 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_get_max_func_irqs()
11287 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) in bnxt_set_max_func_irqs() argument
11289 bp->hw_resc.max_irqs = max_irqs; in bnxt_set_max_func_irqs()
11292 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) in bnxt_get_avail_cp_rings_for_en() argument
11296 cp = bnxt_get_max_func_cp_rings_for_en(bp); in bnxt_get_avail_cp_rings_for_en()
11297 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_get_avail_cp_rings_for_en()
11298 return cp - bp->rx_nr_rings - bp->tx_nr_rings; in bnxt_get_avail_cp_rings_for_en()
11300 return cp - bp->cp_nr_rings; in bnxt_get_avail_cp_rings_for_en()
11303 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) in bnxt_get_avail_stat_ctxs_for_en() argument
11305 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); in bnxt_get_avail_stat_ctxs_for_en()
11308 static int bnxt_get_avail_msix(struct bnxt *bp, int num) in bnxt_get_avail_msix() argument
11310 int max_irq = bnxt_get_max_func_irqs(bp); in bnxt_get_avail_msix()
11311 int total_req = bp->cp_nr_rings + num; in bnxt_get_avail_msix()
11314 num = max_irq - bp->cp_nr_rings; in bnxt_get_avail_msix()
11321 static int bnxt_get_num_msix(struct bnxt *bp) in bnxt_get_num_msix() argument
11323 if (!BNXT_NEW_RM(bp)) in bnxt_get_num_msix()
11324 return bnxt_get_max_func_irqs(bp); in bnxt_get_num_msix()
11326 return bnxt_nq_rings_in_use(bp); in bnxt_get_num_msix()
11329 static int bnxt_init_int_mode(struct bnxt *bp) in bnxt_init_int_mode() argument
11333 total_vecs = bnxt_get_num_msix(bp); in bnxt_init_int_mode()
11334 max = bnxt_get_max_func_irqs(bp); in bnxt_init_int_mode()
11341 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) in bnxt_init_int_mode()
11344 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs, in bnxt_init_int_mode()
11346 ulp_msix = bnxt_get_ulp_msix_num(bp); in bnxt_init_int_mode()
11353 if (pci_msix_can_alloc_dyn(bp->pdev)) in bnxt_init_int_mode()
11355 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL); in bnxt_init_int_mode()
11356 if (bp->irq_tbl) { in bnxt_init_int_mode()
11358 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i); in bnxt_init_int_mode()
11360 bp->total_irqs = total_vecs; in bnxt_init_int_mode()
11362 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, in bnxt_init_int_mode()
11367 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); in bnxt_init_int_mode()
11368 bp->cp_nr_rings = (min == 1) ? in bnxt_init_int_mode()
11369 max_t(int, tx_cp, bp->rx_nr_rings) : in bnxt_init_int_mode()
11370 tx_cp + bp->rx_nr_rings; in bnxt_init_int_mode()
11379 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc); in bnxt_init_int_mode()
11380 kfree(bp->irq_tbl); in bnxt_init_int_mode()
11381 bp->irq_tbl = NULL; in bnxt_init_int_mode()
11382 pci_free_irq_vectors(bp->pdev); in bnxt_init_int_mode()
11386 static void bnxt_clear_int_mode(struct bnxt *bp) in bnxt_clear_int_mode() argument
11388 pci_free_irq_vectors(bp->pdev); in bnxt_clear_int_mode()
11390 kfree(bp->irq_tbl); in bnxt_clear_int_mode()
11391 bp->irq_tbl = NULL; in bnxt_clear_int_mode()
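
When dynamic MSI-X is unavailable, bnxt_init_int_mode() above takes the classic static path: request a vector range, then cache the Linux IRQ number for each vector actually granted. A self-contained sketch of that allocate-then-record shape (the minimum of 1 and the bare PCI_IRQ_MSIX flag are illustrative assumptions):

    #include <linux/pci.h>
    #include <linux/slab.h>

    struct irq_ent { int vector; };  /* stand-in for the driver's irq table entry */

    static int init_vectors(struct pci_dev *pdev, struct irq_ent **tblp, int want)
    {
            struct irq_ent *tbl;
            int got, i;

            got = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
            if (got < 0)
                    return got;

            tbl = kcalloc(got, sizeof(*tbl), GFP_KERNEL);
            if (!tbl) {
                    pci_free_irq_vectors(pdev);
                    return -ENOMEM;
            }
            for (i = 0; i < got; i++)
                    tbl[i].vector = pci_irq_vector(pdev, i);

            *tblp = tbl;
            return got;   /* caller trims ring counts down to this */
    }
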
11394 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) in bnxt_reserve_rings() argument
11398 int tcs = bp->num_tc; in bnxt_reserve_rings()
11402 if (!bnxt_need_reserve_rings(bp)) in bnxt_reserve_rings()
11405 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { in bnxt_reserve_rings()
11406 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); in bnxt_reserve_rings()
11408 if (ulp_msix > bp->ulp_num_msix_want) in bnxt_reserve_rings()
11409 ulp_msix = bp->ulp_num_msix_want; in bnxt_reserve_rings()
11410 irqs_required = ulp_msix + bp->cp_nr_rings; in bnxt_reserve_rings()
11412 irqs_required = bnxt_get_num_msix(bp); in bnxt_reserve_rings()
11415 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { in bnxt_reserve_rings()
11417 if (!pci_msix_can_alloc_dyn(bp->pdev)) { in bnxt_reserve_rings()
11418 bnxt_ulp_irq_stop(bp); in bnxt_reserve_rings()
11419 bnxt_clear_int_mode(bp); in bnxt_reserve_rings()
11423 rc = __bnxt_reserve_rings(bp); in bnxt_reserve_rings()
11426 rc = bnxt_init_int_mode(bp); in bnxt_reserve_rings()
11427 bnxt_ulp_irq_restart(bp, rc); in bnxt_reserve_rings()
11429 if (bnxt_change_msix(bp, irqs_required) != irqs_required) in bnxt_reserve_rings()
11433 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); in bnxt_reserve_rings()
11436 if (tcs && (bp->tx_nr_rings_per_tc * tcs != in bnxt_reserve_rings()
11437 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { in bnxt_reserve_rings()
11438 netdev_err(bp->dev, "tx ring reservation failure\n"); in bnxt_reserve_rings()
11439 netdev_reset_tc(bp->dev); in bnxt_reserve_rings()
11440 bp->num_tc = 0; in bnxt_reserve_rings()
11441 if (bp->tx_nr_rings_xdp) in bnxt_reserve_rings()
11442 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; in bnxt_reserve_rings()
11444 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_reserve_rings()
11450 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx) in bnxt_tx_queue_stop() argument
11457 bnapi = bp->bnapi[idx]; in bnxt_tx_queue_stop()
11463 txq = netdev_get_tx_queue(bp->dev, txr->txq_index); in bnxt_tx_queue_stop()
11471 if (!bp->tph_mode) in bnxt_tx_queue_stop()
11474 bnxt_hwrm_tx_ring_free(bp, txr, true); in bnxt_tx_queue_stop()
11475 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr); in bnxt_tx_queue_stop()
11476 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index); in bnxt_tx_queue_stop()
11477 bnxt_clear_one_cp_ring(bp, txr->tx_cpr); in bnxt_tx_queue_stop()
11481 static int bnxt_tx_queue_start(struct bnxt *bp, int idx) in bnxt_tx_queue_start() argument
11488 bnapi = bp->bnapi[idx]; in bnxt_tx_queue_start()
11493 if (!bp->tph_mode) in bnxt_tx_queue_start()
11496 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); in bnxt_tx_queue_start()
11500 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false); in bnxt_tx_queue_start()
11514 txq = netdev_get_tx_queue(bp->dev, txr->txq_index); in bnxt_tx_queue_start()
11531 if (!irq->bp->tph_mode) in bnxt_irq_affinity_notify()
11536 if (irq->ring_nr >= irq->bp->rx_nr_rings) in bnxt_irq_affinity_notify()
11539 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, in bnxt_irq_affinity_notify()
11543 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag)) in bnxt_irq_affinity_notify()
11546 netdev_lock(irq->bp->dev); in bnxt_irq_affinity_notify()
11547 if (netif_running(irq->bp->dev)) { in bnxt_irq_affinity_notify()
11548 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr); in bnxt_irq_affinity_notify()
11550 netdev_err(irq->bp->dev, in bnxt_irq_affinity_notify()
11553 netdev_unlock(irq->bp->dev); in bnxt_irq_affinity_notify()
11564 if (!irq->bp->tph_mode) in bnxt_irq_affinity_release()
11567 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) { in bnxt_irq_affinity_release()
11568 netdev_err(irq->bp->dev, in bnxt_irq_affinity_release()
11580 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq) in bnxt_register_irq_notifier() argument
11584 irq->bp = bp; in bnxt_register_irq_notifier()
11587 if (!bp->tph_mode) in bnxt_register_irq_notifier()
11599 static void bnxt_free_irq(struct bnxt *bp) in bnxt_free_irq() argument
11605 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); in bnxt_free_irq()
11606 bp->dev->rx_cpu_rmap = NULL; in bnxt_free_irq()
11608 if (!bp->irq_tbl || !bp->bnapi) in bnxt_free_irq()
11611 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_irq()
11612 int map_idx = bnxt_cp_num_to_irq_num(bp, i); in bnxt_free_irq()
11614 irq = &bp->irq_tbl[map_idx]; in bnxt_free_irq()
11624 free_irq(irq->vector, bp->bnapi[i]); in bnxt_free_irq()
11631 pcie_disable_tph(bp->pdev); in bnxt_free_irq()
11632 bp->tph_mode = 0; in bnxt_free_irq()
11635 static int bnxt_request_irq(struct bnxt *bp) in bnxt_request_irq() argument
11641 rc = bnxt_setup_int_mode(bp); in bnxt_request_irq()
11643 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", in bnxt_request_irq()
11648 rmap = bp->dev->rx_cpu_rmap; in bnxt_request_irq()
11652 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE); in bnxt_request_irq()
11654 bp->tph_mode = PCI_TPH_ST_IV_MODE; in bnxt_request_irq()
11656 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { in bnxt_request_irq()
11657 int map_idx = bnxt_cp_num_to_irq_num(bp, i); in bnxt_request_irq()
11658 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; in bnxt_request_irq()
11661 rmap && bp->bnapi[i]->rx_ring) { in bnxt_request_irq()
11664 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", in bnxt_request_irq()
11670 bp->bnapi[i]); in bnxt_request_irq()
11674 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector); in bnxt_request_irq()
11678 int numa_node = dev_to_node(&bp->pdev->dev); in bnxt_request_irq()
11688 netdev_warn(bp->dev, in bnxt_request_irq()
11694 bnxt_register_irq_notifier(bp, irq); in bnxt_request_irq()
11697 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, in bnxt_request_irq()
11702 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag); in bnxt_request_irq()
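
The TPH calls in bnxt_request_irq() and bnxt_irq_affinity_notify() above program per-vector steering tags so device writes land close to the CPU servicing the interrupt. A hedged sketch of the retarget step; how the target CPU is picked from the new affinity mask is not shown in this listing and is assumed here to be supplied by the caller:

    #include <linux/pci.h>
    #include <linux/pci-tph.h>

    static void retarget_steering_tag(struct pci_dev *pdev, u16 msix_nr,
                                      unsigned int cpu)
    {
            u16 tag;

            if (pcie_tph_get_cpu_st(pdev, TPH_MEM_TYPE_VM, cpu, &tag))
                    return;   /* no tag for this CPU; leave the entry alone */
            if (pcie_tph_set_st_entry(pdev, msix_nr, tag))
                    pr_warn("failed to set steering tag for vector %u\n", msix_nr);
    }
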
11708 static void bnxt_del_napi(struct bnxt *bp) in bnxt_del_napi() argument
11712 if (!bp->bnapi) in bnxt_del_napi()
11715 for (i = 0; i < bp->rx_nr_rings; i++) in bnxt_del_napi()
11716 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); in bnxt_del_napi()
11717 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) in bnxt_del_napi()
11718 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); in bnxt_del_napi()
11720 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_del_napi()
11721 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_del_napi()
11731 static void bnxt_init_napi(struct bnxt *bp) in bnxt_init_napi() argument
11734 unsigned int cp_nr_rings = bp->cp_nr_rings; in bnxt_init_napi()
11738 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_init_napi()
11740 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_init_napi()
11743 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); in bnxt_init_napi()
11746 bnapi = bp->bnapi[i]; in bnxt_init_napi()
11747 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn, in bnxt_init_napi()
11750 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { in bnxt_init_napi()
11751 bnapi = bp->bnapi[cp_nr_rings]; in bnxt_init_napi()
11752 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); in bnxt_init_napi()
11756 static void bnxt_disable_napi(struct bnxt *bp) in bnxt_disable_napi() argument
11760 if (!bp->bnapi || in bnxt_disable_napi()
11761 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) in bnxt_disable_napi()
11764 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_napi()
11765 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_disable_napi()
11777 static void bnxt_enable_napi(struct bnxt *bp) in bnxt_enable_napi() argument
11781 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); in bnxt_enable_napi()
11782 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_napi()
11783 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_enable_napi()
11799 void bnxt_tx_disable(struct bnxt *bp) in bnxt_tx_disable() argument
11804 if (bp->tx_ring) { in bnxt_tx_disable()
11805 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_disable()
11806 txr = &bp->tx_ring[i]; in bnxt_tx_disable()
11813 netif_carrier_off(bp->dev); in bnxt_tx_disable()
11815 netif_tx_disable(bp->dev); in bnxt_tx_disable()
11818 void bnxt_tx_enable(struct bnxt *bp) in bnxt_tx_enable() argument
11823 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_enable()
11824 txr = &bp->tx_ring[i]; in bnxt_tx_enable()
11829 netif_tx_wake_all_queues(bp->dev); in bnxt_tx_enable()
11830 if (BNXT_LINK_IS_UP(bp)) in bnxt_tx_enable()
11831 netif_carrier_on(bp->dev); in bnxt_tx_enable()
11858 void bnxt_report_link(struct bnxt *bp) in bnxt_report_link() argument
11860 if (BNXT_LINK_IS_UP(bp)) { in bnxt_report_link()
11867 netif_carrier_on(bp->dev); in bnxt_report_link()
11868 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); in bnxt_report_link()
11870 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); in bnxt_report_link()
11873 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) in bnxt_report_link()
11877 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) in bnxt_report_link()
11879 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) in bnxt_report_link()
11881 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) in bnxt_report_link()
11885 if (bp->link_info.phy_qcfg_resp.option_flags & in bnxt_report_link()
11887 u8 sig_mode = bp->link_info.active_fec_sig_mode & in bnxt_report_link()
11903 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", in bnxt_report_link()
11905 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) in bnxt_report_link()
11906 netdev_info(bp->dev, "EEE is %s\n", in bnxt_report_link()
11907 bp->eee.eee_active ? "active" : in bnxt_report_link()
11909 fec = bp->link_info.fec_cfg; in bnxt_report_link()
11911 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", in bnxt_report_link()
11913 bnxt_report_fec(&bp->link_info)); in bnxt_report_link()
11915 netif_carrier_off(bp->dev); in bnxt_report_link()
11916 netdev_err(bp->dev, "NIC Link is Down\n"); in bnxt_report_link()
11932 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) in bnxt_hwrm_phy_qcaps() argument
11934 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_hwrm_phy_qcaps()
11939 if (bp->hwrm_spec_code < 0x10201) in bnxt_hwrm_phy_qcaps()
11942 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); in bnxt_hwrm_phy_qcaps()
11946 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_phy_qcaps()
11947 rc = hwrm_req_send(bp, req); in bnxt_hwrm_phy_qcaps()
11951 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); in bnxt_hwrm_phy_qcaps()
11953 struct ethtool_keee *eee = &bp->eee; in bnxt_hwrm_phy_qcaps()
11957 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & in bnxt_hwrm_phy_qcaps()
11959 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & in bnxt_hwrm_phy_qcaps()
11963 if (bp->hwrm_spec_code >= 0x10a01) { in bnxt_hwrm_phy_qcaps()
11966 netdev_warn(bp->dev, "Ethernet link disabled\n"); in bnxt_hwrm_phy_qcaps()
11969 netdev_info(bp->dev, "Ethernet link enabled\n"); in bnxt_hwrm_phy_qcaps()
11986 bp->port_count = resp->port_cnt; in bnxt_hwrm_phy_qcaps()
11989 hwrm_req_drop(bp, req); in bnxt_hwrm_phy_qcaps()
11993 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp) in bnxt_hwrm_mac_qcaps() argument
11999 if (bp->hwrm_spec_code < 0x10a03) in bnxt_hwrm_mac_qcaps()
12002 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS); in bnxt_hwrm_mac_qcaps()
12006 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_mac_qcaps()
12007 rc = hwrm_req_send_silent(bp, req); in bnxt_hwrm_mac_qcaps()
12009 bp->mac_flags = resp->flags; in bnxt_hwrm_mac_qcaps()
12010 hwrm_req_drop(bp, req); in bnxt_hwrm_mac_qcaps()
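
The query helpers above (bnxt_hwrm_phy_qcaps(), bnxt_hwrm_mac_qcaps(), and the ones that follow) all share the driver's HWRM request lifecycle: init allocates the request, hold pins the response buffer so it remains valid after the send, and drop releases both. A schematic sketch mirroring bnxt_hwrm_mac_qcaps():

    static int query_mac_caps(struct bnxt *bp, u8 *flags)
    {
            struct hwrm_port_mac_qcaps_output *resp;
            struct hwrm_port_mac_qcaps_input *req;
            int rc;

            rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
            if (rc)
                    return rc;

            resp = hwrm_req_hold(bp, req);       /* keep resp valid past the send */
            rc = hwrm_req_send_silent(bp, req);  /* silent: no log on FW error */
            if (!rc)
                    *flags = resp->flags;
            hwrm_req_drop(bp, req);              /* releases req and resp */
            return rc;
    }
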
12022 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); in bnxt_support_speed_dropped() local
12027 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_support_speed_dropped()
12048 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) in bnxt_update_link() argument
12050 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_update_link()
12057 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); in bnxt_update_link()
12061 resp = hwrm_req_hold(bp, req); in bnxt_update_link()
12062 rc = hwrm_req_send(bp, req); in bnxt_update_link()
12064 hwrm_req_drop(bp, req); in bnxt_update_link()
12065 if (BNXT_VF(bp) && rc == -ENODEV) { in bnxt_update_link()
12066 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); in bnxt_update_link()
12075 if (bp->hwrm_spec_code >= 0x10800) in bnxt_update_link()
12085 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) in bnxt_update_link()
12117 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { in bnxt_update_link()
12118 struct ethtool_keee *eee = &bp->eee; in bnxt_update_link()
12152 if (bp->hwrm_spec_code >= 0x10504) { in bnxt_update_link()
12163 bnxt_report_link(bp); in bnxt_update_link()
12168 hwrm_req_drop(bp, req); in bnxt_update_link()
12170 if (!BNXT_PHY_CFG_ABLE(bp)) in bnxt_update_link()
12175 bnxt_hwrm_set_link_setting(bp, true, false); in bnxt_update_link()
12179 static void bnxt_get_port_module_status(struct bnxt *bp) in bnxt_get_port_module_status() argument
12181 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_get_port_module_status()
12185 if (bnxt_update_link(bp, true)) in bnxt_get_port_module_status()
12193 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", in bnxt_get_port_module_status()
12194 bp->pf.port_id); in bnxt_get_port_module_status()
12195 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_get_port_module_status()
12196 netdev_warn(bp->dev, "Module part number %s\n", in bnxt_get_port_module_status()
12200 netdev_warn(bp->dev, "TX is disabled\n"); in bnxt_get_port_module_status()
12202 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); in bnxt_get_port_module_status()
12207 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) in bnxt_hwrm_set_pause_common() argument
12209 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { in bnxt_hwrm_set_pause_common()
12210 if (bp->hwrm_spec_code >= 0x10201) in bnxt_hwrm_set_pause_common()
12213 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) in bnxt_hwrm_set_pause_common()
12215 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) in bnxt_hwrm_set_pause_common()
12220 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) in bnxt_hwrm_set_pause_common()
12222 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) in bnxt_hwrm_set_pause_common()
12226 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_hwrm_set_pause_common()
12234 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) in bnxt_hwrm_set_link_common() argument
12236 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { in bnxt_hwrm_set_link_common()
12238 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_hwrm_set_link_common()
12241 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); in bnxt_hwrm_set_link_common()
12242 } else if (bp->link_info.advertising) { in bnxt_hwrm_set_link_common()
12244 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); in bnxt_hwrm_set_link_common()
12246 if (bp->link_info.advertising_pam4) { in bnxt_hwrm_set_link_common()
12250 cpu_to_le16(bp->link_info.advertising_pam4); in bnxt_hwrm_set_link_common()
12256 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_hwrm_set_link_common()
12257 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); in bnxt_hwrm_set_link_common()
12259 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", in bnxt_hwrm_set_link_common()
12260 (u32)bp->link_info.req_link_speed); in bnxt_hwrm_set_link_common()
12261 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { in bnxt_hwrm_set_link_common()
12262 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); in bnxt_hwrm_set_link_common()
12265 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); in bnxt_hwrm_set_link_common()
12273 int bnxt_hwrm_set_pause(struct bnxt *bp) in bnxt_hwrm_set_pause() argument
12278 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); in bnxt_hwrm_set_pause()
12282 bnxt_hwrm_set_pause_common(bp, req); in bnxt_hwrm_set_pause()
12284 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || in bnxt_hwrm_set_pause()
12285 bp->link_info.force_link_chng) in bnxt_hwrm_set_pause()
12286 bnxt_hwrm_set_link_common(bp, req); in bnxt_hwrm_set_pause()
12288 rc = hwrm_req_send(bp, req); in bnxt_hwrm_set_pause()
12289 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { in bnxt_hwrm_set_pause()
12294 bp->link_info.pause = in bnxt_hwrm_set_pause()
12295 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; in bnxt_hwrm_set_pause()
12296 bp->link_info.auto_pause_setting = 0; in bnxt_hwrm_set_pause()
12297 if (!bp->link_info.force_link_chng) in bnxt_hwrm_set_pause()
12298 bnxt_report_link(bp); in bnxt_hwrm_set_pause()
12300 bp->link_info.force_link_chng = false; in bnxt_hwrm_set_pause()
12304 static void bnxt_hwrm_set_eee(struct bnxt *bp, in bnxt_hwrm_set_eee() argument
12307 struct ethtool_keee *eee = &bp->eee; in bnxt_hwrm_set_eee()
12327 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) in bnxt_hwrm_set_link_setting() argument
12332 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); in bnxt_hwrm_set_link_setting()
12337 bnxt_hwrm_set_pause_common(bp, req); in bnxt_hwrm_set_link_setting()
12339 bnxt_hwrm_set_link_common(bp, req); in bnxt_hwrm_set_link_setting()
12342 bnxt_hwrm_set_eee(bp, req); in bnxt_hwrm_set_link_setting()
12343 return hwrm_req_send(bp, req); in bnxt_hwrm_set_link_setting()
12346 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) in bnxt_hwrm_shutdown_link() argument
12351 if (!BNXT_SINGLE_PF(bp)) in bnxt_hwrm_shutdown_link()
12354 if (pci_num_vf(bp->pdev) && in bnxt_hwrm_shutdown_link()
12355 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) in bnxt_hwrm_shutdown_link()
12358 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); in bnxt_hwrm_shutdown_link()
12363 rc = hwrm_req_send(bp, req); in bnxt_hwrm_shutdown_link()
12365 mutex_lock(&bp->link_lock); in bnxt_hwrm_shutdown_link()
12371 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; in bnxt_hwrm_shutdown_link()
12372 mutex_unlock(&bp->link_lock); in bnxt_hwrm_shutdown_link()
12377 static int bnxt_fw_reset_via_optee(struct bnxt *bp) in bnxt_fw_reset_via_optee() argument
12383 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); in bnxt_fw_reset_via_optee()
12387 netdev_err(bp->dev, "OP-TEE not supported\n"); in bnxt_fw_reset_via_optee()
12392 static int bnxt_try_recover_fw(struct bnxt *bp) in bnxt_try_recover_fw() argument
12394 if (bp->fw_health && bp->fw_health->status_reliable) { in bnxt_try_recover_fw()
12399 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); in bnxt_try_recover_fw()
12400 rc = bnxt_hwrm_poll(bp); in bnxt_try_recover_fw()
12408 netdev_err(bp->dev, in bnxt_try_recover_fw()
12414 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); in bnxt_try_recover_fw()
12415 return bnxt_fw_reset_via_optee(bp); in bnxt_try_recover_fw()
12423 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) in bnxt_clear_reservations() argument
12425 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_clear_reservations()
12427 if (!BNXT_NEW_RM(bp)) in bnxt_clear_reservations()
12439 bp->tx_nr_rings = 0; in bnxt_clear_reservations()
12440 bp->rx_nr_rings = 0; in bnxt_clear_reservations()
12444 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) in bnxt_cancel_reservations() argument
12448 if (!BNXT_NEW_RM(bp)) in bnxt_cancel_reservations()
12451 rc = bnxt_hwrm_func_resc_qcaps(bp, true); in bnxt_cancel_reservations()
12453 netdev_err(bp->dev, "resc_qcaps failed\n"); in bnxt_cancel_reservations()
12455 bnxt_clear_reservations(bp, fw_reset); in bnxt_cancel_reservations()
12460 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) in bnxt_hwrm_if_change() argument
12470 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT); in bnxt_hwrm_if_change()
12471 bp->fw_reset_state = 0; in bnxt_hwrm_if_change()
12473 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) in bnxt_hwrm_if_change()
12476 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); in bnxt_hwrm_if_change()
12482 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_if_change()
12484 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); in bnxt_hwrm_if_change()
12486 rc = hwrm_req_send(bp, req); in bnxt_hwrm_if_change()
12495 hwrm_req_drop(bp, req); in bnxt_hwrm_if_change()
12500 rc = bnxt_try_recover_fw(bp); in bnxt_hwrm_if_change()
12503 hwrm_req_drop(bp, req); in bnxt_hwrm_if_change()
12508 bnxt_inv_fw_health_reg(bp); in bnxt_hwrm_if_change()
12515 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) in bnxt_hwrm_if_change()
12518 bnxt_remap_fw_health_regs(bp); in bnxt_hwrm_if_change()
12520 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { in bnxt_hwrm_if_change()
12521 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); in bnxt_hwrm_if_change()
12522 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); in bnxt_hwrm_if_change()
12530 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); in bnxt_hwrm_if_change()
12531 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_hwrm_if_change()
12532 bnxt_ulp_irq_stop(bp); in bnxt_hwrm_if_change()
12533 bnxt_free_ctx_mem(bp, false); in bnxt_hwrm_if_change()
12534 bnxt_dcb_free(bp); in bnxt_hwrm_if_change()
12535 rc = bnxt_fw_init_one(bp); in bnxt_hwrm_if_change()
12537 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); in bnxt_hwrm_if_change()
12538 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); in bnxt_hwrm_if_change()
12542 bnxt_clear_int_mode(bp); in bnxt_hwrm_if_change()
12544 rc = bnxt_cancel_reservations(bp, fw_reset); in bnxt_hwrm_if_change()
12549 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) in bnxt_hwrm_port_led_qcaps() argument
12553 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_port_led_qcaps()
12556 bp->num_leds = 0; in bnxt_hwrm_port_led_qcaps()
12557 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_port_led_qcaps()
12560 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); in bnxt_hwrm_port_led_qcaps()
12565 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_port_led_qcaps()
12566 rc = hwrm_req_send(bp, req); in bnxt_hwrm_port_led_qcaps()
12568 hwrm_req_drop(bp, req); in bnxt_hwrm_port_led_qcaps()
12574 bp->num_leds = resp->num_leds; in bnxt_hwrm_port_led_qcaps()
12575 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * in bnxt_hwrm_port_led_qcaps()
12576 bp->num_leds); in bnxt_hwrm_port_led_qcaps()
12577 for (i = 0; i < bp->num_leds; i++) { in bnxt_hwrm_port_led_qcaps()
12578 struct bnxt_led_info *led = &bp->leds[i]; in bnxt_hwrm_port_led_qcaps()
12583 bp->num_leds = 0; in bnxt_hwrm_port_led_qcaps()
12588 hwrm_req_drop(bp, req); in bnxt_hwrm_port_led_qcaps()
12592 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) in bnxt_hwrm_alloc_wol_fltr() argument
12598 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); in bnxt_hwrm_alloc_wol_fltr()
12602 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_alloc_wol_fltr()
12605 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); in bnxt_hwrm_alloc_wol_fltr()
12607 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_alloc_wol_fltr()
12608 rc = hwrm_req_send(bp, req); in bnxt_hwrm_alloc_wol_fltr()
12610 bp->wol_filter_id = resp->wol_filter_id; in bnxt_hwrm_alloc_wol_fltr()
12611 hwrm_req_drop(bp, req); in bnxt_hwrm_alloc_wol_fltr()
12615 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) in bnxt_hwrm_free_wol_fltr() argument
12620 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); in bnxt_hwrm_free_wol_fltr()
12624 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_free_wol_fltr()
12626 req->wol_filter_id = bp->wol_filter_id; in bnxt_hwrm_free_wol_fltr()
12628 return hwrm_req_send(bp, req); in bnxt_hwrm_free_wol_fltr()
12631 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) in bnxt_hwrm_get_wol_fltrs() argument
12638 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); in bnxt_hwrm_get_wol_fltrs()
12642 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_get_wol_fltrs()
12644 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_get_wol_fltrs()
12645 rc = hwrm_req_send(bp, req); in bnxt_hwrm_get_wol_fltrs()
12651 bp->wol = 1; in bnxt_hwrm_get_wol_fltrs()
12652 bp->wol_filter_id = resp->wol_filter_id; in bnxt_hwrm_get_wol_fltrs()
12656 hwrm_req_drop(bp, req); in bnxt_hwrm_get_wol_fltrs()
12660 static void bnxt_get_wol_settings(struct bnxt *bp) in bnxt_get_wol_settings() argument
12664 bp->wol = 0; in bnxt_get_wol_settings()
12665 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) in bnxt_get_wol_settings()
12669 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); in bnxt_get_wol_settings()
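
bnxt_get_wol_settings() above walks the firmware's WoL filter table by feeding each returned handle back into the query. A sketch of the loop; the terminating handle values (0 and 0xffff) are an assumption, since the loop condition itself is not among the matched lines:

    static void get_wol_settings(struct bnxt *bp)
    {
            u16 handle = 0;

            bp->wol = 0;
            do {
                    handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
            } while (handle && handle != 0xffff);  /* assumed end-of-table markers */
    }
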
12673 static bool bnxt_eee_config_ok(struct bnxt *bp) in bnxt_eee_config_ok() argument
12675 struct ethtool_keee *eee = &bp->eee; in bnxt_eee_config_ok()
12676 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_eee_config_ok()
12678 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) in bnxt_eee_config_ok()
12700 static int bnxt_update_phy_setting(struct bnxt *bp) in bnxt_update_phy_setting() argument
12706 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_update_phy_setting()
12708 rc = bnxt_update_link(bp, true); in bnxt_update_phy_setting()
12710 netdev_err(bp->dev, "failed to update link (rc: %x)\n", in bnxt_update_phy_setting()
12714 if (!BNXT_SINGLE_PF(bp)) in bnxt_update_phy_setting()
12741 if (!BNXT_LINK_IS_UP(bp)) in bnxt_update_phy_setting()
12744 if (!bnxt_eee_config_ok(bp)) in bnxt_update_phy_setting()
12748 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); in bnxt_update_phy_setting()
12750 rc = bnxt_hwrm_set_pause(bp); in bnxt_update_phy_setting()
12752 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", in bnxt_update_phy_setting()
12760 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12762 static int bnxt_reinit_after_abort(struct bnxt *bp) in bnxt_reinit_after_abort() argument
12766 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_reinit_after_abort()
12769 if (bp->dev->reg_state == NETREG_UNREGISTERED) in bnxt_reinit_after_abort()
12772 rc = bnxt_fw_init_one(bp); in bnxt_reinit_after_abort()
12774 bnxt_clear_int_mode(bp); in bnxt_reinit_after_abort()
12775 rc = bnxt_init_int_mode(bp); in bnxt_reinit_after_abort()
12777 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); in bnxt_reinit_after_abort()
12778 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); in bnxt_reinit_after_abort()
12784 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) in bnxt_cfg_one_usr_fltr() argument
12794 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; in bnxt_cfg_one_usr_fltr()
12797 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { in bnxt_cfg_one_usr_fltr()
12798 bnxt_del_ntp_filter(bp, ntp_fltr); in bnxt_cfg_one_usr_fltr()
12799 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", in bnxt_cfg_one_usr_fltr()
12804 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { in bnxt_cfg_one_usr_fltr()
12805 bnxt_del_l2_filter(bp, l2_fltr); in bnxt_cfg_one_usr_fltr()
12806 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", in bnxt_cfg_one_usr_fltr()
12812 static void bnxt_cfg_usr_fltrs(struct bnxt *bp) in bnxt_cfg_usr_fltrs() argument
12816 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) in bnxt_cfg_usr_fltrs()
12817 bnxt_cfg_one_usr_fltr(bp, usr_fltr); in bnxt_cfg_usr_fltrs()
12820 static int bnxt_set_xps_mapping(struct bnxt *bp) in bnxt_set_xps_mapping() argument
12822 int numa_node = dev_to_node(&bp->pdev->dev); in bnxt_set_xps_mapping()
12829 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); in bnxt_set_xps_mapping()
12838 map_idx = i % bp->tx_nr_rings_per_tc; in bnxt_set_xps_mapping()
12845 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { in bnxt_set_xps_mapping()
12846 map_idx = q_idx % bp->tx_nr_rings_per_tc; in bnxt_set_xps_mapping()
12847 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); in bnxt_set_xps_mapping()
12849 netdev_warn(bp->dev, "Error setting XPS for q:%d\n", in bnxt_set_xps_mapping()
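
bnxt_set_xps_mapping() above builds one cpumask per TX ring within a TC set and points every hardware queue at the mask for its position in the set, so queues q, q + per_tc, q + 2*per_tc, ... share CPUs. A simplified sketch; the driver seeds the masks starting from NUMA-local CPUs (line 12822), which is reduced here to plain round-robin:

    #include <linux/cpumask.h>
    #include <linux/netdevice.h>
    #include <linux/slab.h>

    static int set_xps(struct net_device *dev, int per_tc)
    {
            struct cpumask *q_map;
            int cpu, i = 0, q_idx, rc = 0;

            q_map = kcalloc(per_tc, sizeof(*q_map), GFP_KERNEL);
            if (!q_map)
                    return -ENOMEM;

            for_each_online_cpu(cpu)   /* round-robin CPUs over the masks */
                    cpumask_set_cpu(cpu, &q_map[i++ % per_tc]);

            for (q_idx = 0; q_idx < dev->real_num_tx_queues; q_idx++) {
                    rc = netif_set_xps_queue(dev, &q_map[q_idx % per_tc], q_idx);
                    if (rc)
                            break;
            }
            kfree(q_map);
            return rc;
    }
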
12860 static int bnxt_tx_nr_rings(struct bnxt *bp) in bnxt_tx_nr_rings() argument
12862 return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc : in bnxt_tx_nr_rings()
12863 bp->tx_nr_rings_per_tc; in bnxt_tx_nr_rings()
12866 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp) in bnxt_tx_nr_rings_per_tc() argument
12868 return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings; in bnxt_tx_nr_rings_per_tc()
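
These two helpers round-trip between the total TX ring count and the per-TC count. A standalone numeric check with hypothetical counts:

    #include <assert.h>

    static int tx_nr_rings(int num_tc, int per_tc)
    {
            return num_tc ? per_tc * num_tc : per_tc;  /* line 12862 */
    }

    static int tx_nr_rings_per_tc(int num_tc, int total)
    {
            return num_tc ? total / num_tc : total;    /* line 12868 */
    }

    int main(void)
    {
            int num_tc = 3, per_tc = 5;                /* hypothetical */

            assert(tx_nr_rings(num_tc, per_tc) == 15);
            assert(tx_nr_rings_per_tc(num_tc, 15) == per_tc);
            return 0;
    }
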
12871 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) in __bnxt_open_nic() argument
12875 netif_carrier_off(bp->dev); in __bnxt_open_nic()
12878 rc = bnxt_init_dflt_ring_mode(bp); in __bnxt_open_nic()
12880 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); in __bnxt_open_nic()
12884 rc = bnxt_reserve_rings(bp, irq_re_init); in __bnxt_open_nic()
12889 bp->tx_nr_rings -= bp->tx_nr_rings_xdp; in __bnxt_open_nic()
12890 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); in __bnxt_open_nic()
12891 if (bp->tx_nr_rings_xdp) { in __bnxt_open_nic()
12892 bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc; in __bnxt_open_nic()
12893 bp->tx_nr_rings += bp->tx_nr_rings_xdp; in __bnxt_open_nic()
12895 rc = bnxt_alloc_mem(bp, irq_re_init); in __bnxt_open_nic()
12897 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); in __bnxt_open_nic()
12902 bnxt_init_napi(bp); in __bnxt_open_nic()
12903 rc = bnxt_request_irq(bp); in __bnxt_open_nic()
12905 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); in __bnxt_open_nic()
12910 rc = bnxt_init_nic(bp, irq_re_init); in __bnxt_open_nic()
12912 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); in __bnxt_open_nic()
12916 bnxt_enable_napi(bp); in __bnxt_open_nic()
12917 bnxt_debug_dev_init(bp); in __bnxt_open_nic()
12920 mutex_lock(&bp->link_lock); in __bnxt_open_nic()
12921 rc = bnxt_update_phy_setting(bp); in __bnxt_open_nic()
12922 mutex_unlock(&bp->link_lock); in __bnxt_open_nic()
12924 netdev_warn(bp->dev, "failed to update phy settings\n"); in __bnxt_open_nic()
12925 if (BNXT_SINGLE_PF(bp)) { in __bnxt_open_nic()
12926 bp->link_info.phy_retry = true; in __bnxt_open_nic()
12927 bp->link_info.phy_retry_expires = in __bnxt_open_nic()
12934 udp_tunnel_nic_reset_ntf(bp->dev); in __bnxt_open_nic()
12935 rc = bnxt_set_xps_mapping(bp); in __bnxt_open_nic()
12937 netdev_warn(bp->dev, "failed to set xps mapping\n"); in __bnxt_open_nic()
12940 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { in __bnxt_open_nic()
12946 set_bit(BNXT_STATE_OPEN, &bp->state); in __bnxt_open_nic()
12947 bnxt_enable_int(bp); in __bnxt_open_nic()
12949 bnxt_tx_enable(bp); in __bnxt_open_nic()
12950 mod_timer(&bp->timer, jiffies + bp->current_interval); in __bnxt_open_nic()
12952 mutex_lock(&bp->link_lock); in __bnxt_open_nic()
12953 bnxt_get_port_module_status(bp); in __bnxt_open_nic()
12954 mutex_unlock(&bp->link_lock); in __bnxt_open_nic()
12957 if (BNXT_PF(bp)) in __bnxt_open_nic()
12958 bnxt_vf_reps_open(bp); in __bnxt_open_nic()
12959 bnxt_ptp_init_rtc(bp, true); in __bnxt_open_nic()
12960 bnxt_ptp_cfg_tstamp_filters(bp); in __bnxt_open_nic()
12961 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) in __bnxt_open_nic()
12962 bnxt_hwrm_realloc_rss_ctx_vnic(bp); in __bnxt_open_nic()
12963 bnxt_cfg_usr_fltrs(bp); in __bnxt_open_nic()
12967 bnxt_del_napi(bp); in __bnxt_open_nic()
12970 bnxt_free_skbs(bp); in __bnxt_open_nic()
12971 bnxt_free_irq(bp); in __bnxt_open_nic()
12972 bnxt_free_mem(bp, true); in __bnxt_open_nic()
12976 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) in bnxt_open_nic() argument
12980 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) in bnxt_open_nic()
12983 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); in bnxt_open_nic()
12985 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); in bnxt_open_nic()
12986 netif_close(bp->dev); in bnxt_open_nic()
12995 int bnxt_half_open_nic(struct bnxt *bp) in bnxt_half_open_nic() argument
12999 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { in bnxt_half_open_nic()
13000 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); in bnxt_half_open_nic()
13005 rc = bnxt_alloc_mem(bp, true); in bnxt_half_open_nic()
13007 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); in bnxt_half_open_nic()
13010 bnxt_init_napi(bp); in bnxt_half_open_nic()
13011 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); in bnxt_half_open_nic()
13012 rc = bnxt_init_nic(bp, true); in bnxt_half_open_nic()
13014 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); in bnxt_half_open_nic()
13015 bnxt_del_napi(bp); in bnxt_half_open_nic()
13016 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); in bnxt_half_open_nic()
13022 bnxt_free_skbs(bp); in bnxt_half_open_nic()
13023 bnxt_free_mem(bp, true); in bnxt_half_open_nic()
13024 netif_close(bp->dev); in bnxt_half_open_nic()
13031 void bnxt_half_close_nic(struct bnxt *bp) in bnxt_half_close_nic() argument
13033 bnxt_hwrm_resource_free(bp, false, true); in bnxt_half_close_nic()
13034 bnxt_del_napi(bp); in bnxt_half_close_nic()
13035 bnxt_free_skbs(bp); in bnxt_half_close_nic()
13036 bnxt_free_mem(bp, true); in bnxt_half_close_nic()
13037 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); in bnxt_half_close_nic()
13040 void bnxt_reenable_sriov(struct bnxt *bp) in bnxt_reenable_sriov() argument
13042 if (BNXT_PF(bp)) { in bnxt_reenable_sriov()
13043 struct bnxt_pf_info *pf = &bp->pf; in bnxt_reenable_sriov()
13047 bnxt_cfg_hw_sriov(bp, &n, true); in bnxt_reenable_sriov()
13053 struct bnxt *bp = netdev_priv(dev); in bnxt_open() local
13056 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { in bnxt_open()
13057 rc = bnxt_reinit_after_abort(bp); in bnxt_open()
13060 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); in bnxt_open()
13062 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); in bnxt_open()
13067 rc = bnxt_hwrm_if_change(bp, true); in bnxt_open()
13071 rc = __bnxt_open_nic(bp, true, true); in bnxt_open()
13073 bnxt_hwrm_if_change(bp, false); in bnxt_open()
13075 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { in bnxt_open()
13076 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_open()
13077 bnxt_queue_sp_work(bp, in bnxt_open()
13085 static bool bnxt_drv_busy(struct bnxt *bp) in bnxt_drv_busy() argument
13087 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || in bnxt_drv_busy()
13088 test_bit(BNXT_STATE_READ_STATS, &bp->state)); in bnxt_drv_busy()
13091 static void bnxt_get_ring_stats(struct bnxt *bp,
13094 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, in __bnxt_close_nic() argument
13098 if (BNXT_PF(bp)) in __bnxt_close_nic()
13099 bnxt_vf_reps_close(bp); in __bnxt_close_nic()
13102 bnxt_tx_disable(bp); in __bnxt_close_nic()
13104 clear_bit(BNXT_STATE_OPEN, &bp->state); in __bnxt_close_nic()
13106 while (bnxt_drv_busy(bp)) in __bnxt_close_nic()
13109 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) in __bnxt_close_nic()
13110 bnxt_clear_rss_ctxs(bp); in __bnxt_close_nic()
13112 bnxt_shutdown_nic(bp, irq_re_init); in __bnxt_close_nic()
13116 bnxt_debug_dev_exit(bp); in __bnxt_close_nic()
13117 bnxt_disable_napi(bp); in __bnxt_close_nic()
13118 timer_delete_sync(&bp->timer); in __bnxt_close_nic()
13119 bnxt_free_skbs(bp); in __bnxt_close_nic()
13122 if (bp->bnapi && irq_re_init) { in __bnxt_close_nic()
13123 bnxt_get_ring_stats(bp, &bp->net_stats_prev); in __bnxt_close_nic()
13124 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); in __bnxt_close_nic()
13127 bnxt_free_irq(bp); in __bnxt_close_nic()
13128 bnxt_del_napi(bp); in __bnxt_close_nic()
13130 bnxt_free_mem(bp, irq_re_init); in __bnxt_close_nic()
13133 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) in bnxt_close_nic() argument
13135 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_close_nic()
13144 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); in bnxt_close_nic()
13145 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); in bnxt_close_nic()
13149 if (bp->sriov_cfg) { in bnxt_close_nic()
13152 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, in bnxt_close_nic()
13153 !bp->sriov_cfg, in bnxt_close_nic()
13156 …netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!… in bnxt_close_nic()
13158 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); in bnxt_close_nic()
13161 __bnxt_close_nic(bp, irq_re_init, link_re_init); in bnxt_close_nic()
13166 struct bnxt *bp = netdev_priv(dev); in bnxt_close() local
13168 bnxt_close_nic(bp, true, true); in bnxt_close()
13169 bnxt_hwrm_shutdown_link(bp); in bnxt_close()
13170 bnxt_hwrm_if_change(bp, false); in bnxt_close()
13174 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, in bnxt_hwrm_port_phy_read() argument
13181 if (bp->hwrm_spec_code < 0x10a00) in bnxt_hwrm_port_phy_read()
13184 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); in bnxt_hwrm_port_phy_read()
13188 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_port_phy_read()
13198 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_port_phy_read()
13199 rc = hwrm_req_send(bp, req); in bnxt_hwrm_port_phy_read()
13202 hwrm_req_drop(bp, req); in bnxt_hwrm_port_phy_read()
13206 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, in bnxt_hwrm_port_phy_write() argument
13212 if (bp->hwrm_spec_code < 0x10a00) in bnxt_hwrm_port_phy_write()
13215 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); in bnxt_hwrm_port_phy_write()
13219 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_port_phy_write()
13230 return hwrm_req_send(bp, req); in bnxt_hwrm_port_phy_write()
13237 struct bnxt *bp = netdev_priv(dev); in bnxt_ioctl() local
13242 mdio->phy_id = bp->link_info.phy_addr; in bnxt_ioctl()
13251 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, in bnxt_ioctl()
13261 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, in bnxt_ioctl()
13277 static void bnxt_get_ring_stats(struct bnxt *bp, in bnxt_get_ring_stats() argument
13282 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_get_ring_stats()
13283 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_get_ring_stats()
13316 static void bnxt_add_prev_stats(struct bnxt *bp, in bnxt_add_prev_stats() argument
13319 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; in bnxt_add_prev_stats()
13334 struct bnxt *bp = netdev_priv(dev); in bnxt_get_stats64() local
13336 set_bit(BNXT_STATE_READ_STATS, &bp->state); in bnxt_get_stats64()
13341 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_get_stats64()
13342 clear_bit(BNXT_STATE_READ_STATS, &bp->state); in bnxt_get_stats64()
13343 *stats = bp->net_stats_prev; in bnxt_get_stats64()
13347 bnxt_get_ring_stats(bp, stats); in bnxt_get_stats64()
13348 bnxt_add_prev_stats(bp, stats); in bnxt_get_stats64()
13350 if (bp->flags & BNXT_FLAG_PORT_STATS) { in bnxt_get_stats64()
13351 u64 *rx = bp->port_stats.sw_stats; in bnxt_get_stats64()
13352 u64 *tx = bp->port_stats.sw_stats + in bnxt_get_stats64()
13372 clear_bit(BNXT_STATE_READ_STATS, &bp->state); in bnxt_get_stats64()
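
bnxt_get_stats64() above synchronizes with the close path using two state bits instead of a lock: the reader publishes BNXT_STATE_READ_STATS and then re-checks BNXT_STATE_OPEN, while __bnxt_close_nic() clears OPEN first and spins in bnxt_drv_busy() until no reader remains. A schematic sketch of the handshake (the sleep interval is an assumption):

    static void read_stats(struct bnxt *bp, struct rtnl_link_stats64 *stats)
    {
            set_bit(BNXT_STATE_READ_STATS, &bp->state);
            if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                    clear_bit(BNXT_STATE_READ_STATS, &bp->state);
                    *stats = bp->net_stats_prev;   /* rings may already be gone */
                    return;
            }
            bnxt_get_ring_stats(bp, stats);
            clear_bit(BNXT_STATE_READ_STATS, &bp->state);
    }

    static void close_wait(struct bnxt *bp)
    {
            clear_bit(BNXT_STATE_OPEN, &bp->state);
            while (test_bit(BNXT_STATE_READ_STATS, &bp->state))
                    msleep(20);                    /* interval assumed */
            /* no new reader can proceed: OPEN is clear and re-checked */
    }
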
13375 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, in bnxt_get_one_ring_err_stats() argument
13395 void bnxt_get_ring_err_stats(struct bnxt *bp, in bnxt_get_ring_err_stats() argument
13400 for (i = 0; i < bp->cp_nr_rings; i++) in bnxt_get_ring_err_stats()
13401 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); in bnxt_get_ring_err_stats()
13404 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) in bnxt_mc_list_updated() argument
13406 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_mc_list_updated()
13407 struct net_device *dev = bp->dev; in bnxt_mc_list_updated()
13438 static bool bnxt_uc_list_updated(struct bnxt *bp) in bnxt_uc_list_updated() argument
13440 struct net_device *dev = bp->dev; in bnxt_uc_list_updated()
13441 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_uc_list_updated()
13459 struct bnxt *bp = netdev_priv(dev); in bnxt_set_rx_mode() local
13465 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) in bnxt_set_rx_mode()
13468 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_set_rx_mode()
13478 uc_update = bnxt_uc_list_updated(bp); in bnxt_set_rx_mode()
13486 mc_update = bnxt_mc_list_updated(bp, &mask); in bnxt_set_rx_mode()
13492 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); in bnxt_set_rx_mode()
13496 static int bnxt_cfg_rx_mode(struct bnxt *bp) in bnxt_cfg_rx_mode() argument
13498 struct net_device *dev = bp->dev; in bnxt_cfg_rx_mode()
13499 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_cfg_rx_mode()
13505 uc_update = bnxt_uc_list_updated(bp); in bnxt_cfg_rx_mode()
13514 bnxt_hwrm_l2_filter_free(bp, fltr); in bnxt_cfg_rx_mode()
13515 bnxt_del_l2_filter(bp, fltr); in bnxt_cfg_rx_mode()
13533 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); in bnxt_cfg_rx_mode()
13535 if (BNXT_VF(bp) && rc == -ENODEV) { in bnxt_cfg_rx_mode()
13536 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) in bnxt_cfg_rx_mode()
13537 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); in bnxt_cfg_rx_mode()
13539 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); in bnxt_cfg_rx_mode()
13542 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); in bnxt_cfg_rx_mode()
13548 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) in bnxt_cfg_rx_mode()
13549 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); in bnxt_cfg_rx_mode()
13553 !bnxt_promisc_ok(bp)) in bnxt_cfg_rx_mode()
13555 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); in bnxt_cfg_rx_mode()
13557 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", in bnxt_cfg_rx_mode()
13562 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); in bnxt_cfg_rx_mode()
13565 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", in bnxt_cfg_rx_mode()
13571 static bool bnxt_can_reserve_rings(struct bnxt *bp) in bnxt_can_reserve_rings() argument
13574 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { in bnxt_can_reserve_rings()
13575 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_can_reserve_rings()
13583 if (!netif_running(bp->dev)) in bnxt_can_reserve_rings()
13591 static bool bnxt_rfs_supported(struct bnxt *bp) in bnxt_rfs_supported() argument
13593 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_rfs_supported()
13594 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) in bnxt_rfs_supported()
13599 if (BNXT_FW_MAJ(bp) == 212) in bnxt_rfs_supported()
13601 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) in bnxt_rfs_supported()
13603 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) in bnxt_rfs_supported()
13609 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) in bnxt_rfs_capable() argument
13614 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_rfs_capable()
13615 !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) in bnxt_rfs_capable()
13616 return bnxt_rfs_supported(bp); in bnxt_rfs_capable()
13618 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) in bnxt_rfs_capable()
13621 hwr.grp = bp->rx_nr_rings; in bnxt_rfs_capable()
13622 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); in bnxt_rfs_capable()
13625 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); in bnxt_rfs_capable()
13626 max_vnics = bnxt_get_max_func_vnics(bp); in bnxt_rfs_capable()
13627 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); in bnxt_rfs_capable()
13630 if (bp->rx_nr_rings > 1) in bnxt_rfs_capable()
13631 netdev_warn(bp->dev, in bnxt_rfs_capable()
13637 if (!BNXT_NEW_RM(bp)) in bnxt_rfs_capable()
13644 if (hwr.vnic <= bp->hw_resc.resv_vnics && in bnxt_rfs_capable()
13645 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) in bnxt_rfs_capable()
13648 bnxt_hwrm_reserve_rings(bp, &hwr); in bnxt_rfs_capable()
13649 if (hwr.vnic <= bp->hw_resc.resv_vnics && in bnxt_rfs_capable()
13650 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) in bnxt_rfs_capable()
13653 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); in bnxt_rfs_capable()
13656 bnxt_hwrm_reserve_rings(bp, &hwr); in bnxt_rfs_capable()
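
bnxt_rfs_capable() above probes whether aRFS resources can really be reserved: compare the want against the current reservation, ask the firmware to grow it if needed, verify the reservation stuck, and otherwise shrink back. A schematic sketch; the bnxt_hw_rings type name and the fallback values on failure are assumptions inferred from the surrounding calls:

    static bool probe_rfs_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
    {
            if (hwr->vnic <= bp->hw_resc.resv_vnics &&
                hwr->rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
                    return true;                   /* already covered */

            bnxt_hwrm_reserve_rings(bp, hwr);      /* try to grow */
            if (hwr->vnic <= bp->hw_resc.resv_vnics &&
                hwr->rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
                    return true;

            hwr->vnic = 1;                         /* assumed non-aRFS minimum */
            hwr->rss_ctx = 0;
            bnxt_hwrm_reserve_rings(bp, hwr);      /* give the excess back */
            return false;
    }
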
13663 struct bnxt *bp = netdev_priv(dev); in bnxt_fix_features() local
13666 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) in bnxt_fix_features()
13669 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) in bnxt_fix_features()
13689 if (BNXT_VF(bp) && bp->vf.vlan) in bnxt_fix_features()
13695 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, in bnxt_reinit_features() argument
13698 bnxt_close_nic(bp, irq_re_init, link_re_init); in bnxt_reinit_features()
13699 bp->flags = flags; in bnxt_reinit_features()
13701 bnxt_set_ring_params(bp); in bnxt_reinit_features()
13702 return bnxt_open_nic(bp, irq_re_init, link_re_init); in bnxt_reinit_features()
13708 struct bnxt *bp = netdev_priv(dev); in bnxt_set_features() local
13709 u32 flags = bp->flags; in bnxt_set_features()
13720 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) in bnxt_set_features()
13729 bnxt_clear_usr_fltrs(bp, true); in bnxt_set_features()
13731 changes = flags ^ bp->flags; in bnxt_set_features()
13734 if ((bp->flags & BNXT_FLAG_TPA) == 0 || in bnxt_set_features()
13736 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_set_features()
13746 if (flags != bp->flags) { in bnxt_set_features()
13747 u32 old_flags = bp->flags; in bnxt_set_features()
13749 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_set_features()
13750 bp->flags = flags; in bnxt_set_features()
13752 bnxt_set_ring_params(bp); in bnxt_set_features()
13757 return bnxt_reinit_features(bp, true, false, flags, update_tpa); in bnxt_set_features()
13760 return bnxt_reinit_features(bp, false, false, flags, update_tpa); in bnxt_set_features()
13763 bp->flags = flags; in bnxt_set_features()
13764 rc = bnxt_set_tpa(bp, in bnxt_set_features()
13768 bp->flags = old_flags; in bnxt_set_features()
13774 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, in bnxt_exthdr_check() argument
13841 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) in bnxt_udp_tunl_check() argument
13846 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && in bnxt_udp_tunl_check()
13847 udp_port != bp->vxlan_gpe_port) in bnxt_udp_tunl_check()
13856 return bnxt_exthdr_check(bp, skb, in bnxt_udp_tunl_check()
13863 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), in bnxt_udp_tunl_check()
13869 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) in bnxt_tunl_check() argument
13873 return bnxt_udp_tunl_check(bp, skb); in bnxt_tunl_check()
13888 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), in bnxt_tunl_check()
13898 struct bnxt *bp = netdev_priv(dev); in bnxt_features_check() local
13907 if (bnxt_tunl_check(bp, skb, *l4_proto)) in bnxt_features_check()
13911 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), in bnxt_features_check()
13914 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) in bnxt_features_check()
13921 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, in bnxt_dbg_hwrm_rd_reg() argument
13930 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); in bnxt_dbg_hwrm_rd_reg()
13934 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, in bnxt_dbg_hwrm_rd_reg()
13943 resp = hwrm_req_hold(bp, req); in bnxt_dbg_hwrm_rd_reg()
13947 rc = hwrm_req_send(bp, req); in bnxt_dbg_hwrm_rd_reg()
13956 hwrm_req_drop(bp, req); in bnxt_dbg_hwrm_rd_reg()
13960 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, in bnxt_dbg_hwrm_ring_info_get() argument
13967 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); in bnxt_dbg_hwrm_ring_info_get()
13973 resp = hwrm_req_hold(bp, req); in bnxt_dbg_hwrm_ring_info_get()
13974 rc = hwrm_req_send(bp, req); in bnxt_dbg_hwrm_ring_info_get()
13979 hwrm_req_drop(bp, req); in bnxt_dbg_hwrm_ring_info_get()
13989 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", in bnxt_dump_tx_sw_state()
14002 …netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg… in bnxt_dump_rx_sw_state()
14013 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", in bnxt_dump_cp_sw_state()
14017 static void bnxt_dbg_dump_states(struct bnxt *bp) in bnxt_dbg_dump_states() argument
14022 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_dbg_dump_states()
14023 bnapi = bp->bnapi[i]; in bnxt_dbg_dump_states()
14024 if (netif_msg_drv(bp)) { in bnxt_dbg_dump_states()
14032 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) in bnxt_hwrm_rx_ring_reset() argument
14034 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; in bnxt_hwrm_rx_ring_reset()
14041 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); in bnxt_hwrm_rx_ring_reset()
14049 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); in bnxt_hwrm_rx_ring_reset()
14050 return hwrm_req_send_silent(bp, req); in bnxt_hwrm_rx_ring_reset()
14053 static void bnxt_reset_task(struct bnxt *bp, bool silent) in bnxt_reset_task() argument
14056 bnxt_dbg_dump_states(bp); in bnxt_reset_task()
14057 if (netif_running(bp->dev)) { in bnxt_reset_task()
14058 bnxt_close_nic(bp, !silent, false); in bnxt_reset_task()
14059 bnxt_open_nic(bp, !silent, false); in bnxt_reset_task()
14065 struct bnxt *bp = netdev_priv(dev); in bnxt_tx_timeout() local
14067 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); in bnxt_tx_timeout()
14068 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); in bnxt_tx_timeout()
14071 static void bnxt_fw_health_check(struct bnxt *bp) in bnxt_fw_health_check() argument
14073 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_fw_health_check()
14074 struct pci_dev *pdev = bp->pdev; in bnxt_fw_health_check()
14077 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_fw_health_check()
14087 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); in bnxt_fw_health_check()
14095 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); in bnxt_fw_health_check()
14105 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); in bnxt_fw_health_check()
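The health check is heartbeat based: firmware increments a counter in a shared register, and if the value read on this timer tick equals the one cached on the previous tick (with the reset counter also unchanged), firmware is presumed dead and a firmware-exception event is queued. A simplified sketch; the real function also rate-limits its reads and checks PCI presence:

    /* Sketch: declare firmware dead when its heartbeat register stalls. */
    static void sketch_fw_health_check(struct bnxt *bp)
    {
            struct bnxt_fw_health *fw_health = bp->fw_health;
            u32 val;

            val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
            if (val == fw_health->last_fw_heartbeat) {
                    /* no progress since the last tick: firmware stalled */
                    bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
                    return;
            }
            fw_health->last_fw_heartbeat = val;
    }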
14110 struct bnxt *bp = timer_container_of(bp, t, timer); in bnxt_timer() local
14111 struct net_device *dev = bp->dev; in bnxt_timer()
14113 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) in bnxt_timer()
14116 if (atomic_read(&bp->intr_sem) != 0) in bnxt_timer()
14119 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) in bnxt_timer()
14120 bnxt_fw_health_check(bp); in bnxt_timer()
14122 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) in bnxt_timer()
14123 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); in bnxt_timer()
14125 if (bnxt_tc_flower_enabled(bp)) in bnxt_timer()
14126 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); in bnxt_timer()
14129 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) in bnxt_timer()
14130 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); in bnxt_timer()
14133 if (bp->link_info.phy_retry) { in bnxt_timer()
14134 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { in bnxt_timer()
14135 bp->link_info.phy_retry = false; in bnxt_timer()
14136 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); in bnxt_timer()
14138 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); in bnxt_timer()
14142 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) in bnxt_timer()
14143 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); in bnxt_timer()
14145 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) in bnxt_timer()
14146 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); in bnxt_timer()
14149 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnxt_timer()
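bnxt_timer() does no real work in timer (softirq) context; it translates conditions into sp_event bits and re-arms itself, a classic self-rearming periodic timer. Minimal sketch of the pattern:

    /* Sketch: self-rearming periodic timer that only queues work. */
    static void sketch_timer(struct timer_list *t)
    {
            struct bnxt *bp = timer_container_of(bp, t, timer);

            if (bp->stats_coal_ticks)
                    bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);

            /* re-arm: the timer keeps itself alive until deleted */
            mod_timer(&bp->timer, jiffies + bp->current_interval);
    }

    /* armed once at init: timer_setup(&bp->timer, sketch_timer, 0); */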
14152 static void bnxt_lock_sp(struct bnxt *bp) in bnxt_lock_sp() argument
14160 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_lock_sp()
14161 netdev_lock(bp->dev); in bnxt_lock_sp()
14164 static void bnxt_unlock_sp(struct bnxt *bp) in bnxt_unlock_sp() argument
14166 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_unlock_sp()
14167 netdev_unlock(bp->dev); in bnxt_unlock_sp()
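The bit handling in these two helpers looks inverted but is deliberate: the close path can hold the netdev lock while waiting for BNXT_STATE_IN_SP_TASK to clear, so the slow-path task must drop that bit before blocking on the lock, or the two threads would deadlock. A sketch of both sides of the handshake (the wait loop mirrors the driver's bnxt_drv_busy() polling):

    /* sp-task side: release the busy bit, then take the lock */
    static void sketch_lock_sp(struct bnxt *bp)
    {
            clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
            netdev_lock(bp->dev);
    }

    /* close side: with the lock held, wait out any running sp work */
    static void sketch_close_wait(struct bnxt *bp)
    {
            clear_bit(BNXT_STATE_OPEN, &bp->state);
            while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
                    msleep(20);
    }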
14171 static void bnxt_reset(struct bnxt *bp, bool silent) in bnxt_reset() argument
14173 bnxt_lock_sp(bp); in bnxt_reset()
14174 if (test_bit(BNXT_STATE_OPEN, &bp->state)) in bnxt_reset()
14175 bnxt_reset_task(bp, silent); in bnxt_reset()
14176 bnxt_unlock_sp(bp); in bnxt_reset()
14180 static void bnxt_rx_ring_reset(struct bnxt *bp) in bnxt_rx_ring_reset() argument
14184 bnxt_lock_sp(bp); in bnxt_rx_ring_reset()
14185 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_rx_ring_reset()
14186 bnxt_unlock_sp(bp); in bnxt_rx_ring_reset()
14190 if (bp->flags & BNXT_FLAG_TPA) in bnxt_rx_ring_reset()
14191 bnxt_set_tpa(bp, false); in bnxt_rx_ring_reset()
14192 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_rx_ring_reset()
14193 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_rx_ring_reset()
14200 rc = bnxt_hwrm_rx_ring_reset(bp, i); in bnxt_rx_ring_reset()
14203 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); in bnxt_rx_ring_reset()
14205 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", in bnxt_rx_ring_reset()
14207 bnxt_reset_task(bp, true); in bnxt_rx_ring_reset()
14210 bnxt_free_one_rx_ring_skbs(bp, rxr); in bnxt_rx_ring_reset()
14216 bnxt_alloc_one_rx_ring(bp, i); in bnxt_rx_ring_reset()
14219 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_rx_ring_reset()
14220 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in bnxt_rx_ring_reset()
14221 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_rx_ring_reset()
14223 if (bp->flags & BNXT_FLAG_TPA) in bnxt_rx_ring_reset()
14224 bnxt_set_tpa(bp, true); in bnxt_rx_ring_reset()
14225 bnxt_unlock_sp(bp); in bnxt_rx_ring_reset()
14228 static void bnxt_fw_fatal_close(struct bnxt *bp) in bnxt_fw_fatal_close() argument
14230 bnxt_tx_disable(bp); in bnxt_fw_fatal_close()
14231 bnxt_disable_napi(bp); in bnxt_fw_fatal_close()
14232 bnxt_disable_int_sync(bp); in bnxt_fw_fatal_close()
14233 bnxt_free_irq(bp); in bnxt_fw_fatal_close()
14234 bnxt_clear_int_mode(bp); in bnxt_fw_fatal_close()
14235 pci_disable_device(bp->pdev); in bnxt_fw_fatal_close()
14238 static void bnxt_fw_reset_close(struct bnxt *bp) in bnxt_fw_reset_close() argument
14244 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { in bnxt_fw_reset_close()
14247 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); in bnxt_fw_reset_close()
14249 bp->fw_reset_min_dsecs = 0; in bnxt_fw_reset_close()
14250 bnxt_fw_fatal_close(bp); in bnxt_fw_reset_close()
14252 __bnxt_close_nic(bp, true, false); in bnxt_fw_reset_close()
14253 bnxt_vf_reps_free(bp); in bnxt_fw_reset_close()
14254 bnxt_clear_int_mode(bp); in bnxt_fw_reset_close()
14255 bnxt_hwrm_func_drv_unrgtr(bp); in bnxt_fw_reset_close()
14256 if (pci_is_enabled(bp->pdev)) in bnxt_fw_reset_close()
14257 pci_disable_device(bp->pdev); in bnxt_fw_reset_close()
14258 bnxt_free_ctx_mem(bp, false); in bnxt_fw_reset_close()
14261 static bool is_bnxt_fw_ok(struct bnxt *bp) in is_bnxt_fw_ok() argument
14263 struct bnxt_fw_health *fw_health = bp->fw_health; in is_bnxt_fw_ok()
14267 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); in is_bnxt_fw_ok()
14271 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); in is_bnxt_fw_ok()
14282 static void bnxt_force_fw_reset(struct bnxt *bp) in bnxt_force_fw_reset() argument
14284 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_force_fw_reset()
14285 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_force_fw_reset()
14288 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || in bnxt_force_fw_reset()
14289 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_force_fw_reset()
14297 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_force_fw_reset()
14300 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_force_fw_reset()
14302 bnxt_fw_reset_close(bp); in bnxt_force_fw_reset()
14307 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; in bnxt_force_fw_reset()
14309 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; in bnxt_force_fw_reset()
14311 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_force_fw_reset()
14314 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; in bnxt_force_fw_reset()
14315 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; in bnxt_force_fw_reset()
14316 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); in bnxt_force_fw_reset()
14319 void bnxt_fw_exception(struct bnxt *bp) in bnxt_fw_exception() argument
14321 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); in bnxt_fw_exception()
14322 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); in bnxt_fw_exception()
14323 bnxt_ulp_stop(bp); in bnxt_fw_exception()
14324 bnxt_lock_sp(bp); in bnxt_fw_exception()
14325 bnxt_force_fw_reset(bp); in bnxt_fw_exception()
14326 bnxt_unlock_sp(bp); in bnxt_fw_exception()
14332 static int bnxt_get_registered_vfs(struct bnxt *bp) in bnxt_get_registered_vfs() argument
14337 if (!BNXT_PF(bp)) in bnxt_get_registered_vfs()
14340 rc = bnxt_hwrm_func_qcfg(bp); in bnxt_get_registered_vfs()
14342 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); in bnxt_get_registered_vfs()
14345 if (bp->pf.registered_vfs) in bnxt_get_registered_vfs()
14346 return bp->pf.registered_vfs; in bnxt_get_registered_vfs()
14347 if (bp->sriov_cfg) in bnxt_get_registered_vfs()
14353 void bnxt_fw_reset(struct bnxt *bp) in bnxt_fw_reset() argument
14355 bnxt_ulp_stop(bp); in bnxt_fw_reset()
14356 bnxt_lock_sp(bp); in bnxt_fw_reset()
14357 if (test_bit(BNXT_STATE_OPEN, &bp->state) && in bnxt_fw_reset()
14358 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_fw_reset()
14359 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_fw_reset()
14367 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset()
14370 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset()
14372 if (bp->pf.active_vfs && in bnxt_fw_reset()
14373 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) in bnxt_fw_reset()
14374 n = bnxt_get_registered_vfs(bp); in bnxt_fw_reset()
14376 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", in bnxt_fw_reset()
14378 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset()
14379 netif_close(bp->dev); in bnxt_fw_reset()
14384 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) in bnxt_fw_reset()
14385 bp->fw_reset_max_dsecs = vf_tmo_dsecs; in bnxt_fw_reset()
14386 bp->fw_reset_state = in bnxt_fw_reset()
14388 bnxt_queue_fw_reset_work(bp, HZ / 10); in bnxt_fw_reset()
14391 bnxt_fw_reset_close(bp); in bnxt_fw_reset()
14392 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { in bnxt_fw_reset()
14393 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; in bnxt_fw_reset()
14396 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_fw_reset()
14397 tmo = bp->fw_reset_min_dsecs * HZ / 10; in bnxt_fw_reset()
14399 bnxt_queue_fw_reset_work(bp, tmo); in bnxt_fw_reset()
14402 bnxt_unlock_sp(bp); in bnxt_fw_reset()
14405 static void bnxt_chk_missed_irq(struct bnxt *bp) in bnxt_chk_missed_irq() argument
14409 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_chk_missed_irq()
14412 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_chk_missed_irq()
14413 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_chk_missed_irq()
14426 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) in bnxt_chk_missed_irq()
14434 bnxt_dbg_hwrm_ring_info_get(bp, in bnxt_chk_missed_irq()
14444 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) in bnxt_init_ethtool_link_settings() argument
14446 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_init_ethtool_link_settings()
14450 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_init_ethtool_link_settings()
14469 static void bnxt_fw_echo_reply(struct bnxt *bp) in bnxt_fw_echo_reply() argument
14471 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_fw_echo_reply()
14475 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); in bnxt_fw_echo_reply()
14480 hwrm_req_send(bp, req); in bnxt_fw_echo_reply()
14483 static void bnxt_ulp_restart(struct bnxt *bp) in bnxt_ulp_restart() argument
14485 bnxt_ulp_stop(bp); in bnxt_ulp_restart()
14486 bnxt_ulp_start(bp, 0); in bnxt_ulp_restart()
14491 struct bnxt *bp = container_of(work, struct bnxt, sp_task); in bnxt_sp_task() local
14493 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
14495 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_sp_task()
14496 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
14500 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
14501 bnxt_ulp_restart(bp); in bnxt_sp_task()
14502 bnxt_reenable_sriov(bp); in bnxt_sp_task()
14505 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14506 bnxt_cfg_rx_mode(bp); in bnxt_sp_task()
14508 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14509 bnxt_cfg_ntp_filters(bp); in bnxt_sp_task()
14510 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14511 bnxt_hwrm_exec_fwd_req(bp); in bnxt_sp_task()
14512 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14513 netdev_info(bp->dev, "Receive PF driver unload event!\n"); in bnxt_sp_task()
14514 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
14515 bnxt_hwrm_port_qstats(bp, 0); in bnxt_sp_task()
14516 bnxt_hwrm_port_qstats_ext(bp, 0); in bnxt_sp_task()
14517 bnxt_accumulate_all_stats(bp); in bnxt_sp_task()
14520 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
14523 mutex_lock(&bp->link_lock); in bnxt_sp_task()
14525 &bp->sp_event)) in bnxt_sp_task()
14526 bnxt_hwrm_phy_qcaps(bp); in bnxt_sp_task()
14528 rc = bnxt_update_link(bp, true); in bnxt_sp_task()
14530 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", in bnxt_sp_task()
14534 &bp->sp_event)) in bnxt_sp_task()
14535 bnxt_init_ethtool_link_settings(bp); in bnxt_sp_task()
14536 mutex_unlock(&bp->link_lock); in bnxt_sp_task()
14538 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
14541 mutex_lock(&bp->link_lock); in bnxt_sp_task()
14542 rc = bnxt_update_phy_setting(bp); in bnxt_sp_task()
14543 mutex_unlock(&bp->link_lock); in bnxt_sp_task()
14545 netdev_warn(bp->dev, "update phy settings retry failed\n"); in bnxt_sp_task()
14547 bp->link_info.phy_retry = false; in bnxt_sp_task()
14548 netdev_info(bp->dev, "update phy settings retry succeeded\n"); in bnxt_sp_task()
14551 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
14552 mutex_lock(&bp->link_lock); in bnxt_sp_task()
14553 bnxt_get_port_module_status(bp); in bnxt_sp_task()
14554 mutex_unlock(&bp->link_lock); in bnxt_sp_task()
14557 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14558 bnxt_tc_flow_stats_work(bp); in bnxt_sp_task()
14560 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14561 bnxt_chk_missed_irq(bp); in bnxt_sp_task()
14563 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14564 bnxt_fw_echo_reply(bp); in bnxt_sp_task()
14566 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14567 bnxt_hwmon_notify_event(bp); in bnxt_sp_task()
14572 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14573 bnxt_reset(bp, false); in bnxt_sp_task()
14575 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14576 bnxt_reset(bp, true); in bnxt_sp_task()
14578 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
14579 bnxt_rx_ring_reset(bp); in bnxt_sp_task()
14581 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
14582 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || in bnxt_sp_task()
14583 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) in bnxt_sp_task()
14584 bnxt_devlink_health_fw_report(bp); in bnxt_sp_task()
14586 bnxt_fw_reset(bp); in bnxt_sp_task()
14589 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
14590 if (!is_bnxt_fw_ok(bp)) in bnxt_sp_task()
14591 bnxt_devlink_health_fw_report(bp); in bnxt_sp_task()
14595 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
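bnxt_sp_task() is one work handler multiplexing every rare event: producers set a bit in bp->sp_event and queue the work, and the handler test-and-clears each bit in turn. The reset-type events are deliberately handled last, because bnxt_reset() temporarily releases the BNXT_STATE_IN_SP_TASK protection that guards the earlier handlers. A condensed sketch of the dispatch:

    /* Sketch: one work item fanning out over event bits. */
    static void sketch_sp_task(struct work_struct *work)
    {
            struct bnxt *bp = container_of(work, struct bnxt, sp_task);

            set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
            if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                    clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
                    return;
            }

            if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
                    bnxt_hwrm_port_qstats(bp, 0);
            /* ... more non-destructive events ... */

            /* destructive events last: these drop IN_SP_TASK internally */
            if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
                    bnxt_reset(bp, false);

            clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
    }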
14598 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14602 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, in bnxt_check_rings() argument
14613 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); in bnxt_check_rings()
14618 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_check_rings()
14626 hwr.vnic = bnxt_get_total_vnics(bp, rx); in bnxt_check_rings()
14628 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); in bnxt_check_rings()
14633 if (BNXT_NEW_RM(bp)) { in bnxt_check_rings()
14634 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); in bnxt_check_rings()
14635 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); in bnxt_check_rings()
14637 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); in bnxt_check_rings()
14639 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_check_rings()
14641 rc = bnxt_hwrm_check_rings(bp, &hwr); in bnxt_check_rings()
14642 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) { in bnxt_check_rings()
14643 if (!bnxt_ulp_registered(bp->edev)) { in bnxt_check_rings()
14644 hwr.cp += bnxt_get_ulp_msix_num(bp); in bnxt_check_rings()
14645 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp)); in bnxt_check_rings()
14647 if (hwr.cp > bp->total_irqs) { in bnxt_check_rings()
14648 int total_msix = bnxt_change_msix(bp, hwr.cp); in bnxt_check_rings()
14651 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n", in bnxt_check_rings()
14660 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) in bnxt_unmap_bars() argument
14662 if (bp->bar2) { in bnxt_unmap_bars()
14663 pci_iounmap(pdev, bp->bar2); in bnxt_unmap_bars()
14664 bp->bar2 = NULL; in bnxt_unmap_bars()
14667 if (bp->bar1) { in bnxt_unmap_bars()
14668 pci_iounmap(pdev, bp->bar1); in bnxt_unmap_bars()
14669 bp->bar1 = NULL; in bnxt_unmap_bars()
14672 if (bp->bar0) { in bnxt_unmap_bars()
14673 pci_iounmap(pdev, bp->bar0); in bnxt_unmap_bars()
14674 bp->bar0 = NULL; in bnxt_unmap_bars()
14678 static void bnxt_cleanup_pci(struct bnxt *bp) in bnxt_cleanup_pci() argument
14680 bnxt_unmap_bars(bp, bp->pdev); in bnxt_cleanup_pci()
14681 pci_release_regions(bp->pdev); in bnxt_cleanup_pci()
14682 if (pci_is_enabled(bp->pdev)) in bnxt_cleanup_pci()
14683 pci_disable_device(bp->pdev); in bnxt_cleanup_pci()
14686 static void bnxt_init_dflt_coal(struct bnxt *bp) in bnxt_init_dflt_coal() argument
14688 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in bnxt_init_dflt_coal()
14699 coal = &bp->rx_coal; in bnxt_init_dflt_coal()
14709 coal = &bp->tx_coal; in bnxt_init_dflt_coal()
14717 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; in bnxt_init_dflt_coal()
14721 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) in bnxt_fw_pre_resv_vnics() argument
14723 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); in bnxt_fw_pre_resv_vnics()
14725 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_fw_pre_resv_vnics()
14728 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_fw_pre_resv_vnics()
14734 static int bnxt_fw_init_one_p1(struct bnxt *bp) in bnxt_fw_init_one_p1() argument
14738 bp->fw_cap = 0; in bnxt_fw_init_one_p1()
14739 rc = bnxt_hwrm_ver_get(bp); in bnxt_fw_init_one_p1()
14745 bnxt_try_map_fw_health_reg(bp); in bnxt_fw_init_one_p1()
14747 rc = bnxt_try_recover_fw(bp); in bnxt_fw_init_one_p1()
14750 rc = bnxt_hwrm_ver_get(bp); in bnxt_fw_init_one_p1()
14755 bnxt_nvm_cfg_ver_get(bp); in bnxt_fw_init_one_p1()
14757 rc = bnxt_hwrm_func_reset(bp); in bnxt_fw_init_one_p1()
14761 bnxt_hwrm_fw_set_time(bp); in bnxt_fw_init_one_p1()
14765 static int bnxt_fw_init_one_p2(struct bnxt *bp) in bnxt_fw_init_one_p2() argument
14770 rc = bnxt_hwrm_func_qcaps(bp); in bnxt_fw_init_one_p2()
14772 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", in bnxt_fw_init_one_p2()
14777 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); in bnxt_fw_init_one_p2()
14779 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", in bnxt_fw_init_one_p2()
14782 if (bnxt_alloc_fw_health(bp)) { in bnxt_fw_init_one_p2()
14783 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); in bnxt_fw_init_one_p2()
14785 rc = bnxt_hwrm_error_recovery_qcfg(bp); in bnxt_fw_init_one_p2()
14787 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", in bnxt_fw_init_one_p2()
14791 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); in bnxt_fw_init_one_p2()
14795 rc = bnxt_alloc_crash_dump_mem(bp); in bnxt_fw_init_one_p2()
14797 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n", in bnxt_fw_init_one_p2()
14800 rc = bnxt_hwrm_crash_dump_mem_cfg(bp); in bnxt_fw_init_one_p2()
14802 bnxt_free_crash_dump_mem(bp); in bnxt_fw_init_one_p2()
14803 netdev_warn(bp->dev, in bnxt_fw_init_one_p2()
14808 if (bnxt_fw_pre_resv_vnics(bp)) in bnxt_fw_init_one_p2()
14809 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; in bnxt_fw_init_one_p2()
14811 bnxt_hwrm_func_qcfg(bp); in bnxt_fw_init_one_p2()
14812 bnxt_hwrm_vnic_qcaps(bp); in bnxt_fw_init_one_p2()
14813 bnxt_hwrm_port_led_qcaps(bp); in bnxt_fw_init_one_p2()
14814 bnxt_ethtool_init(bp); in bnxt_fw_init_one_p2()
14815 if (bp->fw_cap & BNXT_FW_CAP_PTP) in bnxt_fw_init_one_p2()
14816 __bnxt_hwrm_ptp_qcfg(bp); in bnxt_fw_init_one_p2()
14817 bnxt_dcb_init(bp); in bnxt_fw_init_one_p2()
14818 bnxt_hwmon_init(bp); in bnxt_fw_init_one_p2()
14822 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) in bnxt_set_dflt_rss_hash_type() argument
14824 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; in bnxt_set_dflt_rss_hash_type()
14825 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | in bnxt_set_dflt_rss_hash_type()
14829 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) in bnxt_set_dflt_rss_hash_type()
14830 bp->rss_hash_delta = bp->rss_hash_cfg; in bnxt_set_dflt_rss_hash_type()
14831 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { in bnxt_set_dflt_rss_hash_type()
14832 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; in bnxt_set_dflt_rss_hash_type()
14833 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | in bnxt_set_dflt_rss_hash_type()
14838 static void bnxt_set_dflt_rfs(struct bnxt *bp) in bnxt_set_dflt_rfs() argument
14840 struct net_device *dev = bp->dev; in bnxt_set_dflt_rfs()
14844 bp->flags &= ~BNXT_FLAG_RFS; in bnxt_set_dflt_rfs()
14845 if (bnxt_rfs_supported(bp)) { in bnxt_set_dflt_rfs()
14847 if (bnxt_rfs_capable(bp, false)) { in bnxt_set_dflt_rfs()
14848 bp->flags |= BNXT_FLAG_RFS; in bnxt_set_dflt_rfs()
14854 static void bnxt_fw_init_one_p3(struct bnxt *bp) in bnxt_fw_init_one_p3() argument
14856 struct pci_dev *pdev = bp->pdev; in bnxt_fw_init_one_p3()
14858 bnxt_set_dflt_rss_hash_type(bp); in bnxt_fw_init_one_p3()
14859 bnxt_set_dflt_rfs(bp); in bnxt_fw_init_one_p3()
14861 bnxt_get_wol_settings(bp); in bnxt_fw_init_one_p3()
14862 if (bp->flags & BNXT_FLAG_WOL_CAP) in bnxt_fw_init_one_p3()
14863 device_set_wakeup_enable(&pdev->dev, bp->wol); in bnxt_fw_init_one_p3()
14867 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); in bnxt_fw_init_one_p3()
14868 bnxt_hwrm_coal_params_qcaps(bp); in bnxt_fw_init_one_p3()
14871 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14873 int bnxt_fw_init_one(struct bnxt *bp) in bnxt_fw_init_one() argument
14877 rc = bnxt_fw_init_one_p1(bp); in bnxt_fw_init_one()
14879 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); in bnxt_fw_init_one()
14882 rc = bnxt_fw_init_one_p2(bp); in bnxt_fw_init_one()
14884 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); in bnxt_fw_init_one()
14887 rc = bnxt_probe_phy(bp, false); in bnxt_fw_init_one()
14890 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); in bnxt_fw_init_one()
14894 bnxt_fw_init_one_p3(bp); in bnxt_fw_init_one()
14898 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) in bnxt_fw_reset_writel() argument
14900 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_fw_reset_writel()
14910 pci_write_config_dword(bp->pdev, reg_off, val); in bnxt_fw_reset_writel()
14914 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); in bnxt_fw_reset_writel()
14918 writel(val, bp->bar0 + reg_off); in bnxt_fw_reset_writel()
14921 writel(val, bp->bar1 + reg_off); in bnxt_fw_reset_writel()
14925 pci_read_config_dword(bp->pdev, 0, &val); in bnxt_fw_reset_writel()
14930 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) in bnxt_hwrm_reset_permitted() argument
14936 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) in bnxt_hwrm_reset_permitted()
14939 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) in bnxt_hwrm_reset_permitted()
14943 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_reset_permitted()
14944 if (!hwrm_req_send(bp, req)) in bnxt_hwrm_reset_permitted()
14947 hwrm_req_drop(bp, req); in bnxt_hwrm_reset_permitted()
14951 static void bnxt_reset_all(struct bnxt *bp) in bnxt_reset_all() argument
14953 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_reset_all()
14956 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { in bnxt_reset_all()
14957 bnxt_fw_reset_via_optee(bp); in bnxt_reset_all()
14958 bp->fw_reset_timestamp = jiffies; in bnxt_reset_all()
14964 bnxt_fw_reset_writel(bp, i); in bnxt_reset_all()
14968 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); in bnxt_reset_all()
14974 rc = hwrm_req_send(bp, req); in bnxt_reset_all()
14977 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); in bnxt_reset_all()
14979 bp->fw_reset_timestamp = jiffies; in bnxt_reset_all()
14982 static bool bnxt_fw_reset_timeout(struct bnxt *bp) in bnxt_fw_reset_timeout() argument
14984 return time_after(jiffies, bp->fw_reset_timestamp + in bnxt_fw_reset_timeout()
14985 (bp->fw_reset_max_dsecs * HZ / 10)); in bnxt_fw_reset_timeout()
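Firmware wait intervals are carried in deciseconds (the *_dsecs fields), hence the recurring `* HZ / 10` conversion to jiffies; bnxt_fw_reset_timeout() is the standard wrap-safe deadline test built on time_after(). Worked example: with fw_reset_max_dsecs = 60 (six seconds) and HZ = 250, the window is 60 * 250 / 10 = 1500 jiffies. As a sketch:

    /* Sketch: decisecond deadline check, safe across jiffies wraparound. */
    static bool sketch_timed_out(unsigned long start_jiffies, u16 max_dsecs)
    {
            return time_after(jiffies, start_jiffies + max_dsecs * HZ / 10);
    }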
14988 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) in bnxt_fw_reset_abort() argument
14990 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset_abort()
14991 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) in bnxt_fw_reset_abort()
14992 bnxt_dl_health_fw_status_update(bp, false); in bnxt_fw_reset_abort()
14993 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT; in bnxt_fw_reset_abort()
14994 netif_close(bp->dev); in bnxt_fw_reset_abort()
14999 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); in bnxt_fw_reset_task() local
15002 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_fw_reset_task()
15003 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); in bnxt_fw_reset_task()
15007 switch (bp->fw_reset_state) { in bnxt_fw_reset_task()
15009 int n = bnxt_get_registered_vfs(bp); in bnxt_fw_reset_task()
15013 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", in bnxt_fw_reset_task()
15015 bp->fw_reset_timestamp)); in bnxt_fw_reset_task()
15018 if (bnxt_fw_reset_timeout(bp)) { in bnxt_fw_reset_task()
15019 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset_task()
15020 bp->fw_reset_state = 0; in bnxt_fw_reset_task()
15021 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", in bnxt_fw_reset_task()
15025 bnxt_queue_fw_reset_work(bp, HZ / 10); in bnxt_fw_reset_task()
15028 bp->fw_reset_timestamp = jiffies; in bnxt_fw_reset_task()
15029 netdev_lock(bp->dev); in bnxt_fw_reset_task()
15030 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { in bnxt_fw_reset_task()
15031 bnxt_fw_reset_abort(bp, rc); in bnxt_fw_reset_task()
15032 netdev_unlock(bp->dev); in bnxt_fw_reset_task()
15035 bnxt_fw_reset_close(bp); in bnxt_fw_reset_task()
15036 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { in bnxt_fw_reset_task()
15037 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; in bnxt_fw_reset_task()
15040 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_fw_reset_task()
15041 tmo = bp->fw_reset_min_dsecs * HZ / 10; in bnxt_fw_reset_task()
15043 netdev_unlock(bp->dev); in bnxt_fw_reset_task()
15044 bnxt_queue_fw_reset_work(bp, tmo); in bnxt_fw_reset_task()
15050 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); in bnxt_fw_reset_task()
15052 !bnxt_fw_reset_timeout(bp)) { in bnxt_fw_reset_task()
15053 bnxt_queue_fw_reset_work(bp, HZ / 5); in bnxt_fw_reset_task()
15057 if (!bp->fw_health->primary) { in bnxt_fw_reset_task()
15058 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; in bnxt_fw_reset_task()
15060 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_fw_reset_task()
15061 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); in bnxt_fw_reset_task()
15064 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; in bnxt_fw_reset_task()
15068 bnxt_reset_all(bp); in bnxt_fw_reset_task()
15069 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_fw_reset_task()
15070 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); in bnxt_fw_reset_task()
15073 bnxt_inv_fw_health_reg(bp); in bnxt_fw_reset_task()
15074 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && in bnxt_fw_reset_task()
15075 !bp->fw_reset_min_dsecs) { in bnxt_fw_reset_task()
15078 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); in bnxt_fw_reset_task()
15080 if (bnxt_fw_reset_timeout(bp)) { in bnxt_fw_reset_task()
15081 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); in bnxt_fw_reset_task()
15085 bnxt_queue_fw_reset_work(bp, HZ / 1000); in bnxt_fw_reset_task()
15089 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); in bnxt_fw_reset_task()
15090 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); in bnxt_fw_reset_task()
15091 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && in bnxt_fw_reset_task()
15092 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) in bnxt_fw_reset_task()
15093 bnxt_dl_remote_reload(bp); in bnxt_fw_reset_task()
15094 if (pci_enable_device(bp->pdev)) { in bnxt_fw_reset_task()
15095 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); in bnxt_fw_reset_task()
15099 pci_set_master(bp->pdev); in bnxt_fw_reset_task()
15100 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; in bnxt_fw_reset_task()
15103 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; in bnxt_fw_reset_task()
15104 rc = bnxt_hwrm_poll(bp); in bnxt_fw_reset_task()
15106 if (bnxt_fw_reset_timeout(bp)) { in bnxt_fw_reset_task()
15107 netdev_err(bp->dev, "Firmware reset aborted\n"); in bnxt_fw_reset_task()
15110 bnxt_queue_fw_reset_work(bp, HZ / 5); in bnxt_fw_reset_task()
15113 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; in bnxt_fw_reset_task()
15114 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; in bnxt_fw_reset_task()
15117 while (!netdev_trylock(bp->dev)) { in bnxt_fw_reset_task()
15118 bnxt_queue_fw_reset_work(bp, HZ / 10); in bnxt_fw_reset_task()
15121 rc = bnxt_open(bp->dev); in bnxt_fw_reset_task()
15123 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); in bnxt_fw_reset_task()
15124 bnxt_fw_reset_abort(bp, rc); in bnxt_fw_reset_task()
15125 netdev_unlock(bp->dev); in bnxt_fw_reset_task()
15129 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && in bnxt_fw_reset_task()
15130 bp->fw_health->enabled) { in bnxt_fw_reset_task()
15131 bp->fw_health->last_fw_reset_cnt = in bnxt_fw_reset_task()
15132 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); in bnxt_fw_reset_task()
15134 bp->fw_reset_state = 0; in bnxt_fw_reset_task()
15137 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset_task()
15138 bnxt_ptp_reapply_pps(bp); in bnxt_fw_reset_task()
15139 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); in bnxt_fw_reset_task()
15140 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { in bnxt_fw_reset_task()
15141 bnxt_dl_health_fw_recovery_done(bp); in bnxt_fw_reset_task()
15142 bnxt_dl_health_fw_status_update(bp, true); in bnxt_fw_reset_task()
15144 netdev_unlock(bp->dev); in bnxt_fw_reset_task()
15145 bnxt_ulp_start(bp, 0); in bnxt_fw_reset_task()
15146 bnxt_reenable_sriov(bp); in bnxt_fw_reset_task()
15147 netdev_lock(bp->dev); in bnxt_fw_reset_task()
15148 bnxt_vf_reps_alloc(bp); in bnxt_fw_reset_task()
15149 bnxt_vf_reps_open(bp); in bnxt_fw_reset_task()
15150 netdev_unlock(bp->dev); in bnxt_fw_reset_task()
15156 if (bp->fw_health->status_reliable || in bnxt_fw_reset_task()
15157 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { in bnxt_fw_reset_task()
15158 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); in bnxt_fw_reset_task()
15160 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); in bnxt_fw_reset_task()
15163 netdev_lock(bp->dev); in bnxt_fw_reset_task()
15164 bnxt_fw_reset_abort(bp, rc); in bnxt_fw_reset_task()
15165 netdev_unlock(bp->dev); in bnxt_fw_reset_task()
15167 bnxt_ulp_start(bp, rc); in bnxt_fw_reset_task()
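bnxt_fw_reset_task() is a self-rescheduling state machine: each invocation handles the current BNXT_FW_RESET_STATE_* step, then either advances bp->fw_reset_state and re-queues itself, or polls again after a delay until bnxt_fw_reset_timeout() forces an abort. A heavily compressed sketch of the control flow, covering only three of the states, with bodies elided:

    /* Sketch: delayed-work driven firmware reset state machine. */
    static void sketch_fw_reset_task(struct work_struct *work)
    {
            struct bnxt *bp = container_of(work, struct bnxt,
                                           fw_reset_task.work);

            switch (bp->fw_reset_state) {
            case BNXT_FW_RESET_STATE_RESET_FW:
                    bnxt_reset_all(bp);
                    bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
                    bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
                    return;
            case BNXT_FW_RESET_STATE_POLL_FW:
                    if (bnxt_hwrm_poll(bp)) {               /* fw not back yet */
                            if (bnxt_fw_reset_timeout(bp))
                                    break;                  /* give up */
                            bnxt_queue_fw_reset_work(bp, HZ / 5);   /* retry */
                            return;
                    }
                    bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
                    fallthrough;
            case BNXT_FW_RESET_STATE_OPENING:
                    /* reopen the netdev, restart ULPs, clear IN_FW_RESET */
                    return;
            }
            bnxt_fw_reset_abort(bp, -ETIMEDOUT);
    }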
15173 struct bnxt *bp = netdev_priv(dev); in bnxt_init_board() local
15206 bp->dev = dev; in bnxt_init_board()
15207 bp->pdev = pdev; in bnxt_init_board()
15209 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() in bnxt_init_board()
15212 bp->bar0 = pci_ioremap_bar(pdev, 0); in bnxt_init_board()
15213 if (!bp->bar0) { in bnxt_init_board()
15219 bp->bar2 = pci_ioremap_bar(pdev, 4); in bnxt_init_board()
15220 if (!bp->bar2) { in bnxt_init_board()
15226 INIT_WORK(&bp->sp_task, bnxt_sp_task); in bnxt_init_board()
15227 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); in bnxt_init_board()
15229 spin_lock_init(&bp->ntp_fltr_lock); in bnxt_init_board()
15231 spin_lock_init(&bp->db_lock); in bnxt_init_board()
15234 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; in bnxt_init_board()
15235 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; in bnxt_init_board()
15237 timer_setup(&bp->timer, bnxt_timer, 0); in bnxt_init_board()
15238 bp->current_interval = BNXT_TIMER_INTERVAL; in bnxt_init_board()
15240 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; in bnxt_init_board()
15241 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; in bnxt_init_board()
15243 clear_bit(BNXT_STATE_OPEN, &bp->state); in bnxt_init_board()
15247 bnxt_unmap_bars(bp, pdev); in bnxt_init_board()
15260 struct bnxt *bp = netdev_priv(dev); in bnxt_change_mac_addr() local
15271 rc = bnxt_approve_mac(bp, addr->sa_data, true); in bnxt_change_mac_addr()
15276 bnxt_clear_usr_fltrs(bp, true); in bnxt_change_mac_addr()
15278 bnxt_close_nic(bp, false, false); in bnxt_change_mac_addr()
15279 rc = bnxt_open_nic(bp, false, false); in bnxt_change_mac_addr()
15287 struct bnxt *bp = netdev_priv(dev); in bnxt_change_mtu() local
15292 bnxt_close_nic(bp, true, false); in bnxt_change_mtu()
15300 if (READ_ONCE(bp->xdp_prog)) in bnxt_change_mtu()
15301 bnxt_set_rx_skb_mode(bp, true); in bnxt_change_mtu()
15303 bnxt_set_ring_params(bp); in bnxt_change_mtu()
15306 return bnxt_open_nic(bp, true, false); in bnxt_change_mtu()
15313 struct bnxt *bp = netdev_priv(dev); in bnxt_setup_mq_tc() local
15317 if (tc > bp->max_tc) { in bnxt_setup_mq_tc()
15319 tc, bp->max_tc); in bnxt_setup_mq_tc()
15323 if (bp->num_tc == tc) in bnxt_setup_mq_tc()
15326 if (bp->flags & BNXT_FLAG_SHARED_RINGS) in bnxt_setup_mq_tc()
15329 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, in bnxt_setup_mq_tc()
15330 sh, tc, bp->tx_nr_rings_xdp); in bnxt_setup_mq_tc()
15335 if (netif_running(bp->dev)) in bnxt_setup_mq_tc()
15336 bnxt_close_nic(bp, true, false); in bnxt_setup_mq_tc()
15339 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; in bnxt_setup_mq_tc()
15341 bp->num_tc = tc; in bnxt_setup_mq_tc()
15343 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; in bnxt_setup_mq_tc()
15345 bp->num_tc = 0; in bnxt_setup_mq_tc()
15347 bp->tx_nr_rings += bp->tx_nr_rings_xdp; in bnxt_setup_mq_tc()
15348 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); in bnxt_setup_mq_tc()
15349 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : in bnxt_setup_mq_tc()
15350 tx_cp + bp->rx_nr_rings; in bnxt_setup_mq_tc()
15352 if (netif_running(bp->dev)) in bnxt_setup_mq_tc()
15353 return bnxt_open_nic(bp, true, false); in bnxt_setup_mq_tc()
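The ring arithmetic in bnxt_setup_mq_tc() follows directly from the shared-ring flag: TX rings are replicated per traffic class, then completion rings are either the max of the TX-completion and RX counts (shared) or their sum. Worked example, assuming tx_nr_rings_per_tc = 4, rx_nr_rings = 4, tc = 2 and tx_cp == tx: tx = 4 * 2 = 8, shared cp = max(8, 4) = 8, non-shared cp = 8 + 4 = 12. As a one-line sketch:

    /* Sketch: completion-ring count from TX-completion and RX counts. */
    static int sketch_cp_rings(int tx_cp, int rx, bool shared)
    {
            return shared ? max_t(int, tx_cp, rx) : tx_cp + rx;
    }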
15361 struct bnxt *bp = cb_priv; in bnxt_setup_tc_block_cb() local
15363 if (!bnxt_tc_flower_enabled(bp) || in bnxt_setup_tc_block_cb()
15364 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) in bnxt_setup_tc_block_cb()
15369 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); in bnxt_setup_tc_block_cb()
15380 struct bnxt *bp = netdev_priv(dev); in bnxt_setup_tc() local
15387 bp, bp, true); in bnxt_setup_tc()
15400 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, in bnxt_get_ntp_filter_idx() argument
15408 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; in bnxt_get_ntp_filter_idx()
15409 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); in bnxt_get_ntp_filter_idx()
15412 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, in bnxt_insert_ntp_filter() argument
15418 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_insert_ntp_filter()
15419 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); in bnxt_insert_ntp_filter()
15421 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_insert_ntp_filter()
15428 head = &bp->ntp_fltr_hash_tbl[idx]; in bnxt_insert_ntp_filter()
15431 bnxt_insert_usr_fltr(bp, &fltr->base); in bnxt_insert_ntp_filter()
15432 bp->ntp_fltr_count++; in bnxt_insert_ntp_filter()
15433 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_insert_ntp_filter()
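Insertion of an aRFS filter combines three things under one BH-safe spinlock: a software id from a bitmap allocator, an RCU hash-list link so the lookup path can run locklessly, and the filter count consumed by the periodic expiry scan. A sketch of that sequence, using the field names visible in the listing:

    /* Sketch: allocate an id and publish the filter under ntp_fltr_lock. */
    static int sketch_insert_ntp_filter(struct bnxt *bp,
                                        struct bnxt_ntuple_filter *fltr,
                                        u32 idx)
    {
            int bit_id;

            spin_lock_bh(&bp->ntp_fltr_lock);
            bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
            if (bit_id < 0) {
                    spin_unlock_bh(&bp->ntp_fltr_lock);
                    return -ENOMEM;
            }
            fltr->base.sw_id = (u16)bit_id;
            hlist_add_head_rcu(&fltr->base.hash, &bp->ntp_fltr_hash_tbl[idx]);
            bp->ntp_fltr_count++;
            spin_unlock_bh(&bp->ntp_fltr_lock);
            return 0;
    }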
15476 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, in bnxt_lookup_ntp_filter_from_idx() argument
15482 head = &bp->ntp_fltr_hash_tbl[idx]; in bnxt_lookup_ntp_filter_from_idx()
15494 struct bnxt *bp = netdev_priv(dev); in bnxt_rx_flow_steer() local
15503 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; in bnxt_rx_flow_steer()
15510 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); in bnxt_rx_flow_steer()
15514 bnxt_del_l2_filter(bp, l2_fltr); in bnxt_rx_flow_steer()
15520 bnxt_del_l2_filter(bp, l2_fltr); in bnxt_rx_flow_steer()
15539 if (bp->hwrm_spec_code < 0x10601) { in bnxt_rx_flow_steer()
15547 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { in bnxt_rx_flow_steer()
15553 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); in bnxt_rx_flow_steer()
15555 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); in bnxt_rx_flow_steer()
15565 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); in bnxt_rx_flow_steer()
15567 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); in bnxt_rx_flow_steer()
15572 bnxt_del_l2_filter(bp, l2_fltr); in bnxt_rx_flow_steer()
15578 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) in bnxt_del_ntp_filter() argument
15580 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_del_ntp_filter()
15582 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_del_ntp_filter()
15586 bnxt_del_one_usr_fltr(bp, &fltr->base); in bnxt_del_ntp_filter()
15587 bp->ntp_fltr_count--; in bnxt_del_ntp_filter()
15588 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_del_ntp_filter()
15589 bnxt_del_l2_filter(bp, fltr->l2_fltr); in bnxt_del_ntp_filter()
15590 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); in bnxt_del_ntp_filter()
15594 static void bnxt_cfg_ntp_filters(struct bnxt *bp) in bnxt_cfg_ntp_filters() argument
15605 head = &bp->ntp_fltr_hash_tbl[i]; in bnxt_cfg_ntp_filters()
15612 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, in bnxt_cfg_ntp_filters()
15615 bnxt_hwrm_cfa_ntuple_filter_free(bp, in bnxt_cfg_ntp_filters()
15620 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, in bnxt_cfg_ntp_filters()
15629 bnxt_del_ntp_filter(bp, fltr); in bnxt_cfg_ntp_filters()
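The periodic sweep closes the loop with the RPS core: rps_may_expire_flow() reports whether the stack still references a steered flow, and stale filters are first freed in firmware, then unhooked from the driver's tables. Sketch of one hash bucket's scan:

    /* Sketch: expire aRFS filters that RPS no longer references. */
    static void sketch_expire_bucket(struct bnxt *bp, struct hlist_head *head)
    {
            struct bnxt_ntuple_filter *fltr;
            struct hlist_node *tmp;

            hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
                    if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
                                            fltr->flow_id, fltr->base.sw_id)) {
                            bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
                            bnxt_del_ntp_filter(bp, fltr);
                    }
            }
    }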
15638 struct bnxt *bp = netdev_priv(netdev); in bnxt_udp_tunnel_set_port() local
15648 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); in bnxt_udp_tunnel_set_port()
15654 struct bnxt *bp = netdev_priv(netdev); in bnxt_udp_tunnel_unset_port() local
15664 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); in bnxt_udp_tunnel_unset_port()
15690 struct bnxt *bp = netdev_priv(dev); in bnxt_bridge_getlink() local
15692 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, in bnxt_bridge_getlink()
15699 struct bnxt *bp = netdev_priv(dev); in bnxt_bridge_setlink() local
15703 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) in bnxt_bridge_setlink()
15714 if (mode == bp->br_mode) in bnxt_bridge_setlink()
15717 rc = bnxt_hwrm_set_br_mode(bp, mode); in bnxt_bridge_setlink()
15719 bp->br_mode = mode; in bnxt_bridge_setlink()
15728 struct bnxt *bp = netdev_priv(dev); in bnxt_get_port_parent_id() local
15730 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) in bnxt_get_port_parent_id()
15734 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) in bnxt_get_port_parent_id()
15737 ppid->id_len = sizeof(bp->dsn); in bnxt_get_port_parent_id()
15738 memcpy(ppid->id, bp->dsn, ppid->id_len); in bnxt_get_port_parent_id()
15779 struct bnxt *bp = netdev_priv(dev); in bnxt_get_queue_stats_rx() local
15783 if (!bp->bnapi) in bnxt_get_queue_stats_rx()
15786 cpr = &bp->bnapi[i]->cp_ring; in bnxt_get_queue_stats_rx()
15805 struct bnxt *bp = netdev_priv(dev); in bnxt_get_queue_stats_tx() local
15809 if (!bp->tx_ring) in bnxt_get_queue_stats_tx()
15812 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; in bnxt_get_queue_stats_tx()
15830 struct bnxt *bp = netdev_priv(dev); in bnxt_get_base_stats() local
15832 rx->packets = bp->net_stats_prev.rx_packets; in bnxt_get_base_stats()
15833 rx->bytes = bp->net_stats_prev.rx_bytes; in bnxt_get_base_stats()
15834 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards; in bnxt_get_base_stats()
15836 tx->packets = bp->net_stats_prev.tx_packets; in bnxt_get_base_stats()
15837 tx->bytes = bp->net_stats_prev.tx_bytes; in bnxt_get_base_stats()
15849 struct bnxt *bp = netdev_priv(dev); in bnxt_queue_mem_alloc() local
15853 if (!bp->rx_ring) in bnxt_queue_mem_alloc()
15856 rxr = &bp->rx_ring[idx]; in bnxt_queue_mem_alloc()
15859 bnxt_init_rx_ring_struct(bp, clone); in bnxt_queue_mem_alloc()
15860 bnxt_reset_rx_ring_struct(bp, clone); in bnxt_queue_mem_alloc()
15868 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid); in bnxt_queue_mem_alloc()
15872 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0); in bnxt_queue_mem_alloc()
15883 rc = bnxt_alloc_ring(bp, &ring->ring_mem); in bnxt_queue_mem_alloc()
15887 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in bnxt_queue_mem_alloc()
15889 rc = bnxt_alloc_ring(bp, &ring->ring_mem); in bnxt_queue_mem_alloc()
15893 rc = bnxt_alloc_rx_agg_bmap(bp, clone); in bnxt_queue_mem_alloc()
15898 if (bp->flags & BNXT_FLAG_TPA) { in bnxt_queue_mem_alloc()
15899 rc = bnxt_alloc_one_tpa_info(bp, clone); in bnxt_queue_mem_alloc()
15904 bnxt_init_one_rx_ring_rxbd(bp, clone); in bnxt_queue_mem_alloc()
15905 bnxt_init_one_rx_agg_ring_rxbd(bp, clone); in bnxt_queue_mem_alloc()
15907 bnxt_alloc_one_rx_ring_skb(bp, clone, idx); in bnxt_queue_mem_alloc()
15908 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_queue_mem_alloc()
15909 bnxt_alloc_one_rx_ring_netmem(bp, clone, idx); in bnxt_queue_mem_alloc()
15910 if (bp->flags & BNXT_FLAG_TPA) in bnxt_queue_mem_alloc()
15911 bnxt_alloc_one_tpa_info_data(bp, clone); in bnxt_queue_mem_alloc()
15916 bnxt_free_one_tpa_info(bp, clone); in bnxt_queue_mem_alloc()
15918 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem); in bnxt_queue_mem_alloc()
15920 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem); in bnxt_queue_mem_alloc()
15935 struct bnxt *bp = netdev_priv(dev); in bnxt_queue_mem_free() local
15938 bnxt_free_one_rx_ring_skbs(bp, rxr); in bnxt_queue_mem_free()
15939 bnxt_free_one_tpa_info(bp, rxr); in bnxt_queue_mem_free()
15950 bnxt_free_ring(bp, &ring->ring_mem); in bnxt_queue_mem_free()
15953 bnxt_free_ring(bp, &ring->ring_mem); in bnxt_queue_mem_free()
15959 static void bnxt_copy_rx_ring(struct bnxt *bp, in bnxt_copy_rx_ring() argument
15987 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) in bnxt_copy_rx_ring()
16016 struct bnxt *bp = netdev_priv(dev); in bnxt_queue_start() local
16024 rxr = &bp->rx_ring[idx]; in bnxt_queue_start()
16038 bnxt_copy_rx_ring(bp, rxr, clone); in bnxt_queue_start()
16046 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); in bnxt_queue_start()
16050 if (bp->tph_mode) { in bnxt_queue_start()
16051 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); in bnxt_queue_start()
16056 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr); in bnxt_queue_start()
16060 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_queue_start()
16061 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_queue_start()
16062 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in bnxt_queue_start()
16064 if (bp->flags & BNXT_FLAG_SHARED_RINGS) { in bnxt_queue_start()
16065 rc = bnxt_tx_queue_start(bp, idx); in bnxt_queue_start()
16072 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_queue_start()
16074 mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; in bnxt_queue_start()
16075 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_queue_start()
16076 vnic = &bp->vnic_info[i]; in bnxt_queue_start()
16078 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx); in bnxt_queue_start()
16082 return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx); in bnxt_queue_start()
16085 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n", in bnxt_queue_start()
16088 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_queue_start()
16089 bnxt_reset_task(bp, true); in bnxt_queue_start()
16095 struct bnxt *bp = netdev_priv(dev); in bnxt_queue_stop() local
16102 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_queue_stop()
16103 vnic = &bp->vnic_info[i]; in bnxt_queue_stop()
16105 bnxt_set_vnic_mru_p5(bp, vnic, 0, idx); in bnxt_queue_stop()
16107 bnxt_set_rss_ctx_vnic_mru(bp, 0, idx); in bnxt_queue_stop()
16110 rxr = &bp->rx_ring[idx]; in bnxt_queue_stop()
16114 bnxt_hwrm_rx_ring_free(bp, rxr, false); in bnxt_queue_stop()
16115 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); in bnxt_queue_stop()
16120 if (bp->flags & BNXT_FLAG_SHARED_RINGS) in bnxt_queue_stop()
16121 bnxt_tx_queue_stop(bp, idx); in bnxt_queue_stop()
16129 if (bp->tph_mode) { in bnxt_queue_stop()
16130 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr); in bnxt_queue_stop()
16131 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr); in bnxt_queue_stop()
16133 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_queue_stop()
16136 bnxt_init_rx_ring_struct(bp, qmem); in bnxt_queue_stop()
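bnxt_queue_mem_alloc/free/start/stop implement the netdev queue management API, which lets the core swap the memory behind a single RX queue without a full close/open cycle. In the driver they are wired up through a netdev_queue_mgmt_ops table registered at probe time, roughly:

    static const struct netdev_queue_mgmt_ops sketch_queue_mgmt_ops = {
            .ndo_queue_mem_size     = sizeof(struct bnxt_rx_ring_info),
            .ndo_queue_mem_alloc    = bnxt_queue_mem_alloc,
            .ndo_queue_mem_free     = bnxt_queue_mem_free,
            .ndo_queue_start        = bnxt_queue_start,
            .ndo_queue_stop         = bnxt_queue_stop,
    };

    /* in probe, when BNXT_SUPPORTS_QUEUE_API(bp):
     *         dev->queue_mgmt_ops = &sketch_queue_mgmt_ops;
     */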
16152 struct bnxt *bp = netdev_priv(dev); in bnxt_remove_one() local
16154 if (BNXT_PF(bp)) in bnxt_remove_one()
16155 bnxt_sriov_disable(bp); in bnxt_remove_one()
16157 bnxt_rdma_aux_device_del(bp); in bnxt_remove_one()
16160 bnxt_ptp_clear(bp); in bnxt_remove_one()
16162 bnxt_rdma_aux_device_uninit(bp); in bnxt_remove_one()
16164 bnxt_free_l2_filters(bp, true); in bnxt_remove_one()
16165 bnxt_free_ntp_fltrs(bp, true); in bnxt_remove_one()
16166 WARN_ON(bp->num_rss_ctx); in bnxt_remove_one()
16167 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_remove_one()
16169 cancel_work_sync(&bp->sp_task); in bnxt_remove_one()
16170 cancel_delayed_work_sync(&bp->fw_reset_task); in bnxt_remove_one()
16171 bp->sp_event = 0; in bnxt_remove_one()
16173 bnxt_dl_fw_reporters_destroy(bp); in bnxt_remove_one()
16174 bnxt_dl_unregister(bp); in bnxt_remove_one()
16175 bnxt_shutdown_tc(bp); in bnxt_remove_one()
16177 bnxt_clear_int_mode(bp); in bnxt_remove_one()
16178 bnxt_hwrm_func_drv_unrgtr(bp); in bnxt_remove_one()
16179 bnxt_free_hwrm_resources(bp); in bnxt_remove_one()
16180 bnxt_hwmon_uninit(bp); in bnxt_remove_one()
16181 bnxt_ethtool_free(bp); in bnxt_remove_one()
16182 bnxt_dcb_free(bp); in bnxt_remove_one()
16183 kfree(bp->ptp_cfg); in bnxt_remove_one()
16184 bp->ptp_cfg = NULL; in bnxt_remove_one()
16185 kfree(bp->fw_health); in bnxt_remove_one()
16186 bp->fw_health = NULL; in bnxt_remove_one()
16187 bnxt_cleanup_pci(bp); in bnxt_remove_one()
16188 bnxt_free_ctx_mem(bp, true); in bnxt_remove_one()
16189 bnxt_free_crash_dump_mem(bp); in bnxt_remove_one()
16190 kfree(bp->rss_indir_tbl); in bnxt_remove_one()
16191 bp->rss_indir_tbl = NULL; in bnxt_remove_one()
16192 bnxt_free_port_stats(bp); in bnxt_remove_one()
16196 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) in bnxt_probe_phy() argument
16199 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_probe_phy()
16201 bp->phy_flags = 0; in bnxt_probe_phy()
16202 rc = bnxt_hwrm_phy_qcaps(bp); in bnxt_probe_phy()
16204 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", in bnxt_probe_phy()
16208 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) in bnxt_probe_phy()
16209 bp->dev->priv_flags |= IFF_SUPP_NOFCS; in bnxt_probe_phy()
16211 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; in bnxt_probe_phy()
16213 bp->mac_flags = 0; in bnxt_probe_phy()
16214 bnxt_hwrm_mac_qcaps(bp); in bnxt_probe_phy()
16219 mutex_lock(&bp->link_lock); in bnxt_probe_phy()
16220 rc = bnxt_update_link(bp, false); in bnxt_probe_phy()
16222 mutex_unlock(&bp->link_lock); in bnxt_probe_phy()
16223 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", in bnxt_probe_phy()
16234 bnxt_init_ethtool_link_settings(bp); in bnxt_probe_phy()
16235 mutex_unlock(&bp->link_lock); in bnxt_probe_phy()
16250 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, in _bnxt_get_max_rings() argument
16253 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in _bnxt_get_max_rings()
16258 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); in _bnxt_get_max_rings()
16259 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - in _bnxt_get_max_rings()
16260 bnxt_get_ulp_msix_num_in_use(bp), in _bnxt_get_max_rings()
16262 bnxt_get_ulp_stat_ctxs_in_use(bp)); in _bnxt_get_max_rings()
16263 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in _bnxt_get_max_rings()
16266 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { in _bnxt_get_max_rings()
16270 if (bp->flags & BNXT_FLAG_AGG_RINGS) in _bnxt_get_max_rings()
16272 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in _bnxt_get_max_rings()
16275 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); in _bnxt_get_max_rings()
16286 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) in bnxt_get_max_rings() argument
16290 _bnxt_get_max_rings(bp, &rx, &tx, &cp); in bnxt_get_max_rings()
16296 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); in bnxt_get_max_rings()
16299 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, in bnxt_get_dflt_rings() argument
16304 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); in bnxt_get_dflt_rings()
16305 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { in bnxt_get_dflt_rings()
16307 bp->flags &= ~BNXT_FLAG_AGG_RINGS; in bnxt_get_dflt_rings()
16308 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); in bnxt_get_dflt_rings()
16311 bp->flags |= BNXT_FLAG_AGG_RINGS; in bnxt_get_dflt_rings()
16314 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; in bnxt_get_dflt_rings()
16315 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnxt_get_dflt_rings()
16316 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnxt_get_dflt_rings()
16317 bnxt_set_ring_params(bp); in bnxt_get_dflt_rings()
16320 if (bp->flags & BNXT_FLAG_ROCE_CAP) { in bnxt_get_dflt_rings()
16324 max_cp = bnxt_get_max_func_cp_rings(bp); in bnxt_get_dflt_rings()
16325 max_stat = bnxt_get_max_func_stat_ctxs(bp); in bnxt_get_dflt_rings()
16326 max_irq = bnxt_get_max_func_irqs(bp); in bnxt_get_dflt_rings()
16337 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); in bnxt_get_dflt_rings()
16347 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) in bnxt_trim_dflt_sh_rings() argument
16349 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); in bnxt_trim_dflt_sh_rings()
16350 bp->rx_nr_rings = bp->cp_nr_rings; in bnxt_trim_dflt_sh_rings()
16351 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; in bnxt_trim_dflt_sh_rings()
16352 bp->tx_nr_rings = bnxt_tx_nr_rings(bp); in bnxt_trim_dflt_sh_rings()
16355 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) in bnxt_set_dflt_rings() argument
16360 if (!bnxt_can_reserve_rings(bp)) in bnxt_set_dflt_rings()
16364 bp->flags |= BNXT_FLAG_SHARED_RINGS; in bnxt_set_dflt_rings()
16369 if (bp->port_count > 1) { in bnxt_set_dflt_rings()
16371 max_t(int, num_online_cpus() / bp->port_count, 1); in bnxt_set_dflt_rings()
16375 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); in bnxt_set_dflt_rings()
16378 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); in bnxt_set_dflt_rings()
16379 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); in bnxt_set_dflt_rings()
16381 bnxt_trim_dflt_sh_rings(bp); in bnxt_set_dflt_rings()
16383 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; in bnxt_set_dflt_rings()
16384 bp->tx_nr_rings = bnxt_tx_nr_rings(bp); in bnxt_set_dflt_rings()
16386 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; in bnxt_set_dflt_rings()
16388 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want); in bnxt_set_dflt_rings()
16390 bnxt_set_ulp_msix_num(bp, ulp_num_msix); in bnxt_set_dflt_rings()
16391 bnxt_set_dflt_ulp_stat_ctxs(bp); in bnxt_set_dflt_rings()
16394 rc = __bnxt_reserve_rings(bp); in bnxt_set_dflt_rings()
16396 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); in bnxt_set_dflt_rings()
16397 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); in bnxt_set_dflt_rings()
16399 bnxt_trim_dflt_sh_rings(bp); in bnxt_set_dflt_rings()
16402 if (bnxt_need_reserve_rings(bp)) { in bnxt_set_dflt_rings()
16403 rc = __bnxt_reserve_rings(bp); in bnxt_set_dflt_rings()
16405 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); in bnxt_set_dflt_rings()
16406 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); in bnxt_set_dflt_rings()
16408 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { in bnxt_set_dflt_rings()
16409 bp->rx_nr_rings++; in bnxt_set_dflt_rings()
16410 bp->cp_nr_rings++; in bnxt_set_dflt_rings()
16413 bp->tx_nr_rings = 0; in bnxt_set_dflt_rings()
16414 bp->rx_nr_rings = 0; in bnxt_set_dflt_rings()
16419 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) in bnxt_init_dflt_ring_mode() argument
16423 if (bp->tx_nr_rings) in bnxt_init_dflt_ring_mode()
16426 bnxt_ulp_irq_stop(bp); in bnxt_init_dflt_ring_mode()
16427 bnxt_clear_int_mode(bp); in bnxt_init_dflt_ring_mode()
16428 rc = bnxt_set_dflt_rings(bp, true); in bnxt_init_dflt_ring_mode()
16430 if (BNXT_VF(bp) && rc == -ENODEV) in bnxt_init_dflt_ring_mode()
16431 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); in bnxt_init_dflt_ring_mode()
16433 netdev_err(bp->dev, "Not enough rings available.\n"); in bnxt_init_dflt_ring_mode()
16436 rc = bnxt_init_int_mode(bp); in bnxt_init_dflt_ring_mode()
16440 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp); in bnxt_init_dflt_ring_mode()
16442 bnxt_set_dflt_rfs(bp); in bnxt_init_dflt_ring_mode()
16445 bnxt_ulp_irq_restart(bp, rc); in bnxt_init_dflt_ring_mode()
16449 int bnxt_restore_pf_fw_resources(struct bnxt *bp) in bnxt_restore_pf_fw_resources() argument
16453 netdev_ops_assert_locked(bp->dev); in bnxt_restore_pf_fw_resources()
16454 bnxt_hwrm_func_qcaps(bp); in bnxt_restore_pf_fw_resources()
16456 if (netif_running(bp->dev)) in bnxt_restore_pf_fw_resources()
16457 __bnxt_close_nic(bp, true, false); in bnxt_restore_pf_fw_resources()
16459 bnxt_ulp_irq_stop(bp); in bnxt_restore_pf_fw_resources()
16460 bnxt_clear_int_mode(bp); in bnxt_restore_pf_fw_resources()
16461 rc = bnxt_init_int_mode(bp); in bnxt_restore_pf_fw_resources()
16462 bnxt_ulp_irq_restart(bp, rc); in bnxt_restore_pf_fw_resources()
16464 if (netif_running(bp->dev)) { in bnxt_restore_pf_fw_resources()
16466 netif_close(bp->dev); in bnxt_restore_pf_fw_resources()
16468 rc = bnxt_open_nic(bp, true, false); in bnxt_restore_pf_fw_resources()
16474 static int bnxt_init_mac_addr(struct bnxt *bp) in bnxt_init_mac_addr() argument
16478 if (BNXT_PF(bp)) { in bnxt_init_mac_addr()
16479 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); in bnxt_init_mac_addr()
16482 struct bnxt_vf_info *vf = &bp->vf; in bnxt_init_mac_addr()
16487 eth_hw_addr_set(bp->dev, vf->mac_addr); in bnxt_init_mac_addr()
16493 eth_hw_addr_random(bp->dev); in bnxt_init_mac_addr()
16495 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); in bnxt_init_mac_addr()
16501 static void bnxt_vpd_read_info(struct bnxt *bp) in bnxt_vpd_read_info() argument
16503 struct pci_dev *pdev = bp->pdev; in bnxt_vpd_read_info()
16520 memcpy(bp->board_partno, &vpd_data[pos], size); in bnxt_vpd_read_info()
16530 memcpy(bp->board_serialno, &vpd_data[pos], size); in bnxt_vpd_read_info()
16535 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) in bnxt_pcie_dsn_get() argument
16537 struct pci_dev *pdev = bp->pdev; in bnxt_pcie_dsn_get()
16542 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); in bnxt_pcie_dsn_get()
16548 bp->flags |= BNXT_FLAG_DSN_VALID; in bnxt_pcie_dsn_get()
16552 static int bnxt_map_db_bar(struct bnxt *bp) in bnxt_map_db_bar() argument
16554 if (!bp->db_size) in bnxt_map_db_bar()
16556 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); in bnxt_map_db_bar()
16557 if (!bp->bar1) in bnxt_map_db_bar()
16562 void bnxt_print_device_info(struct bnxt *bp) in bnxt_print_device_info() argument
16564 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", in bnxt_print_device_info()
16565 board_info[bp->board_idx].name, in bnxt_print_device_info()
16566 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); in bnxt_print_device_info()
16568 pcie_print_link_status(bp->pdev); in bnxt_print_device_info()
16575 struct bnxt *bp; in bnxt_init_one() local
16595 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, in bnxt_init_one()
16600 bp = netdev_priv(dev); in bnxt_init_one()
16601 bp->board_idx = ent->driver_data; in bnxt_init_one()
16602 bp->msg_enable = BNXT_DEF_MSG_ENABLE; in bnxt_init_one()
16603 bnxt_set_max_func_irqs(bp, max_irqs); in bnxt_init_one()
16605 if (bnxt_vf_pciid(bp->board_idx)) in bnxt_init_one()
16606 bp->flags |= BNXT_FLAG_VF; in bnxt_init_one()
16609 if (BNXT_PF(bp)) in bnxt_init_one()
16610 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); in bnxt_init_one()
16622 rc = bnxt_alloc_hwrm_resources(bp); in bnxt_init_one()
16626 mutex_init(&bp->hwrm_cmd_lock); in bnxt_init_one()
16627 mutex_init(&bp->link_lock); in bnxt_init_one()
16629 rc = bnxt_fw_init_one_p1(bp); in bnxt_init_one()
16633 if (BNXT_PF(bp)) in bnxt_init_one()
16634 bnxt_vpd_read_info(bp); in bnxt_init_one()
16636 if (BNXT_CHIP_P5_PLUS(bp)) { in bnxt_init_one()
16637 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; in bnxt_init_one()
16638 if (BNXT_CHIP_P7(bp)) in bnxt_init_one()
16639 bp->flags |= BNXT_FLAG_CHIP_P7; in bnxt_init_one()
16642 rc = bnxt_alloc_rss_indir_tbl(bp); in bnxt_init_one()
16646 rc = bnxt_fw_init_one_p2(bp); in bnxt_init_one()
16650 rc = bnxt_map_db_bar(bp); in bnxt_init_one()
16664 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) in bnxt_init_one()
16667 if (BNXT_SUPPORTS_TPA(bp)) in bnxt_init_one()
16676 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) in bnxt_init_one()
16678 if (bp->flags & BNXT_FLAG_CHIP_P7) in bnxt_init_one()
16686 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) in bnxt_init_one()
16688 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) in bnxt_init_one()
16690 if (BNXT_SUPPORTS_TPA(bp)) in bnxt_init_one()
16698 if (bp->tso_max_segs) in bnxt_init_one()
16699 netif_set_tso_max_segs(dev, bp->tso_max_segs); in bnxt_init_one()
16705 init_waitqueue_head(&bp->sriov_cfg_wait); in bnxt_init_one()
16707 if (BNXT_SUPPORTS_TPA(bp)) { in bnxt_init_one()
16708 bp->gro_func = bnxt_gro_func_5730x; in bnxt_init_one()
16709 if (BNXT_CHIP_P4(bp)) in bnxt_init_one()
16710 bp->gro_func = bnxt_gro_func_5731x; in bnxt_init_one()
16711 else if (BNXT_CHIP_P5_PLUS(bp)) in bnxt_init_one()
16712 bp->gro_func = bnxt_gro_func_5750x; in bnxt_init_one()
16714 if (!BNXT_CHIP_P4_PLUS(bp)) in bnxt_init_one()
16715 bp->flags |= BNXT_FLAG_DOUBLE_DB; in bnxt_init_one()
16717 rc = bnxt_init_mac_addr(bp); in bnxt_init_one()
16724 if (BNXT_PF(bp)) { in bnxt_init_one()
16726 rc = bnxt_pcie_dsn_get(bp, bp->dsn); in bnxt_init_one()
16731 dev->max_mtu = bp->max_mtu; in bnxt_init_one()
16733 rc = bnxt_probe_phy(bp, true); in bnxt_init_one()
16737 hw_resc = &bp->hw_resc; in bnxt_init_one()
16738 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + in bnxt_init_one()
16741 if (bp->max_fltr < BNXT_MAX_FLTR) in bnxt_init_one()
16742 bp->max_fltr = BNXT_MAX_FLTR; in bnxt_init_one()
16743 bnxt_init_l2_fltr_tbl(bp); in bnxt_init_one()
16744 __bnxt_set_rx_skb_mode(bp, false); in bnxt_init_one()
16745 bnxt_set_tpa_flags(bp); in bnxt_init_one()
16746 bnxt_init_ring_params(bp); in bnxt_init_one()
16747 bnxt_set_ring_params(bp); in bnxt_init_one()
16748 bnxt_rdma_aux_device_init(bp); in bnxt_init_one()
16749 rc = bnxt_set_dflt_rings(bp, true); in bnxt_init_one()
16751 if (BNXT_VF(bp) && rc == -ENODEV) { in bnxt_init_one()
16752 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); in bnxt_init_one()
16754 netdev_err(bp->dev, "Not enough rings available.\n"); in bnxt_init_one()
16760 bnxt_fw_init_one_p3(bp); in bnxt_init_one()
16762 bnxt_init_dflt_coal(bp); in bnxt_init_one()
16765 bp->flags |= BNXT_FLAG_STRIP_VLAN; in bnxt_init_one()
16767 rc = bnxt_init_int_mode(bp); in bnxt_init_one()
16774 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_init_one()
16776 if (BNXT_PF(bp)) { in bnxt_init_one()
16786 rc = bnxt_init_tc(bp); in bnxt_init_one()
16792 bnxt_inv_fw_health_reg(bp); in bnxt_init_one()
16793 rc = bnxt_dl_register(bp); in bnxt_init_one()
16797 INIT_LIST_HEAD(&bp->usr_fltr_list); in bnxt_init_one()
16799 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) in bnxt_init_one()
16800 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; in bnxt_init_one()
16801 if (BNXT_SUPPORTS_QUEUE_API(bp)) in bnxt_init_one()
16810 bnxt_dl_fw_reporters_create(bp); in bnxt_init_one()
16812 bnxt_rdma_aux_device_add(bp); in bnxt_init_one()
16814 bnxt_print_device_info(bp); in bnxt_init_one()
16820 bnxt_rdma_aux_device_uninit(bp); in bnxt_init_one()
16821 bnxt_dl_unregister(bp); in bnxt_init_one()
16823 bnxt_shutdown_tc(bp); in bnxt_init_one()
16824 bnxt_clear_int_mode(bp); in bnxt_init_one()
16827 bnxt_hwrm_func_drv_unrgtr(bp); in bnxt_init_one()
16828 bnxt_free_hwrm_resources(bp); in bnxt_init_one()
16829 bnxt_hwmon_uninit(bp); in bnxt_init_one()
16830 bnxt_ethtool_free(bp); in bnxt_init_one()
16831 bnxt_ptp_clear(bp); in bnxt_init_one()
16832 kfree(bp->ptp_cfg); in bnxt_init_one()
16833 bp->ptp_cfg = NULL; in bnxt_init_one()
16834 kfree(bp->fw_health); in bnxt_init_one()
16835 bp->fw_health = NULL; in bnxt_init_one()
16836 bnxt_cleanup_pci(bp); in bnxt_init_one()
16837 bnxt_free_ctx_mem(bp, true); in bnxt_init_one()
16838 bnxt_free_crash_dump_mem(bp); in bnxt_init_one()
16839 kfree(bp->rss_indir_tbl); in bnxt_init_one()
16840 bp->rss_indir_tbl = NULL; in bnxt_init_one()
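The tail of bnxt_init_one() above is the standard probe error unwind: each failure label undoes exactly the setup done before the jump, in reverse order, and NULLs pointers (ptp_cfg, fw_health, rss_indir_tbl) so a later teardown path cannot double-free them. A stripped-down sketch of the idiom (struct example_priv and all example_* names are illustrative):

	#include <linux/etherdevice.h>
	#include <linux/pci.h>

	struct example_priv {
		struct pci_dev *pdev;
	};

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		struct example_priv *priv;
		struct net_device *dev;
		int rc;

		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		dev = alloc_etherdev(sizeof(*priv));
		if (!dev) {
			rc = -ENOMEM;
			goto err_disable;
		}
		SET_NETDEV_DEV(dev, &pdev->dev);
		priv = netdev_priv(dev);
		priv->pdev = pdev;
		pci_set_drvdata(pdev, dev);

		rc = register_netdev(dev);
		if (rc)
			goto err_free;

		return 0;

	err_free:			/* undo in exact reverse order of setup */
		free_netdev(dev);
	err_disable:
		pci_disable_device(pdev);
		return rc;
	}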
16850 struct bnxt *bp; in bnxt_shutdown() local
16857 bp = netdev_priv(dev); in bnxt_shutdown()
16858 if (!bp) in bnxt_shutdown()
16864 bnxt_ptp_clear(bp); in bnxt_shutdown()
16865 bnxt_clear_int_mode(bp); in bnxt_shutdown()
16869 pci_wake_from_d3(pdev, bp->wol); in bnxt_shutdown()
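A .shutdown handler differs from .remove in that the kernel is going down anyway: the driver only needs to quiesce the device and, as above, arm wake-on-LAN (bp->wol) before the slot drops to D3. A minimal sketch (example_shutdown is illustrative):

	#include <linux/netdevice.h>
	#include <linux/pci.h>
	#include <linux/rtnetlink.h>

	static void example_shutdown(struct pci_dev *pdev)
	{
		struct net_device *dev = pci_get_drvdata(pdev);

		if (!dev)
			return;

		rtnl_lock();
		netif_device_detach(dev);
		rtnl_unlock();

		if (system_state == SYSTEM_POWER_OFF) {
			/* Arm wake-on-LAN before main power is removed. */
			pci_wake_from_d3(pdev, true);
			pci_set_power_state(pdev, PCI_D3hot);
		}
	}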
16882 struct bnxt *bp = netdev_priv(dev); in bnxt_suspend() local
16885 bnxt_ulp_stop(bp); in bnxt_suspend()
16892 bnxt_hwrm_func_drv_unrgtr(bp); in bnxt_suspend()
16893 bnxt_ptp_clear(bp); in bnxt_suspend()
16894 pci_disable_device(bp->pdev); in bnxt_suspend()
16895 bnxt_free_ctx_mem(bp, false); in bnxt_suspend()
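Suspend tears down more than a plain close: the fragments above also unregister the driver with the firmware and free backing-store context memory, since firmware state does not survive the power transition. The traffic-quiescing half looks roughly like this (example_suspend is illustrative, and dev_close() stands in for the driver's own close path):

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static int example_suspend(struct device *device)
	{
		struct net_device *dev = dev_get_drvdata(device);

		rtnl_lock();
		netif_device_detach(dev);
		if (netif_running(dev))
			dev_close(dev);		/* quiesce traffic and IRQs */
		rtnl_unlock();
		return 0;
	}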
16903 struct bnxt *bp = netdev_priv(dev); in bnxt_resume() local
16907 rc = pci_enable_device(bp->pdev); in bnxt_resume()
16913 pci_set_master(bp->pdev); in bnxt_resume()
16914 if (bnxt_hwrm_ver_get(bp)) { in bnxt_resume()
16918 rc = bnxt_hwrm_func_reset(bp); in bnxt_resume()
16924 rc = bnxt_hwrm_func_qcaps(bp); in bnxt_resume()
16928 bnxt_clear_reservations(bp, true); in bnxt_resume()
16930 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { in bnxt_resume()
16934 if (bp->fw_crash_mem) in bnxt_resume()
16935 bnxt_hwrm_crash_dump_mem_cfg(bp); in bnxt_resume()
16937 if (bnxt_ptp_init(bp)) { in bnxt_resume()
16938 kfree(bp->ptp_cfg); in bnxt_resume()
16939 bp->ptp_cfg = NULL; in bnxt_resume()
16941 bnxt_get_wol_settings(bp); in bnxt_resume()
16949 netdev_unlock(bp->dev); in bnxt_resume()
16950 bnxt_ulp_start(bp, rc); in bnxt_resume()
16952 bnxt_reenable_sriov(bp); in bnxt_resume()
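Resume is effectively a miniature re-probe: re-enable the PCI device, re-handshake the firmware (ver_get, func_reset, func_qcaps), re-register the driver, and only then reopen the netdev and restart ULPs and SR-IOV. The callbacks are typically wired up through dev_pm_ops on the PCI driver, roughly as below (all example_* names are illustrative and assume the suspend sketch above plus a matching example_resume):

	#include <linux/pci.h>
	#include <linux/pm.h>

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

	static struct pci_driver example_driver = {
		.name		= "example_en",
		.id_table	= example_pci_tbl,	/* hypothetical ID table */
		.probe		= example_probe,
		.remove		= example_remove,
		.shutdown	= example_shutdown,
		.driver.pm	= &example_pm_ops,
	};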
16977 struct bnxt *bp = netdev_priv(netdev); in bnxt_io_error_detected() local
16982 bnxt_ulp_stop(bp); in bnxt_io_error_detected()
16987 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_io_error_detected()
16988 netdev_err(bp->dev, "Firmware reset already in progress\n"); in bnxt_io_error_detected()
17002 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); in bnxt_io_error_detected()
17003 bnxt_fw_fatal_close(bp); in bnxt_io_error_detected()
17007 __bnxt_close_nic(bp, true, true); in bnxt_io_error_detected()
17011 bnxt_free_ctx_mem(bp, false); in bnxt_io_error_detected()
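The AER error_detected callback runs while the device may be unreachable, so it must only detach and decide: a permanent failure means disconnect, a frozen channel means ask the PCI core for a slot reset. The fragments above additionally mark firmware-reset state and free context memory; the skeleton is (example_io_error_detected is illustrative):

	#include <linux/netdevice.h>
	#include <linux/pci.h>

	static pci_ers_result_t example_io_error_detected(struct pci_dev *pdev,
							  pci_channel_state_t state)
	{
		struct net_device *dev = pci_get_drvdata(pdev);

		netif_device_detach(dev);

		if (state == pci_channel_io_perm_failure)
			return PCI_ERS_RESULT_DISCONNECT;

		/* io_frozen: MMIO and config space are inaccessible until
		 * the slot is reset, so request one from the AER core. */
		return PCI_ERS_RESULT_NEED_RESET;
	}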
17031 struct bnxt *bp = netdev_priv(netdev); in bnxt_io_slot_reset() local
17036 netdev_info(bp->dev, "PCI Slot Reset\n"); in bnxt_io_slot_reset()
17038 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_io_slot_reset()
17039 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) in bnxt_io_slot_reset()
17058 &bp->state)) { in bnxt_io_slot_reset()
17061 pci_write_config_dword(bp->pdev, off, 0); in bnxt_io_slot_reset()
17066 bnxt_inv_fw_health_reg(bp); in bnxt_io_slot_reset()
17067 bnxt_try_map_fw_health_reg(bp); in bnxt_io_slot_reset()
17073 err = bnxt_try_recover_fw(bp); in bnxt_io_slot_reset()
17084 err = bnxt_hwrm_func_reset(bp); in bnxt_io_slot_reset()
17089 bnxt_ulp_irq_stop(bp); in bnxt_io_slot_reset()
17090 bnxt_clear_int_mode(bp); in bnxt_io_slot_reset()
17094 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_io_slot_reset()
17095 bnxt_clear_reservations(bp, true); in bnxt_io_slot_reset()
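After the slot reset, config space is back at power-on defaults, so slot_reset re-enables the device, restores saved config state, and re-handshakes the firmware before reporting recovery; the fragments above additionally scrub stale doorbells via config writes and retry firmware recovery. A minimal version (example_io_slot_reset is illustrative):

	#include <linux/pci.h>

	static pci_ers_result_t example_io_slot_reset(struct pci_dev *pdev)
	{
		if (pci_enable_device(pdev))
			return PCI_ERS_RESULT_DISCONNECT;

		pci_set_master(pdev);
		/* Config space was reset: restore the state saved at probe
		 * time, then re-save it for the next recovery cycle. */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		return PCI_ERS_RESULT_RECOVERED;
	}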
17111 struct bnxt *bp = netdev_priv(netdev); in bnxt_io_resume() local
17114 netdev_info(bp->dev, "PCI Slot Resume\n"); in bnxt_io_resume()
17117 err = bnxt_hwrm_func_qcaps(bp); in bnxt_io_resume()
17122 err = bnxt_reserve_rings(bp, true); in bnxt_io_resume()
17124 err = bnxt_init_int_mode(bp); in bnxt_io_resume()
17132 bnxt_ulp_start(bp, err); in bnxt_io_resume()
17134 bnxt_reenable_sriov(bp); in bnxt_io_resume()
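io_resume is the final recovery step: re-query capabilities, re-reserve rings, reinit interrupts, reopen the netdev, then restart ULPs and SR-IOV. The three callbacks are published to the PCI core through a pci_error_handlers table hooked to the driver's .err_handler field, roughly (example_* names are illustrative):

	#include <linux/pci.h>

	static const struct pci_error_handlers example_err_handler = {
		.error_detected	= example_io_error_detected,
		.slot_reset	= example_io_slot_reset,
		.resume		= example_io_resume,
	};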