Lines matching +full:0 +full:x1806 in drivers/net/ethernet/broadcom/bnxt/bnxt.c (Linux bnxt_en driver)
145 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
146 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
147 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
148 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
149 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
150 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
151 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
152 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
153 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
154 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
155 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
156 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
157 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
158 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
159 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
160 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
161 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
162 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
163 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
164 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
165 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
166 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
167 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
168 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
169 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
171 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
172 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
173 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
175 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
178 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
179 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
180 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
181 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
182 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
183 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
184 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
185 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
186 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
187 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
190 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
191 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
193 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
194 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
196 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
197 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
198 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
199 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
200 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
201 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
202 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
204 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
206 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
207 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
208 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
209 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
210 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
211 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
212 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
213 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
214 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
215 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
216 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
217 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
219 { 0 }
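
The entries above populate the driver's pci_device_id match table; `.driver_data` indexes a board-description array, and the final `{ 0 }` is the all-zero sentinel that terminates the table for the PCI core. A minimal userspace sketch of how such a sentinel-terminated table is scanned (hypothetical `my_pci_id` type and `lookup()` helper, not the kernel's real structures):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the kernel's struct pci_device_id. */
struct my_pci_id {
    uint16_t vendor;
    uint16_t device;
    unsigned long driver_data;      /* index into a board-info array */
};

#define VENDOR_BROADCOM 0x14e4

static const struct my_pci_id id_table[] = {
    { VENDOR_BROADCOM, 0x1750, 1 }, /* BCM57508 */
    { VENDOR_BROADCOM, 0x1806, 2 }, /* NETXTREME_E_P5_VF */
    { 0 }   /* all-zero sentinel terminates the table */
};

/* Scan until the sentinel, as the PCI core does at probe time. */
static long lookup(uint16_t vendor, uint16_t device)
{
    const struct my_pci_id *id;

    for (id = id_table; id->vendor; id++)
        if (id->vendor == vendor && id->device == device)
            return (long)id->driver_data;
    return -1;
}

int main(void)
{
    printf("0x1806 -> driver_data %ld\n", lookup(VENDOR_BROADCOM, 0x1806));
    return 0;
}

Device 0x1806, the ID matched by the search query, sits in the VF half of the table above, mapped to NETXTREME_E_P5_VF.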
252 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
253 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
254 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
258 .src = 0,
259 .dst = 0,
271 .src = cpu_to_be16(0xffff),
272 .dst = cpu_to_be16(0xffff),
284 .src = cpu_to_be16(0xffff),
285 .dst = cpu_to_be16(0xffff),
289 .src = cpu_to_be32(0xffffffff),
290 .dst = cpu_to_be32(0xffffffff),
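
The flow-key templates above use cpu_to_be16()/cpu_to_be32() so the exact-match masks are stored in network byte order, alongside the big-endian packet fields they test. A userspace analog with htons()/htonl(); an all-ones mask looks the same in either byte order, so the conversion only becomes visible with a partial mask such as a /24 prefix:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    uint16_t port_mask = htons(0xffff);     /* match all 16 bits of a port */
    uint32_t addr_mask = htonl(0xffffffff); /* match all 32 bits of an IPv4 address */
    uint32_t net_mask  = htonl(0xffffff00); /* /24 prefix: here byte order matters */

    printf("port 0x%04x addr 0x%08x /24 0x%08x\n",
           ntohs(port_mask), ntohl(addr_mask), ntohl(net_mask));
    return 0;
}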
386 rxr->rx_next_cons = 0xffff; in bnxt_sched_reset_rxr()
432 return 0; in bnxt_xmit_get_cfa_action()
443 txr->kick_pending = 0; in bnxt_txr_db_kick()
454 unsigned int length, pad = 0; in bnxt_start_xmit()
461 __le32 lflags = 0; in bnxt_start_xmit()
498 vlan_tag_flags = 0; in bnxt_start_xmit()
559 tx_push1->tx_bd_hsize_lflags = 0; in bnxt_start_xmit()
567 *end = 0; in bnxt_start_xmit()
571 for (j = 0; j < last_frag; j++) { in bnxt_start_xmit()
664 txbd1->tx_bd_mss = 0; in bnxt_start_xmit()
671 i = 0; in bnxt_start_xmit()
681 for (i = 0; i < last_frag; i++) { in bnxt_start_xmit()
688 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, in bnxt_start_xmit()
749 for (i = 0; i < last_frag; i++) { in bnxt_start_xmit()
761 txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0; in bnxt_start_xmit()
781 unsigned int tx_bytes = 0; in __bnxt_tx_int()
783 int tx_pkts = 0; in __bnxt_tx_int()
810 tx_buf->is_ts_pkt = 0; in __bnxt_tx_int()
813 tx_buf->is_push = 0; in __bnxt_tx_int()
821 for (j = 0; j < last; j++) { in __bnxt_tx_int()
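
bnxt_start_xmit() above advances the free-running tx_prod index (with bnxt_txr_db_kick() ringing the doorbell), while __bnxt_tx_int() walks tx_cons up to the completion point reported by hardware, clearing per-buffer flags such as is_push and is_ts_pkt. A minimal userspace sketch of that producer/consumer bookkeeping on a power-of-two ring (hypothetical structure, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8                 /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct tx_ring {
    uint16_t prod;                  /* free-running producer index */
    uint16_t cons;                  /* free-running consumer index */
    int buf[RING_SIZE];
};

static int ring_full(const struct tx_ring *r)
{
    return (uint16_t)(r->prod - r->cons) == RING_SIZE;
}

static int xmit(struct tx_ring *r, int pkt)
{
    if (ring_full(r))
        return -1;                  /* caller would stop the queue */
    r->buf[r->prod & RING_MASK] = pkt;
    r->prod++;                      /* a doorbell write would follow */
    return 0;
}

/* Reclaim completed slots up to hw_cons, like __bnxt_tx_int(). */
static int tx_int(struct tx_ring *r, uint16_t hw_cons)
{
    int pkts = 0;

    while (r->cons != hw_cons) {
        r->buf[r->cons & RING_MASK] = 0;   /* unmap/free in a real driver */
        r->cons++;
        pkts++;
    }
    return pkts;
}

int main(void)
{
    struct tx_ring r = { 0 };

    for (int i = 1; i <= 5; i++)
        xmit(&r, i);
    printf("reclaimed %d packets\n", tx_int(&r, r.prod));
    return 0;
}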
879 *offset = 0; in __bnxt_alloc_rx_page()
942 return 0; in bnxt_alloc_rx_data()
986 unsigned int offset = 0; in bnxt_alloc_rx_page()
1005 return 0; in bnxt_alloc_rx_page()
1043 for (i = 0; i < agg_bufs; i++) { in bnxt_reuse_rx_agg_bufs()
1092 unsigned int len = offset_and_len & 0xffff; in bnxt_rx_multi_page_skb()
1125 unsigned int len = offset_and_len & 0xffff; in bnxt_rx_page_skb()
1152 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); in bnxt_rx_page_skb()
1156 frag = &skb_shinfo(skb)->frags[0]; in bnxt_rx_page_skb()
1190 skb_put(skb, offset_and_len & 0xffff); in bnxt_rx_skb()
1204 u32 i, total_frag_len = 0; in __bnxt_rx_agg_pages()
1210 for (i = 0; i < agg_bufs; i++) { in __bnxt_rx_agg_pages()
1243 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { in __bnxt_rx_agg_pages()
1252 return 0; in __bnxt_rx_agg_pages()
1271 u32 total_frag_len = 0; in bnxt_rx_agg_pages_skb()
1293 u32 total_frag_len = 0; in bnxt_rx_agg_pages_xdp()
1296 shinfo->nr_frags = 0; in bnxt_rx_agg_pages_xdp()
1359 unsigned int metasize = 0; in bnxt_copy_xdp()
1384 u8 cmp_type, agg_bufs = 0; in bnxt_discard_rx()
1396 return 0; in bnxt_discard_rx()
1406 return 0; in bnxt_discard_rx()
1442 tpa_info->vlan_valid = 0; in bnxt_tpa_metadata()
1454 tpa_info->vlan_valid = 0; in bnxt_tpa_metadata_v2()
1520 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ in bnxt_tpa_start()
1528 tpa_info->gso_type = 0; in bnxt_tpa_start()
1537 tpa_info->agg_count = 0; in bnxt_tpa_start()
1552 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); in bnxt_abort_tpa()
1626 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); in bnxt_gro_func_5731x()
1633 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); in bnxt_gro_func_5731x()
1684 int len, nw_off, tcp_opt_len = 0; in bnxt_gro_func_5730x()
1699 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); in bnxt_gro_func_5730x()
1710 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); in bnxt_gro_func_5730x()
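
Both GRO helpers above rewrite th->check to ~tcp_v4_check(...)/~tcp_v6_check(...), i.e. they seed the TCP checksum field with the pseudo-header sum so the stack can finish validation over the coalesced payload. A userspace sketch of the IPv4 pseudo-header one's-complement sum those helpers build on (hypothetical function name; standard RFC 1071 folding):

#include <stdio.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* One's-complement sum over the IPv4 TCP pseudo header. */
static uint16_t tcp_v4_pseudo_csum(uint32_t saddr, uint32_t daddr, uint16_t len)
{
    uint32_t sum = 0;

    sum += (saddr >> 16) + (saddr & 0xffff);
    sum += (daddr >> 16) + (daddr & 0xffff);
    sum += IPPROTO_TCP;
    sum += len;                        /* TCP header + payload length */

    while (sum >> 16)                  /* fold carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint32_t s = ntohl(inet_addr("192.0.2.1"));
    uint32_t d = ntohl(inet_addr("192.0.2.2"));

    printf("pseudo-header csum: 0x%04x\n", tcp_v4_pseudo_csum(s, d, 40));
    return 0;
}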
1777 u16 idx = 0, agg_id; in bnxt_tpa_end()
1784 if (rc < 0) in bnxt_tpa_end()
1799 tpa_info->agg_count = 0; in bnxt_tpa_end()
2013 * 0 - successful TPA_START, packet not completed yet
2038 int rc = 0; in bnxt_rx_pkt()
2094 /* 0xffff is forced error, don't print it */ in bnxt_rx_pkt()
2095 if (rxr->rx_next_cons != 0xffff) in bnxt_rx_pkt()
2126 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, in bnxt_rx_pkt()
2174 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, in bnxt_rx_pkt()
2187 payload = 0; in bnxt_rx_pkt()
2217 /* RSS profiles 1 and 3 with extract code 0 for inner in bnxt_rx_pkt()
2262 memset(skb_hwtstamps(skb), 0, in bnxt_rx_pkt()
2342 u32 reg_type, reg_off, val = 0; in bnxt_fw_health_readl()
2369 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_agg_ring_id_to_grp_idx()
2583 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", in bnxt_async_event_process()
2596 (data1 & 0x20000)) { in bnxt_async_event_process()
2660 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", in bnxt_async_event_process()
2694 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", in bnxt_async_event_process()
2708 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", in bnxt_async_event_process()
2718 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", in bnxt_async_event_process()
2725 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", in bnxt_async_event_process()
2737 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", in bnxt_async_event_process()
2779 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; in bnxt_async_event_process()
2789 return 0; in bnxt_async_event_process()
2828 return 0; in bnxt_hwrm_handler()
2861 int rx_pkts = 0; in __bnxt_poll_work()
2862 u8 event = 0; in __bnxt_poll_work()
2865 cpr->has_more_work = 0; in __bnxt_poll_work()
2913 if (likely(rc >= 0)) in __bnxt_poll_work()
2943 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; in __bnxt_poll_work()
3007 u32 rx_pkts = 0; in bnxt_poll_nitroa0()
3008 u8 event = 0; in bnxt_poll_nitroa0()
3023 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { in bnxt_poll_nitroa0()
3077 int work_done = 0; in bnxt_poll()
3081 return 0; in bnxt_poll()
3113 int i, work_done = 0; in __bnxt_poll_cqs()
3115 for (i = 0; i < cpr->cp_ring_count; i++) { in __bnxt_poll_cqs()
3133 for (i = 0; i < cpr->cp_ring_count; i++) { in __bnxt_poll_cqs_done()
3138 u32 tgl = 0; in __bnxt_poll_cqs_done()
3141 cpr2->had_nqe_notify = 0; in __bnxt_poll_cqs_done()
3149 cpr2->had_work_done = 0; in __bnxt_poll_cqs_done()
3163 int work_done = 0; in bnxt_poll_p5()
3168 return 0; in bnxt_poll_p5()
3171 cpr->has_more_work = 0; in bnxt_poll_p5()
3227 cpr_rx = &cpr->cp_ring_arr[0]; in bnxt_poll_p5()
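
The poll routines above (bnxt_poll(), bnxt_poll_nitroa0(), bnxt_poll_p5()) follow the NAPI contract: consume at most `budget` completions per invocation, and only re-arm interrupts when a round finishes under budget. A userspace analog of that contract (process_one() and irq_enable() are stand-ins):

#include <stdio.h>

static int pending = 23;            /* pretend 23 completions are queued */

static int process_one(void)
{
    if (!pending)
        return 0;
    pending--;
    return 1;
}

static void irq_enable(void)
{
    printf("interrupts re-armed\n");
}

/* Handle at most 'budget' completions; return the work done. */
static int poll(int budget)
{
    int work_done = 0;

    while (work_done < budget && process_one())
        work_done++;

    if (work_done < budget)         /* ring drained: leave polling mode */
        irq_enable();
    return work_done;
}

int main(void)
{
    int done;

    do {
        done = poll(8);
        printf("poll round did %d\n", done);
    } while (done == 8);            /* full budget used: poll again */
    return 0;
}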
3250 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_free_tx_skbs()
3257 for (j = 0; j < max_idx;) { in bnxt_free_tx_skbs()
3269 tx_buf->action = 0; in bnxt_free_tx_skbs()
3296 for (k = 0; k < last; k++, j++) { in bnxt_free_tx_skbs()
3319 for (i = 0; i < max_idx; i++) { in bnxt_free_one_rx_ring()
3345 for (i = 0; i < max_idx; i++) { in bnxt_free_one_rx_agg_ring()
3369 for (i = 0; i < bp->max_tpa; i++) { in bnxt_free_one_rx_ring_skbs()
3400 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); in bnxt_free_one_rx_ring_skbs()
3410 for (i = 0; i < bp->rx_nr_rings; i++) in bnxt_free_rx_skbs()
3433 for (i = 0; i < len; i += ctxm->entry_size) in bnxt_init_ctx_mem()
3445 for (i = 0; i < rmem->nr_pages; i++) { in bnxt_free_ring()
3473 u64 valid_bit = 0; in bnxt_alloc_ring()
3478 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { in bnxt_alloc_ring()
3490 for (i = 0; i < rmem->nr_pages; i++) { in bnxt_alloc_ring()
3503 if (rmem->nr_pages > 1 || rmem->depth > 0) { in bnxt_alloc_ring()
3520 return 0; in bnxt_alloc_ring()
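
bnxt_alloc_ring() above backs a ring with rmem->nr_pages separate DMA pages; when there is more than one page or an indirection level (rmem->depth > 0), each page's bus address is recorded in a page table, with a validity bit OR'd into the entry. A userspace analog using plain allocations in place of DMA mappings (VALID_BIT here is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define NR_PAGES  4
#define PAGE_SZ   4096
#define VALID_BIT 1ULL              /* illustrative; marks a live entry */

int main(void)
{
    void *pages[NR_PAGES];
    uint64_t pg_tbl[NR_PAGES];      /* stand-in for the DMA page table */

    for (int i = 0; i < NR_PAGES; i++) {
        pages[i] = calloc(1, PAGE_SZ);
        if (!pages[i])
            return 1;
        /* The driver stores dma_arr[i] | valid_bit; we use the VA. */
        pg_tbl[i] = (uint64_t)(uintptr_t)pages[i] | VALID_BIT;
    }
    for (int i = 0; i < NR_PAGES; i++)
        printf("entry %d: 0x%llx\n", i, (unsigned long long)pg_tbl[i]);
    for (int i = 0; i < NR_PAGES; i++)
        free(pages[i]);
    return 0;
}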
3527 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_free_tpa_info()
3533 for (j = 0; j < bp->max_tpa; j++) { in bnxt_free_tpa_info()
3550 return 0; in bnxt_alloc_tpa_info()
3554 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_tpa_info()
3565 for (j = 0; j < bp->max_tpa; j++) { in bnxt_alloc_tpa_info()
3576 return 0; in bnxt_alloc_tpa_info()
3587 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_free_rx_rings()
3615 struct page_pool_params pp = { 0 }; in bnxt_alloc_rx_page_pool()
3635 return 0; in bnxt_alloc_rx_page_pool()
3641 int i, rc = 0, agg_rings = 0, cpu; in bnxt_alloc_rx_rings()
3649 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rx_rings()
3664 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); in bnxt_alloc_rx_rings()
3665 if (rc < 0) in bnxt_alloc_rx_rings()
3710 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_free_tx_rings()
3740 bp->tx_push_size = 0; in bnxt_alloc_tx_rings()
3748 push_size = 0; in bnxt_alloc_tx_rings()
3749 bp->tx_push_thresh = 0; in bnxt_alloc_tx_rings()
3755 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { in bnxt_alloc_tx_rings()
3793 return 0; in bnxt_alloc_tx_rings()
3817 return 0; in bnxt_alloc_cp_arrays()
3826 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_all_cp_arrays()
3839 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_all_cp_arrays()
3849 return 0; in bnxt_alloc_all_cp_arrays()
3859 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_cp_rings()
3876 for (j = 0; j < cpr->cp_ring_count; j++) { in bnxt_free_cp_rings()
3885 cpr->cp_ring_count = 0; in bnxt_free_cp_rings()
3925 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_cp_rings()
3929 int cp_count = 0, k; in bnxt_alloc_cp_rings()
3930 int rx = 0, tx = 0; in bnxt_alloc_cp_rings()
3967 for (k = 0; k < cp_count; k++) { in bnxt_alloc_cp_rings()
3989 return 0; in bnxt_alloc_cp_rings()
4026 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info)); in bnxt_reset_rx_ring_struct()
4031 rmem->pg_tbl_map = 0; in bnxt_reset_rx_ring_struct()
4032 for (i = 0; i < rmem->nr_pages; i++) { in bnxt_reset_rx_ring_struct()
4034 rmem->dma_arr[i] = 0; in bnxt_reset_rx_ring_struct()
4041 rmem->pg_tbl_map = 0; in bnxt_reset_rx_ring_struct()
4042 for (i = 0; i < rmem->nr_pages; i++) { in bnxt_reset_rx_ring_struct()
4044 rmem->dma_arr[i] = 0; in bnxt_reset_rx_ring_struct()
4053 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_ring_struct()
4071 rmem->vmem_size = 0; in bnxt_init_ring_struct()
4116 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { in bnxt_init_rxbd_pages()
4124 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { in bnxt_init_rxbd_pages()
4139 for (i = 0; i < bp->rx_ring_size; i++) { in bnxt_alloc_one_rx_ring_skb()
4158 for (i = 0; i < bp->rx_agg_ring_size; i++) { in bnxt_alloc_one_rx_ring_page()
4177 return 0; in bnxt_alloc_one_rx_ring()
4185 for (i = 0; i < bp->max_tpa; i++) { in bnxt_alloc_one_rx_ring()
4195 return 0; in bnxt_alloc_one_rx_ring()
4255 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_cp_rings()
4264 for (j = 0; j < cpr->cp_ring_count; j++) { in bnxt_init_cp_rings()
4277 int i, rc = 0; in bnxt_init_rx_rings()
4287 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_init_rx_rings()
4303 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_init_tx_rings()
4315 return 0; in bnxt_init_tx_rings()
4335 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_ring_grps()
4343 return 0; in bnxt_init_ring_grps()
4350 bp->nr_vnics = 0; in bnxt_free_vnics()
4375 return 0; in bnxt_alloc_vnics()
4383 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_init_vnics()
4389 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) in bnxt_init_vnics()
4415 bp->toeplitz_prefix = 0; in bnxt_init_vnics()
4416 for (k = 0; k < 8; k++) { in bnxt_init_vnics()
4462 u32 agg_factor = 0, agg_ring_size = 0; in bnxt_set_ring_params()
4472 bp->rx_agg_ring_size = 0; in bnxt_set_ring_params()
4473 bp->rx_agg_nr_pages = 0; in bnxt_set_ring_params()
4585 return 0; in bnxt_set_rx_skb_mode()
4597 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_free_vnic_attributes()
4620 vnic->flags = 0; in bnxt_free_vnic_attributes()
4626 int i, rc = 0, size; in bnxt_alloc_vnic_attributes()
4631 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_alloc_vnic_attributes()
4637 if (mem_size > 0) { in bnxt_alloc_vnic_attributes()
4695 return 0; in bnxt_alloc_vnic_attributes()
4718 BNXT_HWRM_DMA_ALIGN, 0); in bnxt_alloc_hwrm_resources()
4724 return 0; in bnxt_alloc_hwrm_resources()
4757 return 0; in bnxt_alloc_stats_mem()
4768 for (i = 0; i < count; i++) in bnxt_fill_masks()
4776 for (i = 0; i < count; i++) in bnxt_copy_hw_masks()
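
bnxt_fill_masks()/bnxt_copy_hw_masks() above prepare per-counter width masks so that hardware counters narrower than 64 bits can be folded into monotonic 64-bit software totals across wraparound. A sketch of that modular-delta technique, assuming a 48-bit counter width:

#include <stdio.h>
#include <stdint.h>

#define MASK48 ((1ULL << 48) - 1)   /* width mask for a 48-bit counter */

/* Fold a wrapping 48-bit hardware counter into a 64-bit total. */
static void accumulate(uint64_t *total, uint64_t *last, uint64_t hw)
{
    *total += (hw - *last) & MASK48;   /* modular delta survives the wrap */
    *last = hw;
}

int main(void)
{
    uint64_t total = 0, last = 0;
    uint64_t reads[] = { MASK48 - 5, 10, 30 };  /* counter wraps between reads */

    for (int i = 0; i < 3; i++) {
        accumulate(&total, &last, reads[i]);
        printf("hw 0x%llx -> total %llu\n",
               (unsigned long long)reads[i], (unsigned long long)total);
    }
    return 0;
}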
4796 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_func_qstat_ext()
4814 struct bnxt_napi *bnapi = bp->bnapi[0]; in bnxt_init_stats()
4852 bnxt_hwrm_port_qstats(bp, 0); in bnxt_init_stats()
4878 bnxt_hwrm_port_qstats_ext(bp, 0); in bnxt_init_stats()
4900 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_ring_stats()
4918 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_stats()
4935 return 0; in bnxt_alloc_stats()
4949 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) in bnxt_alloc_stats()
4951 return 0; in bnxt_alloc_stats()
4960 return 0; in bnxt_alloc_stats()
4964 return 0; in bnxt_alloc_stats()
4966 if (bp->hwrm_spec_code >= 0x10902 || in bnxt_alloc_stats()
4972 return 0; in bnxt_alloc_stats()
4975 return 0; in bnxt_alloc_stats()
4985 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_clear_ring_indices()
4995 cpr->cp_raw_cons = 0; in bnxt_clear_ring_indices()
4998 txr->tx_prod = 0; in bnxt_clear_ring_indices()
4999 txr->tx_cons = 0; in bnxt_clear_ring_indices()
5000 txr->tx_hw_cons = 0; in bnxt_clear_ring_indices()
5005 rxr->rx_prod = 0; in bnxt_clear_ring_indices()
5006 rxr->rx_agg_prod = 0; in bnxt_clear_ring_indices()
5007 rxr->rx_sw_agg_prod = 0; in bnxt_clear_ring_indices()
5008 rxr->rx_next_cons = 0; in bnxt_clear_ring_indices()
5010 bnapi->events = 0; in bnxt_clear_ring_indices()
5059 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { in bnxt_free_ntp_fltrs()
5078 bp->ntp_fltr_count = 0; in bnxt_free_ntp_fltrs()
5083 int i, rc = 0; in bnxt_alloc_ntp_fltrs()
5086 return 0; in bnxt_alloc_ntp_fltrs()
5088 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) in bnxt_alloc_ntp_fltrs()
5091 bp->ntp_fltr_count = 0; in bnxt_alloc_ntp_fltrs()
5104 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { in bnxt_free_l2_filters()
5123 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) in bnxt_init_l2_fltr_tbl()
5175 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { in bnxt_alloc_mem()
5194 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_mem()
5222 j = 0; in bnxt_alloc_mem()
5226 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_alloc_mem()
5246 bnapi2->tx_ring[0] = txr; in bnxt_alloc_mem()
5297 return 0; in bnxt_alloc_mem()
5311 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_int()
5340 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_int_sync()
5351 atomic_set(&bp->intr_sem, 0); in bnxt_enable_int()
5352 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_int()
5397 memset(data, 0, sizeof(data)); in bnxt_hwrm_func_drv_rgtr()
5398 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { in bnxt_hwrm_func_drv_rgtr()
5407 for (i = 0; i < 8; i++) in bnxt_hwrm_func_drv_rgtr()
5418 memset(async_events_bmap, 0, sizeof(async_events_bmap)); in bnxt_hwrm_func_drv_rgtr()
5419 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { in bnxt_hwrm_func_drv_rgtr()
5431 for (i = 0; i < bmap_size; i++) { in bnxt_hwrm_func_drv_rgtr()
5436 for (i = 0; i < 8; i++) in bnxt_hwrm_func_drv_rgtr()
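
bnxt_hwrm_func_drv_rgtr() above sets one bit per supported async event in async_events_bmap, then copies the bitmap out as eight 32-bit words for the firmware request. A userspace sketch of that bitmap marshalling (the event IDs below are made up; the kernel additionally converts each word with cpu_to_le32()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_EVT 256

int main(void)
{
    uint8_t bmap[MAX_EVT / 8];
    uint32_t words[MAX_EVT / 32];
    int events[] = { 0, 5, 33, 130 };   /* made-up async event IDs */

    memset(bmap, 0, sizeof(bmap));
    for (size_t i = 0; i < sizeof(events) / sizeof(events[0]); i++)
        bmap[events[i] / 8] |= 1u << (events[i] % 8);

    /* Pack the byte bitmap into the 32-bit words a request carries. */
    memcpy(words, bmap, sizeof(words));
    for (int i = 0; i < 8; i++)
        printf("word[%d] = 0x%08x\n", i, words[i]);
    return 0;
}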
5461 return 0; in bnxt_hwrm_func_drv_unrgtr()
5478 return 0; in bnxt_hwrm_tunnel_dst_port_free()
5481 return 0; in bnxt_hwrm_tunnel_dst_port_free()
5492 bp->vxlan_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5497 bp->nge_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5502 bp->vxlan_gpe_port = 0; in bnxt_hwrm_tunnel_dst_port_free()
5667 return 0; in bnxt_get_rss_flow_tuple_len()
5673 u64 prefix = bp->toeplitz_prefix, hash = 0; in bnxt_toeplitz()
5676 int i, j, len = 0; in bnxt_toeplitz()
5681 return 0; in bnxt_toeplitz()
5693 for (i = 0, j = 8; i < len; i++, j++) { in bnxt_toeplitz()
5697 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { in bnxt_toeplitz()
5698 if (byte & 0x80) in bnxt_toeplitz()
5701 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; in bnxt_toeplitz()
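
bnxt_toeplitz() above is a bit-serial Toeplitz RSS hash: a 64-bit window (`prefix`) slides across the hash key, each set bit of the input XORs the window's top 32 bits into the hash, and after every input byte the next key byte is OR'd into the freshly vacated low bits. A self-contained userspace version of the same loop (the key and flow tuple are arbitrary test values):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define KEY_SIZE 40     /* standard RSS hash key length */

/* Bit-serial Toeplitz hash over 'len' input bytes. */
static uint32_t toeplitz(const uint8_t *key, const uint8_t *in, size_t len)
{
    uint64_t prefix = 0;
    uint32_t hash = 0;
    size_t i, j;

    /* Preload the first 8 key bytes into the sliding window. */
    for (j = 0; j < 8; j++)
        prefix = (prefix << 8) | key[j];

    for (i = 0, j = 8; i < len; i++, j++) {
        uint8_t byte = in[i];
        int bit;

        for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
            if (byte & 0x80)
                hash ^= (uint32_t)(prefix >> 32);
        }
        prefix |= (j < KEY_SIZE) ? key[j] : 0;
    }
    return hash;
}

int main(void)
{
    uint8_t key[KEY_SIZE];
    uint8_t tuple[12] = { 192, 0, 2, 1, 192, 0, 2, 2,   /* src/dst IPv4 */
                          0x04, 0xd2, 0x00, 0x50 };      /* ports 1234/80 */

    for (int i = 0; i < KEY_SIZE; i++)
        key[i] = (uint8_t)(i * 3 + 1);  /* arbitrary demo key */
    printf("toeplitz hash: 0x%08x\n", toeplitz(key, tuple, sizeof(tuple)));
    return 0;
}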
5734 bp->max_fltr, 0); in bnxt_init_l2_filter()
5735 if (bit_id < 0) in bnxt_init_l2_filter()
5745 return 0; in bnxt_init_l2_filter()
5823 u16 target_id = 0xffff; in bnxt_hwrm_l2_filter_free()
5850 u16 target_id = 0xffff; in bnxt_hwrm_l2_filter_alloc()
5886 req->l2_ivlan_mask = cpu_to_le16(0xfff); in bnxt_hwrm_l2_filter_alloc()
5936 for (i = 0; i < 4; i++) in bnxt_fill_ipv6_mask()
5937 mask[i] = cpu_to_be32(~0); in bnxt_fill_ipv6_mask()
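
bnxt_fill_ipv6_mask() above writes an all-ones IPv6 match mask as four big-endian 32-bit words (cpu_to_be32(~0)). A userspace equivalent that builds the mask the same way and applies it to an address:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
    struct in6_addr addr, mask, out;
    uint32_t ones = htonl(~0u);     /* one all-ones big-endian word */
    char buf[INET6_ADDRSTRLEN];

    if (inet_pton(AF_INET6, "2001:db8::1", &addr) != 1)
        return 1;

    /* Write the mask as four big-endian 32-bit words, as the driver does. */
    for (int i = 0; i < 4; i++)
        memcpy(&mask.s6_addr[i * 4], &ones, 4);

    for (int i = 0; i < 16; i++)
        out.s6_addr[i] = addr.s6_addr[i] & mask.s6_addr[i];

    printf("masked: %s\n", inet_ntop(AF_INET6, &out, buf, sizeof(buf)));
    return 0;
}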
6017 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6018 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6019 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6020 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6022 req->src_ipaddr[0] = keys->addrs.v4addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6023 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6024 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6025 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
6054 key.vlan = 0; in bnxt_hwrm_set_vnic_filter()
6070 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ in bnxt_hwrm_clear_vnic_filter()
6073 for (i = 0; i < num_of_vnics; i++) { in bnxt_hwrm_clear_vnic_filter()
6076 for (j = 0; j < vnic->uc_filter_count; j++) { in bnxt_hwrm_clear_vnic_filter()
6082 vnic->uc_filter_count = 0; in bnxt_hwrm_clear_vnic_filter()
6118 return 0; in bnxt_hwrm_vnic_set_tpa()
6126 u32 nsegs, n, segs = 0, flags; in bnxt_hwrm_vnic_set_tpa()
6212 return 0; in bnxt_alloc_rss_indir_tbl()
6233 rss_indir_tbl = &bp->rss_indir_tbl[0]; in bnxt_set_dflt_rss_indir_tbl()
6235 for (i = 0; i < max_entries; i++) in bnxt_set_dflt_rss_indir_tbl()
6240 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); in bnxt_set_dflt_rss_indir_tbl()
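
bnxt_set_dflt_rss_indir_tbl() above fills the RSS indirection table with the kernel's default spread, which reduces to entry i mapping to ring i % rx_ring_count, then zero-pads any tail. A tiny sketch:

#include <stdio.h>

#define TBL_SIZE 16                 /* real tables are larger, e.g. 128 */

int main(void)
{
    unsigned short tbl[TBL_SIZE];
    int rx_rings = 6;

    /* Default spread: entry i -> ring i % rx_rings. */
    for (int i = 0; i < TBL_SIZE; i++)
        tbl[i] = i % rx_rings;

    for (int i = 0; i < TBL_SIZE; i++)
        printf("%d%c", tbl[i], i == TBL_SIZE - 1 ? '\n' : ' ');
    return 0;
}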
6245 u32 i, tbl_size, max_ring = 0; in bnxt_get_max_rss_ring()
6248 return 0; in bnxt_get_max_rss_ring()
6251 for (i = 0; i < tbl_size; i++) in bnxt_get_max_rss_ring()
6260 return 0; in bnxt_get_nr_rss_ctxs()
6275 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { in bnxt_fill_hw_rss_tbl()
6291 for (i = 0; i < tbl_size; i++) { in bnxt_fill_hw_rss_tbl_p5()
6342 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) in bnxt_hwrm_vnic_set_rss()
6343 return 0; in bnxt_hwrm_vnic_set_rss()
6351 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_set_rss()
6376 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { in bnxt_hwrm_vnic_set_rss_p5()
6401 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_update_rss_hash_cfg()
6405 bp->rss_hash_delta = 0; in bnxt_hwrm_update_rss_hash_cfg()
6456 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_hwrm_vnic_ctx_free()
6459 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { in bnxt_hwrm_vnic_ctx_free()
6464 bp->rsscos_nr_ctxs = 0; in bnxt_hwrm_vnic_ctx_free()
6499 unsigned int ring = 0, grp_idx; in bnxt_hwrm_vnic_cfg()
6500 u16 def_vlan = 0; in bnxt_hwrm_vnic_cfg()
6508 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; in bnxt_hwrm_vnic_cfg()
6521 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { in bnxt_hwrm_vnic_cfg()
6522 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_cfg()
6526 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_cfg()
6531 req->rss_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6535 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { in bnxt_hwrm_vnic_cfg()
6539 req->cos_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6543 ring = 0; in bnxt_hwrm_vnic_cfg()
6551 req->lb_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6589 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_vnic_free()
6610 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { in bnxt_hwrm_vnic_alloc()
6621 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) in bnxt_hwrm_vnic_alloc()
6643 if (bp->hwrm_spec_code < 0x10600) in bnxt_hwrm_vnic_qcaps()
6644 return 0; in bnxt_hwrm_vnic_qcaps()
6705 return 0; in bnxt_hwrm_ring_grp_alloc()
6712 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_grp_alloc()
6744 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_grp_free()
6764 int rc, err = 0; in hwrm_ring_alloc_send_msg()
6771 req->enables = 0; in hwrm_ring_alloc_send_msg()
6778 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); in hwrm_ring_alloc_send_msg()
6780 req->fbo = 0; in hwrm_ring_alloc_send_msg()
6787 u16 flags = 0; in hwrm_ring_alloc_send_msg()
6810 u16 flags = 0; in hwrm_ring_alloc_send_msg()
6891 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_async_event_cr()
6959 db->doorbell = bp->bar1 + map_idx * 0x80; in bnxt_set_db()
6992 return 0; in bnxt_hwrm_rx_ring_alloc()
7015 return 0; in bnxt_hwrm_rx_agg_ring_alloc()
7021 int i, rc = 0; in bnxt_hwrm_ring_alloc()
7028 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7055 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7083 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7111 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
7127 u16 error_code = 0; in hwrm_ring_free_send_msg()
7131 return 0; in hwrm_ring_free_send_msg()
7151 return 0; in hwrm_ring_free_send_msg()
7206 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_free()
7221 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_free()
7236 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_free()
7242 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { in bnxt_hwrm_ring_free()
7275 if (bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_get_rings()
7276 return 0; in bnxt_hwrm_get_rings()
7282 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_get_rings()
7334 if (bp->hwrm_spec_code < 0x10601) in __bnxt_hwrm_get_tx_rings()
7335 return 0; in __bnxt_hwrm_get_tx_rings()
7357 u32 enables = 0; in __bnxt_hwrm_reserve_pf_rings()
7362 req->fid = cpu_to_le16(0xffff); in __bnxt_hwrm_reserve_pf_rings()
7363 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
7366 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
7367 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; in __bnxt_hwrm_reserve_pf_rings()
7369 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; in __bnxt_hwrm_reserve_pf_rings()
7371 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
7374 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
7376 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; in __bnxt_hwrm_reserve_pf_rings()
7378 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; in __bnxt_hwrm_reserve_pf_rings()
7380 0; in __bnxt_hwrm_reserve_pf_rings()
7401 u32 enables = 0; in __bnxt_hwrm_reserve_vf_rings()
7406 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; in __bnxt_hwrm_reserve_vf_rings()
7408 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; in __bnxt_hwrm_reserve_vf_rings()
7409 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; in __bnxt_hwrm_reserve_vf_rings()
7410 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; in __bnxt_hwrm_reserve_vf_rings()
7413 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; in __bnxt_hwrm_reserve_vf_rings()
7415 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; in __bnxt_hwrm_reserve_vf_rings()
7417 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; in __bnxt_hwrm_reserve_vf_rings()
7419 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; in __bnxt_hwrm_reserve_vf_rings()
7451 return 0; in bnxt_hwrm_reserve_pf_rings()
7458 if (bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_reserve_pf_rings()
7472 return 0; in bnxt_hwrm_reserve_vf_rings()
7518 return 0; in bnxt_get_total_rss_ctxs()
7576 bp->hwrm_spec_code >= 0x10601) in bnxt_need_reserve_rings()
7625 struct bnxt_hw_rings hwr = {0}; in __bnxt_reserve_rings()
7628 int ulp_msix = 0; in __bnxt_reserve_rings()
7633 return 0; in __bnxt_reserve_rings()
7638 bnxt_set_ulp_stat_ctxs(bp, 0); in __bnxt_reserve_rings()
7742 return 0; in bnxt_hwrm_check_vf_rings()
7783 if (bp->hwrm_spec_code < 0x10801) in bnxt_hwrm_check_rings()
7784 return 0; in bnxt_hwrm_check_rings()
7809 if (bp->hwrm_spec_code < 0x10902) in bnxt_hwrm_coal_params_qcaps()
7913 return 0; in __bnxt_hwrm_set_coal_nq()
7985 return 0; in bnxt_hwrm_set_tx_coal()
7987 return 0; in bnxt_hwrm_set_tx_coal()
8010 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_set_coal()
8024 if (bnapi->rx_ring && bnapi->tx_ring[0]) { in bnxt_hwrm_set_coal()
8062 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_free()
8089 return 0; in bnxt_hwrm_stat_ctx_alloc()
8099 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_alloc()
8128 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_func_qcfg()
8163 if (bp->hwrm_spec_code < 0x10707 || in bnxt_hwrm_func_qcfg()
8205 ctxm->init_value = 0; in bnxt_init_ctx_initializer()
8213 for (type = 0; type < ctx_max; type++) { in bnxt_alloc_all_ctx_pg_info()
8226 return 0; in bnxt_alloc_all_ctx_pg_info()
8252 for (type = 0; type < BNXT_CTX_V2_MAX; ) { in bnxt_hwrm_func_backing_store_qcaps_v2()
8280 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; in bnxt_hwrm_func_backing_store_qcaps_v2()
8297 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) in bnxt_hwrm_func_backing_store_qcaps()
8298 return 0; in bnxt_hwrm_func_backing_store_qcaps()
8312 u8 init_val, init_idx = 0; in bnxt_hwrm_func_backing_store_qcaps()
8334 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8341 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8348 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8357 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8364 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8383 (init_mask & (1 << init_idx++)) != 0); in bnxt_hwrm_func_backing_store_qcaps()
8401 rc = 0; in bnxt_hwrm_func_backing_store_qcaps()
8422 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); in bnxt_hwrm_set_pg_attr()
8443 u32 flags = 0; in bnxt_hwrm_func_backing_store_cfg()
8450 return 0; in bnxt_hwrm_func_backing_store_cfg()
8544 for (i = 0, num_entries = &req->tqm_sp_num_entries, in bnxt_hwrm_func_backing_store_cfg()
8589 ctx_pg->nr_pages = 0; in bnxt_alloc_ctx_pg_tbls()
8605 for (i = 0; i < nr_tbls; i++) { in bnxt_alloc_ctx_pg_tbls()
8647 for (i = 0; i < nr_tbls; i++) { in bnxt_free_ctx_pg_tbls()
8664 ctx_pg->nr_pages = 0; in bnxt_free_ctx_pg_tbls()
8672 int i, rc = 0, n = 1; in bnxt_setup_ctxm_pg_tbls()
8683 for (i = 0; i < n && !rc; i++) { in bnxt_setup_ctxm_pg_tbls()
8697 int i, j, rc = 0, n = 1; in bnxt_hwrm_func_backing_store_cfg_v2()
8701 return 0; in bnxt_hwrm_func_backing_store_cfg_v2()
8715 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) in bnxt_hwrm_func_backing_store_cfg_v2()
8717 for (i = 0, j = 0; j < n && !rc; i++) { in bnxt_hwrm_func_backing_store_cfg_v2()
8744 int rc = 0; in bnxt_backing_store_cfg_v2()
8748 return 0; in bnxt_backing_store_cfg_v2()
8755 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { in bnxt_backing_store_cfg_v2()
8762 return 0; in bnxt_backing_store_cfg_v2()
8773 for (type = 0; type < BNXT_CTX_V2_MAX; type++) { in bnxt_free_ctx_mem()
8782 for (i = 0; i < n; i++) in bnxt_free_ctx_mem()
8802 u32 extra_srqs = 0; in bnxt_alloc_ctx_mem()
8803 u32 extra_qps = 0; in bnxt_alloc_ctx_mem()
8816 return 0; in bnxt_alloc_ctx_mem()
8826 ena = 0; in bnxt_alloc_ctx_mem()
8902 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) in bnxt_alloc_ctx_mem()
8916 return 0; in bnxt_alloc_ctx_mem()
8926 return 0; in bnxt_hwrm_crash_dump_mem_cfg()
8932 if (BNXT_PAGE_SIZE == 0x2000) in bnxt_hwrm_crash_dump_mem_cfg()
8934 else if (BNXT_PAGE_SIZE == 0x10000) in bnxt_hwrm_crash_dump_mem_cfg()
8957 u32 mem_size = 0; in bnxt_alloc_crash_dump_mem()
8961 return 0; in bnxt_alloc_crash_dump_mem()
8990 return 0; in bnxt_alloc_crash_dump_mem()
9004 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_func_resc_qcaps()
9060 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { in __bnxt_hwrm_ptp_qcfg()
9094 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); in __bnxt_hwrm_ptp_qcfg()
9097 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; in __bnxt_hwrm_ptp_qcfg()
9103 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; in __bnxt_hwrm_ptp_qcfg()
9110 return 0; in __bnxt_hwrm_ptp_qcfg()
9131 req->fid = cpu_to_le16(0xffff); in __bnxt_hwrm_func_qcaps()
9183 bp->tx_push_thresh = 0; in __bnxt_hwrm_func_qcaps()
9245 bp->fw_dbg_cap = 0; in bnxt_hwrm_dbg_qcaps()
9253 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_dbg_qcaps()
9282 if (bp->hwrm_spec_code >= 0x10803) { in bnxt_hwrm_func_qcaps()
9290 return 0; in bnxt_hwrm_func_qcaps()
9301 return 0; in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
9333 return 0; in __bnxt_alloc_fw_health()
9340 return 0; in __bnxt_alloc_fw_health()
9349 return 0; in bnxt_alloc_fw_health()
9358 return 0; in bnxt_alloc_fw_health()
9435 u32 reg_base = 0xffffffff; in bnxt_map_fw_health_regs()
9441 for (i = 0; i < 4; i++) { in bnxt_map_fw_health_regs()
9446 if (reg_base == 0xffffffff) in bnxt_map_fw_health_regs()
9454 if (reg_base == 0xffffffff) in bnxt_map_fw_health_regs()
9455 return 0; in bnxt_map_fw_health_regs()
9458 return 0; in bnxt_map_fw_health_regs()
9482 return 0; in bnxt_hwrm_error_recovery_qcfg()
9522 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { in bnxt_hwrm_error_recovery_qcfg()
9548 req->enables = 0; in bnxt_hwrm_func_reset()
9569 int rc = 0; in bnxt_hwrm_queue_qportcfg()
9591 for (i = 0, j = 0; i < bp->max_tc; i++) { in bnxt_hwrm_queue_qportcfg()
9661 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", in bnxt_hwrm_ver_get()
9664 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); in bnxt_hwrm_ver_get()
9680 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { in bnxt_hwrm_ver_get()
9760 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || in bnxt_hwrm_fw_set_time()
9761 bp->hwrm_spec_code < 0x10400) in bnxt_hwrm_fw_set_time()
9764 time64_to_tm(now, 0, &tm); in bnxt_hwrm_fw_set_time()
9794 for (i = 0; i < count; i++) { in __bnxt_accumulate_stats()
9822 /* Chip bug. Counter intermittently becomes 0. */ in bnxt_accumulate_all_stats()
9826 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_accumulate_all_stats()
9868 return 0; in bnxt_hwrm_port_qstats()
9896 return 0; in bnxt_hwrm_port_qstats_ext()
9910 sizeof(struct tx_port_stats_ext) : 0; in bnxt_hwrm_port_qstats_ext()
9923 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; in bnxt_hwrm_port_qstats_ext()
9925 bp->fw_rx_stats_ext_size = 0; in bnxt_hwrm_port_qstats_ext()
9926 bp->fw_tx_stats_ext_size = 0; in bnxt_hwrm_port_qstats_ext()
9935 bp->pri2cos_valid = 0; in bnxt_hwrm_port_qstats_ext()
9952 for (i = 0; i < 8; i++) { in bnxt_hwrm_port_qstats_ext()
9956 /* Per port queue IDs start from 0, 10, 20, etc */ in bnxt_hwrm_port_qstats_ext()
9963 for (j = 0; j < bp->max_q; j++) { in bnxt_hwrm_port_qstats_ext()
9986 u32 tpa_flags = 0; in bnxt_set_tpa()
9991 return 0; in bnxt_set_tpa()
9992 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_set_tpa()
10000 return 0; in bnxt_set_tpa()
10007 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_clear_vnic_rss()
10059 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_br_mode()
10070 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) in bnxt_hwrm_set_cache_line_size()
10071 return 0; in bnxt_hwrm_set_cache_line_size()
10077 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_cache_line_size()
10094 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); in __bnxt_setup_vnic()
10183 for (i = 0; i < nr_ctxs; i++) { in __bnxt_setup_vnic_p5()
10235 int i, rc = 0; in bnxt_alloc_rfs_vnics()
10239 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); in bnxt_alloc_rfs_vnics()
10243 return 0; in bnxt_alloc_rfs_vnics()
10245 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rfs_vnics()
10272 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { in bnxt_del_one_rss_ctx()
10309 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || in bnxt_hwrm_realloc_rss_ctx_vnic()
10345 unsigned int rc = 0; in bnxt_setup_nitroa0_vnic()
10369 int rc = 0; in bnxt_init_chip()
10396 /* default vnic 0 */ in bnxt_init_chip()
10397 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); in bnxt_init_chip()
10427 /* Filter for default vnic 0 */ in bnxt_init_chip()
10428 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); in bnxt_init_chip()
10438 vnic->rx_mask = 0; in bnxt_init_chip()
10450 vnic->mc_list_count = 0; in bnxt_init_chip()
10452 u32 mask = 0; in bnxt_init_chip()
10480 return 0; in bnxt_init_chip()
10483 bnxt_hwrm_resource_free(bp, 0, true); in bnxt_init_chip()
10491 return 0; in bnxt_shutdown_nic()
10548 return 0; in __bnxt_trim_rings()
10586 return 0; in bnxt_trim_rings()
10593 const int len = sizeof(bp->irq_tbl[0].name); in bnxt_setup_msix()
10601 for (i = 0; i < tcs; i++) { in bnxt_setup_msix()
10608 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_setup_msix()
10635 if (map.index < 0) in bnxt_change_msix()
10735 if (num <= 0) in bnxt_get_avail_msix()
10736 return 0; in bnxt_get_avail_msix()
10751 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size; in bnxt_init_int_mode()
10759 return 0; in bnxt_init_int_mode()
10767 if (total_vecs < 0 || total_vecs < ulp_msix) { in bnxt_init_int_mode()
10777 for (i = 0; i < total_vecs; i++) in bnxt_init_int_mode()
10796 return 0; in bnxt_init_int_mode()
10823 return 0; in bnxt_reserve_rings()
10860 bp->num_tc = 0; in bnxt_reserve_rings()
10867 return 0; in bnxt_reserve_rings()
10882 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_irq()
10890 irq->have_cpumask = 0; in bnxt_free_irq()
10895 irq->requested = 0; in bnxt_free_irq()
10901 int i, j, rc = 0; in bnxt_request_irq()
10902 unsigned long flags = 0; in bnxt_request_irq()
10916 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { in bnxt_request_irq()
10962 for (i = 0; i < bp->rx_nr_rings; i++) in bnxt_del_napi()
10964 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) in bnxt_del_napi()
10967 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_del_napi()
10989 for (i = 0; i < cp_nr_rings; i++) { in bnxt_init_napi()
11007 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_napi()
11027 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_napi()
11031 bnapi->tx_fault = 0; in bnxt_enable_napi()
11050 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_disable()
11068 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_enable()
11070 WRITE_ONCE(txr->dev_state, 0); in bnxt_tx_enable()
11182 int rc = 0; in bnxt_hwrm_phy_qcaps()
11184 if (bp->hwrm_spec_code < 0x10201) in bnxt_hwrm_phy_qcaps()
11185 return 0; in bnxt_hwrm_phy_qcaps()
11208 if (bp->hwrm_spec_code >= 0x10a01) { in bnxt_hwrm_phy_qcaps()
11216 link_info->support_auto_speeds = 0; in bnxt_hwrm_phy_qcaps()
11217 link_info->support_pam4_auto_speeds = 0; in bnxt_hwrm_phy_qcaps()
11218 link_info->support_auto_speeds2 = 0; in bnxt_hwrm_phy_qcaps()
11292 rc = 0; in bnxt_update_link()
11300 if (bp->hwrm_spec_code >= 0x10800) in bnxt_update_link()
11313 link_info->link_speed = 0; in bnxt_update_link()
11314 link_info->active_lanes = 0; in bnxt_update_link()
11332 link_info->phy_ver[0] = resp->phy_maj; in bnxt_update_link()
11346 eee->eee_active = 0; in bnxt_update_link()
11377 if (bp->hwrm_spec_code >= 0x10504) { in bnxt_update_link()
11396 return 0; in bnxt_update_link()
11401 return 0; in bnxt_update_link()
11420 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_get_port_module_status()
11435 if (bp->hwrm_spec_code >= 0x10201) in bnxt_hwrm_set_pause_common()
11451 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_hwrm_set_pause_common()
11521 bp->link_info.auto_pause_setting = 0; in bnxt_hwrm_set_pause()
11577 return 0; in bnxt_hwrm_shutdown_link()
11581 return 0; in bnxt_hwrm_shutdown_link()
11620 int retry = 0, rc; in bnxt_try_recover_fw()
11634 "Firmware not responding, status: 0x%x\n", in bnxt_try_recover_fw()
11655 hw_resc->resv_cp_rings = 0; in bnxt_clear_reservations()
11656 hw_resc->resv_stat_ctxs = 0; in bnxt_clear_reservations()
11657 hw_resc->resv_irqs = 0; in bnxt_clear_reservations()
11658 hw_resc->resv_tx_rings = 0; in bnxt_clear_reservations()
11659 hw_resc->resv_rx_rings = 0; in bnxt_clear_reservations()
11660 hw_resc->resv_hw_ring_grps = 0; in bnxt_clear_reservations()
11661 hw_resc->resv_vnics = 0; in bnxt_clear_reservations()
11662 hw_resc->resv_rsscos_ctxs = 0; in bnxt_clear_reservations()
11664 bp->tx_nr_rings = 0; in bnxt_clear_reservations()
11665 bp->rx_nr_rings = 0; in bnxt_clear_reservations()
11674 return 0; /* no resource reservations required */ in bnxt_cancel_reservations()
11691 int rc, retry = 0; in bnxt_hwrm_if_change()
11692 u32 flags = 0; in bnxt_hwrm_if_change()
11695 return 0; in bnxt_hwrm_if_change()
11730 return 0; in bnxt_hwrm_if_change()
11779 bp->num_leds = 0; in bnxt_hwrm_port_led_qcaps()
11780 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_port_led_qcaps()
11781 return 0; in bnxt_hwrm_port_led_qcaps()
11794 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { in bnxt_hwrm_port_led_qcaps()
11798 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * in bnxt_hwrm_port_led_qcaps()
11800 for (i = 0; i < bp->num_leds; i++) { in bnxt_hwrm_port_led_qcaps()
11806 bp->num_leds = 0; in bnxt_hwrm_port_led_qcaps()
11812 return 0; in bnxt_hwrm_port_led_qcaps()
11858 u16 next_handle = 0; in bnxt_hwrm_get_wol_fltrs()
11871 if (next_handle != 0) { in bnxt_hwrm_get_wol_fltrs()
11885 u16 handle = 0; in bnxt_get_wol_settings()
11887 bp->wol = 0; in bnxt_get_wol_settings()
11893 } while (handle && handle != 0xffff); in bnxt_get_wol_settings()
11911 eee->eee_enabled = 0; in bnxt_eee_config_ok()
11938 return 0; in bnxt_update_phy_setting()
12017 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; in bnxt_cfg_one_usr_fltr()
12050 int rc = 0; in bnxt_set_xps_mapping()
12060 for (i = 0; i < nr_cpus; i++) { in bnxt_set_xps_mapping()
12068 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { in bnxt_set_xps_mapping()
12085 int rc = 0; in __bnxt_open_nic()
12171 return 0; in __bnxt_open_nic()
12186 int rc = 0; in bnxt_open_nic()
12205 int rc = 0; in bnxt_half_open_nic()
12227 return 0; in bnxt_half_open_nic()
12364 else if (rc < 0) in bnxt_close_nic()
12378 return 0; in bnxt_close()
12388 if (bp->hwrm_spec_code < 0x10a00) in bnxt_hwrm_port_phy_read()
12397 req->reg_addr = cpu_to_le16(reg & 0x1f); in bnxt_hwrm_port_phy_read()
12419 if (bp->hwrm_spec_code < 0x10a00) in bnxt_hwrm_port_phy_write()
12428 req->reg_addr = cpu_to_le16(reg & 0x1f); in bnxt_hwrm_port_phy_write()
12453 u16 mii_regval = 0; in bnxt_ioctl()
12489 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_get_ring_stats()
12607 for (i = 0; i < bp->cp_nr_rings; i++) in bnxt_get_ring_err_stats()
12617 int mc_count = 0; in bnxt_mc_list_updated()
12619 int off = 0; in bnxt_mc_list_updated()
12624 vnic->mc_list_count = 0; in bnxt_mc_list_updated()
12650 int off = 0; in bnxt_uc_list_updated()
12691 vnic->mc_list_count = 0; in bnxt_set_rx_mode()
12708 int i, off = 0, rc; in bnxt_cfg_rx_mode()
12739 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { in bnxt_cfg_rx_mode()
12740 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); in bnxt_cfg_rx_mode()
12747 rc = 0; in bnxt_cfg_rx_mode()
12762 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); in bnxt_cfg_rx_mode()
12768 vnic->mc_list_count = 0; in bnxt_cfg_rx_mode()
12769 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); in bnxt_cfg_rx_mode()
12818 struct bnxt_hw_rings hwr = {0}; in bnxt_rfs_capable()
12862 hwr.rss_ctx = 0; in bnxt_rfs_capable()
12918 int rc = 0; in bnxt_set_features()
12941 if ((bp->flags & BNXT_FLAG_TPA) == 0 || in bnxt_set_features()
12942 (flags & BNXT_FLAG_TPA) == 0 || in bnxt_set_features()
12986 int hdr_count = 0; in bnxt_exthdr_check()
13023 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || in bnxt_exthdr_check()
13159 for (i = 0; i < num_words; i++) in bnxt_dbg_hwrm_rd_reg()
13229 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_dbg_dump_states()
13323 if (atomic_read(&bp->intr_sem) != 0) in bnxt_timer()
13398 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_rx_ring_reset()
13417 rxr->rx_prod = 0; in bnxt_rx_ring_reset()
13418 rxr->rx_agg_prod = 0; in bnxt_rx_ring_reset()
13419 rxr->rx_sw_agg_prod = 0; in bnxt_rx_ring_reset()
13420 rxr->rx_next_cons = 0; in bnxt_rx_ring_reset()
13451 u16 val = 0; in bnxt_fw_reset_close()
13454 if (val == 0xffff) in bnxt_fw_reset_close()
13455 bp->fw_reset_min_dsecs = 0; in bnxt_fw_reset_close()
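
bnxt_fw_reset_close() above reads PCI config space and treats an all-ones value (0xffff) as the device having dropped off the bus during the firmware reset, a standard PCI idiom since a non-responding function reads back as all ones. A userspace sketch applying the same check through sysfs (the device path is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Illustrative BDF; any /sys/bus/pci/devices/<bdf>/config works. */
    const char *path = "/sys/bus/pci/devices/0000:01:00.0/config";
    uint16_t vendor = 0xffff;
    FILE *f = fopen(path, "rb");

    if (f) {
        if (fread(&vendor, sizeof(vendor), 1, f) != 1)
            vendor = 0xffff;
        fclose(f);
    }
    if (vendor == 0xffff)
        printf("device absent or not responding\n");
    else
        printf("vendor id 0x%04x\n", vendor);
    return 0;
}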
13511 wait_dsecs = 0; in bnxt_force_fw_reset()
13535 * < 0 on error.
13543 return 0; in bnxt_get_registered_vfs()
13555 return 0; in bnxt_get_registered_vfs()
13565 int n = 0, tmo; in bnxt_fw_reset()
13579 if (n < 0) { in bnxt_fw_reset()
13585 } else if (n > 0) { in bnxt_fw_reset()
13616 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_chk_missed_irq()
13626 for (j = 0; j < cpr->cp_ring_count; j++) { in bnxt_chk_missed_irq()
13640 fw_ring_id, &val[0], &val[1]); in bnxt_chk_missed_irq()
13654 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_init_ethtool_link_settings()
13690 bnxt_ulp_start(bp, 0); in bnxt_ulp_restart()
13719 bnxt_hwrm_port_qstats(bp, 0); in bnxt_sp_task()
13720 bnxt_hwrm_port_qstats_ext(bp, 0); in bnxt_sp_task()
13810 struct bnxt_hw_rings hwr = {0}; in bnxt_check_rings()
13894 u16 flags = 0; in bnxt_init_dflt_coal()
13942 bp->fw_cap = 0; in bnxt_fw_init_one_p1()
13966 return 0; in bnxt_fw_init_one_p1()
13995 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); in bnxt_fw_init_one_p2()
14023 return 0; in bnxt_fw_init_one_p2()
14035 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { in bnxt_set_dflt_rss_hash_type()
14099 return 0; in bnxt_fw_init_one()
14119 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; in bnxt_fw_reset_writel()
14129 pci_read_config_dword(bp->pdev, 0, &val); in bnxt_fw_reset_writel()
14146 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_reset_permitted()
14167 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) in bnxt_reset_all()
14197 bp->fw_reset_state = 0; in bnxt_fw_reset_abort()
14204 int rc = 0; in bnxt_fw_reset_task()
14216 if (n < 0) { in bnxt_fw_reset_task()
14221 } else if (n > 0) { in bnxt_fw_reset_task()
14224 bp->fw_reset_state = 0; in bnxt_fw_reset_task()
14283 if (val == 0xffff) { in bnxt_fw_reset_task()
14338 bp->fw_reset_state = 0; in bnxt_fw_reset_task()
14339 /* Make sure fw_reset_state is 0 before clearing the flag */ in bnxt_fw_reset_task()
14349 bnxt_ulp_start(bp, 0); in bnxt_fw_reset_task()
14364 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); in bnxt_fw_reset_task()
14388 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { in bnxt_init_board()
14401 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && in bnxt_init_board()
14402 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { in bnxt_init_board()
14416 bp->bar0 = pci_ioremap_bar(pdev, 0); in bnxt_init_board()
14441 timer_setup(&bp->timer, bnxt_timer, 0); in bnxt_init_board()
14448 return 0; in bnxt_init_board()
14466 int rc = 0; in bnxt_change_mac_addr()
14472 return 0; in bnxt_change_mac_addr()
14502 return 0; in bnxt_change_mtu()
14518 return 0; in bnxt_setup_mq_tc()
14539 bp->num_tc = 0; in bnxt_setup_mq_tc()
14549 return 0; in bnxt_setup_mq_tc()
14613 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); in bnxt_insert_ntp_filter()
14614 if (bit_id < 0) { in bnxt_insert_ntp_filter()
14628 return 0; in bnxt_insert_ntp_filter()
14693 int rc = 0, idx; in bnxt_rx_flow_steer()
14697 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; in bnxt_rx_flow_steer()
14703 key.vlan = 0; in bnxt_rx_flow_steer()
14719 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { in bnxt_rx_flow_steer()
14733 if (bp->hwrm_spec_code < 0x10601) { in bnxt_rx_flow_steer()
14741 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { in bnxt_rx_flow_steer()
14793 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { in bnxt_cfg_ntp_filters()
14888 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, in bnxt_bridge_getlink()
14897 int rem, rc = 0; in bnxt_bridge_setlink()
14899 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) in bnxt_bridge_setlink()
14936 return 0; in bnxt_get_port_parent_id()
14982 stats->packets = 0; in bnxt_get_queue_stats_rx()
14987 stats->bytes = 0; in bnxt_get_queue_stats_rx()
15005 stats->packets = 0; in bnxt_get_queue_stats_tx()
15010 stats->bytes = 0; in bnxt_get_queue_stats_tx()
15046 return 0; in bnxt_alloc_rx_agg_bmap()
15062 clone->rx_prod = 0; in bnxt_queue_mem_alloc()
15063 clone->rx_agg_prod = 0; in bnxt_queue_mem_alloc()
15064 clone->rx_sw_agg_prod = 0; in bnxt_queue_mem_alloc()
15065 clone->rx_next_cons = 0; in bnxt_queue_mem_alloc()
15071 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0); in bnxt_queue_mem_alloc()
15072 if (rc < 0) in bnxt_queue_mem_alloc()
15104 return 0; in bnxt_queue_mem_alloc()
15166 for (i = 0; i < dst_rmem->nr_pages; i++) { in bnxt_copy_rx_ring()
15190 for (i = 0; i < dst_rmem->nr_pages; i++) { in bnxt_copy_rx_ring()
15232 for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { in bnxt_queue_start()
15239 return 0; in bnxt_queue_start()
15253 for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { in bnxt_queue_stop()
15255 vnic->mru = 0; in bnxt_queue_stop()
15263 rxr->rx_next_cons = 0; in bnxt_queue_stop()
15269 return 0; in bnxt_queue_stop()
15302 bp->sp_event = 0; in bnxt_remove_one()
15329 int rc = 0; in bnxt_probe_phy()
15332 bp->phy_flags = 0; in bnxt_probe_phy()
15344 return 0; in bnxt_probe_phy()
15363 return 0; in bnxt_probe_phy()
15381 int max_ring_grps = 0, max_irq; in _bnxt_get_max_rings()
15404 *max_rx = 0; in _bnxt_get_max_rings()
15405 *max_tx = 0; in _bnxt_get_max_rings()
15457 return 0; in bnxt_get_dflt_rings()
15466 rc = 0; in bnxt_get_dflt_rings()
15488 return 0; in bnxt_set_dflt_rings()
15540 bp->tx_nr_rings = 0; in bnxt_set_dflt_rings()
15541 bp->rx_nr_rings = 0; in bnxt_set_dflt_rings()
15551 return 0; in bnxt_init_dflt_ring_mode()
15603 int rc = 0; in bnxt_init_mac_addr()
15643 if (pos < 0) in bnxt_vpd_read_info()
15653 if (pos < 0) in bnxt_vpd_read_info()
15676 return 0; in bnxt_pcie_dsn_get()
15686 return 0; in bnxt_map_db_bar()
15693 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); in bnxt_print_device_info()
15740 if (rc < 0) in bnxt_init_one()
15942 return 0; in bnxt_init_one()
16004 int rc = 0; in bnxt_suspend()
16024 int rc = 0; in bnxt_resume()
16050 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { in bnxt_resume()
16148 int retry = 0; in bnxt_io_slot_reset()
16149 int err = 0; in bnxt_io_slot_reset()
16171 * write the BARs to 0 to force restore, in case of fatal error. in bnxt_io_slot_reset()
16177 pci_write_config_dword(bp->pdev, off, 0); in bnxt_io_slot_reset()
16277 return 0; in bnxt_init()