xref: /linux/drivers/net/ethernet/broadcom/bnxt/bnxt.c (revision 4ce06406958b67fdddcc2e6948237dd6ff6ba112)
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62 
63 #include "bnxt.h"
64 #include "bnxt_hwrm.h"
65 #include "bnxt_ulp.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77 
78 #define BNXT_TX_TIMEOUT		(5 * HZ)
79 #define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
80 				 NETIF_MSG_TX_ERR)
81 
82 MODULE_IMPORT_NS("NETDEV_INTERNAL");
83 MODULE_LICENSE("GPL");
84 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
85 
86 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
87 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
88 
89 #define BNXT_TX_PUSH_THRESH 164
90 
91 /* indexed by enum board_idx */
92 static const struct {
93 	char *name;
94 } board_info[] = {
95 	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
96 	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
97 	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
98 	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
99 	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
100 	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
101 	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
102 	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
103 	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
104 	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
105 	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
106 	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
107 	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
108 	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
109 	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
110 	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
111 	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
112 	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
113 	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
114 	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
115 	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
116 	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
117 	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
118 	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
119 	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
120 	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
121 	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
122 	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
123 	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
124 	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
126 	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
127 	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
128 	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
129 	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
130 	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
131 	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
132 	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
133 	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
134 	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
135 	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
136 	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
137 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
138 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
139 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
140 	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
141 	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
142 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
143 	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
144 	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
145 	[NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
146 };
147 
148 static const struct pci_device_id bnxt_pci_tbl[] = {
149 	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
150 	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
151 	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
152 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
153 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
154 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
155 	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
156 	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
157 	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
158 	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
159 	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
160 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
161 	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
162 	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
163 	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
164 	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
165 	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
166 	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
167 	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
168 	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
169 	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
170 	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
171 	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
172 	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
173 	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
174 	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
175 	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
176 	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
177 	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
178 	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
179 	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
180 	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
181 	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
182 	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
183 	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
184 	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
185 	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
186 	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
187 	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
188 	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
189 	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
190 	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
191 	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
192 	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
193 	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
194 	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
195 	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
196 	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
197 	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
198 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
199 #ifdef CONFIG_BNXT_SRIOV
200 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
201 	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
202 	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
203 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
204 	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
205 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
206 	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
207 	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
208 	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
209 	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
210 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215 	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
216 	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
217 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
218 	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
219 	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
220 	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
221 	{ PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
222 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
223 #endif
224 	{ 0 }
225 };
226 
227 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
228 
229 static const u16 bnxt_vf_req_snif[] = {
230 	HWRM_FUNC_CFG,
231 	HWRM_FUNC_VF_CFG,
232 	HWRM_PORT_PHY_QCFG,
233 	HWRM_CFA_L2_FILTER_ALLOC,
234 };
235 
236 static const u16 bnxt_async_events_arr[] = {
237 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
238 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
239 	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
240 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
241 	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
242 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
243 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
244 	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
245 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
246 	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
247 	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
248 	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
249 	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
250 	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
251 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
252 	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
253 	ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
254 };
255 
256 const u16 bnxt_bstore_to_trace[] = {
257 	[BNXT_CTX_SRT]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
258 	[BNXT_CTX_SRT2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
259 	[BNXT_CTX_CRT]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
260 	[BNXT_CTX_CRT2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
261 	[BNXT_CTX_RIGP0]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
262 	[BNXT_CTX_L2HWRM]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
263 	[BNXT_CTX_REHWRM]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
264 	[BNXT_CTX_CA0]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
265 	[BNXT_CTX_CA1]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
266 	[BNXT_CTX_CA2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
267 	[BNXT_CTX_RIGP1]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
268 	[BNXT_CTX_KONG]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
269 	[BNXT_CTX_QPC]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
270 };
271 
272 static struct workqueue_struct *bnxt_pf_wq;
273 
274 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
275 			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
276 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
277 
278 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
279 	.ports = {
280 		.src = 0,
281 		.dst = 0,
282 	},
283 	.addrs = {
284 		.v6addrs = {
285 			.src = BNXT_IPV6_MASK_NONE,
286 			.dst = BNXT_IPV6_MASK_NONE,
287 		},
288 	},
289 };
290 
291 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
292 	.ports = {
293 		.src = cpu_to_be16(0xffff),
294 		.dst = cpu_to_be16(0xffff),
295 	},
296 	.addrs = {
297 		.v6addrs = {
298 			.src = BNXT_IPV6_MASK_ALL,
299 			.dst = BNXT_IPV6_MASK_ALL,
300 		},
301 	},
302 };
303 
304 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
305 	.ports = {
306 		.src = cpu_to_be16(0xffff),
307 		.dst = cpu_to_be16(0xffff),
308 	},
309 	.addrs = {
310 		.v4addrs = {
311 			.src = cpu_to_be32(0xffffffff),
312 			.dst = cpu_to_be32(0xffffffff),
313 		},
314 	},
315 };
316 
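/* Return true if the board index identifies a virtual function (VF) device. */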
317 static bool bnxt_vf_pciid(enum board_idx idx)
318 {
319 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
320 		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
321 		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
322 		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
323 		idx == NETXTREME_E_P7_VF_HV);
324 }
325 
326 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
327 #define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
328 
329 #define BNXT_DB_CQ(db, idx)						\
330 	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
331 
332 #define BNXT_DB_NQ_P5(db, idx)						\
333 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
334 		    (db)->doorbell)
335 
336 #define BNXT_DB_NQ_P7(db, idx)						\
337 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
338 		    DB_RING_IDX(db, idx), (db)->doorbell)
339 
340 #define BNXT_DB_CQ_ARM(db, idx)						\
341 	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
342 
343 #define BNXT_DB_NQ_ARM_P5(db, idx)					\
344 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
345 		    DB_RING_IDX(db, idx), (db)->doorbell)
346 
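/* Ring the NQ doorbell using the format required by the chip generation:
 * P7, P5+ (64-bit doorbell write), or the legacy completion-ring doorbell.
 */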
347 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
348 {
349 	if (bp->flags & BNXT_FLAG_CHIP_P7)
350 		BNXT_DB_NQ_P7(db, idx);
351 	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
352 		BNXT_DB_NQ_P5(db, idx);
353 	else
354 		BNXT_DB_CQ(db, idx);
355 }
356 
357 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
358 {
359 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
360 		BNXT_DB_NQ_ARM_P5(db, idx);
361 	else
362 		BNXT_DB_CQ_ARM(db, idx);
363 }
364 
365 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
366 {
367 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
368 		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
369 			    DB_RING_IDX(db, idx), db->doorbell);
370 	else
371 		BNXT_DB_CQ(db, idx);
372 }
373 
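/* Schedule the firmware reset task (only while a firmware reset is in
 * progress); PFs use the dedicated bnxt_pf_wq workqueue, VFs the system one.
 */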
374 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
375 {
376 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
377 		return;
378 
379 	if (BNXT_PF(bp))
380 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
381 	else
382 		schedule_delayed_work(&bp->fw_reset_task, delay);
383 }
384 
385 static void __bnxt_queue_sp_work(struct bnxt *bp)
386 {
387 	if (BNXT_PF(bp))
388 		queue_work(bnxt_pf_wq, &bp->sp_task);
389 	else
390 		schedule_work(&bp->sp_task);
391 }
392 
393 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
394 {
395 	set_bit(event, &bp->sp_event);
396 	__bnxt_queue_sp_work(bp);
397 }
398 
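/* Mark this RX ring's NAPI as in reset and schedule recovery: a full reset
 * task on P5+ chips, a single-ring reset on older chips.
 */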
399 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
400 {
401 	if (!rxr->bnapi->in_reset) {
402 		rxr->bnapi->in_reset = true;
403 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
404 			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
405 		else
406 			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
407 		__bnxt_queue_sp_work(bp);
408 	}
409 	rxr->rx_next_cons = 0xffff;
410 }
411 
412 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
413 			  u16 curr)
414 {
415 	struct bnxt_napi *bnapi = txr->bnapi;
416 
417 	if (bnapi->tx_fault)
418 		return;
419 
420 	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
421 		   txr->txq_index, txr->tx_hw_cons,
422 		   txr->tx_cons, txr->tx_prod, curr);
423 	WARN_ON_ONCE(1);
424 	bnapi->tx_fault = 1;
425 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
426 }
427 
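/* TX length hint flags, indexed by packet length in 512-byte units
 * (see the "length >>= 9" lookup in bnxt_start_xmit()).
 */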
428 const u16 bnxt_lhint_arr[] = {
429 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
430 	TX_BD_FLAGS_LHINT_512_TO_1023,
431 	TX_BD_FLAGS_LHINT_1024_TO_2047,
432 	TX_BD_FLAGS_LHINT_1024_TO_2047,
433 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
444 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
445 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
446 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
447 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
448 };
449 
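/* Return the CFA action (destination port ID) carried in the skb's
 * HW port mux metadata dst, or 0 if none is attached.
 */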
450 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
451 {
452 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
453 
454 	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
455 		return 0;
456 
457 	return md_dst->u.port_info.port_id;
458 }
459 
460 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
461 			     u16 prod)
462 {
463 	/* Sync BD data before updating doorbell */
464 	wmb();
465 	bnxt_db_write(bp, &txr->tx_db, prod);
466 	txr->kick_pending = 0;
467 }
468 
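/* Main transmit routine.  Small packets may be written directly through the
 * push doorbell buffer; otherwise the head and fragments are DMA-mapped into
 * long TX BDs, with optional LSO, checksum, VLAN and PTP timestamp flags.
 * On mapping failure the BDs are unwound and the skb is dropped.
 */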
469 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
470 {
471 	struct bnxt *bp = netdev_priv(dev);
472 	struct tx_bd *txbd, *txbd0;
473 	struct tx_bd_ext *txbd1;
474 	struct netdev_queue *txq;
475 	int i;
476 	dma_addr_t mapping;
477 	unsigned int length, pad = 0;
478 	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
479 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
480 	struct pci_dev *pdev = bp->pdev;
481 	u16 prod, last_frag, txts_prod;
482 	struct bnxt_tx_ring_info *txr;
483 	struct bnxt_sw_tx_bd *tx_buf;
484 	__le32 lflags = 0;
485 	skb_frag_t *frag;
486 
487 	i = skb_get_queue_mapping(skb);
488 	if (unlikely(i >= bp->tx_nr_rings)) {
489 		dev_kfree_skb_any(skb);
490 		dev_core_stats_tx_dropped_inc(dev);
491 		return NETDEV_TX_OK;
492 	}
493 
494 	txq = netdev_get_tx_queue(dev, i);
495 	txr = &bp->tx_ring[bp->tx_ring_map[i]];
496 	prod = txr->tx_prod;
497 
498 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
499 	if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
500 		netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d.  SKB will be linearized.\n",
501 				 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
502 		if (skb_linearize(skb)) {
503 			dev_kfree_skb_any(skb);
504 			dev_core_stats_tx_dropped_inc(dev);
505 			return NETDEV_TX_OK;
506 		}
507 	}
508 #endif
509 	free_size = bnxt_tx_avail(bp, txr);
510 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
511 		/* We must have raced with NAPI cleanup */
512 		if (net_ratelimit() && txr->kick_pending)
513 			netif_warn(bp, tx_err, dev,
514 				   "bnxt: ring busy w/ flush pending!\n");
515 		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
516 					bp->tx_wake_thresh))
517 			return NETDEV_TX_BUSY;
518 	}
519 
520 	length = skb->len;
521 	len = skb_headlen(skb);
522 	last_frag = skb_shinfo(skb)->nr_frags;
523 
524 	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
525 
526 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
527 	tx_buf->skb = skb;
528 	tx_buf->nr_frags = last_frag;
529 
530 	vlan_tag_flags = 0;
531 	cfa_action = bnxt_xmit_get_cfa_action(skb);
532 	if (skb_vlan_tag_present(skb)) {
533 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
534 				 skb_vlan_tag_get(skb);
535 		/* Currently supports 802.1Q and 802.1ad VLAN offloads;
536 		 * QinQ1, QinQ2, QinQ3 VLAN headers are deprecated.
537 		 */
538 		if (skb->vlan_proto == htons(ETH_P_8021Q))
539 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
540 	}
541 
542 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
543 	    ptp->tx_tstamp_en) {
544 		if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
545 			lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
546 			tx_buf->is_ts_pkt = 1;
547 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
548 		} else if (!skb_is_gso(skb)) {
549 			u16 seq_id, hdr_off;
550 
551 			if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
552 			    !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
553 				if (vlan_tag_flags)
554 					hdr_off += VLAN_HLEN;
555 				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
556 				tx_buf->is_ts_pkt = 1;
557 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
558 
559 				ptp->txts_req[txts_prod].tx_seqid = seq_id;
560 				ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
561 				tx_buf->txts_prod = txts_prod;
562 			}
563 		}
564 	}
565 	if (unlikely(skb->no_fcs))
566 		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
567 
568 	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
569 	    skb_frags_readable(skb) && !lflags) {
570 		struct tx_push_buffer *tx_push_buf = txr->tx_push;
571 		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
572 		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
573 		void __iomem *db = txr->tx_db.doorbell;
574 		void *pdata = tx_push_buf->data;
575 		u64 *end;
576 		int j, push_len;
577 
578 		/* Set COAL_NOW to be ready quickly for the next push */
579 		tx_push->tx_bd_len_flags_type =
580 			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
581 					TX_BD_TYPE_LONG_TX_BD |
582 					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
583 					TX_BD_FLAGS_COAL_NOW |
584 					TX_BD_FLAGS_PACKET_END |
585 					TX_BD_CNT(2));
586 
587 		if (skb->ip_summed == CHECKSUM_PARTIAL)
588 			tx_push1->tx_bd_hsize_lflags =
589 					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
590 		else
591 			tx_push1->tx_bd_hsize_lflags = 0;
592 
593 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
594 		tx_push1->tx_bd_cfa_action =
595 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
596 
597 		end = pdata + length;
598 		end = PTR_ALIGN(end, 8) - 1;
599 		*end = 0;
600 
601 		skb_copy_from_linear_data(skb, pdata, len);
602 		pdata += len;
603 		for (j = 0; j < last_frag; j++) {
604 			void *fptr;
605 
606 			frag = &skb_shinfo(skb)->frags[j];
607 			fptr = skb_frag_address_safe(frag);
608 			if (!fptr)
609 				goto normal_tx;
610 
611 			memcpy(pdata, fptr, skb_frag_size(frag));
612 			pdata += skb_frag_size(frag);
613 		}
614 
615 		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
616 		txbd->tx_bd_haddr = txr->data_mapping;
617 		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
618 		prod = NEXT_TX(prod);
619 		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
620 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
621 		memcpy(txbd, tx_push1, sizeof(*txbd));
622 		prod = NEXT_TX(prod);
623 		tx_push->doorbell =
624 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
625 				    DB_RING_IDX(&txr->tx_db, prod));
626 		WRITE_ONCE(txr->tx_prod, prod);
627 
628 		tx_buf->is_push = 1;
629 		netdev_tx_sent_queue(txq, skb->len);
630 		wmb();	/* Sync is_push and byte queue before pushing data */
631 
632 		push_len = (length + sizeof(*tx_push) + 7) / 8;
633 		if (push_len > 16) {
634 			__iowrite64_copy(db, tx_push_buf, 16);
635 			__iowrite32_copy(db + 4, tx_push_buf + 1,
636 					 (push_len - 16) << 1);
637 		} else {
638 			__iowrite64_copy(db, tx_push_buf, push_len);
639 		}
640 
641 		goto tx_done;
642 	}
643 
644 normal_tx:
645 	if (length < BNXT_MIN_PKT_SIZE) {
646 		pad = BNXT_MIN_PKT_SIZE - length;
647 		if (skb_pad(skb, pad))
648 			/* SKB already freed. */
649 			goto tx_kick_pending;
650 		length = BNXT_MIN_PKT_SIZE;
651 	}
652 
653 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
654 
655 	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
656 		goto tx_free;
657 
658 	dma_unmap_addr_set(tx_buf, mapping, mapping);
659 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
660 		TX_BD_CNT(last_frag + 2);
661 
662 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
663 	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
664 
665 	prod = NEXT_TX(prod);
666 	txbd1 = (struct tx_bd_ext *)
667 		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
668 
669 	txbd1->tx_bd_hsize_lflags = lflags;
670 	if (skb_is_gso(skb)) {
671 		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
672 		u32 hdr_len;
673 
674 		if (skb->encapsulation) {
675 			if (udp_gso)
676 				hdr_len = skb_inner_transport_offset(skb) +
677 					  sizeof(struct udphdr);
678 			else
679 				hdr_len = skb_inner_tcp_all_headers(skb);
680 		} else if (udp_gso) {
681 			hdr_len = skb_transport_offset(skb) +
682 				  sizeof(struct udphdr);
683 		} else {
684 			hdr_len = skb_tcp_all_headers(skb);
685 		}
686 
687 		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
688 					TX_BD_FLAGS_T_IPID |
689 					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
690 		length = skb_shinfo(skb)->gso_size;
691 		txbd1->tx_bd_mss = cpu_to_le32(length);
692 		length += hdr_len;
693 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
694 		txbd1->tx_bd_hsize_lflags |=
695 			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
696 		txbd1->tx_bd_mss = 0;
697 	}
698 
699 	length >>= 9;
700 	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
701 		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
702 				     skb->len);
703 		i = 0;
704 		goto tx_dma_error;
705 	}
706 	flags |= bnxt_lhint_arr[length];
707 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
708 
709 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
710 	txbd1->tx_bd_cfa_action =
711 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
712 	txbd0 = txbd;
713 	for (i = 0; i < last_frag; i++) {
714 		frag = &skb_shinfo(skb)->frags[i];
715 		prod = NEXT_TX(prod);
716 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
717 
718 		len = skb_frag_size(frag);
719 		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
720 					   DMA_TO_DEVICE);
721 
722 		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
723 			goto tx_dma_error;
724 
725 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
726 		netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
727 					  mapping, mapping);
728 
729 		txbd->tx_bd_haddr = cpu_to_le64(mapping);
730 
731 		flags = len << TX_BD_LEN_SHIFT;
732 		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
733 	}
734 
735 	flags &= ~TX_BD_LEN;
736 	txbd->tx_bd_len_flags_type =
737 		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
738 			    TX_BD_FLAGS_PACKET_END);
739 
740 	netdev_tx_sent_queue(txq, skb->len);
741 
742 	skb_tx_timestamp(skb);
743 
744 	prod = NEXT_TX(prod);
745 	WRITE_ONCE(txr->tx_prod, prod);
746 
747 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
748 		bnxt_txr_db_kick(bp, txr, prod);
749 	} else {
750 		if (free_size >= bp->tx_wake_thresh)
751 			txbd0->tx_bd_len_flags_type |=
752 				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
753 		txr->kick_pending = 1;
754 	}
755 
756 tx_done:
757 
758 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
759 		if (netdev_xmit_more() && !tx_buf->is_push) {
760 			txbd0->tx_bd_len_flags_type &=
761 				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
762 			bnxt_txr_db_kick(bp, txr, prod);
763 		}
764 
765 		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
766 				   bp->tx_wake_thresh);
767 	}
768 	return NETDEV_TX_OK;
769 
770 tx_dma_error:
771 	last_frag = i;
772 
773 	/* start back at beginning and unmap skb */
774 	prod = txr->tx_prod;
775 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
776 	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
777 			 skb_headlen(skb), DMA_TO_DEVICE);
778 	prod = NEXT_TX(prod);
779 
780 	/* unmap remaining mapped pages */
781 	for (i = 0; i < last_frag; i++) {
782 		prod = NEXT_TX(prod);
783 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
784 		frag = &skb_shinfo(skb)->frags[i];
785 		netmem_dma_unmap_page_attrs(&pdev->dev,
786 					    dma_unmap_addr(tx_buf, mapping),
787 					    skb_frag_size(frag),
788 					    DMA_TO_DEVICE, 0);
789 	}
790 
791 tx_free:
792 	dev_kfree_skb_any(skb);
793 tx_kick_pending:
794 	if (BNXT_TX_PTP_IS_SET(lflags)) {
795 		txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
796 		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
797 		if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
798 			/* set SKB to err so PTP worker will clean up */
799 			ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
800 	}
801 	if (txr->kick_pending)
802 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
803 	txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
804 	dev_core_stats_tx_dropped_inc(dev);
805 	return NETDEV_TX_OK;
806 }
807 
808 /* Returns true if some remaining TX packets were not processed. */
809 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
810 			  int budget)
811 {
812 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
813 	struct pci_dev *pdev = bp->pdev;
814 	u16 hw_cons = txr->tx_hw_cons;
815 	unsigned int tx_bytes = 0;
816 	u16 cons = txr->tx_cons;
817 	skb_frag_t *frag;
818 	int tx_pkts = 0;
819 	bool rc = false;
820 
821 	while (RING_TX(bp, cons) != hw_cons) {
822 		struct bnxt_sw_tx_bd *tx_buf;
823 		struct sk_buff *skb;
824 		bool is_ts_pkt;
825 		int j, last;
826 
827 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
828 		skb = tx_buf->skb;
829 
830 		if (unlikely(!skb)) {
831 			bnxt_sched_reset_txr(bp, txr, cons);
832 			return rc;
833 		}
834 
835 		is_ts_pkt = tx_buf->is_ts_pkt;
836 		if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
837 			rc = true;
838 			break;
839 		}
840 
841 		cons = NEXT_TX(cons);
842 		tx_pkts++;
843 		tx_bytes += skb->len;
844 		tx_buf->skb = NULL;
845 		tx_buf->is_ts_pkt = 0;
846 
847 		if (tx_buf->is_push) {
848 			tx_buf->is_push = 0;
849 			goto next_tx_int;
850 		}
851 
852 		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
853 				 skb_headlen(skb), DMA_TO_DEVICE);
854 		last = tx_buf->nr_frags;
855 
856 		for (j = 0; j < last; j++) {
857 			frag = &skb_shinfo(skb)->frags[j];
858 			cons = NEXT_TX(cons);
859 			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
860 			netmem_dma_unmap_page_attrs(&pdev->dev,
861 						    dma_unmap_addr(tx_buf,
862 								   mapping),
863 						    skb_frag_size(frag),
864 						    DMA_TO_DEVICE, 0);
865 		}
866 		if (unlikely(is_ts_pkt)) {
867 			if (BNXT_CHIP_P5(bp)) {
868 				/* PTP worker takes ownership of the skb */
869 				bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
870 				skb = NULL;
871 			}
872 		}
873 
874 next_tx_int:
875 		cons = NEXT_TX(cons);
876 
877 		napi_consume_skb(skb, budget);
878 	}
879 
880 	WRITE_ONCE(txr->tx_cons, cons);
881 
882 	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
883 				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
884 				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
885 
886 	return rc;
887 }
888 
889 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
890 {
891 	struct bnxt_tx_ring_info *txr;
892 	bool more = false;
893 	int i;
894 
895 	bnxt_for_each_napi_tx(i, bnapi, txr) {
896 		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
897 			more |= __bnxt_tx_int(bp, txr, budget);
898 	}
899 	if (!more)
900 		bnapi->events &= ~BNXT_TX_CMP_EVENT;
901 }
902 
903 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
904 {
905 	return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
906 }
907 
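/* Allocate an RX buffer from the ring's page pool (a page fragment when the
 * ring uses sub-page buffers) and return its DMA address via @mapping.
 */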
908 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
909 					 struct bnxt_rx_ring_info *rxr,
910 					 unsigned int *offset,
911 					 gfp_t gfp)
912 {
913 	struct page *page;
914 
915 	if (rxr->rx_page_size < PAGE_SIZE) {
916 		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
917 						rxr->rx_page_size);
918 	} else {
919 		page = page_pool_dev_alloc_pages(rxr->page_pool);
920 		*offset = 0;
921 	}
922 	if (!page)
923 		return NULL;
924 
925 	*mapping = page_pool_get_dma_addr(page) + *offset;
926 	return page;
927 }
928 
929 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
930 					 struct bnxt_rx_ring_info *rxr,
931 					 unsigned int *offset,
932 					 gfp_t gfp)
933 {
934 	netmem_ref netmem;
935 
936 	if (rxr->rx_page_size < PAGE_SIZE) {
937 		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
938 						     rxr->rx_page_size, gfp);
939 	} else {
940 		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
941 		*offset = 0;
942 	}
943 	if (!netmem)
944 		return 0;
945 
946 	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
947 	return netmem;
948 }
949 
950 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
951 				       struct bnxt_rx_ring_info *rxr,
952 				       gfp_t gfp)
953 {
954 	unsigned int offset;
955 	struct page *page;
956 
957 	page = page_pool_alloc_frag(rxr->head_pool, &offset,
958 				    bp->rx_buf_size, gfp);
959 	if (!page)
960 		return NULL;
961 
962 	*mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
963 	return page_address(page) + offset;
964 }
965 
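/* Allocate a new RX buffer (a page in page mode, otherwise a data frag) for
 * the producer slot @prod and program its DMA address into the RX BD.
 */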
966 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
967 		       u16 prod, gfp_t gfp)
968 {
969 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
970 	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
971 	dma_addr_t mapping;
972 
973 	if (BNXT_RX_PAGE_MODE(bp)) {
974 		unsigned int offset;
975 		struct page *page =
976 			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
977 
978 		if (!page)
979 			return -ENOMEM;
980 
981 		mapping += bp->rx_dma_offset;
982 		rx_buf->data = page;
983 		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
984 	} else {
985 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
986 
987 		if (!data)
988 			return -ENOMEM;
989 
990 		rx_buf->data = data;
991 		rx_buf->data_ptr = data + bp->rx_offset;
992 	}
993 	rx_buf->mapping = mapping;
994 
995 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
996 	return 0;
997 }
998 
999 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
1000 {
1001 	u16 prod = rxr->rx_prod;
1002 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1003 	struct bnxt *bp = rxr->bnapi->bp;
1004 	struct rx_bd *cons_bd, *prod_bd;
1005 
1006 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1007 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1008 
1009 	prod_rx_buf->data = data;
1010 	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1011 
1012 	prod_rx_buf->mapping = cons_rx_buf->mapping;
1013 
1014 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1015 	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1016 
1017 	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1018 }
1019 
1020 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1021 {
1022 	u16 next, max = rxr->rx_agg_bmap_size;
1023 
1024 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1025 	if (next >= max)
1026 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1027 	return next;
1028 }
1029 
1030 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1031 				u16 prod, gfp_t gfp)
1032 {
1033 	struct rx_bd *rxbd =
1034 		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1035 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1036 	u16 sw_prod = rxr->rx_sw_agg_prod;
1037 	unsigned int offset = 0;
1038 	dma_addr_t mapping;
1039 	netmem_ref netmem;
1040 
1041 	netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
1042 	if (!netmem)
1043 		return -ENOMEM;
1044 
1045 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1046 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1047 
1048 	__set_bit(sw_prod, rxr->rx_agg_bmap);
1049 	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1050 	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1051 
1052 	rx_agg_buf->netmem = netmem;
1053 	rx_agg_buf->offset = offset;
1054 	rx_agg_buf->mapping = mapping;
1055 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1056 	rxbd->rx_bd_opaque = sw_prod;
1057 	return 0;
1058 }
1059 
1060 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1061 				       struct bnxt_cp_ring_info *cpr,
1062 				       u16 cp_cons, u16 curr)
1063 {
1064 	struct rx_agg_cmp *agg;
1065 
1066 	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1067 	agg = (struct rx_agg_cmp *)
1068 		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1069 	return agg;
1070 }
1071 
1072 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1073 					      struct bnxt_rx_ring_info *rxr,
1074 					      u16 agg_id, u16 curr)
1075 {
1076 	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1077 
1078 	return &tpa_info->agg_arr[curr];
1079 }
1080 
1081 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1082 				   u16 start, u32 agg_bufs, bool tpa)
1083 {
1084 	struct bnxt_napi *bnapi = cpr->bnapi;
1085 	struct bnxt *bp = bnapi->bp;
1086 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1087 	u16 prod = rxr->rx_agg_prod;
1088 	u16 sw_prod = rxr->rx_sw_agg_prod;
1089 	bool p5_tpa = false;
1090 	u32 i;
1091 
1092 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1093 		p5_tpa = true;
1094 
1095 	for (i = 0; i < agg_bufs; i++) {
1096 		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1097 		struct rx_agg_cmp *agg;
1098 		struct rx_bd *prod_bd;
1099 		netmem_ref netmem;
1100 		u16 cons;
1101 
1102 		if (p5_tpa)
1103 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1104 		else
1105 			agg = bnxt_get_agg(bp, cpr, idx, start + i);
1106 		cons = agg->rx_agg_cmp_opaque;
1107 		__clear_bit(cons, rxr->rx_agg_bmap);
1108 
1109 		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1110 			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1111 
1112 		__set_bit(sw_prod, rxr->rx_agg_bmap);
1113 		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1114 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1115 
1116 		/* It is possible for sw_prod to be equal to cons, so
1117 		 * set cons_rx_buf->netmem to 0 first.
1118 		 */
1119 		netmem = cons_rx_buf->netmem;
1120 		cons_rx_buf->netmem = 0;
1121 		prod_rx_buf->netmem = netmem;
1122 		prod_rx_buf->offset = cons_rx_buf->offset;
1123 
1124 		prod_rx_buf->mapping = cons_rx_buf->mapping;
1125 
1126 		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1127 
1128 		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1129 		prod_bd->rx_bd_opaque = sw_prod;
1130 
1131 		prod = NEXT_RX_AGG(prod);
1132 		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1133 	}
1134 	rxr->rx_agg_prod = prod;
1135 	rxr->rx_sw_agg_prod = sw_prod;
1136 }
1137 
1138 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1139 					      struct bnxt_rx_ring_info *rxr,
1140 					      u16 cons, void *data, u8 *data_ptr,
1141 					      dma_addr_t dma_addr,
1142 					      unsigned int offset_and_len)
1143 {
1144 	unsigned int len = offset_and_len & 0xffff;
1145 	struct page *page = data;
1146 	u16 prod = rxr->rx_prod;
1147 	struct sk_buff *skb;
1148 	int err;
1149 
1150 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1151 	if (unlikely(err)) {
1152 		bnxt_reuse_rx_data(rxr, cons, data);
1153 		return NULL;
1154 	}
1155 	dma_addr -= bp->rx_dma_offset;
1156 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1157 				bp->rx_dir);
1158 	skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
1159 	if (!skb) {
1160 		page_pool_recycle_direct(rxr->page_pool, page);
1161 		return NULL;
1162 	}
1163 	skb_mark_for_recycle(skb);
1164 	skb_reserve(skb, bp->rx_offset);
1165 	__skb_put(skb, len);
1166 
1167 	return skb;
1168 }
1169 
1170 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1171 					struct bnxt_rx_ring_info *rxr,
1172 					u16 cons, void *data, u8 *data_ptr,
1173 					dma_addr_t dma_addr,
1174 					unsigned int offset_and_len)
1175 {
1176 	unsigned int payload = offset_and_len >> 16;
1177 	unsigned int len = offset_and_len & 0xffff;
1178 	skb_frag_t *frag;
1179 	struct page *page = data;
1180 	u16 prod = rxr->rx_prod;
1181 	struct sk_buff *skb;
1182 	int off, err;
1183 
1184 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1185 	if (unlikely(err)) {
1186 		bnxt_reuse_rx_data(rxr, cons, data);
1187 		return NULL;
1188 	}
1189 	dma_addr -= bp->rx_dma_offset;
1190 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1191 				bp->rx_dir);
1192 
1193 	if (unlikely(!payload))
1194 		payload = eth_get_headlen(bp->dev, data_ptr, len);
1195 
1196 	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1197 	if (!skb) {
1198 		page_pool_recycle_direct(rxr->page_pool, page);
1199 		return NULL;
1200 	}
1201 
1202 	skb_mark_for_recycle(skb);
1203 	off = (void *)data_ptr - page_address(page);
1204 	skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
1205 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1206 	       payload + NET_IP_ALIGN);
1207 
1208 	frag = &skb_shinfo(skb)->frags[0];
1209 	skb_frag_size_sub(frag, payload);
1210 	skb_frag_off_add(frag, payload);
1211 	skb->data_len -= payload;
1212 	skb->tail += payload;
1213 
1214 	return skb;
1215 }
1216 
1217 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1218 				   struct bnxt_rx_ring_info *rxr, u16 cons,
1219 				   void *data, u8 *data_ptr,
1220 				   dma_addr_t dma_addr,
1221 				   unsigned int offset_and_len)
1222 {
1223 	u16 prod = rxr->rx_prod;
1224 	struct sk_buff *skb;
1225 	int err;
1226 
1227 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1228 	if (unlikely(err)) {
1229 		bnxt_reuse_rx_data(rxr, cons, data);
1230 		return NULL;
1231 	}
1232 
1233 	skb = napi_build_skb(data, bp->rx_buf_size);
1234 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1235 				bp->rx_dir);
1236 	if (!skb) {
1237 		page_pool_free_va(rxr->head_pool, data, true);
1238 		return NULL;
1239 	}
1240 
1241 	skb_mark_for_recycle(skb);
1242 	skb_reserve(skb, bp->rx_offset);
1243 	skb_put(skb, offset_and_len & 0xffff);
1244 	return skb;
1245 }
1246 
1247 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
1248 				 struct bnxt_cp_ring_info *cpr,
1249 				 u16 idx, u32 agg_bufs, bool tpa,
1250 				 struct sk_buff *skb,
1251 				 struct xdp_buff *xdp)
1252 {
1253 	struct bnxt_napi *bnapi = cpr->bnapi;
1254 	struct skb_shared_info *shinfo;
1255 	struct bnxt_rx_ring_info *rxr;
1256 	u32 i, total_frag_len = 0;
1257 	bool p5_tpa = false;
1258 	u16 prod;
1259 
1260 	rxr = bnapi->rx_ring;
1261 	prod = rxr->rx_agg_prod;
1262 
1263 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1264 		p5_tpa = true;
1265 
1266 	if (skb)
1267 		shinfo = skb_shinfo(skb);
1268 	else
1269 		shinfo = xdp_get_shared_info_from_buff(xdp);
1270 
1271 	for (i = 0; i < agg_bufs; i++) {
1272 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1273 		struct rx_agg_cmp *agg;
1274 		u16 cons, frag_len;
1275 		netmem_ref netmem;
1276 
1277 		if (p5_tpa)
1278 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1279 		else
1280 			agg = bnxt_get_agg(bp, cpr, idx, i);
1281 		cons = agg->rx_agg_cmp_opaque;
1282 		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1283 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1284 
1285 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1286 		if (skb) {
1287 			skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1288 					       cons_rx_buf->offset,
1289 					       frag_len, rxr->rx_page_size);
1290 		} else {
1291 			skb_frag_t *frag = &shinfo->frags[i];
1292 
1293 			skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1294 						  cons_rx_buf->offset,
1295 						  frag_len);
1296 			shinfo->nr_frags = i + 1;
1297 		}
1298 		__clear_bit(cons, rxr->rx_agg_bmap);
1299 
1300 		/* It is possible for bnxt_alloc_rx_netmem() to allocate
1301 		 * a sw_prod index that equals the cons index, so we
1302 		 * need to clear the cons entry now.
1303 		 */
1304 		netmem = cons_rx_buf->netmem;
1305 		cons_rx_buf->netmem = 0;
1306 
1307 		if (xdp && netmem_is_pfmemalloc(netmem))
1308 			xdp_buff_set_frag_pfmemalloc(xdp);
1309 
1310 		if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1311 			if (skb) {
1312 				skb->len -= frag_len;
1313 				skb->data_len -= frag_len;
1314 				skb->truesize -= rxr->rx_page_size;
1315 			}
1316 
1317 			--shinfo->nr_frags;
1318 			cons_rx_buf->netmem = netmem;
1319 
1320 			/* Update prod since possibly some netmems have been
1321 			 * allocated already.
1322 			 */
1323 			rxr->rx_agg_prod = prod;
1324 			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1325 			return 0;
1326 		}
1327 
1328 		page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1329 						  rxr->rx_page_size);
1330 
1331 		total_frag_len += frag_len;
1332 		prod = NEXT_RX_AGG(prod);
1333 	}
1334 	rxr->rx_agg_prod = prod;
1335 	return total_frag_len;
1336 }
1337 
1338 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
1339 					       struct bnxt_cp_ring_info *cpr,
1340 					       struct sk_buff *skb, u16 idx,
1341 					       u32 agg_bufs, bool tpa)
1342 {
1343 	u32 total_frag_len = 0;
1344 
1345 	total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1346 					       skb, NULL);
1347 	if (!total_frag_len) {
1348 		skb_mark_for_recycle(skb);
1349 		dev_kfree_skb(skb);
1350 		return NULL;
1351 	}
1352 
1353 	return skb;
1354 }
1355 
1356 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
1357 				   struct bnxt_cp_ring_info *cpr,
1358 				   struct xdp_buff *xdp, u16 idx,
1359 				   u32 agg_bufs, bool tpa)
1360 {
1361 	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1362 	u32 total_frag_len = 0;
1363 
1364 	if (!xdp_buff_has_frags(xdp))
1365 		shinfo->nr_frags = 0;
1366 
1367 	total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1368 					       NULL, xdp);
1369 	if (total_frag_len) {
1370 		xdp_buff_set_frags_flag(xdp);
1371 		shinfo->nr_frags = agg_bufs;
1372 		shinfo->xdp_frags_size = total_frag_len;
1373 	}
1374 	return total_frag_len;
1375 }
1376 
1377 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1378 			       u8 agg_bufs, u32 *raw_cons)
1379 {
1380 	u16 last;
1381 	struct rx_agg_cmp *agg;
1382 
1383 	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1384 	last = RING_CMP(*raw_cons);
1385 	agg = (struct rx_agg_cmp *)
1386 		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1387 	return RX_AGG_CMP_VALID(agg, *raw_cons);
1388 }
1389 
1390 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1391 				      unsigned int len,
1392 				      dma_addr_t mapping)
1393 {
1394 	struct bnxt *bp = bnapi->bp;
1395 	struct pci_dev *pdev = bp->pdev;
1396 	struct sk_buff *skb;
1397 
1398 	skb = napi_alloc_skb(&bnapi->napi, len);
1399 	if (!skb)
1400 		return NULL;
1401 
1402 	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1403 				bp->rx_dir);
1404 
1405 	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1406 	       len + NET_IP_ALIGN);
1407 
1408 	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1409 				   bp->rx_dir);
1410 
1411 	skb_put(skb, len);
1412 
1413 	return skb;
1414 }
1415 
1416 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1417 				     unsigned int len,
1418 				     dma_addr_t mapping)
1419 {
1420 	return bnxt_copy_data(bnapi, data, len, mapping);
1421 }
1422 
1423 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1424 				     struct xdp_buff *xdp,
1425 				     unsigned int len,
1426 				     dma_addr_t mapping)
1427 {
1428 	unsigned int metasize = 0;
1429 	u8 *data = xdp->data;
1430 	struct sk_buff *skb;
1431 
1432 	len = xdp->data_end - xdp->data_meta;
1433 	metasize = xdp->data - xdp->data_meta;
1434 	data = xdp->data_meta;
1435 
1436 	skb = bnxt_copy_data(bnapi, data, len, mapping);
1437 	if (!skb)
1438 		return skb;
1439 
1440 	if (metasize) {
1441 		skb_metadata_set(skb, metasize);
1442 		__skb_pull(skb, metasize);
1443 	}
1444 
1445 	return skb;
1446 }
1447 
1448 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1449 			   u32 *raw_cons, void *cmp)
1450 {
1451 	struct rx_cmp *rxcmp = cmp;
1452 	u32 tmp_raw_cons = *raw_cons;
1453 	u8 cmp_type, agg_bufs = 0;
1454 
1455 	cmp_type = RX_CMP_TYPE(rxcmp);
1456 
1457 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1458 		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1459 			    RX_CMP_AGG_BUFS) >>
1460 			   RX_CMP_AGG_BUFS_SHIFT;
1461 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1462 		struct rx_tpa_end_cmp *tpa_end = cmp;
1463 
1464 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1465 			return 0;
1466 
1467 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1468 	}
1469 
1470 	if (agg_bufs) {
1471 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1472 			return -EBUSY;
1473 	}
1474 	*raw_cons = tmp_raw_cons;
1475 	return 0;
1476 }
1477 
1478 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1479 {
1480 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1481 	u16 idx = agg_id & MAX_TPA_P5_MASK;
1482 
1483 	if (test_bit(idx, map->agg_idx_bmap)) {
1484 		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
1485 		if (idx >= MAX_TPA_P5)
1486 			return INVALID_HW_RING_ID;
1487 	}
1488 	__set_bit(idx, map->agg_idx_bmap);
1489 	map->agg_id_tbl[agg_id] = idx;
1490 	return idx;
1491 }
1492 
1493 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1494 {
1495 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1496 
1497 	__clear_bit(idx, map->agg_idx_bmap);
1498 }
1499 
1500 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1501 {
1502 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1503 
1504 	return map->agg_id_tbl[agg_id];
1505 }
1506 
1507 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1508 			      struct rx_tpa_start_cmp *tpa_start,
1509 			      struct rx_tpa_start_cmp_ext *tpa_start1)
1510 {
1511 	tpa_info->cfa_code_valid = 1;
1512 	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1513 	tpa_info->vlan_valid = 0;
1514 	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1515 		tpa_info->vlan_valid = 1;
1516 		tpa_info->metadata =
1517 			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1518 	}
1519 }
1520 
1521 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1522 				 struct rx_tpa_start_cmp *tpa_start,
1523 				 struct rx_tpa_start_cmp_ext *tpa_start1)
1524 {
1525 	tpa_info->vlan_valid = 0;
1526 	if (TPA_START_VLAN_VALID(tpa_start)) {
1527 		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1528 		u32 vlan_proto = ETH_P_8021Q;
1529 
1530 		tpa_info->vlan_valid = 1;
1531 		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1532 			vlan_proto = ETH_P_8021AD;
1533 		tpa_info->metadata = vlan_proto << 16 |
1534 				     TPA_START_METADATA0_TCI(tpa_start1);
1535 	}
1536 }
1537 
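/* Handle a TPA_START completion: move the started RX buffer into the TPA info
 * slot (recycling the slot's previous buffer to the producer), and record the
 * length, hash, GSO type and VLAN metadata for the aggregation.
 */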
1538 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1539 			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1540 			   struct rx_tpa_start_cmp_ext *tpa_start1)
1541 {
1542 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1543 	struct bnxt_tpa_info *tpa_info;
1544 	u16 cons, prod, agg_id;
1545 	struct rx_bd *prod_bd;
1546 	dma_addr_t mapping;
1547 
1548 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1549 		agg_id = TPA_START_AGG_ID_P5(tpa_start);
1550 		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1551 		if (unlikely(agg_id == INVALID_HW_RING_ID)) {
1552 			netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
1553 				    rxr->bnapi->index,
1554 				    TPA_START_AGG_ID_P5(tpa_start));
1555 			bnxt_sched_reset_rxr(bp, rxr);
1556 			return;
1557 		}
1558 	} else {
1559 		agg_id = TPA_START_AGG_ID(tpa_start);
1560 	}
1561 	cons = tpa_start->rx_tpa_start_cmp_opaque;
1562 	prod = rxr->rx_prod;
1563 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1564 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1565 	tpa_info = &rxr->rx_tpa[agg_id];
1566 
1567 	if (unlikely(cons != rxr->rx_next_cons ||
1568 		     TPA_START_ERROR(tpa_start))) {
1569 		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1570 			    cons, rxr->rx_next_cons,
1571 			    TPA_START_ERROR_CODE(tpa_start1));
1572 		bnxt_sched_reset_rxr(bp, rxr);
1573 		return;
1574 	}
1575 	prod_rx_buf->data = tpa_info->data;
1576 	prod_rx_buf->data_ptr = tpa_info->data_ptr;
1577 
1578 	mapping = tpa_info->mapping;
1579 	prod_rx_buf->mapping = mapping;
1580 
1581 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1582 
1583 	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1584 
1585 	tpa_info->data = cons_rx_buf->data;
1586 	tpa_info->data_ptr = cons_rx_buf->data_ptr;
1587 	cons_rx_buf->data = NULL;
1588 	tpa_info->mapping = cons_rx_buf->mapping;
1589 
1590 	tpa_info->len =
1591 		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1592 				RX_TPA_START_CMP_LEN_SHIFT;
1593 	if (likely(TPA_START_HASH_VALID(tpa_start))) {
1594 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
1595 		tpa_info->gso_type = SKB_GSO_TCPV4;
1596 		if (TPA_START_IS_IPV6(tpa_start1))
1597 			tpa_info->gso_type = SKB_GSO_TCPV6;
1598 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1599 		else if (!BNXT_CHIP_P4_PLUS(bp) &&
1600 			 TPA_START_HASH_TYPE(tpa_start) == 3)
1601 			tpa_info->gso_type = SKB_GSO_TCPV6;
1602 		tpa_info->rss_hash =
1603 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1604 	} else {
1605 		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1606 		tpa_info->gso_type = 0;
1607 		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1608 	}
1609 	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1610 	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1611 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1612 		bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1613 	else
1614 		bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1615 	tpa_info->agg_count = 0;
1616 
1617 	rxr->rx_prod = NEXT_RX(prod);
1618 	cons = RING_RX(bp, NEXT_RX(cons));
1619 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1620 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1621 
1622 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1623 	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1624 	cons_rx_buf->data = NULL;
1625 }
1626 
1627 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1628 {
1629 	if (agg_bufs)
1630 		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1631 }
1632 
1633 #ifdef CONFIG_INET
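/* Set the UDP tunnel GSO type on an aggregated packet.  Parse the outer
 * IPv4/IPv6 header at skb->data and, if the next header is UDP, select
 * SKB_GSO_UDP_TUNNEL_CSUM or SKB_GSO_UDP_TUNNEL depending on whether the
 * outer UDP checksum is non-zero.
 */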
1634 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1635 {
1636 	struct udphdr *uh = NULL;
1637 
1638 	if (ip_proto == htons(ETH_P_IP)) {
1639 		struct iphdr *iph = (struct iphdr *)skb->data;
1640 
1641 		if (iph->protocol == IPPROTO_UDP)
1642 			uh = (struct udphdr *)(iph + 1);
1643 	} else {
1644 		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1645 
1646 		if (iph->nexthdr == IPPROTO_UDP)
1647 			uh = (struct udphdr *)(iph + 1);
1648 	}
1649 	if (uh) {
1650 		if (uh->check)
1651 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1652 		else
1653 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1654 	}
1655 }
1656 #endif
1657 
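/* GRO fixup for aggregated packets on BCM5731X-class devices.  Locate the
 * inner IP and TCP headers from the TPA header info (compensating for the
 * extra 4 bytes of offset seen on internal loopback packets), prime the TCP
 * pseudo-header checksum, and mark tunnel GSO types for encapsulated packets.
 */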
1658 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1659 					   int payload_off, int tcp_ts,
1660 					   struct sk_buff *skb)
1661 {
1662 #ifdef CONFIG_INET
1663 	struct tcphdr *th;
1664 	int len, nw_off;
1665 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1666 	u32 hdr_info = tpa_info->hdr_info;
1667 	bool loopback = false;
1668 
1669 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1670 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1671 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1672 
1673 	/* If the packet is an internal loopback packet, the offsets will
1674 	 * have an extra 4 bytes.
1675 	 */
1676 	if (inner_mac_off == 4) {
1677 		loopback = true;
1678 	} else if (inner_mac_off > 4) {
1679 		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1680 					    ETH_HLEN - 2));
1681 
1682 		/* We only support inner IPv4/IPv6.  If we don't see the
1683 		 * correct protocol ID, it must be a loopback packet where
1684 		 * the offsets are off by 4.
1685 		 */
1686 		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1687 			loopback = true;
1688 	}
1689 	if (loopback) {
1690 		/* internal loopback packet, subtract 4 from all offsets */
1691 		inner_ip_off -= 4;
1692 		inner_mac_off -= 4;
1693 		outer_ip_off -= 4;
1694 	}
1695 
1696 	nw_off = inner_ip_off - ETH_HLEN;
1697 	skb_set_network_header(skb, nw_off);
1698 	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1699 		struct ipv6hdr *iph = ipv6_hdr(skb);
1700 
1701 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1702 		len = skb->len - skb_transport_offset(skb);
1703 		th = tcp_hdr(skb);
1704 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1705 	} else {
1706 		struct iphdr *iph = ip_hdr(skb);
1707 
1708 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1709 		len = skb->len - skb_transport_offset(skb);
1710 		th = tcp_hdr(skb);
1711 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1712 	}
1713 
1714 	if (inner_mac_off) { /* tunnel */
1715 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1716 					    ETH_HLEN - 2));
1717 
1718 		bnxt_gro_tunnel(skb, proto);
1719 	}
1720 #endif
1721 	return skb;
1722 }
1723 
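/* GRO fixup for aggregated packets on BCM5750X-class devices.  Only the
 * inner network and transport header offsets need to be set from the TPA
 * header info; unlike the 5731X variant, the TCP pseudo-header checksum is
 * not recomputed here.
 */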
1724 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1725 					   int payload_off, int tcp_ts,
1726 					   struct sk_buff *skb)
1727 {
1728 #ifdef CONFIG_INET
1729 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1730 	u32 hdr_info = tpa_info->hdr_info;
1731 	int iphdr_len, nw_off;
1732 
1733 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1734 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1735 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1736 
1737 	nw_off = inner_ip_off - ETH_HLEN;
1738 	skb_set_network_header(skb, nw_off);
1739 	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1740 		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1741 	skb_set_transport_header(skb, nw_off + iphdr_len);
1742 
1743 	if (inner_mac_off) { /* tunnel */
1744 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1745 					    ETH_HLEN - 2));
1746 
1747 		bnxt_gro_tunnel(skb, proto);
1748 	}
1749 #endif
1750 	return skb;
1751 }
1752 
1753 #define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
1754 #define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1755 
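/* GRO fixup for aggregated packets on BCM5730X-class devices.  Derive the
 * header offsets from the payload offset reported in the TPA end completion
 * (accounting for a 12-byte TCP timestamp option when present) and prime the
 * TCP pseudo-header checksum.  The skb is freed if the GSO type is neither
 * TCPv4 nor TCPv6.
 */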
1756 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1757 					   int payload_off, int tcp_ts,
1758 					   struct sk_buff *skb)
1759 {
1760 #ifdef CONFIG_INET
1761 	struct tcphdr *th;
1762 	int len, nw_off, tcp_opt_len = 0;
1763 
1764 	if (tcp_ts)
1765 		tcp_opt_len = 12;
1766 
1767 	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1768 		struct iphdr *iph;
1769 
1770 		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1771 			 ETH_HLEN;
1772 		skb_set_network_header(skb, nw_off);
1773 		iph = ip_hdr(skb);
1774 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1775 		len = skb->len - skb_transport_offset(skb);
1776 		th = tcp_hdr(skb);
1777 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1778 	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1779 		struct ipv6hdr *iph;
1780 
1781 		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1782 			 ETH_HLEN;
1783 		skb_set_network_header(skb, nw_off);
1784 		iph = ipv6_hdr(skb);
1785 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1786 		len = skb->len - skb_transport_offset(skb);
1787 		th = tcp_hdr(skb);
1788 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1789 	} else {
1790 		dev_kfree_skb_any(skb);
1791 		return NULL;
1792 	}
1793 
1794 	if (nw_off) /* tunnel */
1795 		bnxt_gro_tunnel(skb, skb->protocol);
1796 #endif
1797 	return skb;
1798 }
1799 
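/* Complete GRO processing for a TPA aggregation: update the HW GRO stats,
 * fill in the GRO segment count, gso_size and gso_type, then invoke the
 * chip-specific gro_func followed by tcp_gro_complete().
 */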
1800 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1801 					   struct bnxt_tpa_info *tpa_info,
1802 					   struct rx_tpa_end_cmp *tpa_end,
1803 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1804 					   struct sk_buff *skb,
1805 					   struct bnxt_rx_sw_stats *rx_stats)
1806 {
1807 #ifdef CONFIG_INET
1808 	int payload_off;
1809 	u16 segs;
1810 
1811 	segs = TPA_END_TPA_SEGS(tpa_end);
1812 	if (segs == 1)
1813 		return skb;
1814 
1815 	rx_stats->rx_hw_gro_packets++;
1816 	rx_stats->rx_hw_gro_wire_packets += segs;
1817 
1818 	NAPI_GRO_CB(skb)->count = segs;
1819 	skb_shinfo(skb)->gso_size =
1820 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1821 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1822 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1823 		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1824 	else
1825 		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1826 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1827 	if (likely(skb))
1828 		tcp_gro_complete(skb);
1829 #endif
1830 	return skb;
1831 }
1832 
1833 /* Given the cfa_code of a received packet, determine which
1834  * netdev (vf-rep or PF) the packet is destined to.
1835  */
1836 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1837 {
1838 	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1839 
1840 	/* if vf-rep dev is NULL, it must belong to the PF */
1841 	return dev ? dev : bp->dev;
1842 }
1843 
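/* Handle a TPA end completion.  Look up the aggregation state, build an skb
 * from the TPA buffer (copying short packets, otherwise wrapping the buffer
 * with napi_build_skb() and replenishing it), append any aggregation buffers,
 * apply VLAN, RSS hash and checksum metadata, and run the GRO fixups when GRO
 * is enabled.  Returns NULL on error, or ERR_PTR(-EBUSY) if the completion
 * ring does not yet have all the required entries.
 */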
1844 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1845 					   struct bnxt_cp_ring_info *cpr,
1846 					   u32 *raw_cons,
1847 					   struct rx_tpa_end_cmp *tpa_end,
1848 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1849 					   u8 *event)
1850 {
1851 	struct bnxt_napi *bnapi = cpr->bnapi;
1852 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1853 	struct net_device *dev = bp->dev;
1854 	u8 *data_ptr, agg_bufs;
1855 	unsigned int len;
1856 	struct bnxt_tpa_info *tpa_info;
1857 	dma_addr_t mapping;
1858 	struct sk_buff *skb;
1859 	u16 idx = 0, agg_id;
1860 	void *data;
1861 	bool gro;
1862 
1863 	if (unlikely(bnapi->in_reset)) {
1864 		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1865 
1866 		if (rc < 0)
1867 			return ERR_PTR(-EBUSY);
1868 		return NULL;
1869 	}
1870 
1871 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1872 		agg_id = TPA_END_AGG_ID_P5(tpa_end);
1873 		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1874 		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1875 		tpa_info = &rxr->rx_tpa[agg_id];
1876 		if (unlikely(agg_bufs != tpa_info->agg_count)) {
1877 			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1878 				    agg_bufs, tpa_info->agg_count);
1879 			agg_bufs = tpa_info->agg_count;
1880 		}
1881 		tpa_info->agg_count = 0;
1882 		*event |= BNXT_AGG_EVENT;
1883 		bnxt_free_agg_idx(rxr, agg_id);
1884 		idx = agg_id;
1885 		gro = !!(bp->flags & BNXT_FLAG_GRO);
1886 	} else {
1887 		agg_id = TPA_END_AGG_ID(tpa_end);
1888 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1889 		tpa_info = &rxr->rx_tpa[agg_id];
1890 		idx = RING_CMP(*raw_cons);
1891 		if (agg_bufs) {
1892 			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1893 				return ERR_PTR(-EBUSY);
1894 
1895 			*event |= BNXT_AGG_EVENT;
1896 			idx = NEXT_CMP(idx);
1897 		}
1898 		gro = !!TPA_END_GRO(tpa_end);
1899 	}
1900 	data = tpa_info->data;
1901 	data_ptr = tpa_info->data_ptr;
1902 	prefetch(data_ptr);
1903 	len = tpa_info->len;
1904 	mapping = tpa_info->mapping;
1905 
1906 	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1907 		bnxt_abort_tpa(cpr, idx, agg_bufs);
1908 		if (agg_bufs > MAX_SKB_FRAGS)
1909 			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1910 				    agg_bufs, (int)MAX_SKB_FRAGS);
1911 		return NULL;
1912 	}
1913 
1914 	if (len <= bp->rx_copybreak) {
1915 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1916 		if (!skb) {
1917 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1918 			cpr->sw_stats->rx.rx_oom_discards += 1;
1919 			return NULL;
1920 		}
1921 	} else {
1922 		u8 *new_data;
1923 		dma_addr_t new_mapping;
1924 
1925 		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1926 						GFP_ATOMIC);
1927 		if (!new_data) {
1928 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1929 			cpr->sw_stats->rx.rx_oom_discards += 1;
1930 			return NULL;
1931 		}
1932 
1933 		tpa_info->data = new_data;
1934 		tpa_info->data_ptr = new_data + bp->rx_offset;
1935 		tpa_info->mapping = new_mapping;
1936 
1937 		skb = napi_build_skb(data, bp->rx_buf_size);
1938 		dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1939 					bp->rx_buf_use_size, bp->rx_dir);
1940 
1941 		if (!skb) {
1942 			page_pool_free_va(rxr->head_pool, data, true);
1943 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1944 			cpr->sw_stats->rx.rx_oom_discards += 1;
1945 			return NULL;
1946 		}
1947 		skb_mark_for_recycle(skb);
1948 		skb_reserve(skb, bp->rx_offset);
1949 		skb_put(skb, len);
1950 	}
1951 
1952 	if (agg_bufs) {
1953 		skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1954 					      true);
1955 		if (!skb) {
1956 			/* Page reuse already handled by bnxt_rx_agg_netmems_skb(). */
1957 			cpr->sw_stats->rx.rx_oom_discards += 1;
1958 			return NULL;
1959 		}
1960 	}
1961 
1962 	if (tpa_info->cfa_code_valid)
1963 		dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1964 	skb->protocol = eth_type_trans(skb, dev);
1965 
1966 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1967 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1968 
1969 	if (tpa_info->vlan_valid &&
1970 	    (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1971 		__be16 vlan_proto = htons(tpa_info->metadata >>
1972 					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
1973 		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1974 
1975 		if (eth_type_vlan(vlan_proto)) {
1976 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1977 		} else {
1978 			dev_kfree_skb(skb);
1979 			return NULL;
1980 		}
1981 	}
1982 
1983 	skb_checksum_none_assert(skb);
1984 	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1985 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1986 		skb->csum_level =
1987 			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1988 	}
1989 
1990 	if (gro)
1991 		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb,
1992 				   &cpr->sw_stats->rx);
1993 
1994 	return skb;
1995 }
1996 
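/* Queue a TPA aggregation buffer completion into the per-aggregation array
 * so it can be consumed when the corresponding TPA end completion arrives.
 */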
1997 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1998 			 struct rx_agg_cmp *rx_agg)
1999 {
2000 	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
2001 	struct bnxt_tpa_info *tpa_info;
2002 
2003 	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
2004 	tpa_info = &rxr->rx_tpa[agg_id];
2005 	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
2006 	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
2007 }
2008 
2009 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
2010 			     struct sk_buff *skb)
2011 {
2012 	skb_mark_for_recycle(skb);
2013 
2014 	if (skb->dev != bp->dev) {
2015 		/* this packet belongs to a vf-rep */
2016 		bnxt_vf_rep_rx(bp, skb);
2017 		return;
2018 	}
2019 	skb_record_rx_queue(skb, bnapi->index);
2020 	napi_gro_receive(&bnapi->napi, skb);
2021 }
2022 
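/* Return true and the raw completion timestamp if the RX completion carries
 * a valid PTP timestamp (either a PTP packet timestamp or, when timestamping
 * all packets, any valid timestamp).
 */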
2023 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2024 			     struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2025 {
2026 	u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2027 
2028 	if (BNXT_PTP_RX_TS_VALID(flags))
2029 		goto ts_valid;
2030 	if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2031 		return false;
2032 
2033 ts_valid:
2034 	*cmpl_ts = ts;
2035 	return true;
2036 }
2037 
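/* Extract the VLAN tag from an RX completion (handling both the legacy and
 * the V3 completion formats) and attach it to the skb.  Frees the skb and
 * returns NULL if the TPID is not a supported VLAN protocol.
 */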
2038 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2039 				    struct rx_cmp *rxcmp,
2040 				    struct rx_cmp_ext *rxcmp1)
2041 {
2042 	__be16 vlan_proto;
2043 	u16 vtag;
2044 
2045 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2046 		__le32 flags2 = rxcmp1->rx_cmp_flags2;
2047 		u32 meta_data;
2048 
2049 		if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2050 			return skb;
2051 
2052 		meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2053 		vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2054 		vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2055 		if (eth_type_vlan(vlan_proto))
2056 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2057 		else
2058 			goto vlan_err;
2059 	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2060 		if (RX_CMP_VLAN_VALID(rxcmp)) {
2061 			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2062 
2063 			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2064 				vlan_proto = htons(ETH_P_8021Q);
2065 			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2066 				vlan_proto = htons(ETH_P_8021AD);
2067 			else
2068 				goto vlan_err;
2069 			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2070 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2071 		}
2072 	}
2073 	return skb;
2074 vlan_err:
2075 	skb_mark_for_recycle(skb);
2076 	dev_kfree_skb(skb);
2077 	return NULL;
2078 }
2079 
2080 /* returns the following:
2081  * 1       - 1 packet successfully received
2082  * 0       - successful TPA_START, packet not completed yet
2083  * -EBUSY  - completion ring does not have all the agg buffers yet
2084  * -ENOMEM - packet aborted due to out of memory
2085  * -EIO    - packet aborted due to hw error indicated in BD
2086  */
2087 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2088 		       u32 *raw_cons, u8 *event)
2089 {
2090 	struct bnxt_napi *bnapi = cpr->bnapi;
2091 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2092 	struct net_device *dev = bp->dev;
2093 	struct rx_cmp *rxcmp;
2094 	struct rx_cmp_ext *rxcmp1;
2095 	u32 tmp_raw_cons = *raw_cons;
2096 	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2097 	struct skb_shared_info *sinfo;
2098 	struct bnxt_xdp_buff bnxt_xdp;
2099 	struct bnxt_sw_rx_bd *rx_buf;
2100 	unsigned int len;
2101 	u8 *data_ptr, agg_bufs, cmp_type;
2102 	bool xdp_active = false;
2103 	dma_addr_t dma_addr;
2104 	struct sk_buff *skb;
2105 	u32 flags, misc;
2106 	u32 cmpl_ts;
2107 	void *data;
2108 	int rc = 0;
2109 
2110 	rxcmp = (struct rx_cmp *)
2111 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2112 
2113 	cmp_type = RX_CMP_TYPE(rxcmp);
2114 
2115 	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2116 		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2117 		goto next_rx_no_prod_no_len;
2118 	}
2119 
2120 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2121 	cp_cons = RING_CMP(tmp_raw_cons);
2122 	rxcmp1 = (struct rx_cmp_ext *)
2123 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2124 
2125 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2126 		return -EBUSY;
2127 
2128 	/* The validity test of the entry must be done before
2129 	 * reading any further.
2130 	 */
2131 	dma_rmb();
2132 	prod = rxr->rx_prod;
2133 
2134 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2135 	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2136 		bnxt_tpa_start(bp, rxr, cmp_type,
2137 			       (struct rx_tpa_start_cmp *)rxcmp,
2138 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
2139 
2140 		*event |= BNXT_RX_EVENT;
2141 		goto next_rx_no_prod_no_len;
2142 
2143 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2144 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2145 				   (struct rx_tpa_end_cmp *)rxcmp,
2146 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2147 
2148 		if (IS_ERR(skb))
2149 			return -EBUSY;
2150 
2151 		rc = -ENOMEM;
2152 		if (likely(skb)) {
2153 			bnxt_deliver_skb(bp, bnapi, skb);
2154 			rc = 1;
2155 		}
2156 		*event |= BNXT_RX_EVENT;
2157 		goto next_rx_no_prod_no_len;
2158 	}
2159 
2160 	cons = rxcmp->rx_cmp_opaque;
2161 	if (unlikely(cons != rxr->rx_next_cons)) {
2162 		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2163 
2164 		/* 0xffff is a forced error, don't print it */
2165 		if (rxr->rx_next_cons != 0xffff)
2166 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2167 				    cons, rxr->rx_next_cons);
2168 		bnxt_sched_reset_rxr(bp, rxr);
2169 		if (rc1)
2170 			return rc1;
2171 		goto next_rx_no_prod_no_len;
2172 	}
2173 	rx_buf = &rxr->rx_buf_ring[cons];
2174 	data = rx_buf->data;
2175 	data_ptr = rx_buf->data_ptr;
2176 	prefetch(data_ptr);
2177 
2178 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2179 	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2180 
2181 	if (agg_bufs) {
2182 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2183 			return -EBUSY;
2184 
2185 		cp_cons = NEXT_CMP(cp_cons);
2186 		*event |= BNXT_AGG_EVENT;
2187 	}
2188 	*event |= BNXT_RX_EVENT;
2189 
2190 	rx_buf->data = NULL;
2191 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2192 		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2193 
2194 		bnxt_reuse_rx_data(rxr, cons, data);
2195 		if (agg_bufs)
2196 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2197 					       false);
2198 
2199 		rc = -EIO;
2200 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2201 			bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2202 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2203 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2204 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
2205 						 rx_err);
2206 				bnxt_sched_reset_rxr(bp, rxr);
2207 			}
2208 		}
2209 		goto next_rx_no_len;
2210 	}
2211 
2212 	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2213 	len = flags >> RX_CMP_LEN_SHIFT;
2214 	dma_addr = rx_buf->mapping;
2215 
2216 	if (bnxt_xdp_attached(bp, rxr)) {
2217 		bnxt_xdp.rxcmp = rxcmp;
2218 		bnxt_xdp.rxcmp1 = rxcmp1;
2219 		bnxt_xdp.cmp_type = cmp_type;
2220 
2221 		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &bnxt_xdp.xdp);
2222 		if (agg_bufs) {
2223 			u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr,
2224 							       &bnxt_xdp.xdp,
2225 							       cp_cons,
2226 							       agg_bufs,
2227 							       false);
2228 			if (!frag_len)
2229 				goto oom_next_rx;
2230 
2231 		}
2232 		xdp_active = true;
2233 	}
2234 
2235 	if (xdp_active) {
2236 		if (bnxt_rx_xdp(bp, rxr, cons, &bnxt_xdp.xdp, data, &data_ptr,
2237 				&len, event)) {
2238 			rc = 1;
2239 			goto next_rx;
2240 		}
2241 		if (xdp_buff_has_frags(&bnxt_xdp.xdp)) {
2242 			sinfo = xdp_get_shared_info_from_buff(&bnxt_xdp.xdp);
2243 			agg_bufs = sinfo->nr_frags;
2244 		} else {
2245 			agg_bufs = 0;
2246 		}
2247 	}
2248 
2249 	if (len <= bp->rx_copybreak) {
2250 		if (!xdp_active)
2251 			skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2252 		else
2253 			skb = bnxt_copy_xdp(bnapi, &bnxt_xdp.xdp, len,
2254 					    dma_addr);
2255 		bnxt_reuse_rx_data(rxr, cons, data);
2256 		if (!skb) {
2257 			if (agg_bufs) {
2258 				if (!xdp_active)
2259 					bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2260 							       agg_bufs, false);
2261 				else
2262 					bnxt_xdp_buff_frags_free(rxr,
2263 								 &bnxt_xdp.xdp);
2264 			}
2265 			goto oom_next_rx;
2266 		}
2267 	} else {
2268 		u32 payload;
2269 
2270 		if (rx_buf->data_ptr == data_ptr)
2271 			payload = misc & RX_CMP_PAYLOAD_OFFSET;
2272 		else
2273 			payload = 0;
2274 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2275 				      payload | len);
2276 		if (!skb)
2277 			goto oom_next_rx;
2278 	}
2279 
2280 	if (agg_bufs) {
2281 		if (!xdp_active) {
2282 			skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2283 						      agg_bufs, false);
2284 			if (!skb)
2285 				goto oom_next_rx;
2286 		} else {
2287 			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2288 						 rxr, &bnxt_xdp.xdp);
2289 			if (!skb) {
2290 				/* we should be able to free the old skb here */
2291 				bnxt_xdp_buff_frags_free(rxr, &bnxt_xdp.xdp);
2292 				goto oom_next_rx;
2293 			}
2294 		}
2295 	}
2296 
2297 	if (RX_CMP_HASH_VALID(rxcmp)) {
2298 		enum pkt_hash_types type;
2299 
2300 		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2301 			type = bnxt_rss_ext_op(bp, rxcmp);
2302 		} else {
2303 			u32 itypes = RX_CMP_ITYPES(rxcmp);
2304 
2305 			if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2306 			    itypes == RX_CMP_FLAGS_ITYPE_UDP)
2307 				type = PKT_HASH_TYPE_L4;
2308 			else
2309 				type = PKT_HASH_TYPE_L3;
2310 		}
2311 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2312 	}
2313 
2314 	if (cmp_type == CMP_TYPE_RX_L2_CMP)
2315 		dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2316 	skb->protocol = eth_type_trans(skb, dev);
2317 
2318 	if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2319 		skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2320 		if (!skb)
2321 			goto next_rx;
2322 	}
2323 
2324 	skb_checksum_none_assert(skb);
2325 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
2326 		if (dev->features & NETIF_F_RXCSUM) {
2327 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2328 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2329 		}
2330 	} else {
2331 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2332 			if (dev->features & NETIF_F_RXCSUM)
2333 				bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2334 		}
2335 	}
2336 
2337 	if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2338 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2339 			u64 ns, ts;
2340 
2341 			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2342 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2343 
2344 				ns = bnxt_timecounter_cyc2time(ptp, ts);
2345 				memset(skb_hwtstamps(skb), 0,
2346 				       sizeof(*skb_hwtstamps(skb)));
2347 				skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2348 			}
2349 		}
2350 	}
2351 	bnxt_deliver_skb(bp, bnapi, skb);
2352 	rc = 1;
2353 
2354 next_rx:
2355 	cpr->rx_packets += 1;
2356 	cpr->rx_bytes += len;
2357 
2358 next_rx_no_len:
2359 	rxr->rx_prod = NEXT_RX(prod);
2360 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2361 
2362 next_rx_no_prod_no_len:
2363 	*raw_cons = tmp_raw_cons;
2364 
2365 	return rc;
2366 
2367 oom_next_rx:
2368 	cpr->sw_stats->rx.rx_oom_discards += 1;
2369 	rc = -ENOMEM;
2370 	goto next_rx;
2371 }
2372 
2373 /* In netpoll mode, if we are using a combined completion ring, we need to
2374  * discard the rx packets and recycle the buffers.
2375  */
2376 static int bnxt_force_rx_discard(struct bnxt *bp,
2377 				 struct bnxt_cp_ring_info *cpr,
2378 				 u32 *raw_cons, u8 *event)
2379 {
2380 	u32 tmp_raw_cons = *raw_cons;
2381 	struct rx_cmp_ext *rxcmp1;
2382 	struct rx_cmp *rxcmp;
2383 	u16 cp_cons;
2384 	u8 cmp_type;
2385 	int rc;
2386 
2387 	cp_cons = RING_CMP(tmp_raw_cons);
2388 	rxcmp = (struct rx_cmp *)
2389 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2390 
2391 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2392 	cp_cons = RING_CMP(tmp_raw_cons);
2393 	rxcmp1 = (struct rx_cmp_ext *)
2394 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2395 
2396 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2397 		return -EBUSY;
2398 
2399 	/* The validity test of the entry must be done before
2400 	 * reading any further.
2401 	 */
2402 	dma_rmb();
2403 	cmp_type = RX_CMP_TYPE(rxcmp);
2404 	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2405 	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2406 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2407 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2408 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2409 		struct rx_tpa_end_cmp_ext *tpa_end1;
2410 
2411 		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2412 		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2413 			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2414 	}
2415 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2416 	if (rc && rc != -EBUSY)
2417 		cpr->sw_stats->rx.rx_netpoll_discards += 1;
2418 	return rc;
2419 }
2420 
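/* Read a firmware health register, dispatching on the register type:
 * PCI config space, a GRC window mapped into BAR0, or a direct BAR0/BAR1
 * offset.  The reset-in-progress register is additionally masked.
 */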
2421 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2422 {
2423 	struct bnxt_fw_health *fw_health = bp->fw_health;
2424 	u32 reg = fw_health->regs[reg_idx];
2425 	u32 reg_type, reg_off, val = 0;
2426 
2427 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2428 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2429 	switch (reg_type) {
2430 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
2431 		pci_read_config_dword(bp->pdev, reg_off, &val);
2432 		break;
2433 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
2434 		reg_off = fw_health->mapped_regs[reg_idx];
2435 		fallthrough;
2436 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2437 		val = readl(bp->bar0 + reg_off);
2438 		break;
2439 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2440 		val = readl(bp->bar1 + reg_off);
2441 		break;
2442 	}
2443 	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2444 		val &= fw_health->fw_reset_inprog_reg_mask;
2445 	return val;
2446 }
2447 
2448 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2449 {
2450 	int i;
2451 
2452 	for (i = 0; i < bp->rx_nr_rings; i++) {
2453 		u16 grp_idx = bp->rx_ring[i].bnapi->index;
2454 		struct bnxt_ring_grp_info *grp_info;
2455 
2456 		grp_info = &bp->grp_info[grp_idx];
2457 		if (grp_info->agg_fw_ring_id == ring_id)
2458 			return grp_idx;
2459 	}
2460 	return INVALID_HW_RING_ID;
2461 }
2462 
2463 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2464 {
2465 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2466 
2467 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2468 		return link_info->force_link_speed2;
2469 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2470 		return link_info->force_pam4_link_speed;
2471 	return link_info->force_link_speed;
2472 }
2473 
2474 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2475 {
2476 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2477 
2478 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2479 		link_info->req_link_speed = link_info->force_link_speed2;
2480 		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2481 		switch (link_info->req_link_speed) {
2482 		case BNXT_LINK_SPEED_50GB_PAM4:
2483 		case BNXT_LINK_SPEED_100GB_PAM4:
2484 		case BNXT_LINK_SPEED_200GB_PAM4:
2485 		case BNXT_LINK_SPEED_400GB_PAM4:
2486 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2487 			break;
2488 		case BNXT_LINK_SPEED_100GB_PAM4_112:
2489 		case BNXT_LINK_SPEED_200GB_PAM4_112:
2490 		case BNXT_LINK_SPEED_400GB_PAM4_112:
2491 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2492 			break;
2493 		default:
2494 			link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2495 		}
2496 		return;
2497 	}
2498 	link_info->req_link_speed = link_info->force_link_speed;
2499 	link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2500 	if (link_info->force_pam4_link_speed) {
2501 		link_info->req_link_speed = link_info->force_pam4_link_speed;
2502 		link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2503 	}
2504 }
2505 
2506 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2507 {
2508 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2509 
2510 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2511 		link_info->advertising = link_info->auto_link_speeds2;
2512 		return;
2513 	}
2514 	link_info->advertising = link_info->auto_link_speeds;
2515 	link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2516 }
2517 
2518 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2519 {
2520 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2521 
2522 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2523 		if (link_info->req_link_speed != link_info->force_link_speed2)
2524 			return true;
2525 		return false;
2526 	}
2527 	if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2528 	    link_info->req_link_speed != link_info->force_link_speed)
2529 		return true;
2530 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2531 	    link_info->req_link_speed != link_info->force_pam4_link_speed)
2532 		return true;
2533 	return false;
2534 }
2535 
2536 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2537 {
2538 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2539 
2540 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2541 		if (link_info->advertising != link_info->auto_link_speeds2)
2542 			return true;
2543 		return false;
2544 	}
2545 	if (link_info->advertising != link_info->auto_link_speeds ||
2546 	    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2547 		return true;
2548 	return false;
2549 }
2550 
2551 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2552 {
2553 	u32 flags = bp->ctx->ctx_arr[type].flags;
2554 
2555 	return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2556 		((flags & BNXT_CTX_MEM_FW_TRACE) ||
2557 		 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2558 }
2559 
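/* Set up the bookkeeping for one firmware trace region in backing store
 * context memory: record the trace and context types, locate the last byte
 * of the region and plant the BNXT_TRACE_BUF_MAGIC_BYTE marker there.
 */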
2560 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2561 {
2562 	u32 mem_size, pages, rem_bytes, magic_byte_offset;
2563 	u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2564 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2565 	struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2566 	struct bnxt_bs_trace_info *bs_trace;
2567 	int last_pg;
2568 
2569 	if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2570 		return;
2571 
2572 	mem_size = ctxm->max_entries * ctxm->entry_size;
2573 	rem_bytes = mem_size % BNXT_PAGE_SIZE;
2574 	pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2575 
2576 	last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2577 	magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2578 
2579 	rmem = &ctx_pg[0].ring_mem;
2580 	bs_trace = &bp->bs_trace[trace_type];
2581 	bs_trace->ctx_type = ctxm->type;
2582 	bs_trace->trace_type = trace_type;
2583 	if (pages > MAX_CTX_PAGES) {
2584 		int last_pg_dir = rmem->nr_pages - 1;
2585 
2586 		rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2587 		bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2588 	} else {
2589 		bs_trace->magic_byte = rmem->pg_arr[last_pg];
2590 	}
2591 	bs_trace->magic_byte += magic_byte_offset;
2592 	*bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2593 }
2594 
2595 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1)				\
2596 	(((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2597 	 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2598 
2599 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2)				\
2600 	(((data2) &							\
2601 	  ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2602 	 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2603 
2604 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2)				\
2605 	((data2) &							\
2606 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2607 
2608 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)			\
2609 	(((data2) &							\
2610 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2611 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2612 
2613 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)			\
2614 	((data1) &							\
2615 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2616 
2617 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)		\
2618 	(((data1) &							\
2619 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2620 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2621 
2622 /* Return true if the workqueue has to be scheduled */
2623 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2624 {
2625 	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2626 
2627 	switch (err_type) {
2628 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2629 		netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2630 			   BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2631 		break;
2632 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2633 		netdev_warn(bp->dev, "Pause Storm detected!\n");
2634 		break;
2635 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2636 		netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2637 		break;
2638 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2639 		u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2640 		char *threshold_type;
2641 		bool notify = false;
2642 		char *dir_str;
2643 
2644 		switch (type) {
2645 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2646 			threshold_type = "warning";
2647 			break;
2648 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2649 			threshold_type = "critical";
2650 			break;
2651 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2652 			threshold_type = "fatal";
2653 			break;
2654 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2655 			threshold_type = "shutdown";
2656 			break;
2657 		default:
2658 			netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2659 			return false;
2660 		}
2661 		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2662 			dir_str = "above";
2663 			notify = true;
2664 		} else {
2665 			dir_str = "below";
2666 		}
2667 		netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2668 			    dir_str, threshold_type);
2669 		netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2670 			    BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2671 			    BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2672 		if (notify) {
2673 			bp->thermal_threshold_type = type;
2674 			set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2675 			return true;
2676 		}
2677 		return false;
2678 	}
2679 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2680 		netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2681 		break;
2682 	default:
2683 		netdev_err(bp->dev, "FW reported unknown error type %u\n",
2684 			   err_type);
2685 		break;
2686 	}
2687 	return false;
2688 }
2689 
2690 #define BNXT_GET_EVENT_PORT(data)	\
2691 	((data) &			\
2692 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2693 
2694 #define BNXT_EVENT_RING_TYPE(data2)	\
2695 	((data2) &			\
2696 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2697 
2698 #define BNXT_EVENT_RING_TYPE_RX(data2)	\
2699 	(BNXT_EVENT_RING_TYPE(data2) ==	\
2700 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2701 
2702 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)	\
2703 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2704 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2705 
2706 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)	\
2707 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2708 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2709 
2710 #define BNXT_PHC_BITS	48
2711 
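/* Process one firmware async event completion: update link, reset, recovery
 * and PTP state as appropriate, set the matching sp_event bits, and kick the
 * slow-path workqueue for events that require it.  The event is also
 * forwarded to the ULP driver.
 */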
2712 static int bnxt_async_event_process(struct bnxt *bp,
2713 				    struct hwrm_async_event_cmpl *cmpl)
2714 {
2715 	u16 event_id = le16_to_cpu(cmpl->event_id);
2716 	u32 data1 = le32_to_cpu(cmpl->event_data1);
2717 	u32 data2 = le32_to_cpu(cmpl->event_data2);
2718 
2719 	netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2720 		   event_id, data1, data2);
2721 
2722 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
2723 	switch (event_id) {
2724 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2725 		struct bnxt_link_info *link_info = &bp->link_info;
2726 
2727 		if (BNXT_VF(bp))
2728 			goto async_event_process_exit;
2729 
2730 		/* print unsupported speed warning in forced speed mode only */
2731 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2732 		    (data1 & 0x20000)) {
2733 			u16 fw_speed = bnxt_get_force_speed(link_info);
2734 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2735 
2736 			if (speed != SPEED_UNKNOWN)
2737 				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2738 					    speed);
2739 		}
2740 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2741 	}
2742 		fallthrough;
2743 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2744 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2745 		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2746 		fallthrough;
2747 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2748 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2749 		break;
2750 	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2751 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2752 		break;
2753 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2754 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
2755 
2756 		if (BNXT_VF(bp))
2757 			break;
2758 
2759 		if (bp->pf.port_id != port_id)
2760 			break;
2761 
2762 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2763 		break;
2764 	}
2765 	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2766 		if (BNXT_PF(bp))
2767 			goto async_event_process_exit;
2768 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2769 		break;
2770 	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2771 		char *type_str = "Solicited";
2772 
2773 		if (!bp->fw_health)
2774 			goto async_event_process_exit;
2775 
2776 		bp->fw_reset_timestamp = jiffies;
2777 		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2778 		if (!bp->fw_reset_min_dsecs)
2779 			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2780 		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2781 		if (!bp->fw_reset_max_dsecs)
2782 			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2783 		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2784 			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2785 		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2786 			type_str = "Fatal";
2787 			bp->fw_health->fatalities++;
2788 			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2789 		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2790 			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2791 			type_str = "Non-fatal";
2792 			bp->fw_health->survivals++;
2793 			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2794 		}
2795 		netif_warn(bp, hw, bp->dev,
2796 			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2797 			   type_str, data1, data2,
2798 			   bp->fw_reset_min_dsecs * 100,
2799 			   bp->fw_reset_max_dsecs * 100);
2800 		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2801 		break;
2802 	}
2803 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2804 		struct bnxt_fw_health *fw_health = bp->fw_health;
2805 		char *status_desc = "healthy";
2806 		u32 status;
2807 
2808 		if (!fw_health)
2809 			goto async_event_process_exit;
2810 
2811 		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2812 			fw_health->enabled = false;
2813 			netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2814 			break;
2815 		}
2816 		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2817 		fw_health->tmr_multiplier =
2818 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2819 				     bp->current_interval * 10);
2820 		fw_health->tmr_counter = fw_health->tmr_multiplier;
2821 		if (!fw_health->enabled)
2822 			fw_health->last_fw_heartbeat =
2823 				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2824 		fw_health->last_fw_reset_cnt =
2825 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2826 		status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2827 		if (status != BNXT_FW_STATUS_HEALTHY)
2828 			status_desc = "unhealthy";
2829 		netif_info(bp, drv, bp->dev,
2830 			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2831 			   fw_health->primary ? "primary" : "backup", status,
2832 			   status_desc, fw_health->last_fw_reset_cnt);
2833 		if (!fw_health->enabled) {
2834 			/* Make sure tmr_counter is set and visible to
2835 			 * bnxt_health_check() before setting enabled to true.
2836 			 */
2837 			smp_wmb();
2838 			fw_health->enabled = true;
2839 		}
2840 		goto async_event_process_exit;
2841 	}
2842 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2843 		netif_notice(bp, hw, bp->dev,
2844 			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2845 			     data1, data2);
2846 		goto async_event_process_exit;
2847 	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2848 		struct bnxt_rx_ring_info *rxr;
2849 		u16 grp_idx;
2850 
2851 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2852 			goto async_event_process_exit;
2853 
2854 		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2855 			    BNXT_EVENT_RING_TYPE(data2), data1);
2856 		if (!BNXT_EVENT_RING_TYPE_RX(data2))
2857 			goto async_event_process_exit;
2858 
2859 		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2860 		if (grp_idx == INVALID_HW_RING_ID) {
2861 			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2862 				    data1);
2863 			goto async_event_process_exit;
2864 		}
2865 		rxr = bp->bnapi[grp_idx]->rx_ring;
2866 		bnxt_sched_reset_rxr(bp, rxr);
2867 		goto async_event_process_exit;
2868 	}
2869 	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2870 		struct bnxt_fw_health *fw_health = bp->fw_health;
2871 
2872 		netif_notice(bp, hw, bp->dev,
2873 			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2874 			     data1, data2);
2875 		if (fw_health) {
2876 			fw_health->echo_req_data1 = data1;
2877 			fw_health->echo_req_data2 = data2;
2878 			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2879 			break;
2880 		}
2881 		goto async_event_process_exit;
2882 	}
2883 	case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2884 		bnxt_ptp_pps_event(bp, data1, data2);
2885 		goto async_event_process_exit;
2886 	}
2887 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2888 		if (bnxt_event_error_report(bp, data1, data2))
2889 			break;
2890 		goto async_event_process_exit;
2891 	}
2892 	case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2893 		switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2894 		case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2895 			if (BNXT_PTP_USE_RTC(bp)) {
2896 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2897 				unsigned long flags;
2898 				u64 ns;
2899 
2900 				if (!ptp)
2901 					goto async_event_process_exit;
2902 
2903 				bnxt_ptp_update_current_time(bp);
2904 				ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2905 				       BNXT_PHC_BITS) | ptp->current_time);
2906 				write_seqlock_irqsave(&ptp->ptp_lock, flags);
2907 				bnxt_ptp_rtc_timecounter_init(ptp, ns);
2908 				write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2909 			}
2910 			break;
2911 		}
2912 		goto async_event_process_exit;
2913 	}
2914 	case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2915 		u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2916 
2917 		hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2918 		goto async_event_process_exit;
2919 	}
2920 	case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2921 		u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2922 		u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2923 
2924 		if (type >= ARRAY_SIZE(bp->bs_trace))
2925 			goto async_event_process_exit;
2926 		bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2927 		goto async_event_process_exit;
2928 	}
2929 	default:
2930 		goto async_event_process_exit;
2931 	}
2932 	__bnxt_queue_sp_work(bp);
2933 async_event_process_exit:
2934 	bnxt_ulp_async_events(bp, cmpl);
2935 	return 0;
2936 }
2937 
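/* Dispatch HWRM-related completions seen on the completion ring: command
 * completions, forwarded VF requests, and async events from firmware.
 */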
2938 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2939 {
2940 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2941 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2942 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2943 				(struct hwrm_fwd_req_cmpl *)txcmp;
2944 
2945 	switch (cmpl_type) {
2946 	case CMPL_BASE_TYPE_HWRM_DONE:
2947 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
2948 		hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2949 		break;
2950 
2951 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2952 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2953 
2954 		if ((vf_id < bp->pf.first_vf_id) ||
2955 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2956 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2957 				   vf_id);
2958 			return -EINVAL;
2959 		}
2960 
2961 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2962 		bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2963 		break;
2964 
2965 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2966 		bnxt_async_event_process(bp,
2967 					 (struct hwrm_async_event_cmpl *)txcmp);
2968 		break;
2969 
2970 	default:
2971 		break;
2972 	}
2973 
2974 	return 0;
2975 }
2976 
2977 static bool bnxt_vnic_is_active(struct bnxt *bp)
2978 {
2979 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2980 
2981 	return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2982 }
2983 
2984 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2985 {
2986 	struct bnxt_napi *bnapi = dev_instance;
2987 	struct bnxt *bp = bnapi->bp;
2988 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2989 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2990 
2991 	cpr->event_ctr++;
2992 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2993 	napi_schedule(&bnapi->napi);
2994 	return IRQ_HANDLED;
2995 }
2996 
2997 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2998 {
2999 	u32 raw_cons = cpr->cp_raw_cons;
3000 	u16 cons = RING_CMP(raw_cons);
3001 	struct tx_cmp *txcmp;
3002 
3003 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3004 
3005 	return TX_CMP_VALID(txcmp, raw_cons);
3006 }
3007 
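/* Core completion ring poll loop.  Process TX, RX and HWRM completions up to
 * the NAPI budget, flush pending XDP redirects, and ring the TX doorbell if
 * new TX BDs were produced during processing.  Sets cpr->has_more_work if the
 * budget was exhausted before the ring was drained.
 */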
3008 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3009 			    int budget)
3010 {
3011 	struct bnxt_napi *bnapi = cpr->bnapi;
3012 	u32 raw_cons = cpr->cp_raw_cons;
3013 	bool flush_xdp = false;
3014 	u32 cons;
3015 	int rx_pkts = 0;
3016 	u8 event = 0;
3017 	struct tx_cmp *txcmp;
3018 
3019 	cpr->has_more_work = 0;
3020 	cpr->had_work_done = 1;
3021 	while (1) {
3022 		u8 cmp_type;
3023 		int rc;
3024 
3025 		cons = RING_CMP(raw_cons);
3026 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3027 
3028 		if (!TX_CMP_VALID(txcmp, raw_cons))
3029 			break;
3030 
3031 		/* The validity test of the entry must be done before
3032 		 * reading any further.
3033 		 */
3034 		dma_rmb();
3035 		cmp_type = TX_CMP_TYPE(txcmp);
3036 		if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3037 		    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3038 			u32 opaque = txcmp->tx_cmp_opaque;
3039 			struct bnxt_tx_ring_info *txr;
3040 			u16 tx_freed;
3041 
3042 			txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3043 			event |= BNXT_TX_CMP_EVENT;
3044 			if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3045 				txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3046 			else
3047 				txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3048 			tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3049 				   bp->tx_ring_mask;
3050 			/* return full budget so NAPI will complete. */
3051 			if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3052 				rx_pkts = budget;
3053 				raw_cons = NEXT_RAW_CMP(raw_cons);
3054 				if (budget)
3055 					cpr->has_more_work = 1;
3056 				break;
3057 			}
3058 		} else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3059 			bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3060 		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3061 			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3062 			if (likely(budget))
3063 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3064 			else
3065 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3066 							   &event);
3067 			if (event & BNXT_REDIRECT_EVENT)
3068 				flush_xdp = true;
3069 			if (likely(rc >= 0))
3070 				rx_pkts += rc;
3071 			/* Increment rx_pkts when rc is -ENOMEM to count towards
3072 			 * the NAPI budget.  Otherwise, we may potentially loop
3073 			 * here forever if we consistently cannot allocate
3074 			 * buffers.
3075 			 */
3076 			else if (rc == -ENOMEM && budget)
3077 				rx_pkts++;
3078 			else if (rc == -EBUSY)	/* partial completion */
3079 				break;
3080 		} else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3081 				    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3082 				    cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3083 			bnxt_hwrm_handler(bp, txcmp);
3084 		}
3085 		raw_cons = NEXT_RAW_CMP(raw_cons);
3086 
3087 		if (rx_pkts && rx_pkts == budget) {
3088 			cpr->has_more_work = 1;
3089 			break;
3090 		}
3091 	}
3092 
3093 	if (flush_xdp) {
3094 		xdp_do_flush();
3095 		event &= ~BNXT_REDIRECT_EVENT;
3096 	}
3097 
3098 	if (event & BNXT_TX_EVENT) {
3099 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3100 		u16 prod = txr->tx_prod;
3101 
3102 		/* Sync BD data before updating doorbell */
3103 		wmb();
3104 
3105 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3106 		event &= ~BNXT_TX_EVENT;
3107 	}
3108 
3109 	cpr->cp_raw_cons = raw_cons;
3110 	bnapi->events |= event;
3111 	return rx_pkts;
3112 }
3113 
3114 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3115 				  int budget)
3116 {
3117 	if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3118 		bnapi->tx_int(bp, bnapi, budget);
3119 
3120 	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3121 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3122 
3123 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3124 		bnapi->events &= ~BNXT_RX_EVENT;
3125 	}
3126 	if (bnapi->events & BNXT_AGG_EVENT) {
3127 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3128 
3129 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3130 		bnapi->events &= ~BNXT_AGG_EVENT;
3131 	}
3132 }
3133 
3134 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3135 			  int budget)
3136 {
3137 	struct bnxt_napi *bnapi = cpr->bnapi;
3138 	int rx_pkts;
3139 
3140 	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3141 
3142 	/* ACK completion ring before freeing tx ring and producing new
3143 	 * buffers in rx/agg rings to prevent overflowing the completion
3144 	 * ring.
3145 	 */
3146 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3147 
3148 	__bnxt_poll_work_done(bp, bnapi, budget);
3149 	return rx_pkts;
3150 }
3151 
3152 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3153 {
3154 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3155 	struct bnxt *bp = bnapi->bp;
3156 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3157 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3158 	struct tx_cmp *txcmp;
3159 	struct rx_cmp_ext *rxcmp1;
3160 	u32 cp_cons, tmp_raw_cons;
3161 	u32 raw_cons = cpr->cp_raw_cons;
3162 	bool flush_xdp = false;
3163 	u32 rx_pkts = 0;
3164 	u8 event = 0;
3165 
3166 	while (1) {
3167 		int rc;
3168 
3169 		cp_cons = RING_CMP(raw_cons);
3170 		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3171 
3172 		if (!TX_CMP_VALID(txcmp, raw_cons))
3173 			break;
3174 
3175 		/* The validity test of the entry must be done before
3176 		 * reading any further.
3177 		 */
3178 		dma_rmb();
3179 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3180 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3181 			cp_cons = RING_CMP(tmp_raw_cons);
3182 			rxcmp1 = (struct rx_cmp_ext *)
3183 			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3184 
3185 			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3186 				break;
3187 
3188 			/* force an error to recycle the buffer */
3189 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3190 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3191 
3192 			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3193 			if (likely(rc == -EIO) && budget)
3194 				rx_pkts++;
3195 			else if (rc == -EBUSY)	/* partial completion */
3196 				break;
3197 			if (event & BNXT_REDIRECT_EVENT)
3198 				flush_xdp = true;
3199 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
3200 				    CMPL_BASE_TYPE_HWRM_DONE)) {
3201 			bnxt_hwrm_handler(bp, txcmp);
3202 		} else {
3203 			netdev_err(bp->dev,
3204 				   "Invalid completion received on special ring\n");
3205 		}
3206 		raw_cons = NEXT_RAW_CMP(raw_cons);
3207 
3208 		if (rx_pkts == budget)
3209 			break;
3210 	}
3211 
3212 	cpr->cp_raw_cons = raw_cons;
3213 	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3214 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3215 
3216 	if (event & BNXT_AGG_EVENT)
3217 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3218 	if (flush_xdp)
3219 		xdp_do_flush();
3220 
3221 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3222 		napi_complete_done(napi, rx_pkts);
3223 		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3224 	}
3225 	return rx_pkts;
3226 }
3227 
3228 static int bnxt_poll(struct napi_struct *napi, int budget)
3229 {
3230 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3231 	struct bnxt *bp = bnapi->bp;
3232 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3233 	int work_done = 0;
3234 
3235 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3236 		napi_complete(napi);
3237 		return 0;
3238 	}
3239 	while (1) {
3240 		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3241 
3242 		if (work_done >= budget) {
3243 			if (!budget)
3244 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3245 			break;
3246 		}
3247 
3248 		if (!bnxt_has_work(bp, cpr)) {
3249 			if (napi_complete_done(napi, work_done))
3250 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3251 			break;
3252 		}
3253 	}
3254 	if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3255 		struct dim_sample dim_sample = {};
3256 
3257 		dim_update_sample(cpr->event_ctr,
3258 				  cpr->rx_packets,
3259 				  cpr->rx_bytes,
3260 				  &dim_sample);
3261 		net_dim(&cpr->dim, &dim_sample);
3262 	}
3263 	return work_done;
3264 }
3265 
3266 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3267 {
3268 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3269 	int i, work_done = 0;
3270 
3271 	for (i = 0; i < cpr->cp_ring_count; i++) {
3272 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3273 
3274 		if (cpr2->had_nqe_notify) {
3275 			work_done += __bnxt_poll_work(bp, cpr2,
3276 						      budget - work_done);
3277 			cpr->has_more_work |= cpr2->has_more_work;
3278 		}
3279 	}
3280 	return work_done;
3281 }
3282 
3283 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3284 				 u64 dbr_type, int budget)
3285 {
3286 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3287 	int i;
3288 
3289 	for (i = 0; i < cpr->cp_ring_count; i++) {
3290 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3291 		struct bnxt_db_info *db;
3292 
3293 		if (cpr2->had_work_done) {
3294 			u32 tgl = 0;
3295 
3296 			if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3297 				cpr2->had_nqe_notify = 0;
3298 				tgl = cpr2->toggle;
3299 			}
3300 			db = &cpr2->cp_db;
3301 			bnxt_writeq(bp,
3302 				    db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3303 				    DB_RING_IDX(db, cpr2->cp_raw_cons),
3304 				    db->doorbell);
3305 			cpr2->had_work_done = 0;
3306 		}
3307 	}
3308 	__bnxt_poll_work_done(bp, bnapi, budget);
3309 }
3310 
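/* NAPI poll handler for P5+ chips, which use a notification queue (NQ) in
 * front of the completion rings.  Drain NQ entries, dispatching CQ
 * notifications to __bnxt_poll_work() on the referenced completion ring,
 * then re-arm the CQs and the NQ when all work is done.
 */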
3311 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3312 {
3313 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3314 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3315 	struct bnxt_cp_ring_info *cpr_rx;
3316 	u32 raw_cons = cpr->cp_raw_cons;
3317 	struct bnxt *bp = bnapi->bp;
3318 	struct nqe_cn *nqcmp;
3319 	int work_done = 0;
3320 	u32 cons;
3321 
3322 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3323 		napi_complete(napi);
3324 		return 0;
3325 	}
3326 	if (cpr->has_more_work) {
3327 		cpr->has_more_work = 0;
3328 		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3329 	}
3330 	while (1) {
3331 		u16 type;
3332 
3333 		cons = RING_CMP(raw_cons);
3334 		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3335 
3336 		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3337 			if (cpr->has_more_work)
3338 				break;
3339 
3340 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3341 					     budget);
3342 			cpr->cp_raw_cons = raw_cons;
3343 			if (napi_complete_done(napi, work_done))
3344 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3345 						  cpr->cp_raw_cons);
3346 			goto poll_done;
3347 		}
3348 
3349 		/* The valid test of the entry must be done before reading
3350 		 * any further.
3351 		 */
3352 		dma_rmb();
3353 
3354 		type = le16_to_cpu(nqcmp->type);
3355 		if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3356 			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3357 			u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3358 			struct bnxt_cp_ring_info *cpr2;
3359 
3360 			/* No more budget for RX work */
3361 			if (budget && work_done >= budget &&
3362 			    cq_type == BNXT_NQ_HDL_TYPE_RX)
3363 				break;
3364 
3365 			idx = BNXT_NQ_HDL_IDX(idx);
3366 			cpr2 = &cpr->cp_ring_arr[idx];
3367 			cpr2->had_nqe_notify = 1;
3368 			cpr2->toggle = NQE_CN_TOGGLE(type);
3369 			work_done += __bnxt_poll_work(bp, cpr2,
3370 						      budget - work_done);
3371 			cpr->has_more_work |= cpr2->has_more_work;
3372 		} else {
3373 			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3374 		}
3375 		raw_cons = NEXT_RAW_CMP(raw_cons);
3376 	}
3377 	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3378 	if (raw_cons != cpr->cp_raw_cons) {
3379 		cpr->cp_raw_cons = raw_cons;
3380 		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3381 	}
3382 poll_done:
3383 	cpr_rx = &cpr->cp_ring_arr[0];
3384 	if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3385 	    (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3386 		struct dim_sample dim_sample = {};
3387 
3388 		dim_update_sample(cpr->event_ctr,
3389 				  cpr_rx->rx_packets,
3390 				  cpr_rx->rx_bytes,
3391 				  &dim_sample);
3392 		net_dim(&cpr->dim, &dim_sample);
3393 	}
3394 	return work_done;
3395 }
3396 
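/* Unmap and free every pending TX buffer (SKBs and redirected XDP frames)
 * on one TX ring, then reset the corresponding netdev TX queue state.
 */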
3397 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3398 				       struct bnxt_tx_ring_info *txr, int idx)
3399 {
3400 	int i, max_idx;
3401 	struct pci_dev *pdev = bp->pdev;
3402 
3403 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3404 
3405 	for (i = 0; i < max_idx;) {
3406 		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3407 		struct sk_buff *skb;
3408 		int j, last;
3409 
3410 		if (idx < bp->tx_nr_rings_xdp &&
3411 		    tx_buf->action == XDP_REDIRECT) {
3412 			dma_unmap_single(&pdev->dev,
3413 					 dma_unmap_addr(tx_buf, mapping),
3414 					 dma_unmap_len(tx_buf, len),
3415 					 DMA_TO_DEVICE);
3416 			xdp_return_frame(tx_buf->xdpf);
3417 			tx_buf->action = 0;
3418 			tx_buf->xdpf = NULL;
3419 			i++;
3420 			continue;
3421 		}
3422 
3423 		skb = tx_buf->skb;
3424 		if (!skb) {
3425 			i++;
3426 			continue;
3427 		}
3428 
3429 		tx_buf->skb = NULL;
3430 
3431 		if (tx_buf->is_push) {
3432 			dev_kfree_skb(skb);
3433 			i += 2;
3434 			continue;
3435 		}
3436 
3437 		dma_unmap_single(&pdev->dev,
3438 				 dma_unmap_addr(tx_buf, mapping),
3439 				 skb_headlen(skb),
3440 				 DMA_TO_DEVICE);
3441 
3442 		last = tx_buf->nr_frags;
3443 		i += 2;
3444 		for (j = 0; j < last; j++, i++) {
3445 			int ring_idx = i & bp->tx_ring_mask;
3446 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3447 
3448 			tx_buf = &txr->tx_buf_ring[ring_idx];
3449 			netmem_dma_unmap_page_attrs(&pdev->dev,
3450 						    dma_unmap_addr(tx_buf,
3451 								   mapping),
3452 						    skb_frag_size(frag),
3453 						    DMA_TO_DEVICE, 0);
3454 		}
3455 		dev_kfree_skb(skb);
3456 	}
3457 	netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3458 }
3459 
3460 static void bnxt_free_tx_skbs(struct bnxt *bp)
3461 {
3462 	int i;
3463 
3464 	if (!bp->tx_ring)
3465 		return;
3466 
3467 	for (i = 0; i < bp->tx_nr_rings; i++) {
3468 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3469 
3470 		if (!txr->tx_buf_ring)
3471 			continue;
3472 
3473 		bnxt_free_one_tx_ring_skbs(bp, txr, i);
3474 	}
3475 
3476 	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3477 		bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3478 }
3479 
3480 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3481 {
3482 	int i, max_idx;
3483 
3484 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3485 
3486 	for (i = 0; i < max_idx; i++) {
3487 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3488 		void *data = rx_buf->data;
3489 
3490 		if (!data)
3491 			continue;
3492 
3493 		rx_buf->data = NULL;
3494 		if (BNXT_RX_PAGE_MODE(bp))
3495 			page_pool_recycle_direct(rxr->page_pool, data);
3496 		else
3497 			page_pool_free_va(rxr->head_pool, data, true);
3498 	}
3499 }
3500 
3501 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3502 {
3503 	int i, max_idx;
3504 
3505 	max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3506 
3507 	for (i = 0; i < max_idx; i++) {
3508 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3509 		netmem_ref netmem = rx_agg_buf->netmem;
3510 
3511 		if (!netmem)
3512 			continue;
3513 
3514 		rx_agg_buf->netmem = 0;
3515 		__clear_bit(i, rxr->rx_agg_bmap);
3516 
3517 		page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3518 	}
3519 }
3520 
3521 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3522 					struct bnxt_rx_ring_info *rxr)
3523 {
3524 	int i;
3525 
3526 	for (i = 0; i < bp->max_tpa; i++) {
3527 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3528 		u8 *data = tpa_info->data;
3529 
3530 		if (!data)
3531 			continue;
3532 
3533 		tpa_info->data = NULL;
3534 		page_pool_free_va(rxr->head_pool, data, false);
3535 	}
3536 }
3537 
3538 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3539 				       struct bnxt_rx_ring_info *rxr)
3540 {
3541 	struct bnxt_tpa_idx_map *map;
3542 
3543 	if (!rxr->rx_tpa)
3544 		goto skip_rx_tpa_free;
3545 
3546 	bnxt_free_one_tpa_info_data(bp, rxr);
3547 
3548 skip_rx_tpa_free:
3549 	if (!rxr->rx_buf_ring)
3550 		goto skip_rx_buf_free;
3551 
3552 	bnxt_free_one_rx_ring(bp, rxr);
3553 
3554 skip_rx_buf_free:
3555 	if (!rxr->rx_agg_ring)
3556 		goto skip_rx_agg_free;
3557 
3558 	bnxt_free_one_rx_agg_ring(bp, rxr);
3559 
3560 skip_rx_agg_free:
3561 	map = rxr->rx_tpa_idx_map;
3562 	if (map)
3563 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3564 }
3565 
3566 static void bnxt_free_rx_skbs(struct bnxt *bp)
3567 {
3568 	int i;
3569 
3570 	if (!bp->rx_ring)
3571 		return;
3572 
3573 	for (i = 0; i < bp->rx_nr_rings; i++)
3574 		bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3575 }
3576 
3577 static void bnxt_free_skbs(struct bnxt *bp)
3578 {
3579 	bnxt_free_tx_skbs(bp);
3580 	bnxt_free_rx_skbs(bp);
3581 }
3582 
3583 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3584 {
3585 	u8 init_val = ctxm->init_value;
3586 	u16 offset = ctxm->init_offset;
3587 	u8 *p2 = p;
3588 	int i;
3589 
3590 	if (!init_val)
3591 		return;
3592 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3593 		memset(p, init_val, len);
3594 		return;
3595 	}
3596 	for (i = 0; i < len; i += ctxm->entry_size)
3597 		*(p2 + i + offset) = init_val;
3598 }
3599 
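/* Copy the [head, tail) region of a paged ring buffer into @buf at @offset,
 * capped at the pages available from the starting position.  A NULL @buf
 * only computes the length.  Returns the number of bytes copied.
 */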
3600 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3601 			       void *buf, size_t offset, size_t head,
3602 			       size_t tail)
3603 {
3604 	int i, head_page, start_idx, source_offset;
3605 	size_t len, rem_len, total_len, max_bytes;
3606 
3607 	head_page = head / rmem->page_size;
3608 	source_offset = head % rmem->page_size;
3609 	total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3610 	if (!total_len)
3611 		total_len = MAX_CTX_BYTES;
3612 	start_idx = head_page % MAX_CTX_PAGES;
3613 	max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3614 		    source_offset;
3615 	total_len = min(total_len, max_bytes);
3616 	rem_len = total_len;
3617 
3618 	for (i = start_idx; rem_len; i++, source_offset = 0) {
3619 		len = min((size_t)(rmem->page_size - source_offset), rem_len);
3620 		if (buf)
3621 			memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3622 			       len);
3623 		offset += len;
3624 		rem_len -= len;
3625 	}
3626 	return total_len;
3627 }
3628 
3629 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3630 {
3631 	struct pci_dev *pdev = bp->pdev;
3632 	int i;
3633 
3634 	if (!rmem->pg_arr)
3635 		goto skip_pages;
3636 
3637 	for (i = 0; i < rmem->nr_pages; i++) {
3638 		if (!rmem->pg_arr[i])
3639 			continue;
3640 
3641 		dma_free_coherent(&pdev->dev, rmem->page_size,
3642 				  rmem->pg_arr[i], rmem->dma_arr[i]);
3643 
3644 		rmem->pg_arr[i] = NULL;
3645 	}
3646 skip_pages:
3647 	if (rmem->pg_tbl) {
3648 		size_t pg_tbl_size = rmem->nr_pages * 8;
3649 
3650 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3651 			pg_tbl_size = rmem->page_size;
3652 		dma_free_coherent(&pdev->dev, pg_tbl_size,
3653 				  rmem->pg_tbl, rmem->pg_tbl_map);
3654 		rmem->pg_tbl = NULL;
3655 	}
3656 	if (rmem->vmem_size && *rmem->vmem) {
3657 		vfree(*rmem->vmem);
3658 		*rmem->vmem = NULL;
3659 	}
3660 }
3661 
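/* Allocate the DMA-coherent pages backing a ring and, for multi-page or
 * multi-level rings, the page table that points to them (with the PTE
 * valid, next-to-last and last bits set as needed).  Also allocate the
 * optional software ring (vmem) that shadows the hardware descriptors.
 */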
3662 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3663 {
3664 	struct pci_dev *pdev = bp->pdev;
3665 	u64 valid_bit = 0;
3666 	int i;
3667 
3668 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3669 		valid_bit = PTU_PTE_VALID;
3670 	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3671 		size_t pg_tbl_size = rmem->nr_pages * 8;
3672 
3673 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3674 			pg_tbl_size = rmem->page_size;
3675 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3676 						  &rmem->pg_tbl_map,
3677 						  GFP_KERNEL);
3678 		if (!rmem->pg_tbl)
3679 			return -ENOMEM;
3680 	}
3681 
3682 	for (i = 0; i < rmem->nr_pages; i++) {
3683 		u64 extra_bits = valid_bit;
3684 
3685 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3686 						     rmem->page_size,
3687 						     &rmem->dma_arr[i],
3688 						     GFP_KERNEL);
3689 		if (!rmem->pg_arr[i])
3690 			return -ENOMEM;
3691 
3692 		if (rmem->ctx_mem)
3693 			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3694 					  rmem->page_size);
3695 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
3696 			if (i == rmem->nr_pages - 2 &&
3697 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3698 				extra_bits |= PTU_PTE_NEXT_TO_LAST;
3699 			else if (i == rmem->nr_pages - 1 &&
3700 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3701 				extra_bits |= PTU_PTE_LAST;
3702 			rmem->pg_tbl[i] =
3703 				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3704 		}
3705 	}
3706 
3707 	if (rmem->vmem_size) {
3708 		*rmem->vmem = vzalloc(rmem->vmem_size);
3709 		if (!(*rmem->vmem))
3710 			return -ENOMEM;
3711 	}
3712 	return 0;
3713 }
3714 
3715 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3716 				   struct bnxt_rx_ring_info *rxr)
3717 {
3718 	int i;
3719 
3720 	kfree(rxr->rx_tpa_idx_map);
3721 	rxr->rx_tpa_idx_map = NULL;
3722 	if (rxr->rx_tpa) {
3723 		for (i = 0; i < bp->max_tpa; i++) {
3724 			kfree(rxr->rx_tpa[i].agg_arr);
3725 			rxr->rx_tpa[i].agg_arr = NULL;
3726 		}
3727 	}
3728 	kfree(rxr->rx_tpa);
3729 	rxr->rx_tpa = NULL;
3730 }
3731 
3732 static void bnxt_free_tpa_info(struct bnxt *bp)
3733 {
3734 	int i;
3735 
3736 	for (i = 0; i < bp->rx_nr_rings; i++) {
3737 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3738 
3739 		bnxt_free_one_tpa_info(bp, rxr);
3740 	}
3741 }
3742 
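/* Allocate the TPA (hardware GRO/LRO) tracking structures for one RX ring.
 * P5_PLUS chips also need a per-TPA aggregation array and an agg index map.
 */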
3743 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3744 				   struct bnxt_rx_ring_info *rxr)
3745 {
3746 	struct rx_agg_cmp *agg;
3747 	int i;
3748 
3749 	rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa);
3750 	if (!rxr->rx_tpa)
3751 		return -ENOMEM;
3752 
3753 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3754 		return 0;
3755 	for (i = 0; i < bp->max_tpa; i++) {
3756 		agg = kzalloc_objs(*agg, MAX_SKB_FRAGS);
3757 		if (!agg)
3758 			return -ENOMEM;
3759 		rxr->rx_tpa[i].agg_arr = agg;
3760 	}
3761 	rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map);
3762 	if (!rxr->rx_tpa_idx_map)
3763 		return -ENOMEM;
3764 
3765 	return 0;
3766 }
3767 
3768 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3769 {
3770 	int i, rc;
3771 
3772 	bp->max_tpa = MAX_TPA;
3773 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3774 		if (!bp->max_tpa_v2)
3775 			return 0;
3776 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3777 	}
3778 
3779 	for (i = 0; i < bp->rx_nr_rings; i++) {
3780 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3781 
3782 		rc = bnxt_alloc_one_tpa_info(bp, rxr);
3783 		if (rc)
3784 			return rc;
3785 	}
3786 	return 0;
3787 }
3788 
3789 static void bnxt_free_rx_rings(struct bnxt *bp)
3790 {
3791 	int i;
3792 
3793 	if (!bp->rx_ring)
3794 		return;
3795 
3796 	bnxt_free_tpa_info(bp);
3797 	for (i = 0; i < bp->rx_nr_rings; i++) {
3798 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3799 		struct bnxt_ring_struct *ring;
3800 
3801 		if (rxr->xdp_prog)
3802 			bpf_prog_put(rxr->xdp_prog);
3803 
3804 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3805 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3806 
3807 		page_pool_destroy(rxr->page_pool);
3808 		page_pool_destroy(rxr->head_pool);
3809 		rxr->page_pool = rxr->head_pool = NULL;
3810 
3811 		kfree(rxr->rx_agg_bmap);
3812 		rxr->rx_agg_bmap = NULL;
3813 
3814 		ring = &rxr->rx_ring_struct;
3815 		bnxt_free_ring(bp, &ring->ring_mem);
3816 
3817 		ring = &rxr->rx_agg_ring_struct;
3818 		bnxt_free_ring(bp, &ring->ring_mem);
3819 	}
3820 }
3821 
3822 static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
3823 				       struct bnxt_rx_ring_info *rxr)
3824 {
3825 	/* The user may have chosen an rx_page_size larger than the default.
3826 	 * We keep the ring sizes uniform and also want a uniform number of
3827 	 * bytes consumed per ring, so cap how much of the ring we fill.
3828 	 */
3829 	int fill_level = bp->rx_agg_ring_size;
3830 
3831 	if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
3832 		fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3833 
3834 	return fill_level;
3835 }
3836 
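/* Create the page pool(s) for one RX ring: a main pool sized for the data
 * and aggregation buffers, plus a separate head pool for header buffers
 * when the main pool cannot be used for them (unreadable netmem or
 * order > 0 pages).  Otherwise the head pool shares the main pool.
 */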
3837 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3838 				   struct bnxt_rx_ring_info *rxr,
3839 				   int numa_node)
3840 {
3841 	unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3842 	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3843 	struct page_pool_params pp = { 0 };
3844 	struct page_pool *pool;
3845 
3846 	pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
3847 	if (BNXT_RX_PAGE_MODE(bp))
3848 		pp.pool_size += bp->rx_ring_size / rx_size_fac;
3849 
3850 	pp.order = get_order(rxr->rx_page_size);
3851 	pp.nid = numa_node;
3852 	pp.netdev = bp->dev;
3853 	pp.dev = &bp->pdev->dev;
3854 	pp.dma_dir = bp->rx_dir;
3855 	pp.max_len = PAGE_SIZE << pp.order;
3856 	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3857 		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3858 	pp.queue_idx = rxr->bnapi->index;
3859 
3860 	pool = page_pool_create(&pp);
3861 	if (IS_ERR(pool))
3862 		return PTR_ERR(pool);
3863 	rxr->page_pool = pool;
3864 
3865 	rxr->need_head_pool = page_pool_is_unreadable(pool);
3866 	rxr->need_head_pool |= !!pp.order;
3867 	if (bnxt_separate_head_pool(rxr)) {
3868 		pp.order = 0;
3869 		pp.max_len = PAGE_SIZE;
3870 		pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3871 		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3872 		pool = page_pool_create(&pp);
3873 		if (IS_ERR(pool))
3874 			goto err_destroy_pp;
3875 	} else {
3876 		page_pool_get(pool);
3877 	}
3878 	rxr->head_pool = pool;
3879 
3880 	return 0;
3881 
3882 err_destroy_pp:
3883 	page_pool_destroy(rxr->page_pool);
3884 	rxr->page_pool = NULL;
3885 	return PTR_ERR(pool);
3886 }
3887 
3888 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3889 {
3890 	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3891 	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3892 }
3893 
3894 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3895 {
3896 	u16 mem_size;
3897 
3898 	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3899 	mem_size = rxr->rx_agg_bmap_size / 8;
3900 	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3901 	if (!rxr->rx_agg_bmap)
3902 		return -ENOMEM;
3903 
3904 	return 0;
3905 }
3906 
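/* Allocate the page pools, XDP RXQ info, descriptor memory and, if enabled,
 * the aggregation rings and TPA info for every RX ring.  Page pools are
 * placed on the NUMA node of the CPU expected to service each ring.
 */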
3907 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3908 {
3909 	int numa_node = dev_to_node(&bp->pdev->dev);
3910 	int i, rc = 0, agg_rings = 0, cpu;
3911 
3912 	if (!bp->rx_ring)
3913 		return -ENOMEM;
3914 
3915 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
3916 		agg_rings = 1;
3917 
3918 	for (i = 0; i < bp->rx_nr_rings; i++) {
3919 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3920 		struct bnxt_ring_struct *ring;
3921 		int cpu_node;
3922 
3923 		ring = &rxr->rx_ring_struct;
3924 
3925 		cpu = cpumask_local_spread(i, numa_node);
3926 		cpu_node = cpu_to_node(cpu);
3927 		netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3928 			   i, cpu_node);
3929 		rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3930 		if (rc)
3931 			return rc;
3932 		bnxt_enable_rx_page_pool(rxr);
3933 
3934 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3935 		if (rc < 0)
3936 			return rc;
3937 
3938 		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3939 						MEM_TYPE_PAGE_POOL,
3940 						rxr->page_pool);
3941 		if (rc) {
3942 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3943 			return rc;
3944 		}
3945 
3946 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3947 		if (rc)
3948 			return rc;
3949 
3950 		ring->grp_idx = i;
3951 		if (agg_rings) {
3952 			ring = &rxr->rx_agg_ring_struct;
3953 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3954 			if (rc)
3955 				return rc;
3956 
3957 			ring->grp_idx = i;
3958 			rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3959 			if (rc)
3960 				return rc;
3961 		}
3962 	}
3963 	if (bp->flags & BNXT_FLAG_TPA)
3964 		rc = bnxt_alloc_tpa_info(bp);
3965 	return rc;
3966 }
3967 
3968 static void bnxt_free_tx_rings(struct bnxt *bp)
3969 {
3970 	int i;
3971 	struct pci_dev *pdev = bp->pdev;
3972 
3973 	if (!bp->tx_ring)
3974 		return;
3975 
3976 	for (i = 0; i < bp->tx_nr_rings; i++) {
3977 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3978 		struct bnxt_ring_struct *ring;
3979 
3980 		if (txr->tx_push) {
3981 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
3982 					  txr->tx_push, txr->tx_push_mapping);
3983 			txr->tx_push = NULL;
3984 		}
3985 
3986 		ring = &txr->tx_ring_struct;
3987 
3988 		bnxt_free_ring(bp, &ring->ring_mem);
3989 	}
3990 }
3991 
3992 #define BNXT_TC_TO_RING_BASE(bp, tc)	\
3993 	((tc) * (bp)->tx_nr_rings_per_tc)
3994 
3995 #define BNXT_RING_TO_TC_OFF(bp, tx)	\
3996 	((tx) % (bp)->tx_nr_rings_per_tc)
3997 
3998 #define BNXT_RING_TO_TC(bp, tx)		\
3999 	((tx) / (bp)->tx_nr_rings_per_tc)
4000 
4001 static int bnxt_alloc_tx_rings(struct bnxt *bp)
4002 {
4003 	int i, j, rc;
4004 	struct pci_dev *pdev = bp->pdev;
4005 
4006 	bp->tx_push_size = 0;
4007 	if (bp->tx_push_thresh) {
4008 		int push_size;
4009 
4010 		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
4011 					bp->tx_push_thresh);
4012 
4013 		if (push_size > 256) {
4014 			push_size = 0;
4015 			bp->tx_push_thresh = 0;
4016 		}
4017 
4018 		bp->tx_push_size = push_size;
4019 	}
4020 
4021 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
4022 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4023 		struct bnxt_ring_struct *ring;
4024 		u8 qidx;
4025 
4026 		ring = &txr->tx_ring_struct;
4027 
4028 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4029 		if (rc)
4030 			return rc;
4031 
4032 		ring->grp_idx = txr->bnapi->index;
4033 		if (bp->tx_push_size) {
4034 			dma_addr_t mapping;
4035 
4036 			/* One pre-allocated DMA buffer to back up
4037 			 * the TX push operation
4038 			 */
4039 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
4040 						bp->tx_push_size,
4041 						&txr->tx_push_mapping,
4042 						GFP_KERNEL);
4043 
4044 			if (!txr->tx_push)
4045 				return -ENOMEM;
4046 
4047 			mapping = txr->tx_push_mapping +
4048 				sizeof(struct tx_push_bd);
4049 			txr->data_mapping = cpu_to_le64(mapping);
4050 		}
4051 		qidx = bp->tc_to_qidx[j];
4052 		ring->queue_id = bp->q_info[qidx].queue_id;
4053 		spin_lock_init(&txr->xdp_tx_lock);
4054 		if (i < bp->tx_nr_rings_xdp)
4055 			continue;
4056 		if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4057 			j++;
4058 	}
4059 	return 0;
4060 }
4061 
4062 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4063 {
4064 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4065 
4066 	kfree(cpr->cp_desc_ring);
4067 	cpr->cp_desc_ring = NULL;
4068 	ring->ring_mem.pg_arr = NULL;
4069 	kfree(cpr->cp_desc_mapping);
4070 	cpr->cp_desc_mapping = NULL;
4071 	ring->ring_mem.dma_arr = NULL;
4072 }
4073 
4074 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4075 {
4076 	cpr->cp_desc_ring = kzalloc_objs(*cpr->cp_desc_ring, n);
4077 	if (!cpr->cp_desc_ring)
4078 		return -ENOMEM;
4079 	cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n);
4080 	if (!cpr->cp_desc_mapping)
4081 		return -ENOMEM;
4082 	return 0;
4083 }
4084 
4085 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4086 {
4087 	int i;
4088 
4089 	if (!bp->bnapi)
4090 		return;
4091 	for (i = 0; i < bp->cp_nr_rings; i++) {
4092 		struct bnxt_napi *bnapi = bp->bnapi[i];
4093 
4094 		if (!bnapi)
4095 			continue;
4096 		bnxt_free_cp_arrays(&bnapi->cp_ring);
4097 	}
4098 }
4099 
4100 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4101 {
4102 	int i, n = bp->cp_nr_pages;
4103 
4104 	for (i = 0; i < bp->cp_nr_rings; i++) {
4105 		struct bnxt_napi *bnapi = bp->bnapi[i];
4106 		int rc;
4107 
4108 		if (!bnapi)
4109 			continue;
4110 		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4111 		if (rc)
4112 			return rc;
4113 	}
4114 	return 0;
4115 }
4116 
4117 static void bnxt_free_cp_rings(struct bnxt *bp)
4118 {
4119 	int i;
4120 
4121 	if (!bp->bnapi)
4122 		return;
4123 
4124 	for (i = 0; i < bp->cp_nr_rings; i++) {
4125 		struct bnxt_napi *bnapi = bp->bnapi[i];
4126 		struct bnxt_cp_ring_info *cpr;
4127 		struct bnxt_ring_struct *ring;
4128 		int j;
4129 
4130 		if (!bnapi)
4131 			continue;
4132 
4133 		cpr = &bnapi->cp_ring;
4134 		ring = &cpr->cp_ring_struct;
4135 
4136 		bnxt_free_ring(bp, &ring->ring_mem);
4137 
4138 		if (!cpr->cp_ring_arr)
4139 			continue;
4140 
4141 		for (j = 0; j < cpr->cp_ring_count; j++) {
4142 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4143 
4144 			ring = &cpr2->cp_ring_struct;
4145 			bnxt_free_ring(bp, &ring->ring_mem);
4146 			bnxt_free_cp_arrays(cpr2);
4147 		}
4148 		kfree(cpr->cp_ring_arr);
4149 		cpr->cp_ring_arr = NULL;
4150 		cpr->cp_ring_count = 0;
4151 	}
4152 }
4153 
4154 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4155 				  struct bnxt_cp_ring_info *cpr)
4156 {
4157 	struct bnxt_ring_mem_info *rmem;
4158 	struct bnxt_ring_struct *ring;
4159 	int rc;
4160 
4161 	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4162 	if (rc) {
4163 		bnxt_free_cp_arrays(cpr);
4164 		return -ENOMEM;
4165 	}
4166 	ring = &cpr->cp_ring_struct;
4167 	rmem = &ring->ring_mem;
4168 	rmem->nr_pages = bp->cp_nr_pages;
4169 	rmem->page_size = HW_CMPD_RING_SIZE;
4170 	rmem->pg_arr = (void **)cpr->cp_desc_ring;
4171 	rmem->dma_arr = cpr->cp_desc_mapping;
4172 	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4173 	rc = bnxt_alloc_ring(bp, rmem);
4174 	if (rc) {
4175 		bnxt_free_ring(bp, rmem);
4176 		bnxt_free_cp_arrays(cpr);
4177 	}
4178 	return rc;
4179 }
4180 
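/* Allocate the completion (or notification) ring for each NAPI instance.
 * On P5_PLUS chips the main ring is a notification queue, and an array of
 * completion sub-rings is also allocated: one for RX plus one or more for
 * TX, depending on XDP, traffic classes and ring sharing.
 */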
4181 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4182 {
4183 	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4184 	int i, j, rc, ulp_msix;
4185 	int tcs = bp->num_tc;
4186 
4187 	if (!tcs)
4188 		tcs = 1;
4189 	ulp_msix = bnxt_get_ulp_msix_num(bp);
4190 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4191 		struct bnxt_napi *bnapi = bp->bnapi[i];
4192 		struct bnxt_cp_ring_info *cpr, *cpr2;
4193 		struct bnxt_ring_struct *ring;
4194 		int cp_count = 0, k;
4195 		int rx = 0, tx = 0;
4196 
4197 		if (!bnapi)
4198 			continue;
4199 
4200 		cpr = &bnapi->cp_ring;
4201 		cpr->bnapi = bnapi;
4202 		ring = &cpr->cp_ring_struct;
4203 
4204 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4205 		if (rc)
4206 			return rc;
4207 
4208 		ring->map_idx = ulp_msix + i;
4209 
4210 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4211 			continue;
4212 
4213 		if (i < bp->rx_nr_rings) {
4214 			cp_count++;
4215 			rx = 1;
4216 		}
4217 		if (i < bp->tx_nr_rings_xdp) {
4218 			cp_count++;
4219 			tx = 1;
4220 		} else if ((sh && i < bp->tx_nr_rings) ||
4221 			 (!sh && i >= bp->rx_nr_rings)) {
4222 			cp_count += tcs;
4223 			tx = 1;
4224 		}
4225 
4226 		cpr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
4227 		if (!cpr->cp_ring_arr)
4228 			return -ENOMEM;
4229 		cpr->cp_ring_count = cp_count;
4230 
4231 		for (k = 0; k < cp_count; k++) {
4232 			cpr2 = &cpr->cp_ring_arr[k];
4233 			rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4234 			if (rc)
4235 				return rc;
4236 			cpr2->bnapi = bnapi;
4237 			cpr2->sw_stats = cpr->sw_stats;
4238 			cpr2->cp_idx = k;
4239 			if (!k && rx) {
4240 				bp->rx_ring[i].rx_cpr = cpr2;
4241 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4242 			} else {
4243 				int n, tc = k - rx;
4244 
4245 				n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4246 				bp->tx_ring[n].tx_cpr = cpr2;
4247 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4248 			}
4249 		}
4250 		if (tx)
4251 			j++;
4252 	}
4253 	return 0;
4254 }
4255 
4256 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4257 				     struct bnxt_rx_ring_info *rxr)
4258 {
4259 	struct bnxt_ring_mem_info *rmem;
4260 	struct bnxt_ring_struct *ring;
4261 
4262 	ring = &rxr->rx_ring_struct;
4263 	rmem = &ring->ring_mem;
4264 	rmem->nr_pages = bp->rx_nr_pages;
4265 	rmem->page_size = HW_RXBD_RING_SIZE;
4266 	rmem->pg_arr = (void **)rxr->rx_desc_ring;
4267 	rmem->dma_arr = rxr->rx_desc_mapping;
4268 	rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4269 	rmem->vmem = (void **)&rxr->rx_buf_ring;
4270 
4271 	ring = &rxr->rx_agg_ring_struct;
4272 	rmem = &ring->ring_mem;
4273 	rmem->nr_pages = bp->rx_agg_nr_pages;
4274 	rmem->page_size = HW_RXBD_RING_SIZE;
4275 	rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4276 	rmem->dma_arr = rxr->rx_agg_desc_mapping;
4277 	rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4278 	rmem->vmem = (void **)&rxr->rx_agg_ring;
4279 }
4280 
4281 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4282 				      struct bnxt_rx_ring_info *rxr)
4283 {
4284 	struct bnxt_ring_mem_info *rmem;
4285 	struct bnxt_ring_struct *ring;
4286 	int i;
4287 
4288 	rxr->page_pool->p.napi = NULL;
4289 	rxr->page_pool = NULL;
4290 	rxr->head_pool->p.napi = NULL;
4291 	rxr->head_pool = NULL;
4292 	memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4293 
4294 	ring = &rxr->rx_ring_struct;
4295 	rmem = &ring->ring_mem;
4296 	rmem->pg_tbl = NULL;
4297 	rmem->pg_tbl_map = 0;
4298 	for (i = 0; i < rmem->nr_pages; i++) {
4299 		rmem->pg_arr[i] = NULL;
4300 		rmem->dma_arr[i] = 0;
4301 	}
4302 	*rmem->vmem = NULL;
4303 
4304 	ring = &rxr->rx_agg_ring_struct;
4305 	rmem = &ring->ring_mem;
4306 	rmem->pg_tbl = NULL;
4307 	rmem->pg_tbl_map = 0;
4308 	for (i = 0; i < rmem->nr_pages; i++) {
4309 		rmem->pg_arr[i] = NULL;
4310 		rmem->dma_arr[i] = 0;
4311 	}
4312 	*rmem->vmem = NULL;
4313 }
4314 
4315 static void bnxt_init_ring_struct(struct bnxt *bp)
4316 {
4317 	int i, j;
4318 
4319 	for (i = 0; i < bp->cp_nr_rings; i++) {
4320 		struct bnxt_napi *bnapi = bp->bnapi[i];
4321 		struct netdev_queue_config qcfg;
4322 		struct bnxt_ring_mem_info *rmem;
4323 		struct bnxt_cp_ring_info *cpr;
4324 		struct bnxt_rx_ring_info *rxr;
4325 		struct bnxt_tx_ring_info *txr;
4326 		struct bnxt_ring_struct *ring;
4327 
4328 		if (!bnapi)
4329 			continue;
4330 
4331 		cpr = &bnapi->cp_ring;
4332 		ring = &cpr->cp_ring_struct;
4333 		rmem = &ring->ring_mem;
4334 		rmem->nr_pages = bp->cp_nr_pages;
4335 		rmem->page_size = HW_CMPD_RING_SIZE;
4336 		rmem->pg_arr = (void **)cpr->cp_desc_ring;
4337 		rmem->dma_arr = cpr->cp_desc_mapping;
4338 		rmem->vmem_size = 0;
4339 
4340 		rxr = bnapi->rx_ring;
4341 		if (!rxr)
4342 			goto skip_rx;
4343 
4344 		netdev_queue_config(bp->dev, i, &qcfg);
4345 		rxr->rx_page_size = qcfg.rx_page_size;
4346 
4347 		ring = &rxr->rx_ring_struct;
4348 		rmem = &ring->ring_mem;
4349 		rmem->nr_pages = bp->rx_nr_pages;
4350 		rmem->page_size = HW_RXBD_RING_SIZE;
4351 		rmem->pg_arr = (void **)rxr->rx_desc_ring;
4352 		rmem->dma_arr = rxr->rx_desc_mapping;
4353 		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4354 		rmem->vmem = (void **)&rxr->rx_buf_ring;
4355 
4356 		ring = &rxr->rx_agg_ring_struct;
4357 		rmem = &ring->ring_mem;
4358 		rmem->nr_pages = bp->rx_agg_nr_pages;
4359 		rmem->page_size = HW_RXBD_RING_SIZE;
4360 		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4361 		rmem->dma_arr = rxr->rx_agg_desc_mapping;
4362 		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4363 		rmem->vmem = (void **)&rxr->rx_agg_ring;
4364 
4365 skip_rx:
4366 		bnxt_for_each_napi_tx(j, bnapi, txr) {
4367 			ring = &txr->tx_ring_struct;
4368 			rmem = &ring->ring_mem;
4369 			rmem->nr_pages = bp->tx_nr_pages;
4370 			rmem->page_size = HW_TXBD_RING_SIZE;
4371 			rmem->pg_arr = (void **)txr->tx_desc_ring;
4372 			rmem->dma_arr = txr->tx_desc_mapping;
4373 			rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4374 			rmem->vmem = (void **)&txr->tx_buf_ring;
4375 		}
4376 	}
4377 }
4378 
4379 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4380 {
4381 	int i;
4382 	u32 prod;
4383 	struct rx_bd **rx_buf_ring;
4384 
4385 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4386 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4387 		int j;
4388 		struct rx_bd *rxbd;
4389 
4390 		rxbd = rx_buf_ring[i];
4391 		if (!rxbd)
4392 			continue;
4393 
4394 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4395 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4396 			rxbd->rx_bd_opaque = prod;
4397 		}
4398 	}
4399 }
4400 
4401 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4402 				       struct bnxt_rx_ring_info *rxr,
4403 				       int ring_nr)
4404 {
4405 	u32 prod;
4406 	int i;
4407 
4408 	prod = rxr->rx_prod;
4409 	for (i = 0; i < bp->rx_ring_size; i++) {
4410 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4411 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4412 				    ring_nr, i, bp->rx_ring_size);
4413 			break;
4414 		}
4415 		prod = NEXT_RX(prod);
4416 	}
4417 	rxr->rx_prod = prod;
4418 }
4419 
4420 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4421 					  struct bnxt_rx_ring_info *rxr,
4422 					  int ring_nr)
4423 {
4424 	int fill_level, i;
4425 	u32 prod;
4426 
4427 	fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
4428 
4429 	prod = rxr->rx_agg_prod;
4430 	for (i = 0; i < fill_level; i++) {
4431 		if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4432 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4433 				    ring_nr, i, bp->rx_agg_ring_size);
4434 			break;
4435 		}
4436 		prod = NEXT_RX_AGG(prod);
4437 	}
4438 	rxr->rx_agg_prod = prod;
4439 }
4440 
4441 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4442 					struct bnxt_rx_ring_info *rxr)
4443 {
4444 	dma_addr_t mapping;
4445 	u8 *data;
4446 	int i;
4447 
4448 	for (i = 0; i < bp->max_tpa; i++) {
4449 		data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4450 					    GFP_KERNEL);
4451 		if (!data)
4452 			return -ENOMEM;
4453 
4454 		rxr->rx_tpa[i].data = data;
4455 		rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4456 		rxr->rx_tpa[i].mapping = mapping;
4457 	}
4458 
4459 	return 0;
4460 }
4461 
4462 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4463 {
4464 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4465 	int rc;
4466 
4467 	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4468 
4469 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4470 		return 0;
4471 
4472 	bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4473 
4474 	if (rxr->rx_tpa) {
4475 		rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4476 		if (rc)
4477 			return rc;
4478 	}
4479 	return 0;
4480 }
4481 
4482 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4483 				       struct bnxt_rx_ring_info *rxr)
4484 {
4485 	struct bnxt_ring_struct *ring;
4486 	u32 type;
4487 
4488 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4489 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4490 
4491 	if (NET_IP_ALIGN == 2)
4492 		type |= RX_BD_FLAGS_SOP;
4493 
4494 	ring = &rxr->rx_ring_struct;
4495 	bnxt_init_rxbd_pages(ring, type);
4496 	ring->fw_ring_id = INVALID_HW_RING_ID;
4497 }
4498 
4499 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4500 					   struct bnxt_rx_ring_info *rxr)
4501 {
4502 	struct bnxt_ring_struct *ring;
4503 	u32 type;
4504 
4505 	ring = &rxr->rx_agg_ring_struct;
4506 	ring->fw_ring_id = INVALID_HW_RING_ID;
4507 	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4508 		type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
4509 			RX_BD_TYPE_RX_AGG_BD;
4510 
4511 		/* On P7, setting EOP will cause the chip to disable
4512 		 * Relaxed Ordering (RO) for TPA data.  Disable EOP for
4513 		 * potentially higher performance with RO.
4514 		 */
4515 		if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
4516 			type |= RX_BD_FLAGS_AGG_EOP;
4517 
4518 		bnxt_init_rxbd_pages(ring, type);
4519 	}
4520 }
4521 
4522 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4523 {
4524 	struct bnxt_rx_ring_info *rxr;
4525 
4526 	rxr = &bp->rx_ring[ring_nr];
4527 	bnxt_init_one_rx_ring_rxbd(bp, rxr);
4528 
4529 	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4530 			     &rxr->bnapi->napi);
4531 
4532 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4533 		bpf_prog_add(bp->xdp_prog, 1);
4534 		rxr->xdp_prog = bp->xdp_prog;
4535 	}
4536 
4537 	bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4538 
4539 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
4540 }
4541 
4542 static void bnxt_init_cp_rings(struct bnxt *bp)
4543 {
4544 	int i, j;
4545 
4546 	for (i = 0; i < bp->cp_nr_rings; i++) {
4547 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4548 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4549 
4550 		ring->fw_ring_id = INVALID_HW_RING_ID;
4551 		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4552 		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4553 		if (!cpr->cp_ring_arr)
4554 			continue;
4555 		for (j = 0; j < cpr->cp_ring_count; j++) {
4556 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4557 
4558 			ring = &cpr2->cp_ring_struct;
4559 			ring->fw_ring_id = INVALID_HW_RING_ID;
4560 			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4561 			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4562 		}
4563 	}
4564 }
4565 
4566 static int bnxt_init_rx_rings(struct bnxt *bp)
4567 {
4568 	int i, rc = 0;
4569 
4570 	if (BNXT_RX_PAGE_MODE(bp)) {
4571 		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4572 		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4573 	} else {
4574 		bp->rx_offset = BNXT_RX_OFFSET;
4575 		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4576 	}
4577 
4578 	for (i = 0; i < bp->rx_nr_rings; i++) {
4579 		rc = bnxt_init_one_rx_ring(bp, i);
4580 		if (rc)
4581 			break;
4582 	}
4583 
4584 	return rc;
4585 }
4586 
4587 static int bnxt_init_tx_rings(struct bnxt *bp)
4588 {
4589 	u16 i;
4590 
4591 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4592 				   BNXT_MIN_TX_DESC_CNT);
4593 
4594 	for (i = 0; i < bp->tx_nr_rings; i++) {
4595 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4596 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4597 
4598 		ring->fw_ring_id = INVALID_HW_RING_ID;
4599 
4600 		if (i >= bp->tx_nr_rings_xdp)
4601 			netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4602 					     NETDEV_QUEUE_TYPE_TX,
4603 					     &txr->bnapi->napi);
4604 	}
4605 
4606 	return 0;
4607 }
4608 
4609 static void bnxt_free_ring_grps(struct bnxt *bp)
4610 {
4611 	kfree(bp->grp_info);
4612 	bp->grp_info = NULL;
4613 }
4614 
4615 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4616 {
4617 	int i;
4618 
4619 	if (irq_re_init) {
4620 		bp->grp_info = kzalloc_objs(struct bnxt_ring_grp_info,
4621 					    bp->cp_nr_rings);
4622 		if (!bp->grp_info)
4623 			return -ENOMEM;
4624 	}
4625 	for (i = 0; i < bp->cp_nr_rings; i++) {
4626 		if (irq_re_init)
4627 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4628 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4629 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4630 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4631 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4632 	}
4633 	return 0;
4634 }
4635 
4636 static void bnxt_free_vnics(struct bnxt *bp)
4637 {
4638 	kfree(bp->vnic_info);
4639 	bp->vnic_info = NULL;
4640 	bp->nr_vnics = 0;
4641 }
4642 
4643 static int bnxt_alloc_vnics(struct bnxt *bp)
4644 {
4645 	int num_vnics = 1;
4646 
4647 #ifdef CONFIG_RFS_ACCEL
4648 	if (bp->flags & BNXT_FLAG_RFS) {
4649 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4650 			num_vnics++;
4651 		else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4652 			num_vnics += bp->rx_nr_rings;
4653 	}
4654 #endif
4655 
4656 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4657 		num_vnics++;
4658 
4659 	bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics);
4660 	if (!bp->vnic_info)
4661 		return -ENOMEM;
4662 
4663 	bp->nr_vnics = num_vnics;
4664 	return 0;
4665 }
4666 
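/* Reset all VNIC firmware IDs and contexts to invalid and set up the RSS
 * hash key: a random key is generated once for the default VNIC and then
 * copied to all other VNICs.
 */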
4667 static void bnxt_init_vnics(struct bnxt *bp)
4668 {
4669 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4670 	int i;
4671 
4672 	for (i = 0; i < bp->nr_vnics; i++) {
4673 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4674 		int j;
4675 
4676 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
4677 		vnic->vnic_id = i;
4678 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4679 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4680 
4681 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4682 
4683 		if (bp->vnic_info[i].rss_hash_key) {
4684 			if (i == BNXT_VNIC_DEFAULT) {
4685 				u8 *key = (void *)vnic->rss_hash_key;
4686 				int k;
4687 
4688 				if (!bp->rss_hash_key_valid &&
4689 				    !bp->rss_hash_key_updated) {
4690 					get_random_bytes(bp->rss_hash_key,
4691 							 HW_HASH_KEY_SIZE);
4692 					bp->rss_hash_key_updated = true;
4693 				}
4694 
4695 				memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4696 				       HW_HASH_KEY_SIZE);
4697 
4698 				if (!bp->rss_hash_key_updated)
4699 					continue;
4700 
4701 				bp->rss_hash_key_updated = false;
4702 				bp->rss_hash_key_valid = true;
4703 
4704 				bp->toeplitz_prefix = 0;
4705 				for (k = 0; k < 8; k++) {
4706 					bp->toeplitz_prefix <<= 8;
4707 					bp->toeplitz_prefix |= key[k];
4708 				}
4709 			} else {
4710 				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4711 				       HW_HASH_KEY_SIZE);
4712 			}
4713 		}
4714 	}
4715 }
4716 
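/* Return the number of ring pages needed to hold @ring_size descriptors at
 * @desc_per_pg descriptors per page, rounded up to a power of 2.
 */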
4717 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4718 {
4719 	int pages;
4720 
4721 	pages = ring_size / desc_per_pg;
4722 
4723 	if (!pages)
4724 		return 1;
4725 
4726 	pages++;
4727 
4728 	while (pages & (pages - 1))
4729 		pages++;
4730 
4731 	return pages;
4732 }
4733 
4734 void bnxt_set_tpa_flags(struct bnxt *bp)
4735 {
4736 	bp->flags &= ~BNXT_FLAG_TPA;
4737 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4738 		return;
4739 	if (bp->dev->features & NETIF_F_LRO)
4740 		bp->flags |= BNXT_FLAG_LRO;
4741 	else if (bp->dev->features & NETIF_F_GRO_HW)
4742 		bp->flags |= BNXT_FLAG_GRO;
4743 }
4744 
4745 static void bnxt_init_ring_params(struct bnxt *bp)
4746 {
4747 	unsigned int rx_size;
4748 
4749 	bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4750 	/* Try to fit 4 chunks into a 4k page */
4751 	rx_size = SZ_1K -
4752 		NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4753 	bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4754 }
4755 
4756 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4757  * be set on entry.
4758  */
4759 void bnxt_set_ring_params(struct bnxt *bp)
4760 {
4761 	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4762 	u32 agg_factor = 0, agg_ring_size = 0;
4763 
4764 	/* 8 for CRC and VLAN */
4765 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4766 
4767 	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4768 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4769 
4770 	ring_size = bp->rx_ring_size;
4771 	bp->rx_agg_ring_size = 0;
4772 	bp->rx_agg_nr_pages = 0;
4773 
4774 	if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4775 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4776 
4777 	bp->flags &= ~BNXT_FLAG_JUMBO;
4778 	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4779 		u32 jumbo_factor;
4780 
4781 		bp->flags |= BNXT_FLAG_JUMBO;
4782 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4783 		if (jumbo_factor > agg_factor)
4784 			agg_factor = jumbo_factor;
4785 	}
4786 	if (agg_factor) {
4787 		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4788 			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4789 			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4790 				    bp->rx_ring_size, ring_size);
4791 			bp->rx_ring_size = ring_size;
4792 		}
4793 		agg_ring_size = ring_size * agg_factor;
4794 
4795 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4796 							RX_DESC_CNT);
4797 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4798 			u32 tmp = agg_ring_size;
4799 
4800 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4801 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4802 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4803 				    tmp, agg_ring_size);
4804 		}
4805 		bp->rx_agg_ring_size = agg_ring_size;
4806 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4807 
4808 		if (BNXT_RX_PAGE_MODE(bp)) {
4809 			rx_space = PAGE_SIZE;
4810 			rx_size = PAGE_SIZE -
4811 				  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4812 				  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4813 		} else {
4814 			rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4815 				       bp->rx_copybreak,
4816 				       bp->dev->cfg_pending->hds_thresh);
4817 			rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4818 			rx_space = rx_size + NET_SKB_PAD +
4819 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4820 		}
4821 	}
4822 
4823 	bp->rx_buf_use_size = rx_size;
4824 	bp->rx_buf_size = rx_space;
4825 
4826 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4827 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4828 
4829 	ring_size = bp->tx_ring_size;
4830 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4831 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4832 
4833 	max_rx_cmpl = bp->rx_ring_size;
4834 	/* max_tpa needs to be added because the buffers for TPA_START
4835 	 * completions are recycled immediately, so the TPA completions are
4836 	 * not bound by the RX ring size.
4837 	 */
4838 	if (bp->flags & BNXT_FLAG_TPA)
4839 		max_rx_cmpl += bp->max_tpa;
4840 	/* RX and TPA completions are 32-byte, all others are 16-byte */
4841 	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4842 	bp->cp_ring_size = ring_size;
4843 
4844 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4845 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
4846 		bp->cp_nr_pages = MAX_CP_PAGES;
4847 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4848 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4849 			    ring_size, bp->cp_ring_size);
4850 	}
4851 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4852 	bp->cp_ring_mask = bp->cp_bit - 1;
4853 }
4854 
4855 /* Changing allocation mode of RX rings.
4856  * TODO: Update when extending xdp_rxq_info to support allocation modes.
4857  */
4858 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4859 {
4860 	struct net_device *dev = bp->dev;
4861 
4862 	if (page_mode) {
4863 		bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4864 		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4865 
4866 		if (bp->xdp_prog->aux->xdp_has_frags)
4867 			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4868 		else
4869 			dev->max_mtu =
4870 				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4871 		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4872 			bp->flags |= BNXT_FLAG_JUMBO;
4873 			bp->rx_skb_func = bnxt_rx_multi_page_skb;
4874 		} else {
4875 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4876 			bp->rx_skb_func = bnxt_rx_page_skb;
4877 		}
4878 		bp->rx_dir = DMA_BIDIRECTIONAL;
4879 	} else {
4880 		dev->max_mtu = bp->max_mtu;
4881 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4882 		bp->rx_dir = DMA_FROM_DEVICE;
4883 		bp->rx_skb_func = bnxt_rx_skb;
4884 	}
4885 }
4886 
4887 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4888 {
4889 	__bnxt_set_rx_skb_mode(bp, page_mode);
4890 
4891 	if (!page_mode) {
4892 		int rx, tx;
4893 
4894 		bnxt_get_max_rings(bp, &rx, &tx, true);
4895 		if (rx > 1) {
4896 			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4897 			bp->dev->hw_features |= NETIF_F_LRO;
4898 		}
4899 	}
4900 
4901 	/* Update LRO and GRO_HW availability */
4902 	netdev_update_features(bp->dev);
4903 }
4904 
4905 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4906 {
4907 	int i;
4908 	struct bnxt_vnic_info *vnic;
4909 	struct pci_dev *pdev = bp->pdev;
4910 
4911 	if (!bp->vnic_info)
4912 		return;
4913 
4914 	for (i = 0; i < bp->nr_vnics; i++) {
4915 		vnic = &bp->vnic_info[i];
4916 
4917 		kfree(vnic->fw_grp_ids);
4918 		vnic->fw_grp_ids = NULL;
4919 
4920 		kfree(vnic->uc_list);
4921 		vnic->uc_list = NULL;
4922 
4923 		if (vnic->mc_list) {
4924 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4925 					  vnic->mc_list, vnic->mc_list_mapping);
4926 			vnic->mc_list = NULL;
4927 		}
4928 
4929 		if (vnic->rss_table) {
4930 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4931 					  vnic->rss_table,
4932 					  vnic->rss_table_dma_addr);
4933 			vnic->rss_table = NULL;
4934 		}
4935 
4936 		vnic->rss_hash_key = NULL;
4937 		vnic->flags = 0;
4938 	}
4939 }
4940 
4941 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4942 {
4943 	int i, rc = 0, size;
4944 	struct bnxt_vnic_info *vnic;
4945 	struct pci_dev *pdev = bp->pdev;
4946 	int max_rings;
4947 
4948 	for (i = 0; i < bp->nr_vnics; i++) {
4949 		vnic = &bp->vnic_info[i];
4950 
4951 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4952 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4953 
4954 			if (mem_size > 0) {
4955 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4956 				if (!vnic->uc_list) {
4957 					rc = -ENOMEM;
4958 					goto out;
4959 				}
4960 			}
4961 		}
4962 
4963 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4964 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4965 			vnic->mc_list =
4966 				dma_alloc_coherent(&pdev->dev,
4967 						   vnic->mc_list_size,
4968 						   &vnic->mc_list_mapping,
4969 						   GFP_KERNEL);
4970 			if (!vnic->mc_list) {
4971 				rc = -ENOMEM;
4972 				goto out;
4973 			}
4974 		}
4975 
4976 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4977 			goto vnic_skip_grps;
4978 
4979 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4980 			max_rings = bp->rx_nr_rings;
4981 		else
4982 			max_rings = 1;
4983 
4984 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4985 		if (!vnic->fw_grp_ids) {
4986 			rc = -ENOMEM;
4987 			goto out;
4988 		}
4989 vnic_skip_grps:
4990 		if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4991 		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4992 			continue;
4993 
4994 		/* Allocate rss table and hash key */
4995 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4996 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4997 			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4998 
4999 		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
5000 		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
5001 						     vnic->rss_table_size,
5002 						     &vnic->rss_table_dma_addr,
5003 						     GFP_KERNEL);
5004 		if (!vnic->rss_table) {
5005 			rc = -ENOMEM;
5006 			goto out;
5007 		}
5008 
5009 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
5010 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
5011 	}
5012 	return 0;
5013 
5014 out:
5015 	return rc;
5016 }
5017 
5018 static void bnxt_free_hwrm_resources(struct bnxt *bp)
5019 {
5020 	struct bnxt_hwrm_wait_token *token;
5021 
5022 	dma_pool_destroy(bp->hwrm_dma_pool);
5023 	bp->hwrm_dma_pool = NULL;
5024 
5025 	rcu_read_lock();
5026 	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
5027 		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
5028 	rcu_read_unlock();
5029 }
5030 
5031 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5032 {
5033 	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5034 					    BNXT_HWRM_DMA_SIZE,
5035 					    BNXT_HWRM_DMA_ALIGN, 0);
5036 	if (!bp->hwrm_dma_pool)
5037 		return -ENOMEM;
5038 
5039 	INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5040 
5041 	return 0;
5042 }
5043 
5044 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5045 {
5046 	kfree(stats->hw_masks);
5047 	stats->hw_masks = NULL;
5048 	kfree(stats->sw_stats);
5049 	stats->sw_stats = NULL;
5050 	if (stats->hw_stats) {
5051 		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5052 				  stats->hw_stats_map);
5053 		stats->hw_stats = NULL;
5054 	}
5055 }
5056 
5057 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5058 				bool alloc_masks)
5059 {
5060 	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5061 					     &stats->hw_stats_map, GFP_KERNEL);
5062 	if (!stats->hw_stats)
5063 		return -ENOMEM;
5064 
5065 	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5066 	if (!stats->sw_stats)
5067 		goto stats_mem_err;
5068 
5069 	if (alloc_masks) {
5070 		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5071 		if (!stats->hw_masks)
5072 			goto stats_mem_err;
5073 	}
5074 	return 0;
5075 
5076 stats_mem_err:
5077 	bnxt_free_stats_mem(bp, stats);
5078 	return -ENOMEM;
5079 }
5080 
5081 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5082 {
5083 	int i;
5084 
5085 	for (i = 0; i < count; i++)
5086 		mask_arr[i] = mask;
5087 }
5088 
5089 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5090 {
5091 	int i;
5092 
5093 	for (i = 0; i < count; i++)
5094 		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5095 }
5096 
5097 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5098 				    struct bnxt_stats_mem *stats)
5099 {
5100 	struct hwrm_func_qstats_ext_output *resp;
5101 	struct hwrm_func_qstats_ext_input *req;
5102 	__le64 *hw_masks;
5103 	int rc;
5104 
5105 	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5106 	    !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5107 		return -EOPNOTSUPP;
5108 
5109 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5110 	if (rc)
5111 		return rc;
5112 
5113 	req->fid = cpu_to_le16(0xffff);
5114 	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5115 
5116 	resp = hwrm_req_hold(bp, req);
5117 	rc = hwrm_req_send(bp, req);
5118 	if (!rc) {
5119 		hw_masks = &resp->rx_ucast_pkts;
5120 		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5121 	}
5122 	hwrm_req_drop(bp, req);
5123 	return rc;
5124 }
5125 
5126 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5127 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5128 
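/* Query the firmware for the hardware counter widths (counter masks) so the
 * driver can account for counter wrap-around.  If the query is not
 * supported, fall back to fixed-width masks.
 */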
5129 static void bnxt_init_stats(struct bnxt *bp)
5130 {
5131 	struct bnxt_napi *bnapi = bp->bnapi[0];
5132 	struct bnxt_cp_ring_info *cpr;
5133 	struct bnxt_stats_mem *stats;
5134 	__le64 *rx_stats, *tx_stats;
5135 	int rc, rx_count, tx_count;
5136 	u64 *rx_masks, *tx_masks;
5137 	u64 mask;
5138 	u8 flags;
5139 
5140 	cpr = &bnapi->cp_ring;
5141 	stats = &cpr->stats;
5142 	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5143 	if (rc) {
5144 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5145 			mask = (1ULL << 48) - 1;
5146 		else
5147 			mask = -1ULL;
5148 		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5149 	}
5150 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
5151 		stats = &bp->port_stats;
5152 		rx_stats = stats->hw_stats;
5153 		rx_masks = stats->hw_masks;
5154 		rx_count = sizeof(struct rx_port_stats) / 8;
5155 		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5156 		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5157 		tx_count = sizeof(struct tx_port_stats) / 8;
5158 
5159 		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5160 		rc = bnxt_hwrm_port_qstats(bp, flags);
5161 		if (rc) {
5162 			mask = (1ULL << 40) - 1;
5163 
5164 			bnxt_fill_masks(rx_masks, mask, rx_count);
5165 			bnxt_fill_masks(tx_masks, mask, tx_count);
5166 		} else {
5167 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5168 			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5169 			bnxt_hwrm_port_qstats(bp, 0);
5170 		}
5171 	}
5172 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5173 		stats = &bp->rx_port_stats_ext;
5174 		rx_stats = stats->hw_stats;
5175 		rx_masks = stats->hw_masks;
5176 		rx_count = sizeof(struct rx_port_stats_ext) / 8;
5177 		stats = &bp->tx_port_stats_ext;
5178 		tx_stats = stats->hw_stats;
5179 		tx_masks = stats->hw_masks;
5180 		tx_count = sizeof(struct tx_port_stats_ext) / 8;
5181 
5182 		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5183 		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5184 		if (rc) {
5185 			mask = (1ULL << 40) - 1;
5186 
5187 			bnxt_fill_masks(rx_masks, mask, rx_count);
5188 			if (tx_stats)
5189 				bnxt_fill_masks(tx_masks, mask, tx_count);
5190 		} else {
5191 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5192 			if (tx_stats)
5193 				bnxt_copy_hw_masks(tx_masks, tx_stats,
5194 						   tx_count);
5195 			bnxt_hwrm_port_qstats_ext(bp, 0);
5196 		}
5197 	}
5198 }
5199 
5200 static void bnxt_free_port_stats(struct bnxt *bp)
5201 {
5202 	bp->flags &= ~BNXT_FLAG_PORT_STATS;
5203 	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5204 
5205 	bnxt_free_stats_mem(bp, &bp->port_stats);
5206 	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5207 	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5208 }
5209 
5210 static void bnxt_free_ring_stats(struct bnxt *bp)
5211 {
5212 	int i;
5213 
5214 	if (!bp->bnapi)
5215 		return;
5216 
5217 	for (i = 0; i < bp->cp_nr_rings; i++) {
5218 		struct bnxt_napi *bnapi = bp->bnapi[i];
5219 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5220 
5221 		bnxt_free_stats_mem(bp, &cpr->stats);
5222 
5223 		kfree(cpr->sw_stats);
5224 		cpr->sw_stats = NULL;
5225 	}
5226 }
5227 
5228 static int bnxt_alloc_stats(struct bnxt *bp)
5229 {
5230 	u32 size, i;
5231 	int rc;
5232 
5233 	size = bp->hw_ring_stats_size;
5234 
5235 	for (i = 0; i < bp->cp_nr_rings; i++) {
5236 		struct bnxt_napi *bnapi = bp->bnapi[i];
5237 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5238 
5239 		cpr->sw_stats = kzalloc_obj(*cpr->sw_stats);
5240 		if (!cpr->sw_stats)
5241 			return -ENOMEM;
5242 
5243 		cpr->stats.len = size;
5244 		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5245 		if (rc)
5246 			return rc;
5247 
5248 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5249 	}
5250 
5251 	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5252 		return 0;
5253 
5254 	if (bp->port_stats.hw_stats)
5255 		goto alloc_ext_stats;
5256 
5257 	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5258 	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5259 	if (rc)
5260 		return rc;
5261 
5262 	bp->flags |= BNXT_FLAG_PORT_STATS;
5263 
5264 alloc_ext_stats:
5265 	/* Display extended statistics only if FW supports it */
5266 	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5267 		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5268 			return 0;
5269 
5270 	if (bp->rx_port_stats_ext.hw_stats)
5271 		goto alloc_tx_ext_stats;
5272 
5273 	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5274 	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5275 	/* Extended stats are optional */
5276 	if (rc)
5277 		return 0;
5278 
5279 alloc_tx_ext_stats:
5280 	if (bp->tx_port_stats_ext.hw_stats)
5281 		return 0;
5282 
5283 	if (bp->hwrm_spec_code >= 0x10902 ||
5284 	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5285 		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5286 		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5287 		/* Extended stats are optional */
5288 		if (rc)
5289 			return 0;
5290 	}
5291 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5292 	return 0;
5293 }
5294 
5295 static void bnxt_clear_ring_indices(struct bnxt *bp)
5296 {
5297 	int i, j;
5298 
5299 	if (!bp->bnapi)
5300 		return;
5301 
5302 	for (i = 0; i < bp->cp_nr_rings; i++) {
5303 		struct bnxt_napi *bnapi = bp->bnapi[i];
5304 		struct bnxt_cp_ring_info *cpr;
5305 		struct bnxt_rx_ring_info *rxr;
5306 		struct bnxt_tx_ring_info *txr;
5307 
5308 		if (!bnapi)
5309 			continue;
5310 
5311 		cpr = &bnapi->cp_ring;
5312 		cpr->cp_raw_cons = 0;
5313 
5314 		bnxt_for_each_napi_tx(j, bnapi, txr) {
5315 			txr->tx_prod = 0;
5316 			txr->tx_cons = 0;
5317 			txr->tx_hw_cons = 0;
5318 		}
5319 
5320 		rxr = bnapi->rx_ring;
5321 		if (rxr) {
5322 			rxr->rx_prod = 0;
5323 			rxr->rx_agg_prod = 0;
5324 			rxr->rx_sw_agg_prod = 0;
5325 			rxr->rx_next_cons = 0;
5326 		}
5327 		bnapi->events = 0;
5328 	}
5329 }
5330 
5331 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5332 {
5333 	u8 type = fltr->type, flags = fltr->flags;
5334 
5335 	INIT_LIST_HEAD(&fltr->list);
5336 	if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5337 	    (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5338 		list_add_tail(&fltr->list, &bp->usr_fltr_list);
5339 }
5340 
5341 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5342 {
5343 	if (!list_empty(&fltr->list))
5344 		list_del_init(&fltr->list);
5345 }
5346 
5347 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5348 {
5349 	struct bnxt_filter_base *usr_fltr, *tmp;
5350 
5351 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5352 		if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5353 			continue;
5354 		bnxt_del_one_usr_fltr(bp, usr_fltr);
5355 	}
5356 }
5357 
5358 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5359 {
5360 	hlist_del(&fltr->hash);
5361 	bnxt_del_one_usr_fltr(bp, fltr);
5362 	if (fltr->flags) {
5363 		clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5364 		bp->ntp_fltr_count--;
5365 	}
5366 	kfree(fltr);
5367 }
5368 
5369 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5370 {
5371 	int i;
5372 
5373 	netdev_assert_locked_or_invisible(bp->dev);
5374 
5375 	/* We hold the netdev instance lock and all our NAPIs have been
5376 	 * disabled.  It's safe to delete the hash table.
5377 	 */
5378 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5379 		struct hlist_head *head;
5380 		struct hlist_node *tmp;
5381 		struct bnxt_ntuple_filter *fltr;
5382 
5383 		head = &bp->ntp_fltr_hash_tbl[i];
5384 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5385 			bnxt_del_l2_filter(bp, fltr->l2_fltr);
5386 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5387 				     !list_empty(&fltr->base.list)))
5388 				continue;
5389 			bnxt_del_fltr(bp, &fltr->base);
5390 		}
5391 	}
5392 	if (!all)
5393 		return;
5394 
5395 	bitmap_free(bp->ntp_fltr_bmap);
5396 	bp->ntp_fltr_bmap = NULL;
5397 	bp->ntp_fltr_count = 0;
5398 }
5399 
5400 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5401 {
5402 	int i, rc = 0;
5403 
5404 	if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5405 		return 0;
5406 
5407 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5408 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5409 
5410 	bp->ntp_fltr_count = 0;
5411 	bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5412 
5413 	if (!bp->ntp_fltr_bmap)
5414 		rc = -ENOMEM;
5415 
5416 	return rc;
5417 }
5418 
5419 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5420 {
5421 	int i;
5422 
5423 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5424 		struct hlist_head *head;
5425 		struct hlist_node *tmp;
5426 		struct bnxt_l2_filter *fltr;
5427 
5428 		head = &bp->l2_fltr_hash_tbl[i];
5429 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5430 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5431 				     !list_empty(&fltr->base.list)))
5432 				continue;
5433 			bnxt_del_fltr(bp, &fltr->base);
5434 		}
5435 	}
5436 }
5437 
5438 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5439 {
5440 	int i;
5441 
5442 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5443 		INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5444 	get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5445 }
5446 
5447 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5448 {
5449 	bnxt_free_vnic_attributes(bp);
5450 	bnxt_free_tx_rings(bp);
5451 	bnxt_free_rx_rings(bp);
5452 	bnxt_free_cp_rings(bp);
5453 	bnxt_free_all_cp_arrays(bp);
5454 	bnxt_free_ntp_fltrs(bp, false);
5455 	bnxt_free_l2_filters(bp, false);
5456 	if (irq_re_init) {
5457 		bnxt_free_ring_stats(bp);
5458 		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5459 		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5460 			bnxt_free_port_stats(bp);
5461 		bnxt_free_ring_grps(bp);
5462 		bnxt_free_vnics(bp);
5463 		kfree(bp->tx_ring_map);
5464 		bp->tx_ring_map = NULL;
5465 		kfree(bp->tx_ring);
5466 		bp->tx_ring = NULL;
5467 		kfree(bp->rx_ring);
5468 		bp->rx_ring = NULL;
5469 		kfree(bp->bnapi);
5470 		bp->bnapi = NULL;
5471 	} else {
5472 		bnxt_clear_ring_indices(bp);
5473 	}
5474 }
5475 
5476 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5477 {
5478 	int i, j, rc, size, arr_size;
5479 	void *bnapi;
5480 
5481 	if (irq_re_init) {
5482 		/* Allocate the bnapi pointer array and the bnxt_napi memory
5483 		 * block for all queues in a single allocation.
5484 		 */
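		/* Layout of that single allocation: the cache-aligned array of
		 * bnxt_napi pointers, followed by one cache-aligned bnxt_napi
		 * struct per completion ring.
		 */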
5485 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5486 				bp->cp_nr_rings);
5487 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5488 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5489 		if (!bnapi)
5490 			return -ENOMEM;
5491 
5492 		bp->bnapi = bnapi;
5493 		bnapi += arr_size;
5494 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5495 			bp->bnapi[i] = bnapi;
5496 			bp->bnapi[i]->index = i;
5497 			bp->bnapi[i]->bp = bp;
5498 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5499 				struct bnxt_cp_ring_info *cpr =
5500 					&bp->bnapi[i]->cp_ring;
5501 
5502 				cpr->cp_ring_struct.ring_mem.flags =
5503 					BNXT_RMEM_RING_PTE_FLAG;
5504 			}
5505 		}
5506 
5507 		bp->rx_ring = kzalloc_objs(struct bnxt_rx_ring_info,
5508 					   bp->rx_nr_rings);
5509 		if (!bp->rx_ring)
5510 			return -ENOMEM;
5511 
5512 		for (i = 0; i < bp->rx_nr_rings; i++) {
5513 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5514 
5515 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5516 				rxr->rx_ring_struct.ring_mem.flags =
5517 					BNXT_RMEM_RING_PTE_FLAG;
5518 				rxr->rx_agg_ring_struct.ring_mem.flags =
5519 					BNXT_RMEM_RING_PTE_FLAG;
5520 			} else {
5521 				rxr->rx_cpr =  &bp->bnapi[i]->cp_ring;
5522 			}
5523 			rxr->bnapi = bp->bnapi[i];
5524 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5525 		}
5526 
5527 		bp->tx_ring = kzalloc_objs(struct bnxt_tx_ring_info,
5528 					   bp->tx_nr_rings);
5529 		if (!bp->tx_ring)
5530 			return -ENOMEM;
5531 
5532 		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5533 					  GFP_KERNEL);
5534 
5535 		if (!bp->tx_ring_map)
5536 			return -ENOMEM;
5537 
5538 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5539 			j = 0;
5540 		else
5541 			j = bp->rx_nr_rings;
5542 
5543 		for (i = 0; i < bp->tx_nr_rings; i++) {
5544 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5545 			struct bnxt_napi *bnapi2;
5546 
5547 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5548 				txr->tx_ring_struct.ring_mem.flags =
5549 					BNXT_RMEM_RING_PTE_FLAG;
5550 			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5551 			if (i >= bp->tx_nr_rings_xdp) {
5552 				int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5553 
5554 				bnapi2 = bp->bnapi[k];
5555 				txr->txq_index = i - bp->tx_nr_rings_xdp;
5556 				txr->tx_napi_idx =
5557 					BNXT_RING_TO_TC(bp, txr->txq_index);
5558 				bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5559 				bnapi2->tx_int = bnxt_tx_int;
5560 			} else {
5561 				bnapi2 = bp->bnapi[j];
5562 				bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5563 				bnapi2->tx_ring[0] = txr;
5564 				bnapi2->tx_int = bnxt_tx_int_xdp;
5565 				j++;
5566 			}
5567 			txr->bnapi = bnapi2;
5568 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5569 				txr->tx_cpr = &bnapi2->cp_ring;
5570 		}
5571 
5572 		rc = bnxt_alloc_stats(bp);
5573 		if (rc)
5574 			goto alloc_mem_err;
5575 		bnxt_init_stats(bp);
5576 
5577 		rc = bnxt_alloc_ntp_fltrs(bp);
5578 		if (rc)
5579 			goto alloc_mem_err;
5580 
5581 		rc = bnxt_alloc_vnics(bp);
5582 		if (rc)
5583 			goto alloc_mem_err;
5584 	}
5585 
5586 	rc = bnxt_alloc_all_cp_arrays(bp);
5587 	if (rc)
5588 		goto alloc_mem_err;
5589 
5590 	bnxt_init_ring_struct(bp);
5591 
5592 	rc = bnxt_alloc_rx_rings(bp);
5593 	if (rc)
5594 		goto alloc_mem_err;
5595 
5596 	rc = bnxt_alloc_tx_rings(bp);
5597 	if (rc)
5598 		goto alloc_mem_err;
5599 
5600 	rc = bnxt_alloc_cp_rings(bp);
5601 	if (rc)
5602 		goto alloc_mem_err;
5603 
5604 	bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5605 						  BNXT_VNIC_MCAST_FLAG |
5606 						  BNXT_VNIC_UCAST_FLAG;
5607 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5608 		bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5609 			BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5610 
5611 	rc = bnxt_alloc_vnic_attributes(bp);
5612 	if (rc)
5613 		goto alloc_mem_err;
5614 	return 0;
5615 
5616 alloc_mem_err:
5617 	bnxt_free_mem(bp, true);
5618 	return rc;
5619 }
5620 
5621 static void bnxt_disable_int(struct bnxt *bp)
5622 {
5623 	int i;
5624 
5625 	if (!bp->bnapi)
5626 		return;
5627 
5628 	for (i = 0; i < bp->cp_nr_rings; i++) {
5629 		struct bnxt_napi *bnapi = bp->bnapi[i];
5630 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5631 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5632 
5633 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
5634 			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5635 	}
5636 }
5637 
5638 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5639 {
5640 	struct bnxt_napi *bnapi = bp->bnapi[n];
5641 	struct bnxt_cp_ring_info *cpr;
5642 
5643 	cpr = &bnapi->cp_ring;
5644 	return cpr->cp_ring_struct.map_idx;
5645 }
5646 
5647 static void bnxt_disable_int_sync(struct bnxt *bp)
5648 {
5649 	int i;
5650 
5651 	if (!bp->irq_tbl)
5652 		return;
5653 
5654 	atomic_inc(&bp->intr_sem);
5655 
5656 	bnxt_disable_int(bp);
5657 	for (i = 0; i < bp->cp_nr_rings; i++) {
5658 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5659 
5660 		synchronize_irq(bp->irq_tbl[map_idx].vector);
5661 	}
5662 }
5663 
5664 static void bnxt_enable_int(struct bnxt *bp)
5665 {
5666 	int i;
5667 
5668 	atomic_set(&bp->intr_sem, 0);
5669 	for (i = 0; i < bp->cp_nr_rings; i++) {
5670 		struct bnxt_napi *bnapi = bp->bnapi[i];
5671 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5672 
5673 		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5674 	}
5675 }
5676 
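/* Register the driver with the firmware: advertise the driver version and
 * capabilities, request forwarding of the listed VF commands to the PF, and
 * enable delivery of the async events the driver handles.  @bmap, if
 * non-NULL, supplies additional async event IDs to enable; @async_only
 * restricts the update to the async event forwarding mask.
 */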
5677 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5678 			    bool async_only)
5679 {
5680 	DECLARE_BITMAP(async_events_bmap, 256);
5681 	u32 *events = (u32 *)async_events_bmap;
5682 	struct hwrm_func_drv_rgtr_output *resp;
5683 	struct hwrm_func_drv_rgtr_input *req;
5684 	u32 flags;
5685 	int rc, i;
5686 
5687 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5688 	if (rc)
5689 		return rc;
5690 
5691 	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5692 				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
5693 				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5694 
5695 	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5696 	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5697 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5698 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5699 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5700 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5701 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5702 	if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5703 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5704 	req->flags = cpu_to_le32(flags);
5705 	req->ver_maj_8b = DRV_VER_MAJ;
5706 	req->ver_min_8b = DRV_VER_MIN;
5707 	req->ver_upd_8b = DRV_VER_UPD;
5708 	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5709 	req->ver_min = cpu_to_le16(DRV_VER_MIN);
5710 	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5711 
5712 	if (BNXT_PF(bp)) {
5713 		u32 data[8];
5714 		int i;
5715 
5716 		memset(data, 0, sizeof(data));
5717 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5718 			u16 cmd = bnxt_vf_req_snif[i];
5719 			unsigned int bit, idx;
5720 
5721 			if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
5722 			    cmd == HWRM_PORT_PHY_QCFG)
5723 				continue;
5724 
5725 			idx = cmd / 32;
5726 			bit = cmd % 32;
5727 			data[idx] |= 1 << bit;
5728 		}
5729 
5730 		for (i = 0; i < 8; i++)
5731 			req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5732 
5733 		req->enables |=
5734 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5735 	}
5736 
5737 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5738 		req->flags |= cpu_to_le32(
5739 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5740 
5741 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
5742 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5743 		u16 event_id = bnxt_async_events_arr[i];
5744 
5745 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5746 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5747 			continue;
5748 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5749 		    !bp->ptp_cfg)
5750 			continue;
5751 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
5752 	}
5753 	if (bmap && bmap_size) {
5754 		for (i = 0; i < bmap_size; i++) {
5755 			if (test_bit(i, bmap))
5756 				__set_bit(i, async_events_bmap);
5757 		}
5758 	}
5759 	for (i = 0; i < 8; i++)
5760 		req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5761 
5762 	if (async_only)
5763 		req->enables =
5764 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5765 
5766 	resp = hwrm_req_hold(bp, req);
5767 	rc = hwrm_req_send(bp, req);
5768 	if (!rc) {
5769 		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5770 		if (resp->flags &
5771 		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5772 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5773 	}
5774 	hwrm_req_drop(bp, req);
5775 	return rc;
5776 }
5777 
5778 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5779 {
5780 	struct hwrm_func_drv_unrgtr_input *req;
5781 	int rc;
5782 
5783 	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5784 		return 0;
5785 
5786 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5787 	if (rc)
5788 		return rc;
5789 	return hwrm_req_send(bp, req);
5790 }
5791 
5792 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5793 
5794 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5795 {
5796 	struct hwrm_tunnel_dst_port_free_input *req;
5797 	int rc;
5798 
5799 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5800 	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5801 		return 0;
5802 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5803 	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5804 		return 0;
5805 
5806 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5807 	if (rc)
5808 		return rc;
5809 
5810 	req->tunnel_type = tunnel_type;
5811 
5812 	switch (tunnel_type) {
5813 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5814 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5815 		bp->vxlan_port = 0;
5816 		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5817 		break;
5818 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5819 		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5820 		bp->nge_port = 0;
5821 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5822 		break;
5823 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5824 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5825 		bp->vxlan_gpe_port = 0;
5826 		bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5827 		break;
5828 	default:
5829 		break;
5830 	}
5831 
5832 	rc = hwrm_req_send(bp, req);
5833 	if (rc)
5834 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5835 			   rc);
5836 	if (bp->flags & BNXT_FLAG_TPA)
5837 		bnxt_set_tpa(bp, true);
5838 	return rc;
5839 }
5840 
5841 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5842 					   u8 tunnel_type)
5843 {
5844 	struct hwrm_tunnel_dst_port_alloc_output *resp;
5845 	struct hwrm_tunnel_dst_port_alloc_input *req;
5846 	int rc;
5847 
5848 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5849 	if (rc)
5850 		return rc;
5851 
5852 	req->tunnel_type = tunnel_type;
5853 	req->tunnel_dst_port_val = port;
5854 
5855 	resp = hwrm_req_hold(bp, req);
5856 	rc = hwrm_req_send(bp, req);
5857 	if (rc) {
5858 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5859 			   rc);
5860 		goto err_out;
5861 	}
5862 
5863 	switch (tunnel_type) {
5864 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5865 		bp->vxlan_port = port;
5866 		bp->vxlan_fw_dst_port_id =
5867 			le16_to_cpu(resp->tunnel_dst_port_id);
5868 		break;
5869 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5870 		bp->nge_port = port;
5871 		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5872 		break;
5873 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5874 		bp->vxlan_gpe_port = port;
5875 		bp->vxlan_gpe_fw_dst_port_id =
5876 			le16_to_cpu(resp->tunnel_dst_port_id);
5877 		break;
5878 	default:
5879 		break;
5880 	}
5881 	if (bp->flags & BNXT_FLAG_TPA)
5882 		bnxt_set_tpa(bp, true);
5883 
5884 err_out:
5885 	hwrm_req_drop(bp, req);
5886 	return rc;
5887 }
5888 
5889 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5890 {
5891 	struct hwrm_cfa_l2_set_rx_mask_input *req;
5892 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5893 	int rc;
5894 
5895 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5896 	if (rc)
5897 		return rc;
5898 
5899 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5900 	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5901 		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5902 		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5903 	}
5904 	req->mask = cpu_to_le32(vnic->rx_mask);
5905 	return hwrm_req_send_silent(bp, req);
5906 }
5907 
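/* Drop a reference to an L2 filter.  When the last reference goes away, the
 * filter is unlinked from its hash bucket and from the user filter list under
 * ntp_fltr_lock, and freed after an RCU grace period.
 */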
5908 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5909 {
5910 	if (!atomic_dec_and_test(&fltr->refcnt))
5911 		return;
5912 	spin_lock_bh(&bp->ntp_fltr_lock);
5913 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5914 		spin_unlock_bh(&bp->ntp_fltr_lock);
5915 		return;
5916 	}
5917 	hlist_del_rcu(&fltr->base.hash);
5918 	bnxt_del_one_usr_fltr(bp, &fltr->base);
5919 	if (fltr->base.flags) {
5920 		clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5921 		bp->ntp_fltr_count--;
5922 	}
5923 	spin_unlock_bh(&bp->ntp_fltr_lock);
5924 	kfree_rcu(fltr, base.rcu);
5925 }
5926 
5927 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5928 						      struct bnxt_l2_key *key,
5929 						      u32 idx)
5930 {
5931 	struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5932 	struct bnxt_l2_filter *fltr;
5933 
5934 	hlist_for_each_entry_rcu(fltr, head, base.hash) {
5935 		struct bnxt_l2_key *l2_key = &fltr->l2_key;
5936 
5937 		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5938 		    l2_key->vlan == key->vlan)
5939 			return fltr;
5940 	}
5941 	return NULL;
5942 }
5943 
5944 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5945 						    struct bnxt_l2_key *key,
5946 						    u32 idx)
5947 {
5948 	struct bnxt_l2_filter *fltr = NULL;
5949 
5950 	rcu_read_lock();
5951 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5952 	if (fltr)
5953 		atomic_inc(&fltr->refcnt);
5954 	rcu_read_unlock();
5955 	return fltr;
5956 }
5957 
5958 #define BNXT_IPV4_4TUPLE(bp, fkeys)					\
5959 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5960 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||	\
5961 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5962 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5963 
5964 #define BNXT_IPV6_4TUPLE(bp, fkeys)					\
5965 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5966 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||	\
5967 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5968 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5969 
5970 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5971 {
5972 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5973 		if (BNXT_IPV4_4TUPLE(bp, fkeys))
5974 			return sizeof(fkeys->addrs.v4addrs) +
5975 			       sizeof(fkeys->ports);
5976 
5977 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5978 			return sizeof(fkeys->addrs.v4addrs);
5979 	}
5980 
5981 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5982 		if (BNXT_IPV6_4TUPLE(bp, fkeys))
5983 			return sizeof(fkeys->addrs.v6addrs) +
5984 			       sizeof(fkeys->ports);
5985 
5986 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5987 			return sizeof(fkeys->addrs.v6addrs);
5988 	}
5989 
5990 	return 0;
5991 }
5992 
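/* Software Toeplitz hash of the flow's address/port tuple.  The 64-bit key
 * window (seeded from bp->toeplitz_prefix) is shifted left one bit per input
 * bit, XORed into the hash whenever the input bit is set, and refilled with
 * the next key byte (starting at key[8]) after each input byte.  The valid
 * hash accumulates in the upper 32 bits and is masked down to an ntuple
 * filter hash bucket index.
 */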
5993 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5994 			 const unsigned char *key)
5995 {
5996 	u64 prefix = bp->toeplitz_prefix, hash = 0;
5997 	struct bnxt_ipv4_tuple tuple4;
5998 	struct bnxt_ipv6_tuple tuple6;
5999 	int i, j, len = 0;
6000 	u8 *four_tuple;
6001 
6002 	len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
6003 	if (!len)
6004 		return 0;
6005 
6006 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6007 		tuple4.v4addrs = fkeys->addrs.v4addrs;
6008 		tuple4.ports = fkeys->ports;
6009 		four_tuple = (unsigned char *)&tuple4;
6010 	} else {
6011 		tuple6.v6addrs = fkeys->addrs.v6addrs;
6012 		tuple6.ports = fkeys->ports;
6013 		four_tuple = (unsigned char *)&tuple6;
6014 	}
6015 
6016 	for (i = 0, j = 8; i < len; i++, j++) {
6017 		u8 byte = four_tuple[i];
6018 		int bit;
6019 
6020 		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
6021 			if (byte & 0x80)
6022 				hash ^= prefix;
6023 		}
6024 		prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
6025 	}
6026 
6027 	/* The valid part of the hash is in the upper 32 bits. */
6028 	return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
6029 }
6030 
6031 #ifdef CONFIG_RFS_ACCEL
6032 static struct bnxt_l2_filter *
6033 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
6034 {
6035 	struct bnxt_l2_filter *fltr;
6036 	u32 idx;
6037 
6038 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6039 	      BNXT_L2_FLTR_HASH_MASK;
6040 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
6041 	return fltr;
6042 }
6043 #endif
6044 
6045 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6046 			       struct bnxt_l2_key *key, u32 idx)
6047 {
6048 	struct hlist_head *head;
6049 
6050 	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6051 	fltr->l2_key.vlan = key->vlan;
6052 	fltr->base.type = BNXT_FLTR_TYPE_L2;
6053 	if (fltr->base.flags) {
6054 		int bit_id;
6055 
6056 		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6057 						 bp->max_fltr, 0);
6058 		if (bit_id < 0)
6059 			return -ENOMEM;
6060 		fltr->base.sw_id = (u16)bit_id;
6061 		bp->ntp_fltr_count++;
6062 	}
6063 	head = &bp->l2_fltr_hash_tbl[idx];
6064 	hlist_add_head_rcu(&fltr->base.hash, head);
6065 	bnxt_insert_usr_fltr(bp, &fltr->base);
6066 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6067 	atomic_set(&fltr->refcnt, 1);
6068 	return 0;
6069 }
6070 
6071 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6072 						   struct bnxt_l2_key *key,
6073 						   gfp_t gfp)
6074 {
6075 	struct bnxt_l2_filter *fltr;
6076 	u32 idx;
6077 	int rc;
6078 
6079 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6080 	      BNXT_L2_FLTR_HASH_MASK;
6081 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
6082 	if (fltr)
6083 		return fltr;
6084 
6085 	fltr = kzalloc_obj(*fltr, gfp);
6086 	if (!fltr)
6087 		return ERR_PTR(-ENOMEM);
6088 	spin_lock_bh(&bp->ntp_fltr_lock);
6089 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6090 	spin_unlock_bh(&bp->ntp_fltr_lock);
6091 	if (rc) {
6092 		bnxt_del_l2_filter(bp, fltr);
6093 		fltr = ERR_PTR(rc);
6094 	}
6095 	return fltr;
6096 }
6097 
6098 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6099 						struct bnxt_l2_key *key,
6100 						u16 flags)
6101 {
6102 	struct bnxt_l2_filter *fltr;
6103 	u32 idx;
6104 	int rc;
6105 
6106 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6107 	      BNXT_L2_FLTR_HASH_MASK;
6108 	spin_lock_bh(&bp->ntp_fltr_lock);
6109 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6110 	if (fltr) {
6111 		fltr = ERR_PTR(-EEXIST);
6112 		goto l2_filter_exit;
6113 	}
6114 	fltr = kzalloc_obj(*fltr, GFP_ATOMIC);
6115 	if (!fltr) {
6116 		fltr = ERR_PTR(-ENOMEM);
6117 		goto l2_filter_exit;
6118 	}
6119 	fltr->base.flags = flags;
6120 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6121 	if (rc) {
6122 		spin_unlock_bh(&bp->ntp_fltr_lock);
6123 		bnxt_del_l2_filter(bp, fltr);
6124 		return ERR_PTR(rc);
6125 	}
6126 
6127 l2_filter_exit:
6128 	spin_unlock_bh(&bp->ntp_fltr_lock);
6129 	return fltr;
6130 }
6131 
6132 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6133 {
6134 #ifdef CONFIG_BNXT_SRIOV
6135 	struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6136 
6137 	return vf->fw_fid;
6138 #else
6139 	return INVALID_HW_RING_ID;
6140 #endif
6141 }
6142 
6143 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6144 {
6145 	struct hwrm_cfa_l2_filter_free_input *req;
6146 	u16 target_id = 0xffff;
6147 	int rc;
6148 
6149 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6150 		struct bnxt_pf_info *pf = &bp->pf;
6151 
6152 		if (fltr->base.vf_idx >= pf->active_vfs)
6153 			return -EINVAL;
6154 
6155 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6156 		if (target_id == INVALID_HW_RING_ID)
6157 			return -EINVAL;
6158 	}
6159 
6160 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6161 	if (rc)
6162 		return rc;
6163 
6164 	req->target_id = cpu_to_le16(target_id);
6165 	req->l2_filter_id = fltr->base.filter_id;
6166 	return hwrm_req_send(bp, req);
6167 }
6168 
6169 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6170 {
6171 	struct hwrm_cfa_l2_filter_alloc_output *resp;
6172 	struct hwrm_cfa_l2_filter_alloc_input *req;
6173 	u16 target_id = 0xffff;
6174 	int rc;
6175 
6176 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6177 		struct bnxt_pf_info *pf = &bp->pf;
6178 
6179 		if (fltr->base.vf_idx >= pf->active_vfs)
6180 			return -EINVAL;
6181 
6182 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6183 	}
6184 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6185 	if (rc)
6186 		return rc;
6187 
6188 	req->target_id = cpu_to_le16(target_id);
6189 	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6190 
6191 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6192 		req->flags |=
6193 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6194 	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6195 	req->enables =
6196 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6197 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6198 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6199 	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6200 	eth_broadcast_addr(req->l2_addr_mask);
6201 
6202 	if (fltr->l2_key.vlan) {
6203 		req->enables |=
6204 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6205 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6206 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6207 		req->num_vlans = 1;
6208 		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6209 		req->l2_ivlan_mask = cpu_to_le16(0xfff);
6210 	}
6211 
6212 	resp = hwrm_req_hold(bp, req);
6213 	rc = hwrm_req_send(bp, req);
6214 	if (!rc) {
6215 		fltr->base.filter_id = resp->l2_filter_id;
6216 		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6217 	}
6218 	hwrm_req_drop(bp, req);
6219 	return rc;
6220 }
6221 
6222 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6223 				     struct bnxt_ntuple_filter *fltr)
6224 {
6225 	struct hwrm_cfa_ntuple_filter_free_input *req;
6226 	int rc;
6227 
6228 	set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6229 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
6230 		return 0;
6231 
6232 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6233 	if (rc)
6234 		return rc;
6235 
6236 	req->ntuple_filter_id = fltr->base.filter_id;
6237 	return hwrm_req_send(bp, req);
6238 }
6239 
6240 #define BNXT_NTP_FLTR_FLAGS					\
6241 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
6242 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
6243 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
6244 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
6245 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
6246 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
6247 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
6248 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
6249 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
6250 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
6251 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
6252 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
6253 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6254 
6255 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
6256 		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6257 
6258 void bnxt_fill_ipv6_mask(__be32 mask[4])
6259 {
6260 	int i;
6261 
6262 	for (i = 0; i < 4; i++)
6263 		mask[i] = cpu_to_be32(~0);
6264 }
6265 
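/* Pick the destination for an ntuple (RFS) filter: the VNIC of the attached
 * ethtool RSS context if one is set, otherwise the dedicated ntuple VNIC with
 * an explicit RSS ring table index, or, when the ntuple VNIC is not
 * supported, the RX ring itself via the DEST_RFS_RING_IDX flag.
 */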
6266 static void
6267 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6268 			  struct hwrm_cfa_ntuple_filter_alloc_input *req,
6269 			  struct bnxt_ntuple_filter *fltr)
6270 {
6271 	u16 rxq = fltr->base.rxq;
6272 
6273 	if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6274 		struct ethtool_rxfh_context *ctx;
6275 		struct bnxt_rss_ctx *rss_ctx;
6276 		struct bnxt_vnic_info *vnic;
6277 
6278 		ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6279 			      fltr->base.fw_vnic_id);
6280 		if (ctx) {
6281 			rss_ctx = ethtool_rxfh_context_priv(ctx);
6282 			vnic = &rss_ctx->vnic;
6283 
6284 			req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6285 		}
6286 		return;
6287 	}
6288 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6289 		struct bnxt_vnic_info *vnic;
6290 		u32 enables;
6291 
6292 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6293 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6294 		enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6295 		req->enables |= cpu_to_le32(enables);
6296 		req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6297 	} else {
6298 		u32 flags;
6299 
6300 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6301 		req->flags |= cpu_to_le32(flags);
6302 		req->dst_id = cpu_to_le16(rxq);
6303 	}
6304 }
6305 
6306 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6307 				      struct bnxt_ntuple_filter *fltr)
6308 {
6309 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6310 	struct hwrm_cfa_ntuple_filter_alloc_input *req;
6311 	struct bnxt_flow_masks *masks = &fltr->fmasks;
6312 	struct flow_keys *keys = &fltr->fkeys;
6313 	struct bnxt_l2_filter *l2_fltr;
6314 	struct bnxt_vnic_info *vnic;
6315 	int rc;
6316 
6317 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6318 	if (rc)
6319 		return rc;
6320 
6321 	l2_fltr = fltr->l2_fltr;
6322 	req->l2_filter_id = l2_fltr->base.filter_id;
6323 
6324 	if (fltr->base.flags & BNXT_ACT_DROP) {
6325 		req->flags =
6326 			cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6327 	} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6328 		bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6329 	} else {
6330 		vnic = &bp->vnic_info[fltr->base.rxq + 1];
6331 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6332 	}
6333 	req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6334 
6335 	req->ethertype = htons(ETH_P_IP);
6336 	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6337 	req->ip_protocol = keys->basic.ip_proto;
6338 
6339 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6340 		req->ethertype = htons(ETH_P_IPV6);
6341 		req->ip_addr_type =
6342 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6343 		*(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6344 		*(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6345 		*(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6346 		*(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6347 	} else {
6348 		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6349 		req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6350 		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6351 		req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6352 	}
6353 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6354 		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6355 		req->tunnel_type =
6356 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6357 	}
6358 
6359 	req->src_port = keys->ports.src;
6360 	req->src_port_mask = masks->ports.src;
6361 	req->dst_port = keys->ports.dst;
6362 	req->dst_port_mask = masks->ports.dst;
6363 
6364 	resp = hwrm_req_hold(bp, req);
6365 	rc = hwrm_req_send(bp, req);
6366 	if (!rc)
6367 		fltr->base.filter_id = resp->ntuple_filter_id;
6368 	hwrm_req_drop(bp, req);
6369 	return rc;
6370 }
6371 
6372 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6373 				     const u8 *mac_addr)
6374 {
6375 	struct bnxt_l2_filter *fltr;
6376 	struct bnxt_l2_key key;
6377 	int rc;
6378 
6379 	ether_addr_copy(key.dst_mac_addr, mac_addr);
6380 	key.vlan = 0;
6381 	fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6382 	if (IS_ERR(fltr))
6383 		return PTR_ERR(fltr);
6384 
6385 	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6386 	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6387 	if (rc)
6388 		bnxt_del_l2_filter(bp, fltr);
6389 	else
6390 		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6391 	return rc;
6392 }
6393 
6394 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6395 {
6396 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6397 
6398 	/* Any associated ntuple filters will also be cleared by firmware. */
6399 	for (i = 0; i < num_of_vnics; i++) {
6400 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6401 
6402 		for (j = 0; j < vnic->uc_filter_count; j++) {
6403 			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6404 
6405 			bnxt_hwrm_l2_filter_free(bp, fltr);
6406 			bnxt_del_l2_filter(bp, fltr);
6407 		}
6408 		vnic->uc_filter_count = 0;
6409 	}
6410 }
6411 
6412 #define BNXT_DFLT_TUNL_TPA_BMAP				\
6413 	(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |	\
6414 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |	\
6415 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6416 
6417 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6418 					   struct hwrm_vnic_tpa_cfg_input *req)
6419 {
6420 	u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6421 
6422 	if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6423 		return;
6424 
6425 	if (bp->vxlan_port)
6426 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6427 	if (bp->vxlan_gpe_port)
6428 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6429 	if (bp->nge_port)
6430 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6431 
6432 	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6433 	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6434 }
6435 
6436 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6437 			   u32 tpa_flags)
6438 {
6439 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6440 	struct hwrm_vnic_tpa_cfg_input *req;
6441 	int rc;
6442 
6443 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6444 		return 0;
6445 
6446 	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6447 	if (rc)
6448 		return rc;
6449 
6450 	if (tpa_flags) {
6451 		u16 mss = bp->dev->mtu - 40;
6452 		u32 nsegs, n, segs = 0, flags;
6453 
6454 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6455 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6456 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6457 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6458 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6459 		if (tpa_flags & BNXT_FLAG_GRO)
6460 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6461 
6462 		req->flags = cpu_to_le32(flags);
6463 
6464 		req->enables =
6465 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6466 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6467 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6468 
6469 		/* The number of segs is in log2 units, and the first packet
6470 		 * is not counted as part of these units.
6471 		 */
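		/* Illustrative example (assuming a 1500-byte MTU, i.e. an mss
		 * of 1460, a 4K BNXT_RX_PAGE_SIZE and the default
		 * MAX_SKB_FRAGS of 17): n = 4096 / 1460 = 2,
		 * nsegs = 16 * 2 = 32, so max_agg_segs = ilog2(32) = 5 on
		 * pre-P5 chips.
		 */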
6472 		if (mss <= BNXT_RX_PAGE_SIZE) {
6473 			n = BNXT_RX_PAGE_SIZE / mss;
6474 			nsegs = (MAX_SKB_FRAGS - 1) * n;
6475 		} else {
6476 			n = mss / BNXT_RX_PAGE_SIZE;
6477 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
6478 				n++;
6479 			nsegs = (MAX_SKB_FRAGS - n) / n;
6480 		}
6481 
6482 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6483 			segs = MAX_TPA_SEGS_P5;
6484 			max_aggs = bp->max_tpa;
6485 		} else {
6486 			segs = ilog2(nsegs);
6487 		}
6488 		req->max_agg_segs = cpu_to_le16(segs);
6489 		req->max_aggs = cpu_to_le16(max_aggs);
6490 
6491 		req->min_agg_len = cpu_to_le32(512);
6492 		bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6493 	}
6494 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6495 
6496 	return hwrm_req_send(bp, req);
6497 }
6498 
6499 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6500 {
6501 	struct bnxt_ring_grp_info *grp_info;
6502 
6503 	grp_info = &bp->grp_info[ring->grp_idx];
6504 	return grp_info->cp_fw_ring_id;
6505 }
6506 
6507 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6508 {
6509 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6510 		return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6511 	else
6512 		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6513 }
6514 
6515 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6516 {
6517 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6518 		return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6519 	else
6520 		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6521 }
6522 
6523 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6524 {
6525 	int entries;
6526 
6527 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6528 		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6529 	else
6530 		entries = HW_HASH_INDEX_SIZE;
6531 
6532 	bp->rss_indir_tbl_entries = entries;
6533 	bp->rss_indir_tbl =
6534 		kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6535 	if (!bp->rss_indir_tbl)
6536 		return -ENOMEM;
6537 
6538 	return 0;
6539 }
6540 
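/* Populate the RSS indirection table (the ethtool context's table if
 * @rss_ctx is set, otherwise the default one) with the standard round-robin
 * spread across the RX rings, and zero any trailing entries beyond the
 * usable table size.
 */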
6541 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6542 				 struct ethtool_rxfh_context *rss_ctx)
6543 {
6544 	u16 max_rings, max_entries, pad, i;
6545 	u32 *rss_indir_tbl;
6546 
6547 	if (!bp->rx_nr_rings)
6548 		return;
6549 
6550 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6551 		max_rings = bp->rx_nr_rings - 1;
6552 	else
6553 		max_rings = bp->rx_nr_rings;
6554 
6555 	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6556 	if (rss_ctx)
6557 		rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6558 	else
6559 		rss_indir_tbl = &bp->rss_indir_tbl[0];
6560 
6561 	for (i = 0; i < max_entries; i++)
6562 		rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6563 
6564 	pad = bp->rss_indir_tbl_entries - max_entries;
6565 	if (pad)
6566 		memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6567 }
6568 
6569 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6570 {
6571 	u32 i, tbl_size, max_ring = 0;
6572 
6573 	if (!bp->rss_indir_tbl)
6574 		return 0;
6575 
6576 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6577 	for (i = 0; i < tbl_size; i++)
6578 		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6579 	return max_ring;
6580 }
6581 
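/* Number of RSS contexts needed to cover @rx_rings RX rings: P5+ chips use
 * roughly one context per BNXT_RSS_TABLE_ENTRIES_P5 rings (or the chip
 * maximum when large RSS contexts are supported), Nitro A0 needs two, and
 * all other chips need one.
 */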
6582 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6583 {
6584 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6585 		if (!rx_rings)
6586 			return 0;
6587 		if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
6588 			return BNXT_RSS_TABLE_MAX_TBL_P5;
6589 
6590 		return bnxt_calc_nr_ring_pages(rx_rings - 1,
6591 					       BNXT_RSS_TABLE_ENTRIES_P5);
6592 	}
6593 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6594 		return 2;
6595 	return 1;
6596 }
6597 
6598 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6599 {
6600 	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6601 	u16 i, j;
6602 
6603 	/* Fill the RSS indirection table with ring group ids */
6604 	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6605 		if (!no_rss)
6606 			j = bp->rss_indir_tbl[i];
6607 		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6608 	}
6609 }
6610 
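/* On P5+ chips each RSS indirection entry is a pair of ring IDs: the RX
 * ring's firmware ring ID followed by its completion ring ID.  The RX ring is
 * chosen from the default ntuple spread, the ethtool RSS context table, or
 * the main indirection table, depending on the VNIC type.
 */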
6611 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6612 				    struct bnxt_vnic_info *vnic)
6613 {
6614 	__le16 *ring_tbl = vnic->rss_table;
6615 	struct bnxt_rx_ring_info *rxr;
6616 	u16 tbl_size, i;
6617 
6618 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6619 
6620 	for (i = 0; i < tbl_size; i++) {
6621 		u16 ring_id, j;
6622 
6623 		if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6624 			j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6625 		else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6626 			j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6627 		else
6628 			j = bp->rss_indir_tbl[i];
6629 		rxr = &bp->rx_ring[j];
6630 
6631 		ring_id = rxr->rx_ring_struct.fw_ring_id;
6632 		*ring_tbl++ = cpu_to_le16(ring_id);
6633 		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6634 		*ring_tbl++ = cpu_to_le16(ring_id);
6635 	}
6636 }
6637 
6638 static void
6639 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6640 			 struct bnxt_vnic_info *vnic)
6641 {
6642 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6643 		bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6644 		if (bp->flags & BNXT_FLAG_CHIP_P7)
6645 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6646 	} else {
6647 		bnxt_fill_hw_rss_tbl(bp, vnic);
6648 	}
6649 
6650 	if (bp->rss_hash_delta) {
6651 		req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6652 		if (bp->rss_hash_cfg & bp->rss_hash_delta)
6653 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6654 		else
6655 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6656 	} else {
6657 		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6658 	}
6659 	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6660 	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6661 	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6662 }
6663 
6664 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6665 				  bool set_rss)
6666 {
6667 	struct hwrm_vnic_rss_cfg_input *req;
6668 	int rc;
6669 
6670 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6671 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6672 		return 0;
6673 
6674 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6675 	if (rc)
6676 		return rc;
6677 
6678 	if (set_rss)
6679 		__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6680 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6681 	return hwrm_req_send(bp, req);
6682 }
6683 
6684 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6685 				     struct bnxt_vnic_info *vnic, bool set_rss)
6686 {
6687 	struct hwrm_vnic_rss_cfg_input *req;
6688 	dma_addr_t ring_tbl_map;
6689 	u32 i, nr_ctxs;
6690 	int rc;
6691 
6692 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6693 	if (rc)
6694 		return rc;
6695 
6696 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6697 	if (!set_rss)
6698 		return hwrm_req_send(bp, req);
6699 
6700 	__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6701 	ring_tbl_map = vnic->rss_table_dma_addr;
6702 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6703 
6704 	hwrm_req_hold(bp, req);
6705 	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6706 		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6707 		req->ring_table_pair_index = i;
6708 		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6709 		rc = hwrm_req_send(bp, req);
6710 		if (rc)
6711 			goto exit;
6712 	}
6713 
6714 exit:
6715 	hwrm_req_drop(bp, req);
6716 	return rc;
6717 }
6718 
6719 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6720 {
6721 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6722 	struct hwrm_vnic_rss_qcfg_output *resp;
6723 	struct hwrm_vnic_rss_qcfg_input *req;
6724 
6725 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6726 		return;
6727 
6728 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6729 	/* All contexts use the same hash_type; context zero always exists. */
6730 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6731 	resp = hwrm_req_hold(bp, req);
6732 	if (!hwrm_req_send(bp, req)) {
6733 		bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6734 		bp->rss_hash_delta = 0;
6735 	}
6736 	hwrm_req_drop(bp, req);
6737 }
6738 
6739 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6740 {
6741 	u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6742 	struct hwrm_vnic_plcmodes_cfg_input *req;
6743 	int rc;
6744 
6745 	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6746 	if (rc)
6747 		return rc;
6748 
6749 	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6750 	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6751 	req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6752 
6753 	if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6754 		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6755 					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6756 		req->enables |=
6757 			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6758 		req->hds_threshold = cpu_to_le16(hds_thresh);
6759 	}
6760 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6761 	return hwrm_req_send(bp, req);
6762 }
6763 
6764 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6765 					struct bnxt_vnic_info *vnic,
6766 					u16 ctx_idx)
6767 {
6768 	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6769 
6770 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6771 		return;
6772 
6773 	req->rss_cos_lb_ctx_id =
6774 		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6775 
6776 	hwrm_req_send(bp, req);
6777 	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6778 }
6779 
6780 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6781 {
6782 	int i, j;
6783 
6784 	for (i = 0; i < bp->nr_vnics; i++) {
6785 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6786 
6787 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6788 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6789 				bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6790 		}
6791 	}
6792 	bp->rsscos_nr_ctxs = 0;
6793 }
6794 
6795 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6796 				    struct bnxt_vnic_info *vnic, u16 ctx_idx)
6797 {
6798 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6799 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6800 	int rc;
6801 
6802 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6803 	if (rc)
6804 		return rc;
6805 
6806 	resp = hwrm_req_hold(bp, req);
6807 	rc = hwrm_req_send(bp, req);
6808 	if (!rc)
6809 		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6810 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
6811 	hwrm_req_drop(bp, req);
6812 
6813 	return rc;
6814 }
6815 
6816 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6817 {
6818 	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6819 		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6820 	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6821 }
6822 
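/* Configure a VNIC in firmware.  On P5+ chips the VNIC is bound to a default
 * RX/completion ring pair; on older chips it is bound to a default ring group
 * plus the RSS/COS/LB contexts.  The MRU, VLAN stripping mode and RoCE VNIC
 * mode are programmed in both cases.
 */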
6823 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6824 {
6825 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6826 	struct hwrm_vnic_cfg_input *req;
6827 	unsigned int ring = 0, grp_idx;
6828 	u16 def_vlan = 0;
6829 	int rc;
6830 
6831 	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6832 	if (rc)
6833 		return rc;
6834 
6835 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6836 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6837 
6838 		req->default_rx_ring_id =
6839 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6840 		req->default_cmpl_ring_id =
6841 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6842 		req->enables =
6843 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6844 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6845 		goto vnic_mru;
6846 	}
6847 	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6848 	/* Only RSS is supported for now; TBD: COS & LB */
6849 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6850 		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6851 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6852 					   VNIC_CFG_REQ_ENABLES_MRU);
6853 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6854 		req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6855 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6856 					   VNIC_CFG_REQ_ENABLES_MRU);
6857 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6858 	} else {
6859 		req->rss_rule = cpu_to_le16(0xffff);
6860 	}
6861 
6862 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6863 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6864 		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6865 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6866 	} else {
6867 		req->cos_rule = cpu_to_le16(0xffff);
6868 	}
6869 
6870 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6871 		ring = 0;
6872 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6873 		ring = vnic->vnic_id - 1;
6874 	else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6875 		ring = bp->rx_nr_rings - 1;
6876 
6877 	grp_idx = bp->rx_ring[ring].bnapi->index;
6878 	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6879 	req->lb_rule = cpu_to_le16(0xffff);
6880 vnic_mru:
6881 	vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
6882 	req->mru = cpu_to_le16(vnic->mru);
6883 
6884 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6885 #ifdef CONFIG_BNXT_SRIOV
6886 	if (BNXT_VF(bp))
6887 		def_vlan = bp->vf.vlan;
6888 #endif
6889 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6890 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6891 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6892 		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6893 
6894 	return hwrm_req_send(bp, req);
6895 }
6896 
6897 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6898 				    struct bnxt_vnic_info *vnic)
6899 {
6900 	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6901 		struct hwrm_vnic_free_input *req;
6902 
6903 		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6904 			return;
6905 
6906 		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6907 
6908 		hwrm_req_send(bp, req);
6909 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
6910 	}
6911 }
6912 
6913 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6914 {
6915 	u16 i;
6916 
6917 	for (i = 0; i < bp->nr_vnics; i++)
6918 		bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6919 }
6920 
6921 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6922 			 unsigned int start_rx_ring_idx,
6923 			 unsigned int nr_rings)
6924 {
6925 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6926 	struct hwrm_vnic_alloc_output *resp;
6927 	struct hwrm_vnic_alloc_input *req;
6928 	int rc;
6929 
6930 	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6931 	if (rc)
6932 		return rc;
6933 
6934 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6935 		goto vnic_no_ring_grps;
6936 
6937 	/* map ring groups to this vnic */
6938 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6939 		grp_idx = bp->rx_ring[i].bnapi->index;
6940 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6941 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6942 				   j, nr_rings);
6943 			break;
6944 		}
6945 		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6946 	}
6947 
6948 vnic_no_ring_grps:
6949 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6950 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6951 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6952 		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6953 
6954 	resp = hwrm_req_hold(bp, req);
6955 	rc = hwrm_req_send(bp, req);
6956 	if (!rc)
6957 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6958 	hwrm_req_drop(bp, req);
6959 	return rc;
6960 }
6961 
6962 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6963 {
6964 	struct hwrm_vnic_qcaps_output *resp;
6965 	struct hwrm_vnic_qcaps_input *req;
6966 	int rc;
6967 
6968 	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6969 	bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6970 	bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6971 	if (bp->hwrm_spec_code < 0x10600)
6972 		return 0;
6973 
6974 	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6975 	if (rc)
6976 		return rc;
6977 
6978 	resp = hwrm_req_hold(bp, req);
6979 	rc = hwrm_req_send(bp, req);
6980 	if (!rc) {
6981 		u32 flags = le32_to_cpu(resp->flags);
6982 
6983 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6984 		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6985 			bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6986 		if (flags &
6987 		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6988 			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6989 
6990 		/* Older P5 fw before EXT_HW_STATS support did not set
6991 		 * VLAN_STRIP_CAP properly.
6992 		 */
6993 		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6994 		    (BNXT_CHIP_P5(bp) &&
6995 		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6996 			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6997 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6998 			bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6999 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
7000 			bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
7001 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
7002 		if (bp->max_tpa_v2) {
7003 			if (BNXT_CHIP_P5(bp))
7004 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
7005 			else
7006 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
7007 		}
7008 		if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
7009 			bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
7010 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
7011 			bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
7012 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
7013 			bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
7014 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
7015 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
7016 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
7017 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
7018 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
7019 			bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
7020 		if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
7021 			bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
7022 	}
7023 	hwrm_req_drop(bp, req);
7024 	return rc;
7025 }
7026 
7027 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
7028 {
7029 	struct hwrm_ring_grp_alloc_output *resp;
7030 	struct hwrm_ring_grp_alloc_input *req;
7031 	int rc;
7032 	u16 i;
7033 
7034 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7035 		return 0;
7036 
7037 	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
7038 	if (rc)
7039 		return rc;
7040 
7041 	resp = hwrm_req_hold(bp, req);
7042 	for (i = 0; i < bp->rx_nr_rings; i++) {
7043 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7044 
7045 		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7046 		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7047 		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7048 		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7049 
7050 		rc = hwrm_req_send(bp, req);
7051 
7052 		if (rc)
7053 			break;
7054 
7055 		bp->grp_info[grp_idx].fw_grp_id =
7056 			le32_to_cpu(resp->ring_group_id);
7057 	}
7058 	hwrm_req_drop(bp, req);
7059 	return rc;
7060 }
7061 
7062 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7063 {
7064 	struct hwrm_ring_grp_free_input *req;
7065 	u16 i;
7066 
7067 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7068 		return;
7069 
7070 	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7071 		return;
7072 
7073 	hwrm_req_hold(bp, req);
7074 	for (i = 0; i < bp->cp_nr_rings; i++) {
7075 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7076 			continue;
7077 		req->ring_group_id =
7078 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
7079 
7080 		hwrm_req_send(bp, req);
7081 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7082 	}
7083 	hwrm_req_drop(bp, req);
7084 }
7085 
7086 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7087 				       struct hwrm_ring_alloc_input *req,
7088 				       struct bnxt_rx_ring_info *rxr,
7089 				       struct bnxt_ring_struct *ring)
7090 {
7091 	struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7092 	u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7093 		      RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7094 
7095 	if (ring_type == HWRM_RING_ALLOC_AGG) {
7096 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7097 		req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7098 		req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
7099 		enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7100 	} else {
7101 		req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7102 		if (NET_IP_ALIGN == 2)
7103 			req->flags =
7104 				cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7105 	}
7106 	req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7107 	req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7108 	req->enables |= cpu_to_le32(enables);
7109 }
7110 
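/* Build and send a HWRM_RING_ALLOC request for one TX, RX, AGG, CMPL or
 * NQ ring and store the firmware ring ID returned on success.
 */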
7111 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7112 				    struct bnxt_rx_ring_info *rxr,
7113 				    struct bnxt_ring_struct *ring,
7114 				    u32 ring_type, u32 map_index)
7115 {
7116 	struct hwrm_ring_alloc_output *resp;
7117 	struct hwrm_ring_alloc_input *req;
7118 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7119 	struct bnxt_ring_grp_info *grp_info;
7120 	int rc, err = 0;
7121 	u16 ring_id;
7122 
7123 	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7124 	if (rc)
7125 		goto exit;
7126 
7127 	req->enables = 0;
7128 	if (rmem->nr_pages > 1) {
7129 		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7130 		/* Page size is in log2 units */
7131 		req->page_size = BNXT_PAGE_SHIFT;
7132 		req->page_tbl_depth = 1;
7133 	} else {
7134 		req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
7135 	}
7136 	req->fbo = 0;
7137 	/* Association of ring index with doorbell index and MSIX number */
7138 	req->logical_id = cpu_to_le16(map_index);
7139 
7140 	switch (ring_type) {
7141 	case HWRM_RING_ALLOC_TX: {
7142 		struct bnxt_tx_ring_info *txr;
7143 		u16 flags = 0;
7144 
7145 		txr = container_of(ring, struct bnxt_tx_ring_info,
7146 				   tx_ring_struct);
7147 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7148 		/* Association of transmit ring with completion ring */
7149 		grp_info = &bp->grp_info[ring->grp_idx];
7150 		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7151 		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7152 		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7153 		req->queue_id = cpu_to_le16(ring->queue_id);
7154 		if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7155 			req->cmpl_coal_cnt =
7156 				RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7157 		if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7158 			flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7159 		req->flags = cpu_to_le16(flags);
7160 		break;
7161 	}
7162 	case HWRM_RING_ALLOC_RX:
7163 	case HWRM_RING_ALLOC_AGG:
7164 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7165 		req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7166 			      cpu_to_le32(bp->rx_ring_mask + 1) :
7167 			      cpu_to_le32(bp->rx_agg_ring_mask + 1);
7168 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7169 			bnxt_set_rx_ring_params_p5(bp, ring_type, req,
7170 						   rxr, ring);
7171 		break;
7172 	case HWRM_RING_ALLOC_CMPL:
7173 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7174 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7175 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7176 			/* Association of cp ring with nq */
7177 			grp_info = &bp->grp_info[map_index];
7178 			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7179 			req->cq_handle = cpu_to_le64(ring->handle);
7180 			req->enables |= cpu_to_le32(
7181 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7182 		} else {
7183 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7184 		}
7185 		break;
7186 	case HWRM_RING_ALLOC_NQ:
7187 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7188 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7189 		req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7190 		break;
7191 	default:
7192 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7193 			   ring_type);
7194 		return -EINVAL;
7195 	}
7196 
7197 	resp = hwrm_req_hold(bp, req);
7198 	rc = hwrm_req_send(bp, req);
7199 	err = le16_to_cpu(resp->error_code);
7200 	ring_id = le16_to_cpu(resp->ring_id);
7201 	hwrm_req_drop(bp, req);
7202 
7203 exit:
7204 	if (rc || err) {
7205 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7206 			   ring_type, rc, err);
7207 		return -EIO;
7208 	}
7209 	ring->fw_ring_id = ring_id;
7210 	return rc;
7211 }
7212 
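/* Tell the firmware which completion ring to use for asynchronous event
 * notifications, using HWRM_FUNC_CFG on the PF or HWRM_FUNC_VF_CFG on
 * a VF.
 */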
7213 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7214 {
7215 	int rc;
7216 
7217 	if (BNXT_PF(bp)) {
7218 		struct hwrm_func_cfg_input *req;
7219 
7220 		rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7221 		if (rc)
7222 			return rc;
7223 
7224 		req->fid = cpu_to_le16(0xffff);
7225 		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7226 		req->async_event_cr = cpu_to_le16(idx);
7227 		return hwrm_req_send(bp, req);
7228 	} else {
7229 		struct hwrm_func_vf_cfg_input *req;
7230 
7231 		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7232 		if (rc)
7233 			return rc;
7234 
7235 		req->enables =
7236 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7237 		req->async_event_cr = cpu_to_le16(idx);
7238 		return hwrm_req_send(bp, req);
7239 	}
7240 }
7241 
7242 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7243 			     u32 ring_type)
7244 {
7245 	switch (ring_type) {
7246 	case HWRM_RING_ALLOC_TX:
7247 		db->db_ring_mask = bp->tx_ring_mask;
7248 		break;
7249 	case HWRM_RING_ALLOC_RX:
7250 		db->db_ring_mask = bp->rx_ring_mask;
7251 		break;
7252 	case HWRM_RING_ALLOC_AGG:
7253 		db->db_ring_mask = bp->rx_agg_ring_mask;
7254 		break;
7255 	case HWRM_RING_ALLOC_CMPL:
7256 	case HWRM_RING_ALLOC_NQ:
7257 		db->db_ring_mask = bp->cp_ring_mask;
7258 		break;
7259 	}
7260 	if (bp->flags & BNXT_FLAG_CHIP_P7) {
7261 		db->db_epoch_mask = db->db_ring_mask + 1;
7262 		db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7263 	}
7264 }
7265 
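/* Initialize the doorbell address, key and ring mask for a newly
 * allocated ring.  P5_PLUS chips use 64-bit doorbells keyed by the
 * firmware ring XID; older chips use legacy 32-bit doorbells at a
 * fixed per-ring offset.
 */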
7266 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7267 			u32 map_idx, u32 xid)
7268 {
7269 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7270 		switch (ring_type) {
7271 		case HWRM_RING_ALLOC_TX:
7272 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7273 			break;
7274 		case HWRM_RING_ALLOC_RX:
7275 		case HWRM_RING_ALLOC_AGG:
7276 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7277 			break;
7278 		case HWRM_RING_ALLOC_CMPL:
7279 			db->db_key64 = DBR_PATH_L2;
7280 			break;
7281 		case HWRM_RING_ALLOC_NQ:
7282 			db->db_key64 = DBR_PATH_L2;
7283 			break;
7284 		}
7285 		db->db_key64 |= (u64)xid << DBR_XID_SFT;
7286 
7287 		if (bp->flags & BNXT_FLAG_CHIP_P7)
7288 			db->db_key64 |= DBR_VALID;
7289 
7290 		db->doorbell = bp->bar1 + bp->db_offset;
7291 	} else {
7292 		db->doorbell = bp->bar1 + map_idx * 0x80;
7293 		switch (ring_type) {
7294 		case HWRM_RING_ALLOC_TX:
7295 			db->db_key32 = DB_KEY_TX;
7296 			break;
7297 		case HWRM_RING_ALLOC_RX:
7298 		case HWRM_RING_ALLOC_AGG:
7299 			db->db_key32 = DB_KEY_RX;
7300 			break;
7301 		case HWRM_RING_ALLOC_CMPL:
7302 			db->db_key32 = DB_KEY_CP;
7303 			break;
7304 		}
7305 	}
7306 	bnxt_set_db_mask(bp, db, ring_type);
7307 }
7308 
7309 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7310 				   struct bnxt_rx_ring_info *rxr)
7311 {
7312 	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7313 	struct bnxt_napi *bnapi = rxr->bnapi;
7314 	u32 type = HWRM_RING_ALLOC_RX;
7315 	u32 map_idx = bnapi->index;
7316 	int rc;
7317 
7318 	rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7319 	if (rc)
7320 		return rc;
7321 
7322 	bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7323 	bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7324 
7325 	return 0;
7326 }
7327 
7328 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7329 				       struct bnxt_rx_ring_info *rxr)
7330 {
7331 	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7332 	u32 type = HWRM_RING_ALLOC_AGG;
7333 	u32 grp_idx = ring->grp_idx;
7334 	u32 map_idx;
7335 	int rc;
7336 
7337 	map_idx = grp_idx + bp->rx_nr_rings;
7338 	rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7339 	if (rc)
7340 		return rc;
7341 
7342 	bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7343 		    ring->fw_ring_id);
7344 	bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7345 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7346 	bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7347 
7348 	return 0;
7349 }
7350 
7351 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7352 				      struct bnxt_cp_ring_info *cpr)
7353 {
7354 	const u32 type = HWRM_RING_ALLOC_CMPL;
7355 	struct bnxt_napi *bnapi = cpr->bnapi;
7356 	struct bnxt_ring_struct *ring;
7357 	u32 map_idx = bnapi->index;
7358 	int rc;
7359 
7360 	ring = &cpr->cp_ring_struct;
7361 	ring->handle = BNXT_SET_NQ_HDL(cpr);
7362 	rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7363 	if (rc)
7364 		return rc;
7365 	bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7366 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7367 	return 0;
7368 }
7369 
7370 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7371 				   struct bnxt_tx_ring_info *txr, u32 tx_idx)
7372 {
7373 	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7374 	const u32 type = HWRM_RING_ALLOC_TX;
7375 	int rc;
7376 
7377 	rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
7378 	if (rc)
7379 		return rc;
7380 	bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7381 	return 0;
7382 }
7383 
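/* Allocate all firmware rings: the NQ or completion ring for each NAPI
 * instance first (the first one also becomes the async event ring),
 * then the TX rings, the RX rings and finally the RX aggregation rings.
 * On P5_PLUS chips each TX and RX ring also gets its own completion
 * ring.
 */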
7384 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7385 {
7386 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7387 	int i, rc = 0;
7388 	u32 type;
7389 
7390 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7391 		type = HWRM_RING_ALLOC_NQ;
7392 	else
7393 		type = HWRM_RING_ALLOC_CMPL;
7394 	for (i = 0; i < bp->cp_nr_rings; i++) {
7395 		struct bnxt_napi *bnapi = bp->bnapi[i];
7396 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7397 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7398 		u32 map_idx = ring->map_idx;
7399 		unsigned int vector;
7400 
7401 		vector = bp->irq_tbl[map_idx].vector;
7402 		disable_irq_nosync(vector);
7403 		rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7404 		if (rc) {
7405 			enable_irq(vector);
7406 			goto err_out;
7407 		}
7408 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7409 		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7410 		enable_irq(vector);
7411 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7412 
7413 		if (!i) {
7414 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7415 			if (rc)
7416 				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7417 		}
7418 	}
7419 
7420 	for (i = 0; i < bp->tx_nr_rings; i++) {
7421 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7422 
7423 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7424 			rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7425 			if (rc)
7426 				goto err_out;
7427 		}
7428 		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7429 		if (rc)
7430 			goto err_out;
7431 	}
7432 
7433 	for (i = 0; i < bp->rx_nr_rings; i++) {
7434 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7435 
7436 		rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7437 		if (rc)
7438 			goto err_out;
7439 		/* If we have agg rings, post agg buffers first. */
7440 		if (!agg_rings)
7441 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7442 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7443 			rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7444 			if (rc)
7445 				goto err_out;
7446 		}
7447 	}
7448 
7449 	if (agg_rings) {
7450 		for (i = 0; i < bp->rx_nr_rings; i++) {
7451 			rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7452 			if (rc)
7453 				goto err_out;
7454 		}
7455 	}
7456 err_out:
7457 	return rc;
7458 }
7459 
7460 static void bnxt_cancel_dim(struct bnxt *bp)
7461 {
7462 	int i;
7463 
7464 	/* DIM work is initialized in bnxt_enable_napi().  Proceed only
7465 	 * if NAPI is enabled.
7466 	 */
7467 	if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7468 		return;
7469 
7470 	/* Make sure NAPI sees that the VNIC is disabled */
7471 	synchronize_net();
7472 	for (i = 0; i < bp->rx_nr_rings; i++) {
7473 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7474 		struct bnxt_napi *bnapi = rxr->bnapi;
7475 
7476 		cancel_work_sync(&bnapi->cp_ring.dim.work);
7477 	}
7478 }
7479 
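/* Send a HWRM_RING_FREE request for one firmware ring.  Returns -EIO if
 * the firmware reports an error, 0 otherwise or when firmware access is
 * not possible.
 */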
7480 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7481 				   struct bnxt_ring_struct *ring,
7482 				   u32 ring_type, int cmpl_ring_id)
7483 {
7484 	struct hwrm_ring_free_output *resp;
7485 	struct hwrm_ring_free_input *req;
7486 	u16 error_code = 0;
7487 	int rc;
7488 
7489 	if (BNXT_NO_FW_ACCESS(bp))
7490 		return 0;
7491 
7492 	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7493 	if (rc)
7494 		goto exit;
7495 
7496 	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7497 	req->ring_type = ring_type;
7498 	req->ring_id = cpu_to_le16(ring->fw_ring_id);
7499 
7500 	resp = hwrm_req_hold(bp, req);
7501 	rc = hwrm_req_send(bp, req);
7502 	error_code = le16_to_cpu(resp->error_code);
7503 	hwrm_req_drop(bp, req);
7504 exit:
7505 	if (rc || error_code) {
7506 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7507 			   ring_type, rc, error_code);
7508 		return -EIO;
7509 	}
7510 	return 0;
7511 }
7512 
7513 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7514 				   struct bnxt_tx_ring_info *txr,
7515 				   bool close_path)
7516 {
7517 	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7518 	u32 cmpl_ring_id;
7519 
7520 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7521 		return;
7522 
7523 	cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7524 		       INVALID_HW_RING_ID;
7525 	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7526 				cmpl_ring_id);
7527 	ring->fw_ring_id = INVALID_HW_RING_ID;
7528 }
7529 
7530 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7531 				   struct bnxt_rx_ring_info *rxr,
7532 				   bool close_path)
7533 {
7534 	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7535 	u32 grp_idx = rxr->bnapi->index;
7536 	u32 cmpl_ring_id;
7537 
7538 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7539 		return;
7540 
7541 	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7542 	hwrm_ring_free_send_msg(bp, ring,
7543 				RING_FREE_REQ_RING_TYPE_RX,
7544 				close_path ? cmpl_ring_id :
7545 				INVALID_HW_RING_ID);
7546 	ring->fw_ring_id = INVALID_HW_RING_ID;
7547 	bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7548 }
7549 
7550 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7551 				       struct bnxt_rx_ring_info *rxr,
7552 				       bool close_path)
7553 {
7554 	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7555 	u32 grp_idx = rxr->bnapi->index;
7556 	u32 type, cmpl_ring_id;
7557 
7558 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7559 		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7560 	else
7561 		type = RING_FREE_REQ_RING_TYPE_RX;
7562 
7563 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7564 		return;
7565 
7566 	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7567 	hwrm_ring_free_send_msg(bp, ring, type,
7568 				close_path ? cmpl_ring_id :
7569 				INVALID_HW_RING_ID);
7570 	ring->fw_ring_id = INVALID_HW_RING_ID;
7571 	bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7572 }
7573 
7574 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7575 				   struct bnxt_cp_ring_info *cpr)
7576 {
7577 	struct bnxt_ring_struct *ring;
7578 
7579 	ring = &cpr->cp_ring_struct;
7580 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7581 		return;
7582 
7583 	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7584 				INVALID_HW_RING_ID);
7585 	ring->fw_ring_id = INVALID_HW_RING_ID;
7586 }
7587 
7588 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7589 {
7590 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7591 	int i, size = ring->ring_mem.page_size;
7592 
7593 	cpr->cp_raw_cons = 0;
7594 	cpr->toggle = 0;
7595 
7596 	for (i = 0; i < bp->cp_nr_pages; i++)
7597 		if (cpr->cp_desc_ring[i])
7598 			memset(cpr->cp_desc_ring[i], 0, size);
7599 }
7600 
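/* Free all firmware rings: TX rings first, then RX and aggregation
 * rings, and finally the completion/NQ rings after interrupts have been
 * disabled.
 */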
7601 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7602 {
7603 	u32 type;
7604 	int i;
7605 
7606 	if (!bp->bnapi)
7607 		return;
7608 
7609 	for (i = 0; i < bp->tx_nr_rings; i++)
7610 		bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7611 
7612 	bnxt_cancel_dim(bp);
7613 	for (i = 0; i < bp->rx_nr_rings; i++) {
7614 		bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7615 		bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7616 	}
7617 
7618 	/* The completion rings are about to be freed.  After that the
7619 	 * IRQ doorbell will not work anymore.  So we need to disable
7620 	 * IRQ here.
7621 	 */
7622 	bnxt_disable_int_sync(bp);
7623 
7624 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7625 		type = RING_FREE_REQ_RING_TYPE_NQ;
7626 	else
7627 		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7628 	for (i = 0; i < bp->cp_nr_rings; i++) {
7629 		struct bnxt_napi *bnapi = bp->bnapi[i];
7630 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7631 		struct bnxt_ring_struct *ring;
7632 		int j;
7633 
7634 		for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7635 			bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7636 
7637 		ring = &cpr->cp_ring_struct;
7638 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7639 			hwrm_ring_free_send_msg(bp, ring, type,
7640 						INVALID_HW_RING_ID);
7641 			ring->fw_ring_id = INVALID_HW_RING_ID;
7642 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7643 		}
7644 	}
7645 }
7646 
7647 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7648 			     bool shared);
7649 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7650 			   bool shared);
7651 
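/* Query the firmware for the resources currently reserved for this
 * function and record them in bp->hw_resc.  On P5_PLUS chips, trim the
 * RX/TX rings to fit the reserved completion rings if necessary.
 */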
7652 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7653 {
7654 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7655 	struct hwrm_func_qcfg_output *resp;
7656 	struct hwrm_func_qcfg_input *req;
7657 	int rc;
7658 
7659 	if (bp->hwrm_spec_code < 0x10601)
7660 		return 0;
7661 
7662 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7663 	if (rc)
7664 		return rc;
7665 
7666 	req->fid = cpu_to_le16(0xffff);
7667 	resp = hwrm_req_hold(bp, req);
7668 	rc = hwrm_req_send(bp, req);
7669 	if (rc) {
7670 		hwrm_req_drop(bp, req);
7671 		return rc;
7672 	}
7673 
7674 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7675 	if (BNXT_NEW_RM(bp)) {
7676 		u16 cp, stats;
7677 
7678 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7679 		hw_resc->resv_hw_ring_grps =
7680 			le32_to_cpu(resp->alloc_hw_ring_grps);
7681 		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7682 		hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7683 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
7684 		stats = le16_to_cpu(resp->alloc_stat_ctx);
7685 		hw_resc->resv_irqs = cp;
7686 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7687 			int rx = hw_resc->resv_rx_rings;
7688 			int tx = hw_resc->resv_tx_rings;
7689 
7690 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
7691 				rx >>= 1;
7692 			if (cp < (rx + tx)) {
7693 				rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7694 				if (rc)
7695 					goto get_rings_exit;
7696 				if (bp->flags & BNXT_FLAG_AGG_RINGS)
7697 					rx <<= 1;
7698 				hw_resc->resv_rx_rings = rx;
7699 				hw_resc->resv_tx_rings = tx;
7700 			}
7701 			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7702 			hw_resc->resv_hw_ring_grps = rx;
7703 		}
7704 		hw_resc->resv_cp_rings = cp;
7705 		hw_resc->resv_stat_ctxs = stats;
7706 	}
7707 get_rings_exit:
7708 	hwrm_req_drop(bp, req);
7709 	return rc;
7710 }
7711 
7712 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7713 {
7714 	struct hwrm_func_qcfg_output *resp;
7715 	struct hwrm_func_qcfg_input *req;
7716 	int rc;
7717 
7718 	if (bp->hwrm_spec_code < 0x10601)
7719 		return 0;
7720 
7721 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7722 	if (rc)
7723 		return rc;
7724 
7725 	req->fid = cpu_to_le16(fid);
7726 	resp = hwrm_req_hold(bp, req);
7727 	rc = hwrm_req_send(bp, req);
7728 	if (!rc)
7729 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7730 
7731 	hwrm_req_drop(bp, req);
7732 	return rc;
7733 }
7734 
7735 static bool bnxt_rfs_supported(struct bnxt *bp);
7736 
7737 static struct hwrm_func_cfg_input *
7738 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7739 {
7740 	struct hwrm_func_cfg_input *req;
7741 	u32 enables = 0;
7742 
7743 	if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7744 		return NULL;
7745 
7746 	req->fid = cpu_to_le16(0xffff);
7747 	enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7748 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7749 	if (BNXT_NEW_RM(bp)) {
7750 		enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7751 		enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7752 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7753 			enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7754 			enables |= hwr->cp_p5 ?
7755 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7756 		} else {
7757 			enables |= hwr->cp ?
7758 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7759 			enables |= hwr->grp ?
7760 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7761 		}
7762 		enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7763 		enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7764 					  0;
7765 		req->num_rx_rings = cpu_to_le16(hwr->rx);
7766 		req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7767 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7768 			req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7769 			req->num_msix = cpu_to_le16(hwr->cp);
7770 		} else {
7771 			req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7772 			req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7773 		}
7774 		req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7775 		req->num_vnics = cpu_to_le16(hwr->vnic);
7776 	}
7777 	req->enables = cpu_to_le32(enables);
7778 	return req;
7779 }
7780 
7781 static struct hwrm_func_vf_cfg_input *
7782 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7783 {
7784 	struct hwrm_func_vf_cfg_input *req;
7785 	u32 enables = 0;
7786 
7787 	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7788 		return NULL;
7789 
7790 	enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7791 	enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7792 			     FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7793 	enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7794 	enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7795 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7796 		enables |= hwr->cp_p5 ?
7797 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7798 	} else {
7799 		enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7800 		enables |= hwr->grp ?
7801 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7802 	}
7803 	enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7804 	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7805 
7806 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7807 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7808 	req->num_rx_rings = cpu_to_le16(hwr->rx);
7809 	req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7810 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7811 		req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7812 	} else {
7813 		req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7814 		req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7815 	}
7816 	req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7817 	req->num_vnics = cpu_to_le16(hwr->vnic);
7818 
7819 	req->enables = cpu_to_le32(enables);
7820 	return req;
7821 }
7822 
7823 static int
7824 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7825 {
7826 	struct hwrm_func_cfg_input *req;
7827 	int rc;
7828 
7829 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7830 	if (!req)
7831 		return -ENOMEM;
7832 
7833 	if (!req->enables) {
7834 		hwrm_req_drop(bp, req);
7835 		return 0;
7836 	}
7837 
7838 	rc = hwrm_req_send(bp, req);
7839 	if (rc)
7840 		return rc;
7841 
7842 	if (bp->hwrm_spec_code < 0x10601)
7843 		bp->hw_resc.resv_tx_rings = hwr->tx;
7844 
7845 	return bnxt_hwrm_get_rings(bp);
7846 }
7847 
7848 static int
7849 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7850 {
7851 	struct hwrm_func_vf_cfg_input *req;
7852 	int rc;
7853 
7854 	if (!BNXT_NEW_RM(bp)) {
7855 		bp->hw_resc.resv_tx_rings = hwr->tx;
7856 		return 0;
7857 	}
7858 
7859 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7860 	if (!req)
7861 		return -ENOMEM;
7862 
7863 	rc = hwrm_req_send(bp, req);
7864 	if (rc)
7865 		return rc;
7866 
7867 	return bnxt_hwrm_get_rings(bp);
7868 }
7869 
7870 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7871 {
7872 	if (BNXT_PF(bp))
7873 		return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7874 	else
7875 		return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7876 }
7877 
7878 int bnxt_nq_rings_in_use(struct bnxt *bp)
7879 {
7880 	return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7881 }
7882 
7883 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7884 {
7885 	int cp;
7886 
7887 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7888 		return bnxt_nq_rings_in_use(bp);
7889 
7890 	cp = bp->tx_nr_rings + bp->rx_nr_rings;
7891 	return cp;
7892 }
7893 
7894 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7895 {
7896 	return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7897 }
7898 
7899 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7900 {
7901 	if (!hwr->grp)
7902 		return 0;
7903 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7904 		int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7905 
7906 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7907 			rss_ctx *= hwr->vnic;
7908 		return rss_ctx;
7909 	}
7910 	if (BNXT_VF(bp))
7911 		return BNXT_VF_MAX_RSS_CTX;
7912 	if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7913 		return hwr->grp + 1;
7914 	return 1;
7915 }
7916 
7917 /* Check if a default RSS map needs to be setup.  This function is only
7918  * used on older firmware that does not require reserving RX rings.
7919  */
7920 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7921 {
7922 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7923 
7924 	/* The RSS map is valid for RX rings set to resv_rx_rings */
7925 	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7926 		hw_resc->resv_rx_rings = bp->rx_nr_rings;
7927 		if (!netif_is_rxfh_configured(bp->dev))
7928 			bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7929 	}
7930 }
7931 
7932 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7933 {
7934 	if (bp->flags & BNXT_FLAG_RFS) {
7935 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7936 			return 2 + bp->num_rss_ctx;
7937 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7938 			return rx_rings + 1;
7939 	}
7940 	return 1;
7941 }
7942 
7943 static void bnxt_get_total_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7944 {
7945 	hwr->cp = bnxt_nq_rings_in_use(bp);
7946 	hwr->cp_p5 = 0;
7947 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7948 		hwr->cp_p5 = bnxt_cp_rings_in_use(bp);
7949 	hwr->tx = bp->tx_nr_rings;
7950 	hwr->rx = bp->rx_nr_rings;
7951 	hwr->grp = hwr->rx;
7952 	hwr->vnic = bnxt_get_total_vnics(bp, hwr->rx);
7953 	hwr->rss_ctx = bnxt_get_total_rss_ctxs(bp, hwr);
7954 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7955 		hwr->rx <<= 1;
7956 	hwr->stat = bnxt_get_func_stat_ctxs(bp);
7957 }
7958 
7959 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7960 {
7961 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7962 	struct bnxt_hw_rings hwr;
7963 
7964 	bnxt_get_total_resources(bp, &hwr);
7965 
7966 	/* Old firmware does not need RX ring reservations but we still
7967 	 * need to setup a default RSS map when needed.  With new firmware
7968 	 * we go through RX ring reservations first and then set up the
7969 	 * RSS map for the successfully reserved RX rings when needed.
7970 	 */
7971 	if (!BNXT_NEW_RM(bp))
7972 		bnxt_check_rss_tbl_no_rmgr(bp);
7973 
7974 	if (hw_resc->resv_tx_rings != hwr.tx && bp->hwrm_spec_code >= 0x10601)
7975 		return true;
7976 
7977 	if (!BNXT_NEW_RM(bp))
7978 		return false;
7979 
7980 	if (hw_resc->resv_rx_rings != hwr.rx ||
7981 	    hw_resc->resv_vnics != hwr.vnic ||
7982 	    hw_resc->resv_stat_ctxs != hwr.stat ||
7983 	    hw_resc->resv_rsscos_ctxs != hwr.rss_ctx ||
7984 	    (hw_resc->resv_hw_ring_grps != hwr.grp &&
7985 	     !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7986 		return true;
7987 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7988 		if (hw_resc->resv_cp_rings != hwr.cp_p5)
7989 			return true;
7990 	} else if (hw_resc->resv_cp_rings != hwr.cp) {
7991 		return true;
7992 	}
7993 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7994 	    hw_resc->resv_irqs != hwr.cp)
7995 		return true;
7996 	return false;
7997 }
7998 
7999 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8000 {
8001 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8002 
8003 	hwr->tx = hw_resc->resv_tx_rings;
8004 	if (BNXT_NEW_RM(bp)) {
8005 		hwr->rx = hw_resc->resv_rx_rings;
8006 		hwr->cp = hw_resc->resv_irqs;
8007 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8008 			hwr->cp_p5 = hw_resc->resv_cp_rings;
8009 		hwr->grp = hw_resc->resv_hw_ring_grps;
8010 		hwr->vnic = hw_resc->resv_vnics;
8011 		hwr->stat = hw_resc->resv_stat_ctxs;
8012 		hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
8013 	}
8014 }
8015 
8016 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8017 {
8018 	return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
8019 	       hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
8020 }
8021 
8022 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
8023 
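/* Reserve rings and related resources with the firmware based on the
 * current ring counts, then adjust the driver's ring counts, RSS map
 * and ULP MSI-X/stat contexts to what was actually granted.
 */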
8024 static int __bnxt_reserve_rings(struct bnxt *bp)
8025 {
8026 	struct bnxt_hw_rings hwr = {0};
8027 	int rx_rings, old_rx_rings, rc;
8028 	int cp = bp->cp_nr_rings;
8029 	int ulp_msix = 0;
8030 	bool sh = false;
8031 	int tx_cp;
8032 
8033 	if (!bnxt_need_reserve_rings(bp))
8034 		return 0;
8035 
8036 	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
8037 		ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
8038 		if (!ulp_msix)
8039 			bnxt_set_ulp_stat_ctxs(bp, 0);
8040 
8041 		if (ulp_msix > bp->ulp_num_msix_want)
8042 			ulp_msix = bp->ulp_num_msix_want;
8043 		hwr.cp = cp + ulp_msix;
8044 	} else {
8045 		hwr.cp = bnxt_nq_rings_in_use(bp);
8046 	}
8047 
8048 	hwr.tx = bp->tx_nr_rings;
8049 	hwr.rx = bp->rx_nr_rings;
8050 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8051 		sh = true;
8052 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8053 		hwr.cp_p5 = hwr.rx + hwr.tx;
8054 
8055 	hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
8056 
8057 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
8058 		hwr.rx <<= 1;
8059 	hwr.grp = bp->rx_nr_rings;
8060 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8061 	hwr.stat = bnxt_get_func_stat_ctxs(bp);
8062 	old_rx_rings = bp->hw_resc.resv_rx_rings;
8063 
8064 	rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8065 	if (rc)
8066 		return rc;
8067 
8068 	bnxt_copy_reserved_rings(bp, &hwr);
8069 
8070 	rx_rings = hwr.rx;
8071 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8072 		if (hwr.rx >= 2) {
8073 			rx_rings = hwr.rx >> 1;
8074 		} else {
8075 			if (netif_running(bp->dev))
8076 				return -ENOMEM;
8077 
8078 			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8079 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8080 			bp->dev->hw_features &= ~NETIF_F_LRO;
8081 			bp->dev->features &= ~NETIF_F_LRO;
8082 			bnxt_set_ring_params(bp);
8083 		}
8084 	}
8085 	rx_rings = min_t(int, rx_rings, hwr.grp);
8086 	hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8087 	if (bnxt_ulp_registered(bp->edev) &&
8088 	    hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8089 		hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8090 	hwr.cp = min_t(int, hwr.cp, hwr.stat);
8091 	rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8092 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
8093 		hwr.rx = rx_rings << 1;
8094 	tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8095 	hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8096 	if (hwr.tx != bp->tx_nr_rings) {
8097 		netdev_warn(bp->dev,
8098 			    "Able to reserve only %d out of %d requested TX rings\n",
8099 			    hwr.tx, bp->tx_nr_rings);
8100 	}
8101 	bp->tx_nr_rings = hwr.tx;
8102 
8103 	/* If we cannot reserve all the RX rings, reset the RSS map only
8104 	 * if absolutely necessary
8105 	 */
8106 	if (rx_rings != bp->rx_nr_rings) {
8107 		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8108 			    rx_rings, bp->rx_nr_rings);
8109 		if (netif_is_rxfh_configured(bp->dev) &&
8110 		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8111 		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8112 		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8113 			ethtool_rxfh_indir_lost(bp->dev);
8114 		}
8115 	}
8116 	bp->rx_nr_rings = rx_rings;
8117 	bp->cp_nr_rings = hwr.cp;
8118 
8119 	/* Fall back if we cannot reserve enough HW RSS contexts */
8120 	if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
8121 	    hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
8122 		bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
8123 
8124 	if (!bnxt_rings_ok(bp, &hwr))
8125 		return -ENOMEM;
8126 
8127 	if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8128 	    !netif_is_rxfh_configured(bp->dev))
8129 		bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8130 
8131 	if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8132 		int resv_msix, resv_ctx, ulp_ctxs;
8133 		struct bnxt_hw_resc *hw_resc;
8134 
8135 		hw_resc = &bp->hw_resc;
8136 		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8137 		ulp_msix = min_t(int, resv_msix, ulp_msix);
8138 		bnxt_set_ulp_msix_num(bp, ulp_msix);
8139 		resv_ctx = hw_resc->resv_stat_ctxs  - bp->cp_nr_rings;
8140 		ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8141 		bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8142 	}
8143 
8144 	return rc;
8145 }
8146 
8147 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8148 {
8149 	struct hwrm_func_vf_cfg_input *req;
8150 	u32 flags;
8151 
8152 	if (!BNXT_NEW_RM(bp))
8153 		return 0;
8154 
8155 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8156 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8157 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8158 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8159 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8160 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8161 		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8162 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8163 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8164 
8165 	req->flags = cpu_to_le32(flags);
8166 	return hwrm_req_send_silent(bp, req);
8167 }
8168 
8169 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8170 {
8171 	struct hwrm_func_cfg_input *req;
8172 	u32 flags;
8173 
8174 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8175 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8176 	if (BNXT_NEW_RM(bp)) {
8177 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8178 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8179 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8180 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8181 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8182 			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8183 				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8184 		else
8185 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8186 	}
8187 
8188 	req->flags = cpu_to_le32(flags);
8189 	return hwrm_req_send_silent(bp, req);
8190 }
8191 
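/* Check with the firmware whether the requested ring resources are
 * available, without actually reserving them.
 */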
8192 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8193 {
8194 	if (bp->hwrm_spec_code < 0x10801)
8195 		return 0;
8196 
8197 	if (BNXT_PF(bp))
8198 		return bnxt_hwrm_check_pf_rings(bp, hwr);
8199 
8200 	return bnxt_hwrm_check_vf_rings(bp, hwr);
8201 }
8202 
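/* Query the interrupt coalescing capabilities from the firmware,
 * falling back to legacy defaults on older firmware.
 */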
8203 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8204 {
8205 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8206 	struct hwrm_ring_aggint_qcaps_output *resp;
8207 	struct hwrm_ring_aggint_qcaps_input *req;
8208 	int rc;
8209 
8210 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8211 	coal_cap->num_cmpl_dma_aggr_max = 63;
8212 	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8213 	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8214 	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8215 	coal_cap->int_lat_tmr_min_max = 65535;
8216 	coal_cap->int_lat_tmr_max_max = 65535;
8217 	coal_cap->num_cmpl_aggr_int_max = 65535;
8218 	coal_cap->timer_units = 80;
8219 
8220 	if (bp->hwrm_spec_code < 0x10902)
8221 		return;
8222 
8223 	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8224 		return;
8225 
8226 	resp = hwrm_req_hold(bp, req);
8227 	rc = hwrm_req_send_silent(bp, req);
8228 	if (!rc) {
8229 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8230 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8231 		coal_cap->num_cmpl_dma_aggr_max =
8232 			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8233 		coal_cap->num_cmpl_dma_aggr_during_int_max =
8234 			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8235 		coal_cap->cmpl_aggr_dma_tmr_max =
8236 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8237 		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8238 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8239 		coal_cap->int_lat_tmr_min_max =
8240 			le16_to_cpu(resp->int_lat_tmr_min_max);
8241 		coal_cap->int_lat_tmr_max_max =
8242 			le16_to_cpu(resp->int_lat_tmr_max_max);
8243 		coal_cap->num_cmpl_aggr_int_max =
8244 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
8245 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8246 	}
8247 	hwrm_req_drop(bp, req);
8248 }
8249 
8250 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8251 {
8252 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8253 
8254 	return usec * 1000 / coal_cap->timer_units;
8255 }
8256 
8257 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8258 	struct bnxt_coal *hw_coal,
8259 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8260 {
8261 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8262 	u16 val, tmr, max, flags = hw_coal->flags;
8263 	u32 cmpl_params = coal_cap->cmpl_params;
8264 
8265 	max = hw_coal->bufs_per_record * 128;
8266 	if (hw_coal->budget)
8267 		max = hw_coal->bufs_per_record * hw_coal->budget;
8268 	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8269 
8270 	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8271 	req->num_cmpl_aggr_int = cpu_to_le16(val);
8272 
8273 	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8274 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
8275 
8276 	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8277 		      coal_cap->num_cmpl_dma_aggr_during_int_max);
8278 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8279 
8280 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8281 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8282 	req->int_lat_tmr_max = cpu_to_le16(tmr);
8283 
8284 	/* min timer set to 1/2 of interrupt timer */
8285 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8286 		val = tmr / 2;
8287 		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8288 		req->int_lat_tmr_min = cpu_to_le16(val);
8289 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8290 	}
8291 
8292 	/* buf timer set to 1/4 of interrupt timer */
8293 	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8294 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8295 
8296 	if (cmpl_params &
8297 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8298 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8299 		val = clamp_t(u16, tmr, 1,
8300 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8301 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8302 		req->enables |=
8303 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8304 	}
8305 
8306 	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8307 	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8308 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8309 	req->flags = cpu_to_le16(flags);
8310 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8311 }
8312 
8313 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8314 				   struct bnxt_coal *hw_coal)
8315 {
8316 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8317 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8318 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8319 	u32 nq_params = coal_cap->nq_params;
8320 	u16 tmr;
8321 	int rc;
8322 
8323 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8324 		return 0;
8325 
8326 	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8327 	if (rc)
8328 		return rc;
8329 
8330 	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8331 	req->flags =
8332 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8333 
8334 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8335 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8336 	req->int_lat_tmr_min = cpu_to_le16(tmr);
8337 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8338 	return hwrm_req_send(bp, req);
8339 }
8340 
8341 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8342 {
8343 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8344 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8345 	struct bnxt_coal coal;
8346 	int rc;
8347 
8348 	/* Tick values in micro seconds.
8349 	 * 1 coal_buf x bufs_per_record = 1 completion record.
8350 	 */
8351 	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8352 
8353 	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8354 	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8355 
8356 	if (!bnapi->rx_ring)
8357 		return -ENODEV;
8358 
8359 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8360 	if (rc)
8361 		return rc;
8362 
8363 	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8364 
8365 	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8366 
8367 	return hwrm_req_send(bp, req_rx);
8368 }
8369 
8370 static int
8371 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8372 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8373 {
8374 	u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8375 
8376 	req->ring_id = cpu_to_le16(ring_id);
8377 	return hwrm_req_send(bp, req);
8378 }
8379 
8380 static int
8381 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8382 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8383 {
8384 	struct bnxt_tx_ring_info *txr;
8385 	int i, rc;
8386 
8387 	bnxt_for_each_napi_tx(i, bnapi, txr) {
8388 		u16 ring_id;
8389 
8390 		ring_id = bnxt_cp_ring_for_tx(bp, txr);
8391 		req->ring_id = cpu_to_le16(ring_id);
8392 		rc = hwrm_req_send(bp, req);
8393 		if (rc)
8394 			return rc;
8395 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8396 			return 0;
8397 	}
8398 	return 0;
8399 }
8400 
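/* Program the RX and TX interrupt coalescing parameters on all
 * completion rings, and the NQ coalescing on P5_PLUS chips.
 */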
8401 int bnxt_hwrm_set_coal(struct bnxt *bp)
8402 {
8403 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8404 	int i, rc;
8405 
8406 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8407 	if (rc)
8408 		return rc;
8409 
8410 	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8411 	if (rc) {
8412 		hwrm_req_drop(bp, req_rx);
8413 		return rc;
8414 	}
8415 
8416 	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8417 	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8418 
8419 	hwrm_req_hold(bp, req_rx);
8420 	hwrm_req_hold(bp, req_tx);
8421 	for (i = 0; i < bp->cp_nr_rings; i++) {
8422 		struct bnxt_napi *bnapi = bp->bnapi[i];
8423 		struct bnxt_coal *hw_coal;
8424 
8425 		if (!bnapi->rx_ring)
8426 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8427 		else
8428 			rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8429 		if (rc)
8430 			break;
8431 
8432 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8433 			continue;
8434 
8435 		if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8436 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8437 			if (rc)
8438 				break;
8439 		}
8440 		if (bnapi->rx_ring)
8441 			hw_coal = &bp->rx_coal;
8442 		else
8443 			hw_coal = &bp->tx_coal;
8444 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8445 	}
8446 	hwrm_req_drop(bp, req_rx);
8447 	hwrm_req_drop(bp, req_tx);
8448 	return rc;
8449 }
8450 
8451 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8452 {
8453 	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8454 	struct hwrm_stat_ctx_free_input *req;
8455 	int i;
8456 
8457 	if (!bp->bnapi)
8458 		return;
8459 
8460 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8461 		return;
8462 
8463 	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8464 		return;
8465 	if (BNXT_FW_MAJ(bp) <= 20) {
8466 		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8467 			hwrm_req_drop(bp, req);
8468 			return;
8469 		}
8470 		hwrm_req_hold(bp, req0);
8471 	}
8472 	hwrm_req_hold(bp, req);
8473 	for (i = 0; i < bp->cp_nr_rings; i++) {
8474 		struct bnxt_napi *bnapi = bp->bnapi[i];
8475 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8476 
8477 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8478 			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8479 			if (req0) {
8480 				req0->stat_ctx_id = req->stat_ctx_id;
8481 				hwrm_req_send(bp, req0);
8482 			}
8483 			hwrm_req_send(bp, req);
8484 
8485 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8486 		}
8487 	}
8488 	hwrm_req_drop(bp, req);
8489 	if (req0)
8490 		hwrm_req_drop(bp, req0);
8491 }
8492 
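/* Allocate a firmware statistics context for each completion ring and
 * save the context ID for later ring and ring group setup.
 */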
8493 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8494 {
8495 	struct hwrm_stat_ctx_alloc_output *resp;
8496 	struct hwrm_stat_ctx_alloc_input *req;
8497 	int rc, i;
8498 
8499 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8500 		return 0;
8501 
8502 	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8503 	if (rc)
8504 		return rc;
8505 
8506 	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8507 	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8508 
8509 	resp = hwrm_req_hold(bp, req);
8510 	for (i = 0; i < bp->cp_nr_rings; i++) {
8511 		struct bnxt_napi *bnapi = bp->bnapi[i];
8512 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8513 
8514 		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8515 
8516 		rc = hwrm_req_send(bp, req);
8517 		if (rc)
8518 			break;
8519 
8520 		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8521 
8522 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8523 	}
8524 	hwrm_req_drop(bp, req);
8525 	return rc;
8526 }
8527 
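/* Query the function configuration: VF VLAN and trust settings,
 * DCBX/LLDP agent capabilities, multi-host mode, port partition type,
 * bridge mode, maximum MTU and doorbell BAR size/offset.
 */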
8528 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8529 {
8530 	struct hwrm_func_qcfg_output *resp;
8531 	struct hwrm_func_qcfg_input *req;
8532 	u16 flags;
8533 	int rc;
8534 
8535 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8536 	if (rc)
8537 		return rc;
8538 
8539 	req->fid = cpu_to_le16(0xffff);
8540 	resp = hwrm_req_hold(bp, req);
8541 	rc = hwrm_req_send(bp, req);
8542 	if (rc)
8543 		goto func_qcfg_exit;
8544 
8545 	flags = le16_to_cpu(resp->flags);
8546 #ifdef CONFIG_BNXT_SRIOV
8547 	if (BNXT_VF(bp)) {
8548 		struct bnxt_vf_info *vf = &bp->vf;
8549 
8550 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8551 		if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8552 			vf->flags |= BNXT_VF_TRUST;
8553 		else
8554 			vf->flags &= ~BNXT_VF_TRUST;
8555 	} else {
8556 		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8557 	}
8558 #endif
8559 	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8560 		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8561 		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8562 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8563 			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8564 	}
8565 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8566 		bp->flags |= BNXT_FLAG_MULTI_HOST;
8567 
8568 	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8569 		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8570 
8571 	if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8572 		bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8573 	if (resp->roce_bidi_opt_mode &
8574 	    FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
8575 		bp->cos0_cos1_shared = 1;
8576 	else
8577 		bp->cos0_cos1_shared = 0;
8578 
8579 	switch (resp->port_partition_type) {
8580 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8581 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8582 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8583 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8584 		bp->port_partition_type = resp->port_partition_type;
8585 		break;
8586 	}
8587 	if (bp->hwrm_spec_code < 0x10707 ||
8588 	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8589 		bp->br_mode = BRIDGE_MODE_VEB;
8590 	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8591 		bp->br_mode = BRIDGE_MODE_VEPA;
8592 	else
8593 		bp->br_mode = BRIDGE_MODE_UNDEF;
8594 
8595 	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8596 	if (!bp->max_mtu)
8597 		bp->max_mtu = BNXT_MAX_MTU;
8598 
8599 	if (bp->db_size)
8600 		goto func_qcfg_exit;
8601 
8602 	bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8603 	if (BNXT_CHIP_P5(bp)) {
8604 		if (BNXT_PF(bp))
8605 			bp->db_offset = DB_PF_OFFSET_P5;
8606 		else
8607 			bp->db_offset = DB_VF_OFFSET_P5;
8608 	}
8609 	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8610 				 1024);
8611 	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8612 	    bp->db_size <= bp->db_offset)
8613 		bp->db_size = pci_resource_len(bp->pdev, 2);
8614 
8615 func_qcfg_exit:
8616 	hwrm_req_drop(bp, req);
8617 	return rc;
8618 }
8619 
8620 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8621 				      u8 init_val, u8 init_offset,
8622 				      bool init_mask_set)
8623 {
8624 	ctxm->init_value = init_val;
8625 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8626 	if (init_mask_set)
8627 		ctxm->init_offset = init_offset * 4;
8628 	else
8629 		ctxm->init_value = 0;
8630 }
8631 
8632 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8633 {
8634 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8635 	u16 type;
8636 
8637 	for (type = 0; type < ctx_max; type++) {
8638 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8639 		int n = 1;
8640 
8641 		if (!ctxm->max_entries || ctxm->pg_info)
8642 			continue;
8643 
8644 		if (ctxm->instance_bmap)
8645 			n = hweight32(ctxm->instance_bmap);
8646 		ctxm->pg_info = kzalloc_objs(*ctxm->pg_info, n);
8647 		if (!ctxm->pg_info)
8648 			return -ENOMEM;
8649 	}
8650 	return 0;
8651 }
8652 
8653 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8654 				  struct bnxt_ctx_mem_type *ctxm, bool force);
8655 
8656 #define BNXT_CTX_INIT_VALID(flags)	\
8657 	(!!((flags) &			\
8658 	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8659 
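/* Query the backing store requirements of each context memory type
 * using the V2 firmware interface and populate bp->ctx.
 */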
8660 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8661 {
8662 	struct hwrm_func_backing_store_qcaps_v2_output *resp;
8663 	struct hwrm_func_backing_store_qcaps_v2_input *req;
8664 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8665 	u16 type;
8666 	int rc;
8667 
8668 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8669 	if (rc)
8670 		return rc;
8671 
8672 	if (!ctx) {
8673 		ctx = kzalloc_obj(*ctx);
8674 		if (!ctx)
8675 			return -ENOMEM;
8676 		bp->ctx = ctx;
8677 	}
8678 
8679 	resp = hwrm_req_hold(bp, req);
8680 
8681 	for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8682 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8683 		u8 init_val, init_off, i;
8684 		u32 max_entries;
8685 		u16 entry_size;
8686 		__le32 *p;
8687 		u32 flags;
8688 
8689 		req->type = cpu_to_le16(type);
8690 		rc = hwrm_req_send(bp, req);
8691 		if (rc)
8692 			goto ctx_done;
8693 		flags = le32_to_cpu(resp->flags);
8694 		type = le16_to_cpu(resp->next_valid_type);
8695 		if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8696 			bnxt_free_one_ctx_mem(bp, ctxm, true);
8697 			continue;
8698 		}
8699 		entry_size = le16_to_cpu(resp->entry_size);
8700 		max_entries = le32_to_cpu(resp->max_num_entries);
8701 		if (ctxm->mem_valid) {
8702 			if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8703 			    ctxm->entry_size != entry_size ||
8704 			    ctxm->max_entries != max_entries)
8705 				bnxt_free_one_ctx_mem(bp, ctxm, true);
8706 			else
8707 				continue;
8708 		}
8709 		ctxm->type = le16_to_cpu(resp->type);
8710 		ctxm->entry_size = entry_size;
8711 		ctxm->flags = flags;
8712 		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8713 		ctxm->entry_multiple = resp->entry_multiple;
8714 		ctxm->max_entries = max_entries;
8715 		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8716 		init_val = resp->ctx_init_value;
8717 		init_off = resp->ctx_init_offset;
8718 		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8719 					  BNXT_CTX_INIT_VALID(flags));
8720 		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8721 					      BNXT_MAX_SPLIT_ENTRY);
8722 		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8723 		     i++, p++)
8724 			ctxm->split[i] = le32_to_cpu(*p);
8725 	}
8726 	rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8727 
8728 ctx_done:
8729 	hwrm_req_drop(bp, req);
8730 	return rc;
8731 }
8732 
8733 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8734 {
8735 	struct hwrm_func_backing_store_qcaps_output *resp;
8736 	struct hwrm_func_backing_store_qcaps_input *req;
8737 	int rc;
8738 
8739 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8740 	    (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8741 		return 0;
8742 
8743 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8744 		return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8745 
8746 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8747 	if (rc)
8748 		return rc;
8749 
8750 	resp = hwrm_req_hold(bp, req);
8751 	rc = hwrm_req_send_silent(bp, req);
8752 	if (!rc) {
8753 		struct bnxt_ctx_mem_type *ctxm;
8754 		struct bnxt_ctx_mem_info *ctx;
8755 		u8 init_val, init_idx = 0;
8756 		u16 init_mask;
8757 
8758 		ctx = bp->ctx;
8759 		if (!ctx) {
8760 			ctx = kzalloc_obj(*ctx);
8761 			if (!ctx) {
8762 				rc = -ENOMEM;
8763 				goto ctx_err;
8764 			}
8765 			bp->ctx = ctx;
8766 		}
8767 		init_val = resp->ctx_kind_initializer;
8768 		init_mask = le16_to_cpu(resp->ctx_init_mask);
8769 
8770 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8771 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8772 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8773 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8774 		ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8775 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8776 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8777 					  (init_mask & (1 << init_idx++)) != 0);
8778 
8779 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8780 		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8781 		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8782 		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8783 		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8784 					  (init_mask & (1 << init_idx++)) != 0);
8785 
8786 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8787 		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8788 		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8789 		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8790 		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8791 					  (init_mask & (1 << init_idx++)) != 0);
8792 
8793 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8794 		ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8795 		ctxm->max_entries = ctxm->vnic_entries +
8796 			le16_to_cpu(resp->vnic_max_ring_table_entries);
8797 		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8798 		bnxt_init_ctx_initializer(ctxm, init_val,
8799 					  resp->vnic_init_offset,
8800 					  (init_mask & (1 << init_idx++)) != 0);
8801 
8802 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8803 		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8804 		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8805 		bnxt_init_ctx_initializer(ctxm, init_val,
8806 					  resp->stat_init_offset,
8807 					  (init_mask & (1 << init_idx++)) != 0);
8808 
8809 		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8810 		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8811 		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8812 		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8813 		ctxm->entry_multiple = resp->tqm_entries_multiple;
8814 		if (!ctxm->entry_multiple)
8815 			ctxm->entry_multiple = 1;
8816 
8817 		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8818 
8819 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8820 		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8821 		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8822 		ctxm->mrav_num_entries_units =
8823 			le16_to_cpu(resp->mrav_num_entries_units);
8824 		bnxt_init_ctx_initializer(ctxm, init_val,
8825 					  resp->mrav_init_offset,
8826 					  (init_mask & (1 << init_idx++)) != 0);
8827 
8828 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8829 		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8830 		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8831 
8832 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8833 		if (!ctx->tqm_fp_rings_count)
8834 			ctx->tqm_fp_rings_count = bp->max_q;
8835 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8836 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8837 
8838 		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8839 		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8840 		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8841 
8842 		rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8843 	} else {
8844 		rc = 0;
8845 	}
8846 ctx_err:
8847 	hwrm_req_drop(bp, req);
8848 	return rc;
8849 }
8850 
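/* Encode the page table depth (0, 1 or 2 levels) of a context memory
 * ring into the HWRM page size/level attribute byte and point the page
 * directory at either the PBL table (1 or 2 levels) or the single data
 * page (0 levels).  Nothing is written if the ring has no pages.
 */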
8851 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8852 				  __le64 *pg_dir)
8853 {
8854 	if (!rmem->nr_pages)
8855 		return;
8856 
8857 	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8858 	if (rmem->depth >= 1) {
8859 		if (rmem->depth == 2)
8860 			*pg_attr |= 2;
8861 		else
8862 			*pg_attr |= 1;
8863 		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8864 	} else {
8865 		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8866 	}
8867 }
8868 
8869 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
8870 	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
8871 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
8872 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
8873 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
8874 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8875 
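/* Configure backing store memory using the legacy FUNC_BACKING_STORE_CFG
 * request.  For every context type selected in @enables, the entry
 * counts, entry size and page table attributes are copied from the
 * driver's bnxt_ctx_mem_type state before the single request is sent.
 * A shorter legacy request length is used if the full request exceeds
 * the maximum extended request length supported by firmware.
 */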
8876 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8877 {
8878 	struct hwrm_func_backing_store_cfg_input *req;
8879 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8880 	struct bnxt_ctx_pg_info *ctx_pg;
8881 	struct bnxt_ctx_mem_type *ctxm;
8882 	void **__req = (void **)&req;
8883 	u32 req_len = sizeof(*req);
8884 	__le32 *num_entries;
8885 	__le64 *pg_dir;
8886 	u32 flags = 0;
8887 	u8 *pg_attr;
8888 	u32 ena;
8889 	int rc;
8890 	int i;
8891 
8892 	if (!ctx)
8893 		return 0;
8894 
8895 	if (req_len > bp->hwrm_max_ext_req_len)
8896 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8897 	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8898 	if (rc)
8899 		return rc;
8900 
8901 	req->enables = cpu_to_le32(enables);
8902 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8903 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8904 		ctx_pg = ctxm->pg_info;
8905 		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8906 		req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8907 		req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8908 		req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8909 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8910 				      &req->qpc_pg_size_qpc_lvl,
8911 				      &req->qpc_page_dir);
8912 
8913 		if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8914 			req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8915 	}
8916 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8917 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8918 		ctx_pg = ctxm->pg_info;
8919 		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8920 		req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8921 		req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8922 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8923 				      &req->srq_pg_size_srq_lvl,
8924 				      &req->srq_page_dir);
8925 	}
8926 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8927 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8928 		ctx_pg = ctxm->pg_info;
8929 		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8930 		req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8931 		req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8932 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8933 				      &req->cq_pg_size_cq_lvl,
8934 				      &req->cq_page_dir);
8935 	}
8936 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8937 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8938 		ctx_pg = ctxm->pg_info;
8939 		req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8940 		req->vnic_num_ring_table_entries =
8941 			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8942 		req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8943 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8944 				      &req->vnic_pg_size_vnic_lvl,
8945 				      &req->vnic_page_dir);
8946 	}
8947 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8948 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8949 		ctx_pg = ctxm->pg_info;
8950 		req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8951 		req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8952 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8953 				      &req->stat_pg_size_stat_lvl,
8954 				      &req->stat_page_dir);
8955 	}
8956 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8957 		u32 units;
8958 
8959 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8960 		ctx_pg = ctxm->pg_info;
8961 		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8962 		units = ctxm->mrav_num_entries_units;
8963 		if (units) {
8964 			u32 num_mr, num_ah = ctxm->mrav_av_entries;
8965 			u32 entries;
8966 
8967 			num_mr = ctx_pg->entries - num_ah;
8968 			entries = ((num_mr / units) << 16) | (num_ah / units);
8969 			req->mrav_num_entries = cpu_to_le32(entries);
8970 			flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8971 		}
8972 		req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8973 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8974 				      &req->mrav_pg_size_mrav_lvl,
8975 				      &req->mrav_page_dir);
8976 	}
8977 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8978 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8979 		ctx_pg = ctxm->pg_info;
8980 		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8981 		req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8982 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8983 				      &req->tim_pg_size_tim_lvl,
8984 				      &req->tim_page_dir);
8985 	}
8986 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8987 	for (i = 0, num_entries = &req->tqm_sp_num_entries,
8988 	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8989 	     pg_dir = &req->tqm_sp_page_dir,
8990 	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8991 	     ctx_pg = ctxm->pg_info;
8992 	     i < BNXT_MAX_TQM_RINGS;
8993 	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8994 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8995 		if (!(enables & ena))
8996 			continue;
8997 
8998 		req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8999 		*num_entries = cpu_to_le32(ctx_pg->entries);
9000 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
9001 	}
9002 	req->flags = cpu_to_le32(flags);
9003 	return hwrm_req_send(bp, req);
9004 }
9005 
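/* Allocate one block of context memory pages described by @ctx_pg,
 * using BNXT_PAGE_SIZE pages and the common ring memory allocator with
 * valid PTE flags set.
 */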
9006 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
9007 				  struct bnxt_ctx_pg_info *ctx_pg)
9008 {
9009 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9010 
9011 	rmem->page_size = BNXT_PAGE_SIZE;
9012 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
9013 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
9014 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
9015 	if (rmem->depth >= 1)
9016 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
9017 	return bnxt_alloc_ring(bp, rmem);
9018 }
9019 
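/* Allocate @mem_size bytes of context memory for @ctx_pg.  Memory that
 * needs more than MAX_CTX_PAGES pages (or an explicitly requested depth
 * greater than 1) is organized as a 2-level page table, with up to
 * MAX_CTX_PAGES data pages hanging off each second-level table; smaller
 * allocations use a flat page array with at most one indirection level.
 */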
9020 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
9021 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
9022 				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
9023 {
9024 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9025 	int rc;
9026 
9027 	if (!mem_size)
9028 		return -EINVAL;
9029 
9030 	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9031 	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
9032 		ctx_pg->nr_pages = 0;
9033 		return -EINVAL;
9034 	}
9035 	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
9036 		int nr_tbls, i;
9037 
9038 		rmem->depth = 2;
9039 		ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES);
9040 		if (!ctx_pg->ctx_pg_tbl)
9041 			return -ENOMEM;
9042 		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
9043 		rmem->nr_pages = nr_tbls;
9044 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9045 		if (rc)
9046 			return rc;
9047 		for (i = 0; i < nr_tbls; i++) {
9048 			struct bnxt_ctx_pg_info *pg_tbl;
9049 
9050 			pg_tbl = kzalloc_obj(*pg_tbl);
9051 			if (!pg_tbl)
9052 				return -ENOMEM;
9053 			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
9054 			rmem = &pg_tbl->ring_mem;
9055 			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
9056 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
9057 			rmem->depth = 1;
9058 			rmem->nr_pages = MAX_CTX_PAGES;
9059 			rmem->ctx_mem = ctxm;
9060 			if (i == (nr_tbls - 1)) {
9061 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
9062 
9063 				if (rem)
9064 					rmem->nr_pages = rem;
9065 			}
9066 			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
9067 			if (rc)
9068 				break;
9069 		}
9070 	} else {
9071 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9072 		if (rmem->nr_pages > 1 || depth)
9073 			rmem->depth = 1;
9074 		rmem->ctx_mem = ctxm;
9075 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9076 	}
9077 	return rc;
9078 }
9079 
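/* Copy context memory in @ctx_pg from offset @head up to (but not
 * including) @tail into @buf + @offset, handling wraparound and
 * indirect (depth > 1) page tables.  Returns the number of bytes
 * copied.
 */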
9080 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9081 				    struct bnxt_ctx_pg_info *ctx_pg,
9082 				    void *buf, size_t offset, size_t head,
9083 				    size_t tail)
9084 {
9085 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9086 	size_t nr_pages = ctx_pg->nr_pages;
9087 	int page_size = rmem->page_size;
9088 	size_t len = 0, total_len = 0;
9089 	u16 depth = rmem->depth;
9090 
9091 	tail %= nr_pages * page_size;
9092 	do {
9093 		if (depth > 1) {
9094 			int i = head / (page_size * MAX_CTX_PAGES);
9095 			struct bnxt_ctx_pg_info *pg_tbl;
9096 
9097 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
9098 			rmem = &pg_tbl->ring_mem;
9099 		}
9100 		len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9101 		head += len;
9102 		offset += len;
9103 		total_len += len;
9104 		if (head >= nr_pages * page_size)
9105 			head = 0;
9106 	} while (head != tail);
9107 	return total_len;
9108 }
9109 
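/* Free all pages and page tables belonging to one context memory block,
 * including the second-level tables when 2-level paging was used.
 */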
9110 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9111 				  struct bnxt_ctx_pg_info *ctx_pg)
9112 {
9113 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9114 
9115 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9116 	    ctx_pg->ctx_pg_tbl) {
9117 		int i, nr_tbls = rmem->nr_pages;
9118 
9119 		for (i = 0; i < nr_tbls; i++) {
9120 			struct bnxt_ctx_pg_info *pg_tbl;
9121 			struct bnxt_ring_mem_info *rmem2;
9122 
9123 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
9124 			if (!pg_tbl)
9125 				continue;
9126 			rmem2 = &pg_tbl->ring_mem;
9127 			bnxt_free_ring(bp, rmem2);
9128 			ctx_pg->ctx_pg_arr[i] = NULL;
9129 			kfree(pg_tbl);
9130 			ctx_pg->ctx_pg_tbl[i] = NULL;
9131 		}
9132 		kfree(ctx_pg->ctx_pg_tbl);
9133 		ctx_pg->ctx_pg_tbl = NULL;
9134 	}
9135 	bnxt_free_ring(bp, rmem);
9136 	ctx_pg->nr_pages = 0;
9137 }
9138 
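/* Allocate backing store pages for one context memory type.  The entry
 * count is rounded up to the required multiple and clamped to the
 * min/max reported by firmware, and one set of page tables is allocated
 * per instance in the instance bitmap.  The type is marked valid on
 * success.
 */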
9139 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9140 				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
9141 				   u8 pg_lvl)
9142 {
9143 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9144 	int i, rc = 0, n = 1;
9145 	u32 mem_size;
9146 
9147 	if (!ctxm->entry_size || !ctx_pg)
9148 		return -EINVAL;
9149 	if (ctxm->instance_bmap)
9150 		n = hweight32(ctxm->instance_bmap);
9151 	if (ctxm->entry_multiple)
9152 		entries = roundup(entries, ctxm->entry_multiple);
9153 	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9154 	mem_size = entries * ctxm->entry_size;
9155 	for (i = 0; i < n && !rc; i++) {
9156 		ctx_pg[i].entries = entries;
9157 		rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9158 					    ctxm->init_value ? ctxm : NULL);
9159 	}
9160 	if (!rc)
9161 		ctxm->mem_valid = 1;
9162 	return rc;
9163 }
9164 
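/* Send FUNC_BACKING_STORE_CFG_V2 for one context memory type, one
 * request per instance in the instance bitmap.  The BS_CFG_ALL_DONE
 * flag is set on the final instance of the last type so firmware knows
 * that backing store configuration is complete.
 */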
9165 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9166 					       struct bnxt_ctx_mem_type *ctxm,
9167 					       bool last)
9168 {
9169 	struct hwrm_func_backing_store_cfg_v2_input *req;
9170 	u32 instance_bmap = ctxm->instance_bmap;
9171 	int i, j, rc = 0, n = 1;
9172 	__le32 *p;
9173 
9174 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9175 		return 0;
9176 
9177 	if (instance_bmap)
9178 		n = hweight32(ctxm->instance_bmap);
9179 	else
9180 		instance_bmap = 1;
9181 
9182 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9183 	if (rc)
9184 		return rc;
9185 	hwrm_req_hold(bp, req);
9186 	req->type = cpu_to_le16(ctxm->type);
9187 	req->entry_size = cpu_to_le16(ctxm->entry_size);
9188 	if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9189 	    bnxt_bs_trace_avail(bp, ctxm->type)) {
9190 		struct bnxt_bs_trace_info *bs_trace;
9191 		u32 enables;
9192 
9193 		enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9194 		req->enables = cpu_to_le32(enables);
9195 		bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9196 		req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9197 	}
9198 	req->subtype_valid_cnt = ctxm->split_entry_cnt;
9199 	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9200 		p[i] = cpu_to_le32(ctxm->split[i]);
9201 	for (i = 0, j = 0; j < n && !rc; i++) {
9202 		struct bnxt_ctx_pg_info *ctx_pg;
9203 
9204 		if (!(instance_bmap & (1 << i)))
9205 			continue;
9206 		req->instance = cpu_to_le16(i);
9207 		ctx_pg = &ctxm->pg_info[j++];
9208 		if (!ctx_pg->entries)
9209 			continue;
9210 		req->num_entries = cpu_to_le32(ctx_pg->entries);
9211 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9212 				      &req->page_size_pbl_level,
9213 				      &req->page_dir);
9214 		if (last && j == n)
9215 			req->flags =
9216 				cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9217 		rc = hwrm_req_send(bp, req);
9218 	}
9219 	hwrm_req_drop(bp, req);
9220 	return rc;
9221 }
9222 
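/* Configure all valid context memory types through the V2 interface.
 * FW trace memory types are allocated and initialized here if not
 * already valid, and the last valid type is marked so the final HWRM
 * request carries the "all done" flag.
 */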
9223 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9224 {
9225 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
9226 	struct bnxt_ctx_mem_type *ctxm;
9227 	u16 last_type = BNXT_CTX_INV;
9228 	int rc = 0;
9229 	u16 type;
9230 
9231 	for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
9232 		ctxm = &ctx->ctx_arr[type];
9233 		if (!bnxt_bs_trace_avail(bp, type))
9234 			continue;
9235 		if (!ctxm->mem_valid) {
9236 			rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9237 						     ctxm->max_entries, 1);
9238 			if (rc) {
9239 				netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9240 					    type);
9241 				continue;
9242 			}
9243 			bnxt_bs_trace_init(bp, ctxm);
9244 		}
9245 		last_type = type;
9246 	}
9247 
9248 	if (last_type == BNXT_CTX_INV) {
9249 		for (type = 0; type < BNXT_CTX_MAX; type++) {
9250 			ctxm = &ctx->ctx_arr[type];
9251 			if (ctxm->mem_valid)
9252 				last_type = type;
9253 		}
9254 		if (last_type == BNXT_CTX_INV)
9255 			return 0;
9256 	}
9257 	ctx->ctx_arr[last_type].last = 1;
9258 
9259 	for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9260 		ctxm = &ctx->ctx_arr[type];
9261 
9262 		if (!ctxm->mem_valid)
9263 			continue;
9264 		rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9265 		if (rc)
9266 			return rc;
9267 	}
9268 	return 0;
9269 }
9270 
9271 /**
9272  * __bnxt_copy_ctx_mem - copy host context memory
9273  * @bp: The driver context
9274  * @ctxm: The pointer to the context memory type
9275  * @buf: The destination buffer or NULL to just obtain the length
9276  * @offset: The buffer offset to copy the data to
9277  * @head: The head offset of context memory to copy from
9278  * @tail: The tail offset (last byte + 1) of context memory to end the copy
9279  *
9280  * This function is called for debugging purposes to dump the host context
9281  * used by the chip.
9282  *
9283  * Return: Length of memory copied
9284  */
9285 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9286 				  struct bnxt_ctx_mem_type *ctxm, void *buf,
9287 				  size_t offset, size_t head, size_t tail)
9288 {
9289 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9290 	size_t len = 0, total_len = 0;
9291 	int i, n = 1;
9292 
9293 	if (!ctx_pg)
9294 		return 0;
9295 
9296 	if (ctxm->instance_bmap)
9297 		n = hweight32(ctxm->instance_bmap);
9298 	for (i = 0; i < n; i++) {
9299 		len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9300 					    tail);
9301 		offset += len;
9302 		total_len += len;
9303 	}
9304 	return total_len;
9305 }
9306 
9307 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9308 			 void *buf, size_t offset)
9309 {
9310 	size_t tail = ctxm->max_entries * ctxm->entry_size;
9311 
9312 	return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9313 }
9314 
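/* Free the backing store pages of one context memory type.  Valid types
 * flagged BNXT_CTX_MEM_PERSIST are preserved unless @force is set;
 * otherwise all instances are freed and the type is cleared.
 */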
9315 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9316 				  struct bnxt_ctx_mem_type *ctxm, bool force)
9317 {
9318 	struct bnxt_ctx_pg_info *ctx_pg;
9319 	int i, n = 1;
9320 
9321 	ctxm->last = 0;
9322 
9323 	if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9324 		return;
9325 
9326 	ctx_pg = ctxm->pg_info;
9327 	if (ctx_pg) {
9328 		if (ctxm->instance_bmap)
9329 			n = hweight32(ctxm->instance_bmap);
9330 		for (i = 0; i < n; i++)
9331 			bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9332 
9333 		kfree(ctx_pg);
9334 		ctxm->pg_info = NULL;
9335 		ctxm->mem_valid = 0;
9336 	}
9337 	memset(ctxm, 0, sizeof(*ctxm));
9338 }
9339 
9340 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9341 {
9342 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
9343 	u16 type;
9344 
9345 	if (!ctx)
9346 		return;
9347 
9348 	for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9349 		bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9350 
9351 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9352 	if (force) {
9353 		kfree(ctx);
9354 		bp->ctx = NULL;
9355 	}
9356 }
9357 
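/* Query context memory requirements from firmware and allocate backing
 * store for the QP, SRQ, CQ, VNIC, STAT and TQM context types (plus
 * MRAV and TIM when RoCE is supported), then configure firmware through
 * the V2 or legacy backing store interface.
 */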
9358 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9359 {
9360 	struct bnxt_ctx_mem_type *ctxm;
9361 	struct bnxt_ctx_mem_info *ctx;
9362 	u32 l2_qps, qp1_qps, max_qps;
9363 	u32 ena, entries_sp, entries;
9364 	u32 srqs, max_srqs, min;
9365 	u32 num_mr, num_ah;
9366 	u32 extra_srqs = 0;
9367 	u32 extra_qps = 0;
9368 	u32 fast_qpmd_qps;
9369 	u8 pg_lvl = 1;
9370 	int i, rc;
9371 
9372 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9373 	if (rc) {
9374 		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9375 			   rc);
9376 		return rc;
9377 	}
9378 	ctx = bp->ctx;
9379 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9380 		return 0;
9381 
9382 	ena = 0;
9383 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9384 		goto skip_legacy;
9385 
9386 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9387 	l2_qps = ctxm->qp_l2_entries;
9388 	qp1_qps = ctxm->qp_qp1_entries;
9389 	fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9390 	max_qps = ctxm->max_entries;
9391 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9392 	srqs = ctxm->srq_l2_entries;
9393 	max_srqs = ctxm->max_entries;
9394 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9395 		pg_lvl = 2;
9396 		if (BNXT_SW_RES_LMT(bp)) {
9397 			extra_qps = max_qps - l2_qps - qp1_qps;
9398 			extra_srqs = max_srqs - srqs;
9399 		} else {
9400 			extra_qps = min_t(u32, 65536,
9401 					  max_qps - l2_qps - qp1_qps);
9402 			/* allocate extra qps if fw supports RoCE fast qp
9403 			 * destroy feature
9404 			 */
9405 			extra_qps += fast_qpmd_qps;
9406 			extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9407 		}
9408 		if (fast_qpmd_qps)
9409 			ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9410 	}
9411 
9412 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9413 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9414 				     pg_lvl);
9415 	if (rc)
9416 		return rc;
9417 
9418 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9419 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9420 	if (rc)
9421 		return rc;
9422 
9423 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9424 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9425 				     extra_qps * 2, pg_lvl);
9426 	if (rc)
9427 		return rc;
9428 
9429 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9430 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9431 	if (rc)
9432 		return rc;
9433 
9434 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9435 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9436 	if (rc)
9437 		return rc;
9438 
9439 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9440 		goto skip_rdma;
9441 
9442 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9443 	if (BNXT_SW_RES_LMT(bp) &&
9444 	    ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9445 		num_ah = ctxm->mrav_av_entries;
9446 		num_mr = ctxm->max_entries - num_ah;
9447 	} else {
9448 		/* 128K extra is needed to accommodate static AH context
9449 		 * allocation by f/w.
9450 		 */
9451 		num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9452 		num_ah = min_t(u32, num_mr, 1024 * 128);
9453 		ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9454 		if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9455 			ctxm->mrav_av_entries = num_ah;
9456 	}
9457 
9458 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9459 	if (rc)
9460 		return rc;
9461 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9462 
9463 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9464 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9465 	if (rc)
9466 		return rc;
9467 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9468 
9469 skip_rdma:
9470 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9471 	min = ctxm->min_entries;
9472 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9473 		     2 * (extra_qps + qp1_qps) + min;
9474 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9475 	if (rc)
9476 		return rc;
9477 
9478 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9479 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
9480 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9481 	if (rc)
9482 		return rc;
9483 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9484 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9485 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9486 
9487 skip_legacy:
9488 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9489 		rc = bnxt_backing_store_cfg_v2(bp);
9490 	else
9491 		rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9492 	if (rc) {
9493 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9494 			   rc);
9495 		return rc;
9496 	}
9497 	ctx->flags |= BNXT_CTX_FLAG_INITED;
9498 	return 0;
9499 }
9500 
9501 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9502 {
9503 	struct hwrm_dbg_crashdump_medium_cfg_input *req;
9504 	u16 page_attr;
9505 	int rc;
9506 
9507 	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9508 		return 0;
9509 
9510 	rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9511 	if (rc)
9512 		return rc;
9513 
9514 	if (BNXT_PAGE_SIZE == 0x2000)
9515 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9516 	else if (BNXT_PAGE_SIZE == 0x10000)
9517 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9518 	else
9519 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9520 	req->pg_size_lvl = cpu_to_le16(page_attr |
9521 				       bp->fw_crash_mem->ring_mem.depth);
9522 	req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9523 	req->size = cpu_to_le32(bp->fw_crash_len);
9524 	req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9525 	return hwrm_req_send(bp, req);
9526 }
9527 
9528 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9529 {
9530 	if (bp->fw_crash_mem) {
9531 		bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9532 		kfree(bp->fw_crash_mem);
9533 		bp->fw_crash_mem = NULL;
9534 	}
9535 }
9536 
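/* Allocate (or reuse) host memory for the firmware crash dump when the
 * device supports dumping to host DDR.  Existing pages are kept if they
 * are already large enough for the dump length reported by firmware.
 */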
9537 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9538 {
9539 	u32 mem_size = 0;
9540 	int rc;
9541 
9542 	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9543 		return 0;
9544 
9545 	rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9546 	if (rc)
9547 		return rc;
9548 
9549 	mem_size = round_up(mem_size, 4);
9550 
9551 	/* keep and use the existing pages */
9552 	if (bp->fw_crash_mem &&
9553 	    mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9554 		goto alloc_done;
9555 
9556 	if (bp->fw_crash_mem)
9557 		bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9558 	else
9559 		bp->fw_crash_mem = kzalloc_obj(*bp->fw_crash_mem);
9560 	if (!bp->fw_crash_mem)
9561 		return -ENOMEM;
9562 
9563 	rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9564 	if (rc) {
9565 		bnxt_free_crash_dump_mem(bp);
9566 		return rc;
9567 	}
9568 
9569 alloc_done:
9570 	bp->fw_crash_len = mem_size;
9571 	return 0;
9572 }
9573 
9574 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9575 {
9576 	struct hwrm_func_resource_qcaps_output *resp;
9577 	struct hwrm_func_resource_qcaps_input *req;
9578 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9579 	int rc;
9580 
9581 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9582 	if (rc)
9583 		return rc;
9584 
9585 	req->fid = cpu_to_le16(0xffff);
9586 	resp = hwrm_req_hold(bp, req);
9587 	rc = hwrm_req_send_silent(bp, req);
9588 	if (rc)
9589 		goto hwrm_func_resc_qcaps_exit;
9590 
9591 	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9592 	if (!all)
9593 		goto hwrm_func_resc_qcaps_exit;
9594 
9595 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9596 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9597 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9598 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9599 	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9600 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9601 	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9602 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9603 	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9604 	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9605 	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9606 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9607 	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9608 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9609 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9610 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9611 
9612 	if (hw_resc->max_rsscos_ctxs >=
9613 	    hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
9614 		bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
9615 
9616 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9617 		u16 max_msix = le16_to_cpu(resp->max_msix);
9618 
9619 		hw_resc->max_nqs = max_msix;
9620 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9621 	}
9622 
9623 	if (BNXT_PF(bp)) {
9624 		struct bnxt_pf_info *pf = &bp->pf;
9625 
9626 		pf->vf_resv_strategy =
9627 			le16_to_cpu(resp->vf_reservation_strategy);
9628 		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9629 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9630 	}
9631 hwrm_func_resc_qcaps_exit:
9632 	hwrm_req_drop(bp, req);
9633 	return rc;
9634 }
9635 
9636 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9637 {
9638 	struct hwrm_port_mac_ptp_qcfg_output *resp;
9639 	struct hwrm_port_mac_ptp_qcfg_input *req;
9640 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9641 	u8 flags;
9642 	int rc;
9643 
9644 	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9645 		rc = -ENODEV;
9646 		goto no_ptp;
9647 	}
9648 
9649 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9650 	if (rc)
9651 		goto no_ptp;
9652 
9653 	req->port_id = cpu_to_le16(bp->pf.port_id);
9654 	resp = hwrm_req_hold(bp, req);
9655 	rc = hwrm_req_send(bp, req);
9656 	if (rc)
9657 		goto exit;
9658 
9659 	flags = resp->flags;
9660 	if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9661 	    !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9662 		rc = -ENODEV;
9663 		goto exit;
9664 	}
9665 	if (!ptp) {
9666 		ptp = kzalloc_obj(*ptp);
9667 		if (!ptp) {
9668 			rc = -ENOMEM;
9669 			goto exit;
9670 		}
9671 		ptp->bp = bp;
9672 		bp->ptp_cfg = ptp;
9673 	}
9674 
9675 	if (flags &
9676 	    (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9677 	     PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9678 		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9679 		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9680 	} else if (BNXT_CHIP_P5(bp)) {
9681 		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9682 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9683 	} else {
9684 		rc = -ENODEV;
9685 		goto exit;
9686 	}
9687 	ptp->rtc_configured =
9688 		(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9689 	rc = bnxt_ptp_init(bp);
9690 	if (rc)
9691 		netdev_warn(bp->dev, "PTP initialization failed.\n");
9692 exit:
9693 	hwrm_req_drop(bp, req);
9694 	if (!rc)
9695 		return 0;
9696 
9697 no_ptp:
9698 	bnxt_ptp_clear(bp);
9699 	kfree(ptp);
9700 	bp->ptp_cfg = NULL;
9701 	return rc;
9702 }
9703 
9704 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9705 {
9706 	u32 flags, flags_ext, flags_ext2, flags_ext3;
9707 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9708 	struct hwrm_func_qcaps_output *resp;
9709 	struct hwrm_func_qcaps_input *req;
9710 	int rc;
9711 
9712 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9713 	if (rc)
9714 		return rc;
9715 
9716 	req->fid = cpu_to_le16(0xffff);
9717 	resp = hwrm_req_hold(bp, req);
9718 	rc = hwrm_req_send(bp, req);
9719 	if (rc)
9720 		goto hwrm_func_qcaps_exit;
9721 
9722 	flags = le32_to_cpu(resp->flags);
9723 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9724 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9725 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9726 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9727 	if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
9728 		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
9729 	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9730 		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9731 	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9732 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9733 	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9734 		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9735 	if (flags &  FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9736 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9737 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9738 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9739 	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9740 		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9741 	if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9742 		bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9743 
9744 	flags_ext = le32_to_cpu(resp->flags_ext);
9745 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9746 		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9747 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9748 		bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9749 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
9750 		bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
9751 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9752 		bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9753 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9754 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9755 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9756 		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9757 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9758 		bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9759 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9760 		bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9761 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9762 		bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9763 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9764 		bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9765 
9766 	flags_ext2 = le32_to_cpu(resp->flags_ext2);
9767 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9768 		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9769 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9770 		bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9771 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9772 		bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9773 	if (flags_ext2 &
9774 	    FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9775 		bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9776 	if (BNXT_PF(bp) &&
9777 	    (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9778 		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9779 
9780 	flags_ext3 = le32_to_cpu(resp->flags_ext3);
9781 	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9782 		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9783 	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9784 		bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9785 
9786 	bp->tx_push_thresh = 0;
9787 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9788 	    BNXT_FW_MAJ(bp) > 217)
9789 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9790 
9791 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9792 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9793 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9794 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9795 	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9796 	if (!hw_resc->max_hw_ring_grps)
9797 		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9798 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9799 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9800 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9801 
9802 	hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9803 	hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9804 	hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9805 	hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9806 	hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9807 	hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9808 
9809 	if (BNXT_PF(bp)) {
9810 		struct bnxt_pf_info *pf = &bp->pf;
9811 
9812 		pf->fw_fid = le16_to_cpu(resp->fid);
9813 		pf->port_id = le16_to_cpu(resp->port_id);
9814 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9815 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9816 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
9817 		bp->flags &= ~BNXT_FLAG_WOL_CAP;
9818 		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9819 			bp->flags |= BNXT_FLAG_WOL_CAP;
9820 		if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9821 			bp->fw_cap |= BNXT_FW_CAP_PTP;
9822 		} else {
9823 			bnxt_ptp_clear(bp);
9824 			kfree(bp->ptp_cfg);
9825 			bp->ptp_cfg = NULL;
9826 		}
9827 	} else {
9828 #ifdef CONFIG_BNXT_SRIOV
9829 		struct bnxt_vf_info *vf = &bp->vf;
9830 
9831 		vf->fw_fid = le16_to_cpu(resp->fid);
9832 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9833 #endif
9834 	}
9835 	bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9836 
9837 hwrm_func_qcaps_exit:
9838 	hwrm_req_drop(bp, req);
9839 	return rc;
9840 }
9841 
9842 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9843 {
9844 	struct hwrm_dbg_qcaps_output *resp;
9845 	struct hwrm_dbg_qcaps_input *req;
9846 	int rc;
9847 
9848 	bp->fw_dbg_cap = 0;
9849 	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9850 		return;
9851 
9852 	rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9853 	if (rc)
9854 		return;
9855 
9856 	req->fid = cpu_to_le16(0xffff);
9857 	resp = hwrm_req_hold(bp, req);
9858 	rc = hwrm_req_send(bp, req);
9859 	if (rc)
9860 		goto hwrm_dbg_qcaps_exit;
9861 
9862 	bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9863 
9864 hwrm_dbg_qcaps_exit:
9865 	hwrm_req_drop(bp, req);
9866 }
9867 
9868 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9869 
9870 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9871 {
9872 	int rc;
9873 
9874 	rc = __bnxt_hwrm_func_qcaps(bp);
9875 	if (rc)
9876 		return rc;
9877 
9878 	bnxt_hwrm_dbg_qcaps(bp);
9879 
9880 	rc = bnxt_hwrm_queue_qportcfg(bp);
9881 	if (rc) {
9882 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9883 		return rc;
9884 	}
9885 	if (bp->hwrm_spec_code >= 0x10803) {
9886 		rc = bnxt_alloc_ctx_mem(bp);
9887 		if (rc)
9888 			return rc;
9889 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9890 		if (!rc)
9891 			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9892 	}
9893 	return 0;
9894 }
9895 
9896 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9897 {
9898 	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9899 	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9900 	u32 flags;
9901 	int rc;
9902 
9903 	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9904 		return 0;
9905 
9906 	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9907 	if (rc)
9908 		return rc;
9909 
9910 	resp = hwrm_req_hold(bp, req);
9911 	rc = hwrm_req_send(bp, req);
9912 	if (rc)
9913 		goto hwrm_cfa_adv_qcaps_exit;
9914 
9915 	flags = le32_to_cpu(resp->flags);
9916 	if (flags &
9917 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9918 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9919 
9920 	if (flags &
9921 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9922 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9923 
9924 	if (flags &
9925 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9926 		bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9927 
9928 hwrm_cfa_adv_qcaps_exit:
9929 	hwrm_req_drop(bp, req);
9930 	return rc;
9931 }
9932 
9933 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9934 {
9935 	if (bp->fw_health)
9936 		return 0;
9937 
9938 	bp->fw_health = kzalloc_obj(*bp->fw_health);
9939 	if (!bp->fw_health)
9940 		return -ENOMEM;
9941 
9942 	mutex_init(&bp->fw_health->lock);
9943 	return 0;
9944 }
9945 
9946 static int bnxt_alloc_fw_health(struct bnxt *bp)
9947 {
9948 	int rc;
9949 
9950 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9951 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9952 		return 0;
9953 
9954 	rc = __bnxt_alloc_fw_health(bp);
9955 	if (rc) {
9956 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9957 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9958 		return rc;
9959 	}
9960 
9961 	return 0;
9962 }
9963 
9964 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9965 {
9966 	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9967 					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9968 					 BNXT_FW_HEALTH_WIN_MAP_OFF);
9969 }
9970 
9971 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9972 {
9973 	struct bnxt_fw_health *fw_health = bp->fw_health;
9974 	u32 reg_type;
9975 
9976 	if (!fw_health)
9977 		return;
9978 
9979 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9980 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9981 		fw_health->status_reliable = false;
9982 
9983 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9984 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9985 		fw_health->resets_reliable = false;
9986 }
9987 
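/* Locate the firmware health status register, either through the
 * hcomm_status structure at its well-known GRC location or, on P5+
 * chips without a valid signature, the fixed GRC status register, and
 * map it through a GRC window when it is a GRC-type register.
 */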
9988 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9989 {
9990 	void __iomem *hs;
9991 	u32 status_loc;
9992 	u32 reg_type;
9993 	u32 sig;
9994 
9995 	if (bp->fw_health)
9996 		bp->fw_health->status_reliable = false;
9997 
9998 	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9999 	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
10000 
10001 	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
10002 	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
10003 		if (!bp->chip_num) {
10004 			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
10005 			bp->chip_num = readl(bp->bar0 +
10006 					     BNXT_FW_HEALTH_WIN_BASE +
10007 					     BNXT_GRC_REG_CHIP_NUM);
10008 		}
10009 		if (!BNXT_CHIP_P5_PLUS(bp))
10010 			return;
10011 
10012 		status_loc = BNXT_GRC_REG_STATUS_P5 |
10013 			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
10014 	} else {
10015 		status_loc = readl(hs + offsetof(struct hcomm_status,
10016 						 fw_status_loc));
10017 	}
10018 
10019 	if (__bnxt_alloc_fw_health(bp)) {
10020 		netdev_warn(bp->dev, "no memory for firmware status checks\n");
10021 		return;
10022 	}
10023 
10024 	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
10025 	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
10026 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
10027 		__bnxt_map_fw_health_reg(bp, status_loc);
10028 		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
10029 			BNXT_FW_HEALTH_WIN_OFF(status_loc);
10030 	}
10031 
10032 	bp->fw_health->status_reliable = true;
10033 }
10034 
10035 static int bnxt_map_fw_health_regs(struct bnxt *bp)
10036 {
10037 	struct bnxt_fw_health *fw_health = bp->fw_health;
10038 	u32 reg_base = 0xffffffff;
10039 	int i;
10040 
10041 	bp->fw_health->status_reliable = false;
10042 	bp->fw_health->resets_reliable = false;
10043 	/* Only pre-map the monitoring GRC registers using window 3 */
10044 	for (i = 0; i < 4; i++) {
10045 		u32 reg = fw_health->regs[i];
10046 
10047 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
10048 			continue;
10049 		if (reg_base == 0xffffffff)
10050 			reg_base = reg & BNXT_GRC_BASE_MASK;
10051 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
10052 			return -ERANGE;
10053 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
10054 	}
10055 	bp->fw_health->status_reliable = true;
10056 	bp->fw_health->resets_reliable = true;
10057 	if (reg_base == 0xffffffff)
10058 		return 0;
10059 
10060 	__bnxt_map_fw_health_reg(bp, reg_base);
10061 	return 0;
10062 }
10063 
10064 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
10065 {
10066 	if (!bp->fw_health)
10067 		return;
10068 
10069 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
10070 		bp->fw_health->status_reliable = true;
10071 		bp->fw_health->resets_reliable = true;
10072 	} else {
10073 		bnxt_try_map_fw_health_reg(bp);
10074 	}
10075 }
10076 
10077 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10078 {
10079 	struct bnxt_fw_health *fw_health = bp->fw_health;
10080 	struct hwrm_error_recovery_qcfg_output *resp;
10081 	struct hwrm_error_recovery_qcfg_input *req;
10082 	int rc, i;
10083 
10084 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10085 		return 0;
10086 
10087 	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10088 	if (rc)
10089 		return rc;
10090 
10091 	resp = hwrm_req_hold(bp, req);
10092 	rc = hwrm_req_send(bp, req);
10093 	if (rc)
10094 		goto err_recovery_out;
10095 	fw_health->flags = le32_to_cpu(resp->flags);
10096 	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10097 	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10098 		rc = -EINVAL;
10099 		goto err_recovery_out;
10100 	}
10101 	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10102 	fw_health->master_func_wait_dsecs =
10103 		le32_to_cpu(resp->master_func_wait_period);
10104 	fw_health->normal_func_wait_dsecs =
10105 		le32_to_cpu(resp->normal_func_wait_period);
10106 	fw_health->post_reset_wait_dsecs =
10107 		le32_to_cpu(resp->master_func_wait_period_after_reset);
10108 	fw_health->post_reset_max_wait_dsecs =
10109 		le32_to_cpu(resp->max_bailout_time_after_reset);
10110 	fw_health->regs[BNXT_FW_HEALTH_REG] =
10111 		le32_to_cpu(resp->fw_health_status_reg);
10112 	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10113 		le32_to_cpu(resp->fw_heartbeat_reg);
10114 	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10115 		le32_to_cpu(resp->fw_reset_cnt_reg);
10116 	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10117 		le32_to_cpu(resp->reset_inprogress_reg);
10118 	fw_health->fw_reset_inprog_reg_mask =
10119 		le32_to_cpu(resp->reset_inprogress_reg_mask);
10120 	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10121 	if (fw_health->fw_reset_seq_cnt >= 16) {
10122 		rc = -EINVAL;
10123 		goto err_recovery_out;
10124 	}
10125 	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10126 		fw_health->fw_reset_seq_regs[i] =
10127 			le32_to_cpu(resp->reset_reg[i]);
10128 		fw_health->fw_reset_seq_vals[i] =
10129 			le32_to_cpu(resp->reset_reg_val[i]);
10130 		fw_health->fw_reset_seq_delay_msec[i] =
10131 			resp->delay_after_reset[i];
10132 	}
10133 err_recovery_out:
10134 	hwrm_req_drop(bp, req);
10135 	if (!rc)
10136 		rc = bnxt_map_fw_health_regs(bp);
10137 	if (rc)
10138 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10139 	return rc;
10140 }
10141 
10142 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10143 {
10144 	struct hwrm_func_reset_input *req;
10145 	int rc;
10146 
10147 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10148 	if (rc)
10149 		return rc;
10150 
10151 	req->enables = 0;
10152 	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10153 	return hwrm_req_send(bp, req);
10154 }
10155 
10156 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10157 {
10158 	struct hwrm_nvm_get_dev_info_output nvm_info;
10159 
10160 	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10161 		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10162 			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10163 			 nvm_info.nvm_cfg_ver_upd);
10164 }
10165 
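/* Query the hardware CoS queue configuration and record the queue IDs
 * and profiles per TC.  A CNP queue (reserved for RoCE congestion
 * control) is exposed as a TC only on a PF without RoCE capability;
 * max_tc and max_lltc are capped to the driver limits.
 */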
10166 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10167 {
10168 	struct hwrm_queue_qportcfg_output *resp;
10169 	struct hwrm_queue_qportcfg_input *req;
10170 	u8 i, j, *qptr;
10171 	bool no_rdma;
10172 	int rc = 0;
10173 
10174 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10175 	if (rc)
10176 		return rc;
10177 
10178 	resp = hwrm_req_hold(bp, req);
10179 	rc = hwrm_req_send(bp, req);
10180 	if (rc)
10181 		goto qportcfg_exit;
10182 
10183 	if (!resp->max_configurable_queues) {
10184 		rc = -EINVAL;
10185 		goto qportcfg_exit;
10186 	}
10187 	bp->max_tc = resp->max_configurable_queues;
10188 	bp->max_lltc = resp->max_configurable_lossless_queues;
10189 	if (bp->max_tc > BNXT_MAX_QUEUE)
10190 		bp->max_tc = BNXT_MAX_QUEUE;
10191 
10192 	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10193 	qptr = &resp->queue_id0;
10194 	for (i = 0, j = 0; i < bp->max_tc; i++) {
10195 		bp->q_info[j].queue_id = *qptr;
10196 		bp->q_ids[i] = *qptr++;
10197 		bp->q_info[j].queue_profile = *qptr++;
10198 		bp->tc_to_qidx[j] = j;
10199 		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10200 		    (no_rdma && BNXT_PF(bp)))
10201 			j++;
10202 	}
10203 	bp->max_q = bp->max_tc;
10204 	bp->max_tc = max_t(u8, j, 1);
10205 
10206 	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10207 		bp->max_tc = 1;
10208 
10209 	if (bp->max_lltc > bp->max_tc)
10210 		bp->max_lltc = bp->max_tc;
10211 
10212 qportcfg_exit:
10213 	hwrm_req_drop(bp, req);
10214 	return rc;
10215 }
10216 
10217 static int bnxt_hwrm_poll(struct bnxt *bp)
10218 {
10219 	struct hwrm_ver_get_input *req;
10220 	int rc;
10221 
10222 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10223 	if (rc)
10224 		return rc;
10225 
10226 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10227 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
10228 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10229 
10230 	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10231 	rc = hwrm_req_send(bp, req);
10232 	return rc;
10233 }
10234 
10235 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10236 {
10237 	struct hwrm_ver_get_output *resp;
10238 	struct hwrm_ver_get_input *req;
10239 	u16 fw_maj, fw_min, fw_bld, fw_rsv;
10240 	u32 dev_caps_cfg, hwrm_ver;
10241 	int rc, len, max_tmo_secs;
10242 
10243 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10244 	if (rc)
10245 		return rc;
10246 
10247 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10248 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10249 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10250 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
10251 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10252 
10253 	resp = hwrm_req_hold(bp, req);
10254 	rc = hwrm_req_send(bp, req);
10255 	if (rc)
10256 		goto hwrm_ver_get_exit;
10257 
10258 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10259 
10260 	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10261 			     resp->hwrm_intf_min_8b << 8 |
10262 			     resp->hwrm_intf_upd_8b;
10263 	if (resp->hwrm_intf_maj_8b < 1) {
10264 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10265 			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10266 			    resp->hwrm_intf_upd_8b);
10267 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10268 	}
10269 
10270 	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10271 			HWRM_VERSION_UPDATE;
10272 
10273 	if (bp->hwrm_spec_code > hwrm_ver)
10274 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10275 			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10276 			 HWRM_VERSION_UPDATE);
10277 	else
10278 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10279 			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10280 			 resp->hwrm_intf_upd_8b);
10281 
10282 	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10283 	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10284 		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10285 		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10286 		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10287 		len = FW_VER_STR_LEN;
10288 	} else {
10289 		fw_maj = resp->hwrm_fw_maj_8b;
10290 		fw_min = resp->hwrm_fw_min_8b;
10291 		fw_bld = resp->hwrm_fw_bld_8b;
10292 		fw_rsv = resp->hwrm_fw_rsvd_8b;
10293 		len = BC_HWRM_STR_LEN;
10294 	}
10295 	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10296 	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10297 		 fw_rsv);
10298 
10299 	if (strlen(resp->active_pkg_name)) {
10300 		int fw_ver_len = strlen(bp->fw_ver_str);
10301 
10302 		snprintf(bp->fw_ver_str + fw_ver_len,
10303 			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10304 			 resp->active_pkg_name);
10305 		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10306 	}
10307 
10308 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10309 	if (!bp->hwrm_cmd_timeout)
10310 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10311 	bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10312 	if (!bp->hwrm_cmd_max_timeout)
10313 		bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10314 	max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10315 #ifdef CONFIG_DETECT_HUNG_TASK
10316 	if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10317 	    max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10318 		netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10319 			    max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10320 	}
10321 #endif
10322 
10323 	if (resp->hwrm_intf_maj_8b >= 1) {
10324 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10325 		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10326 	}
10327 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10328 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10329 
10330 	bp->chip_num = le16_to_cpu(resp->chip_num);
10331 	bp->chip_rev = resp->chip_rev;
10332 	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10333 	    !resp->chip_metal)
10334 		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10335 
10336 	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10337 	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10338 	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10339 		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10340 
10341 	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10342 		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10343 
10344 	if (dev_caps_cfg &
10345 	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10346 		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10347 
10348 	if (dev_caps_cfg &
10349 	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10350 		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10351 
10352 	if (dev_caps_cfg &
10353 	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10354 		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10355 
10356 hwrm_ver_get_exit:
10357 	hwrm_req_drop(bp, req);
10358 	return rc;
10359 }
10360 
10361 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10362 {
10363 	struct hwrm_fw_set_time_input *req;
10364 	struct tm tm;
10365 	time64_t now = ktime_get_real_seconds();
10366 	int rc;
10367 
10368 	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10369 	    bp->hwrm_spec_code < 0x10400)
10370 		return -EOPNOTSUPP;
10371 
10372 	time64_to_tm(now, 0, &tm);
10373 	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10374 	if (rc)
10375 		return rc;
10376 
10377 	req->year = cpu_to_le16(1900 + tm.tm_year);
10378 	req->month = 1 + tm.tm_mon;
10379 	req->day = tm.tm_mday;
10380 	req->hour = tm.tm_hour;
10381 	req->minute = tm.tm_min;
10382 	req->second = tm.tm_sec;
10383 	return hwrm_req_send(bp, req);
10384 }
10385 
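/* Accumulate one hardware counter into its 64-bit software mirror.
 * Only the bits within @mask are valid in @hw; if the masked hardware
 * value has wrapped below the previously seen value, one full period
 * (mask + 1) is added to the software counter.  For example, with a
 * 48-bit mask, a hw value of 0x10 following a previous masked value of
 * 0xffffffffff f0 advances *sw by 0x20.
 */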
10386 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10387 {
10388 	u64 sw_tmp;
10389 
10390 	hw &= mask;
10391 	sw_tmp = (*sw & ~mask) | hw;
10392 	if (hw < (*sw & mask))
10393 		sw_tmp += mask + 1;
10394 	WRITE_ONCE(*sw, sw_tmp);
10395 }
10396 
10397 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10398 				    int count, bool ignore_zero)
10399 {
10400 	int i;
10401 
10402 	for (i = 0; i < count; i++) {
10403 		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10404 
10405 		if (ignore_zero && !hw)
10406 			continue;
10407 
10408 		if (masks[i] == -1ULL)
10409 			sw_stats[i] = hw;
10410 		else
10411 			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10412 	}
10413 }
10414 
10415 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10416 {
10417 	if (!stats->hw_stats)
10418 		return;
10419 
10420 	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10421 				stats->hw_masks, stats->len / 8, false);
10422 }
10423 
10424 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10425 {
10426 	struct bnxt_stats_mem *ring0_stats;
10427 	bool ignore_zero = false;
10428 	int i;
10429 
10430 	/* Chip bug.  Counter intermittently becomes 0. */
10431 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10432 		ignore_zero = true;
10433 
10434 	for (i = 0; i < bp->cp_nr_rings; i++) {
10435 		struct bnxt_napi *bnapi = bp->bnapi[i];
10436 		struct bnxt_cp_ring_info *cpr;
10437 		struct bnxt_stats_mem *stats;
10438 
10439 		cpr = &bnapi->cp_ring;
10440 		stats = &cpr->stats;
10441 		if (!i)
10442 			ring0_stats = stats;
10443 		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10444 					ring0_stats->hw_masks,
10445 					ring0_stats->len / 8, ignore_zero);
10446 	}
10447 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
10448 		struct bnxt_stats_mem *stats = &bp->port_stats;
10449 		__le64 *hw_stats = stats->hw_stats;
10450 		u64 *sw_stats = stats->sw_stats;
10451 		u64 *masks = stats->hw_masks;
10452 		int cnt;
10453 
10454 		cnt = sizeof(struct rx_port_stats) / 8;
10455 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10456 
10457 		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10458 		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10459 		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10460 		cnt = sizeof(struct tx_port_stats) / 8;
10461 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10462 	}
10463 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10464 		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10465 		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10466 	}
10467 }
10468 
10469 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10470 {
10471 	struct hwrm_port_qstats_input *req;
10472 	struct bnxt_pf_info *pf = &bp->pf;
10473 	int rc;
10474 
10475 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10476 		return 0;
10477 
10478 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10479 		return -EOPNOTSUPP;
10480 
10481 	rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10482 	if (rc)
10483 		return rc;
10484 
10485 	req->flags = flags;
10486 	req->port_id = cpu_to_le16(pf->port_id);
10487 	req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10488 					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
10489 	req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10490 	return hwrm_req_send(bp, req);
10491 }
10492 
10493 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10494 {
10495 	struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10496 	struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10497 	struct hwrm_port_qstats_ext_output *resp_qs;
10498 	struct hwrm_port_qstats_ext_input *req_qs;
10499 	struct bnxt_pf_info *pf = &bp->pf;
10500 	u32 tx_stat_size;
10501 	int rc;
10502 
10503 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10504 		return 0;
10505 
10506 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10507 		return -EOPNOTSUPP;
10508 
10509 	rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10510 	if (rc)
10511 		return rc;
10512 
10513 	req_qs->flags = flags;
10514 	req_qs->port_id = cpu_to_le16(pf->port_id);
10515 	req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10516 	req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10517 	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10518 		       sizeof(struct tx_port_stats_ext) : 0;
10519 	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10520 	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10521 	resp_qs = hwrm_req_hold(bp, req_qs);
10522 	rc = hwrm_req_send(bp, req_qs);
10523 	if (!rc) {
10524 		bp->fw_rx_stats_ext_size =
10525 			le16_to_cpu(resp_qs->rx_stat_size) / 8;
10526 		if (BNXT_FW_MAJ(bp) < 220 &&
10527 		    bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10528 			bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10529 
10530 		bp->fw_tx_stats_ext_size = tx_stat_size ?
10531 			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10532 	} else {
10533 		bp->fw_rx_stats_ext_size = 0;
10534 		bp->fw_tx_stats_ext_size = 0;
10535 	}
10536 	hwrm_req_drop(bp, req_qs);
10537 
10538 	if (flags)
10539 		return rc;
10540 
10541 	if (bp->fw_tx_stats_ext_size <=
10542 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10543 		bp->pri2cos_valid = 0;
10544 		return rc;
10545 	}
10546 
10547 	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10548 	if (rc)
10549 		return rc;
10550 
10551 	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10552 
10553 	resp_qc = hwrm_req_hold(bp, req_qc);
10554 	rc = hwrm_req_send(bp, req_qc);
10555 	if (!rc) {
10556 		u8 *pri2cos;
10557 		int i, j;
10558 
10559 		pri2cos = &resp_qc->pri0_cos_queue_id;
10560 		for (i = 0; i < 8; i++) {
10561 			u8 queue_id = pri2cos[i];
10562 			u8 queue_idx;
10563 
10564 			/* Per port queue IDs start from 0, 10, 20, etc */
10565 			queue_idx = queue_id % 10;
10566 			if (queue_idx > BNXT_MAX_QUEUE) {
10567 				bp->pri2cos_valid = false;
10568 				hwrm_req_drop(bp, req_qc);
10569 				return rc;
10570 			}
10571 			for (j = 0; j < bp->max_q; j++) {
10572 				if (bp->q_ids[j] == queue_id)
10573 					bp->pri2cos_idx[i] = queue_idx;
10574 			}
10575 		}
10576 		bp->pri2cos_valid = true;
10577 	}
10578 	hwrm_req_drop(bp, req_qc);
10579 
10580 	return rc;
10581 }
10582 
10583 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10584 {
10585 	bnxt_hwrm_tunnel_dst_port_free(bp,
10586 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10587 	bnxt_hwrm_tunnel_dst_port_free(bp,
10588 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10589 }
10590 
10591 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10592 {
10593 	int rc, i;
10594 	u32 tpa_flags = 0;
10595 
10596 	if (set_tpa)
10597 		tpa_flags = bp->flags & BNXT_FLAG_TPA;
10598 	else if (BNXT_NO_FW_ACCESS(bp))
10599 		return 0;
10600 	for (i = 0; i < bp->nr_vnics; i++) {
10601 		rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10602 		if (rc) {
10603 			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10604 				   i, rc);
10605 			return rc;
10606 		}
10607 	}
10608 	return 0;
10609 }
10610 
10611 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10612 {
10613 	int i;
10614 
10615 	for (i = 0; i < bp->nr_vnics; i++)
10616 		bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10617 }
10618 
10619 static void bnxt_clear_vnic(struct bnxt *bp)
10620 {
10621 	if (!bp->vnic_info)
10622 		return;
10623 
10624 	bnxt_hwrm_clear_vnic_filter(bp);
10625 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10626 		/* clear all RSS settings before freeing the vnic ctx */
10627 		bnxt_hwrm_clear_vnic_rss(bp);
10628 		bnxt_hwrm_vnic_ctx_free(bp);
10629 	}
10630 	/* before freeing the vnic, undo the vnic tpa settings */
10631 	if (bp->flags & BNXT_FLAG_TPA)
10632 		bnxt_set_tpa(bp, false);
10633 	bnxt_hwrm_vnic_free(bp);
10634 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10635 		bnxt_hwrm_vnic_ctx_free(bp);
10636 }
10637 
10638 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10639 				    bool irq_re_init)
10640 {
10641 	bnxt_clear_vnic(bp);
10642 	bnxt_hwrm_ring_free(bp, close_path);
10643 	bnxt_hwrm_ring_grp_free(bp);
10644 	if (irq_re_init) {
10645 		bnxt_hwrm_stat_ctx_free(bp);
10646 		bnxt_hwrm_free_tunnel_ports(bp);
10647 	}
10648 }
10649 
10650 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10651 {
10652 	struct hwrm_func_cfg_input *req;
10653 	u8 evb_mode;
10654 	int rc;
10655 
10656 	if (br_mode == BRIDGE_MODE_VEB)
10657 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10658 	else if (br_mode == BRIDGE_MODE_VEPA)
10659 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10660 	else
10661 		return -EINVAL;
10662 
10663 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10664 	if (rc)
10665 		return rc;
10666 
10667 	req->fid = cpu_to_le16(0xffff);
10668 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10669 	req->evb_mode = evb_mode;
10670 	return hwrm_req_send(bp, req);
10671 }
10672 
10673 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10674 {
10675 	struct hwrm_func_cfg_input *req;
10676 	int rc;
10677 
10678 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10679 		return 0;
10680 
10681 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10682 	if (rc)
10683 		return rc;
10684 
10685 	req->fid = cpu_to_le16(0xffff);
10686 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10687 	req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10688 	if (size == 128)
10689 		req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10690 
10691 	return hwrm_req_send(bp, req);
10692 }
10693 
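/* Set up a VNIC on pre-P5 chips: allocate the RSS (and Nitro A0 CoS)
 * contexts, configure the default ring group, enable RSS hashing, and
 * program header-data split when aggregation rings are in use.
 */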
10694 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10695 {
10696 	int rc;
10697 
10698 	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10699 		goto skip_rss_ctx;
10700 
10701 	/* allocate context for vnic */
10702 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10703 	if (rc) {
10704 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10705 			   vnic->vnic_id, rc);
10706 		goto vnic_setup_err;
10707 	}
10708 	bp->rsscos_nr_ctxs++;
10709 
10710 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10711 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10712 		if (rc) {
10713 			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10714 				   vnic->vnic_id, rc);
10715 			goto vnic_setup_err;
10716 		}
10717 		bp->rsscos_nr_ctxs++;
10718 	}
10719 
10720 skip_rss_ctx:
10721 	/* configure default vnic, ring grp */
10722 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10723 	if (rc) {
10724 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10725 			   vnic->vnic_id, rc);
10726 		goto vnic_setup_err;
10727 	}
10728 
10729 	/* Enable RSS hashing on vnic */
10730 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10731 	if (rc) {
10732 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10733 			   vnic->vnic_id, rc);
10734 		goto vnic_setup_err;
10735 	}
10736 
10737 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10738 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10739 		if (rc) {
10740 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10741 				   vnic->vnic_id, rc);
10742 		}
10743 	}
10744 
10745 vnic_setup_err:
10746 	return rc;
10747 }
10748 
10749 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10750 			  u8 valid)
10751 {
10752 	struct hwrm_vnic_update_input *req;
10753 	int rc;
10754 
10755 	rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10756 	if (rc)
10757 		return rc;
10758 
10759 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10760 
10761 	if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10762 		req->mru = cpu_to_le16(vnic->mru);
10763 
10764 	req->enables = cpu_to_le32(valid);
10765 
10766 	return hwrm_req_send(bp, req);
10767 }
10768 
10769 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10770 {
10771 	int rc;
10772 
10773 	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10774 	if (rc) {
10775 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10776 			   vnic->vnic_id, rc);
10777 		return rc;
10778 	}
10779 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10780 	if (rc)
10781 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10782 			   vnic->vnic_id, rc);
10783 	return rc;
10784 }
10785 
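/* Set up a VNIC on P5_PLUS chips: allocate as many RSS contexts as the
 * RX ring count requires, program the RSS table and VNIC configuration,
 * and enable header-data split when aggregation rings are in use.
 */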
10786 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10787 {
10788 	int rc, i, nr_ctxs;
10789 
10790 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10791 	for (i = 0; i < nr_ctxs; i++) {
10792 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10793 		if (rc) {
10794 			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10795 				   vnic->vnic_id, i, rc);
10796 			break;
10797 		}
10798 		bp->rsscos_nr_ctxs++;
10799 	}
10800 	if (i < nr_ctxs)
10801 		return -ENOMEM;
10802 
10803 	rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10804 	if (rc)
10805 		return rc;
10806 
10807 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10808 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10809 		if (rc) {
10810 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10811 				   vnic->vnic_id, rc);
10812 		}
10813 	}
10814 	return rc;
10815 }
10816 
10817 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10818 {
10819 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10820 		return __bnxt_setup_vnic_p5(bp, vnic);
10821 	else
10822 		return __bnxt_setup_vnic(bp, vnic);
10823 }
10824 
10825 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10826 				     struct bnxt_vnic_info *vnic,
10827 				     u16 start_rx_ring_idx, int rx_rings)
10828 {
10829 	int rc;
10830 
10831 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10832 	if (rc) {
10833 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10834 			   vnic->vnic_id, rc);
10835 		return rc;
10836 	}
10837 	return bnxt_setup_vnic(bp, vnic);
10838 }
10839 
10840 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10841 {
10842 	struct bnxt_vnic_info *vnic;
10843 	int i, rc = 0;
10844 
10845 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10846 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10847 		return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10848 	}
10849 
10850 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10851 		return 0;
10852 
10853 	for (i = 0; i < bp->rx_nr_rings; i++) {
10854 		u16 vnic_id = i + 1;
10855 		u16 ring_id = i;
10856 
10857 		if (vnic_id >= bp->nr_vnics)
10858 			break;
10859 
10860 		vnic = &bp->vnic_info[vnic_id];
10861 		vnic->flags |= BNXT_VNIC_RFS_FLAG;
10862 		if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10863 			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10864 		if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10865 			break;
10866 	}
10867 	return rc;
10868 }
10869 
10870 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10871 			  bool all)
10872 {
10873 	struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10874 	struct bnxt_filter_base *usr_fltr, *tmp;
10875 	struct bnxt_ntuple_filter *ntp_fltr;
10876 	int i;
10877 
10878 	bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10879 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10880 		if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10881 			bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10882 	}
10883 	if (!all)
10884 		return;
10885 
10886 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10887 		if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10888 		    usr_fltr->fw_vnic_id == rss_ctx->index) {
10889 			ntp_fltr = container_of(usr_fltr,
10890 						struct bnxt_ntuple_filter,
10891 						base);
10892 			bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10893 			bnxt_del_ntp_filter(bp, ntp_fltr);
10894 			bnxt_del_one_usr_fltr(bp, usr_fltr);
10895 		}
10896 	}
10897 
10898 	if (vnic->rss_table)
10899 		dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10900 				  vnic->rss_table,
10901 				  vnic->rss_table_dma_addr);
10902 	bp->num_rss_ctx--;
10903 }
10904 
10905 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10906 				  int rxr_id)
10907 {
10908 	u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10909 	int i, vnic_rx;
10910 
10911 	/* The ntuple VNIC always covers all the rx rings, so any rx ring id
10912 	 * change must be propagated because a future filter may use it.
10913 	 */
10914 	if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10915 		return true;
10916 
10917 	for (i = 0; i < tbl_size; i++) {
10918 		if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10919 			vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10920 		else
10921 			vnic_rx = bp->rss_indir_tbl[i];
10922 
10923 		if (rxr_id == vnic_rx)
10924 			return true;
10925 	}
10926 
10927 	return false;
10928 }
10929 
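/* Update the MRU of a VNIC that includes the given RX ring.  A
 * non-zero MRU re-programs RSS and re-enables the VNIC; an MRU of 0
 * effectively quiesces it, e.g. around an RX queue restart.
 */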
10930 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10931 				u16 mru, int rxr_id)
10932 {
10933 	int rc;
10934 
10935 	if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10936 		return 0;
10937 
10938 	if (mru) {
10939 		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10940 		if (rc) {
10941 			netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10942 				   vnic->vnic_id, rc);
10943 			return rc;
10944 		}
10945 	}
10946 	vnic->mru = mru;
10947 	bnxt_hwrm_vnic_update(bp, vnic,
10948 			      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10949 
10950 	return 0;
10951 }
10952 
10953 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10954 {
10955 	struct ethtool_rxfh_context *ctx;
10956 	unsigned long context;
10957 	int rc;
10958 
10959 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10960 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10961 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10962 
10963 		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10964 		if (rc)
10965 			return rc;
10966 	}
10967 
10968 	return 0;
10969 }
10970 
10971 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10972 {
10973 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10974 	struct ethtool_rxfh_context *ctx;
10975 	unsigned long context;
10976 
10977 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10978 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10979 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10980 
10981 		if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10982 		    bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10983 		    __bnxt_setup_vnic_p5(bp, vnic)) {
10984 			netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10985 				   rss_ctx->index);
10986 			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10987 			ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10988 		}
10989 	}
10990 }
10991 
10992 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10993 {
10994 	struct ethtool_rxfh_context *ctx;
10995 	unsigned long context;
10996 
10997 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10998 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10999 
11000 		bnxt_del_one_rss_ctx(bp, rss_ctx, false);
11001 	}
11002 }
11003 
11004 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
11005 static bool bnxt_promisc_ok(struct bnxt *bp)
11006 {
11007 #ifdef CONFIG_BNXT_SRIOV
11008 	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
11009 		return false;
11010 #endif
11011 	return true;
11012 }
11013 
11014 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
11015 {
11016 	struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
11017 	unsigned int rc = 0;
11018 
11019 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
11020 	if (rc) {
11021 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11022 			   rc);
11023 		return rc;
11024 	}
11025 
11026 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
11027 	if (rc) {
11028 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11029 			   rc);
11030 		return rc;
11031 	}
11032 	return rc;
11033 }
11034 
11035 static int bnxt_cfg_rx_mode(struct bnxt *);
11036 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
11037 
11038 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
11039 {
11040 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
11041 	int rc = 0;
11042 	unsigned int rx_nr_rings = bp->rx_nr_rings;
11043 
11044 	if (irq_re_init) {
11045 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
11046 		if (rc) {
11047 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
11048 				   rc);
11049 			goto err_out;
11050 		}
11051 	}
11052 
11053 	rc = bnxt_hwrm_ring_alloc(bp);
11054 	if (rc) {
11055 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
11056 		goto err_out;
11057 	}
11058 
11059 	rc = bnxt_hwrm_ring_grp_alloc(bp);
11060 	if (rc) {
11061 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
11062 		goto err_out;
11063 	}
11064 
11065 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11066 		rx_nr_rings--;
11067 
11068 	/* default vnic 0 */
11069 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
11070 	if (rc) {
11071 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
11072 		goto err_out;
11073 	}
11074 
11075 	if (BNXT_VF(bp))
11076 		bnxt_hwrm_func_qcfg(bp);
11077 
11078 	rc = bnxt_setup_vnic(bp, vnic);
11079 	if (rc)
11080 		goto err_out;
11081 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11082 		bnxt_hwrm_update_rss_hash_cfg(bp);
11083 
11084 	if (bp->flags & BNXT_FLAG_RFS) {
11085 		rc = bnxt_alloc_rfs_vnics(bp);
11086 		if (rc)
11087 			goto err_out;
11088 	}
11089 
11090 	if (bp->flags & BNXT_FLAG_TPA) {
11091 		rc = bnxt_set_tpa(bp, true);
11092 		if (rc)
11093 			goto err_out;
11094 	}
11095 
11096 	if (BNXT_VF(bp))
11097 		bnxt_update_vf_mac(bp);
11098 
11099 	/* Filter for default vnic 0 */
11100 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11101 	if (rc) {
11102 		if (BNXT_VF(bp) && rc == -ENODEV)
11103 			netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11104 		else
11105 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11106 		goto err_out;
11107 	}
11108 	vnic->uc_filter_count = 1;
11109 
11110 	vnic->rx_mask = 0;
11111 	if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11112 		goto skip_rx_mask;
11113 
11114 	if (bp->dev->flags & IFF_BROADCAST)
11115 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11116 
11117 	if (bp->dev->flags & IFF_PROMISC)
11118 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11119 
11120 	if (bp->dev->flags & IFF_ALLMULTI) {
11121 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11122 		vnic->mc_list_count = 0;
11123 	} else if (bp->dev->flags & IFF_MULTICAST) {
11124 		u32 mask = 0;
11125 
11126 		bnxt_mc_list_updated(bp, &mask);
11127 		vnic->rx_mask |= mask;
11128 	}
11129 
11130 	rc = bnxt_cfg_rx_mode(bp);
11131 	if (rc)
11132 		goto err_out;
11133 
11134 skip_rx_mask:
11135 	rc = bnxt_hwrm_set_coal(bp);
11136 	if (rc)
11137 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11138 				rc);
11139 
11140 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11141 		rc = bnxt_setup_nitroa0_vnic(bp);
11142 		if (rc)
11143 			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11144 				   rc);
11145 	}
11146 
11147 	if (BNXT_VF(bp)) {
11148 		bnxt_hwrm_func_qcfg(bp);
11149 		netdev_update_features(bp->dev);
11150 	}
11151 
11152 	return 0;
11153 
11154 err_out:
11155 	bnxt_hwrm_resource_free(bp, 0, true);
11156 
11157 	return rc;
11158 }
11159 
11160 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11161 {
11162 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11163 	return 0;
11164 }
11165 
11166 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11167 {
11168 	bnxt_init_cp_rings(bp);
11169 	bnxt_init_rx_rings(bp);
11170 	bnxt_init_tx_rings(bp);
11171 	bnxt_init_ring_grps(bp, irq_re_init);
11172 	bnxt_init_vnics(bp);
11173 
11174 	return bnxt_init_chip(bp, irq_re_init);
11175 }
11176 
11177 static int bnxt_set_real_num_queues(struct bnxt *bp)
11178 {
11179 	int rc;
11180 	struct net_device *dev = bp->dev;
11181 
11182 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11183 					  bp->tx_nr_rings_xdp);
11184 	if (rc)
11185 		return rc;
11186 
11187 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11188 	if (rc)
11189 		return rc;
11190 
11191 #ifdef CONFIG_RFS_ACCEL
11192 	if (bp->flags & BNXT_FLAG_RFS)
11193 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11194 #endif
11195 
11196 	return rc;
11197 }
11198 
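/* Trim *rx and *tx so that they fit within @max completion rings.
 * With shared completion rings each count is simply capped at @max;
 * otherwise RX and TX are reduced alternately until their sum fits.
 */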
11199 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11200 			     bool shared)
11201 {
11202 	int _rx = *rx, _tx = *tx;
11203 
11204 	if (shared) {
11205 		*rx = min_t(int, _rx, max);
11206 		*tx = min_t(int, _tx, max);
11207 	} else {
11208 		if (max < 2)
11209 			return -ENOMEM;
11210 
11211 		while (_rx + _tx > max) {
11212 			if (_rx > _tx && _rx > 1)
11213 				_rx--;
11214 			else if (_tx > 1)
11215 				_tx--;
11216 		}
11217 		*rx = _rx;
11218 		*tx = _tx;
11219 	}
11220 	return 0;
11221 }
11222 
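/* Convert a TX ring count into the number of completion rings it
 * needs: TX rings belonging to different traffic classes share
 * completion rings, while each XDP TX ring gets its own.
 */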
11223 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11224 {
11225 	return (tx - tx_xdp) / tx_sets + tx_xdp;
11226 }
11227 
11228 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11229 {
11230 	int tcs = bp->num_tc;
11231 
11232 	if (!tcs)
11233 		tcs = 1;
11234 	return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11235 }
11236 
11237 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11238 {
11239 	int tcs = bp->num_tc;
11240 
11241 	return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11242 	       bp->tx_nr_rings_xdp;
11243 }
11244 
11245 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11246 			   bool sh)
11247 {
11248 	int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11249 
11250 	if (tx_cp != *tx) {
11251 		int tx_saved = tx_cp, rc;
11252 
11253 		rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11254 		if (rc)
11255 			return rc;
11256 		if (tx_cp != tx_saved)
11257 			*tx = bnxt_num_cp_to_tx(bp, tx_cp);
11258 		return 0;
11259 	}
11260 	return __bnxt_trim_rings(bp, rx, tx, max, sh);
11261 }
11262 
11263 static void bnxt_setup_msix(struct bnxt *bp)
11264 {
11265 	const int len = sizeof(bp->irq_tbl[0].name);
11266 	struct net_device *dev = bp->dev;
11267 	int tcs, i;
11268 
11269 	tcs = bp->num_tc;
11270 	if (tcs) {
11271 		int i, off, count;
11272 
11273 		for (i = 0; i < tcs; i++) {
11274 			count = bp->tx_nr_rings_per_tc;
11275 			off = BNXT_TC_TO_RING_BASE(bp, i);
11276 			netdev_set_tc_queue(dev, i, count, off);
11277 		}
11278 	}
11279 
11280 	for (i = 0; i < bp->cp_nr_rings; i++) {
11281 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11282 		char *attr;
11283 
11284 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11285 			attr = "TxRx";
11286 		else if (i < bp->rx_nr_rings)
11287 			attr = "rx";
11288 		else
11289 			attr = "tx";
11290 
11291 		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11292 			 attr, i);
11293 		bp->irq_tbl[map_idx].handler = bnxt_msix;
11294 	}
11295 }
11296 
11297 static int bnxt_init_int_mode(struct bnxt *bp);
11298 
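/* Grow or shrink the MSI-X table to @total vectors using dynamic
 * MSI-X allocation.  Returns the number of vectors actually available
 * afterwards, which may be less than requested.
 */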
11299 static int bnxt_change_msix(struct bnxt *bp, int total)
11300 {
11301 	struct msi_map map;
11302 	int i;
11303 
11304 	/* add MSIX to the end if needed */
11305 	for (i = bp->total_irqs; i < total; i++) {
11306 		map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11307 		if (map.index < 0)
11308 			return bp->total_irqs;
11309 		bp->irq_tbl[i].vector = map.virq;
11310 		bp->total_irqs++;
11311 	}
11312 
11313 	/* trim MSIX from the end if needed */
11314 	for (i = bp->total_irqs; i > total; i--) {
11315 		map.index = i - 1;
11316 		map.virq = bp->irq_tbl[i - 1].vector;
11317 		pci_msix_free_irq(bp->pdev, map);
11318 		bp->total_irqs--;
11319 	}
11320 	return bp->total_irqs;
11321 }
11322 
11323 static int bnxt_setup_int_mode(struct bnxt *bp)
11324 {
11325 	int rc;
11326 
11327 	if (!bp->irq_tbl) {
11328 		rc = bnxt_init_int_mode(bp);
11329 		if (rc || !bp->irq_tbl)
11330 			return rc ?: -ENODEV;
11331 	}
11332 
11333 	bnxt_setup_msix(bp);
11334 
11335 	rc = bnxt_set_real_num_queues(bp);
11336 	return rc;
11337 }
11338 
11339 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11340 {
11341 	return bp->hw_resc.max_rsscos_ctxs;
11342 }
11343 
11344 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11345 {
11346 	return bp->hw_resc.max_vnics;
11347 }
11348 
11349 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11350 {
11351 	return bp->hw_resc.max_stat_ctxs;
11352 }
11353 
11354 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11355 {
11356 	return bp->hw_resc.max_cp_rings;
11357 }
11358 
11359 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11360 {
11361 	unsigned int cp = bp->hw_resc.max_cp_rings;
11362 
11363 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11364 		cp -= bnxt_get_ulp_msix_num(bp);
11365 
11366 	return cp;
11367 }
11368 
11369 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11370 {
11371 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11372 
11373 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11374 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11375 
11376 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11377 }
11378 
11379 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11380 {
11381 	bp->hw_resc.max_irqs = max_irqs;
11382 }
11383 
11384 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11385 {
11386 	unsigned int cp;
11387 
11388 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
11389 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11390 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11391 	else
11392 		return cp - bp->cp_nr_rings;
11393 }
11394 
11395 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11396 {
11397 	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11398 }
11399 
11400 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11401 {
11402 	int max_irq = bnxt_get_max_func_irqs(bp);
11403 	int total_req = bp->cp_nr_rings + num;
11404 
11405 	if (max_irq < total_req) {
11406 		num = max_irq - bp->cp_nr_rings;
11407 		if (num <= 0)
11408 			return 0;
11409 	}
11410 	return num;
11411 }
11412 
11413 static int bnxt_get_num_msix(struct bnxt *bp)
11414 {
11415 	if (!BNXT_NEW_RM(bp))
11416 		return bnxt_get_max_func_irqs(bp);
11417 
11418 	return bnxt_nq_rings_in_use(bp);
11419 }
11420 
11421 static int bnxt_init_int_mode(struct bnxt *bp)
11422 {
11423 	int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11424 
11425 	total_vecs = bnxt_get_num_msix(bp);
11426 	max = bnxt_get_max_func_irqs(bp);
11427 	if (total_vecs > max)
11428 		total_vecs = max;
11429 
11430 	if (!total_vecs)
11431 		return 0;
11432 
11433 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11434 		min = 2;
11435 
11436 	total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11437 					   PCI_IRQ_MSIX);
11438 	ulp_msix = bnxt_get_ulp_msix_num(bp);
11439 	if (total_vecs < 0 || total_vecs < ulp_msix) {
11440 		rc = -ENODEV;
11441 		goto msix_setup_exit;
11442 	}
11443 
11444 	tbl_size = total_vecs;
11445 	if (pci_msix_can_alloc_dyn(bp->pdev))
11446 		tbl_size = max;
11447 	bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11448 	if (bp->irq_tbl) {
11449 		for (i = 0; i < total_vecs; i++)
11450 			bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11451 
11452 		bp->total_irqs = total_vecs;
11453 		/* Trim rings based upon num of vectors allocated */
11454 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11455 				     total_vecs - ulp_msix, min == 1);
11456 		if (rc)
11457 			goto msix_setup_exit;
11458 
11459 		tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11460 		bp->cp_nr_rings = (min == 1) ?
11461 				  max_t(int, tx_cp, bp->rx_nr_rings) :
11462 				  tx_cp + bp->rx_nr_rings;
11463 
11464 	} else {
11465 		rc = -ENOMEM;
11466 		goto msix_setup_exit;
11467 	}
11468 	return 0;
11469 
11470 msix_setup_exit:
11471 	netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11472 	kfree(bp->irq_tbl);
11473 	bp->irq_tbl = NULL;
11474 	pci_free_irq_vectors(bp->pdev);
11475 	return rc;
11476 }
11477 
11478 static void bnxt_clear_int_mode(struct bnxt *bp)
11479 {
11480 	pci_free_irq_vectors(bp->pdev);
11481 
11482 	kfree(bp->irq_tbl);
11483 	bp->irq_tbl = NULL;
11484 }
11485 
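/* Reserve rings (and ULP MSI-X vectors) with the firmware and make
 * sure enough IRQ vectors are allocated.  If the vector count must
 * change and dynamic MSI-X is not available, interrupts are torn down
 * and re-initialized around the reservation.
 */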
11486 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11487 {
11488 	bool irq_cleared = false;
11489 	bool irq_change = false;
11490 	int tcs = bp->num_tc;
11491 	int irqs_required;
11492 	int rc;
11493 
11494 	if (!bnxt_need_reserve_rings(bp))
11495 		return 0;
11496 
11497 	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11498 		int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11499 
11500 		if (ulp_msix > bp->ulp_num_msix_want)
11501 			ulp_msix = bp->ulp_num_msix_want;
11502 		irqs_required = ulp_msix + bp->cp_nr_rings;
11503 	} else {
11504 		irqs_required = bnxt_get_num_msix(bp);
11505 	}
11506 
11507 	if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11508 		irq_change = true;
11509 		if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11510 			bnxt_ulp_irq_stop(bp);
11511 			bnxt_clear_int_mode(bp);
11512 			irq_cleared = true;
11513 		}
11514 	}
11515 	rc = __bnxt_reserve_rings(bp);
11516 	if (irq_cleared) {
11517 		if (!rc)
11518 			rc = bnxt_init_int_mode(bp);
11519 		bnxt_ulp_irq_restart(bp, rc);
11520 	} else if (irq_change && !rc) {
11521 		if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11522 			rc = -ENOSPC;
11523 	}
11524 	if (rc) {
11525 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11526 		return rc;
11527 	}
11528 	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11529 		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11530 		netdev_err(bp->dev, "tx ring reservation failure\n");
11531 		netdev_reset_tc(bp->dev);
11532 		bp->num_tc = 0;
11533 		if (bp->tx_nr_rings_xdp)
11534 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11535 		else
11536 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11537 		return -ENOMEM;
11538 	}
11539 	return 0;
11540 }
11541 
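/* Quiesce the TX rings of one NAPI instance: mark them closing, stop
 * the corresponding netdev TX queue, and in TPH mode also free the HW
 * TX and completion rings so they can be re-created with updated
 * steering tags.
 */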
11542 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11543 {
11544 	struct bnxt_tx_ring_info *txr;
11545 	struct netdev_queue *txq;
11546 	struct bnxt_napi *bnapi;
11547 	int i;
11548 
11549 	bnapi = bp->bnapi[idx];
11550 	bnxt_for_each_napi_tx(i, bnapi, txr) {
11551 		WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11552 		synchronize_net();
11553 
11554 		if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11555 			txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11556 			if (txq) {
11557 				__netif_tx_lock_bh(txq);
11558 				netif_tx_stop_queue(txq);
11559 				__netif_tx_unlock_bh(txq);
11560 			}
11561 		}
11562 
11563 		if (!bp->tph_mode)
11564 			continue;
11565 
11566 		bnxt_hwrm_tx_ring_free(bp, txr, true);
11567 		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11568 		bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11569 		bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11570 	}
11571 }
11572 
11573 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11574 {
11575 	struct bnxt_tx_ring_info *txr;
11576 	struct netdev_queue *txq;
11577 	struct bnxt_napi *bnapi;
11578 	int rc, i;
11579 
11580 	bnapi = bp->bnapi[idx];
11581 	/* All rings have been reserved and previously allocated.
11582 	 * Reallocating with the same parameters should never fail.
11583 	 */
11584 	bnxt_for_each_napi_tx(i, bnapi, txr) {
11585 		if (!bp->tph_mode)
11586 			goto start_tx;
11587 
11588 		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11589 		if (rc)
11590 			return rc;
11591 
11592 		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11593 		if (rc)
11594 			return rc;
11595 
11596 		txr->tx_prod = 0;
11597 		txr->tx_cons = 0;
11598 		txr->tx_hw_cons = 0;
11599 start_tx:
11600 		WRITE_ONCE(txr->dev_state, 0);
11601 		synchronize_net();
11602 
11603 		if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11604 			continue;
11605 
11606 		txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11607 		if (txq)
11608 			netif_tx_start_queue(txq);
11609 	}
11610 
11611 	return 0;
11612 }
11613 
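/* IRQ affinity notifier: when the CPU mask of an RX ring's vector
 * changes while TPH is enabled, fetch the steering tag for the new
 * first CPU, update the device's ST table entry, and restart the RX
 * queue to apply it.
 */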
11614 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11615 				     const cpumask_t *mask)
11616 {
11617 	struct bnxt_irq *irq;
11618 	u16 tag;
11619 	int err;
11620 
11621 	irq = container_of(notify, struct bnxt_irq, affinity_notify);
11622 
11623 	if (!irq->bp->tph_mode)
11624 		return;
11625 
11626 	cpumask_copy(irq->cpu_mask, mask);
11627 
11628 	if (irq->ring_nr >= irq->bp->rx_nr_rings)
11629 		return;
11630 
11631 	if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11632 				cpumask_first(irq->cpu_mask), &tag))
11633 		return;
11634 
11635 	if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11636 		return;
11637 
11638 	netdev_lock(irq->bp->dev);
11639 	if (netif_running(irq->bp->dev)) {
11640 		err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11641 		if (err)
11642 			netdev_err(irq->bp->dev,
11643 				   "RX queue restart failed: err=%d\n", err);
11644 	}
11645 	netdev_unlock(irq->bp->dev);
11646 }
11647 
11648 static void bnxt_irq_affinity_release(struct kref *ref)
11649 {
11650 	struct irq_affinity_notify *notify =
11651 		container_of(ref, struct irq_affinity_notify, kref);
11652 	struct bnxt_irq *irq;
11653 
11654 	irq = container_of(notify, struct bnxt_irq, affinity_notify);
11655 
11656 	if (!irq->bp->tph_mode)
11657 		return;
11658 
11659 	if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11660 		netdev_err(irq->bp->dev,
11661 			   "Setting ST=0 for MSIX entry %d failed\n",
11662 			   irq->msix_nr);
11663 		return;
11664 	}
11665 }
11666 
11667 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11668 {
11669 	irq_set_affinity_notifier(irq->vector, NULL);
11670 }
11671 
11672 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11673 {
11674 	struct irq_affinity_notify *notify;
11675 
11676 	irq->bp = bp;
11677 
11678 	/* Nothing to do if TPH is not enabled */
11679 	if (!bp->tph_mode)
11680 		return;
11681 
11682 	/* Register IRQ affinity notifier */
11683 	notify = &irq->affinity_notify;
11684 	notify->irq = irq->vector;
11685 	notify->notify = bnxt_irq_affinity_notify;
11686 	notify->release = bnxt_irq_affinity_release;
11687 
11688 	irq_set_affinity_notifier(irq->vector, notify);
11689 }
11690 
11691 static void bnxt_free_irq(struct bnxt *bp)
11692 {
11693 	struct bnxt_irq *irq;
11694 	int i;
11695 
11696 #ifdef CONFIG_RFS_ACCEL
11697 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11698 	bp->dev->rx_cpu_rmap = NULL;
11699 #endif
11700 	if (!bp->irq_tbl || !bp->bnapi)
11701 		return;
11702 
11703 	for (i = 0; i < bp->cp_nr_rings; i++) {
11704 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11705 
11706 		irq = &bp->irq_tbl[map_idx];
11707 		if (irq->requested) {
11708 			if (irq->have_cpumask) {
11709 				irq_update_affinity_hint(irq->vector, NULL);
11710 				free_cpumask_var(irq->cpu_mask);
11711 				irq->have_cpumask = 0;
11712 			}
11713 
11714 			bnxt_release_irq_notifier(irq);
11715 
11716 			free_irq(irq->vector, bp->bnapi[i]);
11717 		}
11718 
11719 		irq->requested = 0;
11720 	}
11721 
11722 	/* Disable TPH support */
11723 	pcie_disable_tph(bp->pdev);
11724 	bp->tph_mode = 0;
11725 }
11726 
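/* Request one IRQ per completion ring: add RFS rmap entries for RX
 * vectors, spread initial affinity hints across the local NUMA node,
 * and program TPH steering tag entries when TPH is enabled.
 */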
11727 static int bnxt_request_irq(struct bnxt *bp)
11728 {
11729 	struct cpu_rmap *rmap = NULL;
11730 	int i, j, rc = 0;
11731 	unsigned long flags = 0;
11732 
11733 	rc = bnxt_setup_int_mode(bp);
11734 	if (rc) {
11735 		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11736 			   rc);
11737 		return rc;
11738 	}
11739 #ifdef CONFIG_RFS_ACCEL
11740 	rmap = bp->dev->rx_cpu_rmap;
11741 #endif
11742 
11743 	/* Enable TPH support as part of IRQ request */
11744 	rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11745 	if (!rc)
11746 		bp->tph_mode = PCI_TPH_ST_IV_MODE;
11747 
11748 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11749 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11750 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11751 
11752 		if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11753 		    rmap && bp->bnapi[i]->rx_ring) {
11754 			rc = irq_cpu_rmap_add(rmap, irq->vector);
11755 			if (rc)
11756 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11757 					    j);
11758 			j++;
11759 		}
11760 
11761 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11762 				 bp->bnapi[i]);
11763 		if (rc)
11764 			break;
11765 
11766 		netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11767 		irq->requested = 1;
11768 
11769 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11770 			int numa_node = dev_to_node(&bp->pdev->dev);
11771 			u16 tag;
11772 
11773 			irq->have_cpumask = 1;
11774 			irq->msix_nr = map_idx;
11775 			irq->ring_nr = i;
11776 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11777 					irq->cpu_mask);
11778 			rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11779 			if (rc) {
11780 				netdev_warn(bp->dev,
11781 					    "Update affinity hint failed, IRQ = %d\n",
11782 					    irq->vector);
11783 				break;
11784 			}
11785 
11786 			bnxt_register_irq_notifier(bp, irq);
11787 
11788 			/* Init ST table entry */
11789 			if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11790 						cpumask_first(irq->cpu_mask),
11791 						&tag))
11792 				continue;
11793 
11794 			pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11795 		}
11796 	}
11797 	return rc;
11798 }
11799 
11800 static void bnxt_del_napi(struct bnxt *bp)
11801 {
11802 	int i;
11803 
11804 	if (!bp->bnapi)
11805 		return;
11806 
11807 	for (i = 0; i < bp->rx_nr_rings; i++)
11808 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11809 	for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11810 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11811 
11812 	for (i = 0; i < bp->cp_nr_rings; i++) {
11813 		struct bnxt_napi *bnapi = bp->bnapi[i];
11814 
11815 		__netif_napi_del_locked(&bnapi->napi);
11816 	}
11817 	/* We called __netif_napi_del_locked(), so we need to respect an RCU
11818 	 * grace period before freeing the napi structures.
11819 	 */
11820 	synchronize_net();
11821 }
11822 
11823 static void bnxt_init_napi(struct bnxt *bp)
11824 {
11825 	int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11826 	unsigned int cp_nr_rings = bp->cp_nr_rings;
11827 	struct bnxt_napi *bnapi;
11828 	int i;
11829 
11830 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11831 		poll_fn = bnxt_poll_p5;
11832 	else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11833 		cp_nr_rings--;
11834 
11835 	set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11836 
11837 	for (i = 0; i < cp_nr_rings; i++) {
11838 		bnapi = bp->bnapi[i];
11839 		netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11840 					     bnapi->index);
11841 	}
11842 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11843 		bnapi = bp->bnapi[cp_nr_rings];
11844 		netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11845 	}
11846 }
11847 
11848 static void bnxt_disable_napi(struct bnxt *bp)
11849 {
11850 	int i;
11851 
11852 	if (!bp->bnapi ||
11853 	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11854 		return;
11855 
11856 	for (i = 0; i < bp->cp_nr_rings; i++) {
11857 		struct bnxt_napi *bnapi = bp->bnapi[i];
11858 		struct bnxt_cp_ring_info *cpr;
11859 
11860 		cpr = &bnapi->cp_ring;
11861 		if (bnapi->tx_fault)
11862 			cpr->sw_stats->tx.tx_resets++;
11863 		if (bnapi->in_reset)
11864 			cpr->sw_stats->rx.rx_resets++;
11865 		napi_disable_locked(&bnapi->napi);
11866 	}
11867 }
11868 
11869 static void bnxt_enable_napi(struct bnxt *bp)
11870 {
11871 	int i;
11872 
11873 	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11874 	for (i = 0; i < bp->cp_nr_rings; i++) {
11875 		struct bnxt_napi *bnapi = bp->bnapi[i];
11876 		struct bnxt_cp_ring_info *cpr;
11877 
11878 		bnapi->tx_fault = 0;
11879 
11880 		cpr = &bnapi->cp_ring;
11881 		bnapi->in_reset = false;
11882 
11883 		if (bnapi->rx_ring) {
11884 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11885 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11886 		}
11887 		napi_enable_locked(&bnapi->napi);
11888 	}
11889 }
11890 
11891 void bnxt_tx_disable(struct bnxt *bp)
11892 {
11893 	int i;
11894 	struct bnxt_tx_ring_info *txr;
11895 
11896 	if (bp->tx_ring) {
11897 		for (i = 0; i < bp->tx_nr_rings; i++) {
11898 			txr = &bp->tx_ring[i];
11899 			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11900 		}
11901 	}
11902 	/* Make sure napi polls see @dev_state change */
11903 	synchronize_net();
11904 	/* Drop carrier first to prevent TX timeout */
11905 	netif_carrier_off(bp->dev);
11906 	/* Stop all TX queues */
11907 	netif_tx_disable(bp->dev);
11908 }
11909 
11910 void bnxt_tx_enable(struct bnxt *bp)
11911 {
11912 	int i;
11913 	struct bnxt_tx_ring_info *txr;
11914 
11915 	for (i = 0; i < bp->tx_nr_rings; i++) {
11916 		txr = &bp->tx_ring[i];
11917 		WRITE_ONCE(txr->dev_state, 0);
11918 	}
11919 	/* Make sure napi polls see @dev_state change */
11920 	synchronize_net();
11921 	netif_tx_wake_all_queues(bp->dev);
11922 	if (BNXT_LINK_IS_UP(bp))
11923 		netif_carrier_on(bp->dev);
11924 }
11925 
11926 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11927 {
11928 	u8 active_fec = link_info->active_fec_sig_mode &
11929 			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11930 
11931 	switch (active_fec) {
11932 	default:
11933 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11934 		return "None";
11935 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11936 		return "Clause 74 BaseR";
11937 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11938 		return "Clause 91 RS(528,514)";
11939 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11940 		return "Clause 91 RS544_1XN";
11941 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11942 		return "Clause 91 RS(544,514)";
11943 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11944 		return "Clause 91 RS272_1XN";
11945 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11946 		return "Clause 91 RS(272,257)";
11947 	}
11948 }
11949 
11950 static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
11951 {
11952 	u8 reason = link_info->link_down_reason;
11953 
11954 	/* Multiple bits can be set; report only one bit, in order of
11955 	 * priority.
11956 	 */
11957 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
11958 		return "(Remote fault)";
11959 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
11960 		return "(OTP Speed limit violation)";
11961 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
11962 		return "(Cable removed)";
11963 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
11964 		return "(Module fault)";
11965 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
11966 		return "(BMC request down)";
11967 	return "";
11968 }
11969 
11970 void bnxt_report_link(struct bnxt *bp)
11971 {
11972 	if (BNXT_LINK_IS_UP(bp)) {
11973 		const char *signal = "";
11974 		const char *flow_ctrl;
11975 		const char *duplex;
11976 		u32 speed;
11977 		u16 fec;
11978 
11979 		netif_carrier_on(bp->dev);
11980 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11981 		if (speed == SPEED_UNKNOWN) {
11982 			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11983 			return;
11984 		}
11985 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11986 			duplex = "full";
11987 		else
11988 			duplex = "half";
11989 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11990 			flow_ctrl = "ON - receive & transmit";
11991 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11992 			flow_ctrl = "ON - transmit";
11993 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
11994 			flow_ctrl = "ON - receive";
11995 		else
11996 			flow_ctrl = "none";
11997 		if (bp->link_info.phy_qcfg_resp.option_flags &
11998 		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
11999 			u8 sig_mode = bp->link_info.active_fec_sig_mode &
12000 				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
12001 			switch (sig_mode) {
12002 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
12003 				signal = "(NRZ) ";
12004 				break;
12005 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
12006 				signal = "(PAM4 56Gbps) ";
12007 				break;
12008 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
12009 				signal = "(PAM4 112Gbps) ";
12010 				break;
12011 			default:
12012 				break;
12013 			}
12014 		}
12015 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
12016 			    speed, signal, duplex, flow_ctrl);
12017 		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
12018 			netdev_info(bp->dev, "EEE is %s\n",
12019 				    bp->eee.eee_active ? "active" :
12020 							 "not active");
12021 		fec = bp->link_info.fec_cfg;
12022 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
12023 			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
12024 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
12025 				    bnxt_report_fec(&bp->link_info));
12026 	} else {
12027 		char *str = bnxt_link_down_reason(&bp->link_info);
12028 
12029 		netif_carrier_off(bp->dev);
12030 		netdev_err(bp->dev, "NIC Link is Down %s\n", str);
12031 	}
12032 }
12033 
12034 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
12035 {
12036 	if (!resp->supported_speeds_auto_mode &&
12037 	    !resp->supported_speeds_force_mode &&
12038 	    !resp->supported_pam4_speeds_auto_mode &&
12039 	    !resp->supported_pam4_speeds_force_mode &&
12040 	    !resp->supported_speeds2_auto_mode &&
12041 	    !resp->supported_speeds2_force_mode)
12042 		return true;
12043 	return false;
12044 }
12045 
12046 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
12047 {
12048 	struct bnxt_link_info *link_info = &bp->link_info;
12049 	struct hwrm_port_phy_qcaps_output *resp;
12050 	struct hwrm_port_phy_qcaps_input *req;
12051 	int rc = 0;
12052 
12053 	if (bp->hwrm_spec_code < 0x10201)
12054 		return 0;
12055 
12056 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
12057 	if (rc)
12058 		return rc;
12059 
12060 	resp = hwrm_req_hold(bp, req);
12061 	rc = hwrm_req_send(bp, req);
12062 	if (rc)
12063 		goto hwrm_phy_qcaps_exit;
12064 
12065 	bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
12066 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
12067 		struct ethtool_keee *eee = &bp->eee;
12068 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
12069 
12070 		_bnxt_fw_to_linkmode(eee->supported, fw_speeds);
12071 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
12072 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
12073 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
12074 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
12075 	}
12076 
12077 	if (bp->hwrm_spec_code >= 0x10a01) {
12078 		if (bnxt_phy_qcaps_no_speed(resp)) {
12079 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
12080 			netdev_warn(bp->dev, "Ethernet link disabled\n");
12081 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
12082 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
12083 			netdev_info(bp->dev, "Ethernet link enabled\n");
12084 			/* Phy re-enabled, reprobe the speeds */
12085 			link_info->support_auto_speeds = 0;
12086 			link_info->support_pam4_auto_speeds = 0;
12087 			link_info->support_auto_speeds2 = 0;
12088 		}
12089 	}
12090 	if (resp->supported_speeds_auto_mode)
12091 		link_info->support_auto_speeds =
12092 			le16_to_cpu(resp->supported_speeds_auto_mode);
12093 	if (resp->supported_pam4_speeds_auto_mode)
12094 		link_info->support_pam4_auto_speeds =
12095 			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12096 	if (resp->supported_speeds2_auto_mode)
12097 		link_info->support_auto_speeds2 =
12098 			le16_to_cpu(resp->supported_speeds2_auto_mode);
12099 
12100 	bp->port_count = resp->port_cnt;
12101 
12102 hwrm_phy_qcaps_exit:
12103 	hwrm_req_drop(bp, req);
12104 	return rc;
12105 }
12106 
12107 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12108 {
12109 	struct hwrm_port_mac_qcaps_output *resp;
12110 	struct hwrm_port_mac_qcaps_input *req;
12111 	int rc;
12112 
12113 	if (bp->hwrm_spec_code < 0x10a03)
12114 		return;
12115 
12116 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12117 	if (rc)
12118 		return;
12119 
12120 	resp = hwrm_req_hold(bp, req);
12121 	rc = hwrm_req_send_silent(bp, req);
12122 	if (!rc)
12123 		bp->mac_flags = resp->flags;
12124 	hwrm_req_drop(bp, req);
12125 }
12126 
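/* Return true if @advertising contains any speed bit that is missing
 * from @supported: (advertising ^ supported) isolates the differing
 * bits, and OR-ing them back into @supported changes it only if an
 * advertised bit is not already supported.
 */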
12127 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12128 {
12129 	u16 diff = advertising ^ supported;
12130 
12131 	return ((supported | diff) != supported);
12132 }
12133 
12134 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12135 {
12136 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12137 
12138 	/* Check if any advertised speeds are no longer supported. The caller
12139 	 * holds the link_lock mutex, so we can modify link_info settings.
12140 	 */
12141 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12142 		if (bnxt_support_dropped(link_info->advertising,
12143 					 link_info->support_auto_speeds2)) {
12144 			link_info->advertising = link_info->support_auto_speeds2;
12145 			return true;
12146 		}
12147 		return false;
12148 	}
12149 	if (bnxt_support_dropped(link_info->advertising,
12150 				 link_info->support_auto_speeds)) {
12151 		link_info->advertising = link_info->support_auto_speeds;
12152 		return true;
12153 	}
12154 	if (bnxt_support_dropped(link_info->advertising_pam4,
12155 				 link_info->support_pam4_auto_speeds)) {
12156 		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12157 		return true;
12158 	}
12159 	return false;
12160 }
12161 
12162 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12163 {
12164 	struct bnxt_link_info *link_info = &bp->link_info;
12165 	struct hwrm_port_phy_qcfg_output *resp;
12166 	struct hwrm_port_phy_qcfg_input *req;
12167 	u8 link_state = link_info->link_state;
12168 	bool support_changed;
12169 	int rc;
12170 
12171 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12172 	if (rc)
12173 		return rc;
12174 
12175 	resp = hwrm_req_hold(bp, req);
12176 	rc = hwrm_req_send(bp, req);
12177 	if (rc) {
12178 		hwrm_req_drop(bp, req);
12179 		if (BNXT_VF(bp) && rc == -ENODEV) {
12180 			netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12181 			rc = 0;
12182 		}
12183 		return rc;
12184 	}
12185 
12186 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12187 	link_info->phy_link_status = resp->link;
12188 	link_info->duplex = resp->duplex_cfg;
12189 	if (bp->hwrm_spec_code >= 0x10800)
12190 		link_info->duplex = resp->duplex_state;
12191 	link_info->pause = resp->pause;
12192 	link_info->auto_mode = resp->auto_mode;
12193 	link_info->auto_pause_setting = resp->auto_pause;
12194 	link_info->lp_pause = resp->link_partner_adv_pause;
12195 	link_info->force_pause_setting = resp->force_pause;
12196 	link_info->duplex_setting = resp->duplex_cfg;
12197 	if (link_info->phy_link_status == BNXT_LINK_LINK) {
12198 		link_info->link_speed = le16_to_cpu(resp->link_speed);
12199 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12200 			link_info->active_lanes = resp->active_lanes;
12201 	} else {
12202 		link_info->link_speed = 0;
12203 		link_info->active_lanes = 0;
12204 	}
12205 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12206 	link_info->force_pam4_link_speed =
12207 		le16_to_cpu(resp->force_pam4_link_speed);
12208 	link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12209 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12210 	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12211 	link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12212 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12213 	link_info->auto_pam4_link_speeds =
12214 		le16_to_cpu(resp->auto_pam4_link_speed_mask);
12215 	link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12216 	link_info->lp_auto_link_speeds =
12217 		le16_to_cpu(resp->link_partner_adv_speeds);
12218 	link_info->lp_auto_pam4_link_speeds =
12219 		resp->link_partner_pam4_adv_speeds;
12220 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12221 	link_info->phy_ver[0] = resp->phy_maj;
12222 	link_info->phy_ver[1] = resp->phy_min;
12223 	link_info->phy_ver[2] = resp->phy_bld;
12224 	link_info->media_type = resp->media_type;
12225 	link_info->phy_type = resp->phy_type;
12226 	link_info->transceiver = resp->xcvr_pkg_type;
12227 	link_info->phy_addr = resp->eee_config_phy_addr &
12228 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12229 	link_info->module_status = resp->module_status;
12230 	link_info->link_down_reason = resp->link_down_reason;
12231 
12232 	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12233 		struct ethtool_keee *eee = &bp->eee;
12234 		u16 fw_speeds;
12235 
12236 		eee->eee_active = 0;
12237 		if (resp->eee_config_phy_addr &
12238 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12239 			eee->eee_active = 1;
12240 			fw_speeds = le16_to_cpu(
12241 				resp->link_partner_adv_eee_link_speed_mask);
12242 			_bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12243 		}
12244 
12245 		/* Pull initial EEE config */
12246 		if (!chng_link_state) {
12247 			if (resp->eee_config_phy_addr &
12248 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12249 				eee->eee_enabled = 1;
12250 
12251 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12252 			_bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12253 
12254 			if (resp->eee_config_phy_addr &
12255 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12256 				__le32 tmr;
12257 
12258 				eee->tx_lpi_enabled = 1;
12259 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12260 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
12261 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12262 			}
12263 		}
12264 	}
12265 
12266 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12267 	if (bp->hwrm_spec_code >= 0x10504) {
12268 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12269 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12270 	}
12271 	/* TODO: need to add more logic to report VF link */
12272 	if (chng_link_state) {
12273 		if (link_info->phy_link_status == BNXT_LINK_LINK)
12274 			link_info->link_state = BNXT_LINK_STATE_UP;
12275 		else
12276 			link_info->link_state = BNXT_LINK_STATE_DOWN;
12277 		if (link_state != link_info->link_state)
12278 			bnxt_report_link(bp);
12279 	} else {
12280 		/* always report link down if not asked to update the link state */
12281 		link_info->link_state = BNXT_LINK_STATE_DOWN;
12282 	}
12283 	hwrm_req_drop(bp, req);
12284 
12285 	if (!BNXT_PHY_CFG_ABLE(bp))
12286 		return 0;
12287 
12288 	support_changed = bnxt_support_speed_dropped(link_info);
12289 	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12290 		bnxt_hwrm_set_link_setting(bp, true, false);
12291 	return 0;
12292 }
12293 
12294 static void bnxt_get_port_module_status(struct bnxt *bp)
12295 {
12296 	struct bnxt_link_info *link_info = &bp->link_info;
12297 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12298 	u8 module_status;
12299 
12300 	if (bnxt_update_link(bp, true))
12301 		return;
12302 
12303 	module_status = link_info->module_status;
12304 	switch (module_status) {
12305 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12306 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12307 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12308 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12309 			    bp->pf.port_id);
12310 		if (bp->hwrm_spec_code >= 0x10201) {
12311 			netdev_warn(bp->dev, "Module part number %s\n",
12312 				    resp->phy_vendor_partnumber);
12313 		}
12314 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12315 			netdev_warn(bp->dev, "TX is disabled\n");
12316 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12317 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12318 	}
12319 }
12320 
12321 static void
12322 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12323 {
12324 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12325 		if (bp->hwrm_spec_code >= 0x10201)
12326 			req->auto_pause =
12327 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12328 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12329 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12330 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12331 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12332 		req->enables |=
12333 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12334 	} else {
12335 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12336 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12337 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12338 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12339 		req->enables |=
12340 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12341 		if (bp->hwrm_spec_code >= 0x10201) {
12342 			req->auto_pause = req->force_pause;
12343 			req->enables |= cpu_to_le32(
12344 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12345 		}
12346 	}
12347 }
12348 
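/* Fill in the link speed fields of an HWRM_PORT_PHY_CFG request.  With
 * autoneg enabled, program the advertised speed masks (speeds2, NRZ or
 * PAM4) and restart autoneg; otherwise force the requested speed.
 */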
12349 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12350 {
12351 	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12352 		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12353 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12354 			req->enables |=
12355 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12356 			req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12357 		} else if (bp->link_info.advertising) {
12358 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12359 			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12360 		}
12361 		if (bp->link_info.advertising_pam4) {
12362 			req->enables |=
12363 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12364 			req->auto_link_pam4_speed_mask =
12365 				cpu_to_le16(bp->link_info.advertising_pam4);
12366 		}
12367 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12368 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12369 	} else {
12370 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12371 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12372 			req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12373 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12374 			netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12375 				   (u32)bp->link_info.req_link_speed);
12376 		} else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12377 			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12378 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12379 		} else {
12380 			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12381 		}
12382 	}
12383 
12384 	/* tell chimp that the setting takes effect immediately */
12385 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12386 }
12387 
12388 int bnxt_hwrm_set_pause(struct bnxt *bp)
12389 {
12390 	struct hwrm_port_phy_cfg_input *req;
12391 	int rc;
12392 
12393 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12394 	if (rc)
12395 		return rc;
12396 
12397 	bnxt_hwrm_set_pause_common(bp, req);
12398 
12399 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12400 	    bp->link_info.force_link_chng)
12401 		bnxt_hwrm_set_link_common(bp, req);
12402 
12403 	rc = hwrm_req_send(bp, req);
12404 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12405 		/* Since changing the pause setting doesn't trigger any link
12406 		 * change event, the driver needs to update the current pause
12407 		 * result upon successful return of the phy_cfg command.
12408 		 */
12409 		bp->link_info.pause =
12410 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12411 		bp->link_info.auto_pause_setting = 0;
12412 		if (!bp->link_info.force_link_chng)
12413 			bnxt_report_link(bp);
12414 	}
12415 	bp->link_info.force_link_chng = false;
12416 	return rc;
12417 }
12418 
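/* Fill in the EEE fields of an HWRM_PORT_PHY_CFG request from the cached
 * ethtool EEE settings (advertised speeds, TX LPI enable and timer).
 */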
12419 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12420 			      struct hwrm_port_phy_cfg_input *req)
12421 {
12422 	struct ethtool_keee *eee = &bp->eee;
12423 
12424 	if (eee->eee_enabled) {
12425 		u16 eee_speeds;
12426 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12427 
12428 		if (eee->tx_lpi_enabled)
12429 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12430 		else
12431 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12432 
12433 		req->flags |= cpu_to_le32(flags);
12434 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12435 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12436 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12437 	} else {
12438 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12439 	}
12440 }
12441 
12442 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12443 {
12444 	struct hwrm_port_phy_cfg_input *req;
12445 	int rc;
12446 
12447 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12448 	if (rc)
12449 		return rc;
12450 
12451 	if (set_pause)
12452 		bnxt_hwrm_set_pause_common(bp, req);
12453 
12454 	bnxt_hwrm_set_link_common(bp, req);
12455 
12456 	if (set_eee)
12457 		bnxt_hwrm_set_eee(bp, req);
12458 	return hwrm_req_send(bp, req);
12459 }
12460 
12461 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12462 {
12463 	struct hwrm_port_phy_cfg_input *req;
12464 	int rc;
12465 
12466 	if (!BNXT_SINGLE_PF(bp))
12467 		return 0;
12468 
12469 	if (pci_num_vf(bp->pdev) &&
12470 	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12471 		return 0;
12472 
12473 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12474 	if (rc)
12475 		return rc;
12476 
12477 	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12478 	rc = hwrm_req_send(bp, req);
12479 	if (!rc) {
12480 		mutex_lock(&bp->link_lock);
12481 		/* The device is not obliged to bring the link down in certain
12482 		 * scenarios, even when forced.  Setting the state unknown is
12483 		 * consistent with driver startup and will force the link state
12484 		 * to be reported during subsequent open based on PORT_PHY_QCFG.
12485 		 */
12486 		bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12487 		mutex_unlock(&bp->link_lock);
12488 	}
12489 	return rc;
12490 }
12491 
12492 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12493 {
12494 #ifdef CONFIG_TEE_BNXT_FW
12495 	int rc = tee_bnxt_fw_load();
12496 
12497 	if (rc)
12498 		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12499 
12500 	return rc;
12501 #else
12502 	netdev_err(bp->dev, "OP-TEE not supported\n");
12503 	return -ENODEV;
12504 #endif
12505 }
12506 
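/* Poll the firmware health status register and the HWRM channel until the
 * firmware is done booting or recovering.  If the firmware has crashed
 * with no master function, request a firmware reset via OP-TEE instead.
 */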
12507 static int bnxt_try_recover_fw(struct bnxt *bp)
12508 {
12509 	if (bp->fw_health && bp->fw_health->status_reliable) {
12510 		int retry = 0, rc;
12511 		u32 sts;
12512 
12513 		do {
12514 			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12515 			rc = bnxt_hwrm_poll(bp);
12516 			if (!BNXT_FW_IS_BOOTING(sts) &&
12517 			    !BNXT_FW_IS_RECOVERING(sts))
12518 				break;
12519 			retry++;
12520 		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12521 
12522 		if (!BNXT_FW_IS_HEALTHY(sts)) {
12523 			netdev_err(bp->dev,
12524 				   "Firmware not responding, status: 0x%x\n",
12525 				   sts);
12526 			rc = -ENODEV;
12527 		}
12528 		if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12529 			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12530 			return bnxt_fw_reset_via_optee(bp);
12531 		}
12532 		return rc;
12533 	}
12534 
12535 	return -ENODEV;
12536 }
12537 
12538 void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12539 {
12540 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12541 
12542 	if (!BNXT_NEW_RM(bp))
12543 		return; /* no resource reservations required */
12544 
12545 	hw_resc->resv_cp_rings = 0;
12546 	hw_resc->resv_stat_ctxs = 0;
12547 	hw_resc->resv_irqs = 0;
12548 	hw_resc->resv_tx_rings = 0;
12549 	hw_resc->resv_rx_rings = 0;
12550 	hw_resc->resv_hw_ring_grps = 0;
12551 	hw_resc->resv_vnics = 0;
12552 	hw_resc->resv_rsscos_ctxs = 0;
12553 	if (!fw_reset) {
12554 		bp->tx_nr_rings = 0;
12555 		bp->rx_nr_rings = 0;
12556 	}
12557 }
12558 
12559 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12560 {
12561 	int rc;
12562 
12563 	if (!BNXT_NEW_RM(bp))
12564 		return 0; /* no resource reservations required */
12565 
12566 	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12567 	if (rc)
12568 		netdev_err(bp->dev, "resc_qcaps failed\n");
12569 
12570 	bnxt_clear_reservations(bp, fw_reset);
12571 
12572 	return rc;
12573 }
12574 
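/* Tell the firmware that the driver interface is going up or down using
 * HWRM_FUNC_DRV_IF_CHANGE.  On the up transition, detect any firmware
 * reset or resource/capability change that happened while the interface
 * was down and reinitialize firmware state accordingly.
 */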
12575 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12576 {
12577 	struct hwrm_func_drv_if_change_output *resp;
12578 	struct hwrm_func_drv_if_change_input *req;
12579 	bool resc_reinit = false;
12580 	bool caps_change = false;
12581 	int rc, retry = 0;
12582 	bool fw_reset;
12583 	u32 flags = 0;
12584 
12585 	fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12586 	bp->fw_reset_state = 0;
12587 
12588 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12589 		return 0;
12590 
12591 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12592 	if (rc)
12593 		return rc;
12594 
12595 	if (up)
12596 		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12597 	resp = hwrm_req_hold(bp, req);
12598 
12599 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12600 	while (retry < BNXT_FW_IF_RETRY) {
12601 		rc = hwrm_req_send(bp, req);
12602 		if (rc != -EAGAIN)
12603 			break;
12604 
12605 		msleep(50);
12606 		retry++;
12607 	}
12608 
12609 	if (rc == -EAGAIN) {
12610 		hwrm_req_drop(bp, req);
12611 		return rc;
12612 	} else if (!rc) {
12613 		flags = le32_to_cpu(resp->flags);
12614 	} else if (up) {
12615 		rc = bnxt_try_recover_fw(bp);
12616 		fw_reset = true;
12617 	}
12618 	hwrm_req_drop(bp, req);
12619 	if (rc)
12620 		return rc;
12621 
12622 	if (!up) {
12623 		bnxt_inv_fw_health_reg(bp);
12624 		return 0;
12625 	}
12626 
12627 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12628 		resc_reinit = true;
12629 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12630 	    test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12631 		fw_reset = true;
12632 	else
12633 		bnxt_remap_fw_health_regs(bp);
12634 
12635 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12636 		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12637 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12638 		return -ENODEV;
12639 	}
12640 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12641 		caps_change = true;
12642 
12643 	if (resc_reinit || fw_reset || caps_change) {
12644 		if (fw_reset || caps_change) {
12645 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12646 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12647 				bnxt_ulp_irq_stop(bp);
12648 			bnxt_free_ctx_mem(bp, false);
12649 			bnxt_dcb_free(bp);
12650 			rc = bnxt_fw_init_one(bp);
12651 			if (rc) {
12652 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12653 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12654 				return rc;
12655 			}
12656 			/* IRQs will be initialized later in bnxt_request_irq() */
12657 			bnxt_clear_int_mode(bp);
12658 		}
12659 		rc = bnxt_cancel_reservations(bp, fw_reset);
12660 	}
12661 	return rc;
12662 }
12663 
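/* Query the port LED capabilities from the firmware and cache them.  The
 * LEDs are ignored unless every LED has a valid group ID and supports
 * alternate blinking.
 */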
12664 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12665 {
12666 	struct hwrm_port_led_qcaps_output *resp;
12667 	struct hwrm_port_led_qcaps_input *req;
12668 	struct bnxt_pf_info *pf = &bp->pf;
12669 	int rc;
12670 
12671 	bp->num_leds = 0;
12672 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12673 		return 0;
12674 
12675 	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12676 	if (rc)
12677 		return rc;
12678 
12679 	req->port_id = cpu_to_le16(pf->port_id);
12680 	resp = hwrm_req_hold(bp, req);
12681 	rc = hwrm_req_send(bp, req);
12682 	if (rc) {
12683 		hwrm_req_drop(bp, req);
12684 		return rc;
12685 	}
12686 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12687 		int i;
12688 
12689 		bp->num_leds = resp->num_leds;
12690 		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12691 						 bp->num_leds);
12692 		for (i = 0; i < bp->num_leds; i++) {
12693 			struct bnxt_led_info *led = &bp->leds[i];
12694 			__le16 caps = led->led_state_caps;
12695 
12696 			if (!led->led_group_id ||
12697 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
12698 				bp->num_leds = 0;
12699 				break;
12700 			}
12701 		}
12702 	}
12703 	hwrm_req_drop(bp, req);
12704 	return 0;
12705 }
12706 
12707 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12708 {
12709 	struct hwrm_wol_filter_alloc_output *resp;
12710 	struct hwrm_wol_filter_alloc_input *req;
12711 	int rc;
12712 
12713 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12714 	if (rc)
12715 		return rc;
12716 
12717 	req->port_id = cpu_to_le16(bp->pf.port_id);
12718 	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12719 	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12720 	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12721 
12722 	resp = hwrm_req_hold(bp, req);
12723 	rc = hwrm_req_send(bp, req);
12724 	if (!rc)
12725 		bp->wol_filter_id = resp->wol_filter_id;
12726 	hwrm_req_drop(bp, req);
12727 	return rc;
12728 }
12729 
12730 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12731 {
12732 	struct hwrm_wol_filter_free_input *req;
12733 	int rc;
12734 
12735 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12736 	if (rc)
12737 		return rc;
12738 
12739 	req->port_id = cpu_to_le16(bp->pf.port_id);
12740 	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12741 	req->wol_filter_id = bp->wol_filter_id;
12742 
12743 	return hwrm_req_send(bp, req);
12744 }
12745 
12746 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12747 {
12748 	struct hwrm_wol_filter_qcfg_output *resp;
12749 	struct hwrm_wol_filter_qcfg_input *req;
12750 	u16 next_handle = 0;
12751 	int rc;
12752 
12753 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12754 	if (rc)
12755 		return rc;
12756 
12757 	req->port_id = cpu_to_le16(bp->pf.port_id);
12758 	req->handle = cpu_to_le16(handle);
12759 	resp = hwrm_req_hold(bp, req);
12760 	rc = hwrm_req_send(bp, req);
12761 	if (!rc) {
12762 		next_handle = le16_to_cpu(resp->next_handle);
12763 		if (next_handle != 0) {
12764 			if (resp->wol_type ==
12765 			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12766 				bp->wol = 1;
12767 				bp->wol_filter_id = resp->wol_filter_id;
12768 			}
12769 		}
12770 	}
12771 	hwrm_req_drop(bp, req);
12772 	return next_handle;
12773 }
12774 
12775 static void bnxt_get_wol_settings(struct bnxt *bp)
12776 {
12777 	u16 handle = 0;
12778 
12779 	bp->wol = 0;
12780 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12781 		return;
12782 
12783 	do {
12784 		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12785 	} while (handle && handle != 0xffff);
12786 }
12787 
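/* Check that the cached EEE settings are consistent with the current
 * autoneg advertisement.  EEE is disabled if speed autoneg is off, and
 * the EEE advertisement is limited to the autoneg advertisement.
 * Returns false if anything had to be adjusted.
 */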
12788 static bool bnxt_eee_config_ok(struct bnxt *bp)
12789 {
12790 	struct ethtool_keee *eee = &bp->eee;
12791 	struct bnxt_link_info *link_info = &bp->link_info;
12792 
12793 	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12794 		return true;
12795 
12796 	if (eee->eee_enabled) {
12797 		__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12798 		__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12799 
12800 		_bnxt_fw_to_linkmode(advertising, link_info->advertising);
12801 
12802 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12803 			eee->eee_enabled = 0;
12804 			return false;
12805 		}
12806 		if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12807 			linkmode_and(eee->advertised, advertising,
12808 				     eee->supported);
12809 			return false;
12810 		}
12811 	}
12812 	return true;
12813 }
12814 
12815 static int bnxt_update_phy_setting(struct bnxt *bp)
12816 {
12817 	int rc;
12818 	bool update_link = false;
12819 	bool update_pause = false;
12820 	bool update_eee = false;
12821 	struct bnxt_link_info *link_info = &bp->link_info;
12822 
12823 	rc = bnxt_update_link(bp, true);
12824 	if (rc) {
12825 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12826 			   rc);
12827 		return rc;
12828 	}
12829 	if (!BNXT_SINGLE_PF(bp))
12830 		return 0;
12831 
12832 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12833 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12834 	    link_info->req_flow_ctrl)
12835 		update_pause = true;
12836 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12837 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
12838 		update_pause = true;
12839 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12840 		if (BNXT_AUTO_MODE(link_info->auto_mode))
12841 			update_link = true;
12842 		if (bnxt_force_speed_updated(link_info))
12843 			update_link = true;
12844 		if (link_info->req_duplex != link_info->duplex_setting)
12845 			update_link = true;
12846 	} else {
12847 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12848 			update_link = true;
12849 		if (bnxt_auto_speed_updated(link_info))
12850 			update_link = true;
12851 	}
12852 
12853 	/* The last close may have shut down the link, so we need to call
12854 	 * PHY_CFG to bring it back up.
12855 	 */
12856 	if (!BNXT_LINK_IS_UP(bp))
12857 		update_link = true;
12858 
12859 	if (!bnxt_eee_config_ok(bp))
12860 		update_eee = true;
12861 
12862 	if (update_link)
12863 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12864 	else if (update_pause)
12865 		rc = bnxt_hwrm_set_pause(bp);
12866 	if (rc) {
12867 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12868 			   rc);
12869 		return rc;
12870 	}
12871 
12872 	return rc;
12873 }
12874 
12875 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12876 
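/* Reinitialize the firmware and interrupt mode after a previously aborted
 * firmware reset so that a subsequent open can succeed.
 */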
12877 static int bnxt_reinit_after_abort(struct bnxt *bp)
12878 {
12879 	int rc;
12880 
12881 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12882 		return -EBUSY;
12883 
12884 	if (bp->dev->reg_state == NETREG_UNREGISTERED)
12885 		return -ENODEV;
12886 
12887 	rc = bnxt_fw_init_one(bp);
12888 	if (!rc) {
12889 		bnxt_clear_int_mode(bp);
12890 		rc = bnxt_init_int_mode(bp);
12891 		if (!rc) {
12892 			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12893 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12894 		}
12895 	}
12896 	return rc;
12897 }
12898 
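/* Re-install one user-configured ntuple or L2 filter in the hardware,
 * logging an error and dropping the filter if it cannot be restored.
 */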
12899 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12900 {
12901 	struct bnxt_ntuple_filter *ntp_fltr;
12902 	struct bnxt_l2_filter *l2_fltr;
12903 
12904 	if (list_empty(&fltr->list))
12905 		return;
12906 
12907 	if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12908 		ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12909 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12910 		atomic_inc(&l2_fltr->refcnt);
12911 		ntp_fltr->l2_fltr = l2_fltr;
12912 		if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12913 			bnxt_del_ntp_filter(bp, ntp_fltr);
12914 			netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12915 				   fltr->sw_id);
12916 		}
12917 	} else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12918 		l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12919 		if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12920 			bnxt_del_l2_filter(bp, l2_fltr);
12921 			netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12922 				   fltr->sw_id);
12923 		}
12924 	}
12925 }
12926 
12927 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12928 {
12929 	struct bnxt_filter_base *usr_fltr, *tmp;
12930 
12931 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12932 		bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12933 }
12934 
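/* Build per-TX-queue XPS CPU masks, spreading the online CPUs across the
 * TX rings of each traffic class starting from the device's NUMA node.
 */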
12935 static int bnxt_set_xps_mapping(struct bnxt *bp)
12936 {
12937 	int numa_node = dev_to_node(&bp->pdev->dev);
12938 	unsigned int q_idx, map_idx, cpu, i;
12939 	const struct cpumask *cpu_mask_ptr;
12940 	int nr_cpus = num_online_cpus();
12941 	cpumask_t *q_map;
12942 	int rc = 0;
12943 
12944 	q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12945 	if (!q_map)
12946 		return -ENOMEM;
12947 
12948 	/* Create CPU mask for all TX queues across MQPRIO traffic classes.
12949 	 * Each TC has the same number of TX queues. The nth TX queue for each
12950 	 * TC will have the same CPU mask.
12951 	 */
12952 	for (i = 0; i < nr_cpus; i++) {
12953 		map_idx = i % bp->tx_nr_rings_per_tc;
12954 		cpu = cpumask_local_spread(i, numa_node);
12955 		cpu_mask_ptr = get_cpu_mask(cpu);
12956 		cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12957 	}
12958 
12959 	/* Register CPU mask for each TX queue except the ones marked for XDP */
12960 	for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12961 		map_idx = q_idx % bp->tx_nr_rings_per_tc;
12962 		rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12963 		if (rc) {
12964 			netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12965 				    q_idx);
12966 			break;
12967 		}
12968 	}
12969 
12970 	kfree(q_map);
12971 
12972 	return rc;
12973 }
12974 
12975 static int bnxt_tx_nr_rings(struct bnxt *bp)
12976 {
12977 	return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
12978 			    bp->tx_nr_rings_per_tc;
12979 }
12980 
12981 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
12982 {
12983 	return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
12984 }
12985 
12986 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12987 {
12988 	int rc = 0;
12989 
12990 	netif_carrier_off(bp->dev);
12991 	if (irq_re_init) {
12992 		/* Reserve rings now if none were reserved at driver probe. */
12993 		rc = bnxt_init_dflt_ring_mode(bp);
12994 		if (rc) {
12995 			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
12996 			return rc;
12997 		}
12998 	}
12999 	rc = bnxt_reserve_rings(bp, irq_re_init);
13000 	if (rc)
13001 		return rc;
13002 
13003 	/* Make adjustments if reserved TX rings are fewer than requested */
13004 	bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
13005 	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
13006 	if (bp->tx_nr_rings_xdp) {
13007 		bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
13008 		bp->tx_nr_rings += bp->tx_nr_rings_xdp;
13009 	}
13010 	rc = bnxt_alloc_mem(bp, irq_re_init);
13011 	if (rc) {
13012 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13013 		goto open_err_free_mem;
13014 	}
13015 
13016 	if (irq_re_init) {
13017 		bnxt_init_napi(bp);
13018 		rc = bnxt_request_irq(bp);
13019 		if (rc) {
13020 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
13021 			goto open_err_irq;
13022 		}
13023 	}
13024 
13025 	rc = bnxt_init_nic(bp, irq_re_init);
13026 	if (rc) {
13027 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13028 		goto open_err_irq;
13029 	}
13030 
13031 	bnxt_enable_napi(bp);
13032 	bnxt_debug_dev_init(bp);
13033 
13034 	if (link_re_init) {
13035 		mutex_lock(&bp->link_lock);
13036 		rc = bnxt_update_phy_setting(bp);
13037 		mutex_unlock(&bp->link_lock);
13038 		if (rc) {
13039 			netdev_warn(bp->dev, "failed to update phy settings\n");
13040 			if (BNXT_SINGLE_PF(bp)) {
13041 				bp->link_info.phy_retry = true;
13042 				bp->link_info.phy_retry_expires =
13043 					jiffies + 5 * HZ;
13044 			}
13045 		}
13046 	}
13047 
13048 	if (irq_re_init) {
13049 		udp_tunnel_nic_reset_ntf(bp->dev);
13050 		rc = bnxt_set_xps_mapping(bp);
13051 		if (rc)
13052 			netdev_warn(bp->dev, "failed to set xps mapping\n");
13053 	}
13054 
13055 	if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
13056 		if (!static_key_enabled(&bnxt_xdp_locking_key))
13057 			static_branch_enable(&bnxt_xdp_locking_key);
13058 	} else if (static_key_enabled(&bnxt_xdp_locking_key)) {
13059 		static_branch_disable(&bnxt_xdp_locking_key);
13060 	}
13061 	set_bit(BNXT_STATE_OPEN, &bp->state);
13062 	bnxt_enable_int(bp);
13063 	/* Enable TX queues */
13064 	bnxt_tx_enable(bp);
13065 	mod_timer(&bp->timer, jiffies + bp->current_interval);
13066 	/* Poll link status and check for SFP+ module status */
13067 	mutex_lock(&bp->link_lock);
13068 	bnxt_get_port_module_status(bp);
13069 	mutex_unlock(&bp->link_lock);
13070 
13071 	/* VF-reps may need to be re-opened after the PF is re-opened */
13072 	if (BNXT_PF(bp))
13073 		bnxt_vf_reps_open(bp);
13074 	bnxt_ptp_init_rtc(bp, true);
13075 	bnxt_ptp_cfg_tstamp_filters(bp);
13076 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13077 		bnxt_hwrm_realloc_rss_ctx_vnic(bp);
13078 	bnxt_cfg_usr_fltrs(bp);
13079 	return 0;
13080 
13081 open_err_irq:
13082 	bnxt_del_napi(bp);
13083 
13084 open_err_free_mem:
13085 	bnxt_free_skbs(bp);
13086 	bnxt_free_irq(bp);
13087 	bnxt_free_mem(bp, true);
13088 	return rc;
13089 }
13090 
13091 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13092 {
13093 	int rc = 0;
13094 
13095 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13096 		rc = -EIO;
13097 	if (!rc)
13098 		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13099 	if (rc) {
13100 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13101 		netif_close(bp->dev);
13102 	}
13103 	return rc;
13104 }
13105 
13106 /* netdev instance lock held, open the NIC halfway by allocating all
13107  * resources, but NAPI, IRQ, and TX are not enabled.  This is mainly used
13108  * for offline self tests.
13109  */
13110 int bnxt_half_open_nic(struct bnxt *bp)
13111 {
13112 	int rc = 0;
13113 
13114 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13115 		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13116 		rc = -ENODEV;
13117 		goto half_open_err;
13118 	}
13119 
13120 	rc = bnxt_alloc_mem(bp, true);
13121 	if (rc) {
13122 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13123 		goto half_open_err;
13124 	}
13125 	bnxt_init_napi(bp);
13126 	set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13127 	rc = bnxt_init_nic(bp, true);
13128 	if (rc) {
13129 		clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13130 		bnxt_del_napi(bp);
13131 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13132 		goto half_open_err;
13133 	}
13134 	return 0;
13135 
13136 half_open_err:
13137 	bnxt_free_skbs(bp);
13138 	bnxt_free_mem(bp, true);
13139 	netif_close(bp->dev);
13140 	return rc;
13141 }
13142 
13143 /* netdev instance lock held, this call can only be made after a previous
13144  * successful call to bnxt_half_open_nic().
13145  */
13146 void bnxt_half_close_nic(struct bnxt *bp)
13147 {
13148 	bnxt_hwrm_resource_free(bp, false, true);
13149 	bnxt_del_napi(bp);
13150 	bnxt_free_skbs(bp);
13151 	bnxt_free_mem(bp, true);
13152 	clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13153 }
13154 
13155 void bnxt_reenable_sriov(struct bnxt *bp)
13156 {
13157 	if (BNXT_PF(bp)) {
13158 		struct bnxt_pf_info *pf = &bp->pf;
13159 		int n = pf->active_vfs;
13160 
13161 		if (n)
13162 			bnxt_cfg_hw_sriov(bp, &n, true);
13163 	}
13164 }
13165 
13166 static int bnxt_open(struct net_device *dev)
13167 {
13168 	struct bnxt *bp = netdev_priv(dev);
13169 	int rc;
13170 
13171 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13172 		rc = bnxt_reinit_after_abort(bp);
13173 		if (rc) {
13174 			if (rc == -EBUSY)
13175 				netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13176 			else
13177 				netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13178 			return -ENODEV;
13179 		}
13180 	}
13181 
13182 	rc = bnxt_hwrm_if_change(bp, true);
13183 	if (rc)
13184 		return rc;
13185 
13186 	rc = __bnxt_open_nic(bp, true, true);
13187 	if (rc) {
13188 		bnxt_hwrm_if_change(bp, false);
13189 	} else {
13190 		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13191 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13192 				bnxt_queue_sp_work(bp,
13193 						   BNXT_RESTART_ULP_SP_EVENT);
13194 		}
13195 	}
13196 
13197 	return rc;
13198 }
13199 
13200 static bool bnxt_drv_busy(struct bnxt *bp)
13201 {
13202 	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13203 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
13204 }
13205 
13206 static void bnxt_get_ring_stats(struct bnxt *bp,
13207 				struct rtnl_link_stats64 *stats);
13208 
13209 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13210 			     bool link_re_init)
13211 {
13212 	/* Close the VF-reps before closing PF */
13213 	if (BNXT_PF(bp))
13214 		bnxt_vf_reps_close(bp);
13215 
13216 	/* Change device state to avoid TX queue wake-ups */
13217 	bnxt_tx_disable(bp);
13218 
13219 	clear_bit(BNXT_STATE_OPEN, &bp->state);
13220 	smp_mb__after_atomic();
13221 	while (bnxt_drv_busy(bp))
13222 		msleep(20);
13223 
13224 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13225 		bnxt_clear_rss_ctxs(bp);
13226 	/* Flush rings and disable interrupts */
13227 	bnxt_shutdown_nic(bp, irq_re_init);
13228 
13229 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13230 
13231 	bnxt_debug_dev_exit(bp);
13232 	bnxt_disable_napi(bp);
13233 	timer_delete_sync(&bp->timer);
13234 	bnxt_free_skbs(bp);
13235 
13236 	/* Save ring stats before shutdown */
13237 	if (bp->bnapi && irq_re_init) {
13238 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13239 		bnxt_get_ring_drv_stats(bp, &bp->ring_drv_stats_prev);
13240 	}
13241 	if (irq_re_init) {
13242 		bnxt_free_irq(bp);
13243 		bnxt_del_napi(bp);
13244 	}
13245 	bnxt_free_mem(bp, irq_re_init);
13246 }
13247 
13248 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13249 {
13250 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13251 		/* If we get here, it means firmware reset is in progress
13252 		 * while we are trying to close.  We can safely proceed with
13253 		 * the close because we are holding netdev instance lock.
13254 		 * Some firmware messages may fail as we proceed to close.
13255 		 * We set the ABORT_ERR flag here so that the FW reset thread
13256 		 * will later abort when it gets the netdev instance lock
13257 		 * and sees the flag.
13258 		 */
13259 		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13260 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13261 	}
13262 
13263 #ifdef CONFIG_BNXT_SRIOV
13264 	if (bp->sriov_cfg) {
13265 		int rc;
13266 
13267 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13268 						      !bp->sriov_cfg,
13269 						      BNXT_SRIOV_CFG_WAIT_TMO);
13270 		if (!rc)
13271 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13272 		else if (rc < 0)
13273 			netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13274 	}
13275 #endif
13276 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
13277 }
13278 
13279 static int bnxt_close(struct net_device *dev)
13280 {
13281 	struct bnxt *bp = netdev_priv(dev);
13282 
13283 	bnxt_close_nic(bp, true, true);
13284 	bnxt_hwrm_shutdown_link(bp);
13285 	bnxt_hwrm_if_change(bp, false);
13286 	return 0;
13287 }
13288 
13289 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13290 				   u16 *val)
13291 {
13292 	struct hwrm_port_phy_mdio_read_output *resp;
13293 	struct hwrm_port_phy_mdio_read_input *req;
13294 	int rc;
13295 
13296 	if (bp->hwrm_spec_code < 0x10a00)
13297 		return -EOPNOTSUPP;
13298 
13299 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13300 	if (rc)
13301 		return rc;
13302 
13303 	req->port_id = cpu_to_le16(bp->pf.port_id);
13304 	req->phy_addr = phy_addr;
13305 	req->reg_addr = cpu_to_le16(reg & 0x1f);
13306 	if (mdio_phy_id_is_c45(phy_addr)) {
13307 		req->cl45_mdio = 1;
13308 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
13309 		req->dev_addr = mdio_phy_id_devad(phy_addr);
13310 		req->reg_addr = cpu_to_le16(reg);
13311 	}
13312 
13313 	resp = hwrm_req_hold(bp, req);
13314 	rc = hwrm_req_send(bp, req);
13315 	if (!rc)
13316 		*val = le16_to_cpu(resp->reg_data);
13317 	hwrm_req_drop(bp, req);
13318 	return rc;
13319 }
13320 
13321 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13322 				    u16 val)
13323 {
13324 	struct hwrm_port_phy_mdio_write_input *req;
13325 	int rc;
13326 
13327 	if (bp->hwrm_spec_code < 0x10a00)
13328 		return -EOPNOTSUPP;
13329 
13330 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13331 	if (rc)
13332 		return rc;
13333 
13334 	req->port_id = cpu_to_le16(bp->pf.port_id);
13335 	req->phy_addr = phy_addr;
13336 	req->reg_addr = cpu_to_le16(reg & 0x1f);
13337 	if (mdio_phy_id_is_c45(phy_addr)) {
13338 		req->cl45_mdio = 1;
13339 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
13340 		req->dev_addr = mdio_phy_id_devad(phy_addr);
13341 		req->reg_addr = cpu_to_le16(reg);
13342 	}
13343 	req->reg_data = cpu_to_le16(val);
13344 
13345 	return hwrm_req_send(bp, req);
13346 }
13347 
13348 /* netdev instance lock held */
13349 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13350 {
13351 	struct mii_ioctl_data *mdio = if_mii(ifr);
13352 	struct bnxt *bp = netdev_priv(dev);
13353 	int rc;
13354 
13355 	switch (cmd) {
13356 	case SIOCGMIIPHY:
13357 		mdio->phy_id = bp->link_info.phy_addr;
13358 
13359 		fallthrough;
13360 	case SIOCGMIIREG: {
13361 		u16 mii_regval = 0;
13362 
13363 		if (!netif_running(dev))
13364 			return -EAGAIN;
13365 
13366 		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13367 					     &mii_regval);
13368 		mdio->val_out = mii_regval;
13369 		return rc;
13370 	}
13371 
13372 	case SIOCSMIIREG:
13373 		if (!netif_running(dev))
13374 			return -EAGAIN;
13375 
13376 		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13377 						mdio->val_in);
13378 
13379 	default:
13380 		/* do nothing */
13381 		break;
13382 	}
13383 	return -EOPNOTSUPP;
13384 }
13385 
13386 static void bnxt_get_ring_stats(struct bnxt *bp,
13387 				struct rtnl_link_stats64 *stats)
13388 {
13389 	int i;
13390 
13391 	for (i = 0; i < bp->cp_nr_rings; i++) {
13392 		struct bnxt_napi *bnapi = bp->bnapi[i];
13393 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13394 		u64 *sw = cpr->stats.sw_stats;
13395 
13396 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13397 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13398 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13399 
13400 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13401 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13402 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13403 
13404 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13405 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13406 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13407 
13408 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13409 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13410 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13411 
13412 		stats->rx_missed_errors +=
13413 			BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13414 
13415 		stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13416 
13417 		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13418 
13419 		stats->rx_dropped +=
13420 			cpr->sw_stats->rx.rx_netpoll_discards +
13421 			cpr->sw_stats->rx.rx_oom_discards;
13422 	}
13423 }
13424 
13425 static void bnxt_add_prev_stats(struct bnxt *bp,
13426 				struct rtnl_link_stats64 *stats)
13427 {
13428 	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13429 
13430 	stats->rx_packets += prev_stats->rx_packets;
13431 	stats->tx_packets += prev_stats->tx_packets;
13432 	stats->rx_bytes += prev_stats->rx_bytes;
13433 	stats->tx_bytes += prev_stats->tx_bytes;
13434 	stats->rx_missed_errors += prev_stats->rx_missed_errors;
13435 	stats->multicast += prev_stats->multicast;
13436 	stats->rx_dropped += prev_stats->rx_dropped;
13437 	stats->tx_dropped += prev_stats->tx_dropped;
13438 }
13439 
13440 static void
13441 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13442 {
13443 	struct bnxt *bp = netdev_priv(dev);
13444 
13445 	set_bit(BNXT_STATE_READ_STATS, &bp->state);
13446 	/* Make sure bnxt_close_nic() sees that we are reading stats before
13447 	 * we check the BNXT_STATE_OPEN flag.
13448 	 */
13449 	smp_mb__after_atomic();
13450 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13451 		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13452 		*stats = bp->net_stats_prev;
13453 		return;
13454 	}
13455 
13456 	bnxt_get_ring_stats(bp, stats);
13457 	bnxt_add_prev_stats(bp, stats);
13458 
13459 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
13460 		u64 *rx = bp->port_stats.sw_stats;
13461 		u64 *tx = bp->port_stats.sw_stats +
13462 			  BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13463 
13464 		stats->rx_crc_errors =
13465 			BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13466 		stats->rx_frame_errors =
13467 			BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13468 		stats->rx_length_errors =
13469 			BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13470 			BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13471 			BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13472 		stats->rx_errors =
13473 			BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13474 			BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13475 		stats->collisions =
13476 			BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13477 		stats->tx_fifo_errors =
13478 			BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13479 		stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13480 	}
13481 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13482 }
13483 
13484 static void bnxt_get_one_ring_drv_stats(struct bnxt *bp,
13485 					struct bnxt_total_ring_drv_stats *stats,
13486 					struct bnxt_cp_ring_info *cpr)
13487 {
13488 	struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13489 	u64 *hw_stats = cpr->stats.sw_stats;
13490 
13491 	stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13492 	stats->rx_total_resets += sw_stats->rx.rx_resets;
13493 	stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13494 	stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13495 	stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13496 	stats->rx_total_ring_discards +=
13497 		BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13498 	stats->rx_total_hw_gro_packets += sw_stats->rx.rx_hw_gro_packets;
13499 	stats->rx_total_hw_gro_wire_packets += sw_stats->rx.rx_hw_gro_wire_packets;
13500 	stats->tx_total_resets += sw_stats->tx.tx_resets;
13501 	stats->tx_total_ring_discards +=
13502 		BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13503 	stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13504 }
13505 
13506 void bnxt_get_ring_drv_stats(struct bnxt *bp,
13507 			     struct bnxt_total_ring_drv_stats *stats)
13508 {
13509 	int i;
13510 
13511 	for (i = 0; i < bp->cp_nr_rings; i++)
13512 		bnxt_get_one_ring_drv_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13513 }
13514 
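/* Copy the netdev multicast list into the default VNIC and return true if
 * the list has changed.  Falls back to ALL_MCAST mode if there are more
 * addresses than the multicast filter table can hold.
 */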
13515 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13516 {
13517 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13518 	struct net_device *dev = bp->dev;
13519 	struct netdev_hw_addr *ha;
13520 	u8 *haddr;
13521 	int mc_count = 0;
13522 	bool update = false;
13523 	int off = 0;
13524 
13525 	netdev_for_each_mc_addr(ha, dev) {
13526 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
13527 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13528 			vnic->mc_list_count = 0;
13529 			return false;
13530 		}
13531 		haddr = ha->addr;
13532 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13533 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13534 			update = true;
13535 		}
13536 		off += ETH_ALEN;
13537 		mc_count++;
13538 	}
13539 	if (mc_count)
13540 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13541 
13542 	if (mc_count != vnic->mc_list_count) {
13543 		vnic->mc_list_count = mc_count;
13544 		update = true;
13545 	}
13546 	return update;
13547 }
13548 
13549 static bool bnxt_uc_list_updated(struct bnxt *bp)
13550 {
13551 	struct net_device *dev = bp->dev;
13552 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13553 	struct netdev_hw_addr *ha;
13554 	int off = 0;
13555 
13556 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13557 		return true;
13558 
13559 	netdev_for_each_uc_addr(ha, dev) {
13560 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13561 			return true;
13562 
13563 		off += ETH_ALEN;
13564 	}
13565 	return false;
13566 }
13567 
13568 static void bnxt_set_rx_mode(struct net_device *dev)
13569 {
13570 	struct bnxt *bp = netdev_priv(dev);
13571 	struct bnxt_vnic_info *vnic;
13572 	bool mc_update = false;
13573 	bool uc_update;
13574 	u32 mask;
13575 
13576 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13577 		return;
13578 
13579 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13580 	mask = vnic->rx_mask;
13581 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13582 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13583 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13584 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13585 
13586 	if (dev->flags & IFF_PROMISC)
13587 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13588 
13589 	uc_update = bnxt_uc_list_updated(bp);
13590 
13591 	if (dev->flags & IFF_BROADCAST)
13592 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13593 	if (dev->flags & IFF_ALLMULTI) {
13594 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13595 		vnic->mc_list_count = 0;
13596 	} else if (dev->flags & IFF_MULTICAST) {
13597 		mc_update = bnxt_mc_list_updated(bp, &mask);
13598 	}
13599 
13600 	if (mask != vnic->rx_mask || uc_update || mc_update) {
13601 		vnic->rx_mask = mask;
13602 
13603 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13604 	}
13605 }
13606 
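/* Program the default VNIC's unicast MAC filters and RX mask into the
 * firmware.  Falls back to promiscuous or all-multicast mode when the
 * filter tables are full or the firmware rejects the multicast filters.
 */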
13607 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13608 {
13609 	struct net_device *dev = bp->dev;
13610 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13611 	struct netdev_hw_addr *ha;
13612 	int i, off = 0, rc;
13613 	bool uc_update;
13614 
13615 	netif_addr_lock_bh(dev);
13616 	uc_update = bnxt_uc_list_updated(bp);
13617 	netif_addr_unlock_bh(dev);
13618 
13619 	if (!uc_update)
13620 		goto skip_uc;
13621 
13622 	for (i = 1; i < vnic->uc_filter_count; i++) {
13623 		struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13624 
13625 		bnxt_hwrm_l2_filter_free(bp, fltr);
13626 		bnxt_del_l2_filter(bp, fltr);
13627 	}
13628 
13629 	vnic->uc_filter_count = 1;
13630 
13631 	netif_addr_lock_bh(dev);
13632 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13633 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13634 	} else {
13635 		netdev_for_each_uc_addr(ha, dev) {
13636 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13637 			off += ETH_ALEN;
13638 			vnic->uc_filter_count++;
13639 		}
13640 	}
13641 	netif_addr_unlock_bh(dev);
13642 
13643 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13644 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13645 		if (rc) {
13646 			if (BNXT_VF(bp) && rc == -ENODEV) {
13647 				if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13648 					netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13649 				else
13650 					netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13651 				rc = 0;
13652 			} else {
13653 				netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13654 			}
13655 			vnic->uc_filter_count = i;
13656 			return rc;
13657 		}
13658 	}
13659 	if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13660 		netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13661 
13662 skip_uc:
13663 	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13664 	    !bnxt_promisc_ok(bp))
13665 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13666 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13667 	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13668 		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13669 			    rc);
13670 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13671 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13672 		vnic->mc_list_count = 0;
13673 		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13674 	}
13675 	if (rc)
13676 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13677 			   rc);
13678 
13679 	return rc;
13680 }
13681 
13682 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13683 {
13684 #ifdef CONFIG_BNXT_SRIOV
13685 	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13686 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13687 
13688 		/* No minimum rings were provisioned by the PF.  Don't
13689 		 * reserve rings by default when device is down.
13690 		 */
13691 		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13692 			return true;
13693 
13694 		if (!netif_running(bp->dev))
13695 			return false;
13696 	}
13697 #endif
13698 	return true;
13699 }
13700 
13701 /* If the chip and firmware support RFS */
13702 static bool bnxt_rfs_supported(struct bnxt *bp)
13703 {
13704 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13705 		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13706 			return true;
13707 		return false;
13708 	}
13709 	/* 212 firmware is broken for aRFS */
13710 	if (BNXT_FW_MAJ(bp) == 212)
13711 		return false;
13712 	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13713 		return true;
13714 	if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13715 		return true;
13716 	return false;
13717 }
13718 
13719 /* If runtime conditions support RFS */
13720 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13721 {
13722 	struct bnxt_hw_rings hwr = {0};
13723 	int max_vnics, max_rss_ctxs;
13724 
13725 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13726 	    !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13727 		return bnxt_rfs_supported(bp);
13728 
13729 	if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13730 		return false;
13731 
13732 	hwr.grp = bp->rx_nr_rings;
13733 	hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13734 	if (new_rss_ctx)
13735 		hwr.vnic++;
13736 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13737 	max_vnics = bnxt_get_max_func_vnics(bp);
13738 	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13739 
13740 	if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13741 		if (bp->rx_nr_rings > 1)
13742 			netdev_warn(bp->dev,
13743 				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13744 				    min(max_rss_ctxs - 1, max_vnics - 1));
13745 		return false;
13746 	}
13747 
13748 	if (!BNXT_NEW_RM(bp))
13749 		return true;
13750 
13751 	/* Do not reduce VNIC and RSS ctx reservations.  There is a FW
13752 	 * issue that will mess up the default VNIC if we reduce the
13753 	 * reservations.
13754 	 */
13755 	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13756 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13757 		return true;
13758 
13759 	bnxt_hwrm_reserve_rings(bp, &hwr);
13760 	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13761 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13762 		return true;
13763 
13764 	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13765 	hwr.vnic = 1;
13766 	hwr.rss_ctx = 0;
13767 	bnxt_hwrm_reserve_rings(bp, &hwr);
13768 	return false;
13769 }
13770 
13771 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13772 					   netdev_features_t features)
13773 {
13774 	struct bnxt *bp = netdev_priv(dev);
13775 	netdev_features_t vlan_features;
13776 
13777 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13778 		features &= ~NETIF_F_NTUPLE;
13779 
13780 	if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13781 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13782 
13783 	if (!(features & NETIF_F_GRO))
13784 		features &= ~NETIF_F_GRO_HW;
13785 
13786 	if (features & NETIF_F_GRO_HW)
13787 		features &= ~NETIF_F_LRO;
13788 
13789 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
13790 	 * turned on or off together.
13791 	 */
13792 	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13793 	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13794 		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13795 			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13796 		else if (vlan_features)
13797 			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13798 	}
13799 #ifdef CONFIG_BNXT_SRIOV
13800 	if (BNXT_VF(bp) && bp->vf.vlan)
13801 		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13802 #endif
13803 	return features;
13804 }
13805 
13806 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13807 				bool link_re_init, u32 flags, bool update_tpa)
13808 {
13809 	bnxt_close_nic(bp, irq_re_init, link_re_init);
13810 	bp->flags = flags;
13811 	if (update_tpa)
13812 		bnxt_set_ring_params(bp);
13813 	return bnxt_open_nic(bp, irq_re_init, link_re_init);
13814 }
13815 
13816 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13817 {
13818 	bool update_tpa = false, update_ntuple = false;
13819 	struct bnxt *bp = netdev_priv(dev);
13820 	u32 flags = bp->flags;
13821 	u32 changes;
13822 	int rc = 0;
13823 	bool re_init = false;
13824 
13825 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13826 	if (features & NETIF_F_GRO_HW)
13827 		flags |= BNXT_FLAG_GRO;
13828 	else if (features & NETIF_F_LRO)
13829 		flags |= BNXT_FLAG_LRO;
13830 
13831 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13832 		flags &= ~BNXT_FLAG_TPA;
13833 
13834 	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13835 		flags |= BNXT_FLAG_STRIP_VLAN;
13836 
13837 	if (features & NETIF_F_NTUPLE)
13838 		flags |= BNXT_FLAG_RFS;
13839 	else
13840 		bnxt_clear_usr_fltrs(bp, true);
13841 
13842 	changes = flags ^ bp->flags;
13843 	if (changes & BNXT_FLAG_TPA) {
13844 		update_tpa = true;
13845 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13846 		    (flags & BNXT_FLAG_TPA) == 0 ||
13847 		    (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13848 			re_init = true;
13849 	}
13850 
13851 	if (changes & ~BNXT_FLAG_TPA)
13852 		re_init = true;
13853 
13854 	if (changes & BNXT_FLAG_RFS)
13855 		update_ntuple = true;
13856 
13857 	if (flags != bp->flags) {
13858 		u32 old_flags = bp->flags;
13859 
13860 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13861 			bp->flags = flags;
13862 			if (update_tpa)
13863 				bnxt_set_ring_params(bp);
13864 			return rc;
13865 		}
13866 
13867 		if (update_ntuple)
13868 			return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13869 
13870 		if (re_init)
13871 			return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13872 
13873 		if (update_tpa) {
13874 			bp->flags = flags;
13875 			rc = bnxt_set_tpa(bp,
13876 					  (flags & BNXT_FLAG_TPA) ?
13877 					  true : false);
13878 			if (rc)
13879 				bp->flags = old_flags;
13880 		}
13881 	}
13882 	return rc;
13883 }
13884 
13885 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13886 			      u8 **nextp)
13887 {
13888 	struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13889 	int hdr_count = 0;
13890 	u8 *nexthdr;
13891 	int start;
13892 
13893 	/* Check that there are at most 2 IPv6 extension headers, no
13894 	 * fragment header, and each is <= 64 bytes.
13895 	 */
13896 	start = nw_off + sizeof(*ip6h);
13897 	nexthdr = &ip6h->nexthdr;
13898 	while (ipv6_ext_hdr(*nexthdr)) {
13899 		struct ipv6_opt_hdr *hp;
13900 		int hdrlen;
13901 
13902 		if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13903 		    *nexthdr == NEXTHDR_FRAGMENT)
13904 			return false;
13905 		hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13906 					  skb_headlen(skb), NULL);
13907 		if (!hp)
13908 			return false;
13909 		if (*nexthdr == NEXTHDR_AUTH)
13910 			hdrlen = ipv6_authlen(hp);
13911 		else
13912 			hdrlen = ipv6_optlen(hp);
13913 
13914 		if (hdrlen > 64)
13915 			return false;
13916 
13917 		hdr_count++;
13918 		nexthdr = &hp->nexthdr;
13919 		start += hdrlen;
13920 	}
13921 	if (nextp) {
13922 		/* Caller will check inner protocol */
13923 		if (skb->encapsulation) {
13924 			*nextp = nexthdr;
13925 			return true;
13926 		}
13927 		*nextp = NULL;
13928 	}
13929 	/* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13930 	return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13931 }
13932 
13933 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
13934 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13935 {
13936 	struct udphdr *uh = udp_hdr(skb);
13937 	__be16 udp_port = uh->dest;
13938 
13939 	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13940 	    udp_port != bp->vxlan_gpe_port)
13941 		return false;
13942 	if (skb->inner_protocol == htons(ETH_P_TEB)) {
13943 		struct ethhdr *eh = inner_eth_hdr(skb);
13944 
13945 		switch (eh->h_proto) {
13946 		case htons(ETH_P_IP):
13947 			return true;
13948 		case htons(ETH_P_IPV6):
13949 			return bnxt_exthdr_check(bp, skb,
13950 						 skb_inner_network_offset(skb),
13951 						 NULL);
13952 		}
13953 	} else if (skb->inner_protocol == htons(ETH_P_IP)) {
13954 		return true;
13955 	} else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13956 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13957 					 NULL);
13958 	}
13959 	return false;
13960 }
13961 
13962 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13963 {
13964 	switch (l4_proto) {
13965 	case IPPROTO_UDP:
13966 		return bnxt_udp_tunl_check(bp, skb);
13967 	case IPPROTO_IPIP:
13968 		return true;
13969 	case IPPROTO_GRE: {
13970 		switch (skb->inner_protocol) {
13971 		default:
13972 			return false;
13973 		case htons(ETH_P_IP):
13974 			return true;
13975 		case htons(ETH_P_IPV6):
13976 			fallthrough;
13977 		}
13978 	}
13979 	case IPPROTO_IPV6:
13980 		/* Check ext headers of inner ipv6 */
13981 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13982 					 NULL);
13983 	}
13984 	return false;
13985 }
13986 
13987 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13988 					     struct net_device *dev,
13989 					     netdev_features_t features)
13990 {
13991 	struct bnxt *bp = netdev_priv(dev);
13992 	u8 *l4_proto;
13993 
13994 	features = vlan_features_check(skb, features);
13995 	switch (vlan_get_protocol(skb)) {
13996 	case htons(ETH_P_IP):
13997 		if (!skb->encapsulation)
13998 			return features;
13999 		l4_proto = &ip_hdr(skb)->protocol;
14000 		if (bnxt_tunl_check(bp, skb, *l4_proto))
14001 			return features;
14002 		break;
14003 	case htons(ETH_P_IPV6):
14004 		if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
14005 				       &l4_proto))
14006 			break;
14007 		if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
14008 			return features;
14009 		break;
14010 	}
14011 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
14012 }
14013 
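/* Read num_words 32-bit registers starting at reg_off through the firmware
 * using HWRM_DBG_READ_DIRECT and a DMA bounce buffer.
 */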
14014 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
14015 			 u32 *reg_buf)
14016 {
14017 	struct hwrm_dbg_read_direct_output *resp;
14018 	struct hwrm_dbg_read_direct_input *req;
14019 	__le32 *dbg_reg_buf;
14020 	dma_addr_t mapping;
14021 	int rc, i;
14022 
14023 	rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
14024 	if (rc)
14025 		return rc;
14026 
14027 	dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
14028 					 &mapping);
14029 	if (!dbg_reg_buf) {
14030 		rc = -ENOMEM;
14031 		goto dbg_rd_reg_exit;
14032 	}
14033 
14034 	req->host_dest_addr = cpu_to_le64(mapping);
14035 
14036 	resp = hwrm_req_hold(bp, req);
14037 	req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
14038 	req->read_len32 = cpu_to_le32(num_words);
14039 
14040 	rc = hwrm_req_send(bp, req);
14041 	if (rc || resp->error_code) {
14042 		rc = -EIO;
14043 		goto dbg_rd_reg_exit;
14044 	}
14045 	for (i = 0; i < num_words; i++)
14046 		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
14047 
14048 dbg_rd_reg_exit:
14049 	hwrm_req_drop(bp, req);
14050 	return rc;
14051 }
14052 
14053 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
14054 				       u32 ring_id, u32 *prod, u32 *cons)
14055 {
14056 	struct hwrm_dbg_ring_info_get_output *resp;
14057 	struct hwrm_dbg_ring_info_get_input *req;
14058 	int rc;
14059 
14060 	rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
14061 	if (rc)
14062 		return rc;
14063 
14064 	req->ring_type = ring_type;
14065 	req->fw_ring_id = cpu_to_le32(ring_id);
14066 	resp = hwrm_req_hold(bp, req);
14067 	rc = hwrm_req_send(bp, req);
14068 	if (!rc) {
14069 		*prod = le32_to_cpu(resp->producer_index);
14070 		*cons = le32_to_cpu(resp->consumer_index);
14071 	}
14072 	hwrm_req_drop(bp, req);
14073 	return rc;
14074 }
14075 
14076 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
14077 {
14078 	struct bnxt_tx_ring_info *txr;
14079 	int i = bnapi->index, j;
14080 
14081 	bnxt_for_each_napi_tx(j, bnapi, txr)
14082 		netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14083 			    i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14084 			    txr->tx_cons);
14085 }
14086 
14087 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14088 {
14089 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14090 	int i = bnapi->index;
14091 
14092 	if (!rxr)
14093 		return;
14094 
14095 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14096 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14097 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14098 		    rxr->rx_sw_agg_prod);
14099 }
14100 
14101 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14102 {
14103 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
14104 	int i = bnapi->index, j;
14105 
14106 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14107 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14108 	for (j = 0; j < cpr->cp_ring_count; j++) {
14109 		cpr2 = &cpr->cp_ring_arr[j];
14110 		if (!cpr2->bnapi)
14111 			continue;
14112 		netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
14113 			    i, j, cpr2->cp_ring_struct.fw_ring_id,
14114 			    cpr2->cp_raw_cons);
14115 	}
14116 }
14117 
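/* Log the software state of all TX, RX and completion rings when
 * NETIF_MSG_DRV is enabled; called before a non-silent reset.
 */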
14118 static void bnxt_dbg_dump_states(struct bnxt *bp)
14119 {
14120 	int i;
14121 	struct bnxt_napi *bnapi;
14122 
14123 	for (i = 0; i < bp->cp_nr_rings; i++) {
14124 		bnapi = bp->bnapi[i];
14125 		if (netif_msg_drv(bp)) {
14126 			bnxt_dump_tx_sw_state(bnapi);
14127 			bnxt_dump_rx_sw_state(bnapi);
14128 			bnxt_dump_cp_sw_state(bnapi);
14129 		}
14130 	}
14131 }
14132 
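/* Ask firmware to reset the RX ring group for ring @ring_nr.  The request is
 * sent silently so the caller can fall back to a full reset on failure.
 */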
14133 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14134 {
14135 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14136 	struct hwrm_ring_reset_input *req;
14137 	struct bnxt_napi *bnapi = rxr->bnapi;
14138 	struct bnxt_cp_ring_info *cpr;
14139 	u16 cp_ring_id;
14140 	int rc;
14141 
14142 	rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14143 	if (rc)
14144 		return rc;
14145 
14146 	cpr = &bnapi->cp_ring;
14147 	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14148 	req->cmpl_ring = cpu_to_le16(cp_ring_id);
14149 	req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14150 	req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14151 	return hwrm_req_send_silent(bp, req);
14152 }
14153 
14154 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14155 {
14156 	if (!silent)
14157 		bnxt_dbg_dump_states(bp);
14158 	if (netif_running(bp->dev)) {
14159 		bnxt_close_nic(bp, !silent, false);
14160 		bnxt_open_nic(bp, !silent, false);
14161 	}
14162 }
14163 
14164 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14165 {
14166 	struct bnxt *bp = netdev_priv(dev);
14167 
14168 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14169 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14170 }
14171 
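/* Periodic firmware health check run from bnxt_timer(): if the heartbeat
 * register stops advancing or the reset counter changes unexpectedly,
 * queue BNXT_FW_EXCEPTION_SP_EVENT to start recovery.
 */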
14172 static void bnxt_fw_health_check(struct bnxt *bp)
14173 {
14174 	struct bnxt_fw_health *fw_health = bp->fw_health;
14175 	struct pci_dev *pdev = bp->pdev;
14176 	u32 val;
14177 
14178 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14179 		return;
14180 
14181 	/* Make sure it is enabled before checking the tmr_counter. */
14182 	smp_rmb();
14183 	if (fw_health->tmr_counter) {
14184 		fw_health->tmr_counter--;
14185 		return;
14186 	}
14187 
14188 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14189 	if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14190 		fw_health->arrests++;
14191 		goto fw_reset;
14192 	}
14193 
14194 	fw_health->last_fw_heartbeat = val;
14195 
14196 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14197 	if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14198 		fw_health->discoveries++;
14199 		goto fw_reset;
14200 	}
14201 
14202 	fw_health->tmr_counter = fw_health->tmr_multiplier;
14203 	return;
14204 
14205 fw_reset:
14206 	bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14207 }
14208 
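/* Periodic driver timer: queues slow-path work (stats, PHY retries, NTP
 * filter aging, firmware health checks) and re-arms itself every
 * bp->current_interval jiffies.
 */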
14209 static void bnxt_timer(struct timer_list *t)
14210 {
14211 	struct bnxt *bp = timer_container_of(bp, t, timer);
14212 	struct net_device *dev = bp->dev;
14213 
14214 	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14215 		return;
14216 
14217 	if (atomic_read(&bp->intr_sem) != 0)
14218 		goto bnxt_restart_timer;
14219 
14220 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14221 		bnxt_fw_health_check(bp);
14222 
14223 	if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14224 		bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14225 
14226 	if (bnxt_tc_flower_enabled(bp))
14227 		bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14228 
14229 #ifdef CONFIG_RFS_ACCEL
14230 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14231 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14232 #endif /*CONFIG_RFS_ACCEL*/
14233 
14234 	if (bp->link_info.phy_retry) {
14235 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14236 			bp->link_info.phy_retry = false;
14237 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14238 		} else {
14239 			bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14240 		}
14241 	}
14242 
14243 	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14244 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14245 
14246 	if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14247 		bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14248 
14249 bnxt_restart_timer:
14250 	mod_timer(&bp->timer, jiffies + bp->current_interval);
14251 }
14252 
14253 static void bnxt_lock_sp(struct bnxt *bp)
14254 {
14255 	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14256 	 * set.  If the device is being closed, bnxt_close() may be holding
14257 	 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14258 	 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14259 	 * instance lock.
14260 	 */
14261 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14262 	netdev_lock(bp->dev);
14263 }
14264 
14265 static void bnxt_unlock_sp(struct bnxt *bp)
14266 {
14267 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14268 	netdev_unlock(bp->dev);
14269 }
14270 
14271 /* Only called from bnxt_sp_task() */
14272 static void bnxt_reset(struct bnxt *bp, bool silent)
14273 {
14274 	bnxt_lock_sp(bp);
14275 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
14276 		bnxt_reset_task(bp, silent);
14277 	bnxt_unlock_sp(bp);
14278 }
14279 
14280 /* Only called from bnxt_sp_task() */
14281 static void bnxt_rx_ring_reset(struct bnxt *bp)
14282 {
14283 	int i;
14284 
14285 	bnxt_lock_sp(bp);
14286 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14287 		bnxt_unlock_sp(bp);
14288 		return;
14289 	}
14290 	/* Disable and flush TPA before resetting the RX ring */
14291 	if (bp->flags & BNXT_FLAG_TPA)
14292 		bnxt_set_tpa(bp, false);
14293 	for (i = 0; i < bp->rx_nr_rings; i++) {
14294 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14295 		struct bnxt_cp_ring_info *cpr;
14296 		int rc;
14297 
14298 		if (!rxr->bnapi->in_reset)
14299 			continue;
14300 
14301 		rc = bnxt_hwrm_rx_ring_reset(bp, i);
14302 		if (rc) {
14303 			if (rc == -EINVAL || rc == -EOPNOTSUPP)
14304 				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14305 			else
14306 				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14307 					    rc);
14308 			bnxt_reset_task(bp, true);
14309 			break;
14310 		}
14311 		bnxt_free_one_rx_ring_skbs(bp, rxr);
14312 		rxr->rx_prod = 0;
14313 		rxr->rx_agg_prod = 0;
14314 		rxr->rx_sw_agg_prod = 0;
14315 		rxr->rx_next_cons = 0;
14316 		rxr->bnapi->in_reset = false;
14317 		bnxt_alloc_one_rx_ring(bp, i);
14318 		cpr = &rxr->bnapi->cp_ring;
14319 		cpr->sw_stats->rx.rx_resets++;
14320 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
14321 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14322 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14323 	}
14324 	if (bp->flags & BNXT_FLAG_TPA)
14325 		bnxt_set_tpa(bp, true);
14326 	bnxt_unlock_sp(bp);
14327 }
14328 
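/* Quiesce TX, NAPI and interrupts and disable the PCI device so that no
 * further DMA is issued while firmware is in a fatal state.
 */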
14329 static void bnxt_fw_fatal_close(struct bnxt *bp)
14330 {
14331 	bnxt_tx_disable(bp);
14332 	bnxt_disable_napi(bp);
14333 	bnxt_disable_int_sync(bp);
14334 	bnxt_free_irq(bp);
14335 	bnxt_clear_int_mode(bp);
14336 	pci_disable_device(bp->pdev);
14337 }
14338 
14339 static void bnxt_fw_reset_close(struct bnxt *bp)
14340 {
14341 	/* When firmware is in fatal state, quiesce device and disable
14342 	 * bus master to prevent any potential bad DMAs before freeing
14343 	 * kernel memory.
14344 	 */
14345 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14346 		u16 val = 0;
14347 
14348 		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14349 		if (val == 0xffff)
14350 			bp->fw_reset_min_dsecs = 0;
14351 		bnxt_fw_fatal_close(bp);
14352 	}
14353 	__bnxt_close_nic(bp, true, false);
14354 	bnxt_vf_reps_free(bp);
14355 	bnxt_clear_int_mode(bp);
14356 	bnxt_hwrm_func_drv_unrgtr(bp);
14357 	if (pci_is_enabled(bp->pdev))
14358 		pci_disable_device(bp->pdev);
14359 	bnxt_free_ctx_mem(bp, false);
14360 }
14361 
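/* Return true if firmware has recovered on its own: the reset counter has
 * changed and the heartbeat register is advancing again.
 */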
14362 static bool is_bnxt_fw_ok(struct bnxt *bp)
14363 {
14364 	struct bnxt_fw_health *fw_health = bp->fw_health;
14365 	bool no_heartbeat = false, has_reset = false;
14366 	u32 val;
14367 
14368 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14369 	if (val == fw_health->last_fw_heartbeat)
14370 		no_heartbeat = true;
14371 
14372 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14373 	if (val != fw_health->last_fw_reset_cnt)
14374 		has_reset = true;
14375 
14376 	if (!no_heartbeat && has_reset)
14377 		return true;
14378 
14379 	return false;
14380 }
14381 
14382 /* netdev instance lock is acquired before calling this function */
14383 static void bnxt_force_fw_reset(struct bnxt *bp)
14384 {
14385 	struct bnxt_fw_health *fw_health = bp->fw_health;
14386 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14387 	u32 wait_dsecs;
14388 
14389 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14390 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14391 		return;
14392 
14393 	/* we have to serialize with bnxt_refclk_read() */
14394 	if (ptp) {
14395 		unsigned long flags;
14396 
14397 		write_seqlock_irqsave(&ptp->ptp_lock, flags);
14398 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14399 		write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14400 	} else {
14401 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14402 	}
14403 	bnxt_fw_reset_close(bp);
14404 	wait_dsecs = fw_health->master_func_wait_dsecs;
14405 	if (fw_health->primary) {
14406 		if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14407 			wait_dsecs = 0;
14408 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14409 	} else {
14410 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14411 		wait_dsecs = fw_health->normal_func_wait_dsecs;
14412 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14413 	}
14414 
14415 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14416 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14417 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14418 }
14419 
14420 void bnxt_fw_exception(struct bnxt *bp)
14421 {
14422 	netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14423 	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14424 	bnxt_ulp_stop(bp);
14425 	bnxt_lock_sp(bp);
14426 	bnxt_force_fw_reset(bp);
14427 	bnxt_unlock_sp(bp);
14428 }
14429 
14430 /* Returns the number of registered VFs, 1 if VF configuration is still in
14431  * progress, 0 if there are none, or a negative errno on failure.
14432  */
14433 static int bnxt_get_registered_vfs(struct bnxt *bp)
14434 {
14435 #ifdef CONFIG_BNXT_SRIOV
14436 	int rc;
14437 
14438 	if (!BNXT_PF(bp))
14439 		return 0;
14440 
14441 	rc = bnxt_hwrm_func_qcfg(bp);
14442 	if (rc) {
14443 		netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14444 		return rc;
14445 	}
14446 	if (bp->pf.registered_vfs)
14447 		return bp->pf.registered_vfs;
14448 	if (bp->sriov_cfg)
14449 		return 1;
14450 #endif
14451 	return 0;
14452 }
14453 
14454 void bnxt_fw_reset(struct bnxt *bp)
14455 {
14456 	bnxt_ulp_stop(bp);
14457 	bnxt_lock_sp(bp);
14458 	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14459 	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14460 		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14461 		int n = 0, tmo;
14462 
14463 		/* we have to serialize with bnxt_refclk_read() */
14464 		if (ptp) {
14465 			unsigned long flags;
14466 
14467 			write_seqlock_irqsave(&ptp->ptp_lock, flags);
14468 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14469 			write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14470 		} else {
14471 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14472 		}
14473 		if (bp->pf.active_vfs &&
14474 		    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14475 			n = bnxt_get_registered_vfs(bp);
14476 		if (n < 0) {
14477 			netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14478 				   n);
14479 			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14480 			netif_close(bp->dev);
14481 			goto fw_reset_exit;
14482 		} else if (n > 0) {
14483 			u16 vf_tmo_dsecs = n * 10;
14484 
14485 			if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14486 				bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14487 			bp->fw_reset_state =
14488 				BNXT_FW_RESET_STATE_POLL_VF;
14489 			bnxt_queue_fw_reset_work(bp, HZ / 10);
14490 			goto fw_reset_exit;
14491 		}
14492 		bnxt_fw_reset_close(bp);
14493 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14494 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14495 			tmo = HZ / 10;
14496 		} else {
14497 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14498 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
14499 		}
14500 		bnxt_queue_fw_reset_work(bp, tmo);
14501 	}
14502 fw_reset_exit:
14503 	bnxt_unlock_sp(bp);
14504 }
14505 
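/* P5+ chips only: find completion rings that have work pending but whose
 * raw consumer index has not moved since the last check, read the ring
 * state from firmware for diagnostics and count a missed IRQ.
 */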
14506 static void bnxt_chk_missed_irq(struct bnxt *bp)
14507 {
14508 	int i;
14509 
14510 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14511 		return;
14512 
14513 	for (i = 0; i < bp->cp_nr_rings; i++) {
14514 		struct bnxt_napi *bnapi = bp->bnapi[i];
14515 		struct bnxt_cp_ring_info *cpr;
14516 		u32 fw_ring_id;
14517 		int j;
14518 
14519 		if (!bnapi)
14520 			continue;
14521 
14522 		cpr = &bnapi->cp_ring;
14523 		for (j = 0; j < cpr->cp_ring_count; j++) {
14524 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14525 			u32 val[2];
14526 
14527 			if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14528 				continue;
14529 
14530 			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14531 				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14532 				continue;
14533 			}
14534 			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14535 			bnxt_dbg_hwrm_ring_info_get(bp,
14536 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14537 				fw_ring_id, &val[0], &val[1]);
14538 			cpr->sw_stats->cmn.missed_irqs++;
14539 		}
14540 	}
14541 }
14542 
14543 static void bnxt_cfg_ntp_filters(struct bnxt *);
14544 
14545 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14546 {
14547 	struct bnxt_link_info *link_info = &bp->link_info;
14548 
14549 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14550 		link_info->autoneg = BNXT_AUTONEG_SPEED;
14551 		if (bp->hwrm_spec_code >= 0x10201) {
14552 			if (link_info->auto_pause_setting &
14553 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14554 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14555 		} else {
14556 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14557 		}
14558 		bnxt_set_auto_speed(link_info);
14559 	} else {
14560 		bnxt_set_force_speed(link_info);
14561 		link_info->req_duplex = link_info->duplex_setting;
14562 	}
14563 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14564 		link_info->req_flow_ctrl =
14565 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14566 	else
14567 		link_info->req_flow_ctrl = link_info->force_pause_setting;
14568 }
14569 
14570 static void bnxt_fw_echo_reply(struct bnxt *bp)
14571 {
14572 	struct bnxt_fw_health *fw_health = bp->fw_health;
14573 	struct hwrm_func_echo_response_input *req;
14574 	int rc;
14575 
14576 	rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14577 	if (rc)
14578 		return;
14579 	req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14580 	req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14581 	hwrm_req_send(bp, req);
14582 }
14583 
14584 static void bnxt_ulp_restart(struct bnxt *bp)
14585 {
14586 	bnxt_ulp_stop(bp);
14587 	bnxt_ulp_start(bp, 0);
14588 }
14589 
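/* Slow-path workqueue handler: services the sp_event bits queued by
 * bnxt_queue_sp_work().  The reset handlers must run last because they
 * clear BNXT_STATE_IN_SP_TASK.
 */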
14590 static void bnxt_sp_task(struct work_struct *work)
14591 {
14592 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14593 
14594 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14595 	smp_mb__after_atomic();
14596 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14597 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14598 		return;
14599 	}
14600 
14601 	if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14602 		bnxt_ulp_restart(bp);
14603 		bnxt_reenable_sriov(bp);
14604 	}
14605 
14606 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14607 		bnxt_cfg_rx_mode(bp);
14608 
14609 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14610 		bnxt_cfg_ntp_filters(bp);
14611 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14612 		bnxt_hwrm_exec_fwd_req(bp);
14613 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14614 		netdev_info(bp->dev, "Received PF driver unload event!\n");
14615 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14616 		bnxt_hwrm_port_qstats(bp, 0);
14617 		bnxt_hwrm_port_qstats_ext(bp, 0);
14618 		bnxt_accumulate_all_stats(bp);
14619 	}
14620 
14621 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14622 		int rc;
14623 
14624 		mutex_lock(&bp->link_lock);
14625 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14626 				       &bp->sp_event))
14627 			bnxt_hwrm_phy_qcaps(bp);
14628 
14629 		rc = bnxt_update_link(bp, true);
14630 		if (rc)
14631 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14632 				   rc);
14633 
14634 		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14635 				       &bp->sp_event))
14636 			bnxt_init_ethtool_link_settings(bp);
14637 		mutex_unlock(&bp->link_lock);
14638 	}
14639 	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14640 		int rc;
14641 
14642 		mutex_lock(&bp->link_lock);
14643 		rc = bnxt_update_phy_setting(bp);
14644 		mutex_unlock(&bp->link_lock);
14645 		if (rc) {
14646 			netdev_warn(bp->dev, "update phy settings retry failed\n");
14647 		} else {
14648 			bp->link_info.phy_retry = false;
14649 			netdev_info(bp->dev, "update phy settings retry succeeded\n");
14650 		}
14651 	}
14652 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14653 		mutex_lock(&bp->link_lock);
14654 		bnxt_get_port_module_status(bp);
14655 		mutex_unlock(&bp->link_lock);
14656 	}
14657 
14658 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14659 		bnxt_tc_flow_stats_work(bp);
14660 
14661 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14662 		bnxt_chk_missed_irq(bp);
14663 
14664 	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14665 		bnxt_fw_echo_reply(bp);
14666 
14667 	if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14668 		bnxt_hwmon_notify_event(bp);
14669 
14670 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
14671 	 * must be the last functions to be called before exiting.
14672 	 */
14673 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14674 		bnxt_reset(bp, false);
14675 
14676 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14677 		bnxt_reset(bp, true);
14678 
14679 	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14680 		bnxt_rx_ring_reset(bp);
14681 
14682 	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14683 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14684 		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14685 			bnxt_devlink_health_fw_report(bp);
14686 		else
14687 			bnxt_fw_reset(bp);
14688 	}
14689 
14690 	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14691 		if (!is_bnxt_fw_ok(bp))
14692 			bnxt_devlink_health_fw_report(bp);
14693 	}
14694 
14695 	smp_mb__before_atomic();
14696 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14697 }
14698 
14699 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14700 				int *max_cp);
14701 
14702 /* Under netdev instance lock */
14703 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14704 		     int tx_xdp)
14705 {
14706 	int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14707 	struct bnxt_hw_rings hwr = {0};
14708 	int rx_rings = rx;
14709 	int rc;
14710 
14711 	if (tcs)
14712 		tx_sets = tcs;
14713 
14714 	_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14715 
14716 	if (max_rx < rx_rings)
14717 		return -ENOMEM;
14718 
14719 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
14720 		rx_rings <<= 1;
14721 
14722 	hwr.rx = rx_rings;
14723 	hwr.tx = tx * tx_sets + tx_xdp;
14724 	if (max_tx < hwr.tx)
14725 		return -ENOMEM;
14726 
14727 	hwr.vnic = bnxt_get_total_vnics(bp, rx);
14728 
14729 	tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14730 	hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14731 	if (max_cp < hwr.cp)
14732 		return -ENOMEM;
14733 	hwr.stat = hwr.cp;
14734 	if (BNXT_NEW_RM(bp)) {
14735 		hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14736 		hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14737 		hwr.grp = rx;
14738 		hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14739 	}
14740 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14741 		hwr.cp_p5 = hwr.tx + rx;
14742 	rc = bnxt_hwrm_check_rings(bp, &hwr);
14743 	if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14744 		if (!bnxt_ulp_registered(bp->edev)) {
14745 			hwr.cp += bnxt_get_ulp_msix_num(bp);
14746 			hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14747 		}
14748 		if (hwr.cp > bp->total_irqs) {
14749 			int total_msix = bnxt_change_msix(bp, hwr.cp);
14750 
14751 			if (total_msix < hwr.cp) {
14752 				netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14753 					    hwr.cp, total_msix);
14754 				rc = -ENOSPC;
14755 			}
14756 		}
14757 	}
14758 	return rc;
14759 }
14760 
14761 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14762 {
14763 	if (bp->bar2) {
14764 		pci_iounmap(pdev, bp->bar2);
14765 		bp->bar2 = NULL;
14766 	}
14767 
14768 	if (bp->bar1) {
14769 		pci_iounmap(pdev, bp->bar1);
14770 		bp->bar1 = NULL;
14771 	}
14772 
14773 	if (bp->bar0) {
14774 		pci_iounmap(pdev, bp->bar0);
14775 		bp->bar0 = NULL;
14776 	}
14777 }
14778 
14779 static void bnxt_cleanup_pci(struct bnxt *bp)
14780 {
14781 	bnxt_unmap_bars(bp, bp->pdev);
14782 	pci_release_regions(bp->pdev);
14783 	if (pci_is_enabled(bp->pdev))
14784 		pci_disable_device(bp->pdev);
14785 }
14786 
14787 static void bnxt_init_dflt_coal(struct bnxt *bp)
14788 {
14789 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14790 	struct bnxt_coal *coal;
14791 	u16 flags = 0;
14792 
14793 	if (coal_cap->cmpl_params &
14794 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14795 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14796 
14797 	/* Tick values in microseconds.
14798 	 * 1 coal_buf x bufs_per_record = 1 completion record.
14799 	 */
14800 	coal = &bp->rx_coal;
14801 	coal->coal_ticks = 10;
14802 	coal->coal_bufs = 30;
14803 	coal->coal_ticks_irq = 1;
14804 	coal->coal_bufs_irq = 2;
14805 	coal->idle_thresh = 50;
14806 	coal->bufs_per_record = 2;
14807 	coal->budget = 64;		/* NAPI budget */
14808 	coal->flags = flags;
14809 
14810 	coal = &bp->tx_coal;
14811 	coal->coal_ticks = 28;
14812 	coal->coal_bufs = 30;
14813 	coal->coal_ticks_irq = 2;
14814 	coal->coal_bufs_irq = 2;
14815 	coal->bufs_per_record = 1;
14816 	coal->flags = flags;
14817 
14818 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14819 }
14820 
14821 /* FW that pre-reserves 1 VNIC per function */
14822 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14823 {
14824 	u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14825 
14826 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14827 	    (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14828 		return true;
14829 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14830 	    (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14831 		return true;
14832 	return false;
14833 }
14834 
14835 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14836 {
14837 	struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14838 	struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14839 	int rc;
14840 
14841 	bp->max_pfcwd_tmo_ms = 0;
14842 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14843 	if (rc)
14844 		return;
14845 	resp = hwrm_req_hold(bp, req);
14846 	rc = hwrm_req_send_silent(bp, req);
14847 	if (!rc)
14848 		bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14849 	hwrm_req_drop(bp, req);
14850 }
14851 
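/* Firmware init phase 1: get the firmware version (recovering the firmware
 * first if it is unresponsive), reset the function and sync firmware time.
 */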
14852 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14853 {
14854 	int rc;
14855 
14856 	bp->fw_cap = 0;
14857 	rc = bnxt_hwrm_ver_get(bp);
14858 	/* FW may be unresponsive after FLR. FLR must complete within 100 msec
14859 	 * so wait before continuing with recovery.
14860 	 */
14861 	if (rc)
14862 		msleep(100);
14863 	bnxt_try_map_fw_health_reg(bp);
14864 	if (rc) {
14865 		rc = bnxt_try_recover_fw(bp);
14866 		if (rc)
14867 			return rc;
14868 		rc = bnxt_hwrm_ver_get(bp);
14869 		if (rc)
14870 			return rc;
14871 	}
14872 
14873 	bnxt_nvm_cfg_ver_get(bp);
14874 
14875 	rc = bnxt_hwrm_func_reset(bp);
14876 	if (rc)
14877 		return -ENODEV;
14878 
14879 	bnxt_hwrm_fw_set_time(bp);
14880 	return 0;
14881 }
14882 
14883 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14884 {
14885 	int rc;
14886 
14887 	/* Get the MAX capabilities for this function */
14888 	rc = bnxt_hwrm_func_qcaps(bp);
14889 	if (rc) {
14890 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14891 			   rc);
14892 		return -ENODEV;
14893 	}
14894 
14895 	rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14896 	if (rc)
14897 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14898 			    rc);
14899 
14900 	if (bnxt_alloc_fw_health(bp)) {
14901 		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14902 	} else {
14903 		rc = bnxt_hwrm_error_recovery_qcfg(bp);
14904 		if (rc)
14905 			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14906 				    rc);
14907 	}
14908 
14909 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14910 	if (rc)
14911 		return -ENODEV;
14912 
14913 	rc = bnxt_alloc_crash_dump_mem(bp);
14914 	if (rc)
14915 		netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14916 			    rc);
14917 	if (!rc) {
14918 		rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14919 		if (rc) {
14920 			bnxt_free_crash_dump_mem(bp);
14921 			netdev_warn(bp->dev,
14922 				    "hwrm crash dump mem failure rc: %d\n", rc);
14923 		}
14924 	}
14925 
14926 	if (bnxt_fw_pre_resv_vnics(bp))
14927 		bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14928 
14929 	bnxt_hwrm_pfcwd_qcaps(bp);
14930 	bnxt_hwrm_func_qcfg(bp);
14931 	bnxt_hwrm_vnic_qcaps(bp);
14932 	bnxt_hwrm_port_led_qcaps(bp);
14933 	bnxt_ethtool_init(bp);
14934 	if (bp->fw_cap & BNXT_FW_CAP_PTP)
14935 		__bnxt_hwrm_ptp_qcfg(bp);
14936 	bnxt_dcb_init(bp);
14937 	bnxt_hwmon_init(bp);
14938 	return 0;
14939 }
14940 
14941 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14942 {
14943 	bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14944 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14945 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14946 			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14947 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14948 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14949 		bp->rss_hash_delta = bp->rss_hash_cfg;
14950 	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14951 		bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14952 		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14953 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14954 	}
14955 }
14956 
14957 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14958 {
14959 	struct net_device *dev = bp->dev;
14960 
14961 	dev->hw_features &= ~NETIF_F_NTUPLE;
14962 	dev->features &= ~NETIF_F_NTUPLE;
14963 	bp->flags &= ~BNXT_FLAG_RFS;
14964 	if (bnxt_rfs_supported(bp)) {
14965 		dev->hw_features |= NETIF_F_NTUPLE;
14966 		if (bnxt_rfs_capable(bp, false)) {
14967 			bp->flags |= BNXT_FLAG_RFS;
14968 			dev->features |= NETIF_F_NTUPLE;
14969 		}
14970 	}
14971 }
14972 
14973 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14974 {
14975 	struct pci_dev *pdev = bp->pdev;
14976 
14977 	bnxt_set_dflt_rss_hash_type(bp);
14978 	bnxt_set_dflt_rfs(bp);
14979 
14980 	bnxt_get_wol_settings(bp);
14981 	if (bp->flags & BNXT_FLAG_WOL_CAP)
14982 		device_set_wakeup_enable(&pdev->dev, bp->wol);
14983 	else
14984 		device_set_wakeup_capable(&pdev->dev, false);
14985 
14986 	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14987 	bnxt_hwrm_coal_params_qcaps(bp);
14988 }
14989 
14990 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14991 
14992 int bnxt_fw_init_one(struct bnxt *bp)
14993 {
14994 	int rc;
14995 
14996 	rc = bnxt_fw_init_one_p1(bp);
14997 	if (rc) {
14998 		netdev_err(bp->dev, "Firmware init phase 1 failed\n");
14999 		return rc;
15000 	}
15001 	rc = bnxt_fw_init_one_p2(bp);
15002 	if (rc) {
15003 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
15004 		return rc;
15005 	}
15006 	rc = bnxt_probe_phy(bp, false);
15007 	if (rc)
15008 		return rc;
15009 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
15010 	if (rc)
15011 		return rc;
15012 
15013 	bnxt_fw_init_one_p3(bp);
15014 	return 0;
15015 }
15016 
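/* Execute one step of the firmware-provided host reset sequence: write the
 * configured value to a config-space, GRC or BAR register and optionally
 * delay before the next step.
 */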
15017 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
15018 {
15019 	struct bnxt_fw_health *fw_health = bp->fw_health;
15020 	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
15021 	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
15022 	u32 reg_type, reg_off, delay_msecs;
15023 
15024 	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
15025 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
15026 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
15027 	switch (reg_type) {
15028 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
15029 		pci_write_config_dword(bp->pdev, reg_off, val);
15030 		break;
15031 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
15032 		writel(reg_off & BNXT_GRC_BASE_MASK,
15033 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
15034 		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
15035 		fallthrough;
15036 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
15037 		writel(val, bp->bar0 + reg_off);
15038 		break;
15039 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
15040 		writel(val, bp->bar1 + reg_off);
15041 		break;
15042 	}
15043 	if (delay_msecs) {
15044 		pci_read_config_dword(bp->pdev, 0, &val);
15045 		msleep(delay_msecs);
15046 	}
15047 }
15048 
15049 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
15050 {
15051 	struct hwrm_func_qcfg_output *resp;
15052 	struct hwrm_func_qcfg_input *req;
15053 	bool result = true; /* firmware will enforce if unknown */
15054 
15055 	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
15056 		return result;
15057 
15058 	if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
15059 		return result;
15060 
15061 	req->fid = cpu_to_le16(0xffff);
15062 	resp = hwrm_req_hold(bp, req);
15063 	if (!hwrm_req_send(bp, req))
15064 		result = !!(le16_to_cpu(resp->flags) &
15065 			    FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
15066 	hwrm_req_drop(bp, req);
15067 	return result;
15068 }
15069 
15070 static void bnxt_reset_all(struct bnxt *bp)
15071 {
15072 	struct bnxt_fw_health *fw_health = bp->fw_health;
15073 	int i, rc;
15074 
15075 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15076 		bnxt_fw_reset_via_optee(bp);
15077 		bp->fw_reset_timestamp = jiffies;
15078 		return;
15079 	}
15080 
15081 	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
15082 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
15083 			bnxt_fw_reset_writel(bp, i);
15084 	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
15085 		struct hwrm_fw_reset_input *req;
15086 
15087 		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15088 		if (!rc) {
15089 			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15090 			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15091 			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15092 			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15093 			rc = hwrm_req_send(bp, req);
15094 		}
15095 		if (rc != -ENODEV)
15096 			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15097 	}
15098 	bp->fw_reset_timestamp = jiffies;
15099 }
15100 
15101 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15102 {
15103 	return time_after(jiffies, bp->fw_reset_timestamp +
15104 			  (bp->fw_reset_max_dsecs * HZ / 10));
15105 }
15106 
15107 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15108 {
15109 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15110 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15111 		bnxt_dl_health_fw_status_update(bp, false);
15112 	bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15113 	netif_close(bp->dev);
15114 }
15115 
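/* Delayed work that drives the firmware reset state machine
 * (BNXT_FW_RESET_STATE_*): wait for VFs to unregister, reset the firmware,
 * re-enable the PCI device, poll for firmware readiness and reopen the NIC.
 */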
15116 static void bnxt_fw_reset_task(struct work_struct *work)
15117 {
15118 	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15119 	int rc = 0;
15120 
15121 	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15122 		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15123 		return;
15124 	}
15125 
15126 	switch (bp->fw_reset_state) {
15127 	case BNXT_FW_RESET_STATE_POLL_VF: {
15128 		int n = bnxt_get_registered_vfs(bp);
15129 		int tmo;
15130 
15131 		if (n < 0) {
15132 			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15133 				   n, jiffies_to_msecs(jiffies -
15134 				   bp->fw_reset_timestamp));
15135 			goto fw_reset_abort;
15136 		} else if (n > 0) {
15137 			if (bnxt_fw_reset_timeout(bp)) {
15138 				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15139 				bp->fw_reset_state = 0;
15140 				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15141 					   n);
15142 				goto ulp_start;
15143 			}
15144 			bnxt_queue_fw_reset_work(bp, HZ / 10);
15145 			return;
15146 		}
15147 		bp->fw_reset_timestamp = jiffies;
15148 		netdev_lock(bp->dev);
15149 		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15150 			bnxt_fw_reset_abort(bp, rc);
15151 			netdev_unlock(bp->dev);
15152 			goto ulp_start;
15153 		}
15154 		bnxt_fw_reset_close(bp);
15155 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15156 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15157 			tmo = HZ / 10;
15158 		} else {
15159 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15160 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
15161 		}
15162 		netdev_unlock(bp->dev);
15163 		bnxt_queue_fw_reset_work(bp, tmo);
15164 		return;
15165 	}
15166 	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15167 		u32 val;
15168 
15169 		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15170 		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15171 		    !bnxt_fw_reset_timeout(bp)) {
15172 			bnxt_queue_fw_reset_work(bp, HZ / 5);
15173 			return;
15174 		}
15175 
15176 		if (!bp->fw_health->primary) {
15177 			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15178 
15179 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15180 			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15181 			return;
15182 		}
15183 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15184 	}
15185 		fallthrough;
15186 	case BNXT_FW_RESET_STATE_RESET_FW:
15187 		bnxt_reset_all(bp);
15188 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15189 		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15190 		return;
15191 	case BNXT_FW_RESET_STATE_ENABLE_DEV:
15192 		bnxt_inv_fw_health_reg(bp);
15193 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15194 		    !bp->fw_reset_min_dsecs) {
15195 			u16 val;
15196 
15197 			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15198 			if (val == 0xffff) {
15199 				if (bnxt_fw_reset_timeout(bp)) {
15200 					netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15201 					rc = -ETIMEDOUT;
15202 					goto fw_reset_abort;
15203 				}
15204 				bnxt_queue_fw_reset_work(bp, HZ / 1000);
15205 				return;
15206 			}
15207 		}
15208 		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15209 		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15210 		if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15211 		    !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15212 			bnxt_dl_remote_reload(bp);
15213 		if (pci_enable_device(bp->pdev)) {
15214 			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15215 			rc = -ENODEV;
15216 			goto fw_reset_abort;
15217 		}
15218 		pci_set_master(bp->pdev);
15219 		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15220 		fallthrough;
15221 	case BNXT_FW_RESET_STATE_POLL_FW:
15222 		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15223 		rc = bnxt_hwrm_poll(bp);
15224 		if (rc) {
15225 			if (bnxt_fw_reset_timeout(bp)) {
15226 				netdev_err(bp->dev, "Firmware reset aborted\n");
15227 				goto fw_reset_abort_status;
15228 			}
15229 			bnxt_queue_fw_reset_work(bp, HZ / 5);
15230 			return;
15231 		}
15232 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15233 		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15234 		fallthrough;
15235 	case BNXT_FW_RESET_STATE_OPENING:
15236 		while (!netdev_trylock(bp->dev)) {
15237 			bnxt_queue_fw_reset_work(bp, HZ / 10);
15238 			return;
15239 		}
15240 		rc = bnxt_open(bp->dev);
15241 		if (rc) {
15242 			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15243 			bnxt_fw_reset_abort(bp, rc);
15244 			netdev_unlock(bp->dev);
15245 			goto ulp_start;
15246 		}
15247 
15248 		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15249 		    bp->fw_health->enabled) {
15250 			bp->fw_health->last_fw_reset_cnt =
15251 				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15252 		}
15253 		bp->fw_reset_state = 0;
15254 		/* Make sure fw_reset_state is 0 before clearing the flag */
15255 		smp_mb__before_atomic();
15256 		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15257 		bnxt_ptp_reapply_pps(bp);
15258 		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15259 		if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15260 			bnxt_dl_health_fw_recovery_done(bp);
15261 			bnxt_dl_health_fw_status_update(bp, true);
15262 		}
15263 		netdev_unlock(bp->dev);
15264 		bnxt_ulp_start(bp, 0);
15265 		bnxt_reenable_sriov(bp);
15266 		netdev_lock(bp->dev);
15267 		bnxt_vf_reps_alloc(bp);
15268 		bnxt_vf_reps_open(bp);
15269 		netdev_unlock(bp->dev);
15270 		break;
15271 	}
15272 	return;
15273 
15274 fw_reset_abort_status:
15275 	if (bp->fw_health->status_reliable ||
15276 	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15277 		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15278 
15279 		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15280 	}
15281 fw_reset_abort:
15282 	netdev_lock(bp->dev);
15283 	bnxt_fw_reset_abort(bp, rc);
15284 	netdev_unlock(bp->dev);
15285 ulp_start:
15286 	bnxt_ulp_start(bp, rc);
15287 }
15288 
15289 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15290 {
15291 	int rc;
15292 	struct bnxt *bp = netdev_priv(dev);
15293 
15294 	SET_NETDEV_DEV(dev, &pdev->dev);
15295 
15296 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
15297 	rc = pci_enable_device(pdev);
15298 	if (rc) {
15299 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15300 		goto init_err;
15301 	}
15302 
15303 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15304 		dev_err(&pdev->dev,
15305 			"Cannot find PCI device base address, aborting\n");
15306 		rc = -ENODEV;
15307 		goto init_err_disable;
15308 	}
15309 
15310 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15311 	if (rc) {
15312 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15313 		goto init_err_disable;
15314 	}
15315 
15316 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15317 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15318 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15319 		rc = -EIO;
15320 		goto init_err_release;
15321 	}
15322 
15323 	pci_set_master(pdev);
15324 
15325 	bp->dev = dev;
15326 	bp->pdev = pdev;
15327 
15328 	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15329 	 * determines the BAR size.
15330 	 */
15331 	bp->bar0 = pci_ioremap_bar(pdev, 0);
15332 	if (!bp->bar0) {
15333 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15334 		rc = -ENOMEM;
15335 		goto init_err_release;
15336 	}
15337 
15338 	bp->bar2 = pci_ioremap_bar(pdev, 4);
15339 	if (!bp->bar2) {
15340 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15341 		rc = -ENOMEM;
15342 		goto init_err_release;
15343 	}
15344 
15345 	INIT_WORK(&bp->sp_task, bnxt_sp_task);
15346 	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15347 
15348 	spin_lock_init(&bp->ntp_fltr_lock);
15349 #if BITS_PER_LONG == 32
15350 	spin_lock_init(&bp->db_lock);
15351 #endif
15352 
15353 	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15354 	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15355 
15356 	timer_setup(&bp->timer, bnxt_timer, 0);
15357 	bp->current_interval = BNXT_TIMER_INTERVAL;
15358 
15359 	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15360 	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15361 
15362 	clear_bit(BNXT_STATE_OPEN, &bp->state);
15363 	return 0;
15364 
15365 init_err_release:
15366 	bnxt_unmap_bars(bp, pdev);
15367 	pci_release_regions(pdev);
15368 
15369 init_err_disable:
15370 	pci_disable_device(pdev);
15371 
15372 init_err:
15373 	return rc;
15374 }
15375 
15376 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15377 {
15378 	struct sockaddr *addr = p;
15379 	struct bnxt *bp = netdev_priv(dev);
15380 	int rc = 0;
15381 
15382 	netdev_assert_locked(dev);
15383 
15384 	if (!is_valid_ether_addr(addr->sa_data))
15385 		return -EADDRNOTAVAIL;
15386 
15387 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15388 		return 0;
15389 
15390 	rc = bnxt_approve_mac(bp, addr->sa_data, true);
15391 	if (rc)
15392 		return rc;
15393 
15394 	eth_hw_addr_set(dev, addr->sa_data);
15395 	bnxt_clear_usr_fltrs(bp, true);
15396 	if (netif_running(dev)) {
15397 		bnxt_close_nic(bp, false, false);
15398 		rc = bnxt_open_nic(bp, false, false);
15399 	}
15400 
15401 	return rc;
15402 }
15403 
15404 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15405 {
15406 	struct bnxt *bp = netdev_priv(dev);
15407 
15408 	netdev_assert_locked(dev);
15409 
15410 	if (netif_running(dev))
15411 		bnxt_close_nic(bp, true, false);
15412 
15413 	WRITE_ONCE(dev->mtu, new_mtu);
15414 
15415 	/* MTU change may change the AGG ring settings if an XDP multi-buffer
15416 	 * program is attached.  We need to set the AGG ring settings and
15417 	 * rx_skb_func accordingly.
15418 	 */
15419 	if (READ_ONCE(bp->xdp_prog))
15420 		bnxt_set_rx_skb_mode(bp, true);
15421 
15422 	bnxt_set_ring_params(bp);
15423 
15424 	if (netif_running(dev))
15425 		return bnxt_open_nic(bp, true, false);
15426 
15427 	return 0;
15428 }
15429 
15430 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15431 {
15432 	struct bnxt *bp = netdev_priv(dev);
15433 	bool sh = false;
15434 	int rc, tx_cp;
15435 
15436 	if (tc > bp->max_tc) {
15437 		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15438 			   tc, bp->max_tc);
15439 		return -EINVAL;
15440 	}
15441 
15442 	if (bp->num_tc == tc)
15443 		return 0;
15444 
15445 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15446 		sh = true;
15447 
15448 	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15449 			      sh, tc, bp->tx_nr_rings_xdp);
15450 	if (rc)
15451 		return rc;
15452 
15453 	/* Needs to close the device and do hw resource re-allocations */
15454 	if (netif_running(bp->dev))
15455 		bnxt_close_nic(bp, true, false);
15456 
15457 	if (tc) {
15458 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15459 		netdev_set_num_tc(dev, tc);
15460 		bp->num_tc = tc;
15461 	} else {
15462 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15463 		netdev_reset_tc(dev);
15464 		bp->num_tc = 0;
15465 	}
15466 	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15467 	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15468 	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15469 			       tx_cp + bp->rx_nr_rings;
15470 
15471 	if (netif_running(bp->dev))
15472 		return bnxt_open_nic(bp, true, false);
15473 
15474 	return 0;
15475 }
15476 
15477 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15478 				  void *cb_priv)
15479 {
15480 	struct bnxt *bp = cb_priv;
15481 
15482 	if (!bnxt_tc_flower_enabled(bp) ||
15483 	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15484 		return -EOPNOTSUPP;
15485 
15486 	switch (type) {
15487 	case TC_SETUP_CLSFLOWER:
15488 		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15489 	default:
15490 		return -EOPNOTSUPP;
15491 	}
15492 }
15493 
15494 LIST_HEAD(bnxt_block_cb_list);
15495 
15496 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15497 			 void *type_data)
15498 {
15499 	struct bnxt *bp = netdev_priv(dev);
15500 
15501 	switch (type) {
15502 	case TC_SETUP_BLOCK:
15503 		return flow_block_cb_setup_simple(type_data,
15504 						  &bnxt_block_cb_list,
15505 						  bnxt_setup_tc_block_cb,
15506 						  bp, bp, true);
15507 	case TC_SETUP_QDISC_MQPRIO: {
15508 		struct tc_mqprio_qopt *mqprio = type_data;
15509 
15510 		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15511 
15512 		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15513 	}
15514 	default:
15515 		return -EOPNOTSUPP;
15516 	}
15517 }
15518 
15519 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15520 			    const struct sk_buff *skb)
15521 {
15522 	struct bnxt_vnic_info *vnic;
15523 
15524 	if (skb)
15525 		return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15526 
15527 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15528 	return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15529 }
15530 
15531 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15532 			   u32 idx)
15533 {
15534 	struct hlist_head *head;
15535 	int bit_id;
15536 
15537 	spin_lock_bh(&bp->ntp_fltr_lock);
15538 	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15539 	if (bit_id < 0) {
15540 		spin_unlock_bh(&bp->ntp_fltr_lock);
15541 		return -ENOMEM;
15542 	}
15543 
15544 	fltr->base.sw_id = (u16)bit_id;
15545 	fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15546 	fltr->base.flags |= BNXT_ACT_RING_DST;
15547 	head = &bp->ntp_fltr_hash_tbl[idx];
15548 	hlist_add_head_rcu(&fltr->base.hash, head);
15549 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15550 	bnxt_insert_usr_fltr(bp, &fltr->base);
15551 	bp->ntp_fltr_count++;
15552 	spin_unlock_bh(&bp->ntp_fltr_lock);
15553 	return 0;
15554 }
15555 
15556 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15557 			    struct bnxt_ntuple_filter *f2)
15558 {
15559 	struct bnxt_flow_masks *masks1 = &f1->fmasks;
15560 	struct bnxt_flow_masks *masks2 = &f2->fmasks;
15561 	struct flow_keys *keys1 = &f1->fkeys;
15562 	struct flow_keys *keys2 = &f2->fkeys;
15563 
15564 	if (keys1->basic.n_proto != keys2->basic.n_proto ||
15565 	    keys1->basic.ip_proto != keys2->basic.ip_proto)
15566 		return false;
15567 
15568 	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15569 		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15570 		    masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15571 		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15572 		    masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15573 			return false;
15574 	} else {
15575 		if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15576 				     &keys2->addrs.v6addrs.src) ||
15577 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15578 				     &masks2->addrs.v6addrs.src) ||
15579 		    !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15580 				     &keys2->addrs.v6addrs.dst) ||
15581 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15582 				     &masks2->addrs.v6addrs.dst))
15583 			return false;
15584 	}
15585 
15586 	return keys1->ports.src == keys2->ports.src &&
15587 	       masks1->ports.src == masks2->ports.src &&
15588 	       keys1->ports.dst == keys2->ports.dst &&
15589 	       masks1->ports.dst == masks2->ports.dst &&
15590 	       keys1->control.flags == keys2->control.flags &&
15591 	       f1->l2_fltr == f2->l2_fltr;
15592 }
15593 
15594 struct bnxt_ntuple_filter *
15595 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15596 				struct bnxt_ntuple_filter *fltr, u32 idx)
15597 {
15598 	struct bnxt_ntuple_filter *f;
15599 	struct hlist_head *head;
15600 
15601 	head = &bp->ntp_fltr_hash_tbl[idx];
15602 	hlist_for_each_entry_rcu(f, head, base.hash) {
15603 		if (bnxt_fltr_match(f, fltr))
15604 			return f;
15605 	}
15606 	return NULL;
15607 }
15608 
15609 #ifdef CONFIG_RFS_ACCEL
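/* aRFS .ndo_rx_flow_steer handler: build an ntuple filter for the flow,
 * return the existing filter ID if one already matches, otherwise insert
 * the new filter and queue BNXT_RX_NTP_FLTR_SP_EVENT to program it.
 */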
15610 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15611 			      u16 rxq_index, u32 flow_id)
15612 {
15613 	struct bnxt *bp = netdev_priv(dev);
15614 	struct bnxt_ntuple_filter *fltr, *new_fltr;
15615 	struct flow_keys *fkeys;
15616 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15617 	struct bnxt_l2_filter *l2_fltr;
15618 	int rc = 0, idx;
15619 	u32 flags;
15620 
15621 	if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15622 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15623 		atomic_inc(&l2_fltr->refcnt);
15624 	} else {
15625 		struct bnxt_l2_key key;
15626 
15627 		ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15628 		key.vlan = 0;
15629 		l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15630 		if (!l2_fltr)
15631 			return -EINVAL;
15632 		if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15633 			bnxt_del_l2_filter(bp, l2_fltr);
15634 			return -EINVAL;
15635 		}
15636 	}
15637 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15638 	if (!new_fltr) {
15639 		bnxt_del_l2_filter(bp, l2_fltr);
15640 		return -ENOMEM;
15641 	}
15642 
15643 	fkeys = &new_fltr->fkeys;
15644 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15645 		rc = -EPROTONOSUPPORT;
15646 		goto err_free;
15647 	}
15648 
15649 	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15650 	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15651 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15652 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15653 		rc = -EPROTONOSUPPORT;
15654 		goto err_free;
15655 	}
15656 	new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15657 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15658 		if (bp->hwrm_spec_code < 0x10601) {
15659 			rc = -EPROTONOSUPPORT;
15660 			goto err_free;
15661 		}
15662 		new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15663 	}
15664 	flags = fkeys->control.flags;
15665 	if (((flags & FLOW_DIS_ENCAPSULATION) &&
15666 	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15667 		rc = -EPROTONOSUPPORT;
15668 		goto err_free;
15669 	}
15670 	new_fltr->l2_fltr = l2_fltr;
15671 
15672 	idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15673 	rcu_read_lock();
15674 	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15675 	if (fltr) {
15676 		rc = fltr->base.sw_id;
15677 		rcu_read_unlock();
15678 		goto err_free;
15679 	}
15680 	rcu_read_unlock();
15681 
15682 	new_fltr->flow_id = flow_id;
15683 	new_fltr->base.rxq = rxq_index;
15684 	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15685 	if (!rc) {
15686 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15687 		return new_fltr->base.sw_id;
15688 	}
15689 
15690 err_free:
15691 	bnxt_del_l2_filter(bp, l2_fltr);
15692 	kfree(new_fltr);
15693 	return rc;
15694 }
15695 #endif
15696 
15697 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15698 {
15699 	spin_lock_bh(&bp->ntp_fltr_lock);
15700 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15701 		spin_unlock_bh(&bp->ntp_fltr_lock);
15702 		return;
15703 	}
15704 	hlist_del_rcu(&fltr->base.hash);
15705 	bnxt_del_one_usr_fltr(bp, &fltr->base);
15706 	bp->ntp_fltr_count--;
15707 	spin_unlock_bh(&bp->ntp_fltr_lock);
15708 	bnxt_del_l2_filter(bp, fltr->l2_fltr);
15709 	clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15710 	kfree_rcu(fltr, base.rcu);
15711 }
15712 
15713 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15714 {
15715 #ifdef CONFIG_RFS_ACCEL
15716 	int i;
15717 
15718 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15719 		struct hlist_head *head;
15720 		struct hlist_node *tmp;
15721 		struct bnxt_ntuple_filter *fltr;
15722 		int rc;
15723 
15724 		head = &bp->ntp_fltr_hash_tbl[i];
15725 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15726 			bool del = false;
15727 
15728 			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15729 				if (fltr->base.flags & BNXT_ACT_NO_AGING)
15730 					continue;
15731 				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15732 							fltr->flow_id,
15733 							fltr->base.sw_id)) {
15734 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
15735 									 fltr);
15736 					del = true;
15737 				}
15738 			} else {
15739 				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15740 								       fltr);
15741 				if (rc)
15742 					del = true;
15743 				else
15744 					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15745 			}
15746 
15747 			if (del)
15748 				bnxt_del_ntp_filter(bp, fltr);
15749 		}
15750 	}
15751 #endif
15752 }
15753 
15754 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15755 				    unsigned int entry, struct udp_tunnel_info *ti)
15756 {
15757 	struct bnxt *bp = netdev_priv(netdev);
15758 	unsigned int cmd;
15759 
15760 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15761 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15762 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15763 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15764 	else
15765 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15766 
15767 	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15768 }
15769 
15770 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15771 				      unsigned int entry, struct udp_tunnel_info *ti)
15772 {
15773 	struct bnxt *bp = netdev_priv(netdev);
15774 	unsigned int cmd;
15775 
15776 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15777 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15778 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15779 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15780 	else
15781 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15782 
15783 	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15784 }
15785 
15786 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15787 	.set_port	= bnxt_udp_tunnel_set_port,
15788 	.unset_port	= bnxt_udp_tunnel_unset_port,
15789 	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15790 	.tables		= {
15791 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
15792 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15793 	},
15794 }, bnxt_udp_tunnels_p7 = {
15795 	.set_port	= bnxt_udp_tunnel_set_port,
15796 	.unset_port	= bnxt_udp_tunnel_unset_port,
15797 	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15798 	.tables		= {
15799 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
15800 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15801 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15802 	},
15803 };
15804 
15805 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15806 			       struct net_device *dev, u32 filter_mask,
15807 			       int nlflags)
15808 {
15809 	struct bnxt *bp = netdev_priv(dev);
15810 
15811 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15812 				       nlflags, filter_mask, NULL);
15813 }
15814 
15815 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15816 			       u16 flags, struct netlink_ext_ack *extack)
15817 {
15818 	struct bnxt *bp = netdev_priv(dev);
15819 	struct nlattr *attr, *br_spec;
15820 	int rem, rc = 0;
15821 
15822 	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15823 		return -EOPNOTSUPP;
15824 
15825 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15826 	if (!br_spec)
15827 		return -EINVAL;
15828 
15829 	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15830 		u16 mode;
15831 
15832 		mode = nla_get_u16(attr);
15833 		if (mode == bp->br_mode)
15834 			break;
15835 
15836 		rc = bnxt_hwrm_set_br_mode(bp, mode);
15837 		if (!rc)
15838 			bp->br_mode = mode;
15839 		break;
15840 	}
15841 	return rc;
15842 }
15843 
15844 int bnxt_get_port_parent_id(struct net_device *dev,
15845 			    struct netdev_phys_item_id *ppid)
15846 {
15847 	struct bnxt *bp = netdev_priv(dev);
15848 
15849 	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15850 		return -EOPNOTSUPP;
15851 
15852 	/* The PF and its VF-reps only support the switchdev framework */
15853 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15854 		return -EOPNOTSUPP;
15855 
15856 	ppid->id_len = sizeof(bp->dsn);
15857 	memcpy(ppid->id, bp->dsn, ppid->id_len);
15858 
15859 	return 0;
15860 }
15861 
15862 static const struct net_device_ops bnxt_netdev_ops = {
15863 	.ndo_open		= bnxt_open,
15864 	.ndo_start_xmit		= bnxt_start_xmit,
15865 	.ndo_stop		= bnxt_close,
15866 	.ndo_get_stats64	= bnxt_get_stats64,
15867 	.ndo_set_rx_mode	= bnxt_set_rx_mode,
15868 	.ndo_eth_ioctl		= bnxt_ioctl,
15869 	.ndo_validate_addr	= eth_validate_addr,
15870 	.ndo_set_mac_address	= bnxt_change_mac_addr,
15871 	.ndo_change_mtu		= bnxt_change_mtu,
15872 	.ndo_fix_features	= bnxt_fix_features,
15873 	.ndo_set_features	= bnxt_set_features,
15874 	.ndo_features_check	= bnxt_features_check,
15875 	.ndo_tx_timeout		= bnxt_tx_timeout,
15876 #ifdef CONFIG_BNXT_SRIOV
15877 	.ndo_get_vf_config	= bnxt_get_vf_config,
15878 	.ndo_set_vf_mac		= bnxt_set_vf_mac,
15879 	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
15880 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
15881 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
15882 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
15883 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
15884 #endif
15885 	.ndo_setup_tc           = bnxt_setup_tc,
15886 #ifdef CONFIG_RFS_ACCEL
15887 	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
15888 #endif
15889 	.ndo_bpf		= bnxt_xdp,
15890 	.ndo_xdp_xmit		= bnxt_xdp_xmit,
15891 	.ndo_bridge_getlink	= bnxt_bridge_getlink,
15892 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
15893 	.ndo_hwtstamp_get	= bnxt_hwtstamp_get,
15894 	.ndo_hwtstamp_set	= bnxt_hwtstamp_set,
15895 };
15896 
15897 static const struct xdp_metadata_ops bnxt_xdp_metadata_ops = {
15898 	.xmo_rx_hash		= bnxt_xdp_rx_hash,
15899 };
15900 
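/* Per-queue RX stats for the netdev queue stats API, read from the software
 * mirror of the hardware counters kept in the queue's completion ring.
 */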
15901 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15902 				    struct netdev_queue_stats_rx *stats)
15903 {
15904 	struct bnxt *bp = netdev_priv(dev);
15905 	struct bnxt_cp_ring_info *cpr;
15906 	u64 *sw;
15907 
15908 	if (!bp->bnapi)
15909 		return;
15910 
15911 	cpr = &bp->bnapi[i]->cp_ring;
15912 	sw = cpr->stats.sw_stats;
15913 
15914 	stats->packets = 0;
15915 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15916 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15917 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15918 
15919 	stats->bytes = 0;
15920 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15921 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15922 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15923 
15924 	stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15925 	stats->hw_gro_packets = cpr->sw_stats->rx.rx_hw_gro_packets;
15926 	stats->hw_gro_wire_packets = cpr->sw_stats->rx.rx_hw_gro_wire_packets;
15927 }
15928 
15929 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15930 				    struct netdev_queue_stats_tx *stats)
15931 {
15932 	struct bnxt *bp = netdev_priv(dev);
15933 	struct bnxt_napi *bnapi;
15934 	u64 *sw;
15935 
15936 	if (!bp->tx_ring)
15937 		return;
15938 
15939 	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15940 	sw = bnapi->cp_ring.stats.sw_stats;
15941 
15942 	stats->packets = 0;
15943 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15944 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15945 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15946 
15947 	stats->bytes = 0;
15948 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15949 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15950 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15951 }
15952 
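/* Base stats report the counters accumulated before the current rings were
 * created (the *_prev totals saved across ring resets), so that base plus
 * per-queue stats can be combined for device-lifetime totals.
 */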
15953 static void bnxt_get_base_stats(struct net_device *dev,
15954 				struct netdev_queue_stats_rx *rx,
15955 				struct netdev_queue_stats_tx *tx)
15956 {
15957 	struct bnxt *bp = netdev_priv(dev);
15958 
15959 	rx->packets = bp->net_stats_prev.rx_packets;
15960 	rx->bytes = bp->net_stats_prev.rx_bytes;
15961 	rx->alloc_fail = bp->ring_drv_stats_prev.rx_total_oom_discards;
15962 	rx->hw_gro_packets = bp->ring_drv_stats_prev.rx_total_hw_gro_packets;
15963 	rx->hw_gro_wire_packets = bp->ring_drv_stats_prev.rx_total_hw_gro_wire_packets;
15964 
15965 	tx->packets = bp->net_stats_prev.tx_packets;
15966 	tx->bytes = bp->net_stats_prev.tx_bytes;
15967 }
15968 
15969 static const struct netdev_stat_ops bnxt_stat_ops = {
15970 	.get_queue_stats_rx	= bnxt_get_queue_stats_rx,
15971 	.get_queue_stats_tx	= bnxt_get_queue_stats_tx,
15972 	.get_base_stats		= bnxt_get_base_stats,
15973 };
15974 
15975 static void bnxt_queue_default_qcfg(struct net_device *dev,
15976 				    struct netdev_queue_config *qcfg)
15977 {
15978 	qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
15979 }
15980 
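/* A per-queue rx_page_size must be a power of 2 within
 * [BNXT_RX_PAGE_SIZE, BNXT_MAX_RX_PAGE_SIZE] and may only differ from the
 * default on P5_PLUS chips.
 */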
15981 static int bnxt_validate_qcfg(struct net_device *dev,
15982 			      struct netdev_queue_config *qcfg,
15983 			      struct netlink_ext_ack *extack)
15984 {
15985 	struct bnxt *bp = netdev_priv(dev);
15986 
15987 	/* Older chips need the MSS calc, so rx_page_size cannot be changed */
15988 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
15989 	    qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
15990 		return -EINVAL;
15991 
15992 	if (!is_power_of_2(qcfg->rx_page_size))
15993 		return -ERANGE;
15994 
15995 	if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
15996 	    qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
15997 		return -ERANGE;
15998 
15999 	return 0;
16000 }
16001 
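/* Allocate all per-RX-queue resources into a clone of the ring info (qmem):
 * page pools, XDP rxq registration, RX and aggregation ring memory, TPA
 * state and the initial buffers.  bnxt_queue_start() later adopts the clone.
 */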
16002 static int bnxt_queue_mem_alloc(struct net_device *dev,
16003 				struct netdev_queue_config *qcfg,
16004 				void *qmem, int idx)
16005 {
16006 	struct bnxt_rx_ring_info *rxr, *clone;
16007 	struct bnxt *bp = netdev_priv(dev);
16008 	struct bnxt_ring_struct *ring;
16009 	int rc;
16010 
16011 	if (!bp->rx_ring)
16012 		return -ENETDOWN;
16013 
16014 	rxr = &bp->rx_ring[idx];
16015 	clone = qmem;
16016 	memcpy(clone, rxr, sizeof(*rxr));
16017 	bnxt_init_rx_ring_struct(bp, clone);
16018 	bnxt_reset_rx_ring_struct(bp, clone);
16019 
16020 	clone->rx_prod = 0;
16021 	clone->rx_agg_prod = 0;
16022 	clone->rx_sw_agg_prod = 0;
16023 	clone->rx_next_cons = 0;
16024 	clone->need_head_pool = false;
16025 	clone->rx_page_size = qcfg->rx_page_size;
16026 
16027 	rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
16028 	if (rc)
16029 		return rc;
16030 
16031 	rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
16032 	if (rc < 0)
16033 		goto err_page_pool_destroy;
16034 
16035 	rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
16036 					MEM_TYPE_PAGE_POOL,
16037 					clone->page_pool);
16038 	if (rc)
16039 		goto err_rxq_info_unreg;
16040 
16041 	ring = &clone->rx_ring_struct;
16042 	rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16043 	if (rc)
16044 		goto err_free_rx_ring;
16045 
16046 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
16047 		ring = &clone->rx_agg_ring_struct;
16048 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16049 		if (rc)
16050 			goto err_free_rx_agg_ring;
16051 
16052 		rc = bnxt_alloc_rx_agg_bmap(bp, clone);
16053 		if (rc)
16054 			goto err_free_rx_agg_ring;
16055 	}
16056 
16057 	if (bp->flags & BNXT_FLAG_TPA) {
16058 		rc = bnxt_alloc_one_tpa_info(bp, clone);
16059 		if (rc)
16060 			goto err_free_tpa_info;
16061 	}
16062 
16063 	bnxt_init_one_rx_ring_rxbd(bp, clone);
16064 	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
16065 
16066 	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
16067 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16068 		bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
16069 	if (bp->flags & BNXT_FLAG_TPA)
16070 		bnxt_alloc_one_tpa_info_data(bp, clone);
16071 
16072 	return 0;
16073 
16074 err_free_tpa_info:
16075 	bnxt_free_one_tpa_info(bp, clone);
16076 err_free_rx_agg_ring:
16077 	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
16078 err_free_rx_ring:
16079 	bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
16080 err_rxq_info_unreg:
16081 	xdp_rxq_info_unreg(&clone->xdp_rxq);
16082 err_page_pool_destroy:
16083 	page_pool_destroy(clone->page_pool);
16084 	page_pool_destroy(clone->head_pool);
16085 	clone->page_pool = NULL;
16086 	clone->head_pool = NULL;
16087 	return rc;
16088 }
16089 
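/* Release everything held by a queue memory clone: buffers, TPA info, the
 * XDP rxq registration, page pools, ring descriptor memory and the
 * aggregation bitmap.
 */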
16090 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
16091 {
16092 	struct bnxt_rx_ring_info *rxr = qmem;
16093 	struct bnxt *bp = netdev_priv(dev);
16094 	struct bnxt_ring_struct *ring;
16095 
16096 	bnxt_free_one_rx_ring_skbs(bp, rxr);
16097 	bnxt_free_one_tpa_info(bp, rxr);
16098 
16099 	xdp_rxq_info_unreg(&rxr->xdp_rxq);
16100 
16101 	page_pool_destroy(rxr->page_pool);
16102 	page_pool_destroy(rxr->head_pool);
16103 	rxr->page_pool = NULL;
16104 	rxr->head_pool = NULL;
16105 
16106 	ring = &rxr->rx_ring_struct;
16107 	bnxt_free_ring(bp, &ring->ring_mem);
16108 
16109 	ring = &rxr->rx_agg_ring_struct;
16110 	bnxt_free_ring(bp, &ring->ring_mem);
16111 
16112 	kfree(rxr->rx_agg_bmap);
16113 	rxr->rx_agg_bmap = NULL;
16114 }
16115 
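/* Transfer the descriptor memory pointers (page table, vmem, page and DMA
 * arrays) of the RX and aggregation rings from src to dst.  Both rings must
 * have been allocated with identical parameters (see the WARN_ONs below).
 */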
16116 static void bnxt_copy_rx_ring(struct bnxt *bp,
16117 			      struct bnxt_rx_ring_info *dst,
16118 			      struct bnxt_rx_ring_info *src)
16119 {
16120 	struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
16121 	struct bnxt_ring_struct *dst_ring, *src_ring;
16122 	int i;
16123 
16124 	dst_ring = &dst->rx_ring_struct;
16125 	dst_rmem = &dst_ring->ring_mem;
16126 	src_ring = &src->rx_ring_struct;
16127 	src_rmem = &src_ring->ring_mem;
16128 
16129 	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16130 	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16131 	WARN_ON(dst_rmem->flags != src_rmem->flags);
16132 	WARN_ON(dst_rmem->depth != src_rmem->depth);
16133 	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16134 	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16135 
16136 	dst_rmem->pg_tbl = src_rmem->pg_tbl;
16137 	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16138 	*dst_rmem->vmem = *src_rmem->vmem;
16139 	for (i = 0; i < dst_rmem->nr_pages; i++) {
16140 		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16141 		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16142 	}
16143 
16144 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
16145 		return;
16146 
16147 	dst_ring = &dst->rx_agg_ring_struct;
16148 	dst_rmem = &dst_ring->ring_mem;
16149 	src_ring = &src->rx_agg_ring_struct;
16150 	src_rmem = &src_ring->ring_mem;
16151 
16152 	dst->rx_page_size = src->rx_page_size;
16153 
16154 	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16155 	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16156 	WARN_ON(dst_rmem->flags != src_rmem->flags);
16157 	WARN_ON(dst_rmem->depth != src_rmem->depth);
16158 	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16159 	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16160 	WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16161 
16162 	dst_rmem->pg_tbl = src_rmem->pg_tbl;
16163 	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16164 	*dst_rmem->vmem = *src_rmem->vmem;
16165 	for (i = 0; i < dst_rmem->nr_pages; i++) {
16166 		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16167 		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16168 	}
16169 
16170 	dst->rx_agg_bmap = src->rx_agg_bmap;
16171 }
16172 
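/* Restart RX queue @idx using the resources pre-allocated in qmem: adopt the
 * clone's pools and rings, reallocate the hardware rings via HWRM, re-enable
 * NAPI and restore the VNIC MRU so traffic is steered to the queue again.
 */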
16173 static int bnxt_queue_start(struct net_device *dev,
16174 			    struct netdev_queue_config *qcfg,
16175 			    void *qmem, int idx)
16176 {
16177 	struct bnxt *bp = netdev_priv(dev);
16178 	struct bnxt_rx_ring_info *rxr, *clone;
16179 	struct bnxt_cp_ring_info *cpr;
16180 	struct bnxt_vnic_info *vnic;
16181 	struct bnxt_napi *bnapi;
16182 	int i, rc;
16183 	u16 mru;
16184 
16185 	rxr = &bp->rx_ring[idx];
16186 	clone = qmem;
16187 
16188 	rxr->rx_prod = clone->rx_prod;
16189 	rxr->rx_agg_prod = clone->rx_agg_prod;
16190 	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16191 	rxr->rx_next_cons = clone->rx_next_cons;
16192 	rxr->rx_tpa = clone->rx_tpa;
16193 	rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16194 	rxr->page_pool = clone->page_pool;
16195 	rxr->head_pool = clone->head_pool;
16196 	rxr->xdp_rxq = clone->xdp_rxq;
16197 	rxr->need_head_pool = clone->need_head_pool;
16198 
16199 	bnxt_copy_rx_ring(bp, rxr, clone);
16200 
16201 	bnapi = rxr->bnapi;
16202 	cpr = &bnapi->cp_ring;
16203 
16204 	/* All rings have been reserved and previously allocated.
16205 	 * Reallocating with the same parameters should never fail.
16206 	 */
16207 	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16208 	if (rc)
16209 		goto err_reset;
16210 
16211 	if (bp->tph_mode) {
16212 		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16213 		if (rc)
16214 			goto err_reset;
16215 	}
16216 
16217 	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16218 	if (rc)
16219 		goto err_reset;
16220 
16221 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16222 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16223 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16224 
16225 	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16226 		rc = bnxt_tx_queue_start(bp, idx);
16227 		if (rc)
16228 			goto err_reset;
16229 	}
16230 
16231 	bnxt_enable_rx_page_pool(rxr);
16232 	napi_enable_locked(&bnapi->napi);
16233 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16234 
16235 	mru = bp->dev->mtu + VLAN_ETH_HLEN;
16236 	for (i = 0; i < bp->nr_vnics; i++) {
16237 		vnic = &bp->vnic_info[i];
16238 
16239 		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16240 		if (rc)
16241 			return rc;
16242 	}
16243 	return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16244 
16245 err_reset:
16246 	netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16247 		   rc);
16248 	napi_enable_locked(&bnapi->napi);
16249 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16250 	bnxt_reset_task(bp, true);
16251 	return rc;
16252 }
16253 
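/* Quiesce RX queue @idx: clear the VNIC MRU so no new packets are steered to
 * it, free the hardware rings, stop direct recycling into the page pools and
 * only then disable NAPI.  The ring state is saved into qmem for restart.
 */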
16254 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16255 {
16256 	struct bnxt *bp = netdev_priv(dev);
16257 	struct bnxt_rx_ring_info *rxr;
16258 	struct bnxt_cp_ring_info *cpr;
16259 	struct bnxt_vnic_info *vnic;
16260 	struct bnxt_napi *bnapi;
16261 	int i;
16262 
16263 	for (i = 0; i < bp->nr_vnics; i++) {
16264 		vnic = &bp->vnic_info[i];
16265 
16266 		bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16267 	}
16268 	bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16269 	/* Make sure NAPI sees that the VNIC is disabled */
16270 	synchronize_net();
16271 	rxr = &bp->rx_ring[idx];
16272 	bnapi = rxr->bnapi;
16273 	cpr = &bnapi->cp_ring;
16274 	cancel_work_sync(&cpr->dim.work);
16275 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
16276 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16277 	page_pool_disable_direct_recycling(rxr->page_pool);
16278 	if (bnxt_separate_head_pool(rxr))
16279 		page_pool_disable_direct_recycling(rxr->head_pool);
16280 
16281 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16282 		bnxt_tx_queue_stop(bp, idx);
16283 
16284 	/* Disable NAPI now after freeing the rings because HWRM_RING_FREE
16285 	 * completion is handled in NAPI to guarantee no more DMA on that ring
16286 	 * after seeing the completion.
16287 	 */
16288 	napi_disable_locked(&bnapi->napi);
16289 
16290 	if (bp->tph_mode) {
16291 		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16292 		bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16293 	}
16294 	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16295 
16296 	memcpy(qmem, rxr, sizeof(*rxr));
16297 	bnxt_init_rx_ring_struct(bp, qmem);
16298 
16299 	return 0;
16300 }
16301 
16302 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16303 	.ndo_queue_mem_size	= sizeof(struct bnxt_rx_ring_info),
16304 	.ndo_queue_mem_alloc	= bnxt_queue_mem_alloc,
16305 	.ndo_queue_mem_free	= bnxt_queue_mem_free,
16306 	.ndo_queue_start	= bnxt_queue_start,
16307 	.ndo_queue_stop		= bnxt_queue_stop,
16308 	.ndo_default_qcfg	= bnxt_queue_default_qcfg,
16309 	.ndo_validate_qcfg	= bnxt_validate_qcfg,
16310 	.supported_params	= QCFG_RX_PAGE_SIZE,
16311 };
16312 
16313 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
16314 	.ndo_default_qcfg	= bnxt_queue_default_qcfg,
16315 };
16316 
16317 static void bnxt_remove_one(struct pci_dev *pdev)
16318 {
16319 	struct net_device *dev = pci_get_drvdata(pdev);
16320 	struct bnxt *bp = netdev_priv(dev);
16321 
16322 	if (BNXT_PF(bp))
16323 		__bnxt_sriov_disable(bp);
16324 
16325 	bnxt_rdma_aux_device_del(bp);
16326 
16327 	unregister_netdev(dev);
16328 	bnxt_ptp_clear(bp);
16329 
16330 	bnxt_rdma_aux_device_uninit(bp);
16331 
16332 	bnxt_free_l2_filters(bp, true);
16333 	bnxt_free_ntp_fltrs(bp, true);
16334 	WARN_ON(bp->num_rss_ctx);
16335 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16336 	/* Flush any pending tasks */
16337 	cancel_work_sync(&bp->sp_task);
16338 	cancel_delayed_work_sync(&bp->fw_reset_task);
16339 	bp->sp_event = 0;
16340 
16341 	bnxt_dl_fw_reporters_destroy(bp);
16342 	bnxt_dl_unregister(bp);
16343 	bnxt_shutdown_tc(bp);
16344 
16345 	bnxt_clear_int_mode(bp);
16346 	bnxt_hwrm_func_drv_unrgtr(bp);
16347 	bnxt_free_hwrm_resources(bp);
16348 	bnxt_hwmon_uninit(bp);
16349 	bnxt_ethtool_free(bp);
16350 	bnxt_dcb_free(bp);
16351 	kfree(bp->ptp_cfg);
16352 	bp->ptp_cfg = NULL;
16353 	kfree(bp->fw_health);
16354 	bp->fw_health = NULL;
16355 	bnxt_cleanup_pci(bp);
16356 	bnxt_free_ctx_mem(bp, true);
16357 	bnxt_free_crash_dump_mem(bp);
16358 	kfree(bp->rss_indir_tbl);
16359 	bp->rss_indir_tbl = NULL;
16360 	bnxt_free_port_stats(bp);
16361 	free_netdev(dev);
16362 }
16363 
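/* Query PHY and MAC capabilities from firmware.  If fw_dflt is set, also read
 * the current link state and initialize the ethtool link settings.
 */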
16364 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16365 {
16366 	int rc = 0;
16367 	struct bnxt_link_info *link_info = &bp->link_info;
16368 
16369 	bp->phy_flags = 0;
16370 	rc = bnxt_hwrm_phy_qcaps(bp);
16371 	if (rc) {
16372 		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16373 			   rc);
16374 		return rc;
16375 	}
16376 	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16377 		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16378 	else
16379 		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16380 
16381 	bp->mac_flags = 0;
16382 	bnxt_hwrm_mac_qcaps(bp);
16383 
16384 	if (!fw_dflt)
16385 		return 0;
16386 
16387 	mutex_lock(&bp->link_lock);
16388 	rc = bnxt_update_link(bp, false);
16389 	if (rc) {
16390 		mutex_unlock(&bp->link_lock);
16391 		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16392 			   rc);
16393 		return rc;
16394 	}
16395 
16396 	/* Older firmware does not have supported_auto_speeds, so assume
16397 	 * that all supported speeds can be autonegotiated.
16398 	 */
16399 	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16400 		link_info->support_auto_speeds = link_info->support_speeds;
16401 
16402 	bnxt_init_ethtool_link_settings(bp);
16403 	mutex_unlock(&bp->link_lock);
16404 	return 0;
16405 }
16406 
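/* Number of MSI-X vectors supported by the function, taken from the table
 * size field of the MSI-X capability.
 */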
16407 static int bnxt_get_max_irq(struct pci_dev *pdev)
16408 {
16409 	u16 ctrl;
16410 
16411 	if (!pdev->msix_cap)
16412 		return 1;
16413 
16414 	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16415 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16416 }
16417 
16418 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16419 				int *max_cp)
16420 {
16421 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16422 	int max_ring_grps = 0, max_irq;
16423 
16424 	*max_tx = hw_resc->max_tx_rings;
16425 	*max_rx = hw_resc->max_rx_rings;
16426 	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16427 	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16428 			bnxt_get_ulp_msix_num_in_use(bp),
16429 			hw_resc->max_stat_ctxs -
16430 			bnxt_get_ulp_stat_ctxs_in_use(bp));
16431 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16432 		*max_cp = min_t(int, *max_cp, max_irq);
16433 	max_ring_grps = hw_resc->max_hw_ring_grps;
16434 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16435 		*max_cp -= 1;
16436 		*max_rx -= 2;
16437 	}
16438 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16439 		*max_rx >>= 1;
16440 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16441 		int rc;
16442 
16443 		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16444 		if (rc) {
16445 			*max_rx = 0;
16446 			*max_tx = 0;
16447 		}
16448 		/* On P5 and newer chips, the max_cp output param is the available NQ count */
16449 		*max_cp = max_irq;
16450 	}
16451 	*max_rx = min_t(int, *max_rx, max_ring_grps);
16452 }
16453 
16454 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16455 {
16456 	int rx, tx, cp;
16457 
16458 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
16459 	*max_rx = rx;
16460 	*max_tx = tx;
16461 	if (!rx || !tx || !cp)
16462 		return -ENOMEM;
16463 
16464 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16465 }
16466 
16467 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16468 			       bool shared)
16469 {
16470 	int rc;
16471 
16472 	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16473 	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16474 		/* Not enough rings, try disabling agg rings. */
16475 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16476 		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16477 		if (rc) {
16478 			/* set BNXT_FLAG_AGG_RINGS back for consistency */
16479 			bp->flags |= BNXT_FLAG_AGG_RINGS;
16480 			return rc;
16481 		}
16482 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16483 		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16484 		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16485 		bnxt_set_ring_params(bp);
16486 	}
16487 
16488 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16489 		int max_cp, max_stat, max_irq;
16490 
16491 		/* Reserve minimum resources for RoCE */
16492 		max_cp = bnxt_get_max_func_cp_rings(bp);
16493 		max_stat = bnxt_get_max_func_stat_ctxs(bp);
16494 		max_irq = bnxt_get_max_func_irqs(bp);
16495 		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16496 		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16497 		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16498 			return 0;
16499 
16500 		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16501 		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16502 		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16503 		max_cp = min_t(int, max_cp, max_irq);
16504 		max_cp = min_t(int, max_cp, max_stat);
16505 		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16506 		if (rc)
16507 			rc = 0;
16508 	}
16509 	return rc;
16510 }
16511 
16512 /* In the initial default shared ring setting, each shared ring must have
16513  * an RX/TX ring pair.
16514  */
16515 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16516 {
16517 	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16518 	bp->rx_nr_rings = bp->cp_nr_rings;
16519 	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16520 	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16521 }
16522 
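/* Pick default RX/TX/completion ring counts from the CPU count, port count
 * and available device resources, reserve them with firmware, and set aside
 * MSI-X vectors and stat contexts for RoCE when supported.
 */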
16523 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16524 {
16525 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
16526 	int avail_msix;
16527 
16528 	if (!bnxt_can_reserve_rings(bp))
16529 		return 0;
16530 
16531 	if (sh)
16532 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
16533 	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16534 	/* Reduce default rings on multi-port cards so that total default
16535 	 * rings do not exceed CPU count.
16536 	 */
16537 	if (bp->port_count > 1) {
16538 		int max_rings =
16539 			max_t(int, num_online_cpus() / bp->port_count, 1);
16540 
16541 		dflt_rings = min_t(int, dflt_rings, max_rings);
16542 	}
16543 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16544 	if (rc)
16545 		return rc;
16546 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16547 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16548 	if (sh)
16549 		bnxt_trim_dflt_sh_rings(bp);
16550 	else
16551 		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16552 	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16553 
16554 	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16555 	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16556 		int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16557 
16558 		bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16559 		bnxt_set_dflt_ulp_stat_ctxs(bp);
16560 	}
16561 
16562 	rc = __bnxt_reserve_rings(bp);
16563 	if (rc && rc != -ENODEV)
16564 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16565 	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16566 	if (sh)
16567 		bnxt_trim_dflt_sh_rings(bp);
16568 
16569 	/* Rings may have been trimmed, re-reserve the trimmed rings. */
16570 	if (bnxt_need_reserve_rings(bp)) {
16571 		rc = __bnxt_reserve_rings(bp);
16572 		if (rc && rc != -ENODEV)
16573 			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16574 		bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16575 	}
16576 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16577 		bp->rx_nr_rings++;
16578 		bp->cp_nr_rings++;
16579 	}
16580 	if (rc) {
16581 		bp->tx_nr_rings = 0;
16582 		bp->rx_nr_rings = 0;
16583 	}
16584 	return rc;
16585 }
16586 
16587 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16588 {
16589 	int rc;
16590 
16591 	if (bp->tx_nr_rings)
16592 		return 0;
16593 
16594 	bnxt_ulp_irq_stop(bp);
16595 	bnxt_clear_int_mode(bp);
16596 	rc = bnxt_set_dflt_rings(bp, true);
16597 	if (rc) {
16598 		if (BNXT_VF(bp) && rc == -ENODEV)
16599 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16600 		else
16601 			netdev_err(bp->dev, "Not enough rings available.\n");
16602 		goto init_dflt_ring_err;
16603 	}
16604 	rc = bnxt_init_int_mode(bp);
16605 	if (rc)
16606 		goto init_dflt_ring_err;
16607 
16608 	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16609 
16610 	bnxt_set_dflt_rfs(bp);
16611 
16612 init_dflt_ring_err:
16613 	bnxt_ulp_irq_restart(bp, rc);
16614 	return rc;
16615 }
16616 
16617 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16618 {
16619 	int rc;
16620 
16621 	netdev_ops_assert_locked(bp->dev);
16622 	bnxt_hwrm_func_qcaps(bp);
16623 
16624 	if (netif_running(bp->dev))
16625 		__bnxt_close_nic(bp, true, false);
16626 
16627 	bnxt_ulp_irq_stop(bp);
16628 	bnxt_clear_int_mode(bp);
16629 	rc = bnxt_init_int_mode(bp);
16630 	bnxt_ulp_irq_restart(bp, rc);
16631 
16632 	if (netif_running(bp->dev)) {
16633 		if (rc)
16634 			netif_close(bp->dev);
16635 		else
16636 			rc = bnxt_open_nic(bp, true, false);
16637 	}
16638 
16639 	return rc;
16640 }
16641 
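/* Set the netdev MAC address.  The PF uses the firmware-assigned address; a
 * VF uses the admin-assigned address if valid, otherwise a random address
 * that must then be strictly approved by the PF/firmware.
 */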
16642 static int bnxt_init_mac_addr(struct bnxt *bp)
16643 {
16644 	int rc = 0;
16645 
16646 	if (BNXT_PF(bp)) {
16647 		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16648 	} else {
16649 #ifdef CONFIG_BNXT_SRIOV
16650 		struct bnxt_vf_info *vf = &bp->vf;
16651 		bool strict_approval = true;
16652 
16653 		if (is_valid_ether_addr(vf->mac_addr)) {
16654 			/* overwrite netdev dev_addr with admin VF MAC */
16655 			eth_hw_addr_set(bp->dev, vf->mac_addr);
16656 			/* Older PF driver or firmware may not approve this
16657 			 * correctly.
16658 			 */
16659 			strict_approval = false;
16660 		} else {
16661 			eth_hw_addr_random(bp->dev);
16662 		}
16663 		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16664 #endif
16665 	}
16666 	return rc;
16667 }
16668 
16669 static void bnxt_vpd_read_info(struct bnxt *bp)
16670 {
16671 	struct pci_dev *pdev = bp->pdev;
16672 	unsigned int vpd_size, kw_len;
16673 	int pos, size;
16674 	u8 *vpd_data;
16675 
16676 	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16677 	if (IS_ERR(vpd_data)) {
16678 		pci_warn(pdev, "Unable to read VPD\n");
16679 		return;
16680 	}
16681 
16682 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16683 					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16684 	if (pos < 0)
16685 		goto read_sn;
16686 
16687 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16688 	memcpy(bp->board_partno, &vpd_data[pos], size);
16689 
16690 read_sn:
16691 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16692 					   PCI_VPD_RO_KEYWORD_SERIALNO,
16693 					   &kw_len);
16694 	if (pos < 0)
16695 		goto exit;
16696 
16697 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16698 	memcpy(bp->board_serialno, &vpd_data[pos], size);
16699 exit:
16700 	kfree(vpd_data);
16701 }
16702 
16703 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16704 {
16705 	struct pci_dev *pdev = bp->pdev;
16706 	u64 qword;
16707 
16708 	qword = pci_get_dsn(pdev);
16709 	if (!qword) {
16710 		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16711 		return -EOPNOTSUPP;
16712 	}
16713 
16714 	put_unaligned_le64(qword, dsn);
16715 
16716 	bp->flags |= BNXT_FLAG_DSN_VALID;
16717 	return 0;
16718 }
16719 
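/* Map the doorbell BAR (BAR 2) using the doorbell area size reported by
 * firmware in bp->db_size.
 */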
16720 static int bnxt_map_db_bar(struct bnxt *bp)
16721 {
16722 	if (!bp->db_size)
16723 		return -ENODEV;
16724 	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16725 	if (!bp->bar1)
16726 		return -ENOMEM;
16727 	return 0;
16728 }
16729 
16730 void bnxt_print_device_info(struct bnxt *bp)
16731 {
16732 	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16733 		    board_info[bp->board_idx].name,
16734 		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16735 
16736 	pcie_print_link_status(bp->pdev);
16737 }
16738 
16739 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16740 {
16741 	struct bnxt_hw_resc *hw_resc;
16742 	struct net_device *dev;
16743 	struct bnxt *bp;
16744 	int rc, max_irqs;
16745 
16746 	if (pci_is_bridge(pdev))
16747 		return -ENODEV;
16748 
16749 	if (!pdev->msix_cap) {
16750 		dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16751 		return -ENODEV;
16752 	}
16753 
16754 	/* Clear any DMA transactions left pending by the crash kernel
16755 	 * while loading the driver in the capture kernel.
16756 	 */
16757 	if (is_kdump_kernel()) {
16758 		pci_clear_master(pdev);
16759 		pcie_flr(pdev);
16760 	}
16761 
16762 	max_irqs = bnxt_get_max_irq(pdev);
16763 	dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16764 				 max_irqs);
16765 	if (!dev)
16766 		return -ENOMEM;
16767 
16768 	bp = netdev_priv(dev);
16769 	bp->board_idx = ent->driver_data;
16770 	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16771 	bnxt_set_max_func_irqs(bp, max_irqs);
16772 
16773 	if (bnxt_vf_pciid(bp->board_idx))
16774 		bp->flags |= BNXT_FLAG_VF;
16775 
16776 	/* No devlink port registration in case of a VF */
16777 	if (BNXT_PF(bp))
16778 		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16779 
16780 	rc = bnxt_init_board(pdev, dev);
16781 	if (rc < 0)
16782 		goto init_err_free;
16783 
16784 	dev->netdev_ops = &bnxt_netdev_ops;
16785 	dev->xdp_metadata_ops = &bnxt_xdp_metadata_ops;
16786 	dev->stat_ops = &bnxt_stat_ops;
16787 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16788 	dev->ethtool_ops = &bnxt_ethtool_ops;
16789 	pci_set_drvdata(pdev, dev);
16790 
16791 	rc = bnxt_alloc_hwrm_resources(bp);
16792 	if (rc)
16793 		goto init_err_pci_clean;
16794 
16795 	mutex_init(&bp->hwrm_cmd_lock);
16796 	mutex_init(&bp->link_lock);
16797 
16798 	rc = bnxt_fw_init_one_p1(bp);
16799 	if (rc)
16800 		goto init_err_pci_clean;
16801 
16802 	if (BNXT_PF(bp))
16803 		bnxt_vpd_read_info(bp);
16804 
16805 	if (BNXT_CHIP_P5_PLUS(bp)) {
16806 		bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16807 		if (BNXT_CHIP_P7(bp))
16808 			bp->flags |= BNXT_FLAG_CHIP_P7;
16809 	}
16810 
16811 	rc = bnxt_alloc_rss_indir_tbl(bp);
16812 	if (rc)
16813 		goto init_err_pci_clean;
16814 
16815 	rc = bnxt_fw_init_one_p2(bp);
16816 	if (rc)
16817 		goto init_err_pci_clean;
16818 
16819 	rc = bnxt_map_db_bar(bp);
16820 	if (rc) {
16821 		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16822 			rc);
16823 		goto init_err_pci_clean;
16824 	}
16825 
16826 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16827 			   NETIF_F_TSO | NETIF_F_TSO6 |
16828 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16829 			   NETIF_F_GSO_IPXIP4 |
16830 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16831 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16832 			   NETIF_F_RXCSUM | NETIF_F_GRO;
16833 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16834 		dev->hw_features |= NETIF_F_GSO_UDP_L4;
16835 
16836 	if (BNXT_SUPPORTS_TPA(bp))
16837 		dev->hw_features |= NETIF_F_LRO;
16838 
16839 	dev->hw_enc_features =
16840 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16841 			NETIF_F_TSO | NETIF_F_TSO6 |
16842 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16843 			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16844 			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16845 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16846 		dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16847 	if (bp->flags & BNXT_FLAG_CHIP_P7)
16848 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16849 	else
16850 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16851 
16852 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16853 				    NETIF_F_GSO_GRE_CSUM;
16854 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16855 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16856 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16857 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16858 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16859 	if (BNXT_SUPPORTS_TPA(bp))
16860 		dev->hw_features |= NETIF_F_GRO_HW;
16861 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16862 	if (dev->features & NETIF_F_GRO_HW)
16863 		dev->features &= ~NETIF_F_LRO;
16864 	dev->priv_flags |= IFF_UNICAST_FLT;
16865 
16866 	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16867 	if (bp->tso_max_segs)
16868 		netif_set_tso_max_segs(dev, bp->tso_max_segs);
16869 
16870 	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16871 			    NETDEV_XDP_ACT_RX_SG;
16872 
16873 #ifdef CONFIG_BNXT_SRIOV
16874 	init_waitqueue_head(&bp->sriov_cfg_wait);
16875 #endif
16876 	if (BNXT_SUPPORTS_TPA(bp)) {
16877 		bp->gro_func = bnxt_gro_func_5730x;
16878 		if (BNXT_CHIP_P4(bp))
16879 			bp->gro_func = bnxt_gro_func_5731x;
16880 		else if (BNXT_CHIP_P5_PLUS(bp))
16881 			bp->gro_func = bnxt_gro_func_5750x;
16882 	}
16883 	if (!BNXT_CHIP_P4_PLUS(bp))
16884 		bp->flags |= BNXT_FLAG_DOUBLE_DB;
16885 
16886 	rc = bnxt_init_mac_addr(bp);
16887 	if (rc) {
16888 		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16889 		rc = -EADDRNOTAVAIL;
16890 		goto init_err_pci_clean;
16891 	}
16892 
16893 	if (BNXT_PF(bp)) {
16894 		/* Read the adapter's DSN to use as the eswitch switch_id */
16895 		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16896 	}
16897 
16898 	/* MTU range: 60 - FW defined max */
16899 	dev->min_mtu = ETH_ZLEN;
16900 	dev->max_mtu = bp->max_mtu;
16901 
16902 	rc = bnxt_probe_phy(bp, true);
16903 	if (rc)
16904 		goto init_err_pci_clean;
16905 
16906 	hw_resc = &bp->hw_resc;
16907 	bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16908 		       BNXT_L2_FLTR_MAX_FLTR;
16909 	/* Older firmware may not report these filters properly */
16910 	if (bp->max_fltr < BNXT_MAX_FLTR)
16911 		bp->max_fltr = BNXT_MAX_FLTR;
16912 	bnxt_init_l2_fltr_tbl(bp);
16913 	__bnxt_set_rx_skb_mode(bp, false);
16914 	bnxt_set_tpa_flags(bp);
16915 	bnxt_init_ring_params(bp);
16916 	bnxt_set_ring_params(bp);
16917 	bnxt_rdma_aux_device_init(bp);
16918 	rc = bnxt_set_dflt_rings(bp, true);
16919 	if (rc) {
16920 		if (BNXT_VF(bp) && rc == -ENODEV) {
16921 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16922 		} else {
16923 			netdev_err(bp->dev, "Not enough rings available.\n");
16924 			rc = -ENOMEM;
16925 		}
16926 		goto init_err_pci_clean;
16927 	}
16928 
16929 	bnxt_fw_init_one_p3(bp);
16930 
16931 	bnxt_init_dflt_coal(bp);
16932 
16933 	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16934 		bp->flags |= BNXT_FLAG_STRIP_VLAN;
16935 
16936 	rc = bnxt_init_int_mode(bp);
16937 	if (rc)
16938 		goto init_err_pci_clean;
16939 
16940 	/* No TC has been set yet and rings may have been trimmed due to
16941 	 * limited MSIX, so we re-initialize the TX rings per TC.
16942 	 */
16943 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16944 
16945 	if (BNXT_PF(bp)) {
16946 		if (!bnxt_pf_wq) {
16947 			bnxt_pf_wq =
16948 				create_singlethread_workqueue("bnxt_pf_wq");
16949 			if (!bnxt_pf_wq) {
16950 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
16951 				rc = -ENOMEM;
16952 				goto init_err_pci_clean;
16953 			}
16954 		}
16955 		rc = bnxt_init_tc(bp);
16956 		if (rc)
16957 			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16958 				   rc);
16959 	}
16960 
16961 	bnxt_inv_fw_health_reg(bp);
16962 	rc = bnxt_dl_register(bp);
16963 	if (rc)
16964 		goto init_err_dl;
16965 
16966 	INIT_LIST_HEAD(&bp->usr_fltr_list);
16967 
16968 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16969 		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16970 
16971 	dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
16972 	if (BNXT_SUPPORTS_QUEUE_API(bp))
16973 		dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16974 	dev->netmem_tx = true;
16975 
16976 	rc = register_netdev(dev);
16977 	if (rc)
16978 		goto init_err_cleanup;
16979 
16980 	bnxt_dl_fw_reporters_create(bp);
16981 
16982 	bnxt_rdma_aux_device_add(bp);
16983 
16984 	bnxt_print_device_info(bp);
16985 
16986 	pci_save_state(pdev);
16987 
16988 	return 0;
16989 init_err_cleanup:
16990 	bnxt_rdma_aux_device_uninit(bp);
16991 	bnxt_dl_unregister(bp);
16992 init_err_dl:
16993 	bnxt_shutdown_tc(bp);
16994 	bnxt_clear_int_mode(bp);
16995 
16996 init_err_pci_clean:
16997 	bnxt_hwrm_func_drv_unrgtr(bp);
16998 	bnxt_ptp_clear(bp);
16999 	kfree(bp->ptp_cfg);
17000 	bp->ptp_cfg = NULL;
17001 	bnxt_free_hwrm_resources(bp);
17002 	bnxt_hwmon_uninit(bp);
17003 	bnxt_ethtool_free(bp);
17004 	kfree(bp->fw_health);
17005 	bp->fw_health = NULL;
17006 	bnxt_cleanup_pci(bp);
17007 	bnxt_free_ctx_mem(bp, true);
17008 	bnxt_free_crash_dump_mem(bp);
17009 	kfree(bp->rss_indir_tbl);
17010 	bp->rss_indir_tbl = NULL;
17011 
17012 init_err_free:
17013 	free_netdev(dev);
17014 	return rc;
17015 }
17016 
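/* PCI shutdown handler: close the netdev, unregister the driver from
 * firmware and, on system power-off, arm WoL and move the device to D3hot.
 */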
17017 static void bnxt_shutdown(struct pci_dev *pdev)
17018 {
17019 	struct net_device *dev = pci_get_drvdata(pdev);
17020 	struct bnxt *bp;
17021 
17022 	if (!dev)
17023 		return;
17024 
17025 	rtnl_lock();
17026 	netdev_lock(dev);
17027 	bp = netdev_priv(dev);
17028 	if (!bp)
17029 		goto shutdown_exit;
17030 
17031 	if (netif_running(dev))
17032 		netif_close(dev);
17033 
17034 	if (bnxt_hwrm_func_drv_unrgtr(bp)) {
17035 		pcie_flr(pdev);
17036 		goto shutdown_exit;
17037 	}
17038 	bnxt_ptp_clear(bp);
17039 	bnxt_clear_int_mode(bp);
17040 	pci_disable_device(pdev);
17041 
17042 	if (system_state == SYSTEM_POWER_OFF) {
17043 		pci_wake_from_d3(pdev, bp->wol);
17044 		pci_set_power_state(pdev, PCI_D3hot);
17045 	}
17046 
17047 shutdown_exit:
17048 	netdev_unlock(dev);
17049 	rtnl_unlock();
17050 }
17051 
17052 #ifdef CONFIG_PM_SLEEP
17053 static int bnxt_suspend(struct device *device)
17054 {
17055 	struct net_device *dev = dev_get_drvdata(device);
17056 	struct bnxt *bp = netdev_priv(dev);
17057 	int rc = 0;
17058 
17059 	bnxt_ulp_stop(bp);
17060 
17061 	netdev_lock(dev);
17062 	if (netif_running(dev)) {
17063 		netif_device_detach(dev);
17064 		rc = bnxt_close(dev);
17065 	}
17066 	bnxt_hwrm_func_drv_unrgtr(bp);
17067 	bnxt_ptp_clear(bp);
17068 	pci_disable_device(bp->pdev);
17069 	bnxt_free_ctx_mem(bp, false);
17070 	netdev_unlock(dev);
17071 	return rc;
17072 }
17073 
17074 static int bnxt_resume(struct device *device)
17075 {
17076 	struct net_device *dev = dev_get_drvdata(device);
17077 	struct bnxt *bp = netdev_priv(dev);
17078 	int rc = 0;
17079 
17080 	netdev_lock(dev);
17081 	rc = pci_enable_device(bp->pdev);
17082 	if (rc) {
17083 		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
17084 			   rc);
17085 		goto resume_exit;
17086 	}
17087 	pci_set_master(bp->pdev);
17088 	if (bnxt_hwrm_ver_get(bp)) {
17089 		rc = -ENODEV;
17090 		goto resume_exit;
17091 	}
17092 	rc = bnxt_hwrm_func_reset(bp);
17093 	if (rc) {
17094 		rc = -EBUSY;
17095 		goto resume_exit;
17096 	}
17097 
17098 	rc = bnxt_hwrm_func_qcaps(bp);
17099 	if (rc)
17100 		goto resume_exit;
17101 
17102 	bnxt_clear_reservations(bp, true);
17103 
17104 	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
17105 		rc = -ENODEV;
17106 		goto resume_exit;
17107 	}
17108 	if (bp->fw_crash_mem)
17109 		bnxt_hwrm_crash_dump_mem_cfg(bp);
17110 
17111 	if (bnxt_ptp_init(bp)) {
17112 		kfree(bp->ptp_cfg);
17113 		bp->ptp_cfg = NULL;
17114 	}
17115 	bnxt_get_wol_settings(bp);
17116 	if (netif_running(dev)) {
17117 		rc = bnxt_open(dev);
17118 		if (!rc)
17119 			netif_device_attach(dev);
17120 	}
17121 
17122 resume_exit:
17123 	netdev_unlock(bp->dev);
17124 	bnxt_ulp_start(bp, rc);
17125 	if (!rc)
17126 		bnxt_reenable_sriov(bp);
17127 	return rc;
17128 }
17129 
17130 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
17131 #define BNXT_PM_OPS (&bnxt_pm_ops)
17132 
17133 #else
17134 
17135 #define BNXT_PM_OPS NULL
17136 
17137 #endif /* CONFIG_PM_SLEEP */
17138 
17139 /**
17140  * bnxt_io_error_detected - called when PCI error is detected
17141  * @pdev: Pointer to PCI device
17142  * @state: The current pci connection state
17143  *
17144  * This function is called after a PCI bus error affecting
17145  * this device has been detected.
17146  */
17147 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
17148 					       pci_channel_state_t state)
17149 {
17150 	struct net_device *netdev = pci_get_drvdata(pdev);
17151 	struct bnxt *bp = netdev_priv(netdev);
17152 	bool abort = false;
17153 
17154 	netdev_info(netdev, "PCI I/O error detected\n");
17155 
17156 	bnxt_ulp_stop(bp);
17157 
17158 	netdev_lock(netdev);
17159 	netif_device_detach(netdev);
17160 
17161 	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
17162 		netdev_err(bp->dev, "Firmware reset already in progress\n");
17163 		abort = true;
17164 	}
17165 
17166 	if (abort || state == pci_channel_io_perm_failure) {
17167 		netdev_unlock(netdev);
17168 		return PCI_ERS_RESULT_DISCONNECT;
17169 	}
17170 
17171 	/* The link is no longer reliable if the state is pci_channel_io_frozen,
17172 	 * so disable bus mastering to prevent any potential bad DMAs before
17173 	 * freeing kernel memory.
17174 	 */
17175 	if (state == pci_channel_io_frozen) {
17176 		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
17177 		bnxt_fw_fatal_close(bp);
17178 	}
17179 
17180 	if (netif_running(netdev))
17181 		__bnxt_close_nic(bp, true, true);
17182 
17183 	if (pci_is_enabled(pdev))
17184 		pci_disable_device(pdev);
17185 	bnxt_free_ctx_mem(bp, false);
17186 	netdev_unlock(netdev);
17187 
17188 	/* Request a slot reset. */
17189 	return PCI_ERS_RESULT_NEED_RESET;
17190 }
17191 
17192 /**
17193  * bnxt_io_slot_reset - called after the pci bus has been reset.
17194  * @pdev: Pointer to PCI device
17195  *
17196  * Restart the card from scratch, as if from a cold-boot.
17197  * At this point, the card has experienced a hard reset,
17198  * followed by fixups by BIOS, and has its config space
17199  * set up identically to what it was at cold boot.
17200  */
17201 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
17202 {
17203 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17204 	struct net_device *netdev = pci_get_drvdata(pdev);
17205 	struct bnxt *bp = netdev_priv(netdev);
17206 	int retry = 0;
17207 	int err = 0;
17208 	int off;
17209 
17210 	netdev_info(bp->dev, "PCI Slot Reset\n");
17211 
17212 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17213 	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17214 		msleep(900);
17215 
17216 	netdev_lock(netdev);
17217 
17218 	if (pci_enable_device(pdev)) {
17219 		dev_err(&pdev->dev,
17220 			"Cannot re-enable PCI device after reset.\n");
17221 	} else {
17222 		pci_set_master(pdev);
17223 		/* Upon a fatal error, the device's internal logic that latches
17224 		 * the BAR values is reset and is restored only by rewriting
17225 		 * the BARs.
17226 		 *
17227 		 * As pci_restore_state() does not rewrite a BAR whose value is
17228 		 * the same as the previously saved value, the driver writes the
17229 		 * BARs to 0 to force a restore after a fatal error.
17230 		 */
17231 		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17232 				       &bp->state)) {
17233 			for (off = PCI_BASE_ADDRESS_0;
17234 			     off <= PCI_BASE_ADDRESS_5; off += 4)
17235 				pci_write_config_dword(bp->pdev, off, 0);
17236 		}
17237 		pci_restore_state(pdev);
17238 		pci_save_state(pdev);
17239 
17240 		bnxt_inv_fw_health_reg(bp);
17241 		bnxt_try_map_fw_health_reg(bp);
17242 
17243 		/* In some PCIe AER scenarios, firmware may take up to
17244 		 * 10 seconds to become ready in the worst case.
17245 		 */
17246 		do {
17247 			err = bnxt_try_recover_fw(bp);
17248 			if (!err)
17249 				break;
17250 			retry++;
17251 		} while (retry < BNXT_FW_SLOT_RESET_RETRY);
17252 
17253 		if (err) {
17254 			dev_err(&pdev->dev, "Firmware not ready\n");
17255 			goto reset_exit;
17256 		}
17257 
17258 		err = bnxt_hwrm_func_reset(bp);
17259 		if (!err)
17260 			result = PCI_ERS_RESULT_RECOVERED;
17261 
17262 		/* IRQ will be initialized later in bnxt_io_resume */
17263 		bnxt_ulp_irq_stop(bp);
17264 		bnxt_clear_int_mode(bp);
17265 	}
17266 
17267 reset_exit:
17268 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17269 	bnxt_clear_reservations(bp, true);
17270 	netdev_unlock(netdev);
17271 
17272 	return result;
17273 }
17274 
17275 /**
17276  * bnxt_io_resume - called when traffic can start flowing again.
17277  * @pdev: Pointer to PCI device
17278  *
17279  * This callback is called when the error recovery driver tells
17280  * us that it's OK to resume normal operation.
17281  */
17282 static void bnxt_io_resume(struct pci_dev *pdev)
17283 {
17284 	struct net_device *netdev = pci_get_drvdata(pdev);
17285 	struct bnxt *bp = netdev_priv(netdev);
17286 	int err;
17287 
17288 	netdev_info(bp->dev, "PCI Slot Resume\n");
17289 	netdev_lock(netdev);
17290 
17291 	err = bnxt_hwrm_func_qcaps(bp);
17292 	if (!err) {
17293 		if (netif_running(netdev)) {
17294 			err = bnxt_open(netdev);
17295 		} else {
17296 			err = bnxt_reserve_rings(bp, true);
17297 			if (!err)
17298 				err = bnxt_init_int_mode(bp);
17299 		}
17300 	}
17301 
17302 	if (!err)
17303 		netif_device_attach(netdev);
17304 
17305 	netdev_unlock(netdev);
17306 	bnxt_ulp_start(bp, err);
17307 	if (!err)
17308 		bnxt_reenable_sriov(bp);
17309 }
17310 
17311 static const struct pci_error_handlers bnxt_err_handler = {
17312 	.error_detected	= bnxt_io_error_detected,
17313 	.slot_reset	= bnxt_io_slot_reset,
17314 	.resume		= bnxt_io_resume
17315 };
17316 
17317 static struct pci_driver bnxt_pci_driver = {
17318 	.name		= DRV_MODULE_NAME,
17319 	.id_table	= bnxt_pci_tbl,
17320 	.probe		= bnxt_init_one,
17321 	.remove		= bnxt_remove_one,
17322 	.shutdown	= bnxt_shutdown,
17323 	.driver.pm	= BNXT_PM_OPS,
17324 	.err_handler	= &bnxt_err_handler,
17325 #if defined(CONFIG_BNXT_SRIOV)
17326 	.sriov_configure = bnxt_sriov_configure,
17327 #endif
17328 };
17329 
17330 static int __init bnxt_init(void)
17331 {
17332 	int err;
17333 
17334 	bnxt_debug_init();
17335 	err = pci_register_driver(&bnxt_pci_driver);
17336 	if (err) {
17337 		bnxt_debug_exit();
17338 		return err;
17339 	}
17340 
17341 	return 0;
17342 }
17343 
17344 static void __exit bnxt_exit(void)
17345 {
17346 	pci_unregister_driver(&bnxt_pci_driver);
17347 	if (bnxt_pf_wq)
17348 		destroy_workqueue(bnxt_pf_wq);
17349 	bnxt_debug_exit();
17350 }
17351 
17352 module_init(bnxt_init);
17353 module_exit(bnxt_exit);
17354