1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62 
63 #include "bnxt.h"
64 #include "bnxt_hwrm.h"
65 #include "bnxt_ulp.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77 
78 #define BNXT_TX_TIMEOUT		(5 * HZ)
79 #define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
80 				 NETIF_MSG_TX_ERR)
81 
82 MODULE_IMPORT_NS("NETDEV_INTERNAL");
83 MODULE_LICENSE("GPL");
84 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
85 
86 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
87 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
88 
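/* Threshold (in bytes) below which a TX packet may be "pushed": its BDs
 * and data are copied inline through the doorbell region instead of
 * being DMA-mapped.  See the push path in bnxt_start_xmit().
 */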
89 #define BNXT_TX_PUSH_THRESH 164
90 
91 /* indexed by enum board_idx */
92 static const struct {
93 	char *name;
94 } board_info[] = {
95 	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
96 	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
97 	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
98 	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
99 	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
100 	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
101 	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
102 	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
103 	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
104 	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
105 	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
106 	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
107 	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
108 	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
109 	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
110 	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
111 	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
112 	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
113 	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
114 	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
115 	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
116 	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
117 	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
118 	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
119 	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
120 	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
121 	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
122 	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
123 	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
124 	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
126 	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
127 	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
128 	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
129 	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
130 	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
131 	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
132 	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
133 	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
134 	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
135 	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
136 	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
137 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
138 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
139 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
140 	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
141 	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
142 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
143 	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
144 	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
145 	[NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
146 };
147 
148 static const struct pci_device_id bnxt_pci_tbl[] = {
149 	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
150 	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
151 	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
152 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
153 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
154 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
155 	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
156 	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
157 	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
158 	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
159 	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
160 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
161 	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
162 	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
163 	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
164 	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
165 	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
166 	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
167 	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
168 	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
169 	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
170 	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
171 	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
172 	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
173 	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
174 	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
175 	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
176 	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
177 	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
178 	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
179 	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
180 	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
181 	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
182 	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
183 	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
184 	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
185 	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
186 	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
187 	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
188 	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
189 	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
190 	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
191 	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
192 	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
193 	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
194 	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
195 	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
196 	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
197 	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
198 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
199 #ifdef CONFIG_BNXT_SRIOV
200 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
201 	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
202 	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
203 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
204 	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
205 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
206 	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
207 	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
208 	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
209 	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
210 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215 	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
216 	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
217 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
218 	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
219 	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
220 	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
221 	{ PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
222 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
223 #endif
224 	{ 0 }
225 };
226 
227 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
228 
229 static const u16 bnxt_vf_req_snif[] = {
230 	HWRM_FUNC_CFG,
231 	HWRM_FUNC_VF_CFG,
232 	HWRM_PORT_PHY_QCFG,
233 	HWRM_CFA_L2_FILTER_ALLOC,
234 };
235 
236 static const u16 bnxt_async_events_arr[] = {
237 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
238 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
239 	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
240 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
241 	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
242 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
243 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
244 	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
245 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
246 	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
247 	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
248 	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
249 	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
250 	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
251 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
252 	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
253 	ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
254 };
255 
256 const u16 bnxt_bstore_to_trace[] = {
257 	[BNXT_CTX_SRT]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
258 	[BNXT_CTX_SRT2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
259 	[BNXT_CTX_CRT]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
260 	[BNXT_CTX_CRT2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
261 	[BNXT_CTX_RIGP0]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
262 	[BNXT_CTX_L2HWRM]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
263 	[BNXT_CTX_REHWRM]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
264 	[BNXT_CTX_CA0]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
265 	[BNXT_CTX_CA1]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
266 	[BNXT_CTX_CA2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
267 	[BNXT_CTX_RIGP1]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
268 	[BNXT_CTX_KONG]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
269 	[BNXT_CTX_QPC]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
270 };
271 
272 static struct workqueue_struct *bnxt_pf_wq;
273 
274 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
275 			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
276 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
277 
278 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
279 	.ports = {
280 		.src = 0,
281 		.dst = 0,
282 	},
283 	.addrs = {
284 		.v6addrs = {
285 			.src = BNXT_IPV6_MASK_NONE,
286 			.dst = BNXT_IPV6_MASK_NONE,
287 		},
288 	},
289 };
290 
291 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
292 	.ports = {
293 		.src = cpu_to_be16(0xffff),
294 		.dst = cpu_to_be16(0xffff),
295 	},
296 	.addrs = {
297 		.v6addrs = {
298 			.src = BNXT_IPV6_MASK_ALL,
299 			.dst = BNXT_IPV6_MASK_ALL,
300 		},
301 	},
302 };
303 
304 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
305 	.ports = {
306 		.src = cpu_to_be16(0xffff),
307 		.dst = cpu_to_be16(0xffff),
308 	},
309 	.addrs = {
310 		.v4addrs = {
311 			.src = cpu_to_be32(0xffffffff),
312 			.dst = cpu_to_be32(0xffffffff),
313 		},
314 	},
315 };
316 
317 static bool bnxt_vf_pciid(enum board_idx idx)
318 {
319 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
320 		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
321 		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
322 		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
323 		idx == NETXTREME_E_P7_VF_HV);
324 }
325 
326 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
327 #define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
328 
329 #define BNXT_DB_CQ(db, idx)						\
330 	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
331 
332 #define BNXT_DB_NQ_P5(db, idx)						\
333 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
334 		    (db)->doorbell)
335 
336 #define BNXT_DB_NQ_P7(db, idx)						\
337 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
338 		    DB_RING_IDX(db, idx), (db)->doorbell)
339 
340 #define BNXT_DB_CQ_ARM(db, idx)						\
341 	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
342 
343 #define BNXT_DB_NQ_ARM_P5(db, idx)					\
344 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
345 		    DB_RING_IDX(db, idx), (db)->doorbell)
346 
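/* Doorbell helpers.  P5 and later chips take a single 64-bit doorbell
 * write (bnxt_writeq() with the ring's db_key64); older chips use a
 * 32-bit writel().  The NQ variants ring the notification queue, while
 * the CQ variants update and optionally re-arm the completion queue.
 */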
347 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
348 {
349 	if (bp->flags & BNXT_FLAG_CHIP_P7)
350 		BNXT_DB_NQ_P7(db, idx);
351 	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
352 		BNXT_DB_NQ_P5(db, idx);
353 	else
354 		BNXT_DB_CQ(db, idx);
355 }
356 
357 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
358 {
359 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
360 		BNXT_DB_NQ_ARM_P5(db, idx);
361 	else
362 		BNXT_DB_CQ_ARM(db, idx);
363 }
364 
365 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
366 {
367 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
368 		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
369 			    DB_RING_IDX(db, idx), db->doorbell);
370 	else
371 		BNXT_DB_CQ(db, idx);
372 }
373 
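/* Slow-path and FW reset work: the PF queues it on the dedicated
 * bnxt_pf_wq workqueue, VFs use the system workqueue.  FW reset work is
 * dropped unless a firmware reset is actually in progress.
 */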
374 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
375 {
376 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
377 		return;
378 
379 	if (BNXT_PF(bp))
380 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
381 	else
382 		schedule_delayed_work(&bp->fw_reset_task, delay);
383 }
384 
385 static void __bnxt_queue_sp_work(struct bnxt *bp)
386 {
387 	if (BNXT_PF(bp))
388 		queue_work(bnxt_pf_wq, &bp->sp_task);
389 	else
390 		schedule_work(&bp->sp_task);
391 }
392 
393 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
394 {
395 	set_bit(event, &bp->sp_event);
396 	__bnxt_queue_sp_work(bp);
397 }
398 
399 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
400 {
401 	if (!rxr->bnapi->in_reset) {
402 		rxr->bnapi->in_reset = true;
403 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
404 			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
405 		else
406 			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
407 		__bnxt_queue_sp_work(bp);
408 	}
409 	rxr->rx_next_cons = 0xffff;
410 }
411 
412 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
413 			  u16 curr)
414 {
415 	struct bnxt_napi *bnapi = txr->bnapi;
416 
417 	if (bnapi->tx_fault)
418 		return;
419 
420 	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
421 		   txr->txq_index, txr->tx_hw_cons,
422 		   txr->tx_cons, txr->tx_prod, curr);
423 	WARN_ON_ONCE(1);
424 	bnapi->tx_fault = 1;
425 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
426 }
427 
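/* TX BD length hint lookup table, indexed by the packet length in
 * 512-byte units ("length >>= 9" in bnxt_start_xmit()).  All lengths of
 * 2KB and above share the same hint.
 */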
428 const u16 bnxt_lhint_arr[] = {
429 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
430 	TX_BD_FLAGS_LHINT_512_TO_1023,
431 	TX_BD_FLAGS_LHINT_1024_TO_2047,
432 	TX_BD_FLAGS_LHINT_1024_TO_2047,
433 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
444 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
445 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
446 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
447 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
448 };
449 
450 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
451 {
452 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
453 
454 	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
455 		return 0;
456 
457 	return md_dst->u.port_info.port_id;
458 }
459 
460 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
461 			     u16 prod)
462 {
463 	/* Sync BD data before updating doorbell */
464 	wmb();
465 	bnxt_db_write(bp, &txr->tx_db, prod);
466 	txr->kick_pending = 0;
467 }
468 
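/* Main transmit routine.  A packet no longer than bp->tx_push_thresh,
 * with no special lflags and an otherwise empty ring, is copied inline
 * into the push buffer and written straight through the doorbell.  All
 * other packets take the normal_tx path: the linear head and each
 * fragment are DMA-mapped into long TX BDs.  The doorbell kick is
 * deferred while the stack signals xmit_more.
 */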
469 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
470 {
471 	struct bnxt *bp = netdev_priv(dev);
472 	struct tx_bd *txbd, *txbd0;
473 	struct tx_bd_ext *txbd1;
474 	struct netdev_queue *txq;
475 	int i;
476 	dma_addr_t mapping;
477 	unsigned int length, pad = 0;
478 	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
479 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
480 	struct pci_dev *pdev = bp->pdev;
481 	u16 prod, last_frag, txts_prod;
482 	struct bnxt_tx_ring_info *txr;
483 	struct bnxt_sw_tx_bd *tx_buf;
484 	__le32 lflags = 0;
485 	skb_frag_t *frag;
486 
487 	i = skb_get_queue_mapping(skb);
488 	if (unlikely(i >= bp->tx_nr_rings)) {
489 		dev_kfree_skb_any(skb);
490 		dev_core_stats_tx_dropped_inc(dev);
491 		return NETDEV_TX_OK;
492 	}
493 
494 	txq = netdev_get_tx_queue(dev, i);
495 	txr = &bp->tx_ring[bp->tx_ring_map[i]];
496 	prod = txr->tx_prod;
497 
498 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
499 	if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
500 		netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d.  SKB will be linearized.\n",
501 				 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
502 		if (skb_linearize(skb)) {
503 			dev_kfree_skb_any(skb);
504 			dev_core_stats_tx_dropped_inc(dev);
505 			return NETDEV_TX_OK;
506 		}
507 	}
508 #endif
509 	free_size = bnxt_tx_avail(bp, txr);
510 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
511 		/* We must have raced with NAPI cleanup */
512 		if (net_ratelimit() && txr->kick_pending)
513 			netif_warn(bp, tx_err, dev,
514 				   "bnxt: ring busy w/ flush pending!\n");
515 		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
516 					bp->tx_wake_thresh))
517 			return NETDEV_TX_BUSY;
518 	}
519 
520 	length = skb->len;
521 	len = skb_headlen(skb);
522 	last_frag = skb_shinfo(skb)->nr_frags;
523 
524 	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
525 
526 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
527 	tx_buf->skb = skb;
528 	tx_buf->nr_frags = last_frag;
529 
530 	vlan_tag_flags = 0;
531 	cfa_action = bnxt_xmit_get_cfa_action(skb);
532 	if (skb_vlan_tag_present(skb)) {
533 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
534 				 skb_vlan_tag_get(skb);
535 		/* Currently supports 802.1Q and 802.1ad VLAN offloads;
536 		 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
537 		 */
538 		if (skb->vlan_proto == htons(ETH_P_8021Q))
539 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
540 	}
541 
542 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
543 	    ptp->tx_tstamp_en) {
544 		if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
545 			lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
546 			tx_buf->is_ts_pkt = 1;
547 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
548 		} else if (!skb_is_gso(skb)) {
549 			u16 seq_id, hdr_off;
550 
551 			if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
552 			    !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
553 				if (vlan_tag_flags)
554 					hdr_off += VLAN_HLEN;
555 				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
556 				tx_buf->is_ts_pkt = 1;
557 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
558 
559 				ptp->txts_req[txts_prod].tx_seqid = seq_id;
560 				ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
561 				tx_buf->txts_prod = txts_prod;
562 			}
563 		}
564 	}
565 	if (unlikely(skb->no_fcs))
566 		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
567 
568 	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
569 	    skb_frags_readable(skb) && !lflags) {
570 		struct tx_push_buffer *tx_push_buf = txr->tx_push;
571 		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
572 		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
573 		void __iomem *db = txr->tx_db.doorbell;
574 		void *pdata = tx_push_buf->data;
575 		u64 *end;
576 		int j, push_len;
577 
578 		/* Set COAL_NOW to be ready quickly for the next push */
579 		tx_push->tx_bd_len_flags_type =
580 			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
581 					TX_BD_TYPE_LONG_TX_BD |
582 					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
583 					TX_BD_FLAGS_COAL_NOW |
584 					TX_BD_FLAGS_PACKET_END |
585 					TX_BD_CNT(2));
586 
587 		if (skb->ip_summed == CHECKSUM_PARTIAL)
588 			tx_push1->tx_bd_hsize_lflags =
589 					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
590 		else
591 			tx_push1->tx_bd_hsize_lflags = 0;
592 
593 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
594 		tx_push1->tx_bd_cfa_action =
595 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
596 
597 		end = pdata + length;
598 		end = PTR_ALIGN(end, 8) - 1;
599 		*end = 0;
600 
601 		skb_copy_from_linear_data(skb, pdata, len);
602 		pdata += len;
603 		for (j = 0; j < last_frag; j++) {
604 			void *fptr;
605 
606 			frag = &skb_shinfo(skb)->frags[j];
607 			fptr = skb_frag_address_safe(frag);
608 			if (!fptr)
609 				goto normal_tx;
610 
611 			memcpy(pdata, fptr, skb_frag_size(frag));
612 			pdata += skb_frag_size(frag);
613 		}
614 
615 		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
616 		txbd->tx_bd_haddr = txr->data_mapping;
617 		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
618 		prod = NEXT_TX(prod);
619 		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
620 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
621 		memcpy(txbd, tx_push1, sizeof(*txbd));
622 		prod = NEXT_TX(prod);
623 		tx_push->doorbell =
624 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
625 				    DB_RING_IDX(&txr->tx_db, prod));
626 		WRITE_ONCE(txr->tx_prod, prod);
627 
628 		tx_buf->is_push = 1;
629 		netdev_tx_sent_queue(txq, skb->len);
630 		wmb();	/* Sync is_push and byte queue before pushing data */
631 
632 		push_len = (length + sizeof(*tx_push) + 7) / 8;
633 		if (push_len > 16) {
634 			__iowrite64_copy(db, tx_push_buf, 16);
635 			__iowrite32_copy(db + 4, tx_push_buf + 1,
636 					 (push_len - 16) << 1);
637 		} else {
638 			__iowrite64_copy(db, tx_push_buf, push_len);
639 		}
640 
641 		goto tx_done;
642 	}
643 
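	/* Non-push path: pad runt frames up to BNXT_MIN_PKT_SIZE, DMA-map
	 * the head and fragments, and fill one long TX BD per piece plus
	 * an extended BD (txbd1) carrying LSO/checksum and VLAN metadata.
	 */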
644 normal_tx:
645 	if (length < BNXT_MIN_PKT_SIZE) {
646 		pad = BNXT_MIN_PKT_SIZE - length;
647 		if (skb_pad(skb, pad))
648 			/* SKB already freed. */
649 			goto tx_kick_pending;
650 		length = BNXT_MIN_PKT_SIZE;
651 	}
652 
653 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
654 
655 	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
656 		goto tx_free;
657 
658 	dma_unmap_addr_set(tx_buf, mapping, mapping);
659 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
660 		TX_BD_CNT(last_frag + 2);
661 
662 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
663 	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
664 
665 	prod = NEXT_TX(prod);
666 	txbd1 = (struct tx_bd_ext *)
667 		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
668 
669 	txbd1->tx_bd_hsize_lflags = lflags;
670 	if (skb_is_gso(skb)) {
671 		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
672 		u32 hdr_len;
673 
674 		if (skb->encapsulation) {
675 			if (udp_gso)
676 				hdr_len = skb_inner_transport_offset(skb) +
677 					  sizeof(struct udphdr);
678 			else
679 				hdr_len = skb_inner_tcp_all_headers(skb);
680 		} else if (udp_gso) {
681 			hdr_len = skb_transport_offset(skb) +
682 				  sizeof(struct udphdr);
683 		} else {
684 			hdr_len = skb_tcp_all_headers(skb);
685 		}
686 
687 		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
688 					TX_BD_FLAGS_T_IPID |
689 					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
690 		length = skb_shinfo(skb)->gso_size;
691 		txbd1->tx_bd_mss = cpu_to_le32(length);
692 		length += hdr_len;
693 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
694 		txbd1->tx_bd_hsize_lflags |=
695 			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
696 		txbd1->tx_bd_mss = 0;
697 	}
698 
699 	length >>= 9;
700 	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
701 		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
702 				     skb->len);
703 		i = 0;
704 		goto tx_dma_error;
705 	}
706 	flags |= bnxt_lhint_arr[length];
707 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
708 
709 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
710 	txbd1->tx_bd_cfa_action =
711 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
712 	txbd0 = txbd;
713 	for (i = 0; i < last_frag; i++) {
714 		frag = &skb_shinfo(skb)->frags[i];
715 		prod = NEXT_TX(prod);
716 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
717 
718 		len = skb_frag_size(frag);
719 		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
720 					   DMA_TO_DEVICE);
721 
722 		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
723 			goto tx_dma_error;
724 
725 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
726 		netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
727 					  mapping, mapping);
728 
729 		txbd->tx_bd_haddr = cpu_to_le64(mapping);
730 
731 		flags = len << TX_BD_LEN_SHIFT;
732 		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
733 	}
734 
735 	flags &= ~TX_BD_LEN;
736 	txbd->tx_bd_len_flags_type =
737 		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
738 			    TX_BD_FLAGS_PACKET_END);
739 
740 	netdev_tx_sent_queue(txq, skb->len);
741 
742 	skb_tx_timestamp(skb);
743 
744 	prod = NEXT_TX(prod);
745 	WRITE_ONCE(txr->tx_prod, prod);
746 
747 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
748 		bnxt_txr_db_kick(bp, txr, prod);
749 	} else {
750 		if (free_size >= bp->tx_wake_thresh)
751 			txbd0->tx_bd_len_flags_type |=
752 				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
753 		txr->kick_pending = 1;
754 	}
755 
756 tx_done:
757 
758 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
759 		if (netdev_xmit_more() && !tx_buf->is_push) {
760 			txbd0->tx_bd_len_flags_type &=
761 				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
762 			bnxt_txr_db_kick(bp, txr, prod);
763 		}
764 
765 		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
766 				   bp->tx_wake_thresh);
767 	}
768 	return NETDEV_TX_OK;
769 
770 tx_dma_error:
771 	last_frag = i;
772 
773 	/* start back at beginning and unmap skb */
774 	prod = txr->tx_prod;
775 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
776 	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
777 			 skb_headlen(skb), DMA_TO_DEVICE);
778 	prod = NEXT_TX(prod);
779 
780 	/* unmap remaining mapped pages */
781 	for (i = 0; i < last_frag; i++) {
782 		prod = NEXT_TX(prod);
783 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
784 		frag = &skb_shinfo(skb)->frags[i];
785 		netmem_dma_unmap_page_attrs(&pdev->dev,
786 					    dma_unmap_addr(tx_buf, mapping),
787 					    skb_frag_size(frag),
788 					    DMA_TO_DEVICE, 0);
789 	}
790 
791 tx_free:
792 	dev_kfree_skb_any(skb);
793 tx_kick_pending:
794 	if (BNXT_TX_PTP_IS_SET(lflags)) {
795 		txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
796 		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
797 		if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
798 			/* set SKB to err so PTP worker will clean up */
799 			ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
800 	}
801 	if (txr->kick_pending)
802 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
803 	txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
804 	dev_core_stats_tx_dropped_inc(dev);
805 	return NETDEV_TX_OK;
806 }
807 
808 /* Returns true if some TX packets remain unprocessed. */
809 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
810 			  int budget)
811 {
812 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
813 	struct pci_dev *pdev = bp->pdev;
814 	u16 hw_cons = txr->tx_hw_cons;
815 	unsigned int tx_bytes = 0;
816 	u16 cons = txr->tx_cons;
817 	skb_frag_t *frag;
818 	int tx_pkts = 0;
819 	bool rc = false;
820 
821 	while (RING_TX(bp, cons) != hw_cons) {
822 		struct bnxt_sw_tx_bd *tx_buf;
823 		struct sk_buff *skb;
824 		bool is_ts_pkt;
825 		int j, last;
826 
827 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
828 		skb = tx_buf->skb;
829 
830 		if (unlikely(!skb)) {
831 			bnxt_sched_reset_txr(bp, txr, cons);
832 			return rc;
833 		}
834 
835 		is_ts_pkt = tx_buf->is_ts_pkt;
836 		if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
837 			rc = true;
838 			break;
839 		}
840 
841 		cons = NEXT_TX(cons);
842 		tx_pkts++;
843 		tx_bytes += skb->len;
844 		tx_buf->skb = NULL;
845 		tx_buf->is_ts_pkt = 0;
846 
847 		if (tx_buf->is_push) {
848 			tx_buf->is_push = 0;
849 			goto next_tx_int;
850 		}
851 
852 		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
853 				 skb_headlen(skb), DMA_TO_DEVICE);
854 		last = tx_buf->nr_frags;
855 
856 		for (j = 0; j < last; j++) {
857 			frag = &skb_shinfo(skb)->frags[j];
858 			cons = NEXT_TX(cons);
859 			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
860 			netmem_dma_unmap_page_attrs(&pdev->dev,
861 						    dma_unmap_addr(tx_buf,
862 								   mapping),
863 						    skb_frag_size(frag),
864 						    DMA_TO_DEVICE, 0);
865 		}
866 		if (unlikely(is_ts_pkt)) {
867 			if (BNXT_CHIP_P5(bp)) {
868 				/* PTP worker takes ownership of the skb */
869 				bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
870 				skb = NULL;
871 			}
872 		}
873 
874 next_tx_int:
875 		cons = NEXT_TX(cons);
876 
877 		napi_consume_skb(skb, budget);
878 	}
879 
880 	WRITE_ONCE(txr->tx_cons, cons);
881 
882 	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
883 				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
884 				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
885 
886 	return rc;
887 }
888 
889 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
890 {
891 	struct bnxt_tx_ring_info *txr;
892 	bool more = false;
893 	int i;
894 
895 	bnxt_for_each_napi_tx(i, bnapi, txr) {
896 		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
897 			more |= __bnxt_tx_int(bp, txr, budget);
898 	}
899 	if (!more)
900 		bnapi->events &= ~BNXT_TX_CMP_EVENT;
901 }
902 
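/* A separate head page pool is used for header/data buffers when the
 * ring allocates sub-page RX buffers (rx_page_size < PAGE_SIZE) or when
 * need_head_pool has been set explicitly.
 */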
903 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
904 {
905 	return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
906 }
907 
908 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
909 					 struct bnxt_rx_ring_info *rxr,
910 					 unsigned int *offset,
911 					 gfp_t gfp)
912 {
913 	struct page *page;
914 
915 	if (rxr->rx_page_size < PAGE_SIZE) {
916 		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
917 						rxr->rx_page_size);
918 	} else {
919 		page = page_pool_dev_alloc_pages(rxr->page_pool);
920 		*offset = 0;
921 	}
922 	if (!page)
923 		return NULL;
924 
925 	*mapping = page_pool_get_dma_addr(page) + *offset;
926 	return page;
927 }
928 
929 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
930 					 struct bnxt_rx_ring_info *rxr,
931 					 unsigned int *offset,
932 					 gfp_t gfp)
933 {
934 	netmem_ref netmem;
935 
936 	if (rxr->rx_page_size < PAGE_SIZE) {
937 		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
938 						     rxr->rx_page_size, gfp);
939 	} else {
940 		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
941 		*offset = 0;
942 	}
943 	if (!netmem)
944 		return 0;
945 
946 	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
947 	return netmem;
948 }
949 
950 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
951 				       struct bnxt_rx_ring_info *rxr,
952 				       gfp_t gfp)
953 {
954 	unsigned int offset;
955 	struct page *page;
956 
957 	page = page_pool_alloc_frag(rxr->head_pool, &offset,
958 				    bp->rx_buf_size, gfp);
959 	if (!page)
960 		return NULL;
961 
962 	*mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
963 	return page_address(page) + offset;
964 }
965 
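/* Allocate one RX buffer for ring position @prod.  In page mode the
 * buffer is a page or page fragment from the ring's page pool;
 * otherwise it is a smaller frag from the head pool.  The DMA address
 * is written into the hardware RX BD.
 */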
966 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
967 		       u16 prod, gfp_t gfp)
968 {
969 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
970 	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
971 	dma_addr_t mapping;
972 
973 	if (BNXT_RX_PAGE_MODE(bp)) {
974 		unsigned int offset;
975 		struct page *page =
976 			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
977 
978 		if (!page)
979 			return -ENOMEM;
980 
981 		mapping += bp->rx_dma_offset;
982 		rx_buf->data = page;
983 		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
984 	} else {
985 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
986 
987 		if (!data)
988 			return -ENOMEM;
989 
990 		rx_buf->data = data;
991 		rx_buf->data_ptr = data + bp->rx_offset;
992 	}
993 	rx_buf->mapping = mapping;
994 
995 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
996 	return 0;
997 }
998 
999 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
1000 {
1001 	u16 prod = rxr->rx_prod;
1002 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1003 	struct bnxt *bp = rxr->bnapi->bp;
1004 	struct rx_bd *cons_bd, *prod_bd;
1005 
1006 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1007 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1008 
1009 	prod_rx_buf->data = data;
1010 	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1011 
1012 	prod_rx_buf->mapping = cons_rx_buf->mapping;
1013 
1014 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1015 	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1016 
1017 	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1018 }
1019 
1020 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1021 {
1022 	u16 next, max = rxr->rx_agg_bmap_size;
1023 
1024 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1025 	if (next >= max)
1026 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1027 	return next;
1028 }
1029 
1030 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1031 				u16 prod, gfp_t gfp)
1032 {
1033 	struct rx_bd *rxbd =
1034 		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1035 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1036 	u16 sw_prod = rxr->rx_sw_agg_prod;
1037 	unsigned int offset = 0;
1038 	dma_addr_t mapping;
1039 	netmem_ref netmem;
1040 
1041 	netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
1042 	if (!netmem)
1043 		return -ENOMEM;
1044 
1045 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1046 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1047 
1048 	__set_bit(sw_prod, rxr->rx_agg_bmap);
1049 	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1050 	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1051 
1052 	rx_agg_buf->netmem = netmem;
1053 	rx_agg_buf->offset = offset;
1054 	rx_agg_buf->mapping = mapping;
1055 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1056 	rxbd->rx_bd_opaque = sw_prod;
1057 	return 0;
1058 }
1059 
1060 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1061 				       struct bnxt_cp_ring_info *cpr,
1062 				       u16 cp_cons, u16 curr)
1063 {
1064 	struct rx_agg_cmp *agg;
1065 
1066 	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1067 	agg = (struct rx_agg_cmp *)
1068 		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1069 	return agg;
1070 }
1071 
1072 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1073 					      struct bnxt_rx_ring_info *rxr,
1074 					      u16 agg_id, u16 curr)
1075 {
1076 	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1077 
1078 	return &tpa_info->agg_arr[curr];
1079 }
1080 
1081 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1082 				   u16 start, u32 agg_bufs, bool tpa)
1083 {
1084 	struct bnxt_napi *bnapi = cpr->bnapi;
1085 	struct bnxt *bp = bnapi->bp;
1086 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1087 	u16 prod = rxr->rx_agg_prod;
1088 	u16 sw_prod = rxr->rx_sw_agg_prod;
1089 	bool p5_tpa = false;
1090 	u32 i;
1091 
1092 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1093 		p5_tpa = true;
1094 
1095 	for (i = 0; i < agg_bufs; i++) {
1096 		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1097 		struct rx_agg_cmp *agg;
1098 		struct rx_bd *prod_bd;
1099 		netmem_ref netmem;
1100 		u16 cons;
1101 
1102 		if (p5_tpa)
1103 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1104 		else
1105 			agg = bnxt_get_agg(bp, cpr, idx, start + i);
1106 		cons = agg->rx_agg_cmp_opaque;
1107 		__clear_bit(cons, rxr->rx_agg_bmap);
1108 
1109 		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1110 			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1111 
1112 		__set_bit(sw_prod, rxr->rx_agg_bmap);
1113 		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1114 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1115 
1116 		/* It is possible for sw_prod to be equal to cons, so
1117 		 * set cons_rx_buf->netmem to 0 first.
1118 		 */
1119 		netmem = cons_rx_buf->netmem;
1120 		cons_rx_buf->netmem = 0;
1121 		prod_rx_buf->netmem = netmem;
1122 		prod_rx_buf->offset = cons_rx_buf->offset;
1123 
1124 		prod_rx_buf->mapping = cons_rx_buf->mapping;
1125 
1126 		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1127 
1128 		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1129 		prod_bd->rx_bd_opaque = sw_prod;
1130 
1131 		prod = NEXT_RX_AGG(prod);
1132 		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1133 	}
1134 	rxr->rx_agg_prod = prod;
1135 	rxr->rx_sw_agg_prod = sw_prod;
1136 }
1137 
1138 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1139 					      struct bnxt_rx_ring_info *rxr,
1140 					      u16 cons, void *data, u8 *data_ptr,
1141 					      dma_addr_t dma_addr,
1142 					      unsigned int offset_and_len)
1143 {
1144 	unsigned int len = offset_and_len & 0xffff;
1145 	struct page *page = data;
1146 	u16 prod = rxr->rx_prod;
1147 	struct sk_buff *skb;
1148 	int err;
1149 
1150 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1151 	if (unlikely(err)) {
1152 		bnxt_reuse_rx_data(rxr, cons, data);
1153 		return NULL;
1154 	}
1155 	dma_addr -= bp->rx_dma_offset;
1156 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1157 				bp->rx_dir);
1158 	skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
1159 	if (!skb) {
1160 		page_pool_recycle_direct(rxr->page_pool, page);
1161 		return NULL;
1162 	}
1163 	skb_mark_for_recycle(skb);
1164 	skb_reserve(skb, bp->rx_offset);
1165 	__skb_put(skb, len);
1166 
1167 	return skb;
1168 }
1169 
1170 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1171 					struct bnxt_rx_ring_info *rxr,
1172 					u16 cons, void *data, u8 *data_ptr,
1173 					dma_addr_t dma_addr,
1174 					unsigned int offset_and_len)
1175 {
1176 	unsigned int payload = offset_and_len >> 16;
1177 	unsigned int len = offset_and_len & 0xffff;
1178 	skb_frag_t *frag;
1179 	struct page *page = data;
1180 	u16 prod = rxr->rx_prod;
1181 	struct sk_buff *skb;
1182 	int off, err;
1183 
1184 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1185 	if (unlikely(err)) {
1186 		bnxt_reuse_rx_data(rxr, cons, data);
1187 		return NULL;
1188 	}
1189 	dma_addr -= bp->rx_dma_offset;
1190 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1191 				bp->rx_dir);
1192 
1193 	if (unlikely(!payload))
1194 		payload = eth_get_headlen(bp->dev, data_ptr, len);
1195 
1196 	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1197 	if (!skb) {
1198 		page_pool_recycle_direct(rxr->page_pool, page);
1199 		return NULL;
1200 	}
1201 
1202 	skb_mark_for_recycle(skb);
1203 	off = (void *)data_ptr - page_address(page);
1204 	skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
1205 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1206 	       payload + NET_IP_ALIGN);
1207 
1208 	frag = &skb_shinfo(skb)->frags[0];
1209 	skb_frag_size_sub(frag, payload);
1210 	skb_frag_off_add(frag, payload);
1211 	skb->data_len -= payload;
1212 	skb->tail += payload;
1213 
1214 	return skb;
1215 }
1216 
1217 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1218 				   struct bnxt_rx_ring_info *rxr, u16 cons,
1219 				   void *data, u8 *data_ptr,
1220 				   dma_addr_t dma_addr,
1221 				   unsigned int offset_and_len)
1222 {
1223 	u16 prod = rxr->rx_prod;
1224 	struct sk_buff *skb;
1225 	int err;
1226 
1227 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1228 	if (unlikely(err)) {
1229 		bnxt_reuse_rx_data(rxr, cons, data);
1230 		return NULL;
1231 	}
1232 
1233 	skb = napi_build_skb(data, bp->rx_buf_size);
1234 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1235 				bp->rx_dir);
1236 	if (!skb) {
1237 		page_pool_free_va(rxr->head_pool, data, true);
1238 		return NULL;
1239 	}
1240 
1241 	skb_mark_for_recycle(skb);
1242 	skb_reserve(skb, bp->rx_offset);
1243 	skb_put(skb, offset_and_len & 0xffff);
1244 	return skb;
1245 }
1246 
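/* Attach the @agg_bufs aggregation buffers of completion entry @idx
 * either to an skb (as page frags) or to an xdp_buff's shared info.
 * Each consumed agg buffer is replenished on the fly; if replenishment
 * fails, the remaining buffers are recycled back into the agg ring and
 * 0 is returned so the caller can drop the packet.
 */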
1247 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
1248 				 struct bnxt_cp_ring_info *cpr,
1249 				 u16 idx, u32 agg_bufs, bool tpa,
1250 				 struct sk_buff *skb,
1251 				 struct xdp_buff *xdp)
1252 {
1253 	struct bnxt_napi *bnapi = cpr->bnapi;
1254 	struct skb_shared_info *shinfo;
1255 	struct bnxt_rx_ring_info *rxr;
1256 	u32 i, total_frag_len = 0;
1257 	bool p5_tpa = false;
1258 	u16 prod;
1259 
1260 	rxr = bnapi->rx_ring;
1261 	prod = rxr->rx_agg_prod;
1262 
1263 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1264 		p5_tpa = true;
1265 
1266 	if (skb)
1267 		shinfo = skb_shinfo(skb);
1268 	else
1269 		shinfo = xdp_get_shared_info_from_buff(xdp);
1270 
1271 	for (i = 0; i < agg_bufs; i++) {
1272 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1273 		struct rx_agg_cmp *agg;
1274 		u16 cons, frag_len;
1275 		netmem_ref netmem;
1276 
1277 		if (p5_tpa)
1278 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1279 		else
1280 			agg = bnxt_get_agg(bp, cpr, idx, i);
1281 		cons = agg->rx_agg_cmp_opaque;
1282 		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1283 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1284 
1285 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1286 		if (skb) {
1287 			skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1288 					       cons_rx_buf->offset,
1289 					       frag_len, rxr->rx_page_size);
1290 		} else {
1291 			skb_frag_t *frag = &shinfo->frags[i];
1292 
1293 			skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1294 						  cons_rx_buf->offset,
1295 						  frag_len);
1296 			shinfo->nr_frags = i + 1;
1297 		}
1298 		__clear_bit(cons, rxr->rx_agg_bmap);
1299 
1300 		/* It is possible for bnxt_alloc_rx_netmem() to allocate
1301 		 * a sw_prod index that equals the cons index, so we
1302 		 * need to clear the cons entry now.
1303 		 */
1304 		netmem = cons_rx_buf->netmem;
1305 		cons_rx_buf->netmem = 0;
1306 
1307 		if (xdp && netmem_is_pfmemalloc(netmem))
1308 			xdp_buff_set_frag_pfmemalloc(xdp);
1309 
1310 		if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1311 			if (skb) {
1312 				skb->len -= frag_len;
1313 				skb->data_len -= frag_len;
1314 				skb->truesize -= rxr->rx_page_size;
1315 			}
1316 
1317 			--shinfo->nr_frags;
1318 			cons_rx_buf->netmem = netmem;
1319 
1320 			/* Update prod since possibly some netmems have been
1321 			 * allocated already.
1322 			 */
1323 			rxr->rx_agg_prod = prod;
1324 			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1325 			return 0;
1326 		}
1327 
1328 		page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1329 						  rxr->rx_page_size);
1330 
1331 		total_frag_len += frag_len;
1332 		prod = NEXT_RX_AGG(prod);
1333 	}
1334 	rxr->rx_agg_prod = prod;
1335 	return total_frag_len;
1336 }
1337 
1338 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
1339 					       struct bnxt_cp_ring_info *cpr,
1340 					       struct sk_buff *skb, u16 idx,
1341 					       u32 agg_bufs, bool tpa)
1342 {
1343 	u32 total_frag_len = 0;
1344 
1345 	total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1346 					       skb, NULL);
1347 	if (!total_frag_len) {
1348 		skb_mark_for_recycle(skb);
1349 		dev_kfree_skb(skb);
1350 		return NULL;
1351 	}
1352 
1353 	return skb;
1354 }
1355 
1356 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
1357 				   struct bnxt_cp_ring_info *cpr,
1358 				   struct xdp_buff *xdp, u16 idx,
1359 				   u32 agg_bufs, bool tpa)
1360 {
1361 	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1362 	u32 total_frag_len = 0;
1363 
1364 	if (!xdp_buff_has_frags(xdp))
1365 		shinfo->nr_frags = 0;
1366 
1367 	total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1368 					       NULL, xdp);
1369 	if (total_frag_len) {
1370 		xdp_buff_set_frags_flag(xdp);
1371 		shinfo->nr_frags = agg_bufs;
1372 		shinfo->xdp_frags_size = total_frag_len;
1373 	}
1374 	return total_frag_len;
1375 }
1376 
1377 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1378 			       u8 agg_bufs, u32 *raw_cons)
1379 {
1380 	u16 last;
1381 	struct rx_agg_cmp *agg;
1382 
1383 	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1384 	last = RING_CMP(*raw_cons);
1385 	agg = (struct rx_agg_cmp *)
1386 		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1387 	return RX_AGG_CMP_VALID(agg, *raw_cons);
1388 }
1389 
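/* Copybreak helper: copy a small received packet into a freshly
 * allocated skb, syncing bp->rx_copybreak bytes for the CPU and handing
 * the original ring buffer back to the device untouched.
 */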
1390 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1391 				      unsigned int len,
1392 				      dma_addr_t mapping)
1393 {
1394 	struct bnxt *bp = bnapi->bp;
1395 	struct pci_dev *pdev = bp->pdev;
1396 	struct sk_buff *skb;
1397 
1398 	skb = napi_alloc_skb(&bnapi->napi, len);
1399 	if (!skb)
1400 		return NULL;
1401 
1402 	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1403 				bp->rx_dir);
1404 
1405 	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1406 	       len + NET_IP_ALIGN);
1407 
1408 	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1409 				   bp->rx_dir);
1410 
1411 	skb_put(skb, len);
1412 
1413 	return skb;
1414 }
1415 
1416 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1417 				     unsigned int len,
1418 				     dma_addr_t mapping)
1419 {
1420 	return bnxt_copy_data(bnapi, data, len, mapping);
1421 }
1422 
1423 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1424 				     struct xdp_buff *xdp,
1425 				     unsigned int len,
1426 				     dma_addr_t mapping)
1427 {
1428 	unsigned int metasize = 0;
1429 	u8 *data = xdp->data;
1430 	struct sk_buff *skb;
1431 
1432 	len = xdp->data_end - xdp->data_meta;
1433 	metasize = xdp->data - xdp->data_meta;
1434 	data = xdp->data_meta;
1435 
1436 	skb = bnxt_copy_data(bnapi, data, len, mapping);
1437 	if (!skb)
1438 		return skb;
1439 
1440 	if (metasize) {
1441 		skb_metadata_set(skb, metasize);
1442 		__skb_pull(skb, metasize);
1443 	}
1444 
1445 	return skb;
1446 }
1447 
1448 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1449 			   u32 *raw_cons, void *cmp)
1450 {
1451 	struct rx_cmp *rxcmp = cmp;
1452 	u32 tmp_raw_cons = *raw_cons;
1453 	u8 cmp_type, agg_bufs = 0;
1454 
1455 	cmp_type = RX_CMP_TYPE(rxcmp);
1456 
1457 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1458 		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1459 			    RX_CMP_AGG_BUFS) >>
1460 			   RX_CMP_AGG_BUFS_SHIFT;
1461 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1462 		struct rx_tpa_end_cmp *tpa_end = cmp;
1463 
1464 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1465 			return 0;
1466 
1467 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1468 	}
1469 
1470 	if (agg_bufs) {
1471 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1472 			return -EBUSY;
1473 	}
1474 	*raw_cons = tmp_raw_cons;
1475 	return 0;
1476 }
1477 
1478 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1479 {
1480 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1481 	u16 idx = agg_id & MAX_TPA_P5_MASK;
1482 
1483 	if (test_bit(idx, map->agg_idx_bmap)) {
1484 		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
1485 		if (idx >= MAX_TPA_P5)
1486 			return INVALID_HW_RING_ID;
1487 	}
1488 	__set_bit(idx, map->agg_idx_bmap);
1489 	map->agg_id_tbl[agg_id] = idx;
1490 	return idx;
1491 }
1492 
1493 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1494 {
1495 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1496 
1497 	__clear_bit(idx, map->agg_idx_bmap);
1498 }
1499 
1500 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1501 {
1502 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1503 
1504 	return map->agg_id_tbl[agg_id];
1505 }
1506 
1507 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1508 			      struct rx_tpa_start_cmp *tpa_start,
1509 			      struct rx_tpa_start_cmp_ext *tpa_start1)
1510 {
1511 	tpa_info->cfa_code_valid = 1;
1512 	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1513 	tpa_info->vlan_valid = 0;
1514 	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1515 		tpa_info->vlan_valid = 1;
1516 		tpa_info->metadata =
1517 			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1518 	}
1519 }
1520 
1521 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1522 				 struct rx_tpa_start_cmp *tpa_start,
1523 				 struct rx_tpa_start_cmp_ext *tpa_start1)
1524 {
1525 	tpa_info->vlan_valid = 0;
1526 	if (TPA_START_VLAN_VALID(tpa_start)) {
1527 		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1528 		u32 vlan_proto = ETH_P_8021Q;
1529 
1530 		tpa_info->vlan_valid = 1;
1531 		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1532 			vlan_proto = ETH_P_8021AD;
1533 		tpa_info->metadata = vlan_proto << 16 |
1534 				     TPA_START_METADATA0_TCI(tpa_start1);
1535 	}
1536 }
1537 
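/* Handle a TPA (hardware receive aggregation) start completion: park
 * the current RX buffer in the per-ring rx_tpa[] slot for this agg ID,
 * record the hash/VLAN/header metadata needed at TPA end time, and
 * recycle a replacement buffer into the RX ring.  On P5+ chips the
 * hardware agg ID is first mapped to a software index by
 * bnxt_alloc_agg_idx().
 */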
1538 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1539 			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1540 			   struct rx_tpa_start_cmp_ext *tpa_start1)
1541 {
1542 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1543 	struct bnxt_tpa_info *tpa_info;
1544 	u16 cons, prod, agg_id;
1545 	struct rx_bd *prod_bd;
1546 	dma_addr_t mapping;
1547 
1548 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1549 		agg_id = TPA_START_AGG_ID_P5(tpa_start);
1550 		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1551 		if (unlikely(agg_id == INVALID_HW_RING_ID)) {
1552 			netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
1553 				    rxr->bnapi->index,
1554 				    TPA_START_AGG_ID_P5(tpa_start));
1555 			bnxt_sched_reset_rxr(bp, rxr);
1556 			return;
1557 		}
1558 	} else {
1559 		agg_id = TPA_START_AGG_ID(tpa_start);
1560 	}
1561 	cons = tpa_start->rx_tpa_start_cmp_opaque;
1562 	prod = rxr->rx_prod;
1563 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1564 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1565 	tpa_info = &rxr->rx_tpa[agg_id];
1566 
1567 	if (unlikely(cons != rxr->rx_next_cons ||
1568 		     TPA_START_ERROR(tpa_start))) {
1569 		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1570 			    cons, rxr->rx_next_cons,
1571 			    TPA_START_ERROR_CODE(tpa_start1));
1572 		bnxt_sched_reset_rxr(bp, rxr);
1573 		return;
1574 	}
1575 	prod_rx_buf->data = tpa_info->data;
1576 	prod_rx_buf->data_ptr = tpa_info->data_ptr;
1577 
1578 	mapping = tpa_info->mapping;
1579 	prod_rx_buf->mapping = mapping;
1580 
1581 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1582 
1583 	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1584 
1585 	tpa_info->data = cons_rx_buf->data;
1586 	tpa_info->data_ptr = cons_rx_buf->data_ptr;
1587 	cons_rx_buf->data = NULL;
1588 	tpa_info->mapping = cons_rx_buf->mapping;
1589 
1590 	tpa_info->len =
1591 		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1592 				RX_TPA_START_CMP_LEN_SHIFT;
1593 	if (likely(TPA_START_HASH_VALID(tpa_start))) {
1594 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
1595 		tpa_info->gso_type = SKB_GSO_TCPV4;
1596 		if (TPA_START_IS_IPV6(tpa_start1))
1597 			tpa_info->gso_type = SKB_GSO_TCPV6;
1598 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1599 		else if (!BNXT_CHIP_P4_PLUS(bp) &&
1600 			 TPA_START_HASH_TYPE(tpa_start) == 3)
1601 			tpa_info->gso_type = SKB_GSO_TCPV6;
1602 		tpa_info->rss_hash =
1603 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1604 	} else {
1605 		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1606 		tpa_info->gso_type = 0;
1607 		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1608 	}
1609 	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1610 	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1611 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1612 		bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1613 	else
1614 		bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1615 	tpa_info->agg_count = 0;
1616 
1617 	rxr->rx_prod = NEXT_RX(prod);
1618 	cons = RING_RX(bp, NEXT_RX(cons));
1619 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1620 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1621 
1622 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1623 	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1624 	cons_rx_buf->data = NULL;
1625 }
1626 
1627 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1628 {
1629 	if (agg_bufs)
1630 		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1631 }
1632 
1633 #ifdef CONFIG_INET
1634 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1635 {
1636 	struct udphdr *uh = NULL;
1637 
1638 	if (ip_proto == htons(ETH_P_IP)) {
1639 		struct iphdr *iph = (struct iphdr *)skb->data;
1640 
1641 		if (iph->protocol == IPPROTO_UDP)
1642 			uh = (struct udphdr *)(iph + 1);
1643 	} else {
1644 		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1645 
1646 		if (iph->nexthdr == IPPROTO_UDP)
1647 			uh = (struct udphdr *)(iph + 1);
1648 	}
1649 	if (uh) {
1650 		if (uh->check)
1651 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1652 		else
1653 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1654 	}
1655 }
1656 #endif
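
/* A nonzero checksum field in the outer UDP header selects
 * SKB_GSO_UDP_TUNNEL_CSUM so the stack regenerates the outer checksum
 * for each segment on resegmentation; a zero checksum selects plain
 * SKB_GSO_UDP_TUNNEL.
 */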
1657 
1658 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1659 					   int payload_off, int tcp_ts,
1660 					   struct sk_buff *skb)
1661 {
1662 #ifdef CONFIG_INET
1663 	struct tcphdr *th;
1664 	int len, nw_off;
1665 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1666 	u32 hdr_info = tpa_info->hdr_info;
1667 	bool loopback = false;
1668 
1669 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1670 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1671 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1672 
1673 	/* If the packet is an internal loopback packet, the offsets will
1674 	 * have an extra 4 bytes.
1675 	 */
1676 	if (inner_mac_off == 4) {
1677 		loopback = true;
1678 	} else if (inner_mac_off > 4) {
1679 		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1680 					    ETH_HLEN - 2));
1681 
1682 		/* We only support inner IPv4/IPv6.  If we don't see the
1683 		 * correct protocol ID, it must be a loopback packet where
1684 		 * the offsets are off by 4.
1685 		 */
1686 		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1687 			loopback = true;
1688 	}
1689 	if (loopback) {
1690 		/* internal loopback packet, subtract 4 from all offsets */
1691 		inner_ip_off -= 4;
1692 		inner_mac_off -= 4;
1693 		outer_ip_off -= 4;
1694 	}
1695 
1696 	nw_off = inner_ip_off - ETH_HLEN;
1697 	skb_set_network_header(skb, nw_off);
1698 	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1699 		struct ipv6hdr *iph = ipv6_hdr(skb);
1700 
1701 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1702 		len = skb->len - skb_transport_offset(skb);
1703 		th = tcp_hdr(skb);
1704 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1705 	} else {
1706 		struct iphdr *iph = ip_hdr(skb);
1707 
1708 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1709 		len = skb->len - skb_transport_offset(skb);
1710 		th = tcp_hdr(skb);
1711 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1712 	}
1713 
1714 	if (inner_mac_off) { /* tunnel */
1715 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1716 					    ETH_HLEN - 2));
1717 
1718 		bnxt_gro_tunnel(skb, proto);
1719 	}
1720 #endif
1721 	return skb;
1722 }
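
/* The ~tcp_v4_check()/~tcp_v6_check() stores above seed th->check with
 * the inverted TCP pseudo-header checksum of the merged length, which
 * is the state tcp_gro_complete() expects before it flags the skb for
 * checksum completion over the remaining payload.
 */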
1723 
1724 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1725 					   int payload_off, int tcp_ts,
1726 					   struct sk_buff *skb)
1727 {
1728 #ifdef CONFIG_INET
1729 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1730 	u32 hdr_info = tpa_info->hdr_info;
1731 	int iphdr_len, nw_off;
1732 
1733 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1734 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1735 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1736 
1737 	nw_off = inner_ip_off - ETH_HLEN;
1738 	skb_set_network_header(skb, nw_off);
1739 	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1740 		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1741 	skb_set_transport_header(skb, nw_off + iphdr_len);
1742 
1743 	if (inner_mac_off) { /* tunnel */
1744 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1745 					    ETH_HLEN - 2));
1746 
1747 		bnxt_gro_tunnel(skb, proto);
1748 	}
1749 #endif
1750 	return skb;
1751 }
1752 
1753 #define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
1754 #define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1755 
1756 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1757 					   int payload_off, int tcp_ts,
1758 					   struct sk_buff *skb)
1759 {
1760 #ifdef CONFIG_INET
1761 	struct tcphdr *th;
1762 	int len, nw_off, tcp_opt_len = 0;
1763 
1764 	if (tcp_ts)
1765 		tcp_opt_len = 12;
1766 
1767 	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1768 		struct iphdr *iph;
1769 
1770 		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1771 			 ETH_HLEN;
1772 		skb_set_network_header(skb, nw_off);
1773 		iph = ip_hdr(skb);
1774 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1775 		len = skb->len - skb_transport_offset(skb);
1776 		th = tcp_hdr(skb);
1777 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1778 	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1779 		struct ipv6hdr *iph;
1780 
1781 		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1782 			 ETH_HLEN;
1783 		skb_set_network_header(skb, nw_off);
1784 		iph = ipv6_hdr(skb);
1785 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1786 		len = skb->len - skb_transport_offset(skb);
1787 		th = tcp_hdr(skb);
1788 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1789 	} else {
1790 		dev_kfree_skb_any(skb);
1791 		return NULL;
1792 	}
1793 
1794 	if (nw_off) /* tunnel */
1795 		bnxt_gro_tunnel(skb, skb->protocol);
1796 #endif
1797 	return skb;
1798 }
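
/* Worked example of the offset math above, with assumed values: for an
 * untagged inner IPv4 packet with TCP timestamps, payload_off would be
 * 14 (ETH_HLEN) + 20 (IP) + 20 (TCP) + 12 (options) = 66, giving
 * nw_off = 66 - 40 - 12 - 14 = 0, i.e. no encapsulation.  Any nonzero
 * nw_off means tunnel headers precede the inner Ethernet header.
 */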
1799 
1800 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1801 					   struct bnxt_tpa_info *tpa_info,
1802 					   struct rx_tpa_end_cmp *tpa_end,
1803 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1804 					   struct sk_buff *skb,
1805 					   struct bnxt_rx_sw_stats *rx_stats)
1806 {
1807 #ifdef CONFIG_INET
1808 	int payload_off;
1809 	u16 segs;
1810 
1811 	segs = TPA_END_TPA_SEGS(tpa_end);
1812 	if (segs == 1)
1813 		return skb;
1814 
1815 	rx_stats->rx_hw_gro_packets++;
1816 	rx_stats->rx_hw_gro_wire_packets += segs;
1817 
1818 	NAPI_GRO_CB(skb)->count = segs;
1819 	skb_shinfo(skb)->gso_size =
1820 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1821 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1822 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1823 		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1824 	else
1825 		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1826 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1827 	if (likely(skb))
1828 		tcp_gro_complete(skb);
1829 #endif
1830 	return skb;
1831 }
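
/* Note: NAPI_GRO_CB(skb)->count and gso_size come straight from the
 * TPA end completion (hardware segment count and MSS), so the rest of
 * the stack sees the aggregate exactly as if software GRO had merged
 * 'segs' wire packets.
 */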
1832 
1833 /* Given the cfa_code of a received packet determine which
1834  * netdev (vf-rep or PF) the packet is destined to.
1835  */
1836 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1837 {
1838 	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1839 
1840 	/* if vf-rep dev is NULL, it must belong to the PF */
1841 	return dev ? dev : bp->dev;
1842 }
1843 
1844 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1845 					   struct bnxt_cp_ring_info *cpr,
1846 					   u32 *raw_cons,
1847 					   struct rx_tpa_end_cmp *tpa_end,
1848 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1849 					   u8 *event)
1850 {
1851 	struct bnxt_napi *bnapi = cpr->bnapi;
1852 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1853 	struct net_device *dev = bp->dev;
1854 	u8 *data_ptr, agg_bufs;
1855 	unsigned int len;
1856 	struct bnxt_tpa_info *tpa_info;
1857 	dma_addr_t mapping;
1858 	struct sk_buff *skb;
1859 	u16 idx = 0, agg_id;
1860 	void *data;
1861 	bool gro;
1862 
1863 	if (unlikely(bnapi->in_reset)) {
1864 		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1865 
1866 		if (rc < 0)
1867 			return ERR_PTR(-EBUSY);
1868 		return NULL;
1869 	}
1870 
1871 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1872 		agg_id = TPA_END_AGG_ID_P5(tpa_end);
1873 		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1874 		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1875 		tpa_info = &rxr->rx_tpa[agg_id];
1876 		if (unlikely(agg_bufs != tpa_info->agg_count)) {
1877 			netdev_warn(bp->dev, "TPA end agg_bufs %d != expected agg_bufs %d\n",
1878 				    agg_bufs, tpa_info->agg_count);
1879 			agg_bufs = tpa_info->agg_count;
1880 		}
1881 		tpa_info->agg_count = 0;
1882 		*event |= BNXT_AGG_EVENT;
1883 		bnxt_free_agg_idx(rxr, agg_id);
1884 		idx = agg_id;
1885 		gro = !!(bp->flags & BNXT_FLAG_GRO);
1886 	} else {
1887 		agg_id = TPA_END_AGG_ID(tpa_end);
1888 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1889 		tpa_info = &rxr->rx_tpa[agg_id];
1890 		idx = RING_CMP(*raw_cons);
1891 		if (agg_bufs) {
1892 			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1893 				return ERR_PTR(-EBUSY);
1894 
1895 			*event |= BNXT_AGG_EVENT;
1896 			idx = NEXT_CMP(idx);
1897 		}
1898 		gro = !!TPA_END_GRO(tpa_end);
1899 	}
1900 	data = tpa_info->data;
1901 	data_ptr = tpa_info->data_ptr;
1902 	prefetch(data_ptr);
1903 	len = tpa_info->len;
1904 	mapping = tpa_info->mapping;
1905 
1906 	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1907 		bnxt_abort_tpa(cpr, idx, agg_bufs);
1908 		if (agg_bufs > MAX_SKB_FRAGS)
1909 			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1910 				    agg_bufs, (int)MAX_SKB_FRAGS);
1911 		return NULL;
1912 	}
1913 
1914 	if (len <= bp->rx_copybreak) {
1915 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1916 		if (!skb) {
1917 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1918 			cpr->sw_stats->rx.rx_oom_discards += 1;
1919 			return NULL;
1920 		}
1921 	} else {
1922 		u8 *new_data;
1923 		dma_addr_t new_mapping;
1924 
1925 		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1926 						GFP_ATOMIC);
1927 		if (!new_data) {
1928 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1929 			cpr->sw_stats->rx.rx_oom_discards += 1;
1930 			return NULL;
1931 		}
1932 
1933 		tpa_info->data = new_data;
1934 		tpa_info->data_ptr = new_data + bp->rx_offset;
1935 		tpa_info->mapping = new_mapping;
1936 
1937 		skb = napi_build_skb(data, bp->rx_buf_size);
1938 		dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1939 					bp->rx_buf_use_size, bp->rx_dir);
1940 
1941 		if (!skb) {
1942 			page_pool_free_va(rxr->head_pool, data, true);
1943 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1944 			cpr->sw_stats->rx.rx_oom_discards += 1;
1945 			return NULL;
1946 		}
1947 		skb_mark_for_recycle(skb);
1948 		skb_reserve(skb, bp->rx_offset);
1949 		skb_put(skb, len);
1950 	}
1951 
1952 	if (agg_bufs) {
1953 		skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1954 					      true);
1955 		if (!skb) {
1956 			/* Page reuse already handled by bnxt_rx_agg_netmems_skb(). */
1957 			cpr->sw_stats->rx.rx_oom_discards += 1;
1958 			return NULL;
1959 		}
1960 	}
1961 
1962 	if (tpa_info->cfa_code_valid)
1963 		dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1964 	skb->protocol = eth_type_trans(skb, dev);
1965 
1966 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1967 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1968 
1969 	if (tpa_info->vlan_valid &&
1970 	    (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1971 		__be16 vlan_proto = htons(tpa_info->metadata >>
1972 					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
1973 		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1974 
1975 		if (eth_type_vlan(vlan_proto)) {
1976 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1977 		} else {
1978 			dev_kfree_skb(skb);
1979 			return NULL;
1980 		}
1981 	}
1982 
1983 	skb_checksum_none_assert(skb);
1984 	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1985 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1986 		skb->csum_level =
1987 			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1988 	}
1989 
1990 	if (gro)
1991 		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb,
1992 				   &cpr->sw_stats->rx);
1993 
1994 	return skb;
1995 }
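
/* Note: aggregates up to rx_copybreak are copied into a fresh skb and
 * the parked buffer stays in tpa_info for reuse; larger aggregates take
 * the zero-copy path, which must first replace the parked buffer via
 * __bnxt_alloc_rx_frag() before wrapping the old one in
 * napi_build_skb().
 */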
1996 
1997 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1998 			 struct rx_agg_cmp *rx_agg)
1999 {
2000 	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
2001 	struct bnxt_tpa_info *tpa_info;
2002 
2003 	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
2004 	tpa_info = &rxr->rx_tpa[agg_id];
2005 	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
2006 	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
2007 }
2008 
2009 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
2010 			     struct sk_buff *skb)
2011 {
2012 	skb_mark_for_recycle(skb);
2013 
2014 	if (skb->dev != bp->dev) {
2015 		/* this packet belongs to a vf-rep */
2016 		bnxt_vf_rep_rx(bp, skb);
2017 		return;
2018 	}
2019 	skb_record_rx_queue(skb, bnapi->index);
2020 	napi_gro_receive(&bnapi->napi, skb);
2021 }
2022 
2023 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2024 			     struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2025 {
2026 	u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2027 
2028 	if (BNXT_PTP_RX_TS_VALID(flags))
2029 		goto ts_valid;
2030 	if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2031 		return false;
2032 
2033 ts_valid:
2034 	*cmpl_ts = ts;
2035 	return true;
2036 }
2037 
2038 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2039 				    struct rx_cmp *rxcmp,
2040 				    struct rx_cmp_ext *rxcmp1)
2041 {
2042 	__be16 vlan_proto;
2043 	u16 vtag;
2044 
2045 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2046 		__le32 flags2 = rxcmp1->rx_cmp_flags2;
2047 		u32 meta_data;
2048 
2049 		if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2050 			return skb;
2051 
2052 		meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2053 		vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2054 		vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2055 		if (eth_type_vlan(vlan_proto))
2056 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2057 		else
2058 			goto vlan_err;
2059 	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2060 		if (RX_CMP_VLAN_VALID(rxcmp)) {
2061 			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2062 
2063 			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2064 				vlan_proto = htons(ETH_P_8021Q);
2065 			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2066 				vlan_proto = htons(ETH_P_8021AD);
2067 			else
2068 				goto vlan_err;
2069 			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2070 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2071 		}
2072 	}
2073 	return skb;
2074 vlan_err:
2075 	skb_mark_for_recycle(skb);
2076 	dev_kfree_skb(skb);
2077 	return NULL;
2078 }
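
/* Note: CMP_TYPE_RX_L2_CMP packs TPID and TCI into one 32-bit metadata
 * word (TPID above RX_CMP_FLAGS2_METADATA_TPID_SFT, TCI under the TCI
 * mask), while V3 completions report an enumerated TPID select plus a
 * separate TCI field, hence the two decode paths above.
 */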
2079 
2080 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2081 					   struct rx_cmp *rxcmp)
2082 {
2083 	u8 ext_op;
2084 
2085 	ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2086 	switch (ext_op) {
2087 	case EXT_OP_INNER_4:
2088 	case EXT_OP_OUTER_4:
2089 	case EXT_OP_INNFL_3:
2090 	case EXT_OP_OUTFL_3:
2091 		return PKT_HASH_TYPE_L4;
2092 	default:
2093 		return PKT_HASH_TYPE_L3;
2094 	}
2095 }
2096 
2097 /* returns the following:
2098  * 1       - 1 packet successfully received
2099  * 0       - successful TPA_START, packet not completed yet
2100  * -EBUSY  - completion ring does not have all the agg buffers yet
2101  * -ENOMEM - packet aborted due to out of memory
2102  * -EIO    - packet aborted due to hw error indicated in BD
2103  */
2104 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2105 		       u32 *raw_cons, u8 *event)
2106 {
2107 	struct bnxt_napi *bnapi = cpr->bnapi;
2108 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2109 	struct net_device *dev = bp->dev;
2110 	struct rx_cmp *rxcmp;
2111 	struct rx_cmp_ext *rxcmp1;
2112 	u32 tmp_raw_cons = *raw_cons;
2113 	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2114 	struct skb_shared_info *sinfo;
2115 	struct bnxt_sw_rx_bd *rx_buf;
2116 	unsigned int len;
2117 	u8 *data_ptr, agg_bufs, cmp_type;
2118 	bool xdp_active = false;
2119 	dma_addr_t dma_addr;
2120 	struct sk_buff *skb;
2121 	struct xdp_buff xdp;
2122 	u32 flags, misc;
2123 	u32 cmpl_ts;
2124 	void *data;
2125 	int rc = 0;
2126 
2127 	rxcmp = (struct rx_cmp *)
2128 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2129 
2130 	cmp_type = RX_CMP_TYPE(rxcmp);
2131 
2132 	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2133 		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2134 		goto next_rx_no_prod_no_len;
2135 	}
2136 
2137 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2138 	cp_cons = RING_CMP(tmp_raw_cons);
2139 	rxcmp1 = (struct rx_cmp_ext *)
2140 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2141 
2142 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2143 		return -EBUSY;
2144 
2145 	/* The valid test of the entry must be done first before
2146 	 * reading any further.
2147 	 */
2148 	dma_rmb();
2149 	prod = rxr->rx_prod;
2150 
2151 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2152 	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2153 		bnxt_tpa_start(bp, rxr, cmp_type,
2154 			       (struct rx_tpa_start_cmp *)rxcmp,
2155 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
2156 
2157 		*event |= BNXT_RX_EVENT;
2158 		goto next_rx_no_prod_no_len;
2159 
2160 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2161 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2162 				   (struct rx_tpa_end_cmp *)rxcmp,
2163 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2164 
2165 		if (IS_ERR(skb))
2166 			return -EBUSY;
2167 
2168 		rc = -ENOMEM;
2169 		if (likely(skb)) {
2170 			bnxt_deliver_skb(bp, bnapi, skb);
2171 			rc = 1;
2172 		}
2173 		*event |= BNXT_RX_EVENT;
2174 		goto next_rx_no_prod_no_len;
2175 	}
2176 
2177 	cons = rxcmp->rx_cmp_opaque;
2178 	if (unlikely(cons != rxr->rx_next_cons)) {
2179 		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2180 
2181 		/* 0xffff is forced error, don't print it */
2182 		if (rxr->rx_next_cons != 0xffff)
2183 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2184 				    cons, rxr->rx_next_cons);
2185 		bnxt_sched_reset_rxr(bp, rxr);
2186 		if (rc1)
2187 			return rc1;
2188 		goto next_rx_no_prod_no_len;
2189 	}
2190 	rx_buf = &rxr->rx_buf_ring[cons];
2191 	data = rx_buf->data;
2192 	data_ptr = rx_buf->data_ptr;
2193 	prefetch(data_ptr);
2194 
2195 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2196 	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2197 
2198 	if (agg_bufs) {
2199 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2200 			return -EBUSY;
2201 
2202 		cp_cons = NEXT_CMP(cp_cons);
2203 		*event |= BNXT_AGG_EVENT;
2204 	}
2205 	*event |= BNXT_RX_EVENT;
2206 
2207 	rx_buf->data = NULL;
2208 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2209 		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2210 
2211 		bnxt_reuse_rx_data(rxr, cons, data);
2212 		if (agg_bufs)
2213 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2214 					       false);
2215 
2216 		rc = -EIO;
2217 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2218 			bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2219 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2220 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2221 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
2222 						 rx_err);
2223 				bnxt_sched_reset_rxr(bp, rxr);
2224 			}
2225 		}
2226 		goto next_rx_no_len;
2227 	}
2228 
2229 	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2230 	len = flags >> RX_CMP_LEN_SHIFT;
2231 	dma_addr = rx_buf->mapping;
2232 
2233 	if (bnxt_xdp_attached(bp, rxr)) {
2234 		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2235 		if (agg_bufs) {
2236 			u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
2237 							       cp_cons,
2238 							       agg_bufs,
2239 							       false);
2240 			if (!frag_len)
2241 				goto oom_next_rx;
2242 
2243 		}
2244 		xdp_active = true;
2245 	}
2246 
2247 	if (xdp_active) {
2248 		if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2249 			rc = 1;
2250 			goto next_rx;
2251 		}
2252 		if (xdp_buff_has_frags(&xdp)) {
2253 			sinfo = xdp_get_shared_info_from_buff(&xdp);
2254 			agg_bufs = sinfo->nr_frags;
2255 		} else {
2256 			agg_bufs = 0;
2257 		}
2258 	}
2259 
2260 	if (len <= bp->rx_copybreak) {
2261 		if (!xdp_active)
2262 			skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2263 		else
2264 			skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2265 		bnxt_reuse_rx_data(rxr, cons, data);
2266 		if (!skb) {
2267 			if (agg_bufs) {
2268 				if (!xdp_active)
2269 					bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2270 							       agg_bufs, false);
2271 				else
2272 					bnxt_xdp_buff_frags_free(rxr, &xdp);
2273 			}
2274 			goto oom_next_rx;
2275 		}
2276 	} else {
2277 		u32 payload;
2278 
2279 		if (rx_buf->data_ptr == data_ptr)
2280 			payload = misc & RX_CMP_PAYLOAD_OFFSET;
2281 		else
2282 			payload = 0;
2283 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2284 				      payload | len);
2285 		if (!skb)
2286 			goto oom_next_rx;
2287 	}
2288 
2289 	if (agg_bufs) {
2290 		if (!xdp_active) {
2291 			skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2292 						      agg_bufs, false);
2293 			if (!skb)
2294 				goto oom_next_rx;
2295 		} else {
2296 			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
2297 			if (!skb) {
2298 				/* we should be able to free the old skb here */
2299 				bnxt_xdp_buff_frags_free(rxr, &xdp);
2300 				goto oom_next_rx;
2301 			}
2302 		}
2303 	}
2304 
2305 	if (RX_CMP_HASH_VALID(rxcmp)) {
2306 		enum pkt_hash_types type;
2307 
2308 		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2309 			type = bnxt_rss_ext_op(bp, rxcmp);
2310 		} else {
2311 			u32 itypes = RX_CMP_ITYPES(rxcmp);
2312 
2313 			if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2314 			    itypes == RX_CMP_FLAGS_ITYPE_UDP)
2315 				type = PKT_HASH_TYPE_L4;
2316 			else
2317 				type = PKT_HASH_TYPE_L3;
2318 		}
2319 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2320 	}
2321 
2322 	if (cmp_type == CMP_TYPE_RX_L2_CMP)
2323 		dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2324 	skb->protocol = eth_type_trans(skb, dev);
2325 
2326 	if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2327 		skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2328 		if (!skb)
2329 			goto next_rx;
2330 	}
2331 
2332 	skb_checksum_none_assert(skb);
2333 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
2334 		if (dev->features & NETIF_F_RXCSUM) {
2335 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2336 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2337 		}
2338 	} else {
2339 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2340 			if (dev->features & NETIF_F_RXCSUM)
2341 				bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2342 		}
2343 	}
2344 
2345 	if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2346 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2347 			u64 ns, ts;
2348 
2349 			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2350 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2351 
2352 				ns = bnxt_timecounter_cyc2time(ptp, ts);
2353 				memset(skb_hwtstamps(skb), 0,
2354 				       sizeof(*skb_hwtstamps(skb)));
2355 				skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2356 			}
2357 		}
2358 	}
2359 	bnxt_deliver_skb(bp, bnapi, skb);
2360 	rc = 1;
2361 
2362 next_rx:
2363 	cpr->rx_packets += 1;
2364 	cpr->rx_bytes += len;
2365 
2366 next_rx_no_len:
2367 	rxr->rx_prod = NEXT_RX(prod);
2368 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2369 
2370 next_rx_no_prod_no_len:
2371 	*raw_cons = tmp_raw_cons;
2372 
2373 	return rc;
2374 
2375 oom_next_rx:
2376 	cpr->sw_stats->rx.rx_oom_discards += 1;
2377 	rc = -ENOMEM;
2378 	goto next_rx;
2379 }
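
/* Note on the exit labels above: next_rx updates packet/byte counters
 * and falls through; next_rx_no_len still advances rx_prod and
 * rx_next_cons (used on rx errors where no length is accounted);
 * next_rx_no_prod_no_len only publishes the new raw completion index
 * (TPA start/end and discards manage the rx ring themselves).
 */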
2380 
2381 /* In netpoll mode, if we are using a combined completion ring, we need to
2382  * discard the rx packets and recycle the buffers.
2383  */
2384 static int bnxt_force_rx_discard(struct bnxt *bp,
2385 				 struct bnxt_cp_ring_info *cpr,
2386 				 u32 *raw_cons, u8 *event)
2387 {
2388 	u32 tmp_raw_cons = *raw_cons;
2389 	struct rx_cmp_ext *rxcmp1;
2390 	struct rx_cmp *rxcmp;
2391 	u16 cp_cons;
2392 	u8 cmp_type;
2393 	int rc;
2394 
2395 	cp_cons = RING_CMP(tmp_raw_cons);
2396 	rxcmp = (struct rx_cmp *)
2397 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2398 
2399 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2400 	cp_cons = RING_CMP(tmp_raw_cons);
2401 	rxcmp1 = (struct rx_cmp_ext *)
2402 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2403 
2404 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2405 		return -EBUSY;
2406 
2407 	/* The valid test of the entry must be done first before
2408 	 * reading any further.
2409 	 */
2410 	dma_rmb();
2411 	cmp_type = RX_CMP_TYPE(rxcmp);
2412 	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2413 	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2414 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2415 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2416 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2417 		struct rx_tpa_end_cmp_ext *tpa_end1;
2418 
2419 		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2420 		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2421 			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2422 	}
2423 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2424 	if (rc && rc != -EBUSY)
2425 		cpr->sw_stats->rx.rx_netpoll_discards += 1;
2426 	return rc;
2427 }
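
/* Note: stamping RX_CMPL_ERRORS_CRC_ERROR (or RX_TPA_END_CMP_ERRORS)
 * into the completion steers bnxt_rx_pkt() onto its error path, which
 * recycles the buffers instead of building an skb -- the desired
 * behavior when called from netpoll.
 */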
2428 
2429 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2430 {
2431 	struct bnxt_fw_health *fw_health = bp->fw_health;
2432 	u32 reg = fw_health->regs[reg_idx];
2433 	u32 reg_type, reg_off, val = 0;
2434 
2435 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2436 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2437 	switch (reg_type) {
2438 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
2439 		pci_read_config_dword(bp->pdev, reg_off, &val);
2440 		break;
2441 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
2442 		reg_off = fw_health->mapped_regs[reg_idx];
2443 		fallthrough;
2444 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2445 		val = readl(bp->bar0 + reg_off);
2446 		break;
2447 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2448 		val = readl(bp->bar1 + reg_off);
2449 		break;
2450 	}
2451 	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2452 		val &= fw_health->fw_reset_inprog_reg_mask;
2453 	return val;
2454 }
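
/* Note: each fw_health->regs[] entry encodes a register space plus an
 * offset.  GRC registers are first translated to their BAR0 window
 * offset (mapped_regs[]) and then read like BAR0 registers, hence the
 * fallthrough above.
 */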
2455 
2456 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2457 {
2458 	int i;
2459 
2460 	for (i = 0; i < bp->rx_nr_rings; i++) {
2461 		u16 grp_idx = bp->rx_ring[i].bnapi->index;
2462 		struct bnxt_ring_grp_info *grp_info;
2463 
2464 		grp_info = &bp->grp_info[grp_idx];
2465 		if (grp_info->agg_fw_ring_id == ring_id)
2466 			return grp_idx;
2467 	}
2468 	return INVALID_HW_RING_ID;
2469 }
2470 
2471 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2472 {
2473 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2474 
2475 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2476 		return link_info->force_link_speed2;
2477 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2478 		return link_info->force_pam4_link_speed;
2479 	return link_info->force_link_speed;
2480 }
2481 
2482 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2483 {
2484 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2485 
2486 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2487 		link_info->req_link_speed = link_info->force_link_speed2;
2488 		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2489 		switch (link_info->req_link_speed) {
2490 		case BNXT_LINK_SPEED_50GB_PAM4:
2491 		case BNXT_LINK_SPEED_100GB_PAM4:
2492 		case BNXT_LINK_SPEED_200GB_PAM4:
2493 		case BNXT_LINK_SPEED_400GB_PAM4:
2494 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2495 			break;
2496 		case BNXT_LINK_SPEED_100GB_PAM4_112:
2497 		case BNXT_LINK_SPEED_200GB_PAM4_112:
2498 		case BNXT_LINK_SPEED_400GB_PAM4_112:
2499 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2500 			break;
2501 		default:
2502 			link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2503 		}
2504 		return;
2505 	}
2506 	link_info->req_link_speed = link_info->force_link_speed;
2507 	link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2508 	if (link_info->force_pam4_link_speed) {
2509 		link_info->req_link_speed = link_info->force_pam4_link_speed;
2510 		link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2511 	}
2512 }
2513 
2514 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2515 {
2516 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2517 
2518 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2519 		link_info->advertising = link_info->auto_link_speeds2;
2520 		return;
2521 	}
2522 	link_info->advertising = link_info->auto_link_speeds;
2523 	link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2524 }
2525 
2526 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2527 {
2528 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2529 
2530 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2531 		if (link_info->req_link_speed != link_info->force_link_speed2)
2532 			return true;
2533 		return false;
2534 	}
2535 	if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2536 	    link_info->req_link_speed != link_info->force_link_speed)
2537 		return true;
2538 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2539 	    link_info->req_link_speed != link_info->force_pam4_link_speed)
2540 		return true;
2541 	return false;
2542 }
2543 
2544 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2545 {
2546 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2547 
2548 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2549 		if (link_info->advertising != link_info->auto_link_speeds2)
2550 			return true;
2551 		return false;
2552 	}
2553 	if (link_info->advertising != link_info->auto_link_speeds ||
2554 	    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2555 		return true;
2556 	return false;
2557 }
2558 
2559 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2560 {
2561 	u32 flags = bp->ctx->ctx_arr[type].flags;
2562 
2563 	return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2564 		((flags & BNXT_CTX_MEM_FW_TRACE) ||
2565 		 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2566 }
2567 
2568 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2569 {
2570 	u32 mem_size, pages, rem_bytes, magic_byte_offset;
2571 	u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2572 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2573 	struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2574 	struct bnxt_bs_trace_info *bs_trace;
2575 	int last_pg;
2576 
2577 	if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2578 		return;
2579 
2580 	mem_size = ctxm->max_entries * ctxm->entry_size;
2581 	rem_bytes = mem_size % BNXT_PAGE_SIZE;
2582 	pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2583 
2584 	last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2585 	magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2586 
2587 	rmem = &ctx_pg[0].ring_mem;
2588 	bs_trace = &bp->bs_trace[trace_type];
2589 	bs_trace->ctx_type = ctxm->type;
2590 	bs_trace->trace_type = trace_type;
2591 	if (pages > MAX_CTX_PAGES) {
2592 		int last_pg_dir = rmem->nr_pages - 1;
2593 
2594 		rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2595 		bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2596 	} else {
2597 		bs_trace->magic_byte = rmem->pg_arr[last_pg];
2598 	}
2599 	bs_trace->magic_byte += magic_byte_offset;
2600 	*bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2601 }
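
/* Worked example of the magic-byte math above, with assumed sizes: if
 * max_entries * entry_size = 10000 and BNXT_PAGE_SIZE = 4096, then
 * pages = 3, rem_bytes = 1808, and the magic byte lands at offset 1807
 * of the last page -- the final valid byte of the trace buffer, which
 * can later be tested to detect that firmware wrapped the buffer.
 */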
2602 
2603 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1)				\
2604 	(((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2605 	 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2606 
2607 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2)				\
2608 	(((data2) &							\
2609 	  ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2610 	 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2611 
2612 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2)				\
2613 	((data2) &							\
2614 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2615 
2616 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)			\
2617 	(((data2) &							\
2618 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2619 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2620 
2621 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)			\
2622 	((data1) &							\
2623 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2624 
2625 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)		\
2626 	(((data1) &							\
2627 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2628 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2629 
2630 /* Return true if the workqueue has to be scheduled */
2631 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2632 {
2633 	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2634 
2635 	switch (err_type) {
2636 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2637 		netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2638 			   BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2639 		break;
2640 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2641 		netdev_warn(bp->dev, "Pause Storm detected!\n");
2642 		break;
2643 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2644 		netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2645 		break;
2646 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2647 		u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2648 		char *threshold_type;
2649 		bool notify = false;
2650 		char *dir_str;
2651 
2652 		switch (type) {
2653 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2654 			threshold_type = "warning";
2655 			break;
2656 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2657 			threshold_type = "critical";
2658 			break;
2659 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2660 			threshold_type = "fatal";
2661 			break;
2662 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2663 			threshold_type = "shutdown";
2664 			break;
2665 		default:
2666 			netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2667 			return false;
2668 		}
2669 		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2670 			dir_str = "above";
2671 			notify = true;
2672 		} else {
2673 			dir_str = "below";
2674 		}
2675 		netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2676 			    dir_str, threshold_type);
2677 		netdev_warn(bp->dev, "Temperature (in Celsius), current: %lu, threshold: %lu\n",
2678 			    BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2679 			    BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2680 		if (notify) {
2681 			bp->thermal_threshold_type = type;
2682 			set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2683 			return true;
2684 		}
2685 		return false;
2686 	}
2687 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2688 		netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2689 		break;
2690 	default:
2691 		netdev_err(bp->dev, "FW reported unknown error type %u\n",
2692 			   err_type);
2693 		break;
2694 	}
2695 	return false;
2696 }
2697 
2698 #define BNXT_GET_EVENT_PORT(data)	\
2699 	((data) &			\
2700 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2701 
2702 #define BNXT_EVENT_RING_TYPE(data2)	\
2703 	((data2) &			\
2704 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2705 
2706 #define BNXT_EVENT_RING_TYPE_RX(data2)	\
2707 	(BNXT_EVENT_RING_TYPE(data2) ==	\
2708 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2709 
2710 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)	\
2711 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2712 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2713 
2714 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)	\
2715 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2716 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2717 
2718 #define BNXT_PHC_BITS	48
2719 
2720 static int bnxt_async_event_process(struct bnxt *bp,
2721 				    struct hwrm_async_event_cmpl *cmpl)
2722 {
2723 	u16 event_id = le16_to_cpu(cmpl->event_id);
2724 	u32 data1 = le32_to_cpu(cmpl->event_data1);
2725 	u32 data2 = le32_to_cpu(cmpl->event_data2);
2726 
2727 	netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2728 		   event_id, data1, data2);
2729 
2730 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
2731 	switch (event_id) {
2732 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2733 		struct bnxt_link_info *link_info = &bp->link_info;
2734 
2735 		if (BNXT_VF(bp))
2736 			goto async_event_process_exit;
2737 
2738 		/* print unsupported speed warning in forced speed mode only */
2739 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2740 		    (data1 & 0x20000)) {
2741 			u16 fw_speed = bnxt_get_force_speed(link_info);
2742 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2743 
2744 			if (speed != SPEED_UNKNOWN)
2745 				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2746 					    speed);
2747 		}
2748 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2749 	}
2750 		fallthrough;
2751 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2752 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2753 		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2754 		fallthrough;
2755 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2756 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2757 		break;
2758 	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2759 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2760 		break;
2761 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2762 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
2763 
2764 		if (BNXT_VF(bp))
2765 			break;
2766 
2767 		if (bp->pf.port_id != port_id)
2768 			break;
2769 
2770 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2771 		break;
2772 	}
2773 	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2774 		if (BNXT_PF(bp))
2775 			goto async_event_process_exit;
2776 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2777 		break;
2778 	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2779 		char *type_str = "Solicited";
2780 
2781 		if (!bp->fw_health)
2782 			goto async_event_process_exit;
2783 
2784 		bp->fw_reset_timestamp = jiffies;
2785 		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2786 		if (!bp->fw_reset_min_dsecs)
2787 			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2788 		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2789 		if (!bp->fw_reset_max_dsecs)
2790 			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2791 		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2792 			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2793 		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2794 			type_str = "Fatal";
2795 			bp->fw_health->fatalities++;
2796 			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2797 		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2798 			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2799 			type_str = "Non-fatal";
2800 			bp->fw_health->survivals++;
2801 			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2802 		}
2803 		netif_warn(bp, hw, bp->dev,
2804 			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2805 			   type_str, data1, data2,
2806 			   bp->fw_reset_min_dsecs * 100,
2807 			   bp->fw_reset_max_dsecs * 100);
2808 		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2809 		break;
2810 	}
2811 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2812 		struct bnxt_fw_health *fw_health = bp->fw_health;
2813 		char *status_desc = "healthy";
2814 		u32 status;
2815 
2816 		if (!fw_health)
2817 			goto async_event_process_exit;
2818 
2819 		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2820 			fw_health->enabled = false;
2821 			netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2822 			break;
2823 		}
2824 		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2825 		fw_health->tmr_multiplier =
2826 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2827 				     bp->current_interval * 10);
2828 		fw_health->tmr_counter = fw_health->tmr_multiplier;
2829 		if (!fw_health->enabled)
2830 			fw_health->last_fw_heartbeat =
2831 				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2832 		fw_health->last_fw_reset_cnt =
2833 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2834 		status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2835 		if (status != BNXT_FW_STATUS_HEALTHY)
2836 			status_desc = "unhealthy";
2837 		netif_info(bp, drv, bp->dev,
2838 			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2839 			   fw_health->primary ? "primary" : "backup", status,
2840 			   status_desc, fw_health->last_fw_reset_cnt);
2841 		if (!fw_health->enabled) {
2842 			/* Make sure tmr_counter is set and visible to
2843 			 * bnxt_health_check() before setting enabled to true.
2844 			 */
2845 			smp_wmb();
2846 			fw_health->enabled = true;
2847 		}
2848 		goto async_event_process_exit;
2849 	}
2850 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2851 		netif_notice(bp, hw, bp->dev,
2852 			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2853 			     data1, data2);
2854 		goto async_event_process_exit;
2855 	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2856 		struct bnxt_rx_ring_info *rxr;
2857 		u16 grp_idx;
2858 
2859 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2860 			goto async_event_process_exit;
2861 
2862 		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2863 			    BNXT_EVENT_RING_TYPE(data2), data1);
2864 		if (!BNXT_EVENT_RING_TYPE_RX(data2))
2865 			goto async_event_process_exit;
2866 
2867 		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2868 		if (grp_idx == INVALID_HW_RING_ID) {
2869 			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2870 				    data1);
2871 			goto async_event_process_exit;
2872 		}
2873 		rxr = bp->bnapi[grp_idx]->rx_ring;
2874 		bnxt_sched_reset_rxr(bp, rxr);
2875 		goto async_event_process_exit;
2876 	}
2877 	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2878 		struct bnxt_fw_health *fw_health = bp->fw_health;
2879 
2880 		netif_notice(bp, hw, bp->dev,
2881 			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2882 			     data1, data2);
2883 		if (fw_health) {
2884 			fw_health->echo_req_data1 = data1;
2885 			fw_health->echo_req_data2 = data2;
2886 			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2887 			break;
2888 		}
2889 		goto async_event_process_exit;
2890 	}
2891 	case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2892 		bnxt_ptp_pps_event(bp, data1, data2);
2893 		goto async_event_process_exit;
2894 	}
2895 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2896 		if (bnxt_event_error_report(bp, data1, data2))
2897 			break;
2898 		goto async_event_process_exit;
2899 	}
2900 	case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2901 		switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2902 		case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2903 			if (BNXT_PTP_USE_RTC(bp)) {
2904 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2905 				unsigned long flags;
2906 				u64 ns;
2907 
2908 				if (!ptp)
2909 					goto async_event_process_exit;
2910 
2911 				bnxt_ptp_update_current_time(bp);
2912 				ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2913 				       BNXT_PHC_BITS) | ptp->current_time);
2914 				write_seqlock_irqsave(&ptp->ptp_lock, flags);
2915 				bnxt_ptp_rtc_timecounter_init(ptp, ns);
2916 				write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2917 			}
2918 			break;
2919 		}
2920 		goto async_event_process_exit;
2921 	}
2922 	case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2923 		u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2924 
2925 		hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2926 		goto async_event_process_exit;
2927 	}
2928 	case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2929 		u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2930 		u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2931 
2932 		if (type >= ARRAY_SIZE(bp->bs_trace))
2933 			goto async_event_process_exit;
2934 		bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2935 		goto async_event_process_exit;
2936 	}
2937 	default:
2938 		goto async_event_process_exit;
2939 	}
2940 	__bnxt_queue_sp_work(bp);
2941 async_event_process_exit:
2942 	bnxt_ulp_async_events(bp, cmpl);
2943 	return 0;
2944 }
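
/* Note on control flow above: cases that end in 'break' fall out of the
 * switch to __bnxt_queue_sp_work() so bnxt's workqueue handles the
 * event, while cases that 'goto async_event_process_exit' only notify
 * ULP drivers and deliberately skip scheduling that work.
 */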
2945 
2946 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2947 {
2948 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2949 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2950 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2951 				(struct hwrm_fwd_req_cmpl *)txcmp;
2952 
2953 	switch (cmpl_type) {
2954 	case CMPL_BASE_TYPE_HWRM_DONE:
2955 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
2956 		hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2957 		break;
2958 
2959 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2960 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2961 
2962 		if ((vf_id < bp->pf.first_vf_id) ||
2963 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2964 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2965 				   vf_id);
2966 			return -EINVAL;
2967 		}
2968 
2969 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2970 		bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2971 		break;
2972 
2973 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2974 		bnxt_async_event_process(bp,
2975 					 (struct hwrm_async_event_cmpl *)txcmp);
2976 		break;
2977 
2978 	default:
2979 		break;
2980 	}
2981 
2982 	return 0;
2983 }
2984 
2985 static bool bnxt_vnic_is_active(struct bnxt *bp)
2986 {
2987 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2988 
2989 	return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2990 }
2991 
2992 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2993 {
2994 	struct bnxt_napi *bnapi = dev_instance;
2995 	struct bnxt *bp = bnapi->bp;
2996 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2997 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2998 
2999 	cpr->event_ctr++;
3000 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
3001 	napi_schedule(&bnapi->napi);
3002 	return IRQ_HANDLED;
3003 }
3004 
3005 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3006 {
3007 	u32 raw_cons = cpr->cp_raw_cons;
3008 	u16 cons = RING_CMP(raw_cons);
3009 	struct tx_cmp *txcmp;
3010 
3011 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3012 
3013 	return TX_CMP_VALID(txcmp, raw_cons);
3014 }
3015 
3016 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3017 			    int budget)
3018 {
3019 	struct bnxt_napi *bnapi = cpr->bnapi;
3020 	u32 raw_cons = cpr->cp_raw_cons;
3021 	bool flush_xdp = false;
3022 	u32 cons;
3023 	int rx_pkts = 0;
3024 	u8 event = 0;
3025 	struct tx_cmp *txcmp;
3026 
3027 	cpr->has_more_work = 0;
3028 	cpr->had_work_done = 1;
3029 	while (1) {
3030 		u8 cmp_type;
3031 		int rc;
3032 
3033 		cons = RING_CMP(raw_cons);
3034 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3035 
3036 		if (!TX_CMP_VALID(txcmp, raw_cons))
3037 			break;
3038 
3039 		/* The valid test of the entry must be done first before
3040 		 * reading any further.
3041 		 */
3042 		dma_rmb();
3043 		cmp_type = TX_CMP_TYPE(txcmp);
3044 		if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3045 		    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3046 			u32 opaque = txcmp->tx_cmp_opaque;
3047 			struct bnxt_tx_ring_info *txr;
3048 			u16 tx_freed;
3049 
3050 			txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3051 			event |= BNXT_TX_CMP_EVENT;
3052 			if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3053 				txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3054 			else
3055 				txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3056 			tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3057 				   bp->tx_ring_mask;
3058 			/* return full budget so NAPI will complete. */
3059 			if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3060 				rx_pkts = budget;
3061 				raw_cons = NEXT_RAW_CMP(raw_cons);
3062 				if (budget)
3063 					cpr->has_more_work = 1;
3064 				break;
3065 			}
3066 		} else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3067 			bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3068 		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3069 			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3070 			if (likely(budget))
3071 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3072 			else
3073 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3074 							   &event);
3075 			if (event & BNXT_REDIRECT_EVENT)
3076 				flush_xdp = true;
3077 			if (likely(rc >= 0))
3078 				rx_pkts += rc;
3079 			/* Increment rx_pkts when rc is -ENOMEM to count towards
3080 			 * the NAPI budget.  Otherwise, we may potentially loop
3081 			 * here forever if we consistently cannot allocate
3082 			 * buffers.
3083 			 */
3084 			else if (rc == -ENOMEM && budget)
3085 				rx_pkts++;
3086 			else if (rc == -EBUSY)	/* partial completion */
3087 				break;
3088 		} else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3089 				    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3090 				    cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3091 			bnxt_hwrm_handler(bp, txcmp);
3092 		}
3093 		raw_cons = NEXT_RAW_CMP(raw_cons);
3094 
3095 		if (rx_pkts && rx_pkts == budget) {
3096 			cpr->has_more_work = 1;
3097 			break;
3098 		}
3099 	}
3100 
3101 	if (flush_xdp) {
3102 		xdp_do_flush();
3103 		event &= ~BNXT_REDIRECT_EVENT;
3104 	}
3105 
3106 	if (event & BNXT_TX_EVENT) {
3107 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3108 		u16 prod = txr->tx_prod;
3109 
3110 		/* Sync BD data before updating doorbell */
3111 		wmb();
3112 
3113 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3114 		event &= ~BNXT_TX_EVENT;
3115 	}
3116 
3117 	cpr->cp_raw_cons = raw_cons;
3118 	bnapi->events |= event;
3119 	return rx_pkts;
3120 }
3121 
3122 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3123 				  int budget)
3124 {
3125 	if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3126 		bnapi->tx_int(bp, bnapi, budget);
3127 
3128 	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3129 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3130 
3131 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3132 		bnapi->events &= ~BNXT_RX_EVENT;
3133 	}
3134 	if (bnapi->events & BNXT_AGG_EVENT) {
3135 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3136 
3137 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3138 		bnapi->events &= ~BNXT_AGG_EVENT;
3139 	}
3140 }
3141 
3142 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3143 			  int budget)
3144 {
3145 	struct bnxt_napi *bnapi = cpr->bnapi;
3146 	int rx_pkts;
3147 
3148 	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3149 
3150 	/* ACK completion ring before freeing tx ring and producing new
3151 	 * buffers in rx/agg rings to prevent overflowing the completion
3152 	 * ring.
3153 	 */
3154 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3155 
3156 	__bnxt_poll_work_done(bp, bnapi, budget);
3157 	return rx_pkts;
3158 }
3159 
3160 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3161 {
3162 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3163 	struct bnxt *bp = bnapi->bp;
3164 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3165 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3166 	struct tx_cmp *txcmp;
3167 	struct rx_cmp_ext *rxcmp1;
3168 	u32 cp_cons, tmp_raw_cons;
3169 	u32 raw_cons = cpr->cp_raw_cons;
3170 	bool flush_xdp = false;
3171 	u32 rx_pkts = 0;
3172 	u8 event = 0;
3173 
3174 	while (1) {
3175 		int rc;
3176 
3177 		cp_cons = RING_CMP(raw_cons);
3178 		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3179 
3180 		if (!TX_CMP_VALID(txcmp, raw_cons))
3181 			break;
3182 
3183 		/* The valid test of the entry must be done first before
3184 		 * reading any further.
3185 		 */
3186 		dma_rmb();
3187 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3188 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3189 			cp_cons = RING_CMP(tmp_raw_cons);
3190 			rxcmp1 = (struct rx_cmp_ext *)
3191 			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3192 
3193 			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3194 				break;
3195 
3196 			/* force an error to recycle the buffer */
3197 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3198 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3199 
3200 			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3201 			if (likely(rc == -EIO) && budget)
3202 				rx_pkts++;
3203 			else if (rc == -EBUSY)	/* partial completion */
3204 				break;
3205 			if (event & BNXT_REDIRECT_EVENT)
3206 				flush_xdp = true;
3207 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
3208 				    CMPL_BASE_TYPE_HWRM_DONE)) {
3209 			bnxt_hwrm_handler(bp, txcmp);
3210 		} else {
3211 			netdev_err(bp->dev,
3212 				   "Invalid completion received on special ring\n");
3213 		}
3214 		raw_cons = NEXT_RAW_CMP(raw_cons);
3215 
3216 		if (rx_pkts == budget)
3217 			break;
3218 	}
3219 
3220 	cpr->cp_raw_cons = raw_cons;
3221 	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3222 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3223 
3224 	if (event & BNXT_AGG_EVENT)
3225 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3226 	if (flush_xdp)
3227 		xdp_do_flush();
3228 
3229 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3230 		napi_complete_done(napi, rx_pkts);
3231 		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3232 	}
3233 	return rx_pkts;
3234 }
3235 
3236 static int bnxt_poll(struct napi_struct *napi, int budget)
3237 {
3238 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3239 	struct bnxt *bp = bnapi->bp;
3240 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3241 	int work_done = 0;
3242 
3243 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3244 		napi_complete(napi);
3245 		return 0;
3246 	}
3247 	while (1) {
3248 		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3249 
3250 		if (work_done >= budget) {
3251 			if (!budget)
3252 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3253 			break;
3254 		}
3255 
3256 		if (!bnxt_has_work(bp, cpr)) {
3257 			if (napi_complete_done(napi, work_done))
3258 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3259 			break;
3260 		}
3261 	}
3262 	if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3263 		struct dim_sample dim_sample = {};
3264 
3265 		dim_update_sample(cpr->event_ctr,
3266 				  cpr->rx_packets,
3267 				  cpr->rx_bytes,
3268 				  &dim_sample);
3269 		net_dim(&cpr->dim, &dim_sample);
3270 	}
3271 	return work_done;
3272 }
3273 
3274 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3275 {
3276 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3277 	int i, work_done = 0;
3278 
3279 	for (i = 0; i < cpr->cp_ring_count; i++) {
3280 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3281 
3282 		if (cpr2->had_nqe_notify) {
3283 			work_done += __bnxt_poll_work(bp, cpr2,
3284 						      budget - work_done);
3285 			cpr->has_more_work |= cpr2->has_more_work;
3286 		}
3287 	}
3288 	return work_done;
3289 }
3290 
3291 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3292 				 u64 dbr_type, int budget)
3293 {
3294 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3295 	int i;
3296 
3297 	for (i = 0; i < cpr->cp_ring_count; i++) {
3298 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3299 		struct bnxt_db_info *db;
3300 
3301 		if (cpr2->had_work_done) {
3302 			u32 tgl = 0;
3303 
3304 			if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3305 				cpr2->had_nqe_notify = 0;
3306 				tgl = cpr2->toggle;
3307 			}
3308 			db = &cpr2->cp_db;
3309 			bnxt_writeq(bp,
3310 				    db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3311 				    DB_RING_IDX(db, cpr2->cp_raw_cons),
3312 				    db->doorbell);
3313 			cpr2->had_work_done = 0;
3314 		}
3315 	}
3316 	__bnxt_poll_work_done(bp, bnapi, budget);
3317 }
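
/* Note: DBR_TYPE_CQ_ARMALL acknowledges the consumer index and re-arms
 * the completion ring's interrupt (used when NAPI is completing), while
 * plain DBR_TYPE_CQ only acknowledges, keeping interrupts masked while
 * polling continues; had_nqe_notify is cleared only in the ARMALL case.
 */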
3318 
3319 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3320 {
3321 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3322 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3323 	struct bnxt_cp_ring_info *cpr_rx;
3324 	u32 raw_cons = cpr->cp_raw_cons;
3325 	struct bnxt *bp = bnapi->bp;
3326 	struct nqe_cn *nqcmp;
3327 	int work_done = 0;
3328 	u32 cons;
3329 
3330 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3331 		napi_complete(napi);
3332 		return 0;
3333 	}
3334 	if (cpr->has_more_work) {
3335 		cpr->has_more_work = 0;
3336 		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3337 	}
3338 	while (1) {
3339 		u16 type;
3340 
3341 		cons = RING_CMP(raw_cons);
3342 		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3343 
3344 		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3345 			if (cpr->has_more_work)
3346 				break;
3347 
3348 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3349 					     budget);
3350 			cpr->cp_raw_cons = raw_cons;
3351 			if (napi_complete_done(napi, work_done))
3352 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3353 						  cpr->cp_raw_cons);
3354 			goto poll_done;
3355 		}
3356 
3357 		/* The valid test of the entry must be done first, before
3358 		 * reading any further.
3359 		 */
3360 		dma_rmb();
3361 
3362 		type = le16_to_cpu(nqcmp->type);
3363 		if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3364 			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3365 			u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3366 			struct bnxt_cp_ring_info *cpr2;
3367 
3368 			/* No more budget for RX work */
3369 			if (budget && work_done >= budget &&
3370 			    cq_type == BNXT_NQ_HDL_TYPE_RX)
3371 				break;
3372 
3373 			idx = BNXT_NQ_HDL_IDX(idx);
3374 			cpr2 = &cpr->cp_ring_arr[idx];
3375 			cpr2->had_nqe_notify = 1;
3376 			cpr2->toggle = NQE_CN_TOGGLE(type);
3377 			work_done += __bnxt_poll_work(bp, cpr2,
3378 						      budget - work_done);
3379 			cpr->has_more_work |= cpr2->has_more_work;
3380 		} else {
3381 			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3382 		}
3383 		raw_cons = NEXT_RAW_CMP(raw_cons);
3384 	}
3385 	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3386 	if (raw_cons != cpr->cp_raw_cons) {
3387 		cpr->cp_raw_cons = raw_cons;
3388 		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3389 	}
3390 poll_done:
3391 	cpr_rx = &cpr->cp_ring_arr[0];
3392 	if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3393 	    (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3394 		struct dim_sample dim_sample = {};
3395 
3396 		dim_update_sample(cpr->event_ctr,
3397 				  cpr_rx->rx_packets,
3398 				  cpr_rx->rx_bytes,
3399 				  &dim_sample);
3400 		net_dim(&cpr->dim, &dim_sample);
3401 	}
3402 	return work_done;
3403 }
3404 
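/* Reclaim every buffer still on one TX ring: unmap and free XDP_REDIRECT
 * frames, free push-mode skbs (their data was copied into the push buffer,
 * so nothing is mapped, and they occupy two BD slots), and unmap the head
 * plus each fragment of a regular skb before freeing it.
 */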
3405 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3406 				       struct bnxt_tx_ring_info *txr, int idx)
3407 {
3408 	int i, max_idx;
3409 	struct pci_dev *pdev = bp->pdev;
3410 
3411 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3412 
3413 	for (i = 0; i < max_idx;) {
3414 		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3415 		struct sk_buff *skb;
3416 		int j, last;
3417 
3418 		if (idx < bp->tx_nr_rings_xdp &&
3419 		    tx_buf->action == XDP_REDIRECT) {
3420 			dma_unmap_single(&pdev->dev,
3421 					 dma_unmap_addr(tx_buf, mapping),
3422 					 dma_unmap_len(tx_buf, len),
3423 					 DMA_TO_DEVICE);
3424 			xdp_return_frame(tx_buf->xdpf);
3425 			tx_buf->action = 0;
3426 			tx_buf->xdpf = NULL;
3427 			i++;
3428 			continue;
3429 		}
3430 
3431 		skb = tx_buf->skb;
3432 		if (!skb) {
3433 			i++;
3434 			continue;
3435 		}
3436 
3437 		tx_buf->skb = NULL;
3438 
3439 		if (tx_buf->is_push) {
3440 			dev_kfree_skb(skb);
3441 			i += 2;
3442 			continue;
3443 		}
3444 
3445 		dma_unmap_single(&pdev->dev,
3446 				 dma_unmap_addr(tx_buf, mapping),
3447 				 skb_headlen(skb),
3448 				 DMA_TO_DEVICE);
3449 
3450 		last = tx_buf->nr_frags;
3451 		i += 2;
3452 		for (j = 0; j < last; j++, i++) {
3453 			int ring_idx = i & bp->tx_ring_mask;
3454 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3455 
3456 			tx_buf = &txr->tx_buf_ring[ring_idx];
3457 			netmem_dma_unmap_page_attrs(&pdev->dev,
3458 						    dma_unmap_addr(tx_buf,
3459 								   mapping),
3460 						    skb_frag_size(frag),
3461 						    DMA_TO_DEVICE, 0);
3462 		}
3463 		dev_kfree_skb(skb);
3464 	}
3465 	netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3466 }
3467 
3468 static void bnxt_free_tx_skbs(struct bnxt *bp)
3469 {
3470 	int i;
3471 
3472 	if (!bp->tx_ring)
3473 		return;
3474 
3475 	for (i = 0; i < bp->tx_nr_rings; i++) {
3476 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3477 
3478 		if (!txr->tx_buf_ring)
3479 			continue;
3480 
3481 		bnxt_free_one_tx_ring_skbs(bp, txr, i);
3482 	}
3483 
3484 	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3485 		bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3486 }
3487 
3488 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3489 {
3490 	int i, max_idx;
3491 
3492 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3493 
3494 	for (i = 0; i < max_idx; i++) {
3495 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3496 		void *data = rx_buf->data;
3497 
3498 		if (!data)
3499 			continue;
3500 
3501 		rx_buf->data = NULL;
3502 		if (BNXT_RX_PAGE_MODE(bp))
3503 			page_pool_recycle_direct(rxr->page_pool, data);
3504 		else
3505 			page_pool_free_va(rxr->head_pool, data, true);
3506 	}
3507 }
3508 
3509 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3510 {
3511 	int i, max_idx;
3512 
3513 	max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3514 
3515 	for (i = 0; i < max_idx; i++) {
3516 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3517 		netmem_ref netmem = rx_agg_buf->netmem;
3518 
3519 		if (!netmem)
3520 			continue;
3521 
3522 		rx_agg_buf->netmem = 0;
3523 		__clear_bit(i, rxr->rx_agg_bmap);
3524 
3525 		page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3526 	}
3527 }
3528 
3529 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3530 					struct bnxt_rx_ring_info *rxr)
3531 {
3532 	int i;
3533 
3534 	for (i = 0; i < bp->max_tpa; i++) {
3535 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3536 		u8 *data = tpa_info->data;
3537 
3538 		if (!data)
3539 			continue;
3540 
3541 		tpa_info->data = NULL;
3542 		page_pool_free_va(rxr->head_pool, data, false);
3543 	}
3544 }
3545 
3546 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3547 				       struct bnxt_rx_ring_info *rxr)
3548 {
3549 	struct bnxt_tpa_idx_map *map;
3550 
3551 	if (!rxr->rx_tpa)
3552 		goto skip_rx_tpa_free;
3553 
3554 	bnxt_free_one_tpa_info_data(bp, rxr);
3555 
3556 skip_rx_tpa_free:
3557 	if (!rxr->rx_buf_ring)
3558 		goto skip_rx_buf_free;
3559 
3560 	bnxt_free_one_rx_ring(bp, rxr);
3561 
3562 skip_rx_buf_free:
3563 	if (!rxr->rx_agg_ring)
3564 		goto skip_rx_agg_free;
3565 
3566 	bnxt_free_one_rx_agg_ring(bp, rxr);
3567 
3568 skip_rx_agg_free:
3569 	map = rxr->rx_tpa_idx_map;
3570 	if (map)
3571 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3572 }
3573 
3574 static void bnxt_free_rx_skbs(struct bnxt *bp)
3575 {
3576 	int i;
3577 
3578 	if (!bp->rx_ring)
3579 		return;
3580 
3581 	for (i = 0; i < bp->rx_nr_rings; i++)
3582 		bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3583 }
3584 
3585 static void bnxt_free_skbs(struct bnxt *bp)
3586 {
3587 	bnxt_free_tx_skbs(bp);
3588 	bnxt_free_rx_skbs(bp);
3589 }
3590 
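/* Apply the firmware-specified init pattern to a block of context memory:
 * either fill the whole block with init_value, or, when init_offset is
 * valid, write init_value at that offset of every entry.
 */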
3591 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3592 {
3593 	u8 init_val = ctxm->init_value;
3594 	u16 offset = ctxm->init_offset;
3595 	u8 *p2 = p;
3596 	int i;
3597 
3598 	if (!init_val)
3599 		return;
3600 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3601 		memset(p, init_val, len);
3602 		return;
3603 	}
3604 	for (i = 0; i < len; i += ctxm->entry_size)
3605 		*(p2 + i + offset) = init_val;
3606 }
3607 
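/* Copy the circular region [head, tail) of a ring's backing pages into a
 * flat buffer, or just measure it when buf is NULL.  The copy is clamped
 * at the end of the current group of pages; the number of bytes covered
 * is returned.
 */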
3608 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3609 			       void *buf, size_t offset, size_t head,
3610 			       size_t tail)
3611 {
3612 	int i, head_page, start_idx, source_offset;
3613 	size_t len, rem_len, total_len, max_bytes;
3614 
3615 	head_page = head / rmem->page_size;
3616 	source_offset = head % rmem->page_size;
3617 	total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3618 	if (!total_len)
3619 		total_len = MAX_CTX_BYTES;
3620 	start_idx = head_page % MAX_CTX_PAGES;
3621 	max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3622 		    source_offset;
3623 	total_len = min(total_len, max_bytes);
3624 	rem_len = total_len;
3625 
3626 	for (i = start_idx; rem_len; i++, source_offset = 0) {
3627 		len = min((size_t)(rmem->page_size - source_offset), rem_len);
3628 		if (buf)
3629 			memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3630 			       len);
3631 		offset += len;
3632 		rem_len -= len;
3633 	}
3634 	return total_len;
3635 }
3636 
3637 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3638 {
3639 	struct pci_dev *pdev = bp->pdev;
3640 	int i;
3641 
3642 	if (!rmem->pg_arr)
3643 		goto skip_pages;
3644 
3645 	for (i = 0; i < rmem->nr_pages; i++) {
3646 		if (!rmem->pg_arr[i])
3647 			continue;
3648 
3649 		dma_free_coherent(&pdev->dev, rmem->page_size,
3650 				  rmem->pg_arr[i], rmem->dma_arr[i]);
3651 
3652 		rmem->pg_arr[i] = NULL;
3653 	}
3654 skip_pages:
3655 	if (rmem->pg_tbl) {
3656 		size_t pg_tbl_size = rmem->nr_pages * 8;
3657 
3658 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3659 			pg_tbl_size = rmem->page_size;
3660 		dma_free_coherent(&pdev->dev, pg_tbl_size,
3661 				  rmem->pg_tbl, rmem->pg_tbl_map);
3662 		rmem->pg_tbl = NULL;
3663 	}
3664 	if (rmem->vmem_size && *rmem->vmem) {
3665 		vfree(*rmem->vmem);
3666 		*rmem->vmem = NULL;
3667 	}
3668 }
3669 
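/* Allocate the DMA-coherent pages backing a ring, plus an indirection
 * page table when more than one page (or a deeper page hierarchy) is
 * needed.  Page table entries carry PTU_PTE_VALID and, for ring PTEs,
 * NEXT_TO_LAST/LAST markers on the final two entries.  An optional
 * vzalloc'd area backs the software ring state.
 */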
3670 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3671 {
3672 	struct pci_dev *pdev = bp->pdev;
3673 	u64 valid_bit = 0;
3674 	int i;
3675 
3676 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3677 		valid_bit = PTU_PTE_VALID;
3678 	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3679 		size_t pg_tbl_size = rmem->nr_pages * 8;
3680 
3681 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3682 			pg_tbl_size = rmem->page_size;
3683 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3684 						  &rmem->pg_tbl_map,
3685 						  GFP_KERNEL);
3686 		if (!rmem->pg_tbl)
3687 			return -ENOMEM;
3688 	}
3689 
3690 	for (i = 0; i < rmem->nr_pages; i++) {
3691 		u64 extra_bits = valid_bit;
3692 
3693 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3694 						     rmem->page_size,
3695 						     &rmem->dma_arr[i],
3696 						     GFP_KERNEL);
3697 		if (!rmem->pg_arr[i])
3698 			return -ENOMEM;
3699 
3700 		if (rmem->ctx_mem)
3701 			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3702 					  rmem->page_size);
3703 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
3704 			if (i == rmem->nr_pages - 2 &&
3705 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3706 				extra_bits |= PTU_PTE_NEXT_TO_LAST;
3707 			else if (i == rmem->nr_pages - 1 &&
3708 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3709 				extra_bits |= PTU_PTE_LAST;
3710 			rmem->pg_tbl[i] =
3711 				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3712 		}
3713 	}
3714 
3715 	if (rmem->vmem_size) {
3716 		*rmem->vmem = vzalloc(rmem->vmem_size);
3717 		if (!(*rmem->vmem))
3718 			return -ENOMEM;
3719 	}
3720 	return 0;
3721 }
3722 
3723 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3724 				   struct bnxt_rx_ring_info *rxr)
3725 {
3726 	int i;
3727 
3728 	kfree(rxr->rx_tpa_idx_map);
3729 	rxr->rx_tpa_idx_map = NULL;
3730 	if (rxr->rx_tpa) {
3731 		for (i = 0; i < bp->max_tpa; i++) {
3732 			kfree(rxr->rx_tpa[i].agg_arr);
3733 			rxr->rx_tpa[i].agg_arr = NULL;
3734 		}
3735 	}
3736 	kfree(rxr->rx_tpa);
3737 	rxr->rx_tpa = NULL;
3738 }
3739 
3740 static void bnxt_free_tpa_info(struct bnxt *bp)
3741 {
3742 	int i;
3743 
3744 	for (i = 0; i < bp->rx_nr_rings; i++) {
3745 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3746 
3747 		bnxt_free_one_tpa_info(bp, rxr);
3748 	}
3749 }
3750 
3751 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3752 				   struct bnxt_rx_ring_info *rxr)
3753 {
3754 	struct rx_agg_cmp *agg;
3755 	int i;
3756 
3757 	rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa);
3758 	if (!rxr->rx_tpa)
3759 		return -ENOMEM;
3760 
3761 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3762 		return 0;
3763 	for (i = 0; i < bp->max_tpa; i++) {
3764 		agg = kzalloc_objs(*agg, MAX_SKB_FRAGS);
3765 		if (!agg)
3766 			return -ENOMEM;
3767 		rxr->rx_tpa[i].agg_arr = agg;
3768 	}
3769 	rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map);
3770 	if (!rxr->rx_tpa_idx_map)
3771 		return -ENOMEM;
3772 
3773 	return 0;
3774 }
3775 
3776 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3777 {
3778 	int i, rc;
3779 
3780 	bp->max_tpa = MAX_TPA;
3781 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3782 		if (!bp->max_tpa_v2)
3783 			return 0;
3784 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3785 	}
3786 
3787 	for (i = 0; i < bp->rx_nr_rings; i++) {
3788 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3789 
3790 		rc = bnxt_alloc_one_tpa_info(bp, rxr);
3791 		if (rc)
3792 			return rc;
3793 	}
3794 	return 0;
3795 }
3796 
3797 static void bnxt_free_rx_rings(struct bnxt *bp)
3798 {
3799 	int i;
3800 
3801 	if (!bp->rx_ring)
3802 		return;
3803 
3804 	bnxt_free_tpa_info(bp);
3805 	for (i = 0; i < bp->rx_nr_rings; i++) {
3806 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3807 		struct bnxt_ring_struct *ring;
3808 
3809 		if (rxr->xdp_prog)
3810 			bpf_prog_put(rxr->xdp_prog);
3811 
3812 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3813 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3814 
3815 		page_pool_destroy(rxr->page_pool);
3816 		page_pool_destroy(rxr->head_pool);
3817 		rxr->page_pool = rxr->head_pool = NULL;
3818 
3819 		kfree(rxr->rx_agg_bmap);
3820 		rxr->rx_agg_bmap = NULL;
3821 
3822 		ring = &rxr->rx_ring_struct;
3823 		bnxt_free_ring(bp, &ring->ring_mem);
3824 
3825 		ring = &rxr->rx_agg_ring_struct;
3826 		bnxt_free_ring(bp, &ring->ring_mem);
3827 	}
3828 }
3829 
3830 static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
3831 				       struct bnxt_rx_ring_info *rxr)
3832 {
3833 	/* The user may have chosen a larger than default rx_page_size.
3834 	 * We keep the ring sizes uniform and also want a uniform number
3835 	 * of bytes consumed per ring, so cap how much of the ring we fill.
3836 	 */
3837 	int fill_level = bp->rx_agg_ring_size;
3838 
3839 	if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
3840 		fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3841 
3842 	return fill_level;
3843 }
3844 
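/* Create the page_pool(s) for one RX ring.  The main pool is sized for
 * the aggregation fill level (plus the RX ring itself in page mode) and
 * may hold high-order or unreadable (e.g. devmem) pages; in those cases a
 * separate order-0 head pool is created for packet headers, otherwise
 * head_pool simply takes a reference on the main pool.
 */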
3845 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3846 				   struct bnxt_rx_ring_info *rxr,
3847 				   int numa_node)
3848 {
3849 	unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3850 	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3851 	struct page_pool_params pp = { 0 };
3852 	struct page_pool *pool;
3853 
3854 	pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
3855 	if (BNXT_RX_PAGE_MODE(bp))
3856 		pp.pool_size += bp->rx_ring_size / rx_size_fac;
3857 
3858 	pp.order = get_order(rxr->rx_page_size);
3859 	pp.nid = numa_node;
3860 	pp.netdev = bp->dev;
3861 	pp.dev = &bp->pdev->dev;
3862 	pp.dma_dir = bp->rx_dir;
3863 	pp.max_len = PAGE_SIZE << pp.order;
3864 	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3865 		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3866 	pp.queue_idx = rxr->bnapi->index;
3867 
3868 	pool = page_pool_create(&pp);
3869 	if (IS_ERR(pool))
3870 		return PTR_ERR(pool);
3871 	rxr->page_pool = pool;
3872 
3873 	rxr->need_head_pool = page_pool_is_unreadable(pool);
3874 	rxr->need_head_pool |= !!pp.order;
3875 	if (bnxt_separate_head_pool(rxr)) {
3876 		pp.order = 0;
3877 		pp.max_len = PAGE_SIZE;
3878 		pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3879 		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3880 		pool = page_pool_create(&pp);
3881 		if (IS_ERR(pool))
3882 			goto err_destroy_pp;
3883 	} else {
3884 		page_pool_get(pool);
3885 	}
3886 	rxr->head_pool = pool;
3887 
3888 	return 0;
3889 
3890 err_destroy_pp:
3891 	page_pool_destroy(rxr->page_pool);
3892 	rxr->page_pool = NULL;
3893 	return PTR_ERR(pool);
3894 }
3895 
3896 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3897 {
3898 	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3899 	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3900 }
3901 
3902 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3903 {
3904 	u16 mem_size;
3905 
3906 	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3907 	mem_size = rxr->rx_agg_bmap_size / 8;
3908 	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3909 	if (!rxr->rx_agg_bmap)
3910 		return -ENOMEM;
3911 
3912 	return 0;
3913 }
3914 
3915 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3916 {
3917 	int numa_node = dev_to_node(&bp->pdev->dev);
3918 	int i, rc = 0, agg_rings = 0, cpu;
3919 
3920 	if (!bp->rx_ring)
3921 		return -ENOMEM;
3922 
3923 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
3924 		agg_rings = 1;
3925 
3926 	for (i = 0; i < bp->rx_nr_rings; i++) {
3927 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3928 		struct bnxt_ring_struct *ring;
3929 		int cpu_node;
3930 
3931 		ring = &rxr->rx_ring_struct;
3932 
3933 		cpu = cpumask_local_spread(i, numa_node);
3934 		cpu_node = cpu_to_node(cpu);
3935 		netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3936 			   i, cpu_node);
3937 		rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3938 		if (rc)
3939 			return rc;
3940 		bnxt_enable_rx_page_pool(rxr);
3941 
3942 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3943 		if (rc < 0)
3944 			return rc;
3945 
3946 		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3947 						MEM_TYPE_PAGE_POOL,
3948 						rxr->page_pool);
3949 		if (rc) {
3950 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3951 			return rc;
3952 		}
3953 
3954 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3955 		if (rc)
3956 			return rc;
3957 
3958 		ring->grp_idx = i;
3959 		if (agg_rings) {
3960 			ring = &rxr->rx_agg_ring_struct;
3961 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3962 			if (rc)
3963 				return rc;
3964 
3965 			ring->grp_idx = i;
3966 			rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3967 			if (rc)
3968 				return rc;
3969 		}
3970 	}
3971 	if (bp->flags & BNXT_FLAG_TPA)
3972 		rc = bnxt_alloc_tpa_info(bp);
3973 	return rc;
3974 }
3975 
3976 static void bnxt_free_tx_rings(struct bnxt *bp)
3977 {
3978 	int i;
3979 	struct pci_dev *pdev = bp->pdev;
3980 
3981 	if (!bp->tx_ring)
3982 		return;
3983 
3984 	for (i = 0; i < bp->tx_nr_rings; i++) {
3985 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3986 		struct bnxt_ring_struct *ring;
3987 
3988 		if (txr->tx_push) {
3989 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
3990 					  txr->tx_push, txr->tx_push_mapping);
3991 			txr->tx_push = NULL;
3992 		}
3993 
3994 		ring = &txr->tx_ring_struct;
3995 
3996 		bnxt_free_ring(bp, &ring->ring_mem);
3997 	}
3998 }
3999 
4000 #define BNXT_TC_TO_RING_BASE(bp, tc)	\
4001 	((tc) * (bp)->tx_nr_rings_per_tc)
4002 
4003 #define BNXT_RING_TO_TC_OFF(bp, tx)	\
4004 	((tx) % (bp)->tx_nr_rings_per_tc)
4005 
4006 #define BNXT_RING_TO_TC(bp, tx)		\
4007 	((tx) / (bp)->tx_nr_rings_per_tc)
4008 
4009 static int bnxt_alloc_tx_rings(struct bnxt *bp)
4010 {
4011 	int i, j, rc;
4012 	struct pci_dev *pdev = bp->pdev;
4013 
4014 	bp->tx_push_size = 0;
4015 	if (bp->tx_push_thresh) {
4016 		int push_size;
4017 
4018 		push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
4019 					bp->tx_push_thresh);
4020 
4021 		if (push_size > 256) {
4022 			push_size = 0;
4023 			bp->tx_push_thresh = 0;
4024 		}
4025 
4026 		bp->tx_push_size = push_size;
4027 	}
4028 
4029 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
4030 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4031 		struct bnxt_ring_struct *ring;
4032 		u8 qidx;
4033 
4034 		ring = &txr->tx_ring_struct;
4035 
4036 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4037 		if (rc)
4038 			return rc;
4039 
4040 		ring->grp_idx = txr->bnapi->index;
4041 		if (bp->tx_push_size) {
4042 			dma_addr_t mapping;
4043 
4044 			/* One pre-allocated DMA buffer to back up
4045 			 * the TX push operation
4046 			 */
4047 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
4048 						bp->tx_push_size,
4049 						&txr->tx_push_mapping,
4050 						GFP_KERNEL);
4051 
4052 			if (!txr->tx_push)
4053 				return -ENOMEM;
4054 
4055 			mapping = txr->tx_push_mapping +
4056 				sizeof(struct tx_push_bd);
4057 			txr->data_mapping = cpu_to_le64(mapping);
4058 		}
4059 		qidx = bp->tc_to_qidx[j];
4060 		ring->queue_id = bp->q_info[qidx].queue_id;
4061 		spin_lock_init(&txr->xdp_tx_lock);
4062 		if (i < bp->tx_nr_rings_xdp)
4063 			continue;
4064 		if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4065 			j++;
4066 	}
4067 	return 0;
4068 }
4069 
4070 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4071 {
4072 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4073 
4074 	kfree(cpr->cp_desc_ring);
4075 	cpr->cp_desc_ring = NULL;
4076 	ring->ring_mem.pg_arr = NULL;
4077 	kfree(cpr->cp_desc_mapping);
4078 	cpr->cp_desc_mapping = NULL;
4079 	ring->ring_mem.dma_arr = NULL;
4080 }
4081 
4082 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4083 {
4084 	cpr->cp_desc_ring = kzalloc_objs(*cpr->cp_desc_ring, n);
4085 	if (!cpr->cp_desc_ring)
4086 		return -ENOMEM;
4087 	cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n);
4088 	if (!cpr->cp_desc_mapping)
4089 		return -ENOMEM;
4090 	return 0;
4091 }
4092 
4093 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4094 {
4095 	int i;
4096 
4097 	if (!bp->bnapi)
4098 		return;
4099 	for (i = 0; i < bp->cp_nr_rings; i++) {
4100 		struct bnxt_napi *bnapi = bp->bnapi[i];
4101 
4102 		if (!bnapi)
4103 			continue;
4104 		bnxt_free_cp_arrays(&bnapi->cp_ring);
4105 	}
4106 }
4107 
4108 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4109 {
4110 	int i, n = bp->cp_nr_pages;
4111 
4112 	for (i = 0; i < bp->cp_nr_rings; i++) {
4113 		struct bnxt_napi *bnapi = bp->bnapi[i];
4114 		int rc;
4115 
4116 		if (!bnapi)
4117 			continue;
4118 		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4119 		if (rc)
4120 			return rc;
4121 	}
4122 	return 0;
4123 }
4124 
4125 static void bnxt_free_cp_rings(struct bnxt *bp)
4126 {
4127 	int i;
4128 
4129 	if (!bp->bnapi)
4130 		return;
4131 
4132 	for (i = 0; i < bp->cp_nr_rings; i++) {
4133 		struct bnxt_napi *bnapi = bp->bnapi[i];
4134 		struct bnxt_cp_ring_info *cpr;
4135 		struct bnxt_ring_struct *ring;
4136 		int j;
4137 
4138 		if (!bnapi)
4139 			continue;
4140 
4141 		cpr = &bnapi->cp_ring;
4142 		ring = &cpr->cp_ring_struct;
4143 
4144 		bnxt_free_ring(bp, &ring->ring_mem);
4145 
4146 		if (!cpr->cp_ring_arr)
4147 			continue;
4148 
4149 		for (j = 0; j < cpr->cp_ring_count; j++) {
4150 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4151 
4152 			ring = &cpr2->cp_ring_struct;
4153 			bnxt_free_ring(bp, &ring->ring_mem);
4154 			bnxt_free_cp_arrays(cpr2);
4155 		}
4156 		kfree(cpr->cp_ring_arr);
4157 		cpr->cp_ring_arr = NULL;
4158 		cpr->cp_ring_count = 0;
4159 	}
4160 }
4161 
4162 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4163 				  struct bnxt_cp_ring_info *cpr)
4164 {
4165 	struct bnxt_ring_mem_info *rmem;
4166 	struct bnxt_ring_struct *ring;
4167 	int rc;
4168 
4169 	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4170 	if (rc) {
4171 		bnxt_free_cp_arrays(cpr);
4172 		return -ENOMEM;
4173 	}
4174 	ring = &cpr->cp_ring_struct;
4175 	rmem = &ring->ring_mem;
4176 	rmem->nr_pages = bp->cp_nr_pages;
4177 	rmem->page_size = HW_CMPD_RING_SIZE;
4178 	rmem->pg_arr = (void **)cpr->cp_desc_ring;
4179 	rmem->dma_arr = cpr->cp_desc_mapping;
4180 	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4181 	rc = bnxt_alloc_ring(bp, rmem);
4182 	if (rc) {
4183 		bnxt_free_ring(bp, rmem);
4184 		bnxt_free_cp_arrays(cpr);
4185 	}
4186 	return rc;
4187 }
4188 
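/* On P5+ chips each NAPI owns an NQ plus an array of sub completion
 * rings: one for RX (when the index maps to an RX ring) and one per
 * traffic class for TX (a single one for an XDP TX ring).  The sub-rings
 * are linked back to the matching rx_ring/tx_ring so the datapath can
 * find its CQ.  Older chips have a single completion ring per NAPI and
 * skip the sub-ring setup.
 */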
4189 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4190 {
4191 	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4192 	int i, j, rc, ulp_msix;
4193 	int tcs = bp->num_tc;
4194 
4195 	if (!tcs)
4196 		tcs = 1;
4197 	ulp_msix = bnxt_get_ulp_msix_num(bp);
4198 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4199 		struct bnxt_napi *bnapi = bp->bnapi[i];
4200 		struct bnxt_cp_ring_info *cpr, *cpr2;
4201 		struct bnxt_ring_struct *ring;
4202 		int cp_count = 0, k;
4203 		int rx = 0, tx = 0;
4204 
4205 		if (!bnapi)
4206 			continue;
4207 
4208 		cpr = &bnapi->cp_ring;
4209 		cpr->bnapi = bnapi;
4210 		ring = &cpr->cp_ring_struct;
4211 
4212 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4213 		if (rc)
4214 			return rc;
4215 
4216 		ring->map_idx = ulp_msix + i;
4217 
4218 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4219 			continue;
4220 
4221 		if (i < bp->rx_nr_rings) {
4222 			cp_count++;
4223 			rx = 1;
4224 		}
4225 		if (i < bp->tx_nr_rings_xdp) {
4226 			cp_count++;
4227 			tx = 1;
4228 		} else if ((sh && i < bp->tx_nr_rings) ||
4229 			 (!sh && i >= bp->rx_nr_rings)) {
4230 			cp_count += tcs;
4231 			tx = 1;
4232 		}
4233 
4234 		cpr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
4235 		if (!cpr->cp_ring_arr)
4236 			return -ENOMEM;
4237 		cpr->cp_ring_count = cp_count;
4238 
4239 		for (k = 0; k < cp_count; k++) {
4240 			cpr2 = &cpr->cp_ring_arr[k];
4241 			rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4242 			if (rc)
4243 				return rc;
4244 			cpr2->bnapi = bnapi;
4245 			cpr2->sw_stats = cpr->sw_stats;
4246 			cpr2->cp_idx = k;
4247 			if (!k && rx) {
4248 				bp->rx_ring[i].rx_cpr = cpr2;
4249 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4250 			} else {
4251 				int n, tc = k - rx;
4252 
4253 				n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4254 				bp->tx_ring[n].tx_cpr = cpr2;
4255 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4256 			}
4257 		}
4258 		if (tx)
4259 			j++;
4260 	}
4261 	return 0;
4262 }
4263 
4264 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4265 				     struct bnxt_rx_ring_info *rxr)
4266 {
4267 	struct bnxt_ring_mem_info *rmem;
4268 	struct bnxt_ring_struct *ring;
4269 
4270 	ring = &rxr->rx_ring_struct;
4271 	rmem = &ring->ring_mem;
4272 	rmem->nr_pages = bp->rx_nr_pages;
4273 	rmem->page_size = HW_RXBD_RING_SIZE;
4274 	rmem->pg_arr = (void **)rxr->rx_desc_ring;
4275 	rmem->dma_arr = rxr->rx_desc_mapping;
4276 	rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4277 	rmem->vmem = (void **)&rxr->rx_buf_ring;
4278 
4279 	ring = &rxr->rx_agg_ring_struct;
4280 	rmem = &ring->ring_mem;
4281 	rmem->nr_pages = bp->rx_agg_nr_pages;
4282 	rmem->page_size = HW_RXBD_RING_SIZE;
4283 	rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4284 	rmem->dma_arr = rxr->rx_agg_desc_mapping;
4285 	rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4286 	rmem->vmem = (void **)&rxr->rx_agg_ring;
4287 }
4288 
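/* Drop this RX ring's references to its page pools, xdp_rxq state and
 * descriptor pages without freeing them; ownership has been transferred
 * elsewhere (e.g. during a queue restart), so the ring struct can be
 * re-initialized from scratch without double-freeing.
 */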
4289 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4290 				      struct bnxt_rx_ring_info *rxr)
4291 {
4292 	struct bnxt_ring_mem_info *rmem;
4293 	struct bnxt_ring_struct *ring;
4294 	int i;
4295 
4296 	rxr->page_pool->p.napi = NULL;
4297 	rxr->page_pool = NULL;
4298 	rxr->head_pool->p.napi = NULL;
4299 	rxr->head_pool = NULL;
4300 	memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4301 
4302 	ring = &rxr->rx_ring_struct;
4303 	rmem = &ring->ring_mem;
4304 	rmem->pg_tbl = NULL;
4305 	rmem->pg_tbl_map = 0;
4306 	for (i = 0; i < rmem->nr_pages; i++) {
4307 		rmem->pg_arr[i] = NULL;
4308 		rmem->dma_arr[i] = 0;
4309 	}
4310 	*rmem->vmem = NULL;
4311 
4312 	ring = &rxr->rx_agg_ring_struct;
4313 	rmem = &ring->ring_mem;
4314 	rmem->pg_tbl = NULL;
4315 	rmem->pg_tbl_map = 0;
4316 	for (i = 0; i < rmem->nr_pages; i++) {
4317 		rmem->pg_arr[i] = NULL;
4318 		rmem->dma_arr[i] = 0;
4319 	}
4320 	*rmem->vmem = NULL;
4321 }
4322 
4323 static void bnxt_init_ring_struct(struct bnxt *bp)
4324 {
4325 	int i, j;
4326 
4327 	for (i = 0; i < bp->cp_nr_rings; i++) {
4328 		struct bnxt_napi *bnapi = bp->bnapi[i];
4329 		struct netdev_queue_config qcfg;
4330 		struct bnxt_ring_mem_info *rmem;
4331 		struct bnxt_cp_ring_info *cpr;
4332 		struct bnxt_rx_ring_info *rxr;
4333 		struct bnxt_tx_ring_info *txr;
4334 		struct bnxt_ring_struct *ring;
4335 
4336 		if (!bnapi)
4337 			continue;
4338 
4339 		cpr = &bnapi->cp_ring;
4340 		ring = &cpr->cp_ring_struct;
4341 		rmem = &ring->ring_mem;
4342 		rmem->nr_pages = bp->cp_nr_pages;
4343 		rmem->page_size = HW_CMPD_RING_SIZE;
4344 		rmem->pg_arr = (void **)cpr->cp_desc_ring;
4345 		rmem->dma_arr = cpr->cp_desc_mapping;
4346 		rmem->vmem_size = 0;
4347 
4348 		rxr = bnapi->rx_ring;
4349 		if (!rxr)
4350 			goto skip_rx;
4351 
4352 		netdev_queue_config(bp->dev, i, &qcfg);
4353 		rxr->rx_page_size = qcfg.rx_page_size;
4354 
4355 		ring = &rxr->rx_ring_struct;
4356 		rmem = &ring->ring_mem;
4357 		rmem->nr_pages = bp->rx_nr_pages;
4358 		rmem->page_size = HW_RXBD_RING_SIZE;
4359 		rmem->pg_arr = (void **)rxr->rx_desc_ring;
4360 		rmem->dma_arr = rxr->rx_desc_mapping;
4361 		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4362 		rmem->vmem = (void **)&rxr->rx_buf_ring;
4363 
4364 		ring = &rxr->rx_agg_ring_struct;
4365 		rmem = &ring->ring_mem;
4366 		rmem->nr_pages = bp->rx_agg_nr_pages;
4367 		rmem->page_size = HW_RXBD_RING_SIZE;
4368 		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4369 		rmem->dma_arr = rxr->rx_agg_desc_mapping;
4370 		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4371 		rmem->vmem = (void **)&rxr->rx_agg_ring;
4372 
4373 skip_rx:
4374 		bnxt_for_each_napi_tx(j, bnapi, txr) {
4375 			ring = &txr->tx_ring_struct;
4376 			rmem = &ring->ring_mem;
4377 			rmem->nr_pages = bp->tx_nr_pages;
4378 			rmem->page_size = HW_TXBD_RING_SIZE;
4379 			rmem->pg_arr = (void **)txr->tx_desc_ring;
4380 			rmem->dma_arr = txr->tx_desc_mapping;
4381 			rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4382 			rmem->vmem = (void **)&txr->tx_buf_ring;
4383 		}
4384 	}
4385 }
4386 
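/* Pre-program the static part of every RX BD in the ring: the length/type
 * flags passed in 'type', and an opaque field holding the producer index
 * so a completion can be matched back to its buffer.
 */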
4387 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4388 {
4389 	int i;
4390 	u32 prod;
4391 	struct rx_bd **rx_buf_ring;
4392 
4393 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4394 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4395 		int j;
4396 		struct rx_bd *rxbd;
4397 
4398 		rxbd = rx_buf_ring[i];
4399 		if (!rxbd)
4400 			continue;
4401 
4402 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4403 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4404 			rxbd->rx_bd_opaque = prod;
4405 		}
4406 	}
4407 }
4408 
4409 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4410 				       struct bnxt_rx_ring_info *rxr,
4411 				       int ring_nr)
4412 {
4413 	u32 prod;
4414 	int i;
4415 
4416 	prod = rxr->rx_prod;
4417 	for (i = 0; i < bp->rx_ring_size; i++) {
4418 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4419 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4420 				    ring_nr, i, bp->rx_ring_size);
4421 			break;
4422 		}
4423 		prod = NEXT_RX(prod);
4424 	}
4425 	rxr->rx_prod = prod;
4426 }
4427 
4428 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4429 					  struct bnxt_rx_ring_info *rxr,
4430 					  int ring_nr)
4431 {
4432 	int fill_level, i;
4433 	u32 prod;
4434 
4435 	fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
4436 
4437 	prod = rxr->rx_agg_prod;
4438 	for (i = 0; i < fill_level; i++) {
4439 		if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4440 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4441 				    ring_nr, i, bp->rx_agg_ring_size);
4442 			break;
4443 		}
4444 		prod = NEXT_RX_AGG(prod);
4445 	}
4446 	rxr->rx_agg_prod = prod;
4447 }
4448 
4449 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4450 					struct bnxt_rx_ring_info *rxr)
4451 {
4452 	dma_addr_t mapping;
4453 	u8 *data;
4454 	int i;
4455 
4456 	for (i = 0; i < bp->max_tpa; i++) {
4457 		data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4458 					    GFP_KERNEL);
4459 		if (!data)
4460 			return -ENOMEM;
4461 
4462 		rxr->rx_tpa[i].data = data;
4463 		rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4464 		rxr->rx_tpa[i].mapping = mapping;
4465 	}
4466 
4467 	return 0;
4468 }
4469 
4470 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4471 {
4472 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4473 	int rc;
4474 
4475 	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4476 
4477 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4478 		return 0;
4479 
4480 	bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4481 
4482 	if (rxr->rx_tpa) {
4483 		rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4484 		if (rc)
4485 			return rc;
4486 	}
4487 	return 0;
4488 }
4489 
4490 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4491 				       struct bnxt_rx_ring_info *rxr)
4492 {
4493 	struct bnxt_ring_struct *ring;
4494 	u32 type;
4495 
4496 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4497 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4498 
4499 	if (NET_IP_ALIGN == 2)
4500 		type |= RX_BD_FLAGS_SOP;
4501 
4502 	ring = &rxr->rx_ring_struct;
4503 	bnxt_init_rxbd_pages(ring, type);
4504 	ring->fw_ring_id = INVALID_HW_RING_ID;
4505 }
4506 
4507 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4508 					   struct bnxt_rx_ring_info *rxr)
4509 {
4510 	struct bnxt_ring_struct *ring;
4511 	u32 type;
4512 
4513 	ring = &rxr->rx_agg_ring_struct;
4514 	ring->fw_ring_id = INVALID_HW_RING_ID;
4515 	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4516 		type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
4517 			RX_BD_TYPE_RX_AGG_BD;
4518 
4519 		/* On P7, setting EOP will cause the chip to disable
4520 		 * Relaxed Ordering (RO) for TPA data.  Disable EOP for
4521 		 * potentially higher performance with RO.
4522 		 */
4523 		if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
4524 			type |= RX_BD_FLAGS_AGG_EOP;
4525 
4526 		bnxt_init_rxbd_pages(ring, type);
4527 	}
4528 }
4529 
4530 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4531 {
4532 	struct bnxt_rx_ring_info *rxr;
4533 
4534 	rxr = &bp->rx_ring[ring_nr];
4535 	bnxt_init_one_rx_ring_rxbd(bp, rxr);
4536 
4537 	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4538 			     &rxr->bnapi->napi);
4539 
4540 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4541 		bpf_prog_add(bp->xdp_prog, 1);
4542 		rxr->xdp_prog = bp->xdp_prog;
4543 	}
4544 
4545 	bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4546 
4547 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
4548 }
4549 
4550 static void bnxt_init_cp_rings(struct bnxt *bp)
4551 {
4552 	int i, j;
4553 
4554 	for (i = 0; i < bp->cp_nr_rings; i++) {
4555 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4556 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4557 
4558 		ring->fw_ring_id = INVALID_HW_RING_ID;
4559 		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4560 		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4561 		if (!cpr->cp_ring_arr)
4562 			continue;
4563 		for (j = 0; j < cpr->cp_ring_count; j++) {
4564 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4565 
4566 			ring = &cpr2->cp_ring_struct;
4567 			ring->fw_ring_id = INVALID_HW_RING_ID;
4568 			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4569 			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4570 		}
4571 	}
4572 }
4573 
4574 static int bnxt_init_rx_rings(struct bnxt *bp)
4575 {
4576 	int i, rc = 0;
4577 
4578 	if (BNXT_RX_PAGE_MODE(bp)) {
4579 		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4580 		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4581 	} else {
4582 		bp->rx_offset = BNXT_RX_OFFSET;
4583 		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4584 	}
4585 
4586 	for (i = 0; i < bp->rx_nr_rings; i++) {
4587 		rc = bnxt_init_one_rx_ring(bp, i);
4588 		if (rc)
4589 			break;
4590 	}
4591 
4592 	return rc;
4593 }
4594 
4595 static int bnxt_init_tx_rings(struct bnxt *bp)
4596 {
4597 	u16 i;
4598 
4599 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4600 				   BNXT_MIN_TX_DESC_CNT);
4601 
4602 	for (i = 0; i < bp->tx_nr_rings; i++) {
4603 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4604 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4605 
4606 		ring->fw_ring_id = INVALID_HW_RING_ID;
4607 
4608 		if (i >= bp->tx_nr_rings_xdp)
4609 			netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4610 					     NETDEV_QUEUE_TYPE_TX,
4611 					     &txr->bnapi->napi);
4612 	}
4613 
4614 	return 0;
4615 }
4616 
4617 static void bnxt_free_ring_grps(struct bnxt *bp)
4618 {
4619 	kfree(bp->grp_info);
4620 	bp->grp_info = NULL;
4621 }
4622 
4623 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4624 {
4625 	int i;
4626 
4627 	if (irq_re_init) {
4628 		bp->grp_info = kzalloc_objs(struct bnxt_ring_grp_info,
4629 					    bp->cp_nr_rings);
4630 		if (!bp->grp_info)
4631 			return -ENOMEM;
4632 	}
4633 	for (i = 0; i < bp->cp_nr_rings; i++) {
4634 		if (irq_re_init)
4635 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4636 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4637 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4638 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4639 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4640 	}
4641 	return 0;
4642 }
4643 
4644 static void bnxt_free_vnics(struct bnxt *bp)
4645 {
4646 	kfree(bp->vnic_info);
4647 	bp->vnic_info = NULL;
4648 	bp->nr_vnics = 0;
4649 }
4650 
4651 static int bnxt_alloc_vnics(struct bnxt *bp)
4652 {
4653 	int num_vnics = 1;
4654 
4655 #ifdef CONFIG_RFS_ACCEL
4656 	if (bp->flags & BNXT_FLAG_RFS) {
4657 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4658 			num_vnics++;
4659 		else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4660 			num_vnics += bp->rx_nr_rings;
4661 	}
4662 #endif
4663 
4664 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4665 		num_vnics++;
4666 
4667 	bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics);
4668 	if (!bp->vnic_info)
4669 		return -ENOMEM;
4670 
4671 	bp->nr_vnics = num_vnics;
4672 	return 0;
4673 }
4674 
4675 static void bnxt_init_vnics(struct bnxt *bp)
4676 {
4677 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4678 	int i;
4679 
4680 	for (i = 0; i < bp->nr_vnics; i++) {
4681 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4682 		int j;
4683 
4684 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
4685 		vnic->vnic_id = i;
4686 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4687 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4688 
4689 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4690 
4691 		if (bp->vnic_info[i].rss_hash_key) {
4692 			if (i == BNXT_VNIC_DEFAULT) {
4693 				u8 *key = (void *)vnic->rss_hash_key;
4694 				int k;
4695 
4696 				if (!bp->rss_hash_key_valid &&
4697 				    !bp->rss_hash_key_updated) {
4698 					get_random_bytes(bp->rss_hash_key,
4699 							 HW_HASH_KEY_SIZE);
4700 					bp->rss_hash_key_updated = true;
4701 				}
4702 
4703 				memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4704 				       HW_HASH_KEY_SIZE);
4705 
4706 				if (!bp->rss_hash_key_updated)
4707 					continue;
4708 
4709 				bp->rss_hash_key_updated = false;
4710 				bp->rss_hash_key_valid = true;
4711 
4712 				bp->toeplitz_prefix = 0;
4713 				for (k = 0; k < 8; k++) {
4714 					bp->toeplitz_prefix <<= 8;
4715 					bp->toeplitz_prefix |= key[k];
4716 				}
4717 			} else {
4718 				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4719 				       HW_HASH_KEY_SIZE);
4720 			}
4721 		}
4722 	}
4723 }
4724 
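/* Number of ring pages needed for ring_size descriptors: take the
 * quotient, add one (which also covers any remainder), then round up to
 * the next power of two.  A ring smaller than one page gets exactly one.
 */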
4725 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4726 {
4727 	int pages;
4728 
4729 	pages = ring_size / desc_per_pg;
4730 
4731 	if (!pages)
4732 		return 1;
4733 
4734 	pages++;
4735 
4736 	while (pages & (pages - 1))
4737 		pages++;
4738 
4739 	return pages;
4740 }
4741 
4742 void bnxt_set_tpa_flags(struct bnxt *bp)
4743 {
4744 	bp->flags &= ~BNXT_FLAG_TPA;
4745 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4746 		return;
4747 	if (bp->dev->features & NETIF_F_LRO)
4748 		bp->flags |= BNXT_FLAG_LRO;
4749 	else if (bp->dev->features & NETIF_F_GRO_HW)
4750 		bp->flags |= BNXT_FLAG_GRO;
4751 }
4752 
4753 static void bnxt_init_ring_params(struct bnxt *bp)
4754 {
4755 	unsigned int rx_size;
4756 
4757 	bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4758 	/* Try to fit 4 chunks into a 4k page */
4759 	rx_size = SZ_1K -
4760 		NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4761 	bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4762 }
4763 
4764 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4765  * be set on entry.
4766  */
4767 void bnxt_set_ring_params(struct bnxt *bp)
4768 {
4769 	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4770 	u32 agg_factor = 0, agg_ring_size = 0;
4771 
4772 	/* 8 for CRC and VLAN */
4773 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4774 
4775 	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4776 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4777 
4778 	ring_size = bp->rx_ring_size;
4779 	bp->rx_agg_ring_size = 0;
4780 	bp->rx_agg_nr_pages = 0;
4781 
4782 	if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4783 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4784 
4785 	bp->flags &= ~BNXT_FLAG_JUMBO;
4786 	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4787 		u32 jumbo_factor;
4788 
4789 		bp->flags |= BNXT_FLAG_JUMBO;
4790 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4791 		if (jumbo_factor > agg_factor)
4792 			agg_factor = jumbo_factor;
4793 	}
4794 	if (agg_factor) {
4795 		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4796 			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4797 			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4798 				    bp->rx_ring_size, ring_size);
4799 			bp->rx_ring_size = ring_size;
4800 		}
4801 		agg_ring_size = ring_size * agg_factor;
4802 
4803 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4804 							RX_DESC_CNT);
4805 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4806 			u32 tmp = agg_ring_size;
4807 
4808 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4809 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4810 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4811 				    tmp, agg_ring_size);
4812 		}
4813 		bp->rx_agg_ring_size = agg_ring_size;
4814 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4815 
4816 		if (BNXT_RX_PAGE_MODE(bp)) {
4817 			rx_space = PAGE_SIZE;
4818 			rx_size = PAGE_SIZE -
4819 				  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4820 				  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4821 		} else {
4822 			rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4823 				       bp->rx_copybreak,
4824 				       bp->dev->cfg_pending->hds_thresh);
4825 			rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4826 			rx_space = rx_size + NET_SKB_PAD +
4827 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4828 		}
4829 	}
4830 
4831 	bp->rx_buf_use_size = rx_size;
4832 	bp->rx_buf_size = rx_space;
4833 
4834 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4835 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4836 
4837 	ring_size = bp->tx_ring_size;
4838 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4839 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4840 
4841 	max_rx_cmpl = bp->rx_ring_size;
4842 	/* max_tpa needs to be added because TPA_START completions are
4843 	 * recycled immediately, so TPA completions are not bounded by
4844 	 * the RX ring size.
4845 	 */
4846 	if (bp->flags & BNXT_FLAG_TPA)
4847 		max_rx_cmpl += bp->max_tpa;
4848 	/* RX and TPA completions are 32-byte, all others are 16-byte */
4849 	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4850 	bp->cp_ring_size = ring_size;
4851 
4852 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4853 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
4854 		bp->cp_nr_pages = MAX_CP_PAGES;
4855 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4856 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4857 			    ring_size, bp->cp_ring_size);
4858 	}
4859 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4860 	bp->cp_ring_mask = bp->cp_bit - 1;
4861 }
4862 
4863 /* Changing allocation mode of RX rings.
4864  * TODO: Update when extending xdp_rxq_info to support allocation modes.
4865  */
4866 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4867 {
4868 	struct net_device *dev = bp->dev;
4869 
4870 	if (page_mode) {
4871 		bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4872 		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4873 
4874 		if (bp->xdp_prog->aux->xdp_has_frags)
4875 			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4876 		else
4877 			dev->max_mtu =
4878 				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4879 		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4880 			bp->flags |= BNXT_FLAG_JUMBO;
4881 			bp->rx_skb_func = bnxt_rx_multi_page_skb;
4882 		} else {
4883 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4884 			bp->rx_skb_func = bnxt_rx_page_skb;
4885 		}
4886 		bp->rx_dir = DMA_BIDIRECTIONAL;
4887 	} else {
4888 		dev->max_mtu = bp->max_mtu;
4889 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4890 		bp->rx_dir = DMA_FROM_DEVICE;
4891 		bp->rx_skb_func = bnxt_rx_skb;
4892 	}
4893 }
4894 
4895 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4896 {
4897 	__bnxt_set_rx_skb_mode(bp, page_mode);
4898 
4899 	if (!page_mode) {
4900 		int rx, tx;
4901 
4902 		bnxt_get_max_rings(bp, &rx, &tx, true);
4903 		if (rx > 1) {
4904 			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4905 			bp->dev->hw_features |= NETIF_F_LRO;
4906 		}
4907 	}
4908 
4909 	/* Update LRO and GRO_HW availability */
4910 	netdev_update_features(bp->dev);
4911 }
4912 
4913 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4914 {
4915 	int i;
4916 	struct bnxt_vnic_info *vnic;
4917 	struct pci_dev *pdev = bp->pdev;
4918 
4919 	if (!bp->vnic_info)
4920 		return;
4921 
4922 	for (i = 0; i < bp->nr_vnics; i++) {
4923 		vnic = &bp->vnic_info[i];
4924 
4925 		kfree(vnic->fw_grp_ids);
4926 		vnic->fw_grp_ids = NULL;
4927 
4928 		kfree(vnic->uc_list);
4929 		vnic->uc_list = NULL;
4930 
4931 		if (vnic->mc_list) {
4932 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4933 					  vnic->mc_list, vnic->mc_list_mapping);
4934 			vnic->mc_list = NULL;
4935 		}
4936 
4937 		if (vnic->rss_table) {
4938 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4939 					  vnic->rss_table,
4940 					  vnic->rss_table_dma_addr);
4941 			vnic->rss_table = NULL;
4942 		}
4943 
4944 		vnic->rss_hash_key = NULL;
4945 		vnic->flags = 0;
4946 	}
4947 }
4948 
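/* Allocate the per-VNIC data structures: a unicast address list, a
 * DMA-coherent multicast list, the firmware ring group ID table (pre-P5
 * chips only), and a DMA block holding the RSS indirection table with the
 * hash key appended after it.
 */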
4949 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4950 {
4951 	int i, rc = 0, size;
4952 	struct bnxt_vnic_info *vnic;
4953 	struct pci_dev *pdev = bp->pdev;
4954 	int max_rings;
4955 
4956 	for (i = 0; i < bp->nr_vnics; i++) {
4957 		vnic = &bp->vnic_info[i];
4958 
4959 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4960 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4961 
4962 			if (mem_size > 0) {
4963 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4964 				if (!vnic->uc_list) {
4965 					rc = -ENOMEM;
4966 					goto out;
4967 				}
4968 			}
4969 		}
4970 
4971 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4972 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4973 			vnic->mc_list =
4974 				dma_alloc_coherent(&pdev->dev,
4975 						   vnic->mc_list_size,
4976 						   &vnic->mc_list_mapping,
4977 						   GFP_KERNEL);
4978 			if (!vnic->mc_list) {
4979 				rc = -ENOMEM;
4980 				goto out;
4981 			}
4982 		}
4983 
4984 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4985 			goto vnic_skip_grps;
4986 
4987 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4988 			max_rings = bp->rx_nr_rings;
4989 		else
4990 			max_rings = 1;
4991 
4992 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4993 		if (!vnic->fw_grp_ids) {
4994 			rc = -ENOMEM;
4995 			goto out;
4996 		}
4997 vnic_skip_grps:
4998 		if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4999 		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
5000 			continue;
5001 
5002 		/* Allocate rss table and hash key */
5003 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
5004 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5005 			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
5006 
5007 		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
5008 		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
5009 						     vnic->rss_table_size,
5010 						     &vnic->rss_table_dma_addr,
5011 						     GFP_KERNEL);
5012 		if (!vnic->rss_table) {
5013 			rc = -ENOMEM;
5014 			goto out;
5015 		}
5016 
5017 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
5018 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
5019 	}
5020 	return 0;
5021 
5022 out:
5023 	return rc;
5024 }
5025 
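/* Tear down the HWRM DMA pool and mark every still-pending HWRM request
 * token as cancelled.  The walk is done under RCU because completion
 * paths may be traversing the pending list concurrently.
 */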
5026 static void bnxt_free_hwrm_resources(struct bnxt *bp)
5027 {
5028 	struct bnxt_hwrm_wait_token *token;
5029 
5030 	dma_pool_destroy(bp->hwrm_dma_pool);
5031 	bp->hwrm_dma_pool = NULL;
5032 
5033 	rcu_read_lock();
5034 	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
5035 		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
5036 	rcu_read_unlock();
5037 }
5038 
5039 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5040 {
5041 	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5042 					    BNXT_HWRM_DMA_SIZE,
5043 					    BNXT_HWRM_DMA_ALIGN, 0);
5044 	if (!bp->hwrm_dma_pool)
5045 		return -ENOMEM;
5046 
5047 	INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5048 
5049 	return 0;
5050 }
5051 
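/* Each bnxt_stats_mem bundles a DMA-coherent hw_stats block written by
 * the firmware, a sw_stats shadow copy, and hw_masks describing the width
 * of each hardware counter (used to handle counter wrap).
 */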
5052 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5053 {
5054 	kfree(stats->hw_masks);
5055 	stats->hw_masks = NULL;
5056 	kfree(stats->sw_stats);
5057 	stats->sw_stats = NULL;
5058 	if (stats->hw_stats) {
5059 		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5060 				  stats->hw_stats_map);
5061 		stats->hw_stats = NULL;
5062 	}
5063 }
5064 
5065 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5066 				bool alloc_masks)
5067 {
5068 	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5069 					     &stats->hw_stats_map, GFP_KERNEL);
5070 	if (!stats->hw_stats)
5071 		return -ENOMEM;
5072 
5073 	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5074 	if (!stats->sw_stats)
5075 		goto stats_mem_err;
5076 
5077 	if (alloc_masks) {
5078 		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5079 		if (!stats->hw_masks)
5080 			goto stats_mem_err;
5081 	}
5082 	return 0;
5083 
5084 stats_mem_err:
5085 	bnxt_free_stats_mem(bp, stats);
5086 	return -ENOMEM;
5087 }
5088 
5089 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5090 {
5091 	int i;
5092 
5093 	for (i = 0; i < count; i++)
5094 		mask_arr[i] = mask;
5095 }
5096 
5097 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5098 {
5099 	int i;
5100 
5101 	for (i = 0; i < count; i++)
5102 		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5103 }
5104 
5105 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5106 				    struct bnxt_stats_mem *stats)
5107 {
5108 	struct hwrm_func_qstats_ext_output *resp;
5109 	struct hwrm_func_qstats_ext_input *req;
5110 	__le64 *hw_masks;
5111 	int rc;
5112 
5113 	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5114 	    !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5115 		return -EOPNOTSUPP;
5116 
5117 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5118 	if (rc)
5119 		return rc;
5120 
5121 	req->fid = cpu_to_le16(0xffff);
5122 	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5123 
5124 	resp = hwrm_req_hold(bp, req);
5125 	rc = hwrm_req_send(bp, req);
5126 	if (!rc) {
5127 		hw_masks = &resp->rx_ucast_pkts;
5128 		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5129 	}
5130 	hwrm_req_drop(bp, req);
5131 	return rc;
5132 }
5133 
5134 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5135 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5136 
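/* Initialize the counter-width masks for ring and port statistics.  The
 * masks are queried from the firmware where supported; otherwise assumed
 * widths are used (48-bit ring counters on P5+, 40-bit port counters,
 * full 64 bits elsewhere).
 */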
5137 static void bnxt_init_stats(struct bnxt *bp)
5138 {
5139 	struct bnxt_napi *bnapi = bp->bnapi[0];
5140 	struct bnxt_cp_ring_info *cpr;
5141 	struct bnxt_stats_mem *stats;
5142 	__le64 *rx_stats, *tx_stats;
5143 	int rc, rx_count, tx_count;
5144 	u64 *rx_masks, *tx_masks;
5145 	u64 mask;
5146 	u8 flags;
5147 
5148 	cpr = &bnapi->cp_ring;
5149 	stats = &cpr->stats;
5150 	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5151 	if (rc) {
5152 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5153 			mask = (1ULL << 48) - 1;
5154 		else
5155 			mask = -1ULL;
5156 		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5157 	}
5158 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
5159 		stats = &bp->port_stats;
5160 		rx_stats = stats->hw_stats;
5161 		rx_masks = stats->hw_masks;
5162 		rx_count = sizeof(struct rx_port_stats) / 8;
5163 		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5164 		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5165 		tx_count = sizeof(struct tx_port_stats) / 8;
5166 
5167 		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5168 		rc = bnxt_hwrm_port_qstats(bp, flags);
5169 		if (rc) {
5170 			mask = (1ULL << 40) - 1;
5171 
5172 			bnxt_fill_masks(rx_masks, mask, rx_count);
5173 			bnxt_fill_masks(tx_masks, mask, tx_count);
5174 		} else {
5175 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5176 			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5177 			bnxt_hwrm_port_qstats(bp, 0);
5178 		}
5179 	}
5180 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5181 		stats = &bp->rx_port_stats_ext;
5182 		rx_stats = stats->hw_stats;
5183 		rx_masks = stats->hw_masks;
5184 		rx_count = sizeof(struct rx_port_stats_ext) / 8;
5185 		stats = &bp->tx_port_stats_ext;
5186 		tx_stats = stats->hw_stats;
5187 		tx_masks = stats->hw_masks;
5188 		tx_count = sizeof(struct tx_port_stats_ext) / 8;
5189 
5190 		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5191 		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5192 		if (rc) {
5193 			mask = (1ULL << 40) - 1;
5194 
5195 			bnxt_fill_masks(rx_masks, mask, rx_count);
5196 			if (tx_stats)
5197 				bnxt_fill_masks(tx_masks, mask, tx_count);
5198 		} else {
5199 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5200 			if (tx_stats)
5201 				bnxt_copy_hw_masks(tx_masks, tx_stats,
5202 						   tx_count);
5203 			bnxt_hwrm_port_qstats_ext(bp, 0);
5204 		}
5205 	}
5206 }
5207 
5208 static void bnxt_free_port_stats(struct bnxt *bp)
5209 {
5210 	bp->flags &= ~BNXT_FLAG_PORT_STATS;
5211 	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5212 
5213 	bnxt_free_stats_mem(bp, &bp->port_stats);
5214 	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5215 	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5216 }
5217 
5218 static void bnxt_free_ring_stats(struct bnxt *bp)
5219 {
5220 	int i;
5221 
5222 	if (!bp->bnapi)
5223 		return;
5224 
5225 	for (i = 0; i < bp->cp_nr_rings; i++) {
5226 		struct bnxt_napi *bnapi = bp->bnapi[i];
5227 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5228 
5229 		bnxt_free_stats_mem(bp, &cpr->stats);
5230 
5231 		kfree(cpr->sw_stats);
5232 		cpr->sw_stats = NULL;
5233 	}
5234 }
5235 
5236 static int bnxt_alloc_stats(struct bnxt *bp)
5237 {
5238 	u32 size, i;
5239 	int rc;
5240 
5241 	size = bp->hw_ring_stats_size;
5242 
5243 	for (i = 0; i < bp->cp_nr_rings; i++) {
5244 		struct bnxt_napi *bnapi = bp->bnapi[i];
5245 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5246 
5247 		cpr->sw_stats = kzalloc_obj(*cpr->sw_stats);
5248 		if (!cpr->sw_stats)
5249 			return -ENOMEM;
5250 
5251 		cpr->stats.len = size;
5252 		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5253 		if (rc)
5254 			return rc;
5255 
5256 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5257 	}
5258 
5259 	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5260 		return 0;
5261 
5262 	if (bp->port_stats.hw_stats)
5263 		goto alloc_ext_stats;
5264 
5265 	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5266 	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5267 	if (rc)
5268 		return rc;
5269 
5270 	bp->flags |= BNXT_FLAG_PORT_STATS;
5271 
5272 alloc_ext_stats:
5273 	/* Display extended statistics only if FW supports it */
5274 	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5275 		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5276 			return 0;
5277 
5278 	if (bp->rx_port_stats_ext.hw_stats)
5279 		goto alloc_tx_ext_stats;
5280 
5281 	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5282 	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5283 	/* Extended stats are optional */
5284 	if (rc)
5285 		return 0;
5286 
5287 alloc_tx_ext_stats:
5288 	if (bp->tx_port_stats_ext.hw_stats)
5289 		return 0;
5290 
5291 	if (bp->hwrm_spec_code >= 0x10902 ||
5292 	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5293 		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5294 		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5295 		/* Extended stats are optional */
5296 		if (rc)
5297 			return 0;
5298 	}
5299 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5300 	return 0;
5301 }
5302 
5303 static void bnxt_clear_ring_indices(struct bnxt *bp)
5304 {
5305 	int i, j;
5306 
5307 	if (!bp->bnapi)
5308 		return;
5309 
5310 	for (i = 0; i < bp->cp_nr_rings; i++) {
5311 		struct bnxt_napi *bnapi = bp->bnapi[i];
5312 		struct bnxt_cp_ring_info *cpr;
5313 		struct bnxt_rx_ring_info *rxr;
5314 		struct bnxt_tx_ring_info *txr;
5315 
5316 		if (!bnapi)
5317 			continue;
5318 
5319 		cpr = &bnapi->cp_ring;
5320 		cpr->cp_raw_cons = 0;
5321 
5322 		bnxt_for_each_napi_tx(j, bnapi, txr) {
5323 			txr->tx_prod = 0;
5324 			txr->tx_cons = 0;
5325 			txr->tx_hw_cons = 0;
5326 		}
5327 
5328 		rxr = bnapi->rx_ring;
5329 		if (rxr) {
5330 			rxr->rx_prod = 0;
5331 			rxr->rx_agg_prod = 0;
5332 			rxr->rx_sw_agg_prod = 0;
5333 			rxr->rx_next_cons = 0;
5334 		}
5335 		bnapi->events = 0;
5336 	}
5337 }
5338 
5339 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5340 {
5341 	u8 type = fltr->type, flags = fltr->flags;
5342 
5343 	INIT_LIST_HEAD(&fltr->list);
5344 	if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5345 	    (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5346 		list_add_tail(&fltr->list, &bp->usr_fltr_list);
5347 }
5348 
5349 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5350 {
5351 	if (!list_empty(&fltr->list))
5352 		list_del_init(&fltr->list);
5353 }
5354 
5355 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5356 {
5357 	struct bnxt_filter_base *usr_fltr, *tmp;
5358 
5359 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5360 		if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5361 			continue;
5362 		bnxt_del_one_usr_fltr(bp, usr_fltr);
5363 	}
5364 }
5365 
5366 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5367 {
5368 	hlist_del(&fltr->hash);
5369 	bnxt_del_one_usr_fltr(bp, fltr);
5370 	if (fltr->flags) {
5371 		clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5372 		bp->ntp_fltr_count--;
5373 	}
5374 	kfree(fltr);
5375 }
5376 
5377 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5378 {
5379 	int i;
5380 
5381 	netdev_assert_locked_or_invisible(bp->dev);
5382 
5383 	/* We hold the netdev instance lock and all our NAPIs have been
5384 	 * disabled, so it is safe to delete the hash table.
5385 	 */
5386 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5387 		struct hlist_head *head;
5388 		struct hlist_node *tmp;
5389 		struct bnxt_ntuple_filter *fltr;
5390 
5391 		head = &bp->ntp_fltr_hash_tbl[i];
5392 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5393 			bnxt_del_l2_filter(bp, fltr->l2_fltr);
5394 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5395 				     !list_empty(&fltr->base.list)))
5396 				continue;
5397 			bnxt_del_fltr(bp, &fltr->base);
5398 		}
5399 	}
5400 	if (!all)
5401 		return;
5402 
5403 	bitmap_free(bp->ntp_fltr_bmap);
5404 	bp->ntp_fltr_bmap = NULL;
5405 	bp->ntp_fltr_count = 0;
5406 }
5407 
5408 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5409 {
5410 	int i, rc = 0;
5411 
5412 	if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5413 		return 0;
5414 
5415 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5416 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5417 
5418 	bp->ntp_fltr_count = 0;
5419 	bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5420 
5421 	if (!bp->ntp_fltr_bmap)
5422 		rc = -ENOMEM;
5423 
5424 	return rc;
5425 }
5426 
5427 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5428 {
5429 	int i;
5430 
5431 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5432 		struct hlist_head *head;
5433 		struct hlist_node *tmp;
5434 		struct bnxt_l2_filter *fltr;
5435 
5436 		head = &bp->l2_fltr_hash_tbl[i];
5437 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5438 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5439 				     !list_empty(&fltr->base.list)))
5440 				continue;
5441 			bnxt_del_fltr(bp, &fltr->base);
5442 		}
5443 	}
5444 }
5445 
5446 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5447 {
5448 	int i;
5449 
5450 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5451 		INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5452 	get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5453 }
5454 
5455 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5456 {
5457 	bnxt_free_vnic_attributes(bp);
5458 	bnxt_free_tx_rings(bp);
5459 	bnxt_free_rx_rings(bp);
5460 	bnxt_free_cp_rings(bp);
5461 	bnxt_free_all_cp_arrays(bp);
5462 	bnxt_free_ntp_fltrs(bp, false);
5463 	bnxt_free_l2_filters(bp, false);
5464 	if (irq_re_init) {
5465 		bnxt_free_ring_stats(bp);
5466 		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5467 		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5468 			bnxt_free_port_stats(bp);
5469 		bnxt_free_ring_grps(bp);
5470 		bnxt_free_vnics(bp);
5471 		kfree(bp->tx_ring_map);
5472 		bp->tx_ring_map = NULL;
5473 		kfree(bp->tx_ring);
5474 		bp->tx_ring = NULL;
5475 		kfree(bp->rx_ring);
5476 		bp->rx_ring = NULL;
5477 		kfree(bp->bnapi);
5478 		bp->bnapi = NULL;
5479 	} else {
5480 		bnxt_clear_ring_indices(bp);
5481 	}
5482 }
5483 
5484 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5485 {
5486 	int i, j, rc, size, arr_size;
5487 	void *bnapi;
5488 
5489 	if (irq_re_init) {
5490 		/* Allocate the bnxt_napi pointer array and the bnxt_napi
5491 		 * structs for all rings in one contiguous block
5492 		 */
5493 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5494 				bp->cp_nr_rings);
5495 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5496 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5497 		if (!bnapi)
5498 			return -ENOMEM;
5499 
5500 		bp->bnapi = bnapi;
5501 		bnapi += arr_size;
5502 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5503 			bp->bnapi[i] = bnapi;
5504 			bp->bnapi[i]->index = i;
5505 			bp->bnapi[i]->bp = bp;
5506 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5507 				struct bnxt_cp_ring_info *cpr =
5508 					&bp->bnapi[i]->cp_ring;
5509 
5510 				cpr->cp_ring_struct.ring_mem.flags =
5511 					BNXT_RMEM_RING_PTE_FLAG;
5512 			}
5513 		}
5514 
5515 		bp->rx_ring = kzalloc_objs(struct bnxt_rx_ring_info,
5516 					   bp->rx_nr_rings);
5517 		if (!bp->rx_ring)
5518 			return -ENOMEM;
5519 
5520 		for (i = 0; i < bp->rx_nr_rings; i++) {
5521 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5522 
5523 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5524 				rxr->rx_ring_struct.ring_mem.flags =
5525 					BNXT_RMEM_RING_PTE_FLAG;
5526 				rxr->rx_agg_ring_struct.ring_mem.flags =
5527 					BNXT_RMEM_RING_PTE_FLAG;
5528 			} else {
5529 				rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5530 			}
5531 			rxr->bnapi = bp->bnapi[i];
5532 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5533 		}
5534 
5535 		bp->tx_ring = kzalloc_objs(struct bnxt_tx_ring_info,
5536 					   bp->tx_nr_rings);
5537 		if (!bp->tx_ring)
5538 			return -ENOMEM;
5539 
5540 		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5541 					  GFP_KERNEL);
5542 
5543 		if (!bp->tx_ring_map)
5544 			return -ENOMEM;
5545 
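		/* With shared rings, TX ring i reuses the NAPI of RX ring i;
		 * otherwise the TX NAPIs start right after the RX NAPIs.
		 */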
5546 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5547 			j = 0;
5548 		else
5549 			j = bp->rx_nr_rings;
5550 
5551 		for (i = 0; i < bp->tx_nr_rings; i++) {
5552 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5553 			struct bnxt_napi *bnapi2;
5554 
5555 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5556 				txr->tx_ring_struct.ring_mem.flags =
5557 					BNXT_RMEM_RING_PTE_FLAG;
5558 			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5559 			if (i >= bp->tx_nr_rings_xdp) {
5560 				int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5561 
5562 				bnapi2 = bp->bnapi[k];
5563 				txr->txq_index = i - bp->tx_nr_rings_xdp;
5564 				txr->tx_napi_idx =
5565 					BNXT_RING_TO_TC(bp, txr->txq_index);
5566 				bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5567 				bnapi2->tx_int = bnxt_tx_int;
5568 			} else {
5569 				bnapi2 = bp->bnapi[j];
5570 				bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5571 				bnapi2->tx_ring[0] = txr;
5572 				bnapi2->tx_int = bnxt_tx_int_xdp;
5573 				j++;
5574 			}
5575 			txr->bnapi = bnapi2;
5576 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5577 				txr->tx_cpr = &bnapi2->cp_ring;
5578 		}
5579 
5580 		rc = bnxt_alloc_stats(bp);
5581 		if (rc)
5582 			goto alloc_mem_err;
5583 		bnxt_init_stats(bp);
5584 
5585 		rc = bnxt_alloc_ntp_fltrs(bp);
5586 		if (rc)
5587 			goto alloc_mem_err;
5588 
5589 		rc = bnxt_alloc_vnics(bp);
5590 		if (rc)
5591 			goto alloc_mem_err;
5592 	}
5593 
5594 	rc = bnxt_alloc_all_cp_arrays(bp);
5595 	if (rc)
5596 		goto alloc_mem_err;
5597 
5598 	bnxt_init_ring_struct(bp);
5599 
5600 	rc = bnxt_alloc_rx_rings(bp);
5601 	if (rc)
5602 		goto alloc_mem_err;
5603 
5604 	rc = bnxt_alloc_tx_rings(bp);
5605 	if (rc)
5606 		goto alloc_mem_err;
5607 
5608 	rc = bnxt_alloc_cp_rings(bp);
5609 	if (rc)
5610 		goto alloc_mem_err;
5611 
5612 	bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5613 						  BNXT_VNIC_MCAST_FLAG |
5614 						  BNXT_VNIC_UCAST_FLAG;
5615 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5616 		bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5617 			BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5618 
5619 	rc = bnxt_alloc_vnic_attributes(bp);
5620 	if (rc)
5621 		goto alloc_mem_err;
5622 	return 0;
5623 
5624 alloc_mem_err:
5625 	bnxt_free_mem(bp, true);
5626 	return rc;
5627 }
5628 
5629 static void bnxt_disable_int(struct bnxt *bp)
5630 {
5631 	int i;
5632 
5633 	if (!bp->bnapi)
5634 		return;
5635 
5636 	for (i = 0; i < bp->cp_nr_rings; i++) {
5637 		struct bnxt_napi *bnapi = bp->bnapi[i];
5638 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5639 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5640 
5641 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
5642 			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5643 	}
5644 }
5645 
5646 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5647 {
5648 	struct bnxt_napi *bnapi = bp->bnapi[n];
5649 	struct bnxt_cp_ring_info *cpr;
5650 
5651 	cpr = &bnapi->cp_ring;
5652 	return cpr->cp_ring_struct.map_idx;
5653 }
5654 
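/* Quiesce interrupts: raise intr_sem so the handlers back off, disarm all
 * completion/notification rings, then wait for in-flight handlers.
 */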
5655 static void bnxt_disable_int_sync(struct bnxt *bp)
5656 {
5657 	int i;
5658 
5659 	if (!bp->irq_tbl)
5660 		return;
5661 
5662 	atomic_inc(&bp->intr_sem);
5663 
5664 	bnxt_disable_int(bp);
5665 	for (i = 0; i < bp->cp_nr_rings; i++) {
5666 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5667 
5668 		synchronize_irq(bp->irq_tbl[map_idx].vector);
5669 	}
5670 }
5671 
5672 static void bnxt_enable_int(struct bnxt *bp)
5673 {
5674 	int i;
5675 
5676 	atomic_set(&bp->intr_sem, 0);
5677 	for (i = 0; i < bp->cp_nr_rings; i++) {
5678 		struct bnxt_napi *bnapi = bp->bnapi[i];
5679 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5680 
5681 		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5682 	}
5683 }
5684 
5685 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5686 			    bool async_only)
5687 {
5688 	DECLARE_BITMAP(async_events_bmap, 256);
5689 	u32 *events = (u32 *)async_events_bmap;
5690 	struct hwrm_func_drv_rgtr_output *resp;
5691 	struct hwrm_func_drv_rgtr_input *req;
5692 	u32 flags;
5693 	int rc, i;
5694 
5695 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5696 	if (rc)
5697 		return rc;
5698 
5699 	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5700 				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
5701 				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5702 
5703 	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5704 	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5705 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5706 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5707 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5708 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5709 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5710 	if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5711 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5712 	req->flags = cpu_to_le32(flags);
5713 	req->ver_maj_8b = DRV_VER_MAJ;
5714 	req->ver_min_8b = DRV_VER_MIN;
5715 	req->ver_upd_8b = DRV_VER_UPD;
5716 	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5717 	req->ver_min = cpu_to_le16(DRV_VER_MIN);
5718 	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5719 
5720 	if (BNXT_PF(bp)) {
5721 		u32 data[8];
5722 		int i;
5723 
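		/* Build the bitmap of HWRM commands that firmware should
		 * forward from VFs to the PF for handling.
		 */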
5724 		memset(data, 0, sizeof(data));
5725 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5726 			u16 cmd = bnxt_vf_req_snif[i];
5727 			unsigned int bit, idx;
5728 
5729 			if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
5730 			    cmd == HWRM_PORT_PHY_QCFG)
5731 				continue;
5732 
5733 			idx = cmd / 32;
5734 			bit = cmd % 32;
5735 			data[idx] |= 1 << bit;
5736 		}
5737 
5738 		for (i = 0; i < 8; i++)
5739 			req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5740 
5741 		req->enables |=
5742 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5743 	}
5744 
5745 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5746 		req->flags |= cpu_to_le32(
5747 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5748 
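	/* Collect the async event IDs to be forwarded by the firmware,
	 * skipping events that this configuration cannot consume.
	 */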
5749 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
5750 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5751 		u16 event_id = bnxt_async_events_arr[i];
5752 
5753 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5754 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5755 			continue;
5756 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5757 		    !bp->ptp_cfg)
5758 			continue;
5759 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
5760 	}
5761 	if (bmap && bmap_size) {
5762 		for (i = 0; i < bmap_size; i++) {
5763 			if (test_bit(i, bmap))
5764 				__set_bit(i, async_events_bmap);
5765 		}
5766 	}
5767 	for (i = 0; i < 8; i++)
5768 		req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5769 
5770 	if (async_only)
5771 		req->enables =
5772 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5773 
5774 	resp = hwrm_req_hold(bp, req);
5775 	rc = hwrm_req_send(bp, req);
5776 	if (!rc) {
5777 		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5778 		if (resp->flags &
5779 		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5780 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5781 	}
5782 	hwrm_req_drop(bp, req);
5783 	return rc;
5784 }
5785 
5786 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5787 {
5788 	struct hwrm_func_drv_unrgtr_input *req;
5789 	int rc;
5790 
5791 	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5792 		return 0;
5793 
5794 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5795 	if (rc)
5796 		return rc;
5797 	return hwrm_req_send(bp, req);
5798 }
5799 
5800 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5801 
5802 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5803 {
5804 	struct hwrm_tunnel_dst_port_free_input *req;
5805 	int rc;
5806 
5807 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5808 	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5809 		return 0;
5810 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5811 	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5812 		return 0;
5813 
5814 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5815 	if (rc)
5816 		return rc;
5817 
5818 	req->tunnel_type = tunnel_type;
5819 
5820 	switch (tunnel_type) {
5821 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5822 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5823 		bp->vxlan_port = 0;
5824 		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5825 		break;
5826 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5827 		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5828 		bp->nge_port = 0;
5829 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5830 		break;
5831 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5832 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5833 		bp->vxlan_gpe_port = 0;
5834 		bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5835 		break;
5836 	default:
5837 		break;
5838 	}
5839 
5840 	rc = hwrm_req_send(bp, req);
5841 	if (rc)
5842 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5843 			   rc);
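	/* The tunnel TPA enable bitmap depends on the configured UDP ports,
	 * so refresh the TPA configuration after freeing the port.
	 */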
5844 	if (bp->flags & BNXT_FLAG_TPA)
5845 		bnxt_set_tpa(bp, true);
5846 	return rc;
5847 }
5848 
5849 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5850 					   u8 tunnel_type)
5851 {
5852 	struct hwrm_tunnel_dst_port_alloc_output *resp;
5853 	struct hwrm_tunnel_dst_port_alloc_input *req;
5854 	int rc;
5855 
5856 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5857 	if (rc)
5858 		return rc;
5859 
5860 	req->tunnel_type = tunnel_type;
5861 	req->tunnel_dst_port_val = port;
5862 
5863 	resp = hwrm_req_hold(bp, req);
5864 	rc = hwrm_req_send(bp, req);
5865 	if (rc) {
5866 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5867 			   rc);
5868 		goto err_out;
5869 	}
5870 
5871 	switch (tunnel_type) {
5872 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5873 		bp->vxlan_port = port;
5874 		bp->vxlan_fw_dst_port_id =
5875 			le16_to_cpu(resp->tunnel_dst_port_id);
5876 		break;
5877 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5878 		bp->nge_port = port;
5879 		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5880 		break;
5881 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5882 		bp->vxlan_gpe_port = port;
5883 		bp->vxlan_gpe_fw_dst_port_id =
5884 			le16_to_cpu(resp->tunnel_dst_port_id);
5885 		break;
5886 	default:
5887 		break;
5888 	}
5889 	if (bp->flags & BNXT_FLAG_TPA)
5890 		bnxt_set_tpa(bp, true);
5891 
5892 err_out:
5893 	hwrm_req_drop(bp, req);
5894 	return rc;
5895 }
5896 
5897 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5898 {
5899 	struct hwrm_cfa_l2_set_rx_mask_input *req;
5900 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5901 	int rc;
5902 
5903 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5904 	if (rc)
5905 		return rc;
5906 
5907 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5908 	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5909 		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5910 		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5911 	}
5912 	req->mask = cpu_to_le32(vnic->rx_mask);
5913 	return hwrm_req_send_silent(bp, req);
5914 }
5915 
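/* Drop one reference to an L2 filter.  The last reference removes the
 * filter from the hash table under ntp_fltr_lock and frees it after an
 * RCU grace period.
 */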
5916 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5917 {
5918 	if (!atomic_dec_and_test(&fltr->refcnt))
5919 		return;
5920 	spin_lock_bh(&bp->ntp_fltr_lock);
5921 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5922 		spin_unlock_bh(&bp->ntp_fltr_lock);
5923 		return;
5924 	}
5925 	hlist_del_rcu(&fltr->base.hash);
5926 	bnxt_del_one_usr_fltr(bp, &fltr->base);
5927 	if (fltr->base.flags) {
5928 		clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5929 		bp->ntp_fltr_count--;
5930 	}
5931 	spin_unlock_bh(&bp->ntp_fltr_lock);
5932 	kfree_rcu(fltr, base.rcu);
5933 }
5934 
5935 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5936 						      struct bnxt_l2_key *key,
5937 						      u32 idx)
5938 {
5939 	struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5940 	struct bnxt_l2_filter *fltr;
5941 
5942 	hlist_for_each_entry_rcu(fltr, head, base.hash) {
5943 		struct bnxt_l2_key *l2_key = &fltr->l2_key;
5944 
5945 		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5946 		    l2_key->vlan == key->vlan)
5947 			return fltr;
5948 	}
5949 	return NULL;
5950 }
5951 
5952 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5953 						    struct bnxt_l2_key *key,
5954 						    u32 idx)
5955 {
5956 	struct bnxt_l2_filter *fltr = NULL;
5957 
5958 	rcu_read_lock();
5959 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5960 	if (fltr)
5961 		atomic_inc(&fltr->refcnt);
5962 	rcu_read_unlock();
5963 	return fltr;
5964 }
5965 
5966 #define BNXT_IPV4_4TUPLE(bp, fkeys)					\
5967 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5968 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||	\
5969 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5970 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5971 
5972 #define BNXT_IPV6_4TUPLE(bp, fkeys)					\
5973 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5974 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||	\
5975 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5976 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5977 
5978 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5979 {
5980 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5981 		if (BNXT_IPV4_4TUPLE(bp, fkeys))
5982 			return sizeof(fkeys->addrs.v4addrs) +
5983 			       sizeof(fkeys->ports);
5984 
5985 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5986 			return sizeof(fkeys->addrs.v4addrs);
5987 	}
5988 
5989 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5990 		if (BNXT_IPV6_4TUPLE(bp, fkeys))
5991 			return sizeof(fkeys->addrs.v6addrs) +
5992 			       sizeof(fkeys->ports);
5993 
5994 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5995 			return sizeof(fkeys->addrs.v6addrs);
5996 	}
5997 
5998 	return 0;
5999 }
6000 
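/* Compute the Toeplitz hash of the flow tuple in software, using the same
 * hash key as the hardware, so the result mirrors the hardware RSS hash.
 */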
6001 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
6002 			 const unsigned char *key)
6003 {
6004 	u64 prefix = bp->toeplitz_prefix, hash = 0;
6005 	struct bnxt_ipv4_tuple tuple4;
6006 	struct bnxt_ipv6_tuple tuple6;
6007 	int i, j, len = 0;
6008 	u8 *four_tuple;
6009 
6010 	len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
6011 	if (!len)
6012 		return 0;
6013 
6014 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6015 		tuple4.v4addrs = fkeys->addrs.v4addrs;
6016 		tuple4.ports = fkeys->ports;
6017 		four_tuple = (unsigned char *)&tuple4;
6018 	} else {
6019 		tuple6.v6addrs = fkeys->addrs.v6addrs;
6020 		tuple6.ports = fkeys->ports;
6021 		four_tuple = (unsigned char *)&tuple6;
6022 	}
6023 
6024 	for (i = 0, j = 8; i < len; i++, j++) {
6025 		u8 byte = four_tuple[i];
6026 		int bit;
6027 
6028 		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
6029 			if (byte & 0x80)
6030 				hash ^= prefix;
6031 		}
6032 		prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
6033 	}
6034 
6035 	/* The valid part of the hash is in the upper 32 bits. */
6036 	return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
6037 }
6038 
6039 #ifdef CONFIG_RFS_ACCEL
6040 static struct bnxt_l2_filter *
6041 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
6042 {
6043 	struct bnxt_l2_filter *fltr;
6044 	u32 idx;
6045 
6046 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6047 	      BNXT_L2_FLTR_HASH_MASK;
6048 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
6049 	return fltr;
6050 }
6051 #endif
6052 
6053 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6054 			       struct bnxt_l2_key *key, u32 idx)
6055 {
6056 	struct hlist_head *head;
6057 
6058 	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6059 	fltr->l2_key.vlan = key->vlan;
6060 	fltr->base.type = BNXT_FLTR_TYPE_L2;
6061 	if (fltr->base.flags) {
6062 		int bit_id;
6063 
6064 		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6065 						 bp->max_fltr, 0);
6066 		if (bit_id < 0)
6067 			return -ENOMEM;
6068 		fltr->base.sw_id = (u16)bit_id;
6069 		bp->ntp_fltr_count++;
6070 	}
6071 	head = &bp->l2_fltr_hash_tbl[idx];
6072 	hlist_add_head_rcu(&fltr->base.hash, head);
6073 	bnxt_insert_usr_fltr(bp, &fltr->base);
6074 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6075 	atomic_set(&fltr->refcnt, 1);
6076 	return 0;
6077 }
6078 
6079 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6080 						   struct bnxt_l2_key *key,
6081 						   gfp_t gfp)
6082 {
6083 	struct bnxt_l2_filter *fltr;
6084 	u32 idx;
6085 	int rc;
6086 
6087 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6088 	      BNXT_L2_FLTR_HASH_MASK;
6089 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
6090 	if (fltr)
6091 		return fltr;
6092 
6093 	fltr = kzalloc_obj(*fltr, gfp);
6094 	if (!fltr)
6095 		return ERR_PTR(-ENOMEM);
6096 	spin_lock_bh(&bp->ntp_fltr_lock);
6097 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6098 	spin_unlock_bh(&bp->ntp_fltr_lock);
6099 	if (rc) {
6100 		bnxt_del_l2_filter(bp, fltr);
6101 		fltr = ERR_PTR(rc);
6102 	}
6103 	return fltr;
6104 }
6105 
6106 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6107 						struct bnxt_l2_key *key,
6108 						u16 flags)
6109 {
6110 	struct bnxt_l2_filter *fltr;
6111 	u32 idx;
6112 	int rc;
6113 
6114 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6115 	      BNXT_L2_FLTR_HASH_MASK;
6116 	spin_lock_bh(&bp->ntp_fltr_lock);
6117 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6118 	if (fltr) {
6119 		fltr = ERR_PTR(-EEXIST);
6120 		goto l2_filter_exit;
6121 	}
6122 	fltr = kzalloc_obj(*fltr, GFP_ATOMIC);
6123 	if (!fltr) {
6124 		fltr = ERR_PTR(-ENOMEM);
6125 		goto l2_filter_exit;
6126 	}
6127 	fltr->base.flags = flags;
6128 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6129 	if (rc) {
6130 		spin_unlock_bh(&bp->ntp_fltr_lock);
6131 		bnxt_del_l2_filter(bp, fltr);
6132 		return ERR_PTR(rc);
6133 	}
6134 
6135 l2_filter_exit:
6136 	spin_unlock_bh(&bp->ntp_fltr_lock);
6137 	return fltr;
6138 }
6139 
6140 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6141 {
6142 #ifdef CONFIG_BNXT_SRIOV
6143 	struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6144 
6145 	return vf->fw_fid;
6146 #else
6147 	return INVALID_HW_RING_ID;
6148 #endif
6149 }
6150 
6151 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6152 {
6153 	struct hwrm_cfa_l2_filter_free_input *req;
6154 	u16 target_id = 0xffff;
6155 	int rc;
6156 
6157 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6158 		struct bnxt_pf_info *pf = &bp->pf;
6159 
6160 		if (fltr->base.vf_idx >= pf->active_vfs)
6161 			return -EINVAL;
6162 
6163 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6164 		if (target_id == INVALID_HW_RING_ID)
6165 			return -EINVAL;
6166 	}
6167 
6168 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6169 	if (rc)
6170 		return rc;
6171 
6172 	req->target_id = cpu_to_le16(target_id);
6173 	req->l2_filter_id = fltr->base.filter_id;
6174 	return hwrm_req_send(bp, req);
6175 }
6176 
6177 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6178 {
6179 	struct hwrm_cfa_l2_filter_alloc_output *resp;
6180 	struct hwrm_cfa_l2_filter_alloc_input *req;
6181 	u16 target_id = 0xffff;
6182 	int rc;
6183 
6184 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6185 		struct bnxt_pf_info *pf = &bp->pf;
6186 
6187 		if (fltr->base.vf_idx >= pf->active_vfs)
6188 			return -EINVAL;
6189 
6190 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6191 	}
6192 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6193 	if (rc)
6194 		return rc;
6195 
6196 	req->target_id = cpu_to_le16(target_id);
6197 	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6198 
6199 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6200 		req->flags |=
6201 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6202 	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6203 	req->enables =
6204 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6205 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6206 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6207 	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6208 	eth_broadcast_addr(req->l2_addr_mask);
6209 
6210 	if (fltr->l2_key.vlan) {
6211 		req->enables |=
6212 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6213 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6214 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6215 		req->num_vlans = 1;
6216 		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6217 		req->l2_ivlan_mask = cpu_to_le16(0xfff);
6218 	}
6219 
6220 	resp = hwrm_req_hold(bp, req);
6221 	rc = hwrm_req_send(bp, req);
6222 	if (!rc) {
6223 		fltr->base.filter_id = resp->l2_filter_id;
6224 		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6225 	}
6226 	hwrm_req_drop(bp, req);
6227 	return rc;
6228 }
6229 
6230 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6231 				     struct bnxt_ntuple_filter *fltr)
6232 {
6233 	struct hwrm_cfa_ntuple_filter_free_input *req;
6234 	int rc;
6235 
6236 	set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6237 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
6238 		return 0;
6239 
6240 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6241 	if (rc)
6242 		return rc;
6243 
6244 	req->ntuple_filter_id = fltr->base.filter_id;
6245 	return hwrm_req_send(bp, req);
6246 }
6247 
6248 #define BNXT_NTP_FLTR_FLAGS					\
6249 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
6250 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
6251 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
6252 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
6253 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
6254 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
6255 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
6256 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
6257 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
6258 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
6259 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
6260 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
6261 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6262 
6263 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
6264 		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6265 
6266 void bnxt_fill_ipv6_mask(__be32 mask[4])
6267 {
6268 	int i;
6269 
6270 	for (i = 0; i < 4; i++)
6271 		mask[i] = cpu_to_be32(~0);
6272 }
6273 
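/* Choose the ntuple filter's destination: the VNIC of an ethtool RSS
 * context, the dedicated ntuple VNIC plus an RFS ring table index, or
 * else the RX ring index directly.
 */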
6274 static void
6275 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6276 			  struct hwrm_cfa_ntuple_filter_alloc_input *req,
6277 			  struct bnxt_ntuple_filter *fltr)
6278 {
6279 	u16 rxq = fltr->base.rxq;
6280 
6281 	if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6282 		struct ethtool_rxfh_context *ctx;
6283 		struct bnxt_rss_ctx *rss_ctx;
6284 		struct bnxt_vnic_info *vnic;
6285 
6286 		ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6287 			      fltr->base.fw_vnic_id);
6288 		if (ctx) {
6289 			rss_ctx = ethtool_rxfh_context_priv(ctx);
6290 			vnic = &rss_ctx->vnic;
6291 
6292 			req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6293 		}
6294 		return;
6295 	}
6296 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6297 		struct bnxt_vnic_info *vnic;
6298 		u32 enables;
6299 
6300 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6301 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6302 		enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6303 		req->enables |= cpu_to_le32(enables);
6304 		req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6305 	} else {
6306 		u32 flags;
6307 
6308 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6309 		req->flags |= cpu_to_le32(flags);
6310 		req->dst_id = cpu_to_le16(rxq);
6311 	}
6312 }
6313 
6314 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6315 				      struct bnxt_ntuple_filter *fltr)
6316 {
6317 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6318 	struct hwrm_cfa_ntuple_filter_alloc_input *req;
6319 	struct bnxt_flow_masks *masks = &fltr->fmasks;
6320 	struct flow_keys *keys = &fltr->fkeys;
6321 	struct bnxt_l2_filter *l2_fltr;
6322 	struct bnxt_vnic_info *vnic;
6323 	int rc;
6324 
6325 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6326 	if (rc)
6327 		return rc;
6328 
6329 	l2_fltr = fltr->l2_fltr;
6330 	req->l2_filter_id = l2_fltr->base.filter_id;
6331 
6332 	if (fltr->base.flags & BNXT_ACT_DROP) {
6333 		req->flags =
6334 			cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6335 	} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6336 		bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6337 	} else {
6338 		vnic = &bp->vnic_info[fltr->base.rxq + 1];
6339 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6340 	}
6341 	req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6342 
6343 	req->ethertype = htons(ETH_P_IP);
6344 	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6345 	req->ip_protocol = keys->basic.ip_proto;
6346 
6347 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6348 		req->ethertype = htons(ETH_P_IPV6);
6349 		req->ip_addr_type =
6350 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6351 		*(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6352 		*(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6353 		*(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6354 		*(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6355 	} else {
6356 		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6357 		req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6358 		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6359 		req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6360 	}
6361 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6362 		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6363 		req->tunnel_type =
6364 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6365 	}
6366 
6367 	req->src_port = keys->ports.src;
6368 	req->src_port_mask = masks->ports.src;
6369 	req->dst_port = keys->ports.dst;
6370 	req->dst_port_mask = masks->ports.dst;
6371 
6372 	resp = hwrm_req_hold(bp, req);
6373 	rc = hwrm_req_send(bp, req);
6374 	if (!rc)
6375 		fltr->base.filter_id = resp->ntuple_filter_id;
6376 	hwrm_req_drop(bp, req);
6377 	return rc;
6378 }
6379 
6380 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6381 				     const u8 *mac_addr)
6382 {
6383 	struct bnxt_l2_filter *fltr;
6384 	struct bnxt_l2_key key;
6385 	int rc;
6386 
6387 	ether_addr_copy(key.dst_mac_addr, mac_addr);
6388 	key.vlan = 0;
6389 	fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6390 	if (IS_ERR(fltr))
6391 		return PTR_ERR(fltr);
6392 
6393 	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6394 	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6395 	if (rc)
6396 		bnxt_del_l2_filter(bp, fltr);
6397 	else
6398 		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6399 	return rc;
6400 }
6401 
6402 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6403 {
6404 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6405 
6406 	/* Any associated ntuple filters will also be cleared by firmware. */
6407 	for (i = 0; i < num_of_vnics; i++) {
6408 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6409 
6410 		for (j = 0; j < vnic->uc_filter_count; j++) {
6411 			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6412 
6413 			bnxt_hwrm_l2_filter_free(bp, fltr);
6414 			bnxt_del_l2_filter(bp, fltr);
6415 		}
6416 		vnic->uc_filter_count = 0;
6417 	}
6418 }
6419 
6420 #define BNXT_DFLT_TUNL_TPA_BMAP				\
6421 	(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |	\
6422 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |	\
6423 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6424 
6425 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6426 					   struct hwrm_vnic_tpa_cfg_input *req)
6427 {
6428 	u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6429 
6430 	if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6431 		return;
6432 
6433 	if (bp->vxlan_port)
6434 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6435 	if (bp->vxlan_gpe_port)
6436 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6437 	if (bp->nge_port)
6438 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6439 
6440 	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6441 	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6442 }
6443 
6444 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6445 			   u32 tpa_flags)
6446 {
6447 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6448 	struct hwrm_vnic_tpa_cfg_input *req;
6449 	int rc;
6450 
6451 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6452 		return 0;
6453 
6454 	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6455 	if (rc)
6456 		return rc;
6457 
6458 	if (tpa_flags) {
6459 		u16 mss = bp->dev->mtu - 40;
6460 		u32 nsegs, n, segs = 0, flags;
6461 
6462 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6463 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6464 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6465 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6466 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6467 		if (tpa_flags & BNXT_FLAG_GRO)
6468 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6469 
6470 		req->flags = cpu_to_le32(flags);
6471 
6472 		req->enables =
6473 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6474 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6475 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6476 
6477 		/* The number of segs is in log2 units, and the first packet
6478 		 * is not counted as part of these units.
6479 		 */
6480 		if (mss <= BNXT_RX_PAGE_SIZE) {
6481 			n = BNXT_RX_PAGE_SIZE / mss;
6482 			nsegs = (MAX_SKB_FRAGS - 1) * n;
6483 		} else {
6484 			n = mss / BNXT_RX_PAGE_SIZE;
6485 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
6486 				n++;
6487 			nsegs = (MAX_SKB_FRAGS - n) / n;
6488 		}
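		/* For example, a 1500-byte MTU gives mss = 1460; with 4K RX
		 * pages this yields n = 2 and nsegs = (MAX_SKB_FRAGS - 1) * 2.
		 */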
6489 
6490 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6491 			segs = MAX_TPA_SEGS_P5;
6492 			max_aggs = bp->max_tpa;
6493 		} else {
6494 			segs = ilog2(nsegs);
6495 		}
6496 		req->max_agg_segs = cpu_to_le16(segs);
6497 		req->max_aggs = cpu_to_le16(max_aggs);
6498 
6499 		req->min_agg_len = cpu_to_le32(512);
6500 		bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6501 	}
6502 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6503 
6504 	return hwrm_req_send(bp, req);
6505 }
6506 
6507 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6508 {
6509 	struct bnxt_ring_grp_info *grp_info;
6510 
6511 	grp_info = &bp->grp_info[ring->grp_idx];
6512 	return grp_info->cp_fw_ring_id;
6513 }
6514 
6515 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6516 {
6517 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6518 		return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6519 	else
6520 		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6521 }
6522 
6523 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6524 {
6525 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6526 		return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6527 	else
6528 		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6529 }
6530 
6531 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6532 {
6533 	int entries;
6534 
6535 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6536 		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6537 	else
6538 		entries = HW_HASH_INDEX_SIZE;
6539 
6540 	bp->rss_indir_tbl_entries = entries;
6541 	bp->rss_indir_tbl =
6542 		kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6543 	if (!bp->rss_indir_tbl)
6544 		return -ENOMEM;
6545 
6546 	return 0;
6547 }
6548 
6549 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6550 				 struct ethtool_rxfh_context *rss_ctx)
6551 {
6552 	u16 max_rings, max_entries, pad, i;
6553 	u32 *rss_indir_tbl;
6554 
6555 	if (!bp->rx_nr_rings)
6556 		return;
6557 
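	/* The last RX ring is not usable for RSS on Nitro A0, so the
	 * default spread excludes it.
	 */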
6558 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6559 		max_rings = bp->rx_nr_rings - 1;
6560 	else
6561 		max_rings = bp->rx_nr_rings;
6562 
6563 	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6564 	if (rss_ctx)
6565 		rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6566 	else
6567 		rss_indir_tbl = &bp->rss_indir_tbl[0];
6568 
6569 	for (i = 0; i < max_entries; i++)
6570 		rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6571 
6572 	pad = bp->rss_indir_tbl_entries - max_entries;
6573 	if (pad)
6574 		memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6575 }
6576 
6577 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6578 {
6579 	u32 i, tbl_size, max_ring = 0;
6580 
6581 	if (!bp->rss_indir_tbl)
6582 		return 0;
6583 
6584 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6585 	for (i = 0; i < tbl_size; i++)
6586 		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6587 	return max_ring;
6588 }
6589 
6590 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6591 {
6592 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6593 		if (!rx_rings)
6594 			return 0;
6595 		if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
6596 			return BNXT_RSS_TABLE_MAX_TBL_P5;
6597 
6598 		return bnxt_calc_nr_ring_pages(rx_rings - 1,
6599 					       BNXT_RSS_TABLE_ENTRIES_P5);
6600 	}
6601 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6602 		return 2;
6603 	return 1;
6604 }
6605 
6606 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6607 {
6608 	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6609 	u16 i, j;
6610 
6611 	/* Fill the RSS indirection table with ring group ids */
6612 	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6613 		if (!no_rss)
6614 			j = bp->rss_indir_tbl[i];
6615 		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6616 	}
6617 }
6618 
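/* On P5+ chips each indirection table entry is a pair of firmware IDs:
 * the RX ring followed by its companion completion ring.
 */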
6619 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6620 				    struct bnxt_vnic_info *vnic)
6621 {
6622 	__le16 *ring_tbl = vnic->rss_table;
6623 	struct bnxt_rx_ring_info *rxr;
6624 	u16 tbl_size, i;
6625 
6626 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6627 
6628 	for (i = 0; i < tbl_size; i++) {
6629 		u16 ring_id, j;
6630 
6631 		if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6632 			j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6633 		else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6634 			j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6635 		else
6636 			j = bp->rss_indir_tbl[i];
6637 		rxr = &bp->rx_ring[j];
6638 
6639 		ring_id = rxr->rx_ring_struct.fw_ring_id;
6640 		*ring_tbl++ = cpu_to_le16(ring_id);
6641 		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6642 		*ring_tbl++ = cpu_to_le16(ring_id);
6643 	}
6644 }
6645 
6646 static void
6647 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6648 			 struct bnxt_vnic_info *vnic)
6649 {
6650 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6651 		bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6652 		if (bp->flags & BNXT_FLAG_CHIP_P7)
6653 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6654 	} else {
6655 		bnxt_fill_hw_rss_tbl(bp, vnic);
6656 	}
6657 
6658 	if (bp->rss_hash_delta) {
6659 		req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6660 		if (bp->rss_hash_cfg & bp->rss_hash_delta)
6661 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6662 		else
6663 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6664 	} else {
6665 		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6666 	}
6667 	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6668 	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6669 	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6670 }
6671 
6672 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6673 				  bool set_rss)
6674 {
6675 	struct hwrm_vnic_rss_cfg_input *req;
6676 	int rc;
6677 
6678 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6679 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6680 		return 0;
6681 
6682 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6683 	if (rc)
6684 		return rc;
6685 
6686 	if (set_rss)
6687 		__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6688 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6689 	return hwrm_req_send(bp, req);
6690 }
6691 
6692 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6693 				     struct bnxt_vnic_info *vnic, bool set_rss)
6694 {
6695 	struct hwrm_vnic_rss_cfg_input *req;
6696 	dma_addr_t ring_tbl_map;
6697 	u32 i, nr_ctxs;
6698 	int rc;
6699 
6700 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6701 	if (rc)
6702 		return rc;
6703 
6704 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6705 	if (!set_rss)
6706 		return hwrm_req_send(bp, req);
6707 
6708 	__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6709 	ring_tbl_map = vnic->rss_table_dma_addr;
6710 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6711 
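	/* Send one request per RSS context, each covering one slice of the
	 * ring table; the DMA address advances by one slice per context.
	 */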
6712 	hwrm_req_hold(bp, req);
6713 	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6714 		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6715 		req->ring_table_pair_index = i;
6716 		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6717 		rc = hwrm_req_send(bp, req);
6718 		if (rc)
6719 			goto exit;
6720 	}
6721 
6722 exit:
6723 	hwrm_req_drop(bp, req);
6724 	return rc;
6725 }
6726 
6727 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6728 {
6729 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6730 	struct hwrm_vnic_rss_qcfg_output *resp;
6731 	struct hwrm_vnic_rss_qcfg_input *req;
6732 
6733 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6734 		return;
6735 
6736 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6737 	/* all contexts configured to same hash_type, zero always exists */
6738 	/* All contexts share the same hash_type; context 0 always exists */
6739 	resp = hwrm_req_hold(bp, req);
6740 	if (!hwrm_req_send(bp, req)) {
6741 		bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6742 		bp->rss_hash_delta = 0;
6743 	}
6744 	hwrm_req_drop(bp, req);
6745 }
6746 
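/* Configure RX buffer placement for the VNIC: the jumbo threshold and,
 * when aggregation rings are in use, header-data split with the pending
 * ethtool hds_thresh value.
 */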
6747 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6748 {
6749 	u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6750 	struct hwrm_vnic_plcmodes_cfg_input *req;
6751 	int rc;
6752 
6753 	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6754 	if (rc)
6755 		return rc;
6756 
6757 	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6758 	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6759 	req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6760 
6761 	if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6762 		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6763 					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6764 		req->enables |=
6765 			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6766 		req->hds_threshold = cpu_to_le16(hds_thresh);
6767 	}
6768 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6769 	return hwrm_req_send(bp, req);
6770 }
6771 
6772 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6773 					struct bnxt_vnic_info *vnic,
6774 					u16 ctx_idx)
6775 {
6776 	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6777 
6778 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6779 		return;
6780 
6781 	req->rss_cos_lb_ctx_id =
6782 		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6783 
6784 	hwrm_req_send(bp, req);
6785 	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6786 }
6787 
6788 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6789 {
6790 	int i, j;
6791 
6792 	for (i = 0; i < bp->nr_vnics; i++) {
6793 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6794 
6795 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6796 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6797 				bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6798 		}
6799 	}
6800 	bp->rsscos_nr_ctxs = 0;
6801 }
6802 
6803 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6804 				    struct bnxt_vnic_info *vnic, u16 ctx_idx)
6805 {
6806 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6807 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6808 	int rc;
6809 
6810 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6811 	if (rc)
6812 		return rc;
6813 
6814 	resp = hwrm_req_hold(bp, req);
6815 	rc = hwrm_req_send(bp, req);
6816 	if (!rc)
6817 		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6818 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
6819 	hwrm_req_drop(bp, req);
6820 
6821 	return rc;
6822 }
6823 
6824 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6825 {
6826 	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6827 		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6828 	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6829 }
6830 
6831 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6832 {
6833 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6834 	struct hwrm_vnic_cfg_input *req;
6835 	unsigned int ring = 0, grp_idx;
6836 	u16 def_vlan = 0;
6837 	int rc;
6838 
6839 	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6840 	if (rc)
6841 		return rc;
6842 
6843 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6844 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6845 
6846 		req->default_rx_ring_id =
6847 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6848 		req->default_cmpl_ring_id =
6849 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6850 		req->enables =
6851 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6852 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6853 		goto vnic_mru;
6854 	}
6855 	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6856 	/* Only RSS is supported for now; COS & LB are TBD */
6857 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6858 		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6859 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6860 					   VNIC_CFG_REQ_ENABLES_MRU);
6861 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6862 		req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6863 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6864 					   VNIC_CFG_REQ_ENABLES_MRU);
6865 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6866 	} else {
6867 		req->rss_rule = cpu_to_le16(0xffff);
6868 	}
6869 
6870 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6871 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6872 		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6873 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6874 	} else {
6875 		req->cos_rule = cpu_to_le16(0xffff);
6876 	}
6877 
6878 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6879 		ring = 0;
6880 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6881 		ring = vnic->vnic_id - 1;
6882 	else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6883 		ring = bp->rx_nr_rings - 1;
6884 
6885 	grp_idx = bp->rx_ring[ring].bnapi->index;
6886 	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6887 	req->lb_rule = cpu_to_le16(0xffff);
6888 vnic_mru:
6889 	vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
6890 	req->mru = cpu_to_le16(vnic->mru);
6891 
6892 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6893 #ifdef CONFIG_BNXT_SRIOV
6894 	if (BNXT_VF(bp))
6895 		def_vlan = bp->vf.vlan;
6896 #endif
6897 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6898 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6899 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6900 		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6901 
6902 	return hwrm_req_send(bp, req);
6903 }
6904 
6905 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6906 				    struct bnxt_vnic_info *vnic)
6907 {
6908 	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6909 		struct hwrm_vnic_free_input *req;
6910 
6911 		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6912 			return;
6913 
6914 		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6915 
6916 		hwrm_req_send(bp, req);
6917 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
6918 	}
6919 }
6920 
6921 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6922 {
6923 	u16 i;
6924 
6925 	for (i = 0; i < bp->nr_vnics; i++)
6926 		bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6927 }
6928 
6929 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6930 			 unsigned int start_rx_ring_idx,
6931 			 unsigned int nr_rings)
6932 {
6933 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6934 	struct hwrm_vnic_alloc_output *resp;
6935 	struct hwrm_vnic_alloc_input *req;
6936 	int rc;
6937 
6938 	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6939 	if (rc)
6940 		return rc;
6941 
6942 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6943 		goto vnic_no_ring_grps;
6944 
6945 	/* map ring groups to this vnic */
6946 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6947 		grp_idx = bp->rx_ring[i].bnapi->index;
6948 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6949 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6950 				   j, nr_rings);
6951 			break;
6952 		}
6953 		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6954 	}
6955 
6956 vnic_no_ring_grps:
6957 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6958 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6959 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6960 		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6961 
6962 	resp = hwrm_req_hold(bp, req);
6963 	rc = hwrm_req_send(bp, req);
6964 	if (!rc)
6965 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6966 	hwrm_req_drop(bp, req);
6967 	return rc;
6968 }
6969 
6970 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6971 {
6972 	struct hwrm_vnic_qcaps_output *resp;
6973 	struct hwrm_vnic_qcaps_input *req;
6974 	int rc;
6975 
6976 	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6977 	bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6978 	bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6979 	if (bp->hwrm_spec_code < 0x10600)
6980 		return 0;
6981 
6982 	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6983 	if (rc)
6984 		return rc;
6985 
6986 	resp = hwrm_req_hold(bp, req);
6987 	rc = hwrm_req_send(bp, req);
6988 	if (!rc) {
6989 		u32 flags = le32_to_cpu(resp->flags);
6990 
6991 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6992 		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6993 			bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6994 		if (flags &
6995 		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6996 			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6997 
6998 		/* Older P5 fw before EXT_HW_STATS support did not set
6999 		 * VLAN_STRIP_CAP properly.
7000 		 */
7001 		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
7002 		    (BNXT_CHIP_P5(bp) &&
7003 		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
7004 			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
7005 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
7006 			bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
7007 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
7008 			bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
7009 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
7010 		if (bp->max_tpa_v2) {
7011 			if (BNXT_CHIP_P5(bp))
7012 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
7013 			else
7014 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
7015 		}
7016 		if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
7017 			bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
7018 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
7019 			bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
7020 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
7021 			bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
7022 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
7023 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
7024 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
7025 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
7026 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
7027 			bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
7028 		if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
7029 			bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
7030 	}
7031 	hwrm_req_drop(bp, req);
7032 	return rc;
7033 }
7034 
7035 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
7036 {
7037 	struct hwrm_ring_grp_alloc_output *resp;
7038 	struct hwrm_ring_grp_alloc_input *req;
7039 	int rc;
7040 	u16 i;
7041 
7042 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7043 		return 0;
7044 
7045 	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
7046 	if (rc)
7047 		return rc;
7048 
7049 	resp = hwrm_req_hold(bp, req);
7050 	for (i = 0; i < bp->rx_nr_rings; i++) {
7051 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7052 
7053 		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7054 		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7055 		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7056 		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7057 
7058 		rc = hwrm_req_send(bp, req);
7059 
7060 		if (rc)
7061 			break;
7062 
7063 		bp->grp_info[grp_idx].fw_grp_id =
7064 			le32_to_cpu(resp->ring_group_id);
7065 	}
7066 	hwrm_req_drop(bp, req);
7067 	return rc;
7068 }
7069 
7070 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7071 {
7072 	struct hwrm_ring_grp_free_input *req;
7073 	u16 i;
7074 
7075 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7076 		return;
7077 
7078 	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7079 		return;
7080 
7081 	hwrm_req_hold(bp, req);
7082 	for (i = 0; i < bp->cp_nr_rings; i++) {
7083 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7084 			continue;
7085 		req->ring_group_id =
7086 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
7087 
7088 		hwrm_req_send(bp, req);
7089 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7090 	}
7091 	hwrm_req_drop(bp, req);
7092 }
7093 
7094 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7095 				       struct hwrm_ring_alloc_input *req,
7096 				       struct bnxt_rx_ring_info *rxr,
7097 				       struct bnxt_ring_struct *ring)
7098 {
7099 	struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7100 	u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7101 		      RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7102 
7103 	if (ring_type == HWRM_RING_ALLOC_AGG) {
7104 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7105 		req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7106 		req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
7107 		enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7108 	} else {
7109 		req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7110 		if (NET_IP_ALIGN == 2)
7111 			req->flags =
7112 				cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7113 	}
7114 	req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7115 	req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7116 	req->enables |= cpu_to_le32(enables);
7117 }
7118 
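/* Build and send a single HWRM_RING_ALLOC message.  Rings that span
 * multiple pages pass the DMA address of a page table (with the page
 * size in log2 units); single-page rings pass the page's DMA address
 * directly.  The ring-type-specific fields are filled in below before
 * the request is sent and the firmware ring ID is read back.
 */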
7119 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7120 				    struct bnxt_rx_ring_info *rxr,
7121 				    struct bnxt_ring_struct *ring,
7122 				    u32 ring_type, u32 map_index)
7123 {
7124 	struct hwrm_ring_alloc_output *resp;
7125 	struct hwrm_ring_alloc_input *req;
7126 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7127 	struct bnxt_ring_grp_info *grp_info;
7128 	int rc, err = 0;
7129 	u16 ring_id;
7130 
7131 	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7132 	if (rc)
7133 		goto exit;
7134 
7135 	req->enables = 0;
7136 	if (rmem->nr_pages > 1) {
7137 		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7138 		/* Page size is in log2 units */
7139 		req->page_size = BNXT_PAGE_SHIFT;
7140 		req->page_tbl_depth = 1;
7141 	} else {
7142 		req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7143 	}
7144 	req->fbo = 0;
7145 	/* Association of ring index with doorbell index and MSIX number */
7146 	req->logical_id = cpu_to_le16(map_index);
7147 
7148 	switch (ring_type) {
7149 	case HWRM_RING_ALLOC_TX: {
7150 		struct bnxt_tx_ring_info *txr;
7151 		u16 flags = 0;
7152 
7153 		txr = container_of(ring, struct bnxt_tx_ring_info,
7154 				   tx_ring_struct);
7155 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7156 		/* Association of transmit ring with completion ring */
7157 		grp_info = &bp->grp_info[ring->grp_idx];
7158 		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7159 		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7160 		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7161 		req->queue_id = cpu_to_le16(ring->queue_id);
7162 		if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7163 			req->cmpl_coal_cnt =
7164 				RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7165 		if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7166 			flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7167 		req->flags = cpu_to_le16(flags);
7168 		break;
7169 	}
7170 	case HWRM_RING_ALLOC_RX:
7171 	case HWRM_RING_ALLOC_AGG:
7172 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7173 		req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7174 			      cpu_to_le32(bp->rx_ring_mask + 1) :
7175 			      cpu_to_le32(bp->rx_agg_ring_mask + 1);
7176 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7177 			bnxt_set_rx_ring_params_p5(bp, ring_type, req,
7178 						   rxr, ring);
7179 		break;
7180 	case HWRM_RING_ALLOC_CMPL:
7181 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7182 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7183 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7184 			/* Association of cp ring with nq */
7185 			grp_info = &bp->grp_info[map_index];
7186 			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7187 			req->cq_handle = cpu_to_le64(ring->handle);
7188 			req->enables |= cpu_to_le32(
7189 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7190 		} else {
7191 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7192 		}
7193 		break;
7194 	case HWRM_RING_ALLOC_NQ:
7195 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7196 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7197 		req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7198 		break;
7199 	default:
7200 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7201 			   ring_type);
7202 		return -EINVAL;
7203 	}
7204 
7205 	resp = hwrm_req_hold(bp, req);
7206 	rc = hwrm_req_send(bp, req);
7207 	err = le16_to_cpu(resp->error_code);
7208 	ring_id = le16_to_cpu(resp->ring_id);
7209 	hwrm_req_drop(bp, req);
7210 
7211 exit:
7212 	if (rc || err) {
7213 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7214 			   ring_type, rc, err);
7215 		return -EIO;
7216 	}
7217 	ring->fw_ring_id = ring_id;
7218 	return rc;
7219 }
7220 
7221 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7222 {
7223 	int rc;
7224 
7225 	if (BNXT_PF(bp)) {
7226 		struct hwrm_func_cfg_input *req;
7227 
7228 		rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7229 		if (rc)
7230 			return rc;
7231 
7232 		req->fid = cpu_to_le16(0xffff);
7233 		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7234 		req->async_event_cr = cpu_to_le16(idx);
7235 		return hwrm_req_send(bp, req);
7236 	} else {
7237 		struct hwrm_func_vf_cfg_input *req;
7238 
7239 		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7240 		if (rc)
7241 			return rc;
7242 
7243 		req->enables =
7244 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7245 		req->async_event_cr = cpu_to_le16(idx);
7246 		return hwrm_req_send(bp, req);
7247 	}
7248 }
7249 
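/* Derive the doorbell index mask from the ring size.  On P7 chips the
 * doorbell also carries an epoch bit that flips on every ring wrap:
 * db_epoch_mask is the first power of two above the ring mask, and
 * db_epoch_shift moves that bit into the DBR_EPOCH position.  As a
 * worked example (ring size assumed, not taken from this file): a
 * 2048-entry ring gives db_ring_mask = 2047, db_epoch_mask = 2048
 * (bit 11), and db_epoch_shift = DBR_EPOCH_SFT - 11.
 */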
7250 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7251 			     u32 ring_type)
7252 {
7253 	switch (ring_type) {
7254 	case HWRM_RING_ALLOC_TX:
7255 		db->db_ring_mask = bp->tx_ring_mask;
7256 		break;
7257 	case HWRM_RING_ALLOC_RX:
7258 		db->db_ring_mask = bp->rx_ring_mask;
7259 		break;
7260 	case HWRM_RING_ALLOC_AGG:
7261 		db->db_ring_mask = bp->rx_agg_ring_mask;
7262 		break;
7263 	case HWRM_RING_ALLOC_CMPL:
7264 	case HWRM_RING_ALLOC_NQ:
7265 		db->db_ring_mask = bp->cp_ring_mask;
7266 		break;
7267 	}
7268 	if (bp->flags & BNXT_FLAG_CHIP_P7) {
7269 		db->db_epoch_mask = db->db_ring_mask + 1;
7270 		db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7271 	}
7272 }
7273 
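/* Record the doorbell format for one ring.  P5_PLUS chips use 64-bit
 * doorbells at a fixed BAR offset keyed by path, type and the firmware
 * ring ID (xid); e.g. a TX ring with xid 0x123 on P7 would use
 * DBR_PATH_L2 | DBR_TYPE_SQ | DBR_VALID | (0x123ULL << DBR_XID_SFT)
 * (example xid assumed).  Older chips use 32-bit doorbell keys written
 * into per-ring 0x80-byte windows indexed by map_idx.
 */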
7274 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7275 			u32 map_idx, u32 xid)
7276 {
7277 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7278 		switch (ring_type) {
7279 		case HWRM_RING_ALLOC_TX:
7280 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7281 			break;
7282 		case HWRM_RING_ALLOC_RX:
7283 		case HWRM_RING_ALLOC_AGG:
7284 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7285 			break;
7286 		case HWRM_RING_ALLOC_CMPL:
7287 			db->db_key64 = DBR_PATH_L2;
7288 			break;
7289 		case HWRM_RING_ALLOC_NQ:
7290 			db->db_key64 = DBR_PATH_L2;
7291 			break;
7292 		}
7293 		db->db_key64 |= (u64)xid << DBR_XID_SFT;
7294 
7295 		if (bp->flags & BNXT_FLAG_CHIP_P7)
7296 			db->db_key64 |= DBR_VALID;
7297 
7298 		db->doorbell = bp->bar1 + bp->db_offset;
7299 	} else {
7300 		db->doorbell = bp->bar1 + map_idx * 0x80;
7301 		switch (ring_type) {
7302 		case HWRM_RING_ALLOC_TX:
7303 			db->db_key32 = DB_KEY_TX;
7304 			break;
7305 		case HWRM_RING_ALLOC_RX:
7306 		case HWRM_RING_ALLOC_AGG:
7307 			db->db_key32 = DB_KEY_RX;
7308 			break;
7309 		case HWRM_RING_ALLOC_CMPL:
7310 			db->db_key32 = DB_KEY_CP;
7311 			break;
7312 		}
7313 	}
7314 	bnxt_set_db_mask(bp, db, ring_type);
7315 }
7316 
7317 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7318 				   struct bnxt_rx_ring_info *rxr)
7319 {
7320 	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7321 	struct bnxt_napi *bnapi = rxr->bnapi;
7322 	u32 type = HWRM_RING_ALLOC_RX;
7323 	u32 map_idx = bnapi->index;
7324 	int rc;
7325 
7326 	rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7327 	if (rc)
7328 		return rc;
7329 
7330 	bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7331 	bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7332 
7333 	return 0;
7334 }
7335 
7336 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7337 				       struct bnxt_rx_ring_info *rxr)
7338 {
7339 	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7340 	u32 type = HWRM_RING_ALLOC_AGG;
7341 	u32 grp_idx = ring->grp_idx;
7342 	u32 map_idx;
7343 	int rc;
7344 
7345 	map_idx = grp_idx + bp->rx_nr_rings;
7346 	rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7347 	if (rc)
7348 		return rc;
7349 
7350 	bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7351 		    ring->fw_ring_id);
7352 	bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7353 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7354 	bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7355 
7356 	return 0;
7357 }
7358 
7359 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7360 				      struct bnxt_cp_ring_info *cpr)
7361 {
7362 	const u32 type = HWRM_RING_ALLOC_CMPL;
7363 	struct bnxt_napi *bnapi = cpr->bnapi;
7364 	struct bnxt_ring_struct *ring;
7365 	u32 map_idx = bnapi->index;
7366 	int rc;
7367 
7368 	ring = &cpr->cp_ring_struct;
7369 	ring->handle = BNXT_SET_NQ_HDL(cpr);
7370 	rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7371 	if (rc)
7372 		return rc;
7373 	bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7374 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7375 	return 0;
7376 }
7377 
7378 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7379 				   struct bnxt_tx_ring_info *txr, u32 tx_idx)
7380 {
7381 	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7382 	const u32 type = HWRM_RING_ALLOC_TX;
7383 	int rc;
7384 
7385 	rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
7386 	if (rc)
7387 		return rc;
7388 	bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7389 	return 0;
7390 }
7391 
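/* Allocate all firmware rings in dependency order: NQs (or completion
 * rings on older chips) first, then TX rings with their per-ring
 * completion rings on P5_PLUS, then RX rings, and aggregation rings
 * last.  The vector is kept disabled while an NQ is allocated and its
 * doorbell initialized, presumably so the IRQ handler cannot run
 * against a half-initialized notification queue.
 */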
7392 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7393 {
7394 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7395 	int i, rc = 0;
7396 	u32 type;
7397 
7398 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7399 		type = HWRM_RING_ALLOC_NQ;
7400 	else
7401 		type = HWRM_RING_ALLOC_CMPL;
7402 	for (i = 0; i < bp->cp_nr_rings; i++) {
7403 		struct bnxt_napi *bnapi = bp->bnapi[i];
7404 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7405 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7406 		u32 map_idx = ring->map_idx;
7407 		unsigned int vector;
7408 
7409 		vector = bp->irq_tbl[map_idx].vector;
7410 		disable_irq_nosync(vector);
7411 		rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7412 		if (rc) {
7413 			enable_irq(vector);
7414 			goto err_out;
7415 		}
7416 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7417 		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7418 		enable_irq(vector);
7419 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7420 
7421 		if (!i) {
7422 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7423 			if (rc)
7424 				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7425 		}
7426 	}
7427 
7428 	for (i = 0; i < bp->tx_nr_rings; i++) {
7429 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7430 
7431 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7432 			rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7433 			if (rc)
7434 				goto err_out;
7435 		}
7436 		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7437 		if (rc)
7438 			goto err_out;
7439 	}
7440 
7441 	for (i = 0; i < bp->rx_nr_rings; i++) {
7442 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7443 
7444 		rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7445 		if (rc)
7446 			goto err_out;
7447 		/* If we have agg rings, post agg buffers first. */
7448 		if (!agg_rings)
7449 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7450 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7451 			rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7452 			if (rc)
7453 				goto err_out;
7454 		}
7455 	}
7456 
7457 	if (agg_rings) {
7458 		for (i = 0; i < bp->rx_nr_rings; i++) {
7459 			rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7460 			if (rc)
7461 				goto err_out;
7462 		}
7463 	}
7464 err_out:
7465 	return rc;
7466 }
7467 
7468 static void bnxt_cancel_dim(struct bnxt *bp)
7469 {
7470 	int i;
7471 
7472 	/* DIM work is initialized in bnxt_enable_napi().  Proceed only
7473 	 * if NAPI is enabled.
7474 	 */
7475 	if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7476 		return;
7477 
7478 	/* Make sure NAPI sees that the VNIC is disabled */
7479 	synchronize_net();
7480 	for (i = 0; i < bp->rx_nr_rings; i++) {
7481 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7482 		struct bnxt_napi *bnapi = rxr->bnapi;
7483 
7484 		cancel_work_sync(&bnapi->cp_ring.dim.work);
7485 	}
7486 }
7487 
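/* Send one HWRM_RING_FREE message.  A valid cmpl_ring_id (the "close
 * path" case) presumably lets firmware flush pending completions for
 * the ring before freeing it; callers pass INVALID_HW_RING_ID when
 * that is not applicable.  The send is skipped entirely when firmware
 * is not reachable (BNXT_NO_FW_ACCESS).
 */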
7488 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7489 				   struct bnxt_ring_struct *ring,
7490 				   u32 ring_type, int cmpl_ring_id)
7491 {
7492 	struct hwrm_ring_free_output *resp;
7493 	struct hwrm_ring_free_input *req;
7494 	u16 error_code = 0;
7495 	int rc;
7496 
7497 	if (BNXT_NO_FW_ACCESS(bp))
7498 		return 0;
7499 
7500 	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7501 	if (rc)
7502 		goto exit;
7503 
7504 	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7505 	req->ring_type = ring_type;
7506 	req->ring_id = cpu_to_le16(ring->fw_ring_id);
7507 
7508 	resp = hwrm_req_hold(bp, req);
7509 	rc = hwrm_req_send(bp, req);
7510 	error_code = le16_to_cpu(resp->error_code);
7511 	hwrm_req_drop(bp, req);
7512 exit:
7513 	if (rc || error_code) {
7514 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7515 			   ring_type, rc, error_code);
7516 		return -EIO;
7517 	}
7518 	return 0;
7519 }
7520 
7521 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7522 				   struct bnxt_tx_ring_info *txr,
7523 				   bool close_path)
7524 {
7525 	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7526 	u32 cmpl_ring_id;
7527 
7528 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7529 		return;
7530 
7531 	cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7532 		       INVALID_HW_RING_ID;
7533 	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7534 				cmpl_ring_id);
7535 	ring->fw_ring_id = INVALID_HW_RING_ID;
7536 }
7537 
7538 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7539 				   struct bnxt_rx_ring_info *rxr,
7540 				   bool close_path)
7541 {
7542 	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7543 	u32 grp_idx = rxr->bnapi->index;
7544 	u32 cmpl_ring_id;
7545 
7546 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7547 		return;
7548 
7549 	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7550 	hwrm_ring_free_send_msg(bp, ring,
7551 				RING_FREE_REQ_RING_TYPE_RX,
7552 				close_path ? cmpl_ring_id :
7553 				INVALID_HW_RING_ID);
7554 	ring->fw_ring_id = INVALID_HW_RING_ID;
7555 	bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7556 }
7557 
7558 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7559 				       struct bnxt_rx_ring_info *rxr,
7560 				       bool close_path)
7561 {
7562 	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7563 	u32 grp_idx = rxr->bnapi->index;
7564 	u32 type, cmpl_ring_id;
7565 
7566 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7567 		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7568 	else
7569 		type = RING_FREE_REQ_RING_TYPE_RX;
7570 
7571 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7572 		return;
7573 
7574 	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7575 	hwrm_ring_free_send_msg(bp, ring, type,
7576 				close_path ? cmpl_ring_id :
7577 				INVALID_HW_RING_ID);
7578 	ring->fw_ring_id = INVALID_HW_RING_ID;
7579 	bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7580 }
7581 
7582 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7583 				   struct bnxt_cp_ring_info *cpr)
7584 {
7585 	struct bnxt_ring_struct *ring;
7586 
7587 	ring = &cpr->cp_ring_struct;
7588 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7589 		return;
7590 
7591 	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7592 				INVALID_HW_RING_ID);
7593 	ring->fw_ring_id = INVALID_HW_RING_ID;
7594 }
7595 
7596 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7597 {
7598 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7599 	int i, size = ring->ring_mem.page_size;
7600 
7601 	cpr->cp_raw_cons = 0;
7602 	cpr->toggle = 0;
7603 
7604 	for (i = 0; i < bp->cp_nr_pages; i++)
7605 		if (cpr->cp_desc_ring[i])
7606 			memset(cpr->cp_desc_ring[i], 0, size);
7607 }
7608 
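/* Free all firmware rings in roughly the reverse order of allocation:
 * TX first, then RX and aggregation rings, and only then the
 * completion rings/NQs.  Interrupts are synchronously disabled before
 * the completion rings go away because the IRQ doorbells stop working
 * once they are freed (see the comment in the body).
 */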
7609 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7610 {
7611 	u32 type;
7612 	int i;
7613 
7614 	if (!bp->bnapi)
7615 		return;
7616 
7617 	for (i = 0; i < bp->tx_nr_rings; i++)
7618 		bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7619 
7620 	bnxt_cancel_dim(bp);
7621 	for (i = 0; i < bp->rx_nr_rings; i++) {
7622 		bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7623 		bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7624 	}
7625 
7626 	/* The completion rings are about to be freed.  After that the
7627 	 * IRQ doorbell will not work anymore.  So we need to disable
7628 	 * IRQ here.
7629 	 */
7630 	bnxt_disable_int_sync(bp);
7631 
7632 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7633 		type = RING_FREE_REQ_RING_TYPE_NQ;
7634 	else
7635 		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7636 	for (i = 0; i < bp->cp_nr_rings; i++) {
7637 		struct bnxt_napi *bnapi = bp->bnapi[i];
7638 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7639 		struct bnxt_ring_struct *ring;
7640 		int j;
7641 
7642 		for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7643 			bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7644 
7645 		ring = &cpr->cp_ring_struct;
7646 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7647 			hwrm_ring_free_send_msg(bp, ring, type,
7648 						INVALID_HW_RING_ID);
7649 			ring->fw_ring_id = INVALID_HW_RING_ID;
7650 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7651 		}
7652 	}
7653 }
7654 
7655 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7656 			     bool shared);
7657 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7658 			   bool shared);
7659 
7660 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7661 {
7662 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7663 	struct hwrm_func_qcfg_output *resp;
7664 	struct hwrm_func_qcfg_input *req;
7665 	int rc;
7666 
7667 	if (bp->hwrm_spec_code < 0x10601)
7668 		return 0;
7669 
7670 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7671 	if (rc)
7672 		return rc;
7673 
7674 	req->fid = cpu_to_le16(0xffff);
7675 	resp = hwrm_req_hold(bp, req);
7676 	rc = hwrm_req_send(bp, req);
7677 	if (rc) {
7678 		hwrm_req_drop(bp, req);
7679 		return rc;
7680 	}
7681 
7682 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7683 	if (BNXT_NEW_RM(bp)) {
7684 		u16 cp, stats;
7685 
7686 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7687 		hw_resc->resv_hw_ring_grps =
7688 			le32_to_cpu(resp->alloc_hw_ring_grps);
7689 		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7690 		hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7691 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
7692 		stats = le16_to_cpu(resp->alloc_stat_ctx);
7693 		hw_resc->resv_irqs = cp;
7694 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7695 			int rx = hw_resc->resv_rx_rings;
7696 			int tx = hw_resc->resv_tx_rings;
7697 
7698 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
7699 				rx >>= 1;
7700 			if (cp < (rx + tx)) {
7701 				rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7702 				if (rc)
7703 					goto get_rings_exit;
7704 				if (bp->flags & BNXT_FLAG_AGG_RINGS)
7705 					rx <<= 1;
7706 				hw_resc->resv_rx_rings = rx;
7707 				hw_resc->resv_tx_rings = tx;
7708 			}
7709 			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7710 			hw_resc->resv_hw_ring_grps = rx;
7711 		}
7712 		hw_resc->resv_cp_rings = cp;
7713 		hw_resc->resv_stat_ctxs = stats;
7714 	}
7715 get_rings_exit:
7716 	hwrm_req_drop(bp, req);
7717 	return rc;
7718 }
7719 
7720 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7721 {
7722 	struct hwrm_func_qcfg_output *resp;
7723 	struct hwrm_func_qcfg_input *req;
7724 	int rc;
7725 
7726 	if (bp->hwrm_spec_code < 0x10601)
7727 		return 0;
7728 
7729 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7730 	if (rc)
7731 		return rc;
7732 
7733 	req->fid = cpu_to_le16(fid);
7734 	resp = hwrm_req_hold(bp, req);
7735 	rc = hwrm_req_send(bp, req);
7736 	if (!rc)
7737 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7738 
7739 	hwrm_req_drop(bp, req);
7740 	return rc;
7741 }
7742 
7743 static bool bnxt_rfs_supported(struct bnxt *bp);
7744 
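/* Build (but do not send) a FUNC_CFG request describing the ring and
 * context counts this PF wants reserved.  Each count is paired with an
 * enables bit so that firmware only acts on the resources explicitly
 * requested; on P5_PLUS the NQ count is reserved as MSIX while older
 * chips reserve completion rings and ring groups instead.
 */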
7745 static struct hwrm_func_cfg_input *
7746 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7747 {
7748 	struct hwrm_func_cfg_input *req;
7749 	u32 enables = 0;
7750 
7751 	if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7752 		return NULL;
7753 
7754 	req->fid = cpu_to_le16(0xffff);
7755 	enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7756 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7757 	if (BNXT_NEW_RM(bp)) {
7758 		enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7759 		enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7760 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7761 			enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7762 			enables |= hwr->cp_p5 ?
7763 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7764 		} else {
7765 			enables |= hwr->cp ?
7766 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7767 			enables |= hwr->grp ?
7768 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7769 		}
7770 		enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7771 		enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7772 					  0;
7773 		req->num_rx_rings = cpu_to_le16(hwr->rx);
7774 		req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7775 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7776 			req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7777 			req->num_msix = cpu_to_le16(hwr->cp);
7778 		} else {
7779 			req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7780 			req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7781 		}
7782 		req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7783 		req->num_vnics = cpu_to_le16(hwr->vnic);
7784 	}
7785 	req->enables = cpu_to_le32(enables);
7786 	return req;
7787 }
7788 
7789 static struct hwrm_func_vf_cfg_input *
7790 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7791 {
7792 	struct hwrm_func_vf_cfg_input *req;
7793 	u32 enables = 0;
7794 
7795 	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7796 		return NULL;
7797 
7798 	enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7799 	enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7800 			     FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7801 	enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7802 	enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7803 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7804 		enables |= hwr->cp_p5 ?
7805 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7806 	} else {
7807 		enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7808 		enables |= hwr->grp ?
7809 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7810 	}
7811 	enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7812 	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7813 
7814 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7815 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7816 	req->num_rx_rings = cpu_to_le16(hwr->rx);
7817 	req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7818 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7819 		req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7820 	} else {
7821 		req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7822 		req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7823 	}
7824 	req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7825 	req->num_vnics = cpu_to_le16(hwr->vnic);
7826 
7827 	req->enables = cpu_to_le32(enables);
7828 	return req;
7829 }
7830 
7831 static int
7832 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7833 {
7834 	struct hwrm_func_cfg_input *req;
7835 	int rc;
7836 
7837 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7838 	if (!req)
7839 		return -ENOMEM;
7840 
7841 	if (!req->enables) {
7842 		hwrm_req_drop(bp, req);
7843 		return 0;
7844 	}
7845 
7846 	rc = hwrm_req_send(bp, req);
7847 	if (rc)
7848 		return rc;
7849 
7850 	if (bp->hwrm_spec_code < 0x10601)
7851 		bp->hw_resc.resv_tx_rings = hwr->tx;
7852 
7853 	return bnxt_hwrm_get_rings(bp);
7854 }
7855 
7856 static int
7857 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7858 {
7859 	struct hwrm_func_vf_cfg_input *req;
7860 	int rc;
7861 
7862 	if (!BNXT_NEW_RM(bp)) {
7863 		bp->hw_resc.resv_tx_rings = hwr->tx;
7864 		return 0;
7865 	}
7866 
7867 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7868 	if (!req)
7869 		return -ENOMEM;
7870 
7871 	rc = hwrm_req_send(bp, req);
7872 	if (rc)
7873 		return rc;
7874 
7875 	return bnxt_hwrm_get_rings(bp);
7876 }
7877 
7878 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7879 {
7880 	if (BNXT_PF(bp))
7881 		return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7882 	else
7883 		return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7884 }
7885 
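/* On P5_PLUS chips the NQ count (one per vector, plus any MSIX lent to
 * the RDMA ULP) is distinct from the completion ring count (one per TX
 * ring plus one per RX ring).  Older chips have a single completion
 * ring per vector, so the two helpers below coincide there.
 */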
7886 int bnxt_nq_rings_in_use(struct bnxt *bp)
7887 {
7888 	return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7889 }
7890 
7891 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7892 {
7893 	int cp;
7894 
7895 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7896 		return bnxt_nq_rings_in_use(bp);
7897 
7898 	cp = bp->tx_nr_rings + bp->rx_nr_rings;
7899 	return cp;
7900 }
7901 
7902 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7903 {
7904 	return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7905 }
7906 
7907 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7908 {
7909 	if (!hwr->grp)
7910 		return 0;
7911 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7912 		int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7913 
7914 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7915 			rss_ctx *= hwr->vnic;
7916 		return rss_ctx;
7917 	}
7918 	if (BNXT_VF(bp))
7919 		return BNXT_VF_MAX_RSS_CTX;
7920 	if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7921 		return hwr->grp + 1;
7922 	return 1;
7923 }
7924 
7925 /* Check if a default RSS map needs to be set up.  This function is only
7926  * used on older firmware that does not require reserving RX rings.
7927  */
7928 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7929 {
7930 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7931 
7932 	/* The RSS map is valid for RX rings set to resv_rx_rings */
7933 	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7934 		hw_resc->resv_rx_rings = bp->rx_nr_rings;
7935 		if (!netif_is_rxfh_configured(bp->dev))
7936 			bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7937 	}
7938 }
7939 
7940 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7941 {
7942 	if (bp->flags & BNXT_FLAG_RFS) {
7943 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7944 			return 2 + bp->num_rss_ctx;
7945 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7946 			return rx_rings + 1;
7947 	}
7948 	return 1;
7949 }
7950 
7951 static void bnxt_get_total_resources(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7952 {
7953 	hwr->cp = bnxt_nq_rings_in_use(bp);
7954 	hwr->cp_p5 = 0;
7955 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7956 		hwr->cp_p5 = bnxt_cp_rings_in_use(bp);
7957 	hwr->tx = bp->tx_nr_rings;
7958 	hwr->rx = bp->rx_nr_rings;
7959 	hwr->grp = hwr->rx;
7960 	hwr->vnic = bnxt_get_total_vnics(bp, hwr->rx);
7961 	hwr->rss_ctx = bnxt_get_total_rss_ctxs(bp, hwr);
7962 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7963 		hwr->rx <<= 1;
7964 	hwr->stat = bnxt_get_func_stat_ctxs(bp);
7965 }
7966 
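/* Compare the currently reserved resources against what the current
 * configuration needs; returns true if a new reservation round is
 * required.  Ring groups are ignored on P5_PLUS, and the IRQ count is
 * only checked on a P5_PLUS PF, mirroring how the reservations were
 * made in the first place.
 */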
7967 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7968 {
7969 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7970 	struct bnxt_hw_rings hwr;
7971 
7972 	bnxt_get_total_resources(bp, &hwr);
7973 
7974 	/* Old firmware does not need RX ring reservations but we still
7975 	 * need to setup a default RSS map when needed.  With new firmware
7976 	 * we go through RX ring reservations first and then set up the
7977 	 * RSS map for the successfully reserved RX rings when needed.
7978 	 */
7979 	if (!BNXT_NEW_RM(bp))
7980 		bnxt_check_rss_tbl_no_rmgr(bp);
7981 
7982 	if (hw_resc->resv_tx_rings != hwr.tx && bp->hwrm_spec_code >= 0x10601)
7983 		return true;
7984 
7985 	if (!BNXT_NEW_RM(bp))
7986 		return false;
7987 
7988 	if (hw_resc->resv_rx_rings != hwr.rx ||
7989 	    hw_resc->resv_vnics != hwr.vnic ||
7990 	    hw_resc->resv_stat_ctxs != hwr.stat ||
7991 	    hw_resc->resv_rsscos_ctxs != hwr.rss_ctx ||
7992 	    (hw_resc->resv_hw_ring_grps != hwr.grp &&
7993 	     !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7994 		return true;
7995 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7996 		if (hw_resc->resv_cp_rings != hwr.cp_p5)
7997 			return true;
7998 	} else if (hw_resc->resv_cp_rings != hwr.cp) {
7999 		return true;
8000 	}
8001 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
8002 	    hw_resc->resv_irqs != hwr.cp)
8003 		return true;
8004 	return false;
8005 }
8006 
8007 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8008 {
8009 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8010 
8011 	hwr->tx = hw_resc->resv_tx_rings;
8012 	if (BNXT_NEW_RM(bp)) {
8013 		hwr->rx = hw_resc->resv_rx_rings;
8014 		hwr->cp = hw_resc->resv_irqs;
8015 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8016 			hwr->cp_p5 = hw_resc->resv_cp_rings;
8017 		hwr->grp = hw_resc->resv_hw_ring_grps;
8018 		hwr->vnic = hw_resc->resv_vnics;
8019 		hwr->stat = hw_resc->resv_stat_ctxs;
8020 		hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
8021 	}
8022 }
8023 
8024 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8025 {
8026 	return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
8027 	       hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
8028 }
8029 
8030 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
8031 
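/* Reserve rings with firmware and then shrink the driver's own ring
 * counts to whatever was actually granted: the RX count is halved
 * before trimming and doubled afterwards when aggregation rings are in
 * use, the totals are trimmed to fit the granted completion rings and
 * stat contexts, and the user-configured RSS map is only reset when
 * the granted RX count actually invalidates it.
 */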
8032 static int __bnxt_reserve_rings(struct bnxt *bp)
8033 {
8034 	struct bnxt_hw_rings hwr = {0};
8035 	int rx_rings, old_rx_rings, rc;
8036 	int cp = bp->cp_nr_rings;
8037 	int ulp_msix = 0;
8038 	bool sh = false;
8039 	int tx_cp;
8040 
8041 	if (!bnxt_need_reserve_rings(bp))
8042 		return 0;
8043 
8044 	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
8045 		ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
8046 		if (!ulp_msix)
8047 			bnxt_set_ulp_stat_ctxs(bp, 0);
8048 		else
8049 			bnxt_set_dflt_ulp_stat_ctxs(bp);
8050 
8051 		if (ulp_msix > bp->ulp_num_msix_want)
8052 			ulp_msix = bp->ulp_num_msix_want;
8053 		hwr.cp = cp + ulp_msix;
8054 	} else {
8055 		hwr.cp = bnxt_nq_rings_in_use(bp);
8056 	}
8057 
8058 	hwr.tx = bp->tx_nr_rings;
8059 	hwr.rx = bp->rx_nr_rings;
8060 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8061 		sh = true;
8062 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8063 		hwr.cp_p5 = hwr.rx + hwr.tx;
8064 
8065 	hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
8066 
8067 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
8068 		hwr.rx <<= 1;
8069 	hwr.grp = bp->rx_nr_rings;
8070 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8071 	hwr.stat = bnxt_get_func_stat_ctxs(bp);
8072 	old_rx_rings = bp->hw_resc.resv_rx_rings;
8073 
8074 	rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8075 	if (rc)
8076 		return rc;
8077 
8078 	bnxt_copy_reserved_rings(bp, &hwr);
8079 
8080 	rx_rings = hwr.rx;
8081 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8082 		if (hwr.rx >= 2) {
8083 			rx_rings = hwr.rx >> 1;
8084 		} else {
8085 			if (netif_running(bp->dev))
8086 				return -ENOMEM;
8087 
8088 			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8089 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8090 			bp->dev->hw_features &= ~NETIF_F_LRO;
8091 			bp->dev->features &= ~NETIF_F_LRO;
8092 			bnxt_set_ring_params(bp);
8093 		}
8094 	}
8095 	rx_rings = min_t(int, rx_rings, hwr.grp);
8096 	hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8097 	if (bnxt_ulp_registered(bp->edev) &&
8098 	    hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8099 		hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8100 	hwr.cp = min_t(int, hwr.cp, hwr.stat);
8101 	rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8102 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
8103 		hwr.rx = rx_rings << 1;
8104 	tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8105 	hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8106 	if (hwr.tx != bp->tx_nr_rings) {
8107 		netdev_warn(bp->dev,
8108 			    "Able to reserve only %d out of %d requested TX rings\n",
8109 			    hwr.tx, bp->tx_nr_rings);
8110 	}
8111 	bp->tx_nr_rings = hwr.tx;
8112 
8113 	/* If we cannot reserve all the RX rings, reset the RSS map only
8114 	 * if absolutely necessary
8115 	 */
8116 	if (rx_rings != bp->rx_nr_rings) {
8117 		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8118 			    rx_rings, bp->rx_nr_rings);
8119 		if (netif_is_rxfh_configured(bp->dev) &&
8120 		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8121 		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8122 		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8123 			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
8124 			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
8125 		}
8126 	}
8127 	bp->rx_nr_rings = rx_rings;
8128 	bp->cp_nr_rings = hwr.cp;
8129 
8130 	/* Fall back if we cannot reserve enough HW RSS contexts */
8131 	if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
8132 	    hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
8133 		bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
8134 
8135 	if (!bnxt_rings_ok(bp, &hwr))
8136 		return -ENOMEM;
8137 
8138 	if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8139 	    !netif_is_rxfh_configured(bp->dev))
8140 		bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8141 
8142 	if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8143 		int resv_msix, resv_ctx, ulp_ctxs;
8144 		struct bnxt_hw_resc *hw_resc;
8145 
8146 		hw_resc = &bp->hw_resc;
8147 		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8148 		ulp_msix = min_t(int, resv_msix, ulp_msix);
8149 		bnxt_set_ulp_msix_num(bp, ulp_msix);
8150 		resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8151 		ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8152 		bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8153 	}
8154 
8155 	return rc;
8156 }
8157 
8158 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8159 {
8160 	struct hwrm_func_vf_cfg_input *req;
8161 	u32 flags;
8162 
8163 	if (!BNXT_NEW_RM(bp))
8164 		return 0;
8165 
8166 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
	if (!req)
		return -ENOMEM;
8167 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8168 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8169 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8170 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8171 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8172 		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8173 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8174 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8175 
8176 	req->flags = cpu_to_le32(flags);
8177 	return hwrm_req_send_silent(bp, req);
8178 }
8179 
8180 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8181 {
8182 	struct hwrm_func_cfg_input *req;
8183 	u32 flags;
8184 
8185 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
	if (!req)
		return -ENOMEM;
8186 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8187 	if (BNXT_NEW_RM(bp)) {
8188 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8189 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8190 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8191 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8192 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8193 			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8194 				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8195 		else
8196 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8197 	}
8198 
8199 	req->flags = cpu_to_le32(flags);
8200 	return hwrm_req_send_silent(bp, req);
8201 }
8202 
8203 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8204 {
8205 	if (bp->hwrm_spec_code < 0x10801)
8206 		return 0;
8207 
8208 	if (BNXT_PF(bp))
8209 		return bnxt_hwrm_check_pf_rings(bp, hwr);
8210 
8211 	return bnxt_hwrm_check_vf_rings(bp, hwr);
8212 }
8213 
8214 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8215 {
8216 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8217 	struct hwrm_ring_aggint_qcaps_output *resp;
8218 	struct hwrm_ring_aggint_qcaps_input *req;
8219 	int rc;
8220 
8221 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8222 	coal_cap->num_cmpl_dma_aggr_max = 63;
8223 	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8224 	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8225 	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8226 	coal_cap->int_lat_tmr_min_max = 65535;
8227 	coal_cap->int_lat_tmr_max_max = 65535;
8228 	coal_cap->num_cmpl_aggr_int_max = 65535;
8229 	coal_cap->timer_units = 80;
8230 
8231 	if (bp->hwrm_spec_code < 0x10902)
8232 		return;
8233 
8234 	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8235 		return;
8236 
8237 	resp = hwrm_req_hold(bp, req);
8238 	rc = hwrm_req_send_silent(bp, req);
8239 	if (!rc) {
8240 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8241 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8242 		coal_cap->num_cmpl_dma_aggr_max =
8243 			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8244 		coal_cap->num_cmpl_dma_aggr_during_int_max =
8245 			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8246 		coal_cap->cmpl_aggr_dma_tmr_max =
8247 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8248 		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8249 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8250 		coal_cap->int_lat_tmr_min_max =
8251 			le16_to_cpu(resp->int_lat_tmr_min_max);
8252 		coal_cap->int_lat_tmr_max_max =
8253 			le16_to_cpu(resp->int_lat_tmr_max_max);
8254 		coal_cap->num_cmpl_aggr_int_max =
8255 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
8256 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8257 	}
8258 	hwrm_req_drop(bp, req);
8259 }
8260 
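/* Convert microseconds to device timer ticks.  timer_units is in
 * nanoseconds per tick (default 80 above), so e.g. a 25 usec interrupt
 * latency becomes 25000 / 80 = 312 ticks (example value assumed).
 */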
8261 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8262 {
8263 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8264 
8265 	return usec * 1000 / coal_cap->timer_units;
8266 }
8267 
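/* Translate a bnxt_coal configuration into the aggint-params request,
 * clamping every value to the limits reported by RING_AGGINT_QCAPS.
 * The min latency timer is derived as half the max timer and the DMA
 * buffer timer as a quarter of it, which presumably keeps completions
 * draining well before the interrupt fires.
 */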
8268 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8269 	struct bnxt_coal *hw_coal,
8270 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8271 {
8272 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8273 	u16 val, tmr, max, flags = hw_coal->flags;
8274 	u32 cmpl_params = coal_cap->cmpl_params;
8275 
8276 	max = hw_coal->bufs_per_record * 128;
8277 	if (hw_coal->budget)
8278 		max = hw_coal->bufs_per_record * hw_coal->budget;
8279 	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8280 
8281 	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8282 	req->num_cmpl_aggr_int = cpu_to_le16(val);
8283 
8284 	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8285 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
8286 
8287 	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8288 		      coal_cap->num_cmpl_dma_aggr_during_int_max);
8289 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8290 
8291 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8292 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8293 	req->int_lat_tmr_max = cpu_to_le16(tmr);
8294 
8295 	/* min timer set to 1/2 of interrupt timer */
8296 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8297 		val = tmr / 2;
8298 		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8299 		req->int_lat_tmr_min = cpu_to_le16(val);
8300 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8301 	}
8302 
8303 	/* buf timer set to 1/4 of interrupt timer */
8304 	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8305 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8306 
8307 	if (cmpl_params &
8308 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8309 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8310 		val = clamp_t(u16, tmr, 1,
8311 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8312 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8313 		req->enables |=
8314 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8315 	}
8316 
8317 	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8318 	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8319 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8320 	req->flags = cpu_to_le16(flags);
8321 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8322 }
8323 
8324 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8325 				   struct bnxt_coal *hw_coal)
8326 {
8327 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8328 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8329 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8330 	u32 nq_params = coal_cap->nq_params;
8331 	u16 tmr;
8332 	int rc;
8333 
8334 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8335 		return 0;
8336 
8337 	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8338 	if (rc)
8339 		return rc;
8340 
8341 	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8342 	req->flags =
8343 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8344 
8345 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8346 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8347 	req->int_lat_tmr_min = cpu_to_le16(tmr);
8348 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8349 	return hwrm_req_send(bp, req);
8350 }
8351 
8352 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8353 {
8354 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8355 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8356 	struct bnxt_coal coal;
8357 	int rc;
8358 
8359 	/* Tick values in microseconds.
8360 	 * 1 coal_buf x bufs_per_record = 1 completion record.
8361 	 */
8362 	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8363 
8364 	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8365 	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8366 
8367 	if (!bnapi->rx_ring)
8368 		return -ENODEV;
8369 
8370 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8371 	if (rc)
8372 		return rc;
8373 
8374 	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8375 
8376 	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8377 
8378 	return hwrm_req_send(bp, req_rx);
8379 }
8380 
8381 static int
8382 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8383 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8384 {
8385 	u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8386 
8387 	req->ring_id = cpu_to_le16(ring_id);
8388 	return hwrm_req_send(bp, req);
8389 }
8390 
8391 static int
8392 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8393 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8394 {
8395 	struct bnxt_tx_ring_info *txr;
8396 	int i, rc;
8397 
8398 	bnxt_for_each_napi_tx(i, bnapi, txr) {
8399 		u16 ring_id;
8400 
8401 		ring_id = bnxt_cp_ring_for_tx(bp, txr);
8402 		req->ring_id = cpu_to_le16(ring_id);
8403 		rc = hwrm_req_send(bp, req);
8404 		if (rc)
8405 			return rc;
8406 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8407 			return 0;
8408 	}
8409 	return 0;
8410 }
8411 
8412 int bnxt_hwrm_set_coal(struct bnxt *bp)
8413 {
8414 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8415 	int i, rc;
8416 
8417 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8418 	if (rc)
8419 		return rc;
8420 
8421 	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8422 	if (rc) {
8423 		hwrm_req_drop(bp, req_rx);
8424 		return rc;
8425 	}
8426 
8427 	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8428 	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8429 
8430 	hwrm_req_hold(bp, req_rx);
8431 	hwrm_req_hold(bp, req_tx);
8432 	for (i = 0; i < bp->cp_nr_rings; i++) {
8433 		struct bnxt_napi *bnapi = bp->bnapi[i];
8434 		struct bnxt_coal *hw_coal;
8435 
8436 		if (!bnapi->rx_ring)
8437 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8438 		else
8439 			rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8440 		if (rc)
8441 			break;
8442 
8443 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8444 			continue;
8445 
8446 		if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8447 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8448 			if (rc)
8449 				break;
8450 		}
8451 		if (bnapi->rx_ring)
8452 			hw_coal = &bp->rx_coal;
8453 		else
8454 			hw_coal = &bp->tx_coal;
8455 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8456 	}
8457 	hwrm_req_drop(bp, req_rx);
8458 	hwrm_req_drop(bp, req_tx);
8459 	return rc;
8460 }
8461 
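/* Free all statistics contexts.  Firmware with major version 20 or
 * older apparently does not clear the counters on free, so a
 * STAT_CTX_CLR_STATS request (req0 below) is sent for each context
 * first on those versions.
 */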
8462 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8463 {
8464 	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8465 	struct hwrm_stat_ctx_free_input *req;
8466 	int i;
8467 
8468 	if (!bp->bnapi)
8469 		return;
8470 
8471 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8472 		return;
8473 
8474 	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8475 		return;
8476 	if (BNXT_FW_MAJ(bp) <= 20) {
8477 		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8478 			hwrm_req_drop(bp, req);
8479 			return;
8480 		}
8481 		hwrm_req_hold(bp, req0);
8482 	}
8483 	hwrm_req_hold(bp, req);
8484 	for (i = 0; i < bp->cp_nr_rings; i++) {
8485 		struct bnxt_napi *bnapi = bp->bnapi[i];
8486 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8487 
8488 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8489 			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8490 			if (req0) {
8491 				req0->stat_ctx_id = req->stat_ctx_id;
8492 				hwrm_req_send(bp, req0);
8493 			}
8494 			hwrm_req_send(bp, req);
8495 
8496 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8497 		}
8498 	}
8499 	hwrm_req_drop(bp, req);
8500 	if (req0)
8501 		hwrm_req_drop(bp, req0);
8502 }
8503 
8504 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8505 {
8506 	struct hwrm_stat_ctx_alloc_output *resp;
8507 	struct hwrm_stat_ctx_alloc_input *req;
8508 	int rc, i;
8509 
8510 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8511 		return 0;
8512 
8513 	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8514 	if (rc)
8515 		return rc;
8516 
8517 	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8518 	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8519 
8520 	resp = hwrm_req_hold(bp, req);
8521 	for (i = 0; i < bp->cp_nr_rings; i++) {
8522 		struct bnxt_napi *bnapi = bp->bnapi[i];
8523 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8524 
8525 		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8526 
8527 		rc = hwrm_req_send(bp, req);
8528 		if (rc)
8529 			break;
8530 
8531 		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8532 
8533 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8534 	}
8535 	hwrm_req_drop(bp, req);
8536 	return rc;
8537 }
8538 
8539 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8540 {
8541 	struct hwrm_func_qcfg_output *resp;
8542 	struct hwrm_func_qcfg_input *req;
8543 	u16 flags;
8544 	int rc;
8545 
8546 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8547 	if (rc)
8548 		return rc;
8549 
8550 	req->fid = cpu_to_le16(0xffff);
8551 	resp = hwrm_req_hold(bp, req);
8552 	rc = hwrm_req_send(bp, req);
8553 	if (rc)
8554 		goto func_qcfg_exit;
8555 
8556 	flags = le16_to_cpu(resp->flags);
8557 #ifdef CONFIG_BNXT_SRIOV
8558 	if (BNXT_VF(bp)) {
8559 		struct bnxt_vf_info *vf = &bp->vf;
8560 
8561 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8562 		if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8563 			vf->flags |= BNXT_VF_TRUST;
8564 		else
8565 			vf->flags &= ~BNXT_VF_TRUST;
8566 	} else {
8567 		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8568 	}
8569 #endif
8570 	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8571 		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8572 		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8573 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8574 			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8575 	}
8576 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8577 		bp->flags |= BNXT_FLAG_MULTI_HOST;
8578 
8579 	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8580 		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8581 
8582 	if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8583 		bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8584 	if (resp->roce_bidi_opt_mode &
8585 	    FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
8586 		bp->cos0_cos1_shared = 1;
8587 	else
8588 		bp->cos0_cos1_shared = 0;
8589 
8590 	switch (resp->port_partition_type) {
8591 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8592 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8593 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8594 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8595 		bp->port_partition_type = resp->port_partition_type;
8596 		break;
8597 	}
8598 	if (bp->hwrm_spec_code < 0x10707 ||
8599 	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8600 		bp->br_mode = BRIDGE_MODE_VEB;
8601 	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8602 		bp->br_mode = BRIDGE_MODE_VEPA;
8603 	else
8604 		bp->br_mode = BRIDGE_MODE_UNDEF;
8605 
8606 	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8607 	if (!bp->max_mtu)
8608 		bp->max_mtu = BNXT_MAX_MTU;
8609 
8610 	if (bp->db_size)
8611 		goto func_qcfg_exit;
8612 
8613 	bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8614 	if (BNXT_CHIP_P5(bp)) {
8615 		if (BNXT_PF(bp))
8616 			bp->db_offset = DB_PF_OFFSET_P5;
8617 		else
8618 			bp->db_offset = DB_VF_OFFSET_P5;
8619 	}
8620 	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8621 				 1024);
8622 	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8623 	    bp->db_size <= bp->db_offset)
8624 		bp->db_size = pci_resource_len(bp->pdev, 2);
8625 
8626 func_qcfg_exit:
8627 	hwrm_req_drop(bp, req);
8628 	return rc;
8629 }
8630 
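/* Record how firmware wants a context memory type initialized: a byte
 * value plus an offset that firmware reports in 4-byte units (hence
 * the "* 4" below).  When the capability bit is not set, the init
 * value is forced to zero and the offset stays marked invalid.
 */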
8631 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8632 				      u8 init_val, u8 init_offset,
8633 				      bool init_mask_set)
8634 {
8635 	ctxm->init_value = init_val;
8636 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8637 	if (init_mask_set)
8638 		ctxm->init_offset = init_offset * 4;
8639 	else
8640 		ctxm->init_value = 0;
8641 }
8642 
8643 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8644 {
8645 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8646 	u16 type;
8647 
8648 	for (type = 0; type < ctx_max; type++) {
8649 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8650 		int n = 1;
8651 
8652 		if (!ctxm->max_entries || ctxm->pg_info)
8653 			continue;
8654 
8655 		if (ctxm->instance_bmap)
8656 			n = hweight32(ctxm->instance_bmap);
8657 		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8658 		if (!ctxm->pg_info)
8659 			return -ENOMEM;
8660 	}
8661 	return 0;
8662 }
8663 
8664 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8665 				  struct bnxt_ctx_mem_type *ctxm, bool force);
8666 
8667 #define BNXT_CTX_INIT_VALID(flags)	\
8668 	(!!((flags) &			\
8669 	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8670 
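/* Walk the V2 backing-store capabilities one type at a time, following
 * the next_valid_type chain returned by firmware.  Types that lost
 * their valid flag are freed; types whose geometry changed (entry size
 * or max entries) or that are not persistent are freed and re-read,
 * while unchanged persistent memory is kept across the query.
 */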
8671 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8672 {
8673 	struct hwrm_func_backing_store_qcaps_v2_output *resp;
8674 	struct hwrm_func_backing_store_qcaps_v2_input *req;
8675 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8676 	u16 type, next_type = 0;
8677 	int rc;
8678 
8679 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8680 	if (rc)
8681 		return rc;
8682 
8683 	if (!ctx) {
8684 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8685 		if (!ctx)
8686 			return -ENOMEM;
8687 		bp->ctx = ctx;
8688 	}
8689 
8690 	resp = hwrm_req_hold(bp, req);
8691 
8692 	for (type = 0; type < BNXT_CTX_V2_MAX; type = next_type) {
8693 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8694 		u8 init_val, init_off, i;
8695 		u32 max_entries;
8696 		u16 entry_size;
8697 		__le32 *p;
8698 		u32 flags;
8699 
8700 		req->type = cpu_to_le16(type);
8701 		rc = hwrm_req_send(bp, req);
8702 		if (rc)
8703 			goto ctx_done;
8704 		flags = le32_to_cpu(resp->flags);
8705 		next_type = le16_to_cpu(resp->next_valid_type);
8706 		if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8707 			bnxt_free_one_ctx_mem(bp, ctxm, true);
8708 			continue;
8709 		}
8710 		entry_size = le16_to_cpu(resp->entry_size);
8711 		max_entries = le32_to_cpu(resp->max_num_entries);
8712 		if (ctxm->mem_valid) {
8713 			if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8714 			    ctxm->entry_size != entry_size ||
8715 			    ctxm->max_entries != max_entries)
8716 				bnxt_free_one_ctx_mem(bp, ctxm, true);
8717 			else
8718 				continue;
8719 		}
8720 		ctxm->type = type;
8721 		ctxm->entry_size = entry_size;
8722 		ctxm->flags = flags;
8723 		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8724 		ctxm->entry_multiple = resp->entry_multiple;
8725 		ctxm->max_entries = max_entries;
8726 		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8727 		init_val = resp->ctx_init_value;
8728 		init_off = resp->ctx_init_offset;
8729 		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8730 					  BNXT_CTX_INIT_VALID(flags));
8731 		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8732 					      BNXT_MAX_SPLIT_ENTRY);
8733 		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8734 		     i++, p++)
8735 			ctxm->split[i] = le32_to_cpu(*p);
8736 	}
8737 	rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8738 
8739 ctx_done:
8740 	hwrm_req_drop(bp, req);
8741 	return rc;
8742 }
8743 
8744 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8745 {
8746 	struct hwrm_func_backing_store_qcaps_output *resp;
8747 	struct hwrm_func_backing_store_qcaps_input *req;
8748 	int rc;
8749 
8750 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8751 	    (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8752 		return 0;
8753 
8754 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8755 		return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8756 
8757 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8758 	if (rc)
8759 		return rc;
8760 
8761 	resp = hwrm_req_hold(bp, req);
8762 	rc = hwrm_req_send_silent(bp, req);
8763 	if (!rc) {
8764 		struct bnxt_ctx_mem_type *ctxm;
8765 		struct bnxt_ctx_mem_info *ctx;
8766 		u8 init_val, init_idx = 0;
8767 		u16 init_mask;
8768 
8769 		ctx = bp->ctx;
8770 		if (!ctx) {
8771 			ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8772 			if (!ctx) {
8773 				rc = -ENOMEM;
8774 				goto ctx_err;
8775 			}
8776 			bp->ctx = ctx;
8777 		}
8778 		init_val = resp->ctx_kind_initializer;
8779 		init_mask = le16_to_cpu(resp->ctx_init_mask);
8780 
8781 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8782 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8783 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8784 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8785 		ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8786 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8787 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8788 					  (init_mask & (1 << init_idx++)) != 0);
8789 
8790 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8791 		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8792 		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8793 		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8794 		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8795 					  (init_mask & (1 << init_idx++)) != 0);
8796 
8797 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8798 		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8799 		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8800 		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8801 		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8802 					  (init_mask & (1 << init_idx++)) != 0);
8803 
8804 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8805 		ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8806 		ctxm->max_entries = ctxm->vnic_entries +
8807 			le16_to_cpu(resp->vnic_max_ring_table_entries);
8808 		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8809 		bnxt_init_ctx_initializer(ctxm, init_val,
8810 					  resp->vnic_init_offset,
8811 					  (init_mask & (1 << init_idx++)) != 0);
8812 
8813 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8814 		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8815 		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8816 		bnxt_init_ctx_initializer(ctxm, init_val,
8817 					  resp->stat_init_offset,
8818 					  (init_mask & (1 << init_idx++)) != 0);
8819 
8820 		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8821 		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8822 		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8823 		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8824 		ctxm->entry_multiple = resp->tqm_entries_multiple;
8825 		if (!ctxm->entry_multiple)
8826 			ctxm->entry_multiple = 1;
8827 
8828 		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8829 
8830 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8831 		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8832 		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8833 		ctxm->mrav_num_entries_units =
8834 			le16_to_cpu(resp->mrav_num_entries_units);
8835 		bnxt_init_ctx_initializer(ctxm, init_val,
8836 					  resp->mrav_init_offset,
8837 					  (init_mask & (1 << init_idx++)) != 0);
8838 
8839 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8840 		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8841 		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8842 
8843 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8844 		if (!ctx->tqm_fp_rings_count)
8845 			ctx->tqm_fp_rings_count = bp->max_q;
8846 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8847 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8848 
8849 		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8850 		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8851 		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8852 
8853 		rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8854 	} else {
8855 		rc = 0;
8856 	}
8857 ctx_err:
8858 	hwrm_req_drop(bp, req);
8859 	return rc;
8860 }
8861 
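/* Encode the page table layout of a ring for the firmware: the low bits
 * of *pg_attr select the indirection level (0 = the single data page is
 * addressed directly, 1 = one level of page directory, 2 = two levels),
 * and *pg_dir receives the DMA address of either the directory or the
 * lone data page.  BNXT_SET_CTX_PAGE_ATTR() fills in the page size bits
 * first.
 */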
8862 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8863 				  __le64 *pg_dir)
8864 {
8865 	if (!rmem->nr_pages)
8866 		return;
8867 
8868 	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8869 	if (rmem->depth >= 1) {
8870 		if (rmem->depth == 2)
8871 			*pg_attr |= 2;
8872 		else
8873 			*pg_attr |= 1;
8874 		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8875 	} else {
8876 		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8877 	}
8878 }
8879 
8880 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
8881 	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
8882 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
8883 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
8884 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
8885 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8886 
8887 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8888 {
8889 	struct hwrm_func_backing_store_cfg_input *req;
8890 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8891 	struct bnxt_ctx_pg_info *ctx_pg;
8892 	struct bnxt_ctx_mem_type *ctxm;
8893 	void **__req = (void **)&req;
8894 	u32 req_len = sizeof(*req);
8895 	__le32 *num_entries;
8896 	__le64 *pg_dir;
8897 	u32 flags = 0;
8898 	u8 *pg_attr;
8899 	u32 ena;
8900 	int rc;
8901 	int i;
8902 
8903 	if (!ctx)
8904 		return 0;
8905 
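	/* Older firmware cannot accept the full-sized request; fall back
	 * to the legacy request length, which omits the newer fields at
	 * the end of the structure.
	 */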
8906 	if (req_len > bp->hwrm_max_ext_req_len)
8907 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8908 	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8909 	if (rc)
8910 		return rc;
8911 
8912 	req->enables = cpu_to_le32(enables);
8913 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8914 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8915 		ctx_pg = ctxm->pg_info;
8916 		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8917 		req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8918 		req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8919 		req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8920 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8921 				      &req->qpc_pg_size_qpc_lvl,
8922 				      &req->qpc_page_dir);
8923 
8924 		if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8925 			req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8926 	}
8927 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8928 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8929 		ctx_pg = ctxm->pg_info;
8930 		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8931 		req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8932 		req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8933 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8934 				      &req->srq_pg_size_srq_lvl,
8935 				      &req->srq_page_dir);
8936 	}
8937 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8938 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8939 		ctx_pg = ctxm->pg_info;
8940 		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8941 		req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8942 		req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8943 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8944 				      &req->cq_pg_size_cq_lvl,
8945 				      &req->cq_page_dir);
8946 	}
8947 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8948 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8949 		ctx_pg = ctxm->pg_info;
8950 		req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8951 		req->vnic_num_ring_table_entries =
8952 			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8953 		req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8954 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8955 				      &req->vnic_pg_size_vnic_lvl,
8956 				      &req->vnic_page_dir);
8957 	}
8958 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8959 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8960 		ctx_pg = ctxm->pg_info;
8961 		req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8962 		req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8963 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8964 				      &req->stat_pg_size_stat_lvl,
8965 				      &req->stat_page_dir);
8966 	}
8967 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8968 		u32 units;
8969 
8970 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8971 		ctx_pg = ctxm->pg_info;
8972 		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8973 		units = ctxm->mrav_num_entries_units;
8974 		if (units) {
8975 			u32 num_mr, num_ah = ctxm->mrav_av_entries;
8976 			u32 entries;
8977 
8978 			num_mr = ctx_pg->entries - num_ah;
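			/* With a units granularity, the 32-bit field is
			 * split: bits 31:16 carry the MR count and bits
			 * 15:0 the AH count, both expressed in units.
			 * E.g. with units = 4, num_mr = 1024 and
			 * num_ah = 128 encode as (256 << 16) | 32 =
			 * 0x01000020.
			 */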
8979 			entries = ((num_mr / units) << 16) | (num_ah / units);
8980 			req->mrav_num_entries = cpu_to_le32(entries);
8981 			flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8982 		}
8983 		req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8984 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8985 				      &req->mrav_pg_size_mrav_lvl,
8986 				      &req->mrav_page_dir);
8987 	}
8988 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8989 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8990 		ctx_pg = ctxm->pg_info;
8991 		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8992 		req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8993 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8994 				      &req->tim_pg_size_tim_lvl,
8995 				      &req->tim_page_dir);
8996 	}
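	/* The slow-path TQM ring and the fast-path TQM rings are programmed
	 * in one pass: the per-ring entry-count, page-attribute and
	 * page-directory fields each form a consecutive run in the request,
	 * so the three pointers advance in lockstep while the enable bit
	 * shifts left once per ring.
	 */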
8997 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8998 	for (i = 0, num_entries = &req->tqm_sp_num_entries,
8999 	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
9000 	     pg_dir = &req->tqm_sp_page_dir,
9001 	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
9002 	     ctx_pg = ctxm->pg_info;
9003 	     i < BNXT_MAX_TQM_RINGS;
9004 	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
9005 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
9006 		if (!(enables & ena))
9007 			continue;
9008 
9009 		req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
9010 		*num_entries = cpu_to_le32(ctx_pg->entries);
9011 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
9012 	}
9013 	req->flags = cpu_to_le32(flags);
9014 	return hwrm_req_send(bp, req);
9015 }
9016 
9017 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
9018 				  struct bnxt_ctx_pg_info *ctx_pg)
9019 {
9020 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9021 
9022 	rmem->page_size = BNXT_PAGE_SIZE;
9023 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
9024 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
9025 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
9026 	if (rmem->depth >= 1)
9027 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
9028 	return bnxt_alloc_ring(bp, rmem);
9029 }
9030 
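/* Allocate the backing store pages for one context memory instance.
 * If everything fits in one level (at most MAX_CTX_PAGES pages and
 * depth <= 1), a flat set of data pages is allocated; otherwise a
 * two-level tree is built, with each first-level entry pointing to a
 * table of up to MAX_CTX_PAGES data pages.  E.g. with 4K pages and
 * 8-byte table entries (MAX_CTX_PAGES = 512), one level covers 2 MB
 * and two levels up to 1 GB.
 */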
9031 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
9032 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
9033 				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
9034 {
9035 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9036 	int rc;
9037 
9038 	if (!mem_size)
9039 		return -EINVAL;
9040 
9041 	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9042 	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
9043 		ctx_pg->nr_pages = 0;
9044 		return -EINVAL;
9045 	}
9046 	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
9047 		int nr_tbls, i;
9048 
9049 		rmem->depth = 2;
9050 		ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES);
9051 		if (!ctx_pg->ctx_pg_tbl)
9052 			return -ENOMEM;
9053 		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
9054 		rmem->nr_pages = nr_tbls;
9055 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9056 		if (rc)
9057 			return rc;
9058 		for (i = 0; i < nr_tbls; i++) {
9059 			struct bnxt_ctx_pg_info *pg_tbl;
9060 
9061 			pg_tbl = kzalloc_obj(*pg_tbl);
9062 			if (!pg_tbl)
9063 				return -ENOMEM;
9064 			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
9065 			rmem = &pg_tbl->ring_mem;
9066 			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
9067 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
9068 			rmem->depth = 1;
9069 			rmem->nr_pages = MAX_CTX_PAGES;
9070 			rmem->ctx_mem = ctxm;
9071 			if (i == (nr_tbls - 1)) {
9072 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
9073 
9074 				if (rem)
9075 					rmem->nr_pages = rem;
9076 			}
9077 			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
9078 			if (rc)
9079 				break;
9080 		}
9081 	} else {
9082 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9083 		if (rmem->nr_pages > 1 || depth)
9084 			rmem->depth = 1;
9085 		rmem->ctx_mem = ctxm;
9086 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9087 	}
9088 	return rc;
9089 }
9090 
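/* Copy the [head, tail) range of one context memory instance into @buf,
 * treating the instance as a circular buffer: @tail is reduced modulo
 * the total size and @head wraps back to zero when it runs off the end.
 * For two-level instances, the second-level table holding the current
 * head is looked up on each pass.
 */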
9091 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9092 				    struct bnxt_ctx_pg_info *ctx_pg,
9093 				    void *buf, size_t offset, size_t head,
9094 				    size_t tail)
9095 {
9096 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9097 	size_t nr_pages = ctx_pg->nr_pages;
9098 	int page_size = rmem->page_size;
9099 	size_t len = 0, total_len = 0;
9100 	u16 depth = rmem->depth;
9101 
9102 	tail %= nr_pages * page_size;
9103 	do {
9104 		if (depth > 1) {
9105 			int i = head / (page_size * MAX_CTX_PAGES);
9106 			struct bnxt_ctx_pg_info *pg_tbl;
9107 
9108 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
9109 			rmem = &pg_tbl->ring_mem;
9110 		}
9111 		len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9112 		head += len;
9113 		offset += len;
9114 		total_len += len;
9115 		if (head >= nr_pages * page_size)
9116 			head = 0;
9117 	} while (head != tail);
9118 	return total_len;
9119 }
9120 
9121 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9122 				  struct bnxt_ctx_pg_info *ctx_pg)
9123 {
9124 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9125 
9126 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9127 	    ctx_pg->ctx_pg_tbl) {
9128 		int i, nr_tbls = rmem->nr_pages;
9129 
9130 		for (i = 0; i < nr_tbls; i++) {
9131 			struct bnxt_ctx_pg_info *pg_tbl;
9132 			struct bnxt_ring_mem_info *rmem2;
9133 
9134 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
9135 			if (!pg_tbl)
9136 				continue;
9137 			rmem2 = &pg_tbl->ring_mem;
9138 			bnxt_free_ring(bp, rmem2);
9139 			ctx_pg->ctx_pg_arr[i] = NULL;
9140 			kfree(pg_tbl);
9141 			ctx_pg->ctx_pg_tbl[i] = NULL;
9142 		}
9143 		kfree(ctx_pg->ctx_pg_tbl);
9144 		ctx_pg->ctx_pg_tbl = NULL;
9145 	}
9146 	bnxt_free_ring(bp, rmem);
9147 	ctx_pg->nr_pages = 0;
9148 }
9149 
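/* Size and allocate the backing store for one context type: round the
 * requested entry count up to any required multiple, clamp it to the
 * [min_entries, max_entries] range the firmware reported, and allocate
 * one set of page tables per instance (instance_bmap carries one bit
 * per instance, e.g. one per fast-path TQM ring).
 */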
9150 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9151 				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
9152 				   u8 pg_lvl)
9153 {
9154 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9155 	int i, rc = 0, n = 1;
9156 	u32 mem_size;
9157 
9158 	if (!ctxm->entry_size || !ctx_pg)
9159 		return -EINVAL;
9160 	if (ctxm->instance_bmap)
9161 		n = hweight32(ctxm->instance_bmap);
9162 	if (ctxm->entry_multiple)
9163 		entries = roundup(entries, ctxm->entry_multiple);
9164 	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9165 	mem_size = entries * ctxm->entry_size;
9166 	for (i = 0; i < n && !rc; i++) {
9167 		ctx_pg[i].entries = entries;
9168 		rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9169 					    ctxm->init_value ? ctxm : NULL);
9170 	}
9171 	if (!rc)
9172 		ctxm->mem_valid = 1;
9173 	return rc;
9174 }
9175 
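/* Program one context type with the V2 interface: a separate
 * HWRM_FUNC_BACKING_STORE_CFG_V2 message is sent for each populated
 * instance, and when @last is set the final message carries the
 * BS_CFG_ALL_DONE flag so the firmware knows the whole backing store
 * configuration is complete.
 */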
9176 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9177 					       struct bnxt_ctx_mem_type *ctxm,
9178 					       bool last)
9179 {
9180 	struct hwrm_func_backing_store_cfg_v2_input *req;
9181 	u32 instance_bmap = ctxm->instance_bmap;
9182 	int i, j, rc = 0, n = 1;
9183 	__le32 *p;
9184 
9185 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9186 		return 0;
9187 
9188 	if (instance_bmap)
9189 		n = hweight32(ctxm->instance_bmap);
9190 	else
9191 		instance_bmap = 1;
9192 
9193 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9194 	if (rc)
9195 		return rc;
9196 	hwrm_req_hold(bp, req);
9197 	req->type = cpu_to_le16(ctxm->type);
9198 	req->entry_size = cpu_to_le16(ctxm->entry_size);
9199 	if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9200 	    bnxt_bs_trace_avail(bp, ctxm->type)) {
9201 		struct bnxt_bs_trace_info *bs_trace;
9202 		u32 enables;
9203 
9204 		enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9205 		req->enables = cpu_to_le32(enables);
9206 		bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9207 		req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9208 	}
9209 	req->subtype_valid_cnt = ctxm->split_entry_cnt;
9210 	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9211 		p[i] = cpu_to_le32(ctxm->split[i]);
9212 	for (i = 0, j = 0; j < n && !rc; i++) {
9213 		struct bnxt_ctx_pg_info *ctx_pg;
9214 
9215 		if (!(instance_bmap & (1 << i)))
9216 			continue;
9217 		req->instance = cpu_to_le16(i);
9218 		ctx_pg = &ctxm->pg_info[j++];
9219 		if (!ctx_pg->entries)
9220 			continue;
9221 		req->num_entries = cpu_to_le32(ctx_pg->entries);
9222 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9223 				      &req->page_size_pbl_level,
9224 				      &req->page_dir);
9225 		if (last && j == n)
9226 			req->flags =
9227 				cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9228 		rc = hwrm_req_send(bp, req);
9229 	}
9230 	hwrm_req_drop(bp, req);
9231 	return rc;
9232 }
9233 
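/* V2 top level: first set up any context types used for firmware
 * tracing, then mark the last type that will be programmed (the final
 * trace type, or failing that the highest type with valid memory) so
 * the ALL_DONE flag lands on the last message, and program every valid
 * type in order.
 */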
9234 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9235 {
9236 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
9237 	struct bnxt_ctx_mem_type *ctxm;
9238 	u16 last_type = BNXT_CTX_INV;
9239 	int rc = 0;
9240 	u16 type;
9241 
9242 	for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
9243 		ctxm = &ctx->ctx_arr[type];
9244 		if (!bnxt_bs_trace_avail(bp, type))
9245 			continue;
9246 		if (!ctxm->mem_valid) {
9247 			rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9248 						     ctxm->max_entries, 1);
9249 			if (rc) {
9250 				netdev_warn(bp->dev, "Unable to set up ctx page for type 0x%x.\n",
9251 					    type);
9252 				continue;
9253 			}
9254 			bnxt_bs_trace_init(bp, ctxm);
9255 		}
9256 		last_type = type;
9257 	}
9258 
9259 	if (last_type == BNXT_CTX_INV) {
9260 		for (type = 0; type < BNXT_CTX_MAX; type++) {
9261 			ctxm = &ctx->ctx_arr[type];
9262 			if (ctxm->mem_valid)
9263 				last_type = type;
9264 		}
9265 		if (last_type == BNXT_CTX_INV)
9266 			return 0;
9267 	}
9268 	ctx->ctx_arr[last_type].last = 1;
9269 
9270 	for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
9271 		ctxm = &ctx->ctx_arr[type];
9272 
9273 		if (!ctxm->mem_valid)
9274 			continue;
9275 		rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9276 		if (rc)
9277 			return rc;
9278 	}
9279 	return 0;
9280 }
9281 
9282 /**
9283  * __bnxt_copy_ctx_mem - copy host context memory
9284  * @bp: The driver context
9285  * @ctxm: The pointer to the context memory type
9286  * @buf: The destination buffer or NULL to just obtain the length
9287  * @offset: The buffer offset to copy the data to
9288  * @head: The head offset of context memory to copy from
9289  * @tail: The tail offset (last byte + 1) of context memory to end the copy
9290  *
9291  * This function is called for debugging purposes to dump the host context
9292  * used by the chip.
9293  *
9294  * Return: Length of memory copied
9295  */
9296 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9297 				  struct bnxt_ctx_mem_type *ctxm, void *buf,
9298 				  size_t offset, size_t head, size_t tail)
9299 {
9300 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9301 	size_t len = 0, total_len = 0;
9302 	int i, n = 1;
9303 
9304 	if (!ctx_pg)
9305 		return 0;
9306 
9307 	if (ctxm->instance_bmap)
9308 		n = hweight32(ctxm->instance_bmap);
9309 	for (i = 0; i < n; i++) {
9310 		len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9311 					    tail);
9312 		offset += len;
9313 		total_len += len;
9314 	}
9315 	return total_len;
9316 }
9317 
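/**
 * bnxt_copy_ctx_mem - copy an entire context memory type
 * @bp: The driver context
 * @ctxm: The pointer to the context memory type
 * @buf: The destination buffer or NULL to just obtain the length
 * @offset: The buffer offset to copy the data to
 *
 * Convenience wrapper around __bnxt_copy_ctx_mem() that copies the
 * whole context memory type, from offset 0 through
 * max_entries * entry_size.
 *
 * Return: Length of memory copied
 */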
9318 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9319 			 void *buf, size_t offset)
9320 {
9321 	size_t tail = ctxm->max_entries * ctxm->entry_size;
9322 
9323 	return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9324 }
9325 
9326 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9327 				  struct bnxt_ctx_mem_type *ctxm, bool force)
9328 {
9329 	struct bnxt_ctx_pg_info *ctx_pg;
9330 	int i, n = 1;
9331 
9332 	ctxm->last = 0;
9333 
9334 	if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9335 		return;
9336 
9337 	ctx_pg = ctxm->pg_info;
9338 	if (ctx_pg) {
9339 		if (ctxm->instance_bmap)
9340 			n = hweight32(ctxm->instance_bmap);
9341 		for (i = 0; i < n; i++)
9342 			bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9343 
9344 		kfree(ctx_pg);
9345 		ctxm->pg_info = NULL;
9346 		ctxm->mem_valid = 0;
9347 	}
9348 	memset(ctxm, 0, sizeof(*ctxm));
9349 }
9350 
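/**
 * bnxt_free_ctx_mem - free backing store context memory
 * @bp: The driver context
 * @force: If true, free persistent context types as well
 *
 * Frees the page tables of every context memory type and clears the
 * INITED flag.  Types flagged BNXT_CTX_MEM_PERSIST are retained unless
 * @force is set, and only a forced free releases the top-level
 * bnxt_ctx_mem_info structure itself.
 */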
9351 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9352 {
9353 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
9354 	u16 type;
9355 
9356 	if (!ctx)
9357 		return;
9358 
9359 	for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9360 		bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9361 
9362 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9363 	if (force) {
9364 		kfree(ctx);
9365 		bp->ctx = NULL;
9366 	}
9367 }
9368 
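/* Query the firmware's context memory requirements and allocate the
 * backing store.  On P5+ chips this sizes the QP, SRQ, CQ, VNIC, STAT
 * and TQM tables (plus MRAV and TIM when RoCE is enabled), then
 * programs the result through either the V2 interface or the legacy
 * HWRM_FUNC_BACKING_STORE_CFG call.
 */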
9369 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9370 {
9371 	struct bnxt_ctx_mem_type *ctxm;
9372 	struct bnxt_ctx_mem_info *ctx;
9373 	u32 l2_qps, qp1_qps, max_qps;
9374 	u32 ena, entries_sp, entries;
9375 	u32 srqs, max_srqs, min;
9376 	u32 num_mr, num_ah;
9377 	u32 extra_srqs = 0;
9378 	u32 extra_qps = 0;
9379 	u32 fast_qpmd_qps;
9380 	u8 pg_lvl = 1;
9381 	int i, rc;
9382 
9383 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9384 	if (rc) {
9385 		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9386 			   rc);
9387 		return rc;
9388 	}
9389 	ctx = bp->ctx;
9390 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9391 		return 0;
9392 
9393 	ena = 0;
9394 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9395 		goto skip_legacy;
9396 
9397 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9398 	l2_qps = ctxm->qp_l2_entries;
9399 	qp1_qps = ctxm->qp_qp1_entries;
9400 	fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9401 	max_qps = ctxm->max_entries;
9402 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9403 	srqs = ctxm->srq_l2_entries;
9404 	max_srqs = ctxm->max_entries;
9405 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9406 		pg_lvl = 2;
9407 		if (BNXT_SW_RES_LMT(bp)) {
9408 			extra_qps = max_qps - l2_qps - qp1_qps;
9409 			extra_srqs = max_srqs - srqs;
9410 		} else {
9411 			extra_qps = min_t(u32, 65536,
9412 					  max_qps - l2_qps - qp1_qps);
9413 			/* Allocate extra QPs if the fw supports the
9414 			 * RoCE fast QP destroy feature.
9415 			 */
9416 			extra_qps += fast_qpmd_qps;
9417 			extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9418 		}
9419 		if (fast_qpmd_qps)
9420 			ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9421 	}
9422 
9423 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9424 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9425 				     pg_lvl);
9426 	if (rc)
9427 		return rc;
9428 
9429 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9430 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9431 	if (rc)
9432 		return rc;
9433 
9434 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9435 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9436 				     extra_qps * 2, pg_lvl);
9437 	if (rc)
9438 		return rc;
9439 
9440 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9441 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9442 	if (rc)
9443 		return rc;
9444 
9445 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9446 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9447 	if (rc)
9448 		return rc;
9449 
9450 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9451 		goto skip_rdma;
9452 
9453 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9454 	if (BNXT_SW_RES_LMT(bp) &&
9455 	    ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9456 		num_ah = ctxm->mrav_av_entries;
9457 		num_mr = ctxm->max_entries - num_ah;
9458 	} else {
9459 		/* 128K extra is needed to accommodate static AH context
9460 		 * allocation by f/w.
9461 		 */
9462 		num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9463 		num_ah = min_t(u32, num_mr, 1024 * 128);
9464 		ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9465 		if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9466 			ctxm->mrav_av_entries = num_ah;
9467 	}
9468 
9469 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9470 	if (rc)
9471 		return rc;
9472 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9473 
9474 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9475 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9476 	if (rc)
9477 		return rc;
9478 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9479 
9480 skip_rdma:
9481 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9482 	min = ctxm->min_entries;
9483 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9484 		     2 * (extra_qps + qp1_qps) + min;
9485 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9486 	if (rc)
9487 		return rc;
9488 
9489 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9490 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
9491 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9492 	if (rc)
9493 		return rc;
9494 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9495 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9496 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9497 
9498 skip_legacy:
9499 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9500 		rc = bnxt_backing_store_cfg_v2(bp);
9501 	else
9502 		rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9503 	if (rc) {
9504 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9505 			   rc);
9506 		return rc;
9507 	}
9508 	ctx->flags |= BNXT_CTX_FLAG_INITED;
9509 	return 0;
9510 }
9511 
9512 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9513 {
9514 	struct hwrm_dbg_crashdump_medium_cfg_input *req;
9515 	u16 page_attr;
9516 	int rc;
9517 
9518 	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9519 		return 0;
9520 
9521 	rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9522 	if (rc)
9523 		return rc;
9524 
9525 	if (BNXT_PAGE_SIZE == 0x2000)
9526 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9527 	else if (BNXT_PAGE_SIZE == 0x10000)
9528 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9529 	else
9530 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9531 	req->pg_size_lvl = cpu_to_le16(page_attr |
9532 				       bp->fw_crash_mem->ring_mem.depth);
9533 	req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9534 	req->size = cpu_to_le32(bp->fw_crash_len);
9535 	req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9536 	return hwrm_req_send(bp, req);
9537 }
9538 
9539 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9540 {
9541 	if (bp->fw_crash_mem) {
9542 		bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9543 		kfree(bp->fw_crash_mem);
9544 		bp->fw_crash_mem = NULL;
9545 	}
9546 }
9547 
9548 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9549 {
9550 	u32 mem_size = 0;
9551 	int rc;
9552 
9553 	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9554 		return 0;
9555 
9556 	rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9557 	if (rc)
9558 		return rc;
9559 
9560 	mem_size = round_up(mem_size, 4);
9561 
9562 	/* Keep and reuse the existing pages if they are large enough */
9563 	if (bp->fw_crash_mem &&
9564 	    mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9565 		goto alloc_done;
9566 
9567 	if (bp->fw_crash_mem)
9568 		bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9569 	else
9570 		bp->fw_crash_mem = kzalloc_obj(*bp->fw_crash_mem);
9571 	if (!bp->fw_crash_mem)
9572 		return -ENOMEM;
9573 
9574 	rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9575 	if (rc) {
9576 		bnxt_free_crash_dump_mem(bp);
9577 		return rc;
9578 	}
9579 
9580 alloc_done:
9581 	bp->fw_crash_len = mem_size;
9582 	return 0;
9583 }
9584 
9585 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9586 {
9587 	struct hwrm_func_resource_qcaps_output *resp;
9588 	struct hwrm_func_resource_qcaps_input *req;
9589 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9590 	int rc;
9591 
9592 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9593 	if (rc)
9594 		return rc;
9595 
9596 	req->fid = cpu_to_le16(0xffff);
9597 	resp = hwrm_req_hold(bp, req);
9598 	rc = hwrm_req_send_silent(bp, req);
9599 	if (rc)
9600 		goto hwrm_func_resc_qcaps_exit;
9601 
9602 	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9603 	if (!all)
9604 		goto hwrm_func_resc_qcaps_exit;
9605 
9606 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9607 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9608 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9609 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9610 	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9611 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9612 	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9613 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9614 	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9615 	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9616 	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9617 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9618 	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9619 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9620 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9621 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9622 
9623 	if (hw_resc->max_rsscos_ctxs >=
9624 	    hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
9625 		bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
9626 
9627 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9628 		u16 max_msix = le16_to_cpu(resp->max_msix);
9629 
9630 		hw_resc->max_nqs = max_msix;
9631 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9632 	}
9633 
9634 	if (BNXT_PF(bp)) {
9635 		struct bnxt_pf_info *pf = &bp->pf;
9636 
9637 		pf->vf_resv_strategy =
9638 			le16_to_cpu(resp->vf_reservation_strategy);
9639 		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9640 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9641 	}
9642 hwrm_func_resc_qcaps_exit:
9643 	hwrm_req_drop(bp, req);
9644 	return rc;
9645 }
9646 
9647 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9648 {
9649 	struct hwrm_port_mac_ptp_qcfg_output *resp;
9650 	struct hwrm_port_mac_ptp_qcfg_input *req;
9651 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9652 	u8 flags;
9653 	int rc;
9654 
9655 	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9656 		rc = -ENODEV;
9657 		goto no_ptp;
9658 	}
9659 
9660 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9661 	if (rc)
9662 		goto no_ptp;
9663 
9664 	req->port_id = cpu_to_le16(bp->pf.port_id);
9665 	resp = hwrm_req_hold(bp, req);
9666 	rc = hwrm_req_send(bp, req);
9667 	if (rc)
9668 		goto exit;
9669 
9670 	flags = resp->flags;
9671 	if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9672 	    !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9673 		rc = -ENODEV;
9674 		goto exit;
9675 	}
9676 	if (!ptp) {
9677 		ptp = kzalloc_obj(*ptp);
9678 		if (!ptp) {
9679 			rc = -ENOMEM;
9680 			goto exit;
9681 		}
9682 		ptp->bp = bp;
9683 		bp->ptp_cfg = ptp;
9684 	}
9685 
9686 	if (flags &
9687 	    (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9688 	     PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9689 		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9690 		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9691 	} else if (BNXT_CHIP_P5(bp)) {
9692 		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9693 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9694 	} else {
9695 		rc = -ENODEV;
9696 		goto exit;
9697 	}
9698 	ptp->rtc_configured =
9699 		(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9700 	rc = bnxt_ptp_init(bp);
9701 	if (rc)
9702 		netdev_warn(bp->dev, "PTP initialization failed.\n");
9703 exit:
9704 	hwrm_req_drop(bp, req);
9705 	if (!rc)
9706 		return 0;
9707 
9708 no_ptp:
9709 	bnxt_ptp_clear(bp);
9710 	kfree(ptp);
9711 	bp->ptp_cfg = NULL;
9712 	return rc;
9713 }
9714 
9715 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9716 {
9717 	u32 flags, flags_ext, flags_ext2, flags_ext3;
9718 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9719 	struct hwrm_func_qcaps_output *resp;
9720 	struct hwrm_func_qcaps_input *req;
9721 	int rc;
9722 
9723 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9724 	if (rc)
9725 		return rc;
9726 
9727 	req->fid = cpu_to_le16(0xffff);
9728 	resp = hwrm_req_hold(bp, req);
9729 	rc = hwrm_req_send(bp, req);
9730 	if (rc)
9731 		goto hwrm_func_qcaps_exit;
9732 
9733 	flags = le32_to_cpu(resp->flags);
9734 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9735 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9736 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9737 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9738 	if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
9739 		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
9740 	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9741 		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9742 	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9743 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9744 	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9745 		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9746 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9747 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9748 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9749 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9750 	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9751 		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9752 	if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9753 		bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9754 
9755 	flags_ext = le32_to_cpu(resp->flags_ext);
9756 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9757 		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9758 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9759 		bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9760 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
9761 		bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
9762 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9763 		bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9764 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9765 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9766 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9767 		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9768 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9769 		bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9770 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9771 		bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9772 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9773 		bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9774 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9775 		bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9776 
9777 	flags_ext2 = le32_to_cpu(resp->flags_ext2);
9778 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9779 		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9780 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9781 		bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9782 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9783 		bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9784 	if (flags_ext2 &
9785 	    FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9786 		bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9787 	if (BNXT_PF(bp) &&
9788 	    (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9789 		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9790 
9791 	flags_ext3 = le32_to_cpu(resp->flags_ext3);
9792 	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9793 		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9794 	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9795 		bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9796 
9797 	bp->tx_push_thresh = 0;
9798 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9799 	    BNXT_FW_MAJ(bp) > 217)
9800 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9801 
9802 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9803 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9804 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9805 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9806 	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9807 	if (!hw_resc->max_hw_ring_grps)
9808 		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9809 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9810 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9811 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9812 
9813 	hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9814 	hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9815 	hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9816 	hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9817 	hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9818 	hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9819 
9820 	if (BNXT_PF(bp)) {
9821 		struct bnxt_pf_info *pf = &bp->pf;
9822 
9823 		pf->fw_fid = le16_to_cpu(resp->fid);
9824 		pf->port_id = le16_to_cpu(resp->port_id);
9825 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9826 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9827 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
9828 		bp->flags &= ~BNXT_FLAG_WOL_CAP;
9829 		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9830 			bp->flags |= BNXT_FLAG_WOL_CAP;
9831 		if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9832 			bp->fw_cap |= BNXT_FW_CAP_PTP;
9833 		} else {
9834 			bnxt_ptp_clear(bp);
9835 			kfree(bp->ptp_cfg);
9836 			bp->ptp_cfg = NULL;
9837 		}
9838 	} else {
9839 #ifdef CONFIG_BNXT_SRIOV
9840 		struct bnxt_vf_info *vf = &bp->vf;
9841 
9842 		vf->fw_fid = le16_to_cpu(resp->fid);
9843 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9844 #endif
9845 	}
9846 	bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9847 
9848 hwrm_func_qcaps_exit:
9849 	hwrm_req_drop(bp, req);
9850 	return rc;
9851 }
9852 
9853 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9854 {
9855 	struct hwrm_dbg_qcaps_output *resp;
9856 	struct hwrm_dbg_qcaps_input *req;
9857 	int rc;
9858 
9859 	bp->fw_dbg_cap = 0;
9860 	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9861 		return;
9862 
9863 	rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9864 	if (rc)
9865 		return;
9866 
9867 	req->fid = cpu_to_le16(0xffff);
9868 	resp = hwrm_req_hold(bp, req);
9869 	rc = hwrm_req_send(bp, req);
9870 	if (rc)
9871 		goto hwrm_dbg_qcaps_exit;
9872 
9873 	bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9874 
9875 hwrm_dbg_qcaps_exit:
9876 	hwrm_req_drop(bp, req);
9877 }
9878 
9879 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9880 
9881 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9882 {
9883 	int rc;
9884 
9885 	rc = __bnxt_hwrm_func_qcaps(bp);
9886 	if (rc)
9887 		return rc;
9888 
9889 	bnxt_hwrm_dbg_qcaps(bp);
9890 
9891 	rc = bnxt_hwrm_queue_qportcfg(bp);
9892 	if (rc) {
9893 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9894 		return rc;
9895 	}
9896 	if (bp->hwrm_spec_code >= 0x10803) {
9897 		rc = bnxt_alloc_ctx_mem(bp);
9898 		if (rc)
9899 			return rc;
9900 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9901 		if (!rc)
9902 			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9903 	}
9904 	return 0;
9905 }
9906 
9907 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9908 {
9909 	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9910 	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9911 	u32 flags;
9912 	int rc;
9913 
9914 	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9915 		return 0;
9916 
9917 	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9918 	if (rc)
9919 		return rc;
9920 
9921 	resp = hwrm_req_hold(bp, req);
9922 	rc = hwrm_req_send(bp, req);
9923 	if (rc)
9924 		goto hwrm_cfa_adv_qcaps_exit;
9925 
9926 	flags = le32_to_cpu(resp->flags);
9927 	if (flags &
9928 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9929 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9930 
9931 	if (flags &
9932 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9933 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9934 
9935 	if (flags &
9936 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9937 		bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9938 
9939 hwrm_cfa_adv_qcaps_exit:
9940 	hwrm_req_drop(bp, req);
9941 	return rc;
9942 }
9943 
9944 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9945 {
9946 	if (bp->fw_health)
9947 		return 0;
9948 
9949 	bp->fw_health = kzalloc_obj(*bp->fw_health);
9950 	if (!bp->fw_health)
9951 		return -ENOMEM;
9952 
9953 	mutex_init(&bp->fw_health->lock);
9954 	return 0;
9955 }
9956 
9957 static int bnxt_alloc_fw_health(struct bnxt *bp)
9958 {
9959 	int rc;
9960 
9961 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9962 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9963 		return 0;
9964 
9965 	rc = __bnxt_alloc_fw_health(bp);
9966 	if (rc) {
9967 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9968 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9969 		return rc;
9970 	}
9971 
9972 	return 0;
9973 }
9974 
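/* Point the firmware health GRC window at the window-aligned base (per
 * BNXT_GRC_BASE_MASK) that contains @reg; the register contents can
 * then be read through BAR0 at BNXT_FW_HEALTH_WIN_OFF(reg).
 */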
9975 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9976 {
9977 	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9978 					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9979 					 BNXT_FW_HEALTH_WIN_MAP_OFF);
9980 }
9981 
9982 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9983 {
9984 	struct bnxt_fw_health *fw_health = bp->fw_health;
9985 	u32 reg_type;
9986 
9987 	if (!fw_health)
9988 		return;
9989 
9990 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9991 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9992 		fw_health->status_reliable = false;
9993 
9994 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9995 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9996 		fw_health->resets_reliable = false;
9997 }
9998 
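/* Discover where the firmware reports its health status.  Newer
 * firmware publishes a hcomm_status structure at a well-known GRC
 * location; if its signature checks out, the status register location
 * is read from it.  Otherwise fall back to the fixed GRC status
 * register on P5+ chips.  A GRC-based location is mapped through the
 * BAR0 window before use.
 */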
9999 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
10000 {
10001 	void __iomem *hs;
10002 	u32 status_loc;
10003 	u32 reg_type;
10004 	u32 sig;
10005 
10006 	if (bp->fw_health)
10007 		bp->fw_health->status_reliable = false;
10008 
10009 	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
10010 	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
10011 
10012 	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
10013 	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
10014 		if (!bp->chip_num) {
10015 			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
10016 			bp->chip_num = readl(bp->bar0 +
10017 					     BNXT_FW_HEALTH_WIN_BASE +
10018 					     BNXT_GRC_REG_CHIP_NUM);
10019 		}
10020 		if (!BNXT_CHIP_P5_PLUS(bp))
10021 			return;
10022 
10023 		status_loc = BNXT_GRC_REG_STATUS_P5 |
10024 			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
10025 	} else {
10026 		status_loc = readl(hs + offsetof(struct hcomm_status,
10027 						 fw_status_loc));
10028 	}
10029 
10030 	if (__bnxt_alloc_fw_health(bp)) {
10031 		netdev_warn(bp->dev, "no memory for firmware status checks\n");
10032 		return;
10033 	}
10034 
10035 	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
10036 	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
10037 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
10038 		__bnxt_map_fw_health_reg(bp, status_loc);
10039 		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
10040 			BNXT_FW_HEALTH_WIN_OFF(status_loc);
10041 	}
10042 
10043 	bp->fw_health->status_reliable = true;
10044 }
10045 
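/* Pre-map all GRC-based health monitoring registers.  Only a single
 * GRC window is reserved for them, so every GRC register must share
 * the same window base (identical BNXT_GRC_BASE_MASK bits); otherwise
 * -ERANGE is returned and the health registers stay unmapped.
 */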
10046 static int bnxt_map_fw_health_regs(struct bnxt *bp)
10047 {
10048 	struct bnxt_fw_health *fw_health = bp->fw_health;
10049 	u32 reg_base = 0xffffffff;
10050 	int i;
10051 
10052 	bp->fw_health->status_reliable = false;
10053 	bp->fw_health->resets_reliable = false;
10054 	/* Only pre-map the monitoring GRC registers using window 3 */
10055 	for (i = 0; i < 4; i++) {
10056 		u32 reg = fw_health->regs[i];
10057 
10058 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
10059 			continue;
10060 		if (reg_base == 0xffffffff)
10061 			reg_base = reg & BNXT_GRC_BASE_MASK;
10062 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
10063 			return -ERANGE;
10064 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
10065 	}
10066 	bp->fw_health->status_reliable = true;
10067 	bp->fw_health->resets_reliable = true;
10068 	if (reg_base == 0xffffffff)
10069 		return 0;
10070 
10071 	__bnxt_map_fw_health_reg(bp, reg_base);
10072 	return 0;
10073 }
10074 
10075 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
10076 {
10077 	if (!bp->fw_health)
10078 		return;
10079 
10080 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
10081 		bp->fw_health->status_reliable = true;
10082 		bp->fw_health->resets_reliable = true;
10083 	} else {
10084 		bnxt_try_map_fw_health_reg(bp);
10085 	}
10086 }
10087 
10088 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10089 {
10090 	struct bnxt_fw_health *fw_health = bp->fw_health;
10091 	struct hwrm_error_recovery_qcfg_output *resp;
10092 	struct hwrm_error_recovery_qcfg_input *req;
10093 	int rc, i;
10094 
10095 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10096 		return 0;
10097 
10098 	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10099 	if (rc)
10100 		return rc;
10101 
10102 	resp = hwrm_req_hold(bp, req);
10103 	rc = hwrm_req_send(bp, req);
10104 	if (rc)
10105 		goto err_recovery_out;
10106 	fw_health->flags = le32_to_cpu(resp->flags);
10107 	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10108 	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10109 		rc = -EINVAL;
10110 		goto err_recovery_out;
10111 	}
10112 	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10113 	fw_health->master_func_wait_dsecs =
10114 		le32_to_cpu(resp->master_func_wait_period);
10115 	fw_health->normal_func_wait_dsecs =
10116 		le32_to_cpu(resp->normal_func_wait_period);
10117 	fw_health->post_reset_wait_dsecs =
10118 		le32_to_cpu(resp->master_func_wait_period_after_reset);
10119 	fw_health->post_reset_max_wait_dsecs =
10120 		le32_to_cpu(resp->max_bailout_time_after_reset);
10121 	fw_health->regs[BNXT_FW_HEALTH_REG] =
10122 		le32_to_cpu(resp->fw_health_status_reg);
10123 	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10124 		le32_to_cpu(resp->fw_heartbeat_reg);
10125 	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10126 		le32_to_cpu(resp->fw_reset_cnt_reg);
10127 	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10128 		le32_to_cpu(resp->reset_inprogress_reg);
10129 	fw_health->fw_reset_inprog_reg_mask =
10130 		le32_to_cpu(resp->reset_inprogress_reg_mask);
10131 	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10132 	if (fw_health->fw_reset_seq_cnt >= 16) {
10133 		rc = -EINVAL;
10134 		goto err_recovery_out;
10135 	}
10136 	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10137 		fw_health->fw_reset_seq_regs[i] =
10138 			le32_to_cpu(resp->reset_reg[i]);
10139 		fw_health->fw_reset_seq_vals[i] =
10140 			le32_to_cpu(resp->reset_reg_val[i]);
10141 		fw_health->fw_reset_seq_delay_msec[i] =
10142 			resp->delay_after_reset[i];
10143 	}
10144 err_recovery_out:
10145 	hwrm_req_drop(bp, req);
10146 	if (!rc)
10147 		rc = bnxt_map_fw_health_regs(bp);
10148 	if (rc)
10149 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10150 	return rc;
10151 }
10152 
10153 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10154 {
10155 	struct hwrm_func_reset_input *req;
10156 	int rc;
10157 
10158 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10159 	if (rc)
10160 		return rc;
10161 
10162 	req->enables = 0;
10163 	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10164 	return hwrm_req_send(bp, req);
10165 }
10166 
10167 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10168 {
10169 	struct hwrm_nvm_get_dev_info_output nvm_info;
10170 
10171 	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10172 		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10173 			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10174 			 nvm_info.nvm_cfg_ver_upd);
10175 }
10176 
10177 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10178 {
10179 	struct hwrm_queue_qportcfg_output *resp;
10180 	struct hwrm_queue_qportcfg_input *req;
10181 	u8 i, j, *qptr;
10182 	bool no_rdma;
10183 	int rc = 0;
10184 
10185 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10186 	if (rc)
10187 		return rc;
10188 
10189 	resp = hwrm_req_hold(bp, req);
10190 	rc = hwrm_req_send(bp, req);
10191 	if (rc)
10192 		goto qportcfg_exit;
10193 
10194 	if (!resp->max_configurable_queues) {
10195 		rc = -EINVAL;
10196 		goto qportcfg_exit;
10197 	}
10198 	bp->max_tc = resp->max_configurable_queues;
10199 	bp->max_lltc = resp->max_configurable_lossless_queues;
10200 	if (bp->max_tc > BNXT_MAX_QUEUE)
10201 		bp->max_tc = BNXT_MAX_QUEUE;
10202 
10203 	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10204 	qptr = &resp->queue_id0;
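	/* Walk the returned (queue id, profile) pairs.  CNP queues are
	 * reserved for RoCE congestion handling, so they only count
	 * toward max_tc when RoCE is unavailable on a PF.
	 */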
10205 	for (i = 0, j = 0; i < bp->max_tc; i++) {
10206 		bp->q_info[j].queue_id = *qptr;
10207 		bp->q_ids[i] = *qptr++;
10208 		bp->q_info[j].queue_profile = *qptr++;
10209 		bp->tc_to_qidx[j] = j;
10210 		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10211 		    (no_rdma && BNXT_PF(bp)))
10212 			j++;
10213 	}
10214 	bp->max_q = bp->max_tc;
10215 	bp->max_tc = max_t(u8, j, 1);
10216 
10217 	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10218 		bp->max_tc = 1;
10219 
10220 	if (bp->max_lltc > bp->max_tc)
10221 		bp->max_lltc = bp->max_tc;
10222 
10223 qportcfg_exit:
10224 	hwrm_req_drop(bp, req);
10225 	return rc;
10226 }
10227 
10228 static int bnxt_hwrm_poll(struct bnxt *bp)
10229 {
10230 	struct hwrm_ver_get_input *req;
10231 	int rc;
10232 
10233 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10234 	if (rc)
10235 		return rc;
10236 
10237 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10238 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
10239 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10240 
10241 	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10242 	rc = hwrm_req_send(bp, req);
10243 	return rc;
10244 }
10245 
10246 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10247 {
10248 	struct hwrm_ver_get_output *resp;
10249 	struct hwrm_ver_get_input *req;
10250 	u16 fw_maj, fw_min, fw_bld, fw_rsv;
10251 	u32 dev_caps_cfg, hwrm_ver;
10252 	int rc, len, max_tmo_secs;
10253 
10254 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10255 	if (rc)
10256 		return rc;
10257 
10258 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10259 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10260 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10261 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
10262 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10263 
10264 	resp = hwrm_req_hold(bp, req);
10265 	rc = hwrm_req_send(bp, req);
10266 	if (rc)
10267 		goto hwrm_ver_get_exit;
10268 
10269 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10270 
10271 	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10272 			     resp->hwrm_intf_min_8b << 8 |
10273 			     resp->hwrm_intf_upd_8b;
10274 	if (resp->hwrm_intf_maj_8b < 1) {
10275 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10276 			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10277 			    resp->hwrm_intf_upd_8b);
10278 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10279 	}
10280 
10281 	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10282 			HWRM_VERSION_UPDATE;
10283 
10284 	if (bp->hwrm_spec_code > hwrm_ver)
10285 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10286 			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10287 			 HWRM_VERSION_UPDATE);
10288 	else
10289 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10290 			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10291 			 resp->hwrm_intf_upd_8b);
10292 
10293 	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10294 	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10295 		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10296 		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10297 		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10298 		len = FW_VER_STR_LEN;
10299 	} else {
10300 		fw_maj = resp->hwrm_fw_maj_8b;
10301 		fw_min = resp->hwrm_fw_min_8b;
10302 		fw_bld = resp->hwrm_fw_bld_8b;
10303 		fw_rsv = resp->hwrm_fw_rsvd_8b;
10304 		len = BC_HWRM_STR_LEN;
10305 	}
10306 	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10307 	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10308 		 fw_rsv);
10309 
10310 	if (strlen(resp->active_pkg_name)) {
10311 		int fw_ver_len = strlen(bp->fw_ver_str);
10312 
10313 		snprintf(bp->fw_ver_str + fw_ver_len,
10314 			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10315 			 resp->active_pkg_name);
10316 		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10317 	}
10318 
10319 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10320 	if (!bp->hwrm_cmd_timeout)
10321 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10322 	bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10323 	if (!bp->hwrm_cmd_max_timeout)
10324 		bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10325 	max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10326 #ifdef CONFIG_DETECT_HUNG_TASK
10327 	if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10328 	    max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10329 		netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10330 			    max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10331 	}
10332 #endif
10333 
10334 	if (resp->hwrm_intf_maj_8b >= 1) {
10335 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10336 		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10337 	}
10338 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10339 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10340 
10341 	bp->chip_num = le16_to_cpu(resp->chip_num);
10342 	bp->chip_rev = resp->chip_rev;
10343 	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10344 	    !resp->chip_metal)
10345 		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10346 
10347 	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10348 	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10349 	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10350 		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10351 
10352 	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10353 		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10354 
10355 	if (dev_caps_cfg &
10356 	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10357 		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10358 
10359 	if (dev_caps_cfg &
10360 	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10361 		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10362 
10363 	if (dev_caps_cfg &
10364 	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10365 		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10366 
10367 hwrm_ver_get_exit:
10368 	hwrm_req_drop(bp, req);
10369 	return rc;
10370 }
10371 
10372 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10373 {
10374 	struct hwrm_fw_set_time_input *req;
10375 	struct tm tm;
10376 	time64_t now = ktime_get_real_seconds();
10377 	int rc;
10378 
10379 	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10380 	    bp->hwrm_spec_code < 0x10400)
10381 		return -EOPNOTSUPP;
10382 
10383 	time64_to_tm(now, 0, &tm);
10384 	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10385 	if (rc)
10386 		return rc;
10387 
10388 	req->year = cpu_to_le16(1900 + tm.tm_year);
10389 	req->month = 1 + tm.tm_mon;
10390 	req->day = tm.tm_mday;
10391 	req->hour = tm.tm_hour;
10392 	req->minute = tm.tm_min;
10393 	req->second = tm.tm_sec;
10394 	return hwrm_req_send(bp, req);
10395 }
10396 
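/* Fold a hardware counter that is only as wide as @mask into the full
 * 64-bit software counter, accounting for rollover.  E.g. with a
 * 48-bit counter (mask = 0xffffffffffff), if the previous masked value
 * was 0xffffffffff00 and the new hardware value reads 0x10, hw < old,
 * so one full wrap of (mask + 1) is added on top of the new reading.
 */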
10397 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10398 {
10399 	u64 sw_tmp;
10400 
10401 	hw &= mask;
10402 	sw_tmp = (*sw & ~mask) | hw;
10403 	if (hw < (*sw & mask))
10404 		sw_tmp += mask + 1;
10405 	WRITE_ONCE(*sw, sw_tmp);
10406 }
10407 
10408 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10409 				    int count, bool ignore_zero)
10410 {
10411 	int i;
10412 
10413 	for (i = 0; i < count; i++) {
10414 		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10415 
10416 		if (ignore_zero && !hw)
10417 			continue;
10418 
10419 		if (masks[i] == -1ULL)
10420 			sw_stats[i] = hw;
10421 		else
10422 			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10423 	}
10424 }
10425 
10426 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10427 {
10428 	if (!stats->hw_stats)
10429 		return;
10430 
10431 	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10432 				stats->hw_masks, stats->len / 8, false);
10433 }
10434 
10435 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10436 {
10437 	struct bnxt_stats_mem *ring0_stats;
10438 	bool ignore_zero = false;
10439 	int i;
10440 
10441 	/* Chip bug.  Counter intermittently becomes 0. */
10442 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10443 		ignore_zero = true;
10444 
10445 	for (i = 0; i < bp->cp_nr_rings; i++) {
10446 		struct bnxt_napi *bnapi = bp->bnapi[i];
10447 		struct bnxt_cp_ring_info *cpr;
10448 		struct bnxt_stats_mem *stats;
10449 
10450 		cpr = &bnapi->cp_ring;
10451 		stats = &cpr->stats;
10452 		if (!i)
10453 			ring0_stats = stats;
10454 		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10455 					ring0_stats->hw_masks,
10456 					ring0_stats->len / 8, ignore_zero);
10457 	}
10458 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
10459 		struct bnxt_stats_mem *stats = &bp->port_stats;
10460 		__le64 *hw_stats = stats->hw_stats;
10461 		u64 *sw_stats = stats->sw_stats;
10462 		u64 *masks = stats->hw_masks;
10463 		int cnt;
10464 
10465 		cnt = sizeof(struct rx_port_stats) / 8;
10466 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10467 
10468 		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10469 		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10470 		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10471 		cnt = sizeof(struct tx_port_stats) / 8;
10472 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10473 	}
10474 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10475 		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10476 		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10477 	}
10478 }
10479 
10480 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10481 {
10482 	struct hwrm_port_qstats_input *req;
10483 	struct bnxt_pf_info *pf = &bp->pf;
10484 	int rc;
10485 
10486 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10487 		return 0;
10488 
10489 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10490 		return -EOPNOTSUPP;
10491 
10492 	rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10493 	if (rc)
10494 		return rc;
10495 
10496 	req->flags = flags;
10497 	req->port_id = cpu_to_le16(pf->port_id);
10498 	req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10499 					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
10500 	req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10501 	return hwrm_req_send(bp, req);
10502 }
10503 
10504 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10505 {
10506 	struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10507 	struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10508 	struct hwrm_port_qstats_ext_output *resp_qs;
10509 	struct hwrm_port_qstats_ext_input *req_qs;
10510 	struct bnxt_pf_info *pf = &bp->pf;
10511 	u32 tx_stat_size;
10512 	int rc;
10513 
10514 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10515 		return 0;
10516 
10517 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10518 		return -EOPNOTSUPP;
10519 
10520 	rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10521 	if (rc)
10522 		return rc;
10523 
10524 	req_qs->flags = flags;
10525 	req_qs->port_id = cpu_to_le16(pf->port_id);
10526 	req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10527 	req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10528 	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10529 		       sizeof(struct tx_port_stats_ext) : 0;
10530 	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10531 	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10532 	resp_qs = hwrm_req_hold(bp, req_qs);
10533 	rc = hwrm_req_send(bp, req_qs);
10534 	if (!rc) {
10535 		bp->fw_rx_stats_ext_size =
10536 			le16_to_cpu(resp_qs->rx_stat_size) / 8;
10537 		if (BNXT_FW_MAJ(bp) < 220 &&
10538 		    bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10539 			bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10540 
10541 		bp->fw_tx_stats_ext_size = tx_stat_size ?
10542 			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10543 	} else {
10544 		bp->fw_rx_stats_ext_size = 0;
10545 		bp->fw_tx_stats_ext_size = 0;
10546 	}
10547 	hwrm_req_drop(bp, req_qs);
10548 
10549 	if (flags)
10550 		return rc;
10551 
10552 	if (bp->fw_tx_stats_ext_size <=
10553 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10554 		bp->pri2cos_valid = 0;
10555 		return rc;
10556 	}
10557 
10558 	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10559 	if (rc)
10560 		return rc;
10561 
10562 	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10563 
10564 	resp_qc = hwrm_req_hold(bp, req_qc);
10565 	rc = hwrm_req_send(bp, req_qc);
10566 	if (!rc) {
10567 		u8 *pri2cos;
10568 		int i, j;
10569 
10570 		pri2cos = &resp_qc->pri0_cos_queue_id;
10571 		for (i = 0; i < 8; i++) {
10572 			u8 queue_id = pri2cos[i];
10573 			u8 queue_idx;
10574 
10575 			/* Per port queue IDs start from 0, 10, 20, etc */
10576 			queue_idx = queue_id % 10;
10577 			if (queue_idx > BNXT_MAX_QUEUE) {
10578 				bp->pri2cos_valid = false;
10579 				hwrm_req_drop(bp, req_qc);
10580 				return rc;
10581 			}
10582 			for (j = 0; j < bp->max_q; j++) {
10583 				if (bp->q_ids[j] == queue_id)
10584 					bp->pri2cos_idx[i] = queue_idx;
10585 			}
10586 		}
10587 		bp->pri2cos_valid = true;
10588 	}
10589 	hwrm_req_drop(bp, req_qc);
10590 
10591 	return rc;
10592 }
10593 
10594 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10595 {
10596 	bnxt_hwrm_tunnel_dst_port_free(bp,
10597 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10598 	bnxt_hwrm_tunnel_dst_port_free(bp,
10599 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10600 }
10601 
10602 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10603 {
10604 	int rc, i;
10605 	u32 tpa_flags = 0;
10606 
10607 	if (set_tpa)
10608 		tpa_flags = bp->flags & BNXT_FLAG_TPA;
10609 	else if (BNXT_NO_FW_ACCESS(bp))
10610 		return 0;
10611 	for (i = 0; i < bp->nr_vnics; i++) {
10612 		rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10613 		if (rc) {
10614 			netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
10615 				   i, rc);
10616 			return rc;
10617 		}
10618 	}
10619 	return 0;
10620 }
10621 
10622 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10623 {
10624 	int i;
10625 
10626 	for (i = 0; i < bp->nr_vnics; i++)
10627 		bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10628 }
10629 
10630 static void bnxt_clear_vnic(struct bnxt *bp)
10631 {
10632 	if (!bp->vnic_info)
10633 		return;
10634 
10635 	bnxt_hwrm_clear_vnic_filter(bp);
10636 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10637 		/* clear all RSS settings before freeing the vnic ctx */
10638 		bnxt_hwrm_clear_vnic_rss(bp);
10639 		bnxt_hwrm_vnic_ctx_free(bp);
10640 	}
10641 	/* before freeing the vnic, undo the vnic TPA settings */
10642 	if (bp->flags & BNXT_FLAG_TPA)
10643 		bnxt_set_tpa(bp, false);
10644 	bnxt_hwrm_vnic_free(bp);
10645 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10646 		bnxt_hwrm_vnic_ctx_free(bp);
10647 }
10648 
10649 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10650 				    bool irq_re_init)
10651 {
10652 	bnxt_clear_vnic(bp);
10653 	bnxt_hwrm_ring_free(bp, close_path);
10654 	bnxt_hwrm_ring_grp_free(bp);
10655 	if (irq_re_init) {
10656 		bnxt_hwrm_stat_ctx_free(bp);
10657 		bnxt_hwrm_free_tunnel_ports(bp);
10658 	}
10659 }
10660 
10661 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10662 {
10663 	struct hwrm_func_cfg_input *req;
10664 	u8 evb_mode;
10665 	int rc;
10666 
10667 	if (br_mode == BRIDGE_MODE_VEB)
10668 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10669 	else if (br_mode == BRIDGE_MODE_VEPA)
10670 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10671 	else
10672 		return -EINVAL;
10673 
10674 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10675 	if (rc)
10676 		return rc;
10677 
10678 	req->fid = cpu_to_le16(0xffff);
10679 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10680 	req->evb_mode = evb_mode;
10681 	return hwrm_req_send(bp, req);
10682 }
10683 
10684 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10685 {
10686 	struct hwrm_func_cfg_input *req;
10687 	int rc;
10688 
10689 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10690 		return 0;
10691 
10692 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10693 	if (rc)
10694 		return rc;
10695 
10696 	req->fid = cpu_to_le16(0xffff);
10697 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10698 	req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10699 	if (size == 128)
10700 		req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10701 
10702 	return hwrm_req_send(bp, req);
10703 }
10704 
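/* Set up a VNIC on pre-P5 chips: allocate the RSS (and Nitro A0 CoS)
 * contexts, configure the VNIC and its ring group, enable RSS hashing, and
 * enable header-data split when aggregation rings are in use.
 */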
10705 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10706 {
10707 	int rc;
10708 
10709 	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10710 		goto skip_rss_ctx;
10711 
10712 	/* allocate context for vnic */
10713 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10714 	if (rc) {
10715 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10716 			   vnic->vnic_id, rc);
10717 		goto vnic_setup_err;
10718 	}
10719 	bp->rsscos_nr_ctxs++;
10720 
10721 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10722 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10723 		if (rc) {
10724 			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10725 				   vnic->vnic_id, rc);
10726 			goto vnic_setup_err;
10727 		}
10728 		bp->rsscos_nr_ctxs++;
10729 	}
10730 
10731 skip_rss_ctx:
10732 	/* configure default vnic, ring grp */
10733 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10734 	if (rc) {
10735 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10736 			   vnic->vnic_id, rc);
10737 		goto vnic_setup_err;
10738 	}
10739 
10740 	/* Enable RSS hashing on vnic */
10741 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10742 	if (rc) {
10743 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10744 			   vnic->vnic_id, rc);
10745 		goto vnic_setup_err;
10746 	}
10747 
10748 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10749 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10750 		if (rc) {
10751 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10752 				   vnic->vnic_id, rc);
10753 		}
10754 	}
10755 
10756 vnic_setup_err:
10757 	return rc;
10758 }
10759 
10760 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10761 			  u8 valid)
10762 {
10763 	struct hwrm_vnic_update_input *req;
10764 	int rc;
10765 
10766 	rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10767 	if (rc)
10768 		return rc;
10769 
10770 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10771 
10772 	if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10773 		req->mru = cpu_to_le16(vnic->mru);
10774 
10775 	req->enables = cpu_to_le32(valid);
10776 
10777 	return hwrm_req_send(bp, req);
10778 }
10779 
10780 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10781 {
10782 	int rc;
10783 
10784 	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10785 	if (rc) {
10786 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10787 			   vnic->vnic_id, rc);
10788 		return rc;
10789 	}
10790 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10791 	if (rc)
10792 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10793 			   vnic->vnic_id, rc);
10794 	return rc;
10795 }
10796 
10797 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10798 {
10799 	int rc, i, nr_ctxs;
10800 
10801 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10802 	for (i = 0; i < nr_ctxs; i++) {
10803 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10804 		if (rc) {
10805 			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10806 				   vnic->vnic_id, i, rc);
10807 			break;
10808 		}
10809 		bp->rsscos_nr_ctxs++;
10810 	}
10811 	if (i < nr_ctxs)
10812 		return -ENOMEM;
10813 
10814 	rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10815 	if (rc)
10816 		return rc;
10817 
10818 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10819 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10820 		if (rc) {
10821 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10822 				   vnic->vnic_id, rc);
10823 		}
10824 	}
10825 	return rc;
10826 }
10827 
10828 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10829 {
10830 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10831 		return __bnxt_setup_vnic_p5(bp, vnic);
10832 	else
10833 		return __bnxt_setup_vnic(bp, vnic);
10834 }
10835 
10836 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10837 				     struct bnxt_vnic_info *vnic,
10838 				     u16 start_rx_ring_idx, int rx_rings)
10839 {
10840 	int rc;
10841 
10842 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10843 	if (rc) {
10844 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10845 			   vnic->vnic_id, rc);
10846 		return rc;
10847 	}
10848 	return bnxt_setup_vnic(bp, vnic);
10849 }
10850 
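/* Allocate the VNICs needed for aRFS.  Chips with ntuple VNIC support use a
 * single VNIC spanning all RX rings; legacy (pre-P5) chips get one
 * single-ring VNIC per RX ring.
 */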
10851 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10852 {
10853 	struct bnxt_vnic_info *vnic;
10854 	int i, rc = 0;
10855 
10856 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10857 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10858 		return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10859 	}
10860 
10861 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10862 		return 0;
10863 
10864 	for (i = 0; i < bp->rx_nr_rings; i++) {
10865 		u16 vnic_id = i + 1;
10866 		u16 ring_id = i;
10867 
10868 		if (vnic_id >= bp->nr_vnics)
10869 			break;
10870 
10871 		vnic = &bp->vnic_info[vnic_id];
10872 		vnic->flags |= BNXT_VNIC_RFS_FLAG;
10873 		if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10874 			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10875 		if (bnxt_alloc_and_setup_vnic(bp, vnic, ring_id, 1))
10876 			break;
10877 	}
10878 	return rc;
10879 }
10880 
10881 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10882 			  bool all)
10883 {
10884 	struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10885 	struct bnxt_filter_base *usr_fltr, *tmp;
10886 	struct bnxt_ntuple_filter *ntp_fltr;
10887 	int i;
10888 
10889 	bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10890 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10891 		if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10892 			bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10893 	}
10894 	if (!all)
10895 		return;
10896 
10897 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10898 		if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10899 		    usr_fltr->fw_vnic_id == rss_ctx->index) {
10900 			ntp_fltr = container_of(usr_fltr,
10901 						struct bnxt_ntuple_filter,
10902 						base);
10903 			bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10904 			bnxt_del_ntp_filter(bp, ntp_fltr);
10905 			bnxt_del_one_usr_fltr(bp, usr_fltr);
10906 		}
10907 	}
10908 
10909 	if (vnic->rss_table)
10910 		dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10911 				  vnic->rss_table,
10912 				  vnic->rss_table_dma_addr);
10913 	bp->num_rss_ctx--;
10914 }
10915 
10916 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10917 				  int rxr_id)
10918 {
10919 	u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10920 	int i, vnic_rx;
10921 
10922 	/* The ntuple VNIC always spans all the RX rings, so any ring id
10923 	 * change is relevant because a future filter may use it.
10924 	 */
10925 	if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10926 		return true;
10927 
10928 	for (i = 0; i < tbl_size; i++) {
10929 		if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10930 			vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10931 		else
10932 			vnic_rx = bp->rss_indir_tbl[i];
10933 
10934 		if (rxr_id == vnic_rx)
10935 			return true;
10936 	}
10937 
10938 	return false;
10939 }
10940 
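/* Update the MRU of @vnic if it uses RX ring @rxr_id.  A zero MRU quiesces
 * RX on the VNIC, such as during a queue restart; restoring a non-zero MRU
 * reprograms the RSS table first.
 */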
10941 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10942 				u16 mru, int rxr_id)
10943 {
10944 	int rc;
10945 
10946 	if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10947 		return 0;
10948 
10949 	if (mru) {
10950 		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10951 		if (rc) {
10952 			netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10953 				   vnic->vnic_id, rc);
10954 			return rc;
10955 		}
10956 	}
10957 	vnic->mru = mru;
10958 	bnxt_hwrm_vnic_update(bp, vnic,
10959 			      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10960 
10961 	return 0;
10962 }
10963 
10964 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10965 {
10966 	struct ethtool_rxfh_context *ctx;
10967 	unsigned long context;
10968 	int rc;
10969 
10970 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10971 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10972 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10973 
10974 		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10975 		if (rc)
10976 			return rc;
10977 	}
10978 
10979 	return 0;
10980 }
10981 
10982 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10983 {
10984 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10985 	struct ethtool_rxfh_context *ctx;
10986 	unsigned long context;
10987 
10988 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10989 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10990 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10991 
10992 		if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10993 		    bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10994 		    __bnxt_setup_vnic_p5(bp, vnic)) {
10995 			netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10996 				   rss_ctx->index);
10997 			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10998 			ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10999 		}
11000 	}
11001 }
11002 
11003 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
11004 {
11005 	struct ethtool_rxfh_context *ctx;
11006 	unsigned long context;
11007 
11008 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
11009 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
11010 
11011 		bnxt_del_one_rss_ctx(bp, rss_ctx, false);
11012 	}
11013 }
11014 
11015 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
11016 static bool bnxt_promisc_ok(struct bnxt *bp)
11017 {
11018 #ifdef CONFIG_BNXT_SRIOV
11019 	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
11020 		return false;
11021 #endif
11022 	return true;
11023 }
11024 
11025 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
11026 {
11027 	struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
11028 	int rc;
11029 
11030 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
11031 	if (rc) {
11032 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11033 			   rc);
11034 		return rc;
11035 	}
11036 
11037 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
11038 	if (rc) {
11039 		netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
11040 			   rc);
11041 		return rc;
11042 	}
11043 	return rc;
11044 }
11045 
11046 static int bnxt_cfg_rx_mode(struct bnxt *);
11047 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
11048 
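/* Program the chip for operation: allocate stat contexts, rings, ring
 * groups and the default VNIC, install the unicast MAC filter, and set the
 * RX mode and interrupt coalescing.  On error, all firmware resources
 * allocated so far are released.
 */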
11049 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
11050 {
11051 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
11052 	int rc = 0;
11053 	unsigned int rx_nr_rings = bp->rx_nr_rings;
11054 
11055 	if (irq_re_init) {
11056 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
11057 		if (rc) {
11058 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
11059 				   rc);
11060 			goto err_out;
11061 		}
11062 	}
11063 
11064 	rc = bnxt_hwrm_ring_alloc(bp);
11065 	if (rc) {
11066 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
11067 		goto err_out;
11068 	}
11069 
11070 	rc = bnxt_hwrm_ring_grp_alloc(bp);
11071 	if (rc) {
11072 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
11073 		goto err_out;
11074 	}
11075 
11076 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11077 		rx_nr_rings--;
11078 
11079 	/* default vnic 0 */
11080 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
11081 	if (rc) {
11082 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
11083 		goto err_out;
11084 	}
11085 
11086 	if (BNXT_VF(bp))
11087 		bnxt_hwrm_func_qcfg(bp);
11088 
11089 	rc = bnxt_setup_vnic(bp, vnic);
11090 	if (rc)
11091 		goto err_out;
11092 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11093 		bnxt_hwrm_update_rss_hash_cfg(bp);
11094 
11095 	if (bp->flags & BNXT_FLAG_RFS) {
11096 		rc = bnxt_alloc_rfs_vnics(bp);
11097 		if (rc)
11098 			goto err_out;
11099 	}
11100 
11101 	if (bp->flags & BNXT_FLAG_TPA) {
11102 		rc = bnxt_set_tpa(bp, true);
11103 		if (rc)
11104 			goto err_out;
11105 	}
11106 
11107 	if (BNXT_VF(bp))
11108 		bnxt_update_vf_mac(bp);
11109 
11110 	/* Filter for default vnic 0 */
11111 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11112 	if (rc) {
11113 		if (BNXT_VF(bp) && rc == -ENODEV)
11114 			netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11115 		else
11116 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11117 		goto err_out;
11118 	}
11119 	vnic->uc_filter_count = 1;
11120 
11121 	vnic->rx_mask = 0;
11122 	if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11123 		goto skip_rx_mask;
11124 
11125 	if (bp->dev->flags & IFF_BROADCAST)
11126 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11127 
11128 	if (bp->dev->flags & IFF_PROMISC)
11129 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11130 
11131 	if (bp->dev->flags & IFF_ALLMULTI) {
11132 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11133 		vnic->mc_list_count = 0;
11134 	} else if (bp->dev->flags & IFF_MULTICAST) {
11135 		u32 mask = 0;
11136 
11137 		bnxt_mc_list_updated(bp, &mask);
11138 		vnic->rx_mask |= mask;
11139 	}
11140 
11141 	rc = bnxt_cfg_rx_mode(bp);
11142 	if (rc)
11143 		goto err_out;
11144 
11145 skip_rx_mask:
11146 	rc = bnxt_hwrm_set_coal(bp);
11147 	if (rc)
11148 			    rc);
11149 				rc);
11150 
11151 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11152 		rc = bnxt_setup_nitroa0_vnic(bp);
11153 		if (rc)
11154 			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11155 				   rc);
11156 	}
11157 
11158 	if (BNXT_VF(bp)) {
11159 		bnxt_hwrm_func_qcfg(bp);
11160 		netdev_update_features(bp->dev);
11161 	}
11162 
11163 	return 0;
11164 
11165 err_out:
11166 	bnxt_hwrm_resource_free(bp, 0, true);
11167 
11168 	return rc;
11169 }
11170 
11171 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11172 {
11173 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11174 	return 0;
11175 }
11176 
11177 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11178 {
11179 	bnxt_init_cp_rings(bp);
11180 	bnxt_init_rx_rings(bp);
11181 	bnxt_init_tx_rings(bp);
11182 	bnxt_init_ring_grps(bp, irq_re_init);
11183 	bnxt_init_vnics(bp);
11184 
11185 	return bnxt_init_chip(bp, irq_re_init);
11186 }
11187 
11188 static int bnxt_set_real_num_queues(struct bnxt *bp)
11189 {
11190 	int rc;
11191 	struct net_device *dev = bp->dev;
11192 
11193 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11194 					  bp->tx_nr_rings_xdp);
11195 	if (rc)
11196 		return rc;
11197 
11198 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11199 	if (rc)
11200 		return rc;
11201 
11202 #ifdef CONFIG_RFS_ACCEL
11203 	if (bp->flags & BNXT_FLAG_RFS)
11204 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11205 #endif
11206 
11207 	return rc;
11208 }
11209 
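/* Trim *rx and *tx so that they fit within @max completion rings.  When
 * rings are shared, each RX/TX pair uses one completion ring and both
 * counts are simply capped at @max; otherwise rings are shaved off the
 * larger side until the sum fits.
 */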
11210 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11211 			     bool shared)
11212 {
11213 	int _rx = *rx, _tx = *tx;
11214 
11215 	if (shared) {
11216 		*rx = min_t(int, _rx, max);
11217 		*tx = min_t(int, _tx, max);
11218 	} else {
11219 		if (max < 2)
11220 			return -ENOMEM;
11221 
11222 		while (_rx + _tx > max) {
11223 			if (_rx > _tx && _rx > 1)
11224 				_rx--;
11225 			else if (_tx > 1)
11226 				_tx--;
11227 		}
11228 		*rx = _rx;
11229 		*tx = _tx;
11230 	}
11231 	return 0;
11232 }
11233 
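/* Convert a TX ring count to a completion ring count.  The non-XDP TX
 * rings are split into @tx_sets per-TC sets that share completion rings,
 * while XDP TX rings map 1:1: e.g. 8 TX rings with 2 TCs and 2 XDP rings
 * need (8 - 2) / 2 + 2 = 5 completion rings.
 */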
11234 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11235 {
11236 	return (tx - tx_xdp) / tx_sets + tx_xdp;
11237 }
11238 
11239 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11240 {
11241 	int tcs = bp->num_tc;
11242 
11243 	if (!tcs)
11244 		tcs = 1;
11245 	return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11246 }
11247 
11248 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11249 {
11250 	int tcs = bp->num_tc;
11251 
	if (!tcs)
		tcs = 1;
11252 	return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11253 	       bp->tx_nr_rings_xdp;
11254 }
11255 
11256 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11257 			   bool sh)
11258 {
11259 	int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11260 
11261 	if (tx_cp != *tx) {
11262 		int tx_saved = tx_cp, rc;
11263 
11264 		rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11265 		if (rc)
11266 			return rc;
11267 		if (tx_cp != tx_saved)
11268 			*tx = bnxt_num_cp_to_tx(bp, tx_cp);
11269 		return 0;
11270 	}
11271 	return __bnxt_trim_rings(bp, rx, tx, max, sh);
11272 }
11273 
11274 static void bnxt_setup_msix(struct bnxt *bp)
11275 {
11276 	const int len = sizeof(bp->irq_tbl[0].name);
11277 	struct net_device *dev = bp->dev;
11278 	int tcs, i;
11279 
11280 	tcs = bp->num_tc;
11281 	if (tcs) {
11282 		int i, off, count;
11283 
11284 		for (i = 0; i < tcs; i++) {
11285 			count = bp->tx_nr_rings_per_tc;
11286 			off = BNXT_TC_TO_RING_BASE(bp, i);
11287 			netdev_set_tc_queue(dev, i, count, off);
11288 		}
11289 	}
11290 
11291 	for (i = 0; i < bp->cp_nr_rings; i++) {
11292 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11293 		char *attr;
11294 
11295 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11296 			attr = "TxRx";
11297 		else if (i < bp->rx_nr_rings)
11298 			attr = "rx";
11299 		else
11300 			attr = "tx";
11301 
11302 		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11303 			 attr, i);
11304 		bp->irq_tbl[map_idx].handler = bnxt_msix;
11305 	}
11306 }
11307 
11308 static int bnxt_init_int_mode(struct bnxt *bp);
11309 
11310 static int bnxt_change_msix(struct bnxt *bp, int total)
11311 {
11312 	struct msi_map map;
11313 	int i;
11314 
11315 	/* add MSIX to the end if needed */
11316 	for (i = bp->total_irqs; i < total; i++) {
11317 		map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11318 		if (map.index < 0)
11319 			return bp->total_irqs;
11320 		bp->irq_tbl[i].vector = map.virq;
11321 		bp->total_irqs++;
11322 	}
11323 
11324 	/* trim MSIX from the end if needed */
11325 	for (i = bp->total_irqs; i > total; i--) {
11326 		map.index = i - 1;
11327 		map.virq = bp->irq_tbl[i - 1].vector;
11328 		pci_msix_free_irq(bp->pdev, map);
11329 		bp->total_irqs--;
11330 	}
11331 	return bp->total_irqs;
11332 }
11333 
11334 static int bnxt_setup_int_mode(struct bnxt *bp)
11335 {
11336 	int rc;
11337 
11338 	if (!bp->irq_tbl) {
11339 		rc = bnxt_init_int_mode(bp);
11340 		if (rc || !bp->irq_tbl)
11341 			return rc ?: -ENODEV;
11342 	}
11343 
11344 	bnxt_setup_msix(bp);
11345 
11346 	rc = bnxt_set_real_num_queues(bp);
11347 	return rc;
11348 }
11349 
11350 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11351 {
11352 	return bp->hw_resc.max_rsscos_ctxs;
11353 }
11354 
11355 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11356 {
11357 	return bp->hw_resc.max_vnics;
11358 }
11359 
11360 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11361 {
11362 	return bp->hw_resc.max_stat_ctxs;
11363 }
11364 
11365 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11366 {
11367 	return bp->hw_resc.max_cp_rings;
11368 }
11369 
11370 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11371 {
11372 	unsigned int cp = bp->hw_resc.max_cp_rings;
11373 
11374 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11375 		cp -= bnxt_get_ulp_msix_num(bp);
11376 
11377 	return cp;
11378 }
11379 
11380 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11381 {
11382 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11383 
11384 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11385 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11386 
11387 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11388 }
11389 
11390 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11391 {
11392 	bp->hw_resc.max_irqs = max_irqs;
11393 }
11394 
11395 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11396 {
11397 	unsigned int cp;
11398 
11399 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
11400 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11401 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11402 	else
11403 		return cp - bp->cp_nr_rings;
11404 }
11405 
11406 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11407 {
11408 	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11409 }
11410 
11411 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11412 {
11413 	int max_irq = bnxt_get_max_func_irqs(bp);
11414 	int total_req = bp->cp_nr_rings + num;
11415 
11416 	if (max_irq < total_req) {
11417 		num = max_irq - bp->cp_nr_rings;
11418 		if (num <= 0)
11419 			return 0;
11420 	}
11421 	return num;
11422 }
11423 
11424 static int bnxt_get_num_msix(struct bnxt *bp)
11425 {
11426 	if (!BNXT_NEW_RM(bp))
11427 		return bnxt_get_max_func_irqs(bp);
11428 
11429 	return bnxt_nq_rings_in_use(bp);
11430 }
11431 
11432 static int bnxt_init_int_mode(struct bnxt *bp)
11433 {
11434 	int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11435 
11436 	total_vecs = bnxt_get_num_msix(bp);
11437 	max = bnxt_get_max_func_irqs(bp);
11438 	if (total_vecs > max)
11439 		total_vecs = max;
11440 
11441 	if (!total_vecs)
11442 		return 0;
11443 
11444 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11445 		min = 2;
11446 
11447 	total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11448 					   PCI_IRQ_MSIX);
11449 	ulp_msix = bnxt_get_ulp_msix_num(bp);
11450 	if (total_vecs < 0 || total_vecs < ulp_msix) {
11451 		rc = -ENODEV;
11452 		goto msix_setup_exit;
11453 	}
11454 
11455 	tbl_size = total_vecs;
11456 	if (pci_msix_can_alloc_dyn(bp->pdev))
11457 		tbl_size = max;
11458 	bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11459 	if (bp->irq_tbl) {
11460 		for (i = 0; i < total_vecs; i++)
11461 			bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11462 
11463 		bp->total_irqs = total_vecs;
11464 		/* Trim rings based on the number of vectors allocated */
11465 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11466 				     total_vecs - ulp_msix, min == 1);
11467 		if (rc)
11468 			goto msix_setup_exit;
11469 
11470 		tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11471 		bp->cp_nr_rings = (min == 1) ?
11472 				  max_t(int, tx_cp, bp->rx_nr_rings) :
11473 				  tx_cp + bp->rx_nr_rings;
11474 
11475 	} else {
11476 		rc = -ENOMEM;
11477 		goto msix_setup_exit;
11478 	}
11479 	return 0;
11480 
11481 msix_setup_exit:
11482 	netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11483 	kfree(bp->irq_tbl);
11484 	bp->irq_tbl = NULL;
11485 	pci_free_irq_vectors(bp->pdev);
11486 	return rc;
11487 }
11488 
11489 static void bnxt_clear_int_mode(struct bnxt *bp)
11490 {
11491 	pci_free_irq_vectors(bp->pdev);
11492 
11493 	kfree(bp->irq_tbl);
11494 	bp->irq_tbl = NULL;
11495 }
11496 
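/* Reserve rings and MSI-X vectors with firmware.  If the required vector
 * count changes and vectors cannot be resized dynamically, interrupt mode
 * is torn down and re-initialized around the reservation; otherwise the
 * vector table is grown or trimmed in place via bnxt_change_msix().
 */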
11497 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11498 {
11499 	bool irq_cleared = false;
11500 	bool irq_change = false;
11501 	int tcs = bp->num_tc;
11502 	int irqs_required;
11503 	int rc;
11504 
11505 	if (!bnxt_need_reserve_rings(bp))
11506 		return 0;
11507 
11508 	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11509 		int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11510 
11511 		if (ulp_msix > bp->ulp_num_msix_want)
11512 			ulp_msix = bp->ulp_num_msix_want;
11513 		irqs_required = ulp_msix + bp->cp_nr_rings;
11514 	} else {
11515 		irqs_required = bnxt_get_num_msix(bp);
11516 	}
11517 
11518 	if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11519 		irq_change = true;
11520 		if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11521 			bnxt_ulp_irq_stop(bp);
11522 			bnxt_clear_int_mode(bp);
11523 			irq_cleared = true;
11524 		}
11525 	}
11526 	rc = __bnxt_reserve_rings(bp);
11527 	if (irq_cleared) {
11528 		if (!rc)
11529 			rc = bnxt_init_int_mode(bp);
11530 		bnxt_ulp_irq_restart(bp, rc);
11531 	} else if (irq_change && !rc) {
11532 		if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11533 			rc = -ENOSPC;
11534 	}
11535 	if (rc) {
11536 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11537 		return rc;
11538 	}
11539 	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11540 		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11541 		netdev_err(bp->dev, "tx ring reservation failure\n");
11542 		netdev_reset_tc(bp->dev);
11543 		bp->num_tc = 0;
11544 		if (bp->tx_nr_rings_xdp)
11545 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11546 		else
11547 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11548 		return -ENOMEM;
11549 	}
11550 	return 0;
11551 }
11552 
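/* Quiesce the TX rings of one NAPI instance for a queue restart: mark them
 * closing, fence off in-flight polls, and stop the stack queues.  In TPH
 * mode the TX and completion rings are also freed so they can be
 * reallocated for the new CPU affinity.
 */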
11553 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11554 {
11555 	struct bnxt_tx_ring_info *txr;
11556 	struct netdev_queue *txq;
11557 	struct bnxt_napi *bnapi;
11558 	int i;
11559 
11560 	bnapi = bp->bnapi[idx];
11561 	bnxt_for_each_napi_tx(i, bnapi, txr) {
11562 		WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11563 		synchronize_net();
11564 
11565 		if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11566 			txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11567 			if (txq) {
11568 				__netif_tx_lock_bh(txq);
11569 				netif_tx_stop_queue(txq);
11570 				__netif_tx_unlock_bh(txq);
11571 			}
11572 		}
11573 
11574 		if (!bp->tph_mode)
11575 			continue;
11576 
11577 		bnxt_hwrm_tx_ring_free(bp, txr, true);
11578 		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11579 		bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11580 		bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11581 	}
11582 }
11583 
11584 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11585 {
11586 	struct bnxt_tx_ring_info *txr;
11587 	struct netdev_queue *txq;
11588 	struct bnxt_napi *bnapi;
11589 	int rc, i;
11590 
11591 	bnapi = bp->bnapi[idx];
11592 	/* All rings have been reserved and previously allocated.
11593 	 * Reallocating with the same parameters should never fail.
11594 	 */
11595 	bnxt_for_each_napi_tx(i, bnapi, txr) {
11596 		if (!bp->tph_mode)
11597 			goto start_tx;
11598 
11599 		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11600 		if (rc)
11601 			return rc;
11602 
11603 		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11604 		if (rc)
11605 			return rc;
11606 
11607 		txr->tx_prod = 0;
11608 		txr->tx_cons = 0;
11609 		txr->tx_hw_cons = 0;
11610 start_tx:
11611 		WRITE_ONCE(txr->dev_state, 0);
11612 		synchronize_net();
11613 
11614 		if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11615 			continue;
11616 
11617 		txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11618 		if (txq)
11619 			netif_tx_start_queue(txq);
11620 	}
11621 
11622 	return 0;
11623 }
11624 
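/* IRQ affinity notifier, used only when TPH is enabled: look up the
 * steering tag for the new CPU, program it into the device's ST table, and
 * restart the affected RX queue so the change takes effect.
 */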
11625 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11626 				     const cpumask_t *mask)
11627 {
11628 	struct bnxt_irq *irq;
11629 	u16 tag;
11630 	int err;
11631 
11632 	irq = container_of(notify, struct bnxt_irq, affinity_notify);
11633 
11634 	if (!irq->bp->tph_mode)
11635 		return;
11636 
11637 	cpumask_copy(irq->cpu_mask, mask);
11638 
11639 	if (irq->ring_nr >= irq->bp->rx_nr_rings)
11640 		return;
11641 
11642 	if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11643 				cpumask_first(irq->cpu_mask), &tag))
11644 		return;
11645 
11646 	if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11647 		return;
11648 
11649 	netdev_lock(irq->bp->dev);
11650 	if (netif_running(irq->bp->dev)) {
11651 		err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11652 		if (err)
11653 			netdev_err(irq->bp->dev,
11654 				   "RX queue restart failed: err=%d\n", err);
11655 	}
11656 	netdev_unlock(irq->bp->dev);
11657 }
11658 
11659 static void bnxt_irq_affinity_release(struct kref *ref)
11660 {
11661 	struct irq_affinity_notify *notify =
11662 		container_of(ref, struct irq_affinity_notify, kref);
11663 	struct bnxt_irq *irq;
11664 
11665 	irq = container_of(notify, struct bnxt_irq, affinity_notify);
11666 
11667 	if (!irq->bp->tph_mode)
11668 		return;
11669 
11670 	if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11671 		netdev_err(irq->bp->dev,
11672 			   "Setting ST=0 for MSIX entry %d failed\n",
11673 			   irq->msix_nr);
11674 		return;
11675 	}
11676 }
11677 
11678 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11679 {
11680 	irq_set_affinity_notifier(irq->vector, NULL);
11681 }
11682 
11683 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11684 {
11685 	struct irq_affinity_notify *notify;
11686 
11687 	irq->bp = bp;
11688 
11689 	/* Nothing to do if TPH is not enabled */
11690 	if (!bp->tph_mode)
11691 		return;
11692 
11693 	/* Register IRQ affinity notifier */
11694 	notify = &irq->affinity_notify;
11695 	notify->irq = irq->vector;
11696 	notify->notify = bnxt_irq_affinity_notify;
11697 	notify->release = bnxt_irq_affinity_release;
11698 
11699 	irq_set_affinity_notifier(irq->vector, notify);
11700 }
11701 
11702 static void bnxt_free_irq(struct bnxt *bp)
11703 {
11704 	struct bnxt_irq *irq;
11705 	int i;
11706 
11707 #ifdef CONFIG_RFS_ACCEL
11708 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11709 	bp->dev->rx_cpu_rmap = NULL;
11710 #endif
11711 	if (!bp->irq_tbl || !bp->bnapi)
11712 		return;
11713 
11714 	for (i = 0; i < bp->cp_nr_rings; i++) {
11715 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11716 
11717 		irq = &bp->irq_tbl[map_idx];
11718 		if (irq->requested) {
11719 			if (irq->have_cpumask) {
11720 				irq_update_affinity_hint(irq->vector, NULL);
11721 				free_cpumask_var(irq->cpu_mask);
11722 				irq->have_cpumask = 0;
11723 			}
11724 
11725 			bnxt_release_irq_notifier(irq);
11726 
11727 			free_irq(irq->vector, bp->bnapi[i]);
11728 		}
11729 
11730 		irq->requested = 0;
11731 	}
11732 
11733 	/* Disable TPH support */
11734 	pcie_disable_tph(bp->pdev);
11735 	bp->tph_mode = 0;
11736 }
11737 
11738 static int bnxt_request_irq(struct bnxt *bp)
11739 {
11740 	struct cpu_rmap *rmap = NULL;
11741 	int i, j, rc = 0;
11742 	unsigned long flags = 0;
11743 
11744 	rc = bnxt_setup_int_mode(bp);
11745 	if (rc) {
11746 		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11747 			   rc);
11748 		return rc;
11749 	}
11750 #ifdef CONFIG_RFS_ACCEL
11751 	rmap = bp->dev->rx_cpu_rmap;
11752 #endif
11753 
11754 	/* Enable TPH support as part of IRQ request */
11755 	rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11756 	if (!rc)
11757 		bp->tph_mode = PCI_TPH_ST_IV_MODE;
11758 
11759 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11760 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11761 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11762 
11763 		if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11764 		    rmap && bp->bnapi[i]->rx_ring) {
11765 			rc = irq_cpu_rmap_add(rmap, irq->vector);
11766 			if (rc)
11767 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11768 					    j);
11769 			j++;
11770 		}
11771 
11772 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11773 				 bp->bnapi[i]);
11774 		if (rc)
11775 			break;
11776 
11777 		netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11778 		irq->requested = 1;
11779 
11780 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11781 			int numa_node = dev_to_node(&bp->pdev->dev);
11782 			u16 tag;
11783 
11784 			irq->have_cpumask = 1;
11785 			irq->msix_nr = map_idx;
11786 			irq->ring_nr = i;
11787 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11788 					irq->cpu_mask);
11789 			rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11790 			if (rc) {
11791 				netdev_warn(bp->dev,
11792 					    "Update affinity hint failed, IRQ = %d\n",
11793 					    irq->vector);
11794 				break;
11795 			}
11796 
11797 			bnxt_register_irq_notifier(bp, irq);
11798 
11799 			/* Init ST table entry */
11800 			if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11801 						cpumask_first(irq->cpu_mask),
11802 						&tag))
11803 				continue;
11804 
11805 			pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11806 		}
11807 	}
11808 	return rc;
11809 }
11810 
11811 static void bnxt_del_napi(struct bnxt *bp)
11812 {
11813 	int i;
11814 
11815 	if (!bp->bnapi)
11816 		return;
11817 
11818 	for (i = 0; i < bp->rx_nr_rings; i++)
11819 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11820 	for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11821 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11822 
11823 	for (i = 0; i < bp->cp_nr_rings; i++) {
11824 		struct bnxt_napi *bnapi = bp->bnapi[i];
11825 
11826 		__netif_napi_del_locked(&bnapi->napi);
11827 	}
11828 	/* We called __netif_napi_del_locked(), so we need
11829 	 * to respect an RCU grace period before freeing napi structures.
11830 	 */
11831 	synchronize_net();
11832 }
11833 
11834 static void bnxt_init_napi(struct bnxt *bp)
11835 {
11836 	int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11837 	unsigned int cp_nr_rings = bp->cp_nr_rings;
11838 	struct bnxt_napi *bnapi;
11839 	int i;
11840 
11841 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11842 		poll_fn = bnxt_poll_p5;
11843 	else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11844 		cp_nr_rings--;
11845 
11846 	set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11847 
11848 	for (i = 0; i < cp_nr_rings; i++) {
11849 		bnapi = bp->bnapi[i];
11850 		netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11851 					     bnapi->index);
11852 	}
11853 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11854 		bnapi = bp->bnapi[cp_nr_rings];
11855 		netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11856 	}
11857 }
11858 
11859 static void bnxt_disable_napi(struct bnxt *bp)
11860 {
11861 	int i;
11862 
11863 	if (!bp->bnapi ||
11864 	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11865 		return;
11866 
11867 	for (i = 0; i < bp->cp_nr_rings; i++) {
11868 		struct bnxt_napi *bnapi = bp->bnapi[i];
11869 		struct bnxt_cp_ring_info *cpr;
11870 
11871 		cpr = &bnapi->cp_ring;
11872 		if (bnapi->tx_fault)
11873 			cpr->sw_stats->tx.tx_resets++;
11874 		if (bnapi->in_reset)
11875 			cpr->sw_stats->rx.rx_resets++;
11876 		napi_disable_locked(&bnapi->napi);
11877 	}
11878 }
11879 
11880 static void bnxt_enable_napi(struct bnxt *bp)
11881 {
11882 	int i;
11883 
11884 	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11885 	for (i = 0; i < bp->cp_nr_rings; i++) {
11886 		struct bnxt_napi *bnapi = bp->bnapi[i];
11887 		struct bnxt_cp_ring_info *cpr;
11888 
11889 		bnapi->tx_fault = 0;
11890 
11891 		cpr = &bnapi->cp_ring;
11892 		bnapi->in_reset = false;
11893 
11894 		if (bnapi->rx_ring) {
11895 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11896 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11897 		}
11898 		napi_enable_locked(&bnapi->napi);
11899 	}
11900 }
11901 
11902 void bnxt_tx_disable(struct bnxt *bp)
11903 {
11904 	int i;
11905 	struct bnxt_tx_ring_info *txr;
11906 
11907 	if (bp->tx_ring) {
11908 		for (i = 0; i < bp->tx_nr_rings; i++) {
11909 			txr = &bp->tx_ring[i];
11910 			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11911 		}
11912 	}
11913 	/* Make sure napi polls see @dev_state change */
11914 	synchronize_net();
11915 	/* Drop carrier first to prevent TX timeout */
11916 	netif_carrier_off(bp->dev);
11917 	/* Stop all TX queues */
11918 	netif_tx_disable(bp->dev);
11919 }
11920 
11921 void bnxt_tx_enable(struct bnxt *bp)
11922 {
11923 	int i;
11924 	struct bnxt_tx_ring_info *txr;
11925 
11926 	for (i = 0; i < bp->tx_nr_rings; i++) {
11927 		txr = &bp->tx_ring[i];
11928 		WRITE_ONCE(txr->dev_state, 0);
11929 	}
11930 	/* Make sure napi polls see @dev_state change */
11931 	synchronize_net();
11932 	netif_tx_wake_all_queues(bp->dev);
11933 	if (BNXT_LINK_IS_UP(bp))
11934 		netif_carrier_on(bp->dev);
11935 }
11936 
11937 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11938 {
11939 	u8 active_fec = link_info->active_fec_sig_mode &
11940 			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11941 
11942 	switch (active_fec) {
11943 	default:
11944 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11945 		return "None";
11946 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11947 		return "Clause 74 BaseR";
11948 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11949 		return "Clause 91 RS(528,514)";
11950 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11951 		return "Clause 91 RS544_1XN";
11952 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11953 		return "Clause 91 RS(544,514)";
11954 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11955 		return "Clause 91 RS272_1XN";
11956 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11957 		return "Clause 91 RS(272,257)";
11958 	}
11959 }
11960 
11961 static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
11962 {
11963 	u8 reason = link_info->link_down_reason;
11964 
11965 	/* Multiple bits can be set; we report only one, in order of
11966 	 * priority.
11967 	 */
11968 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
11969 		return "(Remote fault)";
11970 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
11971 		return "(OTP Speed limit violation)";
11972 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
11973 		return "(Cable removed)";
11974 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
11975 		return "(Module fault)";
11976 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
11977 		return "(BMC request down)";
11978 	return "";
11979 }
11980 
11981 void bnxt_report_link(struct bnxt *bp)
11982 {
11983 	if (BNXT_LINK_IS_UP(bp)) {
11984 		const char *signal = "";
11985 		const char *flow_ctrl;
11986 		const char *duplex;
11987 		u32 speed;
11988 		u16 fec;
11989 
11990 		netif_carrier_on(bp->dev);
11991 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11992 		if (speed == SPEED_UNKNOWN) {
11993 			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11994 			return;
11995 		}
11996 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11997 			duplex = "full";
11998 		else
11999 			duplex = "half";
12000 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
12001 			flow_ctrl = "ON - receive & transmit";
12002 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
12003 			flow_ctrl = "ON - transmit";
12004 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
12005 			flow_ctrl = "ON - receive";
12006 		else
12007 			flow_ctrl = "none";
12008 		if (bp->link_info.phy_qcfg_resp.option_flags &
12009 		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
12010 			u8 sig_mode = bp->link_info.active_fec_sig_mode &
12011 				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
12012 			switch (sig_mode) {
12013 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
12014 				signal = "(NRZ) ";
12015 				break;
12016 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
12017 				signal = "(PAM4 56Gbps) ";
12018 				break;
12019 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
12020 				signal = "(PAM4 112Gbps) ";
12021 				break;
12022 			default:
12023 				break;
12024 			}
12025 		}
12026 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
12027 			    speed, signal, duplex, flow_ctrl);
12028 		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
12029 			netdev_info(bp->dev, "EEE is %s\n",
12030 				    bp->eee.eee_active ? "active" :
12031 							 "not active");
12032 		fec = bp->link_info.fec_cfg;
12033 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
12034 			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
12035 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
12036 				    bnxt_report_fec(&bp->link_info));
12037 	} else {
12038 		char *str = bnxt_link_down_reason(&bp->link_info);
12039 
12040 		netif_carrier_off(bp->dev);
12041 		netdev_err(bp->dev, "NIC Link is Down %s\n", str);
12042 	}
12043 }
12044 
12045 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
12046 {
12047 	if (!resp->supported_speeds_auto_mode &&
12048 	    !resp->supported_speeds_force_mode &&
12049 	    !resp->supported_pam4_speeds_auto_mode &&
12050 	    !resp->supported_pam4_speeds_force_mode &&
12051 	    !resp->supported_speeds2_auto_mode &&
12052 	    !resp->supported_speeds2_force_mode)
12053 		return true;
12054 	return false;
12055 }
12056 
12057 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
12058 {
12059 	struct bnxt_link_info *link_info = &bp->link_info;
12060 	struct hwrm_port_phy_qcaps_output *resp;
12061 	struct hwrm_port_phy_qcaps_input *req;
12062 	int rc = 0;
12063 
12064 	if (bp->hwrm_spec_code < 0x10201)
12065 		return 0;
12066 
12067 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
12068 	if (rc)
12069 		return rc;
12070 
12071 	resp = hwrm_req_hold(bp, req);
12072 	rc = hwrm_req_send(bp, req);
12073 	if (rc)
12074 		goto hwrm_phy_qcaps_exit;
12075 
12076 	bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
12077 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
12078 		struct ethtool_keee *eee = &bp->eee;
12079 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
12080 
12081 		_bnxt_fw_to_linkmode(eee->supported, fw_speeds);
12082 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
12083 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
12084 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
12085 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
12086 	}
12087 
12088 	if (bp->hwrm_spec_code >= 0x10a01) {
12089 		if (bnxt_phy_qcaps_no_speed(resp)) {
12090 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
12091 			netdev_warn(bp->dev, "Ethernet link disabled\n");
12092 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
12093 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
12094 			netdev_info(bp->dev, "Ethernet link enabled\n");
12095 			/* Phy re-enabled, reprobe the speeds */
12096 			link_info->support_auto_speeds = 0;
12097 			link_info->support_pam4_auto_speeds = 0;
12098 			link_info->support_auto_speeds2 = 0;
12099 		}
12100 	}
12101 	if (resp->supported_speeds_auto_mode)
12102 		link_info->support_auto_speeds =
12103 			le16_to_cpu(resp->supported_speeds_auto_mode);
12104 	if (resp->supported_pam4_speeds_auto_mode)
12105 		link_info->support_pam4_auto_speeds =
12106 			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12107 	if (resp->supported_speeds2_auto_mode)
12108 		link_info->support_auto_speeds2 =
12109 			le16_to_cpu(resp->supported_speeds2_auto_mode);
12110 
12111 	bp->port_count = resp->port_cnt;
12112 
12113 hwrm_phy_qcaps_exit:
12114 	hwrm_req_drop(bp, req);
12115 	return rc;
12116 }
12117 
12118 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12119 {
12120 	struct hwrm_port_mac_qcaps_output *resp;
12121 	struct hwrm_port_mac_qcaps_input *req;
12122 	int rc;
12123 
12124 	if (bp->hwrm_spec_code < 0x10a03)
12125 		return;
12126 
12127 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12128 	if (rc)
12129 		return;
12130 
12131 	resp = hwrm_req_hold(bp, req);
12132 	rc = hwrm_req_send_silent(bp, req);
12133 	if (!rc)
12134 		bp->mac_flags = resp->flags;
12135 	hwrm_req_drop(bp, req);
12136 }
12137 
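/* Return true if @advertising contains any speed bits that are absent from
 * @supported.
 */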
12138 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12139 {
12140 	u16 diff = advertising ^ supported;
12141 
12142 	return ((supported | diff) != supported);
12143 }
12144 
12145 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12146 {
12147 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12148 
12149 	/* Check if any advertised speeds are no longer supported. The caller
12150 	 * holds the link_lock mutex, so we can modify link_info settings.
12151 	 */
12152 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12153 		if (bnxt_support_dropped(link_info->advertising,
12154 					 link_info->support_auto_speeds2)) {
12155 			link_info->advertising = link_info->support_auto_speeds2;
12156 			return true;
12157 		}
12158 		return false;
12159 	}
12160 	if (bnxt_support_dropped(link_info->advertising,
12161 				 link_info->support_auto_speeds)) {
12162 		link_info->advertising = link_info->support_auto_speeds;
12163 		return true;
12164 	}
12165 	if (bnxt_support_dropped(link_info->advertising_pam4,
12166 				 link_info->support_pam4_auto_speeds)) {
12167 		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12168 		return true;
12169 	}
12170 	return false;
12171 }
12172 
12173 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12174 {
12175 	struct bnxt_link_info *link_info = &bp->link_info;
12176 	struct hwrm_port_phy_qcfg_output *resp;
12177 	struct hwrm_port_phy_qcfg_input *req;
12178 	u8 link_state = link_info->link_state;
12179 	bool support_changed;
12180 	int rc;
12181 
12182 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12183 	if (rc)
12184 		return rc;
12185 
12186 	resp = hwrm_req_hold(bp, req);
12187 	rc = hwrm_req_send(bp, req);
12188 	if (rc) {
12189 		hwrm_req_drop(bp, req);
12190 		if (BNXT_VF(bp) && rc == -ENODEV) {
12191 			netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12192 			rc = 0;
12193 		}
12194 		return rc;
12195 	}
12196 
12197 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12198 	link_info->phy_link_status = resp->link;
12199 	link_info->duplex = resp->duplex_cfg;
12200 	if (bp->hwrm_spec_code >= 0x10800)
12201 		link_info->duplex = resp->duplex_state;
12202 	link_info->pause = resp->pause;
12203 	link_info->auto_mode = resp->auto_mode;
12204 	link_info->auto_pause_setting = resp->auto_pause;
12205 	link_info->lp_pause = resp->link_partner_adv_pause;
12206 	link_info->force_pause_setting = resp->force_pause;
12207 	link_info->duplex_setting = resp->duplex_cfg;
12208 	if (link_info->phy_link_status == BNXT_LINK_LINK) {
12209 		link_info->link_speed = le16_to_cpu(resp->link_speed);
12210 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12211 			link_info->active_lanes = resp->active_lanes;
12212 	} else {
12213 		link_info->link_speed = 0;
12214 		link_info->active_lanes = 0;
12215 	}
12216 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12217 	link_info->force_pam4_link_speed =
12218 		le16_to_cpu(resp->force_pam4_link_speed);
12219 	link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12220 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12221 	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12222 	link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12223 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12224 	link_info->auto_pam4_link_speeds =
12225 		le16_to_cpu(resp->auto_pam4_link_speed_mask);
12226 	link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12227 	link_info->lp_auto_link_speeds =
12228 		le16_to_cpu(resp->link_partner_adv_speeds);
12229 	link_info->lp_auto_pam4_link_speeds =
12230 		resp->link_partner_pam4_adv_speeds;
12231 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12232 	link_info->phy_ver[0] = resp->phy_maj;
12233 	link_info->phy_ver[1] = resp->phy_min;
12234 	link_info->phy_ver[2] = resp->phy_bld;
12235 	link_info->media_type = resp->media_type;
12236 	link_info->phy_type = resp->phy_type;
12237 	link_info->transceiver = resp->xcvr_pkg_type;
12238 	link_info->phy_addr = resp->eee_config_phy_addr &
12239 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12240 	link_info->module_status = resp->module_status;
12241 	link_info->link_down_reason = resp->link_down_reason;
12242 
12243 	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12244 		struct ethtool_keee *eee = &bp->eee;
12245 		u16 fw_speeds;
12246 
12247 		eee->eee_active = 0;
12248 		if (resp->eee_config_phy_addr &
12249 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12250 			eee->eee_active = 1;
12251 			fw_speeds = le16_to_cpu(
12252 				resp->link_partner_adv_eee_link_speed_mask);
12253 			_bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12254 		}
12255 
12256 		/* Pull initial EEE config */
12257 		if (!chng_link_state) {
12258 			if (resp->eee_config_phy_addr &
12259 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12260 				eee->eee_enabled = 1;
12261 
12262 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12263 			_bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12264 
12265 			if (resp->eee_config_phy_addr &
12266 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12267 				__le32 tmr;
12268 
12269 				eee->tx_lpi_enabled = 1;
12270 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12271 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
12272 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12273 			}
12274 		}
12275 	}
12276 
12277 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12278 	if (bp->hwrm_spec_code >= 0x10504) {
12279 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12280 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12281 	}
12282 	/* TODO: need to add more logic to report VF link */
12283 	if (chng_link_state) {
12284 		if (link_info->phy_link_status == BNXT_LINK_LINK)
12285 			link_info->link_state = BNXT_LINK_STATE_UP;
12286 		else
12287 			link_info->link_state = BNXT_LINK_STATE_DOWN;
12288 		if (link_state != link_info->link_state)
12289 			bnxt_report_link(bp);
12290 	} else {
12291 		/* always report link down if not asked to update the link state */
12292 		link_info->link_state = BNXT_LINK_STATE_DOWN;
12293 	}
12294 	hwrm_req_drop(bp, req);
12295 
12296 	if (!BNXT_PHY_CFG_ABLE(bp))
12297 		return 0;
12298 
12299 	support_changed = bnxt_support_speed_dropped(link_info);
12300 	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12301 		bnxt_hwrm_set_link_setting(bp, true, false);
12302 	return 0;
12303 }
12304 
12305 static void bnxt_get_port_module_status(struct bnxt *bp)
12306 {
12307 	struct bnxt_link_info *link_info = &bp->link_info;
12308 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12309 	u8 module_status;
12310 
12311 	if (bnxt_update_link(bp, true))
12312 		return;
12313 
12314 	module_status = link_info->module_status;
12315 	switch (module_status) {
12316 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12317 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12318 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12319 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12320 			    bp->pf.port_id);
12321 		if (bp->hwrm_spec_code >= 0x10201) {
12322 			netdev_warn(bp->dev, "Module part number %s\n",
12323 				    resp->phy_vendor_partnumber);
12324 		}
12325 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12326 			netdev_warn(bp->dev, "TX is disabled\n");
12327 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12328 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12329 	}
12330 }
12331 
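/* Fill in the pause fields of a PORT_PHY_CFG request.  With pause autoneg,
 * advertise the requested RX/TX pause; otherwise force it, mirroring the
 * forced setting into the auto_pause field as well on spec 1.2.1+ firmware.
 */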
12332 static void
12333 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12334 {
12335 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12336 		if (bp->hwrm_spec_code >= 0x10201)
12337 			req->auto_pause =
12338 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12339 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12340 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12341 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12342 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12343 		req->enables |=
12344 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12345 	} else {
12346 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12347 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12348 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12349 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12350 		req->enables |=
12351 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12352 		if (bp->hwrm_spec_code >= 0x10201) {
12353 			req->auto_pause = req->force_pause;
12354 			req->enables |= cpu_to_le32(
12355 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12356 		}
12357 	}
12358 }
12359 
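/* Fill in the autoneg or forced speed fields of a PORT_PHY_CFG request.
 * The speeds2 fields are used on PHYs with the SPEEDS2 capability, the
 * PAM4 fields when PAM4 signaling is requested, and the legacy fields
 * otherwise.  RESET_PHY makes the new settings take effect immediately.
 */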
12360 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12361 {
12362 	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12363 		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12364 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12365 			req->enables |=
12366 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12367 			req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12368 		} else if (bp->link_info.advertising) {
12369 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12370 			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12371 		}
12372 		if (bp->link_info.advertising_pam4) {
12373 			req->enables |=
12374 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12375 			req->auto_link_pam4_speed_mask =
12376 				cpu_to_le16(bp->link_info.advertising_pam4);
12377 		}
12378 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12379 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12380 	} else {
12381 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12382 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12383 			req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12384 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12385 			netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12386 				   (u32)bp->link_info.req_link_speed);
12387 		} else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12388 			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12389 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12390 		} else {
12391 			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12392 		}
12393 	}
12394 
12395 	/* tell chimp that the setting takes effect immediately */
12396 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12397 }
12398 
12399 int bnxt_hwrm_set_pause(struct bnxt *bp)
12400 {
12401 	struct hwrm_port_phy_cfg_input *req;
12402 	int rc;
12403 
12404 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12405 	if (rc)
12406 		return rc;
12407 
12408 	bnxt_hwrm_set_pause_common(bp, req);
12409 
12410 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12411 	    bp->link_info.force_link_chng)
12412 		bnxt_hwrm_set_link_common(bp, req);
12413 
12414 	rc = hwrm_req_send(bp, req);
12415 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12416 		/* Since changing the pause setting doesn't trigger any link
12417 		 * change event, the driver needs to update the current pause
12418 		 * result upon successful return of the phy_cfg command.
12419 		 */
12420 		bp->link_info.pause =
12421 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12422 		bp->link_info.auto_pause_setting = 0;
12423 		if (!bp->link_info.force_link_chng)
12424 			bnxt_report_link(bp);
12425 	}
12426 	bp->link_info.force_link_chng = false;
12427 	return rc;
12428 }
12429 
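/* Apply the EEE configuration cached in bp->eee to a PORT_PHY_CFG
 * request: the enable/disable and TX LPI flags, the advertised EEE
 * speed mask, and the LPI timer.
 */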
12430 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12431 			      struct hwrm_port_phy_cfg_input *req)
12432 {
12433 	struct ethtool_keee *eee = &bp->eee;
12434 
12435 	if (eee->eee_enabled) {
12436 		u16 eee_speeds;
12437 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12438 
12439 		if (eee->tx_lpi_enabled)
12440 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12441 		else
12442 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12443 
12444 		req->flags |= cpu_to_le32(flags);
12445 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12446 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12447 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12448 	} else {
12449 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12450 	}
12451 }
12452 
12453 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12454 {
12455 	struct hwrm_port_phy_cfg_input *req;
12456 	int rc;
12457 
12458 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12459 	if (rc)
12460 		return rc;
12461 
12462 	if (set_pause)
12463 		bnxt_hwrm_set_pause_common(bp, req);
12464 
12465 	bnxt_hwrm_set_link_common(bp, req);
12466 
12467 	if (set_eee)
12468 		bnxt_hwrm_set_eee(bp, req);
12469 	return hwrm_req_send(bp, req);
12470 }
12471 
12472 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12473 {
12474 	struct hwrm_port_phy_cfg_input *req;
12475 	int rc;
12476 
12477 	if (!BNXT_SINGLE_PF(bp))
12478 		return 0;
12479 
12480 	if (pci_num_vf(bp->pdev) &&
12481 	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12482 		return 0;
12483 
12484 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12485 	if (rc)
12486 		return rc;
12487 
12488 	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12489 	rc = hwrm_req_send(bp, req);
12490 	if (!rc) {
12491 		mutex_lock(&bp->link_lock);
12492 		/* The device is not obliged to bring the link down in certain
12493 		 * scenarios, even when forced.  Setting the state to unknown is
12494 		 * consistent with driver startup and will force the link state
12495 		 * to be reported during a subsequent open based on PORT_PHY_QCFG.
12496 		 */
12497 		bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12498 		mutex_unlock(&bp->link_lock);
12499 	}
12500 	return rc;
12501 }
12502 
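/* Ask the OP-TEE trusted OS to reload the firmware.  Only available
 * when the kernel is built with CONFIG_TEE_BNXT_FW.
 */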
12503 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12504 {
12505 #ifdef CONFIG_TEE_BNXT_FW
12506 	int rc = tee_bnxt_fw_load();
12507 
12508 	if (rc)
12509 		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12510 
12511 	return rc;
12512 #else
12513 	netdev_err(bp->dev, "OP-TEE not supported\n");
12514 	return -ENODEV;
12515 #endif
12516 }
12517 
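/* Poll the firmware health register until firmware is no longer booting
 * or recovering, then report whether it is healthy.  If it crashed with
 * no master function to recover it, fall back to an OP-TEE reset.
 */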
12518 static int bnxt_try_recover_fw(struct bnxt *bp)
12519 {
12520 	if (bp->fw_health && bp->fw_health->status_reliable) {
12521 		int retry = 0, rc;
12522 		u32 sts;
12523 
12524 		do {
12525 			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12526 			rc = bnxt_hwrm_poll(bp);
12527 			if (!BNXT_FW_IS_BOOTING(sts) &&
12528 			    !BNXT_FW_IS_RECOVERING(sts))
12529 				break;
12530 			retry++;
12531 		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12532 
12533 		if (!BNXT_FW_IS_HEALTHY(sts)) {
12534 			netdev_err(bp->dev,
12535 				   "Firmware not responding, status: 0x%x\n",
12536 				   sts);
12537 			rc = -ENODEV;
12538 		}
12539 		if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12540 			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12541 			return bnxt_fw_reset_via_optee(bp);
12542 		}
12543 		return rc;
12544 	}
12545 
12546 	return -ENODEV;
12547 }
12548 
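/* Forget all cached resource reservations.  On a normal open (not a
 * firmware reset), the requested ring counts are cleared as well.
 */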
12549 void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12550 {
12551 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12552 
12553 	if (!BNXT_NEW_RM(bp))
12554 		return; /* no resource reservations required */
12555 
12556 	hw_resc->resv_cp_rings = 0;
12557 	hw_resc->resv_stat_ctxs = 0;
12558 	hw_resc->resv_irqs = 0;
12559 	hw_resc->resv_tx_rings = 0;
12560 	hw_resc->resv_rx_rings = 0;
12561 	hw_resc->resv_hw_ring_grps = 0;
12562 	hw_resc->resv_vnics = 0;
12563 	hw_resc->resv_rsscos_ctxs = 0;
12564 	if (!fw_reset) {
12565 		bp->tx_nr_rings = 0;
12566 		bp->rx_nr_rings = 0;
12567 	}
12568 }
12569 
12570 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12571 {
12572 	int rc;
12573 
12574 	if (!BNXT_NEW_RM(bp))
12575 		return 0; /* no resource reservations required */
12576 
12577 	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12578 	if (rc)
12579 		netdev_err(bp->dev, "resc_qcaps failed\n");
12580 
12581 	bnxt_clear_reservations(bp, fw_reset);
12582 
12583 	return rc;
12584 }
12585 
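/* Notify firmware that the interface is going up or down.  The response
 * flags tell us whether resources or capabilities changed, or whether a
 * firmware reset completed while the interface was down; in those cases
 * firmware state is re-initialized and stale reservations are canceled.
 */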
12586 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12587 {
12588 	struct hwrm_func_drv_if_change_output *resp;
12589 	struct hwrm_func_drv_if_change_input *req;
12590 	bool resc_reinit = false;
12591 	bool caps_change = false;
12592 	int rc, retry = 0;
12593 	bool fw_reset;
12594 	u32 flags = 0;
12595 
12596 	fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12597 	bp->fw_reset_state = 0;
12598 
12599 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12600 		return 0;
12601 
12602 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12603 	if (rc)
12604 		return rc;
12605 
12606 	if (up)
12607 		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12608 	resp = hwrm_req_hold(bp, req);
12609 
12610 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12611 	while (retry < BNXT_FW_IF_RETRY) {
12612 		rc = hwrm_req_send(bp, req);
12613 		if (rc != -EAGAIN)
12614 			break;
12615 
12616 		msleep(50);
12617 		retry++;
12618 	}
12619 
12620 	if (rc == -EAGAIN) {
12621 		hwrm_req_drop(bp, req);
12622 		return rc;
12623 	} else if (!rc) {
12624 		flags = le32_to_cpu(resp->flags);
12625 	} else if (up) {
12626 		rc = bnxt_try_recover_fw(bp);
12627 		fw_reset = true;
12628 	}
12629 	hwrm_req_drop(bp, req);
12630 	if (rc)
12631 		return rc;
12632 
12633 	if (!up) {
12634 		bnxt_inv_fw_health_reg(bp);
12635 		return 0;
12636 	}
12637 
12638 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12639 		resc_reinit = true;
12640 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12641 	    test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12642 		fw_reset = true;
12643 	else
12644 		bnxt_remap_fw_health_regs(bp);
12645 
12646 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12647 		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12648 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12649 		return -ENODEV;
12650 	}
12651 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12652 		caps_change = true;
12653 
12654 	if (resc_reinit || fw_reset || caps_change) {
12655 		if (fw_reset || caps_change) {
12656 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12657 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12658 				bnxt_ulp_irq_stop(bp);
12659 			bnxt_free_ctx_mem(bp, false);
12660 			bnxt_dcb_free(bp);
12661 			rc = bnxt_fw_init_one(bp);
12662 			if (rc) {
12663 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12664 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12665 				return rc;
12666 			}
12667 			/* IRQ will be initialized later in bnxt_request_irq() */
12668 			bnxt_clear_int_mode(bp);
12669 		}
12670 		rc = bnxt_cancel_reservations(bp, fw_reset);
12671 	}
12672 	return rc;
12673 }
12674 
12675 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12676 {
12677 	struct hwrm_port_led_qcaps_output *resp;
12678 	struct hwrm_port_led_qcaps_input *req;
12679 	struct bnxt_pf_info *pf = &bp->pf;
12680 	int rc;
12681 
12682 	bp->num_leds = 0;
12683 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12684 		return 0;
12685 
12686 	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12687 	if (rc)
12688 		return rc;
12689 
12690 	req->port_id = cpu_to_le16(pf->port_id);
12691 	resp = hwrm_req_hold(bp, req);
12692 	rc = hwrm_req_send(bp, req);
12693 	if (rc) {
12694 		hwrm_req_drop(bp, req);
12695 		return rc;
12696 	}
12697 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12698 		int i;
12699 
12700 		bp->num_leds = resp->num_leds;
12701 		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12702 						 bp->num_leds);
12703 		for (i = 0; i < bp->num_leds; i++) {
12704 			struct bnxt_led_info *led = &bp->leds[i];
12705 			__le16 caps = led->led_state_caps;
12706 
12707 			if (!led->led_group_id ||
12708 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
12709 				bp->num_leds = 0;
12710 				break;
12711 			}
12712 		}
12713 	}
12714 	hwrm_req_drop(bp, req);
12715 	return 0;
12716 }
12717 
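/* Allocate a magic-packet WoL filter for the current MAC address and
 * remember the filter ID returned by firmware.
 */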
12718 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12719 {
12720 	struct hwrm_wol_filter_alloc_output *resp;
12721 	struct hwrm_wol_filter_alloc_input *req;
12722 	int rc;
12723 
12724 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12725 	if (rc)
12726 		return rc;
12727 
12728 	req->port_id = cpu_to_le16(bp->pf.port_id);
12729 	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12730 	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12731 	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12732 
12733 	resp = hwrm_req_hold(bp, req);
12734 	rc = hwrm_req_send(bp, req);
12735 	if (!rc)
12736 		bp->wol_filter_id = resp->wol_filter_id;
12737 	hwrm_req_drop(bp, req);
12738 	return rc;
12739 }
12740 
12741 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12742 {
12743 	struct hwrm_wol_filter_free_input *req;
12744 	int rc;
12745 
12746 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12747 	if (rc)
12748 		return rc;
12749 
12750 	req->port_id = cpu_to_le16(bp->pf.port_id);
12751 	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12752 	req->wol_filter_id = bp->wol_filter_id;
12753 
12754 	return hwrm_req_send(bp, req);
12755 }
12756 
12757 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12758 {
12759 	struct hwrm_wol_filter_qcfg_output *resp;
12760 	struct hwrm_wol_filter_qcfg_input *req;
12761 	u16 next_handle = 0;
12762 	int rc;
12763 
12764 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12765 	if (rc)
12766 		return rc;
12767 
12768 	req->port_id = cpu_to_le16(bp->pf.port_id);
12769 	req->handle = cpu_to_le16(handle);
12770 	resp = hwrm_req_hold(bp, req);
12771 	rc = hwrm_req_send(bp, req);
12772 	if (!rc) {
12773 		next_handle = le16_to_cpu(resp->next_handle);
12774 		if (next_handle != 0) {
12775 			if (resp->wol_type ==
12776 			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12777 				bp->wol = 1;
12778 				bp->wol_filter_id = resp->wol_filter_id;
12779 			}
12780 		}
12781 	}
12782 	hwrm_req_drop(bp, req);
12783 	return next_handle;
12784 }
12785 
12786 static void bnxt_get_wol_settings(struct bnxt *bp)
12787 {
12788 	u16 handle = 0;
12789 
12790 	bp->wol = 0;
12791 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12792 		return;
12793 
12794 	do {
12795 		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12796 	} while (handle && handle != 0xffff);
12797 }
12798 
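/* Check that the EEE configuration is consistent with the link
 * settings: EEE requires speed autoneg, and the EEE advertisement must
 * be a subset of the advertised speeds.  Trims the EEE advertisement
 * and returns false if it is not.
 */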
12799 static bool bnxt_eee_config_ok(struct bnxt *bp)
12800 {
12801 	struct ethtool_keee *eee = &bp->eee;
12802 	struct bnxt_link_info *link_info = &bp->link_info;
12803 
12804 	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12805 		return true;
12806 
12807 	if (eee->eee_enabled) {
12808 		__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12809 		__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12810 
12811 		_bnxt_fw_to_linkmode(advertising, link_info->advertising);
12812 
12813 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12814 			eee->eee_enabled = 0;
12815 			return false;
12816 		}
12817 		if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12818 			linkmode_and(eee->advertised, advertising,
12819 				     eee->supported);
12820 			return false;
12821 		}
12822 	}
12823 	return true;
12824 }
12825 
12826 static int bnxt_update_phy_setting(struct bnxt *bp)
12827 {
12828 	int rc;
12829 	bool update_link = false;
12830 	bool update_pause = false;
12831 	bool update_eee = false;
12832 	struct bnxt_link_info *link_info = &bp->link_info;
12833 
12834 	rc = bnxt_update_link(bp, true);
12835 	if (rc) {
12836 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12837 			   rc);
12838 		return rc;
12839 	}
12840 	if (!BNXT_SINGLE_PF(bp))
12841 		return 0;
12842 
12843 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12844 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12845 	    link_info->req_flow_ctrl)
12846 		update_pause = true;
12847 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12848 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
12849 		update_pause = true;
12850 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12851 		if (BNXT_AUTO_MODE(link_info->auto_mode))
12852 			update_link = true;
12853 		if (bnxt_force_speed_updated(link_info))
12854 			update_link = true;
12855 		if (link_info->req_duplex != link_info->duplex_setting)
12856 			update_link = true;
12857 	} else {
12858 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12859 			update_link = true;
12860 		if (bnxt_auto_speed_updated(link_info))
12861 			update_link = true;
12862 	}
12863 
12864 	/* The last close may have shut down the link, so we need to call
12865 	 * PHY_CFG to bring it back up.
12866 	 */
12867 	if (!BNXT_LINK_IS_UP(bp))
12868 		update_link = true;
12869 
12870 	if (!bnxt_eee_config_ok(bp))
12871 		update_eee = true;
12872 
12873 	if (update_link)
12874 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12875 	else if (update_pause)
12876 		rc = bnxt_hwrm_set_pause(bp);
12877 	if (rc) {
12878 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12879 			   rc);
12880 		return rc;
12881 	}
12882 
12883 	return rc;
12884 }
12885 
12886 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12887 
12888 static int bnxt_reinit_after_abort(struct bnxt *bp)
12889 {
12890 	int rc;
12891 
12892 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12893 		return -EBUSY;
12894 
12895 	if (bp->dev->reg_state == NETREG_UNREGISTERED)
12896 		return -ENODEV;
12897 
12898 	rc = bnxt_fw_init_one(bp);
12899 	if (!rc) {
12900 		bnxt_clear_int_mode(bp);
12901 		rc = bnxt_init_int_mode(bp);
12902 		if (!rc) {
12903 			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12904 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12905 		}
12906 	}
12907 	return rc;
12908 }
12909 
12910 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12911 {
12912 	struct bnxt_ntuple_filter *ntp_fltr;
12913 	struct bnxt_l2_filter *l2_fltr;
12914 
12915 	if (list_empty(&fltr->list))
12916 		return;
12917 
12918 	if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12919 		ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12920 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12921 		atomic_inc(&l2_fltr->refcnt);
12922 		ntp_fltr->l2_fltr = l2_fltr;
12923 		if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12924 			bnxt_del_ntp_filter(bp, ntp_fltr);
12925 			netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12926 				   fltr->sw_id);
12927 		}
12928 	} else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12929 		l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12930 		if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12931 			bnxt_del_l2_filter(bp, l2_fltr);
12932 			netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12933 				   fltr->sw_id);
12934 		}
12935 	}
12936 }
12937 
12938 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12939 {
12940 	struct bnxt_filter_base *usr_fltr, *tmp;
12941 
12942 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12943 		bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12944 }
12945 
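/* Build per-queue CPU masks by spreading the online CPUs across the TX
 * queues of each traffic class, then register the masks with the XPS
 * core via netif_set_xps_queue().
 */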
12946 static int bnxt_set_xps_mapping(struct bnxt *bp)
12947 {
12948 	int numa_node = dev_to_node(&bp->pdev->dev);
12949 	unsigned int q_idx, map_idx, cpu, i;
12950 	const struct cpumask *cpu_mask_ptr;
12951 	int nr_cpus = num_online_cpus();
12952 	cpumask_t *q_map;
12953 	int rc = 0;
12954 
12955 	q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12956 	if (!q_map)
12957 		return -ENOMEM;
12958 
12959 	/* Create CPU mask for all TX queues across MQPRIO traffic classes.
12960 	 * Each TC has the same number of TX queues. The nth TX queue for each
12961 	 * TC will have the same CPU mask.
12962 	 */
12963 	for (i = 0; i < nr_cpus; i++) {
12964 		map_idx = i % bp->tx_nr_rings_per_tc;
12965 		cpu = cpumask_local_spread(i, numa_node);
12966 		cpu_mask_ptr = get_cpu_mask(cpu);
12967 		cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12968 	}
12969 
12970 	/* Register CPU mask for each TX queue except the ones marked for XDP */
12971 	for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12972 		map_idx = q_idx % bp->tx_nr_rings_per_tc;
12973 		rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12974 		if (rc) {
12975 			netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12976 				    q_idx);
12977 			break;
12978 		}
12979 	}
12980 
12981 	kfree(q_map);
12982 
12983 	return rc;
12984 }
12985 
12986 static int bnxt_tx_nr_rings(struct bnxt *bp)
12987 {
12988 	return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
12989 			    bp->tx_nr_rings_per_tc;
12990 }
12991 
12992 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
12993 {
12994 	return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
12995 }
12996 
12997 static void bnxt_set_xdp_tx_rings(struct bnxt *bp)
12998 {
12999 	bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
13000 	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
13001 }
13002 
13003 static void bnxt_adj_tx_rings(struct bnxt *bp)
13004 {
13005 	/* Make adjustments if the reserved TX rings are fewer than requested */
13006 	bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
13007 	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
13008 	if (bp->tx_nr_rings_xdp)
13009 		bnxt_set_xdp_tx_rings(bp);
13010 }
13011 
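/* Bring the NIC up: reserve rings, allocate memory, set up NAPI and
 * IRQs, initialize the hardware, update PHY settings, and finally
 * enable interrupts and the TX queues.
 */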
13012 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13013 {
13014 	int rc = 0;
13015 
13016 	netif_carrier_off(bp->dev);
13017 	if (irq_re_init) {
13018 		/* Reserve rings now if none were reserved at driver probe. */
13019 		rc = bnxt_init_dflt_ring_mode(bp);
13020 		if (rc) {
13021 			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
13022 			return rc;
13023 		}
13024 	}
13025 	rc = bnxt_reserve_rings(bp, irq_re_init);
13026 	if (rc)
13027 		return rc;
13028 
13029 	bnxt_adj_tx_rings(bp);
13030 	rc = bnxt_alloc_mem(bp, irq_re_init);
13031 	if (rc) {
13032 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13033 		goto open_err_free_mem;
13034 	}
13035 
13036 	if (irq_re_init) {
13037 		bnxt_init_napi(bp);
13038 		rc = bnxt_request_irq(bp);
13039 		if (rc) {
13040 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
13041 			goto open_err_irq;
13042 		}
13043 	}
13044 
13045 	rc = bnxt_init_nic(bp, irq_re_init);
13046 	if (rc) {
13047 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13048 		goto open_err_irq;
13049 	}
13050 
13051 	bnxt_enable_napi(bp);
13052 	bnxt_debug_dev_init(bp);
13053 
13054 	if (link_re_init) {
13055 		mutex_lock(&bp->link_lock);
13056 		rc = bnxt_update_phy_setting(bp);
13057 		mutex_unlock(&bp->link_lock);
13058 		if (rc) {
13059 			netdev_warn(bp->dev, "failed to update phy settings\n");
13060 			if (BNXT_SINGLE_PF(bp)) {
13061 				bp->link_info.phy_retry = true;
13062 				bp->link_info.phy_retry_expires =
13063 					jiffies + 5 * HZ;
13064 			}
13065 		}
13066 	}
13067 
13068 	if (irq_re_init) {
13069 		udp_tunnel_nic_reset_ntf(bp->dev);
13070 		rc = bnxt_set_xps_mapping(bp);
13071 		if (rc)
13072 			netdev_warn(bp->dev, "failed to set xps mapping\n");
13073 	}
13074 
13075 	if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
13076 		if (!static_key_enabled(&bnxt_xdp_locking_key))
13077 			static_branch_enable(&bnxt_xdp_locking_key);
13078 	} else if (static_key_enabled(&bnxt_xdp_locking_key)) {
13079 		static_branch_disable(&bnxt_xdp_locking_key);
13080 	}
13081 	set_bit(BNXT_STATE_OPEN, &bp->state);
13082 	bnxt_enable_int(bp);
13083 	/* Enable TX queues */
13084 	bnxt_tx_enable(bp);
13085 	mod_timer(&bp->timer, jiffies + bp->current_interval);
13086 	/* Poll link status and check for SFP+ module status */
13087 	mutex_lock(&bp->link_lock);
13088 	bnxt_get_port_module_status(bp);
13089 	mutex_unlock(&bp->link_lock);
13090 
13091 	/* VF-reps may need to be re-opened after the PF is re-opened */
13092 	if (BNXT_PF(bp))
13093 		bnxt_vf_reps_open(bp);
13094 	bnxt_ptp_init_rtc(bp, true);
13095 	bnxt_ptp_cfg_tstamp_filters(bp);
13096 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13097 		bnxt_hwrm_realloc_rss_ctx_vnic(bp);
13098 	bnxt_cfg_usr_fltrs(bp);
13099 	return 0;
13100 
13101 open_err_irq:
13102 	bnxt_del_napi(bp);
13103 
13104 open_err_free_mem:
13105 	bnxt_free_skbs(bp);
13106 	bnxt_free_irq(bp);
13107 	bnxt_free_mem(bp, true);
13108 	return rc;
13109 }
13110 
13111 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13112 {
13113 	int rc = 0;
13114 
13115 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13116 		rc = -EIO;
13117 	if (!rc)
13118 		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13119 	if (rc) {
13120 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13121 		netif_close(bp->dev);
13122 	}
13123 	return rc;
13124 }
13125 
13126 /* netdev instance lock held, open the NIC halfway by allocating all
13127  * resources, but NAPI, IRQ, and TX are not enabled.  This is mainly used
13128  * for offline self-tests.
13129  */
13130 int bnxt_half_open_nic(struct bnxt *bp)
13131 {
13132 	int rc = 0;
13133 
13134 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13135 		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13136 		rc = -ENODEV;
13137 		goto half_open_err;
13138 	}
13139 
13140 	rc = bnxt_alloc_mem(bp, true);
13141 	if (rc) {
13142 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13143 		goto half_open_err;
13144 	}
13145 	bnxt_init_napi(bp);
13146 	set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13147 	rc = bnxt_init_nic(bp, true);
13148 	if (rc) {
13149 		clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13150 		bnxt_del_napi(bp);
13151 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13152 		goto half_open_err;
13153 	}
13154 	return 0;
13155 
13156 half_open_err:
13157 	bnxt_free_skbs(bp);
13158 	bnxt_free_mem(bp, true);
13159 	netif_close(bp->dev);
13160 	return rc;
13161 }
13162 
13163 /* netdev instance lock held, this call can only be made after a previous
13164  * successful call to bnxt_half_open_nic().
13165  */
13166 void bnxt_half_close_nic(struct bnxt *bp)
13167 {
13168 	bnxt_hwrm_resource_free(bp, false, true);
13169 	bnxt_del_napi(bp);
13170 	bnxt_free_skbs(bp);
13171 	bnxt_free_mem(bp, true);
13172 	clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13173 }
13174 
13175 void bnxt_reenable_sriov(struct bnxt *bp)
13176 {
13177 	if (BNXT_PF(bp)) {
13178 		struct bnxt_pf_info *pf = &bp->pf;
13179 		int n = pf->active_vfs;
13180 
13181 		if (n)
13182 			bnxt_cfg_hw_sriov(bp, &n, true);
13183 	}
13184 }
13185 
13186 static int bnxt_open(struct net_device *dev)
13187 {
13188 	struct bnxt *bp = netdev_priv(dev);
13189 	int rc;
13190 
13191 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13192 		rc = bnxt_reinit_after_abort(bp);
13193 		if (rc) {
13194 			if (rc == -EBUSY)
13195 				netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13196 			else
13197 				netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13198 			return -ENODEV;
13199 		}
13200 	}
13201 
13202 	rc = bnxt_hwrm_if_change(bp, true);
13203 	if (rc)
13204 		return rc;
13205 
13206 	rc = __bnxt_open_nic(bp, true, true);
13207 	if (rc) {
13208 		bnxt_hwrm_if_change(bp, false);
13209 	} else {
13210 		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13211 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13212 				bnxt_queue_sp_work(bp,
13213 						   BNXT_RESTART_ULP_SP_EVENT);
13214 		}
13215 	}
13216 
13217 	return rc;
13218 }
13219 
13220 static bool bnxt_drv_busy(struct bnxt *bp)
13221 {
13222 	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13223 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
13224 }
13225 
13226 static void bnxt_get_ring_stats(struct bnxt *bp,
13227 				struct rtnl_link_stats64 *stats);
13228 
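/* Tear down in roughly the reverse order of __bnxt_open_nic(): stop TX,
 * wait for stats readers and sp tasks to drain, shut down the rings,
 * then free SKBs, IRQs, and memory.  Ring stats are saved first so that
 * they persist across the close.
 */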
13229 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13230 			     bool link_re_init)
13231 {
13232 	/* Close the VF-reps before closing PF */
13233 	if (BNXT_PF(bp))
13234 		bnxt_vf_reps_close(bp);
13235 
13236 	/* Change device state to avoid TX queue wake-ups */
13237 	bnxt_tx_disable(bp);
13238 
13239 	clear_bit(BNXT_STATE_OPEN, &bp->state);
13240 	smp_mb__after_atomic();
13241 	while (bnxt_drv_busy(bp))
13242 		msleep(20);
13243 
13244 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13245 		bnxt_clear_rss_ctxs(bp);
13246 	/* Flush rings and disable interrupts */
13247 	bnxt_shutdown_nic(bp, irq_re_init);
13248 
13249 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13250 
13251 	bnxt_debug_dev_exit(bp);
13252 	bnxt_disable_napi(bp);
13253 	timer_delete_sync(&bp->timer);
13254 	bnxt_free_skbs(bp);
13255 
13256 	/* Save ring stats before shutdown */
13257 	if (bp->bnapi && irq_re_init) {
13258 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13259 		bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
13260 	}
13261 	if (irq_re_init) {
13262 		bnxt_free_irq(bp);
13263 		bnxt_del_napi(bp);
13264 	}
13265 	bnxt_free_mem(bp, irq_re_init);
13266 }
13267 
13268 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13269 {
13270 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13271 		/* If we get here, it means firmware reset is in progress
13272 		 * while we are trying to close.  We can safely proceed with
13273 		 * the close because we are holding netdev instance lock.
13274 		 * Some firmware messages may fail as we proceed to close.
13275 		 * We set the ABORT_ERR flag here so that the FW reset thread
13276 		 * will later abort when it gets the netdev instance lock
13277 		 * and sees the flag.
13278 		 */
13279 		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13280 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13281 	}
13282 
13283 #ifdef CONFIG_BNXT_SRIOV
13284 	if (bp->sriov_cfg) {
13285 		int rc;
13286 
13287 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13288 						      !bp->sriov_cfg,
13289 						      BNXT_SRIOV_CFG_WAIT_TMO);
13290 		if (!rc)
13291 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13292 		else if (rc < 0)
13293 			netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13294 	}
13295 #endif
13296 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
13297 }
13298 
13299 static int bnxt_close(struct net_device *dev)
13300 {
13301 	struct bnxt *bp = netdev_priv(dev);
13302 
13303 	bnxt_close_nic(bp, true, true);
13304 	bnxt_hwrm_shutdown_link(bp);
13305 	bnxt_hwrm_if_change(bp, false);
13306 	return 0;
13307 }
13308 
13309 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13310 				   u16 *val)
13311 {
13312 	struct hwrm_port_phy_mdio_read_output *resp;
13313 	struct hwrm_port_phy_mdio_read_input *req;
13314 	int rc;
13315 
13316 	if (bp->hwrm_spec_code < 0x10a00)
13317 		return -EOPNOTSUPP;
13318 
13319 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13320 	if (rc)
13321 		return rc;
13322 
13323 	req->port_id = cpu_to_le16(bp->pf.port_id);
13324 	req->phy_addr = phy_addr;
13325 	req->reg_addr = cpu_to_le16(reg & 0x1f);
13326 	if (mdio_phy_id_is_c45(phy_addr)) {
13327 		req->cl45_mdio = 1;
13328 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
13329 		req->dev_addr = mdio_phy_id_devad(phy_addr);
13330 		req->reg_addr = cpu_to_le16(reg);
13331 	}
13332 
13333 	resp = hwrm_req_hold(bp, req);
13334 	rc = hwrm_req_send(bp, req);
13335 	if (!rc)
13336 		*val = le16_to_cpu(resp->reg_data);
13337 	hwrm_req_drop(bp, req);
13338 	return rc;
13339 }
13340 
13341 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13342 				    u16 val)
13343 {
13344 	struct hwrm_port_phy_mdio_write_input *req;
13345 	int rc;
13346 
13347 	if (bp->hwrm_spec_code < 0x10a00)
13348 		return -EOPNOTSUPP;
13349 
13350 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13351 	if (rc)
13352 		return rc;
13353 
13354 	req->port_id = cpu_to_le16(bp->pf.port_id);
13355 	req->phy_addr = phy_addr;
13356 	req->reg_addr = cpu_to_le16(reg & 0x1f);
13357 	if (mdio_phy_id_is_c45(phy_addr)) {
13358 		req->cl45_mdio = 1;
13359 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
13360 		req->dev_addr = mdio_phy_id_devad(phy_addr);
13361 		req->reg_addr = cpu_to_le16(reg);
13362 	}
13363 	req->reg_data = cpu_to_le16(val);
13364 
13365 	return hwrm_req_send(bp, req);
13366 }
13367 
13368 /* netdev instance lock held */
13369 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13370 {
13371 	struct mii_ioctl_data *mdio = if_mii(ifr);
13372 	struct bnxt *bp = netdev_priv(dev);
13373 	int rc;
13374 
13375 	switch (cmd) {
13376 	case SIOCGMIIPHY:
13377 		mdio->phy_id = bp->link_info.phy_addr;
13378 
13379 		fallthrough;
13380 	case SIOCGMIIREG: {
13381 		u16 mii_regval = 0;
13382 
13383 		if (!netif_running(dev))
13384 			return -EAGAIN;
13385 
13386 		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13387 					     &mii_regval);
13388 		mdio->val_out = mii_regval;
13389 		return rc;
13390 	}
13391 
13392 	case SIOCSMIIREG:
13393 		if (!netif_running(dev))
13394 			return -EAGAIN;
13395 
13396 		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13397 						mdio->val_in);
13398 
13399 	default:
13400 		/* do nothing */
13401 		break;
13402 	}
13403 	return -EOPNOTSUPP;
13404 }
13405 
13406 static void bnxt_get_ring_stats(struct bnxt *bp,
13407 				struct rtnl_link_stats64 *stats)
13408 {
13409 	int i;
13410 
13411 	for (i = 0; i < bp->cp_nr_rings; i++) {
13412 		struct bnxt_napi *bnapi = bp->bnapi[i];
13413 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13414 		u64 *sw = cpr->stats.sw_stats;
13415 
13416 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13417 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13418 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13419 
13420 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13421 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13422 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13423 
13424 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13425 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13426 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13427 
13428 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13429 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13430 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13431 
13432 		stats->rx_missed_errors +=
13433 			BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13434 
13435 		stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13436 
13437 		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13438 
13439 		stats->rx_dropped +=
13440 			cpr->sw_stats->rx.rx_netpoll_discards +
13441 			cpr->sw_stats->rx.rx_oom_discards;
13442 	}
13443 }
13444 
13445 static void bnxt_add_prev_stats(struct bnxt *bp,
13446 				struct rtnl_link_stats64 *stats)
13447 {
13448 	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13449 
13450 	stats->rx_packets += prev_stats->rx_packets;
13451 	stats->tx_packets += prev_stats->tx_packets;
13452 	stats->rx_bytes += prev_stats->rx_bytes;
13453 	stats->tx_bytes += prev_stats->tx_bytes;
13454 	stats->rx_missed_errors += prev_stats->rx_missed_errors;
13455 	stats->multicast += prev_stats->multicast;
13456 	stats->rx_dropped += prev_stats->rx_dropped;
13457 	stats->tx_dropped += prev_stats->tx_dropped;
13458 }
13459 
13460 static void
13461 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13462 {
13463 	struct bnxt *bp = netdev_priv(dev);
13464 
13465 	set_bit(BNXT_STATE_READ_STATS, &bp->state);
13466 	/* Make sure bnxt_close_nic() sees that we are reading stats before
13467 	 * we check the BNXT_STATE_OPEN flag.
13468 	 */
13469 	smp_mb__after_atomic();
13470 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13471 		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13472 		*stats = bp->net_stats_prev;
13473 		return;
13474 	}
13475 
13476 	bnxt_get_ring_stats(bp, stats);
13477 	bnxt_add_prev_stats(bp, stats);
13478 
13479 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
13480 		u64 *rx = bp->port_stats.sw_stats;
13481 		u64 *tx = bp->port_stats.sw_stats +
13482 			  BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13483 
13484 		stats->rx_crc_errors =
13485 			BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13486 		stats->rx_frame_errors =
13487 			BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13488 		stats->rx_length_errors =
13489 			BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13490 			BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13491 			BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13492 		stats->rx_errors =
13493 			BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13494 			BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13495 		stats->collisions =
13496 			BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13497 		stats->tx_fifo_errors =
13498 			BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13499 		stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13500 	}
13501 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13502 }
13503 
13504 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
13505 					struct bnxt_total_ring_err_stats *stats,
13506 					struct bnxt_cp_ring_info *cpr)
13507 {
13508 	struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13509 	u64 *hw_stats = cpr->stats.sw_stats;
13510 
13511 	stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13512 	stats->rx_total_resets += sw_stats->rx.rx_resets;
13513 	stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13514 	stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13515 	stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13516 	stats->rx_total_ring_discards +=
13517 		BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13518 	stats->rx_total_hw_gro_packets += sw_stats->rx.rx_hw_gro_packets;
13519 	stats->rx_total_hw_gro_wire_packets += sw_stats->rx.rx_hw_gro_wire_packets;
13520 	stats->tx_total_resets += sw_stats->tx.tx_resets;
13521 	stats->tx_total_ring_discards +=
13522 		BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13523 	stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13524 }
13525 
13526 void bnxt_get_ring_err_stats(struct bnxt *bp,
13527 			     struct bnxt_total_ring_err_stats *stats)
13528 {
13529 	int i;
13530 
13531 	for (i = 0; i < bp->cp_nr_rings; i++)
13532 		bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13533 }
13534 
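/* Sync the netdev multicast list into the default VNIC, falling back to
 * ALL_MCAST if there are more than BNXT_MAX_MC_ADDRS addresses.
 * Returns true if the VNIC needs to be reprogrammed.
 */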
13535 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13536 {
13537 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13538 	struct net_device *dev = bp->dev;
13539 	struct netdev_hw_addr *ha;
13540 	u8 *haddr;
13541 	int mc_count = 0;
13542 	bool update = false;
13543 	int off = 0;
13544 
13545 	netdev_for_each_mc_addr(ha, dev) {
13546 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
13547 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13548 			vnic->mc_list_count = 0;
13549 			return false;
13550 		}
13551 		haddr = ha->addr;
13552 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13553 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13554 			update = true;
13555 		}
13556 		off += ETH_ALEN;
13557 		mc_count++;
13558 	}
13559 	if (mc_count)
13560 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13561 
13562 	if (mc_count != vnic->mc_list_count) {
13563 		vnic->mc_list_count = mc_count;
13564 		update = true;
13565 	}
13566 	return update;
13567 }
13568 
13569 static bool bnxt_uc_list_updated(struct bnxt *bp)
13570 {
13571 	struct net_device *dev = bp->dev;
13572 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13573 	struct netdev_hw_addr *ha;
13574 	int off = 0;
13575 
13576 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13577 		return true;
13578 
13579 	netdev_for_each_uc_addr(ha, dev) {
13580 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13581 			return true;
13582 
13583 		off += ETH_ALEN;
13584 	}
13585 	return false;
13586 }
13587 
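/* ndo_set_rx_mode handler.  Recompute the default VNIC's RX mask from
 * the netdev flags and address lists.  The actual HWRM update is
 * deferred to the sp workqueue because this may be called in atomic
 * context.
 */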
13588 static void bnxt_set_rx_mode(struct net_device *dev)
13589 {
13590 	struct bnxt *bp = netdev_priv(dev);
13591 	struct bnxt_vnic_info *vnic;
13592 	bool mc_update = false;
13593 	bool uc_update;
13594 	u32 mask;
13595 
13596 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13597 		return;
13598 
13599 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13600 	mask = vnic->rx_mask;
13601 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13602 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13603 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13604 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13605 
13606 	if (dev->flags & IFF_PROMISC)
13607 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13608 
13609 	uc_update = bnxt_uc_list_updated(bp);
13610 
13611 	if (dev->flags & IFF_BROADCAST)
13612 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13613 	if (dev->flags & IFF_ALLMULTI) {
13614 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13615 		vnic->mc_list_count = 0;
13616 	} else if (dev->flags & IFF_MULTICAST) {
13617 		mc_update = bnxt_mc_list_updated(bp, &mask);
13618 	}
13619 
13620 	if (mask != vnic->rx_mask || uc_update || mc_update) {
13621 		vnic->rx_mask = mask;
13622 
13623 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13624 	}
13625 }
13626 
13627 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13628 {
13629 	struct net_device *dev = bp->dev;
13630 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13631 	struct netdev_hw_addr *ha;
13632 	int i, off = 0, rc;
13633 	bool uc_update;
13634 
13635 	netif_addr_lock_bh(dev);
13636 	uc_update = bnxt_uc_list_updated(bp);
13637 	netif_addr_unlock_bh(dev);
13638 
13639 	if (!uc_update)
13640 		goto skip_uc;
13641 
13642 	for (i = 1; i < vnic->uc_filter_count; i++) {
13643 		struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13644 
13645 		bnxt_hwrm_l2_filter_free(bp, fltr);
13646 		bnxt_del_l2_filter(bp, fltr);
13647 	}
13648 
13649 	vnic->uc_filter_count = 1;
13650 
13651 	netif_addr_lock_bh(dev);
13652 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13653 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13654 	} else {
13655 		netdev_for_each_uc_addr(ha, dev) {
13656 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13657 			off += ETH_ALEN;
13658 			vnic->uc_filter_count++;
13659 		}
13660 	}
13661 	netif_addr_unlock_bh(dev);
13662 
13663 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13664 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13665 		if (rc) {
13666 			if (BNXT_VF(bp) && rc == -ENODEV) {
13667 				if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13668 					netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13669 				else
13670 					netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13671 				rc = 0;
13672 			} else {
13673 				netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13674 			}
13675 			vnic->uc_filter_count = i;
13676 			return rc;
13677 		}
13678 	}
13679 	if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13680 		netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13681 
13682 skip_uc:
13683 	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13684 	    !bnxt_promisc_ok(bp))
13685 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13686 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13687 	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13688 		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13689 			    rc);
13690 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13691 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13692 		vnic->mc_list_count = 0;
13693 		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13694 	}
13695 	if (rc)
13696 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13697 			   rc);
13698 
13699 	return rc;
13700 }
13701 
13702 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13703 {
13704 #ifdef CONFIG_BNXT_SRIOV
13705 	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13706 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13707 
13708 		/* No minimum rings were provisioned by the PF.  Don't
13709 		 * reserve rings by default when device is down.
13710 		 */
13711 		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13712 			return true;
13713 
13714 		if (!netif_running(bp->dev))
13715 			return false;
13716 	}
13717 #endif
13718 	return true;
13719 }
13720 
13721 /* If the chip and firmware support RFS */
13722 static bool bnxt_rfs_supported(struct bnxt *bp)
13723 {
13724 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13725 		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13726 			return true;
13727 		return false;
13728 	}
13729 	/* 212 firmware is broken for aRFS */
13730 	if (BNXT_FW_MAJ(bp) == 212)
13731 		return false;
13732 	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13733 		return true;
13734 	if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13735 		return true;
13736 	return false;
13737 }
13738 
13739 /* If runtime conditions support RFS */
13740 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13741 {
13742 	struct bnxt_hw_rings hwr = {0};
13743 	int max_vnics, max_rss_ctxs;
13744 
13745 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13746 	    !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13747 		return bnxt_rfs_supported(bp);
13748 
13749 	if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13750 		return false;
13751 
13752 	hwr.grp = bp->rx_nr_rings;
13753 	hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13754 	if (new_rss_ctx)
13755 		hwr.vnic++;
13756 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13757 	max_vnics = bnxt_get_max_func_vnics(bp);
13758 	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13759 
13760 	if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13761 		if (bp->rx_nr_rings > 1)
13762 			netdev_warn(bp->dev,
13763 				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13764 				    min(max_rss_ctxs - 1, max_vnics - 1));
13765 		return false;
13766 	}
13767 
13768 	if (!BNXT_NEW_RM(bp))
13769 		return true;
13770 
13771 	/* Do not reduce VNIC and RSS ctx reservations.  There is a FW
13772 	 * issue that will mess up the default VNIC if we reduce the
13773 	 * reservations.
13774 	 */
13775 	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13776 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13777 		return true;
13778 
13779 	bnxt_hwrm_reserve_rings(bp, &hwr);
13780 	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13781 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13782 		return true;
13783 
13784 	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13785 	hwr.vnic = 1;
13786 	hwr.rss_ctx = 0;
13787 	bnxt_hwrm_reserve_rings(bp, &hwr);
13788 	return false;
13789 }
13790 
13791 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13792 					   netdev_features_t features)
13793 {
13794 	struct bnxt *bp = netdev_priv(dev);
13795 	netdev_features_t vlan_features;
13796 
13797 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13798 		features &= ~NETIF_F_NTUPLE;
13799 
13800 	if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13801 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13802 
13803 	if (!(features & NETIF_F_GRO))
13804 		features &= ~NETIF_F_GRO_HW;
13805 
13806 	if (features & NETIF_F_GRO_HW)
13807 		features &= ~NETIF_F_LRO;
13808 
13809 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
13810 	 * turned on or off together.
13811 	 */
13812 	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13813 	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13814 		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13815 			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13816 		else if (vlan_features)
13817 			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13818 	}
13819 #ifdef CONFIG_BNXT_SRIOV
13820 	if (BNXT_VF(bp) && bp->vf.vlan)
13821 		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13822 #endif
13823 	return features;
13824 }
13825 
13826 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13827 				bool link_re_init, u32 flags, bool update_tpa)
13828 {
13829 	bnxt_close_nic(bp, irq_re_init, link_re_init);
13830 	bp->flags = flags;
13831 	if (update_tpa)
13832 		bnxt_set_ring_params(bp);
13833 	return bnxt_open_nic(bp, irq_re_init, link_re_init);
13834 }
13835 
13836 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13837 {
13838 	bool update_tpa = false, update_ntuple = false;
13839 	struct bnxt *bp = netdev_priv(dev);
13840 	u32 flags = bp->flags;
13841 	u32 changes;
13842 	int rc = 0;
13843 	bool re_init = false;
13844 
13845 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13846 	if (features & NETIF_F_GRO_HW)
13847 		flags |= BNXT_FLAG_GRO;
13848 	else if (features & NETIF_F_LRO)
13849 		flags |= BNXT_FLAG_LRO;
13850 
13851 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13852 		flags &= ~BNXT_FLAG_TPA;
13853 
13854 	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13855 		flags |= BNXT_FLAG_STRIP_VLAN;
13856 
13857 	if (features & NETIF_F_NTUPLE)
13858 		flags |= BNXT_FLAG_RFS;
13859 	else
13860 		bnxt_clear_usr_fltrs(bp, true);
13861 
13862 	changes = flags ^ bp->flags;
13863 	if (changes & BNXT_FLAG_TPA) {
13864 		update_tpa = true;
13865 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13866 		    (flags & BNXT_FLAG_TPA) == 0 ||
13867 		    (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13868 			re_init = true;
13869 	}
13870 
13871 	if (changes & ~BNXT_FLAG_TPA)
13872 		re_init = true;
13873 
13874 	if (changes & BNXT_FLAG_RFS)
13875 		update_ntuple = true;
13876 
13877 	if (flags != bp->flags) {
13878 		u32 old_flags = bp->flags;
13879 
13880 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13881 			bp->flags = flags;
13882 			if (update_tpa)
13883 				bnxt_set_ring_params(bp);
13884 			return rc;
13885 		}
13886 
13887 		if (update_ntuple)
13888 			return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13889 
13890 		if (re_init)
13891 			return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13892 
13893 		if (update_tpa) {
13894 			bp->flags = flags;
13895 			rc = bnxt_set_tpa(bp,
13896 					  (flags & BNXT_FLAG_TPA) ?
13897 					  true : false);
13898 			if (rc)
13899 				bp->flags = old_flags;
13900 		}
13901 	}
13902 	return rc;
13903 }
13904 
13905 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13906 			      u8 **nextp)
13907 {
13908 	struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13909 	int hdr_count = 0;
13910 	u8 *nexthdr;
13911 	int start;
13912 
13913 	/* Check that there are at most 3 IPv6 extension headers, no
13914 	 * fragment header, and each is <= 64 bytes.
13915 	 */
13916 	start = nw_off + sizeof(*ip6h);
13917 	nexthdr = &ip6h->nexthdr;
13918 	while (ipv6_ext_hdr(*nexthdr)) {
13919 		struct ipv6_opt_hdr *hp;
13920 		int hdrlen;
13921 
13922 		if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13923 		    *nexthdr == NEXTHDR_FRAGMENT)
13924 			return false;
13925 		hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13926 					  skb_headlen(skb), NULL);
13927 		if (!hp)
13928 			return false;
13929 		if (*nexthdr == NEXTHDR_AUTH)
13930 			hdrlen = ipv6_authlen(hp);
13931 		else
13932 			hdrlen = ipv6_optlen(hp);
13933 
13934 		if (hdrlen > 64)
13935 			return false;
13936 
13937 		hdr_count++;
13938 		nexthdr = &hp->nexthdr;
13939 		start += hdrlen;
13940 	}
13941 	if (nextp) {
13942 		/* Caller will check inner protocol */
13943 		if (skb->encapsulation) {
13944 			*nextp = nexthdr;
13945 			return true;
13946 		}
13947 		*nextp = NULL;
13948 	}
13949 	/* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13950 	return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13951 }
13952 
13953 /* For UDP, we can only handle one VXLAN, one VXLAN-GPE, and one Geneve port. */
13954 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13955 {
13956 	struct udphdr *uh = udp_hdr(skb);
13957 	__be16 udp_port = uh->dest;
13958 
13959 	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13960 	    udp_port != bp->vxlan_gpe_port)
13961 		return false;
13962 	if (skb->inner_protocol == htons(ETH_P_TEB)) {
13963 		struct ethhdr *eh = inner_eth_hdr(skb);
13964 
13965 		switch (eh->h_proto) {
13966 		case htons(ETH_P_IP):
13967 			return true;
13968 		case htons(ETH_P_IPV6):
13969 			return bnxt_exthdr_check(bp, skb,
13970 						 skb_inner_network_offset(skb),
13971 						 NULL);
13972 		}
13973 	} else if (skb->inner_protocol == htons(ETH_P_IP)) {
13974 		return true;
13975 	} else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13976 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13977 					 NULL);
13978 	}
13979 	return false;
13980 }
13981 
13982 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13983 {
13984 	switch (l4_proto) {
13985 	case IPPROTO_UDP:
13986 		return bnxt_udp_tunl_check(bp, skb);
13987 	case IPPROTO_IPIP:
13988 		return true;
13989 	case IPPROTO_GRE: {
13990 		switch (skb->inner_protocol) {
13991 		default:
13992 			return false;
13993 		case htons(ETH_P_IP):
13994 			return true;
13995 		case htons(ETH_P_IPV6):
13996 			fallthrough;
13997 		}
13998 	}
13999 	case IPPROTO_IPV6:
14000 		/* Check ext headers of inner ipv6 */
14001 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
14002 					 NULL);
14003 	}
14004 	return false;
14005 }
14006 
14007 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
14008 					     struct net_device *dev,
14009 					     netdev_features_t features)
14010 {
14011 	struct bnxt *bp = netdev_priv(dev);
14012 	u8 *l4_proto;
14013 
14014 	features = vlan_features_check(skb, features);
14015 	switch (vlan_get_protocol(skb)) {
14016 	case htons(ETH_P_IP):
14017 		if (!skb->encapsulation)
14018 			return features;
14019 		l4_proto = &ip_hdr(skb)->protocol;
14020 		if (bnxt_tunl_check(bp, skb, *l4_proto))
14021 			return features;
14022 		break;
14023 	case htons(ETH_P_IPV6):
14024 		if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
14025 				       &l4_proto))
14026 			break;
14027 		if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
14028 			return features;
14029 		break;
14030 	}
14031 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
14032 }
14033 
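/* Read num_words 32-bit words from the chip register space using the
 * HWRM_DBG_READ_DIRECT command and a DMA slice for the response data.
 * Illustrative usage (the register offset below is hypothetical):
 *
 *	u32 buf[4];
 *
 *	rc = bnxt_dbg_hwrm_rd_reg(bp, 0x1000, 4, buf);
 */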
14034 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
14035 			 u32 *reg_buf)
14036 {
14037 	struct hwrm_dbg_read_direct_output *resp;
14038 	struct hwrm_dbg_read_direct_input *req;
14039 	__le32 *dbg_reg_buf;
14040 	dma_addr_t mapping;
14041 	int rc, i;
14042 
14043 	rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
14044 	if (rc)
14045 		return rc;
14046 
14047 	dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
14048 					 &mapping);
14049 	if (!dbg_reg_buf) {
14050 		rc = -ENOMEM;
14051 		goto dbg_rd_reg_exit;
14052 	}
14053 
14054 	req->host_dest_addr = cpu_to_le64(mapping);
14055 
14056 	resp = hwrm_req_hold(bp, req);
14057 	req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
14058 	req->read_len32 = cpu_to_le32(num_words);
14059 
14060 	rc = hwrm_req_send(bp, req);
14061 	if (rc || resp->error_code) {
14062 		rc = -EIO;
14063 		goto dbg_rd_reg_exit;
14064 	}
14065 	for (i = 0; i < num_words; i++)
14066 		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
14067 
14068 dbg_rd_reg_exit:
14069 	hwrm_req_drop(bp, req);
14070 	return rc;
14071 }
14072 
14073 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
14074 				       u32 ring_id, u32 *prod, u32 *cons)
14075 {
14076 	struct hwrm_dbg_ring_info_get_output *resp;
14077 	struct hwrm_dbg_ring_info_get_input *req;
14078 	int rc;
14079 
14080 	rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
14081 	if (rc)
14082 		return rc;
14083 
14084 	req->ring_type = ring_type;
14085 	req->fw_ring_id = cpu_to_le32(ring_id);
14086 	resp = hwrm_req_hold(bp, req);
14087 	rc = hwrm_req_send(bp, req);
14088 	if (!rc) {
14089 		*prod = le32_to_cpu(resp->producer_index);
14090 		*cons = le32_to_cpu(resp->consumer_index);
14091 	}
14092 	hwrm_req_drop(bp, req);
14093 	return rc;
14094 }
14095 
14096 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
14097 {
14098 	struct bnxt_tx_ring_info *txr;
14099 	int i = bnapi->index, j;
14100 
14101 	bnxt_for_each_napi_tx(j, bnapi, txr)
14102 		netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14103 			    i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14104 			    txr->tx_cons);
14105 }
14106 
14107 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14108 {
14109 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14110 	int i = bnapi->index;
14111 
14112 	if (!rxr)
14113 		return;
14114 
14115 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14116 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14117 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14118 		    rxr->rx_sw_agg_prod);
14119 }
14120 
14121 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14122 {
14123 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
14124 	int i = bnapi->index, j;
14125 
14126 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14127 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14128 	for (j = 0; j < cpr->cp_ring_count; j++) {
14129 		cpr2 = &cpr->cp_ring_arr[j];
14130 		if (!cpr2->bnapi)
14131 			continue;
14132 		netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
14133 			    i, j, cpr2->cp_ring_struct.fw_ring_id,
14134 			    cpr2->cp_raw_cons);
14135 	}
14136 }
14137 
14138 static void bnxt_dbg_dump_states(struct bnxt *bp)
14139 {
14140 	int i;
14141 	struct bnxt_napi *bnapi;
14142 
14143 	for (i = 0; i < bp->cp_nr_rings; i++) {
14144 		bnapi = bp->bnapi[i];
14145 		if (netif_msg_drv(bp)) {
14146 			bnxt_dump_tx_sw_state(bnapi);
14147 			bnxt_dump_rx_sw_state(bnapi);
14148 			bnxt_dump_cp_sw_state(bnapi);
14149 		}
14150 	}
14151 }
14152 
14153 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14154 {
14155 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14156 	struct hwrm_ring_reset_input *req;
14157 	struct bnxt_napi *bnapi = rxr->bnapi;
14158 	struct bnxt_cp_ring_info *cpr;
14159 	u16 cp_ring_id;
14160 	int rc;
14161 
14162 	rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14163 	if (rc)
14164 		return rc;
14165 
14166 	cpr = &bnapi->cp_ring;
14167 	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14168 	req->cmpl_ring = cpu_to_le16(cp_ring_id);
14169 	req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14170 	req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14171 	return hwrm_req_send_silent(bp, req);
14172 }
14173 
14174 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14175 {
14176 	if (!silent)
14177 		bnxt_dbg_dump_states(bp);
14178 	if (netif_running(bp->dev)) {
14179 		bnxt_close_nic(bp, !silent, false);
14180 		bnxt_open_nic(bp, !silent, false);
14181 	}
14182 }
14183 
14184 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14185 {
14186 	struct bnxt *bp = netdev_priv(dev);
14187 
14188 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14189 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14190 }
14191 
14192 static void bnxt_fw_health_check(struct bnxt *bp)
14193 {
14194 	struct bnxt_fw_health *fw_health = bp->fw_health;
14195 	struct pci_dev *pdev = bp->pdev;
14196 	u32 val;
14197 
14198 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14199 		return;
14200 
14201 	/* Make sure it is enabled before checking the tmr_counter. */
14202 	smp_rmb();
14203 	if (fw_health->tmr_counter) {
14204 		fw_health->tmr_counter--;
14205 		return;
14206 	}
14207 
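	/* A heartbeat register that has not advanced while the device is
	 * still present means the firmware has stopped running.
	 */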
14208 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14209 	if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14210 		fw_health->arrests++;
14211 		goto fw_reset;
14212 	}
14213 
14214 	fw_health->last_fw_heartbeat = val;
14215 
14216 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14217 	if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14218 		fw_health->discoveries++;
14219 		goto fw_reset;
14220 	}
14221 
14222 	fw_health->tmr_counter = fw_health->tmr_multiplier;
14223 	return;
14224 
14225 fw_reset:
14226 	bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14227 }
14228 
14229 static void bnxt_timer(struct timer_list *t)
14230 {
14231 	struct bnxt *bp = timer_container_of(bp, t, timer);
14232 	struct net_device *dev = bp->dev;
14233 
14234 	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14235 		return;
14236 
14237 	if (atomic_read(&bp->intr_sem) != 0)
14238 		goto bnxt_restart_timer;
14239 
14240 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14241 		bnxt_fw_health_check(bp);
14242 
14243 	if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14244 		bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14245 
14246 	if (bnxt_tc_flower_enabled(bp))
14247 		bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14248 
14249 #ifdef CONFIG_RFS_ACCEL
14250 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14251 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14252 #endif /*CONFIG_RFS_ACCEL*/
14253 
14254 	if (bp->link_info.phy_retry) {
14255 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14256 			bp->link_info.phy_retry = false;
14257 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14258 		} else {
14259 			bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14260 		}
14261 	}
14262 
14263 	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14264 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14265 
14266 	if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14267 		bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14268 
14269 bnxt_restart_timer:
14270 	mod_timer(&bp->timer, jiffies + bp->current_interval);
14271 }
14272 
14273 static void bnxt_lock_sp(struct bnxt *bp)
14274 {
14275 	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14276 	 * set.  If the device is being closed, bnxt_close() may be holding
14277 	 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14278 	 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14279 	 * instance lock.
14280 	 */
14281 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14282 	netdev_lock(bp->dev);
14283 }
14284 
14285 static void bnxt_unlock_sp(struct bnxt *bp)
14286 {
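	/* Re-assert BNXT_STATE_IN_SP_TASK (dropped in bnxt_lock_sp()) before
	 * releasing the netdev instance lock.
	 */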
14287 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14288 	netdev_unlock(bp->dev);
14289 }
14290 
14291 /* Only called from bnxt_sp_task() */
14292 static void bnxt_reset(struct bnxt *bp, bool silent)
14293 {
14294 	bnxt_lock_sp(bp);
14295 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
14296 		bnxt_reset_task(bp, silent);
14297 	bnxt_unlock_sp(bp);
14298 }
14299 
14300 /* Only called from bnxt_sp_task() */
14301 static void bnxt_rx_ring_reset(struct bnxt *bp)
14302 {
14303 	int i;
14304 
14305 	bnxt_lock_sp(bp);
14306 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14307 		bnxt_unlock_sp(bp);
14308 		return;
14309 	}
14310 	/* Disable and flush TPA before resetting the RX ring */
14311 	if (bp->flags & BNXT_FLAG_TPA)
14312 		bnxt_set_tpa(bp, false);
14313 	for (i = 0; i < bp->rx_nr_rings; i++) {
14314 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14315 		struct bnxt_cp_ring_info *cpr;
14316 		int rc;
14317 
14318 		if (!rxr->bnapi->in_reset)
14319 			continue;
14320 
14321 		rc = bnxt_hwrm_rx_ring_reset(bp, i);
14322 		if (rc) {
14323 			if (rc == -EINVAL || rc == -EOPNOTSUPP)
14324 				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14325 			else
14326 				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14327 					    rc);
14328 			bnxt_reset_task(bp, true);
14329 			break;
14330 		}
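		/* Ring reset succeeded: free all SW buffers, rewind the
		 * producer/consumer indices and refill the ring from scratch.
		 */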
14331 		bnxt_free_one_rx_ring_skbs(bp, rxr);
14332 		rxr->rx_prod = 0;
14333 		rxr->rx_agg_prod = 0;
14334 		rxr->rx_sw_agg_prod = 0;
14335 		rxr->rx_next_cons = 0;
14336 		rxr->bnapi->in_reset = false;
14337 		bnxt_alloc_one_rx_ring(bp, i);
14338 		cpr = &rxr->bnapi->cp_ring;
14339 		cpr->sw_stats->rx.rx_resets++;
14340 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
14341 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14342 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14343 	}
14344 	if (bp->flags & BNXT_FLAG_TPA)
14345 		bnxt_set_tpa(bp, true);
14346 	bnxt_unlock_sp(bp);
14347 }
14348 
14349 static void bnxt_fw_fatal_close(struct bnxt *bp)
14350 {
14351 	bnxt_tx_disable(bp);
14352 	bnxt_disable_napi(bp);
14353 	bnxt_disable_int_sync(bp);
14354 	bnxt_free_irq(bp);
14355 	bnxt_clear_int_mode(bp);
14356 	pci_disable_device(bp->pdev);
14357 }
14358 
14359 static void bnxt_fw_reset_close(struct bnxt *bp)
14360 {
14361 	/* When firmware is in fatal state, quiesce device and disable
14362 	 * bus master to prevent any potential bad DMAs before freeing
14363 	 * kernel memory.
14364 	 */
14365 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14366 		u16 val = 0;
14367 
14368 		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14369 		if (val == 0xffff)
14370 			bp->fw_reset_min_dsecs = 0;
14371 		bnxt_fw_fatal_close(bp);
14372 	}
14373 	__bnxt_close_nic(bp, true, false);
14374 	bnxt_vf_reps_free(bp);
14375 	bnxt_clear_int_mode(bp);
14376 	bnxt_hwrm_func_drv_unrgtr(bp);
14377 	if (pci_is_enabled(bp->pdev))
14378 		pci_disable_device(bp->pdev);
14379 	bnxt_free_ctx_mem(bp, false);
14380 }
14381 
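/* Firmware is considered OK if its heartbeat is still advancing and the
 * reset counter has changed, i.e. firmware went through a reset and is
 * running again.
 */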
14382 static bool is_bnxt_fw_ok(struct bnxt *bp)
14383 {
14384 	struct bnxt_fw_health *fw_health = bp->fw_health;
14385 	bool no_heartbeat = false, has_reset = false;
14386 	u32 val;
14387 
14388 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14389 	if (val == fw_health->last_fw_heartbeat)
14390 		no_heartbeat = true;
14391 
14392 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14393 	if (val != fw_health->last_fw_reset_cnt)
14394 		has_reset = true;
14395 
14396 	if (!no_heartbeat && has_reset)
14397 		return true;
14398 
14399 	return false;
14400 }
14401 
14402 /* netdev instance lock is acquired before calling this function */
14403 static void bnxt_force_fw_reset(struct bnxt *bp)
14404 {
14405 	struct bnxt_fw_health *fw_health = bp->fw_health;
14406 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14407 	u32 wait_dsecs;
14408 
14409 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14410 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14411 		return;
14412 
14413 	/* we have to serialize with bnxt_refclk_read() */
14414 	if (ptp) {
14415 		unsigned long flags;
14416 
14417 		write_seqlock_irqsave(&ptp->ptp_lock, flags);
14418 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14419 		write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14420 	} else {
14421 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14422 	}
14423 	bnxt_fw_reset_close(bp);
14424 	wait_dsecs = fw_health->master_func_wait_dsecs;
14425 	if (fw_health->primary) {
14426 		if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14427 			wait_dsecs = 0;
14428 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14429 	} else {
14430 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14431 		wait_dsecs = fw_health->normal_func_wait_dsecs;
14432 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14433 	}
14434 
14435 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14436 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14437 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14438 }
14439 
14440 void bnxt_fw_exception(struct bnxt *bp)
14441 {
14442 	netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14443 	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14444 	bnxt_ulp_stop(bp);
14445 	bnxt_lock_sp(bp);
14446 	bnxt_force_fw_reset(bp);
14447 	bnxt_unlock_sp(bp);
14448 }
14449 
14450 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14451  * < 0 on error.
14452  */
14453 static int bnxt_get_registered_vfs(struct bnxt *bp)
14454 {
14455 #ifdef CONFIG_BNXT_SRIOV
14456 	int rc;
14457 
14458 	if (!BNXT_PF(bp))
14459 		return 0;
14460 
14461 	rc = bnxt_hwrm_func_qcfg(bp);
14462 	if (rc) {
14463 		netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14464 		return rc;
14465 	}
14466 	if (bp->pf.registered_vfs)
14467 		return bp->pf.registered_vfs;
14468 	if (bp->sriov_cfg)
14469 		return 1;
14470 #endif
14471 	return 0;
14472 }
14473 
14474 void bnxt_fw_reset(struct bnxt *bp)
14475 {
14476 	bnxt_ulp_stop(bp);
14477 	bnxt_lock_sp(bp);
14478 	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14479 	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14480 		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14481 		int n = 0, tmo;
14482 
14483 		/* we have to serialize with bnxt_refclk_read() */
14484 		if (ptp) {
14485 			unsigned long flags;
14486 
14487 			write_seqlock_irqsave(&ptp->ptp_lock, flags);
14488 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14489 			write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14490 		} else {
14491 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14492 		}
14493 		if (bp->pf.active_vfs &&
14494 		    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14495 			n = bnxt_get_registered_vfs(bp);
14496 		if (n < 0) {
14497 			netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14498 				   n);
14499 			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14500 			netif_close(bp->dev);
14501 			goto fw_reset_exit;
14502 		} else if (n > 0) {
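			/* Allow up to 1 second (10 dsecs) per registered VF
			 * for the VFs to unregister before forcing the reset.
			 */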
14503 			u16 vf_tmo_dsecs = n * 10;
14504 
14505 			if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14506 				bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14507 			bp->fw_reset_state =
14508 				BNXT_FW_RESET_STATE_POLL_VF;
14509 			bnxt_queue_fw_reset_work(bp, HZ / 10);
14510 			goto fw_reset_exit;
14511 		}
14512 		bnxt_fw_reset_close(bp);
14513 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14514 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14515 			tmo = HZ / 10;
14516 		} else {
14517 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14518 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
14519 		}
14520 		bnxt_queue_fw_reset_work(bp, tmo);
14521 	}
14522 fw_reset_exit:
14523 	bnxt_unlock_sp(bp);
14524 }
14525 
14526 static void bnxt_chk_missed_irq(struct bnxt *bp)
14527 {
14528 	int i;
14529 
14530 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14531 		return;
14532 
14533 	for (i = 0; i < bp->cp_nr_rings; i++) {
14534 		struct bnxt_napi *bnapi = bp->bnapi[i];
14535 		struct bnxt_cp_ring_info *cpr;
14536 		u32 fw_ring_id;
14537 		int j;
14538 
14539 		if (!bnapi)
14540 			continue;
14541 
14542 		cpr = &bnapi->cp_ring;
14543 		for (j = 0; j < cpr->cp_ring_count; j++) {
14544 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14545 			u32 val[2];
14546 
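			/* Skip rings that still have NAPI work scheduled or
			 * that genuinely have nothing pending.
			 */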
14547 			if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14548 				continue;
14549 
14550 			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14551 				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14552 				continue;
14553 			}
14554 			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14555 			bnxt_dbg_hwrm_ring_info_get(bp,
14556 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14557 				fw_ring_id, &val[0], &val[1]);
14558 			cpr->sw_stats->cmn.missed_irqs++;
14559 		}
14560 	}
14561 }
14562 
14563 static void bnxt_cfg_ntp_filters(struct bnxt *);
14564 
14565 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14566 {
14567 	struct bnxt_link_info *link_info = &bp->link_info;
14568 
14569 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14570 		link_info->autoneg = BNXT_AUTONEG_SPEED;
14571 		if (bp->hwrm_spec_code >= 0x10201) {
14572 			if (link_info->auto_pause_setting &
14573 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14574 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14575 		} else {
14576 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14577 		}
14578 		bnxt_set_auto_speed(link_info);
14579 	} else {
14580 		bnxt_set_force_speed(link_info);
14581 		link_info->req_duplex = link_info->duplex_setting;
14582 	}
14583 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14584 		link_info->req_flow_ctrl =
14585 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14586 	else
14587 		link_info->req_flow_ctrl = link_info->force_pause_setting;
14588 }
14589 
14590 static void bnxt_fw_echo_reply(struct bnxt *bp)
14591 {
14592 	struct bnxt_fw_health *fw_health = bp->fw_health;
14593 	struct hwrm_func_echo_response_input *req;
14594 	int rc;
14595 
14596 	rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14597 	if (rc)
14598 		return;
14599 	req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14600 	req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14601 	hwrm_req_send(bp, req);
14602 }
14603 
14604 static void bnxt_ulp_restart(struct bnxt *bp)
14605 {
14606 	bnxt_ulp_stop(bp);
14607 	bnxt_ulp_start(bp, 0);
14608 }
14609 
14610 static void bnxt_sp_task(struct work_struct *work)
14611 {
14612 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14613 
14614 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14615 	smp_mb__after_atomic();
14616 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14617 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14618 		return;
14619 	}
14620 
14621 	if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14622 		bnxt_ulp_restart(bp);
14623 		bnxt_reenable_sriov(bp);
14624 	}
14625 
14626 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14627 		bnxt_cfg_rx_mode(bp);
14628 
14629 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14630 		bnxt_cfg_ntp_filters(bp);
14631 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14632 		bnxt_hwrm_exec_fwd_req(bp);
14633 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14634 		netdev_info(bp->dev, "Receive PF driver unload event!\n");
14635 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14636 		bnxt_hwrm_port_qstats(bp, 0);
14637 		bnxt_hwrm_port_qstats_ext(bp, 0);
14638 		bnxt_accumulate_all_stats(bp);
14639 	}
14640 
14641 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14642 		int rc;
14643 
14644 		mutex_lock(&bp->link_lock);
14645 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14646 				       &bp->sp_event))
14647 			bnxt_hwrm_phy_qcaps(bp);
14648 
14649 		rc = bnxt_update_link(bp, true);
14650 		if (rc)
14651 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14652 				   rc);
14653 
14654 		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14655 				       &bp->sp_event))
14656 			bnxt_init_ethtool_link_settings(bp);
14657 		mutex_unlock(&bp->link_lock);
14658 	}
14659 	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14660 		int rc;
14661 
14662 		mutex_lock(&bp->link_lock);
14663 		rc = bnxt_update_phy_setting(bp);
14664 		mutex_unlock(&bp->link_lock);
14665 		if (rc) {
14666 			netdev_warn(bp->dev, "update phy settings retry failed\n");
14667 		} else {
14668 			bp->link_info.phy_retry = false;
14669 			netdev_info(bp->dev, "update phy settings retry succeeded\n");
14670 		}
14671 	}
14672 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14673 		mutex_lock(&bp->link_lock);
14674 		bnxt_get_port_module_status(bp);
14675 		mutex_unlock(&bp->link_lock);
14676 	}
14677 
14678 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14679 		bnxt_tc_flow_stats_work(bp);
14680 
14681 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14682 		bnxt_chk_missed_irq(bp);
14683 
14684 	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14685 		bnxt_fw_echo_reply(bp);
14686 
14687 	if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14688 		bnxt_hwmon_notify_event(bp);
14689 
14690 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
14691 	 * must be the last functions to be called before exiting.
14692 	 */
14693 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14694 		bnxt_reset(bp, false);
14695 
14696 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14697 		bnxt_reset(bp, true);
14698 
14699 	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14700 		bnxt_rx_ring_reset(bp);
14701 
14702 	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14703 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14704 		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14705 			bnxt_devlink_health_fw_report(bp);
14706 		else
14707 			bnxt_fw_reset(bp);
14708 	}
14709 
14710 	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14711 		if (!is_bnxt_fw_ok(bp))
14712 			bnxt_devlink_health_fw_report(bp);
14713 	}
14714 
14715 	smp_mb__before_atomic();
14716 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14717 }
14718 
14719 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14720 				int *max_cp);
14721 
14722 /* Under netdev instance lock */
14723 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14724 		     int tx_xdp)
14725 {
14726 	int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14727 	struct bnxt_hw_rings hwr = {0};
14728 	int rx_rings = rx;
14729 	int rc;
14730 
14731 	if (tcs)
14732 		tx_sets = tcs;
14733 
14734 	_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14735 
14736 	if (max_rx < rx_rings)
14737 		return -ENOMEM;
14738 
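	/* With aggregation enabled, every RX ring is paired with an AGG
	 * ring, doubling the HW RX ring count.
	 */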
14739 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
14740 		rx_rings <<= 1;
14741 
14742 	hwr.rx = rx_rings;
14743 	hwr.tx = tx * tx_sets + tx_xdp;
14744 	if (max_tx < hwr.tx)
14745 		return -ENOMEM;
14746 
14747 	hwr.vnic = bnxt_get_total_vnics(bp, rx);
14748 
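	/* With shared rings, one completion ring can serve a TX/RX pair, so
	 * only the larger of the two counts is needed; otherwise TX and RX
	 * each need their own completion rings.
	 */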
14749 	tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14750 	hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14751 	if (max_cp < hwr.cp)
14752 		return -ENOMEM;
14753 	hwr.stat = hwr.cp;
14754 	if (BNXT_NEW_RM(bp)) {
14755 		hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14756 		hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14757 		hwr.grp = rx;
14758 		hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14759 	}
14760 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14761 		hwr.cp_p5 = hwr.tx + rx;
14762 	rc = bnxt_hwrm_check_rings(bp, &hwr);
14763 	if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14764 		if (!bnxt_ulp_registered(bp->edev)) {
14765 			hwr.cp += bnxt_get_ulp_msix_num(bp);
14766 			hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14767 		}
14768 		if (hwr.cp > bp->total_irqs) {
14769 			int total_msix = bnxt_change_msix(bp, hwr.cp);
14770 
14771 			if (total_msix < hwr.cp) {
14772 				netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14773 					    hwr.cp, total_msix);
14774 				rc = -ENOSPC;
14775 			}
14776 		}
14777 	}
14778 	return rc;
14779 }
14780 
14781 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14782 {
14783 	if (bp->bar2) {
14784 		pci_iounmap(pdev, bp->bar2);
14785 		bp->bar2 = NULL;
14786 	}
14787 
14788 	if (bp->bar1) {
14789 		pci_iounmap(pdev, bp->bar1);
14790 		bp->bar1 = NULL;
14791 	}
14792 
14793 	if (bp->bar0) {
14794 		pci_iounmap(pdev, bp->bar0);
14795 		bp->bar0 = NULL;
14796 	}
14797 }
14798 
14799 static void bnxt_cleanup_pci(struct bnxt *bp)
14800 {
14801 	bnxt_unmap_bars(bp, bp->pdev);
14802 	pci_release_regions(bp->pdev);
14803 	if (pci_is_enabled(bp->pdev))
14804 		pci_disable_device(bp->pdev);
14805 }
14806 
14807 static void bnxt_init_dflt_coal(struct bnxt *bp)
14808 {
14809 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14810 	struct bnxt_coal *coal;
14811 	u16 flags = 0;
14812 
14813 	if (coal_cap->cmpl_params &
14814 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14815 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14816 
14817 	/* Tick values in micro seconds.
14818 	 * 1 coal_buf x bufs_per_record = 1 completion record.
14819 	 */
14820 	coal = &bp->rx_coal;
14821 	coal->coal_ticks = 10;
14822 	coal->coal_bufs = 30;
14823 	coal->coal_ticks_irq = 1;
14824 	coal->coal_bufs_irq = 2;
14825 	coal->idle_thresh = 50;
14826 	coal->bufs_per_record = 2;
14827 	coal->budget = 64;		/* NAPI budget */
14828 	coal->flags = flags;
14829 
14830 	coal = &bp->tx_coal;
14831 	coal->coal_ticks = 28;
14832 	coal->coal_bufs = 30;
14833 	coal->coal_ticks_irq = 2;
14834 	coal->coal_bufs_irq = 2;
14835 	coal->bufs_per_record = 1;
14836 	coal->flags = flags;
14837 
14838 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14839 }
14840 
14841 /* FW that pre-reserves 1 VNIC per function */
14842 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14843 {
14844 	u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14845 
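	/* Non-P5_PLUS chips: FW 218.0.18 or newer; P5_PLUS chips:
	 * FW 216.0.172 or newer.
	 */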
14846 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14847 	    (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14848 		return true;
14849 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14850 	    (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14851 		return true;
14852 	return false;
14853 }
14854 
14855 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14856 {
14857 	struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14858 	struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14859 	int rc;
14860 
14861 	bp->max_pfcwd_tmo_ms = 0;
14862 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14863 	if (rc)
14864 		return;
14865 	resp = hwrm_req_hold(bp, req);
14866 	rc = hwrm_req_send_silent(bp, req);
14867 	if (!rc)
14868 		bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14869 	hwrm_req_drop(bp, req);
14870 }
14871 
14872 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14873 {
14874 	int rc;
14875 
14876 	bp->fw_cap = 0;
14877 	rc = bnxt_hwrm_ver_get(bp);
14878 	/* FW may be unresponsive after FLR. FLR must complete within 100 msec
14879 	 * so wait before continuing with recovery.
14880 	 */
14881 	if (rc)
14882 		msleep(100);
14883 	bnxt_try_map_fw_health_reg(bp);
14884 	if (rc) {
14885 		rc = bnxt_try_recover_fw(bp);
14886 		if (rc)
14887 			return rc;
14888 		rc = bnxt_hwrm_ver_get(bp);
14889 		if (rc)
14890 			return rc;
14891 	}
14892 
14893 	bnxt_nvm_cfg_ver_get(bp);
14894 
14895 	rc = bnxt_hwrm_func_reset(bp);
14896 	if (rc)
14897 		return -ENODEV;
14898 
14899 	bnxt_hwrm_fw_set_time(bp);
14900 	return 0;
14901 }
14902 
14903 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14904 {
14905 	int rc;
14906 
14907 	/* Get the MAX capabilities for this function */
14908 	rc = bnxt_hwrm_func_qcaps(bp);
14909 	if (rc) {
14910 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14911 			   rc);
14912 		return -ENODEV;
14913 	}
14914 
14915 	rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14916 	if (rc)
14917 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14918 			    rc);
14919 
14920 	if (bnxt_alloc_fw_health(bp)) {
14921 		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14922 	} else {
14923 		rc = bnxt_hwrm_error_recovery_qcfg(bp);
14924 		if (rc)
14925 			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14926 				    rc);
14927 	}
14928 
14929 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14930 	if (rc)
14931 		return -ENODEV;
14932 
14933 	rc = bnxt_alloc_crash_dump_mem(bp);
14934 	if (rc)
14935 		netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14936 			    rc);
14937 	if (!rc) {
14938 		rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14939 		if (rc) {
14940 			bnxt_free_crash_dump_mem(bp);
14941 			netdev_warn(bp->dev,
14942 				    "hwrm crash dump mem failure rc: %d\n", rc);
14943 		}
14944 	}
14945 
14946 	if (bnxt_fw_pre_resv_vnics(bp))
14947 		bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14948 
14949 	bnxt_hwrm_pfcwd_qcaps(bp);
14950 	bnxt_hwrm_func_qcfg(bp);
14951 	bnxt_hwrm_vnic_qcaps(bp);
14952 	bnxt_hwrm_port_led_qcaps(bp);
14953 	bnxt_ethtool_init(bp);
14954 	if (bp->fw_cap & BNXT_FW_CAP_PTP)
14955 		__bnxt_hwrm_ptp_qcfg(bp);
14956 	bnxt_dcb_init(bp);
14957 	bnxt_hwmon_init(bp);
14958 	return 0;
14959 }
14960 
14961 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14962 {
14963 	bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14964 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14965 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14966 			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14967 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14968 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14969 		bp->rss_hash_delta = bp->rss_hash_cfg;
14970 	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14971 		bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14972 		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14973 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14974 	}
14975 }
14976 
14977 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14978 {
14979 	struct net_device *dev = bp->dev;
14980 
14981 	dev->hw_features &= ~NETIF_F_NTUPLE;
14982 	dev->features &= ~NETIF_F_NTUPLE;
14983 	bp->flags &= ~BNXT_FLAG_RFS;
14984 	if (bnxt_rfs_supported(bp)) {
14985 		dev->hw_features |= NETIF_F_NTUPLE;
14986 		if (bnxt_rfs_capable(bp, false)) {
14987 			bp->flags |= BNXT_FLAG_RFS;
14988 			dev->features |= NETIF_F_NTUPLE;
14989 		}
14990 	}
14991 }
14992 
14993 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14994 {
14995 	struct pci_dev *pdev = bp->pdev;
14996 
14997 	bnxt_set_dflt_rss_hash_type(bp);
14998 	bnxt_set_dflt_rfs(bp);
14999 
15000 	bnxt_get_wol_settings(bp);
15001 	if (bp->flags & BNXT_FLAG_WOL_CAP)
15002 		device_set_wakeup_enable(&pdev->dev, bp->wol);
15003 	else
15004 		device_set_wakeup_capable(&pdev->dev, false);
15005 
15006 	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
15007 	bnxt_hwrm_coal_params_qcaps(bp);
15008 }
15009 
15010 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
15011 
15012 int bnxt_fw_init_one(struct bnxt *bp)
15013 {
15014 	int rc;
15015 
15016 	rc = bnxt_fw_init_one_p1(bp);
15017 	if (rc) {
15018 		netdev_err(bp->dev, "Firmware init phase 1 failed\n");
15019 		return rc;
15020 	}
15021 	rc = bnxt_fw_init_one_p2(bp);
15022 	if (rc) {
15023 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
15024 		return rc;
15025 	}
15026 	rc = bnxt_probe_phy(bp, false);
15027 	if (rc)
15028 		return rc;
15029 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
15030 	if (rc)
15031 		return rc;
15032 
15033 	bnxt_fw_init_one_p3(bp);
15034 	return 0;
15035 }
15036 
15037 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
15038 {
15039 	struct bnxt_fw_health *fw_health = bp->fw_health;
15040 	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
15041 	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
15042 	u32 reg_type, reg_off, delay_msecs;
15043 
15044 	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
15045 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
15046 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
15047 	switch (reg_type) {
15048 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
15049 		pci_write_config_dword(bp->pdev, reg_off, val);
15050 		break;
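	/* GRC registers are accessed indirectly: program the register's base
	 * into the BAR0 window, then write through the window at offset
	 * 0x2000.
	 */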
15051 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
15052 		writel(reg_off & BNXT_GRC_BASE_MASK,
15053 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
15054 		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
15055 		fallthrough;
15056 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
15057 		writel(val, bp->bar0 + reg_off);
15058 		break;
15059 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
15060 		writel(val, bp->bar1 + reg_off);
15061 		break;
15062 	}
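	/* Read config space to flush the write before delaying */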
15063 	if (delay_msecs) {
15064 		pci_read_config_dword(bp->pdev, 0, &val);
15065 		msleep(delay_msecs);
15066 	}
15067 }
15068 
15069 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
15070 {
15071 	struct hwrm_func_qcfg_output *resp;
15072 	struct hwrm_func_qcfg_input *req;
15073 	bool result = true; /* firmware will enforce if unknown */
15074 
15075 	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
15076 		return result;
15077 
15078 	if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
15079 		return result;
15080 
15081 	req->fid = cpu_to_le16(0xffff);
15082 	resp = hwrm_req_hold(bp, req);
15083 	if (!hwrm_req_send(bp, req))
15084 		result = !!(le16_to_cpu(resp->flags) &
15085 			    FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
15086 	hwrm_req_drop(bp, req);
15087 	return result;
15088 }
15089 
15090 static void bnxt_reset_all(struct bnxt *bp)
15091 {
15092 	struct bnxt_fw_health *fw_health = bp->fw_health;
15093 	int i, rc;
15094 
15095 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15096 		bnxt_fw_reset_via_optee(bp);
15097 		bp->fw_reset_timestamp = jiffies;
15098 		return;
15099 	}
15100 
15101 	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
15102 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
15103 			bnxt_fw_reset_writel(bp, i);
15104 	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
15105 		struct hwrm_fw_reset_input *req;
15106 
15107 		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15108 		if (!rc) {
15109 			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15110 			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15111 			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15112 			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15113 			rc = hwrm_req_send(bp, req);
15114 		}
15115 		if (rc != -ENODEV)
15116 			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15117 	}
15118 	bp->fw_reset_timestamp = jiffies;
15119 }
15120 
15121 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15122 {
15123 	return time_after(jiffies, bp->fw_reset_timestamp +
15124 			  (bp->fw_reset_max_dsecs * HZ / 10));
15125 }
15126 
15127 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15128 {
15129 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15130 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15131 		bnxt_dl_health_fw_status_update(bp, false);
15132 	bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15133 	netif_close(bp->dev);
15134 }
15135 
15136 static void bnxt_fw_reset_task(struct work_struct *work)
15137 {
15138 	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15139 	int rc = 0;
15140 
15141 	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15142 		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15143 		return;
15144 	}
15145 
15146 	switch (bp->fw_reset_state) {
15147 	case BNXT_FW_RESET_STATE_POLL_VF: {
15148 		int n = bnxt_get_registered_vfs(bp);
15149 		int tmo;
15150 
15151 		if (n < 0) {
15152 			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15153 				   n, jiffies_to_msecs(jiffies -
15154 				   bp->fw_reset_timestamp));
15155 			goto fw_reset_abort;
15156 		} else if (n > 0) {
15157 			if (bnxt_fw_reset_timeout(bp)) {
15158 				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15159 				bp->fw_reset_state = 0;
15160 				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15161 					   n);
15162 				goto ulp_start;
15163 			}
15164 			bnxt_queue_fw_reset_work(bp, HZ / 10);
15165 			return;
15166 		}
15167 		bp->fw_reset_timestamp = jiffies;
15168 		netdev_lock(bp->dev);
15169 		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15170 			bnxt_fw_reset_abort(bp, rc);
15171 			netdev_unlock(bp->dev);
15172 			goto ulp_start;
15173 		}
15174 		bnxt_fw_reset_close(bp);
15175 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15176 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15177 			tmo = HZ / 10;
15178 		} else {
15179 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15180 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
15181 		}
15182 		netdev_unlock(bp->dev);
15183 		bnxt_queue_fw_reset_work(bp, tmo);
15184 		return;
15185 	}
15186 	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15187 		u32 val;
15188 
15189 		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15190 		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15191 		    !bnxt_fw_reset_timeout(bp)) {
15192 			bnxt_queue_fw_reset_work(bp, HZ / 5);
15193 			return;
15194 		}
15195 
15196 		if (!bp->fw_health->primary) {
15197 			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15198 
15199 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15200 			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15201 			return;
15202 		}
15203 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15204 	}
15205 		fallthrough;
15206 	case BNXT_FW_RESET_STATE_RESET_FW:
15207 		bnxt_reset_all(bp);
15208 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15209 		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15210 		return;
15211 	case BNXT_FW_RESET_STATE_ENABLE_DEV:
15212 		bnxt_inv_fw_health_reg(bp);
15213 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15214 		    !bp->fw_reset_min_dsecs) {
15215 			u16 val;
15216 
15217 			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15218 			if (val == 0xffff) {
15219 				if (bnxt_fw_reset_timeout(bp)) {
15220 					netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15221 					rc = -ETIMEDOUT;
15222 					goto fw_reset_abort;
15223 				}
15224 				bnxt_queue_fw_reset_work(bp, HZ / 1000);
15225 				return;
15226 			}
15227 		}
15228 		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15229 		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15230 		if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15231 		    !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15232 			bnxt_dl_remote_reload(bp);
15233 		if (pci_enable_device(bp->pdev)) {
15234 			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15235 			rc = -ENODEV;
15236 			goto fw_reset_abort;
15237 		}
15238 		pci_set_master(bp->pdev);
15239 		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15240 		fallthrough;
15241 	case BNXT_FW_RESET_STATE_POLL_FW:
15242 		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15243 		rc = bnxt_hwrm_poll(bp);
15244 		if (rc) {
15245 			if (bnxt_fw_reset_timeout(bp)) {
15246 				netdev_err(bp->dev, "Firmware reset aborted\n");
15247 				goto fw_reset_abort_status;
15248 			}
15249 			bnxt_queue_fw_reset_work(bp, HZ / 5);
15250 			return;
15251 		}
15252 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15253 		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15254 		fallthrough;
15255 	case BNXT_FW_RESET_STATE_OPENING:
15256 		while (!netdev_trylock(bp->dev)) {
15257 			bnxt_queue_fw_reset_work(bp, HZ / 10);
15258 			return;
15259 		}
15260 		rc = bnxt_open(bp->dev);
15261 		if (rc) {
15262 			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15263 			bnxt_fw_reset_abort(bp, rc);
15264 			netdev_unlock(bp->dev);
15265 			goto ulp_start;
15266 		}
15267 
15268 		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15269 		    bp->fw_health->enabled) {
15270 			bp->fw_health->last_fw_reset_cnt =
15271 				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15272 		}
15273 		bp->fw_reset_state = 0;
15274 		/* Make sure fw_reset_state is 0 before clearing the flag */
15275 		smp_mb__before_atomic();
15276 		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15277 		bnxt_ptp_reapply_pps(bp);
15278 		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15279 		if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15280 			bnxt_dl_health_fw_recovery_done(bp);
15281 			bnxt_dl_health_fw_status_update(bp, true);
15282 		}
15283 		netdev_unlock(bp->dev);
15284 		bnxt_ulp_start(bp, 0);
15285 		bnxt_reenable_sriov(bp);
15286 		netdev_lock(bp->dev);
15287 		bnxt_vf_reps_alloc(bp);
15288 		bnxt_vf_reps_open(bp);
15289 		netdev_unlock(bp->dev);
15290 		break;
15291 	}
15292 	return;
15293 
15294 fw_reset_abort_status:
15295 	if (bp->fw_health->status_reliable ||
15296 	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15297 		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15298 
15299 		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15300 	}
15301 fw_reset_abort:
15302 	netdev_lock(bp->dev);
15303 	bnxt_fw_reset_abort(bp, rc);
15304 	netdev_unlock(bp->dev);
15305 ulp_start:
15306 	bnxt_ulp_start(bp, rc);
15307 }
15308 
15309 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15310 {
15311 	int rc;
15312 	struct bnxt *bp = netdev_priv(dev);
15313 
15314 	SET_NETDEV_DEV(dev, &pdev->dev);
15315 
15316 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
15317 	rc = pci_enable_device(pdev);
15318 	if (rc) {
15319 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15320 		goto init_err;
15321 	}
15322 
15323 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15324 		dev_err(&pdev->dev,
15325 			"Cannot find PCI device base address, aborting\n");
15326 		rc = -ENODEV;
15327 		goto init_err_disable;
15328 	}
15329 
15330 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15331 	if (rc) {
15332 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15333 		goto init_err_disable;
15334 	}
15335 
15336 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15337 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15338 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15339 		rc = -EIO;
15340 		goto init_err_release;
15341 	}
15342 
15343 	pci_set_master(pdev);
15344 
15345 	bp->dev = dev;
15346 	bp->pdev = pdev;
15347 
15348 	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15349 	 * determines the BAR size.
15350 	 */
15351 	bp->bar0 = pci_ioremap_bar(pdev, 0);
15352 	if (!bp->bar0) {
15353 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15354 		rc = -ENOMEM;
15355 		goto init_err_release;
15356 	}
15357 
15358 	bp->bar2 = pci_ioremap_bar(pdev, 4);
15359 	if (!bp->bar2) {
15360 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15361 		rc = -ENOMEM;
15362 		goto init_err_release;
15363 	}
15364 
15365 	INIT_WORK(&bp->sp_task, bnxt_sp_task);
15366 	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15367 
15368 	spin_lock_init(&bp->ntp_fltr_lock);
15369 #if BITS_PER_LONG == 32
15370 	spin_lock_init(&bp->db_lock);
15371 #endif
15372 
15373 	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15374 	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15375 
15376 	timer_setup(&bp->timer, bnxt_timer, 0);
15377 	bp->current_interval = BNXT_TIMER_INTERVAL;
15378 
15379 	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15380 	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15381 
15382 	clear_bit(BNXT_STATE_OPEN, &bp->state);
15383 	return 0;
15384 
15385 init_err_release:
15386 	bnxt_unmap_bars(bp, pdev);
15387 	pci_release_regions(pdev);
15388 
15389 init_err_disable:
15390 	pci_disable_device(pdev);
15391 
15392 init_err:
15393 	return rc;
15394 }
15395 
15396 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15397 {
15398 	struct sockaddr *addr = p;
15399 	struct bnxt *bp = netdev_priv(dev);
15400 	int rc = 0;
15401 
15402 	netdev_assert_locked(dev);
15403 
15404 	if (!is_valid_ether_addr(addr->sa_data))
15405 		return -EADDRNOTAVAIL;
15406 
15407 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15408 		return 0;
15409 
15410 	rc = bnxt_approve_mac(bp, addr->sa_data, true);
15411 	if (rc)
15412 		return rc;
15413 
15414 	eth_hw_addr_set(dev, addr->sa_data);
15415 	bnxt_clear_usr_fltrs(bp, true);
15416 	if (netif_running(dev)) {
15417 		bnxt_close_nic(bp, false, false);
15418 		rc = bnxt_open_nic(bp, false, false);
15419 	}
15420 
15421 	return rc;
15422 }
15423 
15424 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15425 {
15426 	struct bnxt *bp = netdev_priv(dev);
15427 
15428 	netdev_assert_locked(dev);
15429 
15430 	if (netif_running(dev))
15431 		bnxt_close_nic(bp, true, false);
15432 
15433 	WRITE_ONCE(dev->mtu, new_mtu);
15434 
15435 	/* MTU change may change the AGG ring settings if an XDP multi-buffer
15436 	 * program is attached.  We need to set the AGG ring settings and
15437 	 * rx_skb_func accordingly.
15438 	 */
15439 	if (READ_ONCE(bp->xdp_prog))
15440 		bnxt_set_rx_skb_mode(bp, true);
15441 
15442 	bnxt_set_ring_params(bp);
15443 
15444 	if (netif_running(dev))
15445 		return bnxt_open_nic(bp, true, false);
15446 
15447 	return 0;
15448 }
15449 
15450 void bnxt_set_cp_rings(struct bnxt *bp, bool sh)
15451 {
15452 	int tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15453 
15454 	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15455 			       tx_cp + bp->rx_nr_rings;
15456 }
15457 
15458 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15459 {
15460 	struct bnxt *bp = netdev_priv(dev);
15461 	bool sh = false;
15462 	int rc;
15463 
15464 	if (tc > bp->max_tc) {
15465 		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15466 			   tc, bp->max_tc);
15467 		return -EINVAL;
15468 	}
15469 
15470 	if (bp->num_tc == tc)
15471 		return 0;
15472 
15473 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15474 		sh = true;
15475 
15476 	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15477 			      sh, tc, bp->tx_nr_rings_xdp);
15478 	if (rc)
15479 		return rc;
15480 
15481 	/* Need to close the device and do hw resource re-allocations */
15482 	if (netif_running(bp->dev))
15483 		bnxt_close_nic(bp, true, false);
15484 
15485 	if (tc) {
15486 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15487 		netdev_set_num_tc(dev, tc);
15488 		bp->num_tc = tc;
15489 	} else {
15490 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15491 		netdev_reset_tc(dev);
15492 		bp->num_tc = 0;
15493 	}
15494 	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15495 	bnxt_set_cp_rings(bp, sh);
15496 
15497 	if (netif_running(bp->dev))
15498 		return bnxt_open_nic(bp, true, false);
15499 
15500 	return 0;
15501 }
15502 
15503 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15504 				  void *cb_priv)
15505 {
15506 	struct bnxt *bp = cb_priv;
15507 
15508 	if (!bnxt_tc_flower_enabled(bp) ||
15509 	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15510 		return -EOPNOTSUPP;
15511 
15512 	switch (type) {
15513 	case TC_SETUP_CLSFLOWER:
15514 		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15515 	default:
15516 		return -EOPNOTSUPP;
15517 	}
15518 }
15519 
15520 LIST_HEAD(bnxt_block_cb_list);
15521 
15522 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15523 			 void *type_data)
15524 {
15525 	struct bnxt *bp = netdev_priv(dev);
15526 
15527 	switch (type) {
15528 	case TC_SETUP_BLOCK:
15529 		return flow_block_cb_setup_simple(type_data,
15530 						  &bnxt_block_cb_list,
15531 						  bnxt_setup_tc_block_cb,
15532 						  bp, bp, true);
15533 	case TC_SETUP_QDISC_MQPRIO: {
15534 		struct tc_mqprio_qopt *mqprio = type_data;
15535 
15536 		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15537 
15538 		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15539 	}
15540 	default:
15541 		return -EOPNOTSUPP;
15542 	}
15543 }
15544 
15545 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15546 			    const struct sk_buff *skb)
15547 {
15548 	struct bnxt_vnic_info *vnic;
15549 
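	/* With an skb, reuse the hash already stored in it; otherwise
	 * compute a Toeplitz hash over the flow keys with the default
	 * VNIC's RSS key.
	 */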
15550 	if (skb)
15551 		return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15552 
15553 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15554 	return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15555 }
15556 
15557 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15558 			   u32 idx)
15559 {
15560 	struct hlist_head *head;
15561 	int bit_id;
15562 
15563 	spin_lock_bh(&bp->ntp_fltr_lock);
15564 	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15565 	if (bit_id < 0) {
15566 		spin_unlock_bh(&bp->ntp_fltr_lock);
15567 		return -ENOMEM;
15568 	}
15569 
15570 	fltr->base.sw_id = (u16)bit_id;
15571 	fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15572 	fltr->base.flags |= BNXT_ACT_RING_DST;
15573 	head = &bp->ntp_fltr_hash_tbl[idx];
15574 	hlist_add_head_rcu(&fltr->base.hash, head);
15575 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15576 	bnxt_insert_usr_fltr(bp, &fltr->base);
15577 	bp->ntp_fltr_count++;
15578 	spin_unlock_bh(&bp->ntp_fltr_lock);
15579 	return 0;
15580 }
15581 
15582 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15583 			    struct bnxt_ntuple_filter *f2)
15584 {
15585 	struct bnxt_flow_masks *masks1 = &f1->fmasks;
15586 	struct bnxt_flow_masks *masks2 = &f2->fmasks;
15587 	struct flow_keys *keys1 = &f1->fkeys;
15588 	struct flow_keys *keys2 = &f2->fkeys;
15589 
15590 	if (keys1->basic.n_proto != keys2->basic.n_proto ||
15591 	    keys1->basic.ip_proto != keys2->basic.ip_proto)
15592 		return false;
15593 
15594 	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15595 		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15596 		    masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15597 		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15598 		    masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15599 			return false;
15600 	} else {
15601 		if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15602 				     &keys2->addrs.v6addrs.src) ||
15603 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15604 				     &masks2->addrs.v6addrs.src) ||
15605 		    !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15606 				     &keys2->addrs.v6addrs.dst) ||
15607 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15608 				     &masks2->addrs.v6addrs.dst))
15609 			return false;
15610 	}
15611 
15612 	return keys1->ports.src == keys2->ports.src &&
15613 	       masks1->ports.src == masks2->ports.src &&
15614 	       keys1->ports.dst == keys2->ports.dst &&
15615 	       masks1->ports.dst == masks2->ports.dst &&
15616 	       keys1->control.flags == keys2->control.flags &&
15617 	       f1->l2_fltr == f2->l2_fltr;
15618 }
15619 
15620 struct bnxt_ntuple_filter *
15621 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15622 				struct bnxt_ntuple_filter *fltr, u32 idx)
15623 {
15624 	struct bnxt_ntuple_filter *f;
15625 	struct hlist_head *head;
15626 
15627 	head = &bp->ntp_fltr_hash_tbl[idx];
15628 	hlist_for_each_entry_rcu(f, head, base.hash) {
15629 		if (bnxt_fltr_match(f, fltr))
15630 			return f;
15631 	}
15632 	return NULL;
15633 }
15634 
15635 #ifdef CONFIG_RFS_ACCEL
15636 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15637 			      u16 rxq_index, u32 flow_id)
15638 {
15639 	struct bnxt *bp = netdev_priv(dev);
15640 	struct bnxt_ntuple_filter *fltr, *new_fltr;
15641 	struct flow_keys *fkeys;
15642 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15643 	struct bnxt_l2_filter *l2_fltr;
15644 	int rc = 0, idx;
15645 	u32 flags;
15646 
15647 	if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15648 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15649 		atomic_inc(&l2_fltr->refcnt);
15650 	} else {
15651 		struct bnxt_l2_key key;
15652 
15653 		ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15654 		key.vlan = 0;
15655 		l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15656 		if (!l2_fltr)
15657 			return -EINVAL;
15658 		if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15659 			bnxt_del_l2_filter(bp, l2_fltr);
15660 			return -EINVAL;
15661 		}
15662 	}
15663 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15664 	if (!new_fltr) {
15665 		bnxt_del_l2_filter(bp, l2_fltr);
15666 		return -ENOMEM;
15667 	}
15668 
15669 	fkeys = &new_fltr->fkeys;
15670 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15671 		rc = -EPROTONOSUPPORT;
15672 		goto err_free;
15673 	}
15674 
15675 	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15676 	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15677 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15678 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15679 		rc = -EPROTONOSUPPORT;
15680 		goto err_free;
15681 	}
15682 	new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15683 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15684 		if (bp->hwrm_spec_code < 0x10601) {
15685 			rc = -EPROTONOSUPPORT;
15686 			goto err_free;
15687 		}
15688 		new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15689 	}
15690 	flags = fkeys->control.flags;
15691 	if (((flags & FLOW_DIS_ENCAPSULATION) &&
15692 	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15693 		rc = -EPROTONOSUPPORT;
15694 		goto err_free;
15695 	}
15696 	new_fltr->l2_fltr = l2_fltr;
15697 
15698 	idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
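	/* If an identical filter is already installed, return its existing
	 * sw_id instead of inserting a duplicate.
	 */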
15699 	rcu_read_lock();
15700 	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15701 	if (fltr) {
15702 		rc = fltr->base.sw_id;
15703 		rcu_read_unlock();
15704 		goto err_free;
15705 	}
15706 	rcu_read_unlock();
15707 
15708 	new_fltr->flow_id = flow_id;
15709 	new_fltr->base.rxq = rxq_index;
15710 	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15711 	if (!rc) {
15712 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15713 		return new_fltr->base.sw_id;
15714 	}
15715 
15716 err_free:
15717 	bnxt_del_l2_filter(bp, l2_fltr);
15718 	kfree(new_fltr);
15719 	return rc;
15720 }
15721 #endif
15722 
15723 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15724 {
15725 	spin_lock_bh(&bp->ntp_fltr_lock);
15726 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15727 		spin_unlock_bh(&bp->ntp_fltr_lock);
15728 		return;
15729 	}
15730 	hlist_del_rcu(&fltr->base.hash);
15731 	bnxt_del_one_usr_fltr(bp, &fltr->base);
15732 	bp->ntp_fltr_count--;
15733 	spin_unlock_bh(&bp->ntp_fltr_lock);
15734 	bnxt_del_l2_filter(bp, fltr->l2_fltr);
15735 	clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15736 	kfree_rcu(fltr, base.rcu);
15737 }
15738 
15739 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15740 {
15741 #ifdef CONFIG_RFS_ACCEL
15742 	int i;
15743 
15744 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15745 		struct hlist_head *head;
15746 		struct hlist_node *tmp;
15747 		struct bnxt_ntuple_filter *fltr;
15748 		int rc;
15749 
15750 		head = &bp->ntp_fltr_hash_tbl[i];
15751 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15752 			bool del = false;
15753 
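			/* Filters already in HW are aged out once RPS says
			 * the flow may have expired; filters not yet in HW
			 * are programmed now and deleted on failure.
			 */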
15754 			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15755 				if (fltr->base.flags & BNXT_ACT_NO_AGING)
15756 					continue;
15757 				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15758 							fltr->flow_id,
15759 							fltr->base.sw_id)) {
15760 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
15761 									 fltr);
15762 					del = true;
15763 				}
15764 			} else {
15765 				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15766 								       fltr);
15767 				if (rc)
15768 					del = true;
15769 				else
15770 					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15771 			}
15772 
15773 			if (del)
15774 				bnxt_del_ntp_filter(bp, fltr);
15775 		}
15776 	}
15777 #endif
15778 }
15779 
15780 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15781 				    unsigned int entry, struct udp_tunnel_info *ti)
15782 {
15783 	struct bnxt *bp = netdev_priv(netdev);
15784 	unsigned int cmd;
15785 
15786 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15787 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15788 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15789 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15790 	else
15791 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15792 
15793 	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15794 }
15795 
15796 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15797 				      unsigned int entry, struct udp_tunnel_info *ti)
15798 {
15799 	struct bnxt *bp = netdev_priv(netdev);
15800 	unsigned int cmd;
15801 
15802 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15803 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15804 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15805 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15806 	else
15807 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15808 
15809 	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15810 }
15811 
15812 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15813 	.set_port	= bnxt_udp_tunnel_set_port,
15814 	.unset_port	= bnxt_udp_tunnel_unset_port,
15815 	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15816 	.tables		= {
15817 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
15818 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15819 	},
15820 }, bnxt_udp_tunnels_p7 = {
15821 	.set_port	= bnxt_udp_tunnel_set_port,
15822 	.unset_port	= bnxt_udp_tunnel_unset_port,
15823 	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15824 	.tables		= {
15825 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
15826 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15827 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15828 	},
15829 };
15830 
15831 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15832 			       struct net_device *dev, u32 filter_mask,
15833 			       int nlflags)
15834 {
15835 	struct bnxt *bp = netdev_priv(dev);
15836 
15837 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15838 				       nlflags, filter_mask, NULL);
15839 }
15840 
15841 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15842 			       u16 flags, struct netlink_ext_ack *extack)
15843 {
15844 	struct bnxt *bp = netdev_priv(dev);
15845 	struct nlattr *attr, *br_spec;
15846 	int rem, rc = 0;
15847 
15848 	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15849 		return -EOPNOTSUPP;
15850 
15851 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15852 	if (!br_spec)
15853 		return -EINVAL;
15854 
15855 	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15856 		u16 mode;
15857 
15858 		mode = nla_get_u16(attr);
15859 		if (mode == bp->br_mode)
15860 			break;
15861 
15862 		rc = bnxt_hwrm_set_br_mode(bp, mode);
15863 		if (!rc)
15864 			bp->br_mode = mode;
15865 		break;
15866 	}
15867 	return rc;
15868 }
15869 
15870 int bnxt_get_port_parent_id(struct net_device *dev,
15871 			    struct netdev_phys_item_id *ppid)
15872 {
15873 	struct bnxt *bp = netdev_priv(dev);
15874 
15875 	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15876 		return -EOPNOTSUPP;
15877 
15878 	/* The PF and its VF reps only support the switchdev framework */
15879 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15880 		return -EOPNOTSUPP;
15881 
15882 	ppid->id_len = sizeof(bp->dsn);
15883 	memcpy(ppid->id, bp->dsn, ppid->id_len);
15884 
15885 	return 0;
15886 }
15887 
15888 static const struct net_device_ops bnxt_netdev_ops = {
15889 	.ndo_open		= bnxt_open,
15890 	.ndo_start_xmit		= bnxt_start_xmit,
15891 	.ndo_stop		= bnxt_close,
15892 	.ndo_get_stats64	= bnxt_get_stats64,
15893 	.ndo_set_rx_mode	= bnxt_set_rx_mode,
15894 	.ndo_eth_ioctl		= bnxt_ioctl,
15895 	.ndo_validate_addr	= eth_validate_addr,
15896 	.ndo_set_mac_address	= bnxt_change_mac_addr,
15897 	.ndo_change_mtu		= bnxt_change_mtu,
15898 	.ndo_fix_features	= bnxt_fix_features,
15899 	.ndo_set_features	= bnxt_set_features,
15900 	.ndo_features_check	= bnxt_features_check,
15901 	.ndo_tx_timeout		= bnxt_tx_timeout,
15902 #ifdef CONFIG_BNXT_SRIOV
15903 	.ndo_get_vf_config	= bnxt_get_vf_config,
15904 	.ndo_set_vf_mac		= bnxt_set_vf_mac,
15905 	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
15906 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
15907 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
15908 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
15909 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
15910 #endif
15911 	.ndo_setup_tc           = bnxt_setup_tc,
15912 #ifdef CONFIG_RFS_ACCEL
15913 	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
15914 #endif
15915 	.ndo_bpf		= bnxt_xdp,
15916 	.ndo_xdp_xmit		= bnxt_xdp_xmit,
15917 	.ndo_bridge_getlink	= bnxt_bridge_getlink,
15918 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
15919 	.ndo_hwtstamp_get	= bnxt_hwtstamp_get,
15920 	.ndo_hwtstamp_set	= bnxt_hwtstamp_set,
15921 };
15922 
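/* Per-queue stats callbacks for the netdev qstats API.  The packet and
 * byte counts are summed from the unicast/multicast/broadcast hardware
 * counters mirrored in the ring's software stats block.
 */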
15923 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15924 				    struct netdev_queue_stats_rx *stats)
15925 {
15926 	struct bnxt *bp = netdev_priv(dev);
15927 	struct bnxt_cp_ring_info *cpr;
15928 	u64 *sw;
15929 
15930 	if (!bp->bnapi)
15931 		return;
15932 
15933 	cpr = &bp->bnapi[i]->cp_ring;
15934 	sw = cpr->stats.sw_stats;
15935 
15936 	stats->packets = 0;
15937 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15938 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15939 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15940 
15941 	stats->bytes = 0;
15942 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15943 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15944 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15945 
15946 	stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15947 	stats->hw_gro_packets = cpr->sw_stats->rx.rx_hw_gro_packets;
15948 	stats->hw_gro_wire_packets = cpr->sw_stats->rx.rx_hw_gro_wire_packets;
15949 }
15950 
15951 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15952 				    struct netdev_queue_stats_tx *stats)
15953 {
15954 	struct bnxt *bp = netdev_priv(dev);
15955 	struct bnxt_napi *bnapi;
15956 	u64 *sw;
15957 
15958 	if (!bp->tx_ring)
15959 		return;
15960 
15961 	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15962 	sw = bnapi->cp_ring.stats.sw_stats;
15963 
15964 	stats->packets = 0;
15965 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15966 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15967 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15968 
15969 	stats->bytes = 0;
15970 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15971 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15972 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15973 }
15974 
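/* Base stats report counters accumulated in previous incarnations of the
 * rings (saved in the *_prev snapshots), so that base plus per-queue
 * totals stay consistent across ring resets.
 */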
15975 static void bnxt_get_base_stats(struct net_device *dev,
15976 				struct netdev_queue_stats_rx *rx,
15977 				struct netdev_queue_stats_tx *tx)
15978 {
15979 	struct bnxt *bp = netdev_priv(dev);
15980 
15981 	rx->packets = bp->net_stats_prev.rx_packets;
15982 	rx->bytes = bp->net_stats_prev.rx_bytes;
15983 	rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15984 	rx->hw_gro_packets = bp->ring_err_stats_prev.rx_total_hw_gro_packets;
15985 	rx->hw_gro_wire_packets = bp->ring_err_stats_prev.rx_total_hw_gro_wire_packets;
15986 
15987 	tx->packets = bp->net_stats_prev.tx_packets;
15988 	tx->bytes = bp->net_stats_prev.tx_bytes;
15989 }
15990 
15991 static const struct netdev_stat_ops bnxt_stat_ops = {
15992 	.get_queue_stats_rx	= bnxt_get_queue_stats_rx,
15993 	.get_queue_stats_tx	= bnxt_get_queue_stats_tx,
15994 	.get_base_stats		= bnxt_get_base_stats,
15995 };
15996 
15997 static void bnxt_queue_default_qcfg(struct net_device *dev,
15998 				    struct netdev_queue_config *qcfg)
15999 {
16000 	qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
16001 }
16002 
16003 static int bnxt_validate_qcfg(struct net_device *dev,
16004 			      struct netdev_queue_config *qcfg,
16005 			      struct netlink_ext_ack *extack)
16006 {
16007 	struct bnxt *bp = netdev_priv(dev);
16008 
16009 	/* Older chips need the MSS calculation, so a non-default rx_page_size is not supported */
16010 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
16011 	    qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
16012 		return -EINVAL;
16013 
16014 	if (!is_power_of_2(qcfg->rx_page_size))
16015 		return -ERANGE;
16016 
16017 	if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
16018 	    qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
16019 		return -ERANGE;
16020 
16021 	return 0;
16022 }
16023 
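/* Queue management op: build a parallel "clone" of the RX ring in qmem,
 * with its own page pools, XDP rxq info, ring memory, agg ring and TPA
 * state, while the live ring keeps serving traffic until bnxt_queue_start()
 * swaps the clone in.
 */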
16024 static int bnxt_queue_mem_alloc(struct net_device *dev,
16025 				struct netdev_queue_config *qcfg,
16026 				void *qmem, int idx)
16027 {
16028 	struct bnxt_rx_ring_info *rxr, *clone;
16029 	struct bnxt *bp = netdev_priv(dev);
16030 	struct bnxt_ring_struct *ring;
16031 	int rc;
16032 
16033 	if (!bp->rx_ring)
16034 		return -ENETDOWN;
16035 
16036 	rxr = &bp->rx_ring[idx];
16037 	clone = qmem;
16038 	memcpy(clone, rxr, sizeof(*rxr));
16039 	bnxt_init_rx_ring_struct(bp, clone);
16040 	bnxt_reset_rx_ring_struct(bp, clone);
16041 
16042 	clone->rx_prod = 0;
16043 	clone->rx_agg_prod = 0;
16044 	clone->rx_sw_agg_prod = 0;
16045 	clone->rx_next_cons = 0;
16046 	clone->need_head_pool = false;
16047 	clone->rx_page_size = qcfg->rx_page_size;
16048 
16049 	rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
16050 	if (rc)
16051 		return rc;
16052 
16053 	rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
16054 	if (rc < 0)
16055 		goto err_page_pool_destroy;
16056 
16057 	rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
16058 					MEM_TYPE_PAGE_POOL,
16059 					clone->page_pool);
16060 	if (rc)
16061 		goto err_rxq_info_unreg;
16062 
16063 	ring = &clone->rx_ring_struct;
16064 	rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16065 	if (rc)
16066 		goto err_free_rx_ring;
16067 
16068 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
16069 		ring = &clone->rx_agg_ring_struct;
16070 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16071 		if (rc)
16072 			goto err_free_rx_agg_ring;
16073 
16074 		rc = bnxt_alloc_rx_agg_bmap(bp, clone);
16075 		if (rc)
16076 			goto err_free_rx_agg_ring;
16077 	}
16078 
16079 	if (bp->flags & BNXT_FLAG_TPA) {
16080 		rc = bnxt_alloc_one_tpa_info(bp, clone);
16081 		if (rc)
16082 			goto err_free_tpa_info;
16083 	}
16084 
16085 	bnxt_init_one_rx_ring_rxbd(bp, clone);
16086 	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
16087 
16088 	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
16089 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16090 		bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
16091 	if (bp->flags & BNXT_FLAG_TPA)
16092 		bnxt_alloc_one_tpa_info_data(bp, clone);
16093 
16094 	return 0;
16095 
16096 err_free_tpa_info:
16097 	bnxt_free_one_tpa_info(bp, clone);
16098 err_free_rx_agg_ring:
16099 	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
16100 err_free_rx_ring:
16101 	bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
16102 err_rxq_info_unreg:
16103 	xdp_rxq_info_unreg(&clone->xdp_rxq);
16104 err_page_pool_destroy:
16105 	page_pool_destroy(clone->page_pool);
16106 	page_pool_destroy(clone->head_pool);
16107 	clone->page_pool = NULL;
16108 	clone->head_pool = NULL;
16109 	return rc;
16110 }
16111 
16112 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
16113 {
16114 	struct bnxt_rx_ring_info *rxr = qmem;
16115 	struct bnxt *bp = netdev_priv(dev);
16116 	struct bnxt_ring_struct *ring;
16117 
16118 	bnxt_free_one_rx_ring_skbs(bp, rxr);
16119 	bnxt_free_one_tpa_info(bp, rxr);
16120 
16121 	xdp_rxq_info_unreg(&rxr->xdp_rxq);
16122 
16123 	page_pool_destroy(rxr->page_pool);
16124 	page_pool_destroy(rxr->head_pool);
16125 	rxr->page_pool = NULL;
16126 	rxr->head_pool = NULL;
16127 
16128 	ring = &rxr->rx_ring_struct;
16129 	bnxt_free_ring(bp, &ring->ring_mem);
16130 
16131 	ring = &rxr->rx_agg_ring_struct;
16132 	bnxt_free_ring(bp, &ring->ring_mem);
16133 
16134 	kfree(rxr->rx_agg_bmap);
16135 	rxr->rx_agg_bmap = NULL;
16136 }
16137 
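/* Transfer ring memory ownership (page table, vmem, page/DMA arrays and
 * the agg bitmap) from src to dst.  Both rings must have been allocated
 * with identical parameters, which the WARN_ONs below assert.
 */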
16138 static void bnxt_copy_rx_ring(struct bnxt *bp,
16139 			      struct bnxt_rx_ring_info *dst,
16140 			      struct bnxt_rx_ring_info *src)
16141 {
16142 	struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
16143 	struct bnxt_ring_struct *dst_ring, *src_ring;
16144 	int i;
16145 
16146 	dst_ring = &dst->rx_ring_struct;
16147 	dst_rmem = &dst_ring->ring_mem;
16148 	src_ring = &src->rx_ring_struct;
16149 	src_rmem = &src_ring->ring_mem;
16150 
16151 	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16152 	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16153 	WARN_ON(dst_rmem->flags != src_rmem->flags);
16154 	WARN_ON(dst_rmem->depth != src_rmem->depth);
16155 	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16156 	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16157 
16158 	dst_rmem->pg_tbl = src_rmem->pg_tbl;
16159 	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16160 	*dst_rmem->vmem = *src_rmem->vmem;
16161 	for (i = 0; i < dst_rmem->nr_pages; i++) {
16162 		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16163 		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16164 	}
16165 
16166 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
16167 		return;
16168 
16169 	dst_ring = &dst->rx_agg_ring_struct;
16170 	dst_rmem = &dst_ring->ring_mem;
16171 	src_ring = &src->rx_agg_ring_struct;
16172 	src_rmem = &src_ring->ring_mem;
16173 
16174 	dst->rx_page_size = src->rx_page_size;
16175 
16176 	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16177 	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16178 	WARN_ON(dst_rmem->flags != src_rmem->flags);
16179 	WARN_ON(dst_rmem->depth != src_rmem->depth);
16180 	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16181 	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16182 	WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16183 
16184 	dst_rmem->pg_tbl = src_rmem->pg_tbl;
16185 	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16186 	*dst_rmem->vmem = *src_rmem->vmem;
16187 	for (i = 0; i < dst_rmem->nr_pages; i++) {
16188 		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16189 		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16190 	}
16191 
16192 	dst->rx_agg_bmap = src->rx_agg_bmap;
16193 }
16194 
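/* Queue management op: adopt the cloned ring state prepared by
 * bnxt_queue_mem_alloc(), re-allocate the HW rings over it, re-arm the
 * doorbells and NAPI, and restore the VNIC MRUs to re-enable RX on this
 * queue.  HWRM failures here are unexpected and trigger a full reset.
 */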
16195 static int bnxt_queue_start(struct net_device *dev,
16196 			    struct netdev_queue_config *qcfg,
16197 			    void *qmem, int idx)
16198 {
16199 	struct bnxt *bp = netdev_priv(dev);
16200 	struct bnxt_rx_ring_info *rxr, *clone;
16201 	struct bnxt_cp_ring_info *cpr;
16202 	struct bnxt_vnic_info *vnic;
16203 	struct bnxt_napi *bnapi;
16204 	int i, rc;
16205 	u16 mru;
16206 
16207 	rxr = &bp->rx_ring[idx];
16208 	clone = qmem;
16209 
16210 	rxr->rx_prod = clone->rx_prod;
16211 	rxr->rx_agg_prod = clone->rx_agg_prod;
16212 	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16213 	rxr->rx_next_cons = clone->rx_next_cons;
16214 	rxr->rx_tpa = clone->rx_tpa;
16215 	rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16216 	rxr->page_pool = clone->page_pool;
16217 	rxr->head_pool = clone->head_pool;
16218 	rxr->xdp_rxq = clone->xdp_rxq;
16219 	rxr->need_head_pool = clone->need_head_pool;
16220 
16221 	bnxt_copy_rx_ring(bp, rxr, clone);
16222 
16223 	bnapi = rxr->bnapi;
16224 	cpr = &bnapi->cp_ring;
16225 
16226 	/* All rings have been reserved and previously allocated.
16227 	 * Reallocating with the same parameters should never fail.
16228 	 */
16229 	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16230 	if (rc)
16231 		goto err_reset;
16232 
16233 	if (bp->tph_mode) {
16234 		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16235 		if (rc)
16236 			goto err_reset;
16237 	}
16238 
16239 	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16240 	if (rc)
16241 		goto err_reset;
16242 
16243 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16244 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16245 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16246 
16247 	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16248 		rc = bnxt_tx_queue_start(bp, idx);
16249 		if (rc)
16250 			goto err_reset;
16251 	}
16252 
16253 	bnxt_enable_rx_page_pool(rxr);
16254 	napi_enable_locked(&bnapi->napi);
16255 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16256 
16257 	mru = bp->dev->mtu + VLAN_ETH_HLEN;
16258 	for (i = 0; i < bp->nr_vnics; i++) {
16259 		vnic = &bp->vnic_info[i];
16260 
16261 		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16262 		if (rc)
16263 			return rc;
16264 	}
16265 	return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16266 
16267 err_reset:
16268 	netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16269 		   rc);
16270 	napi_enable_locked(&bnapi->napi);
16271 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16272 	bnxt_reset_task(bp, true);
16273 	return rc;
16274 }
16275 
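/* Queue management op: quiesce one RX queue.  The VNIC MRUs are zeroed
 * first so the queue stops receiving, then the HW rings are freed and
 * the old ring state is saved back into qmem for later release.
 */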
16276 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16277 {
16278 	struct bnxt *bp = netdev_priv(dev);
16279 	struct bnxt_rx_ring_info *rxr;
16280 	struct bnxt_cp_ring_info *cpr;
16281 	struct bnxt_vnic_info *vnic;
16282 	struct bnxt_napi *bnapi;
16283 	int i;
16284 
16285 	for (i = 0; i < bp->nr_vnics; i++) {
16286 		vnic = &bp->vnic_info[i];
16287 
16288 		bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16289 	}
16290 	bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16291 	/* Make sure NAPI sees that the VNIC is disabled */
16292 	synchronize_net();
16293 	rxr = &bp->rx_ring[idx];
16294 	bnapi = rxr->bnapi;
16295 	cpr = &bnapi->cp_ring;
16296 	cancel_work_sync(&cpr->dim.work);
16297 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
16298 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16299 	page_pool_disable_direct_recycling(rxr->page_pool);
16300 	if (bnxt_separate_head_pool(rxr))
16301 		page_pool_disable_direct_recycling(rxr->head_pool);
16302 
16303 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16304 		bnxt_tx_queue_stop(bp, idx);
16305 
16306 	/* Disable NAPI only after freeing the rings because the HWRM_RING_FREE
16307 	 * completion is handled in NAPI, which guarantees no more DMA on that
16308 	 * ring after the completion is seen.
16309 	 */
16310 	napi_disable_locked(&bnapi->napi);
16311 
16312 	if (bp->tph_mode) {
16313 		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16314 		bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16315 	}
16316 	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16317 
16318 	memcpy(qmem, rxr, sizeof(*rxr));
16319 	bnxt_init_rx_ring_struct(bp, qmem);
16320 
16321 	return 0;
16322 }
16323 
16324 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16325 	.ndo_queue_mem_size	= sizeof(struct bnxt_rx_ring_info),
16326 	.ndo_queue_mem_alloc	= bnxt_queue_mem_alloc,
16327 	.ndo_queue_mem_free	= bnxt_queue_mem_free,
16328 	.ndo_queue_start	= bnxt_queue_start,
16329 	.ndo_queue_stop		= bnxt_queue_stop,
16330 	.ndo_default_qcfg	= bnxt_queue_default_qcfg,
16331 	.ndo_validate_qcfg	= bnxt_validate_qcfg,
16332 	.supported_params	= QCFG_RX_PAGE_SIZE,
16333 };
16334 
16335 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
16336 	.ndo_default_qcfg	= bnxt_queue_default_qcfg,
16337 };
16338 
16339 static void bnxt_remove_one(struct pci_dev *pdev)
16340 {
16341 	struct net_device *dev = pci_get_drvdata(pdev);
16342 	struct bnxt *bp = netdev_priv(dev);
16343 
16344 	if (BNXT_PF(bp))
16345 		__bnxt_sriov_disable(bp);
16346 
16347 	bnxt_rdma_aux_device_del(bp);
16348 
16349 	unregister_netdev(dev);
16350 	bnxt_ptp_clear(bp);
16351 
16352 	bnxt_rdma_aux_device_uninit(bp);
16353 
16354 	bnxt_free_l2_filters(bp, true);
16355 	bnxt_free_ntp_fltrs(bp, true);
16356 	WARN_ON(bp->num_rss_ctx);
16357 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16358 	/* Flush any pending tasks */
16359 	cancel_work_sync(&bp->sp_task);
16360 	cancel_delayed_work_sync(&bp->fw_reset_task);
16361 	bp->sp_event = 0;
16362 
16363 	bnxt_dl_fw_reporters_destroy(bp);
16364 	bnxt_dl_unregister(bp);
16365 	bnxt_shutdown_tc(bp);
16366 
16367 	bnxt_clear_int_mode(bp);
16368 	bnxt_hwrm_func_drv_unrgtr(bp);
16369 	bnxt_free_hwrm_resources(bp);
16370 	bnxt_hwmon_uninit(bp);
16371 	bnxt_ethtool_free(bp);
16372 	bnxt_dcb_free(bp);
16373 	kfree(bp->ptp_cfg);
16374 	bp->ptp_cfg = NULL;
16375 	kfree(bp->fw_health);
16376 	bp->fw_health = NULL;
16377 	bnxt_cleanup_pci(bp);
16378 	bnxt_free_ctx_mem(bp, true);
16379 	bnxt_free_crash_dump_mem(bp);
16380 	kfree(bp->rss_indir_tbl);
16381 	bp->rss_indir_tbl = NULL;
16382 	bnxt_free_port_stats(bp);
16383 	free_netdev(dev);
16384 }
16385 
16386 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16387 {
16388 	int rc = 0;
16389 	struct bnxt_link_info *link_info = &bp->link_info;
16390 
16391 	bp->phy_flags = 0;
16392 	rc = bnxt_hwrm_phy_qcaps(bp);
16393 	if (rc) {
16394 		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16395 			   rc);
16396 		return rc;
16397 	}
16398 	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16399 		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16400 	else
16401 		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16402 
16403 	bp->mac_flags = 0;
16404 	bnxt_hwrm_mac_qcaps(bp);
16405 
16406 	if (!fw_dflt)
16407 		return 0;
16408 
16409 	mutex_lock(&bp->link_lock);
16410 	rc = bnxt_update_link(bp, false);
16411 	if (rc) {
16412 		mutex_unlock(&bp->link_lock);
16413 		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16414 			   rc);
16415 		return rc;
16416 	}
16417 
16418 	/* Older firmware does not have supported_auto_speeds, so assume
16419 	 * that all supported speeds can be autonegotiated.
16420 	 */
16421 	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16422 		link_info->support_auto_speeds = link_info->support_speeds;
16423 
16424 	bnxt_init_ethtool_link_settings(bp);
16425 	mutex_unlock(&bp->link_lock);
16426 	return 0;
16427 }
16428 
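/* Return the number of MSI-X vectors the function supports.  The
 * PCI_MSIX_FLAGS_QSIZE field of the MSI-X capability encodes the table
 * size minus one, hence the +1 below; without MSI-X, a single INTx/MSI
 * vector is assumed.
 */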
16429 static int bnxt_get_max_irq(struct pci_dev *pdev)
16430 {
16431 	u16 ctrl;
16432 
16433 	if (!pdev->msix_cap)
16434 		return 1;
16435 
16436 	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16437 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16438 }
16439 
16440 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16441 				int *max_cp)
16442 {
16443 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16444 	int max_ring_grps = 0, max_irq;
16445 
16446 	*max_tx = hw_resc->max_tx_rings;
16447 	*max_rx = hw_resc->max_rx_rings;
16448 	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16449 	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16450 			bnxt_get_ulp_msix_num_in_use(bp),
16451 			hw_resc->max_stat_ctxs -
16452 			bnxt_get_ulp_stat_ctxs_in_use(bp));
16453 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16454 		*max_cp = min_t(int, *max_cp, max_irq);
16455 	max_ring_grps = hw_resc->max_hw_ring_grps;
16456 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16457 		*max_cp -= 1;
16458 		*max_rx -= 2;
16459 	}
16460 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16461 		*max_rx >>= 1;
16462 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16463 		int rc;
16464 
16465 		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16466 		if (rc) {
16467 			*max_rx = 0;
16468 			*max_tx = 0;
16469 		}
16470 		/* On P5+ chips, the max_cp output param should be the available NQs */
16471 		*max_cp = max_irq;
16472 	}
16473 	*max_rx = min_t(int, *max_rx, max_ring_grps);
16474 }
16475 
16476 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16477 {
16478 	int rx, tx, cp;
16479 
16480 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
16481 	*max_rx = rx;
16482 	*max_tx = tx;
16483 	if (!rx || !tx || !cp)
16484 		return -ENOMEM;
16485 
16486 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16487 }
16488 
16489 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16490 			       bool shared)
16491 {
16492 	int rc;
16493 
16494 	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16495 	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16496 		/* Not enough rings; try disabling agg rings. */
16497 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16498 		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16499 		if (rc) {
16500 			/* set BNXT_FLAG_AGG_RINGS back for consistency */
16501 			bp->flags |= BNXT_FLAG_AGG_RINGS;
16502 			return rc;
16503 		}
16504 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16505 		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16506 		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16507 		bnxt_set_ring_params(bp);
16508 	}
16509 
16510 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16511 		int max_cp, max_stat, max_irq;
16512 
16513 		/* Reserve minimum resources for RoCE */
16514 		max_cp = bnxt_get_max_func_cp_rings(bp);
16515 		max_stat = bnxt_get_max_func_stat_ctxs(bp);
16516 		max_irq = bnxt_get_max_func_irqs(bp);
16517 		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16518 		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16519 		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16520 			return 0;
16521 
16522 		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16523 		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16524 		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16525 		max_cp = min_t(int, max_cp, max_irq);
16526 		max_cp = min_t(int, max_cp, max_stat);
16527 		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16528 		if (rc)
16529 			rc = 0;
16530 	}
16531 	return rc;
16532 }
16533 
16534 /* In the initial default shared ring setting, each shared ring must have
16535  * an RX/TX ring pair.
16536  */
16537 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16538 {
16539 	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16540 	bp->rx_nr_rings = bp->cp_nr_rings;
16541 	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16542 	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16543 }
16544 
16545 static void bnxt_adj_dflt_rings(struct bnxt *bp, bool sh)
16546 {
16547 	if (sh)
16548 		bnxt_trim_dflt_sh_rings(bp);
16549 	else
16550 		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16551 	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16552 	if (sh && READ_ONCE(bp->xdp_prog)) {
16553 		bnxt_set_xdp_tx_rings(bp);
16554 		bnxt_set_cp_rings(bp, true);
16555 	}
16556 }
16557 
16558 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16559 {
16560 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
16561 	int avail_msix;
16562 
16563 	if (!bnxt_can_reserve_rings(bp))
16564 		return 0;
16565 
16566 	if (sh)
16567 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
16568 	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16569 	/* Reduce the default rings on multi-port cards so that the total
16570 	 * number of default rings does not exceed the CPU count.
16571 	 */
16572 	if (bp->port_count > 1) {
16573 		int max_rings =
16574 			max_t(int, num_online_cpus() / bp->port_count, 1);
16575 
16576 		dflt_rings = min_t(int, dflt_rings, max_rings);
16577 	}
16578 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16579 	if (rc)
16580 		return rc;
16581 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16582 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16583 
16584 	bnxt_adj_dflt_rings(bp, sh);
16585 
16586 	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16587 	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16588 		int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16589 
16590 		bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16591 		bnxt_set_dflt_ulp_stat_ctxs(bp);
16592 	}
16593 
16594 	rc = __bnxt_reserve_rings(bp);
16595 	if (rc && rc != -ENODEV)
16596 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16597 
16598 	bnxt_adj_tx_rings(bp);
16599 	if (sh)
16600 		bnxt_adj_dflt_rings(bp, true);
16601 
16602 	/* Rings may have been trimmed; re-reserve the trimmed rings. */
16603 	if (bnxt_need_reserve_rings(bp)) {
16604 		rc = __bnxt_reserve_rings(bp);
16605 		if (rc && rc != -ENODEV)
16606 			netdev_warn(bp->dev, "2nd ring reservation failed.\n");
16607 		bnxt_adj_tx_rings(bp);
16608 	}
16609 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16610 		bp->rx_nr_rings++;
16611 		bp->cp_nr_rings++;
16612 	}
16613 	if (rc) {
16614 		bp->tx_nr_rings = 0;
16615 		bp->rx_nr_rings = 0;
16616 	}
16617 	return rc;
16618 }
16619 
16620 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16621 {
16622 	int rc;
16623 
16624 	if (bp->tx_nr_rings)
16625 		return 0;
16626 
16627 	bnxt_ulp_irq_stop(bp);
16628 	bnxt_clear_int_mode(bp);
16629 	rc = bnxt_set_dflt_rings(bp, true);
16630 	if (rc) {
16631 		if (BNXT_VF(bp) && rc == -ENODEV)
16632 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16633 		else
16634 			netdev_err(bp->dev, "Not enough rings available.\n");
16635 		goto init_dflt_ring_err;
16636 	}
16637 	rc = bnxt_init_int_mode(bp);
16638 	if (rc)
16639 		goto init_dflt_ring_err;
16640 
16641 	bnxt_adj_tx_rings(bp);
16642 
16643 	bnxt_set_dflt_rfs(bp);
16644 
16645 init_dflt_ring_err:
16646 	bnxt_ulp_irq_restart(bp, rc);
16647 	return rc;
16648 }
16649 
16650 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16651 {
16652 	int rc;
16653 
16654 	netdev_ops_assert_locked(bp->dev);
16655 	bnxt_hwrm_func_qcaps(bp);
16656 
16657 	if (netif_running(bp->dev))
16658 		__bnxt_close_nic(bp, true, false);
16659 
16660 	bnxt_ulp_irq_stop(bp);
16661 	bnxt_clear_int_mode(bp);
16662 	rc = bnxt_init_int_mode(bp);
16663 	bnxt_ulp_irq_restart(bp, rc);
16664 
16665 	if (netif_running(bp->dev)) {
16666 		if (rc)
16667 			netif_close(bp->dev);
16668 		else
16669 			rc = bnxt_open_nic(bp, true, false);
16670 	}
16671 
16672 	return rc;
16673 }
16674 
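/* A PF uses the MAC address read from firmware.  A VF prefers the
 * PF-administered MAC if one is set, otherwise it generates a random
 * address, and then asks the PF to approve the choice.
 */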
16675 static int bnxt_init_mac_addr(struct bnxt *bp)
16676 {
16677 	int rc = 0;
16678 
16679 	if (BNXT_PF(bp)) {
16680 		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16681 	} else {
16682 #ifdef CONFIG_BNXT_SRIOV
16683 		struct bnxt_vf_info *vf = &bp->vf;
16684 		bool strict_approval = true;
16685 
16686 		if (is_valid_ether_addr(vf->mac_addr)) {
16687 			/* overwrite netdev dev_addr with admin VF MAC */
16688 			eth_hw_addr_set(bp->dev, vf->mac_addr);
16689 			/* Older PF driver or firmware may not approve this
16690 			 * correctly.
16691 			 */
16692 			strict_approval = false;
16693 		} else {
16694 			eth_hw_addr_random(bp->dev);
16695 		}
16696 		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16697 #endif
16698 	}
16699 	return rc;
16700 }
16701 
16702 static void bnxt_vpd_read_info(struct bnxt *bp)
16703 {
16704 	struct pci_dev *pdev = bp->pdev;
16705 	unsigned int vpd_size, kw_len;
16706 	int pos, size;
16707 	u8 *vpd_data;
16708 
16709 	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16710 	if (IS_ERR(vpd_data)) {
16711 		pci_warn(pdev, "Unable to read VPD\n");
16712 		return;
16713 	}
16714 
16715 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16716 					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16717 	if (pos < 0)
16718 		goto read_sn;
16719 
16720 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16721 	memcpy(bp->board_partno, &vpd_data[pos], size);
16722 
16723 read_sn:
16724 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16725 					   PCI_VPD_RO_KEYWORD_SERIALNO,
16726 					   &kw_len);
16727 	if (pos < 0)
16728 		goto exit;
16729 
16730 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16731 	memcpy(bp->board_serialno, &vpd_data[pos], size);
16732 exit:
16733 	kfree(vpd_data);
16734 }
16735 
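/* Read the PCIe Device Serial Number capability.  The DSN is stored
 * little-endian in @dsn and later doubles as the eswitch switch_id
 * (see bnxt_get_port_parent_id()).
 */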
16736 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16737 {
16738 	struct pci_dev *pdev = bp->pdev;
16739 	u64 qword;
16740 
16741 	qword = pci_get_dsn(pdev);
16742 	if (!qword) {
16743 		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16744 		return -EOPNOTSUPP;
16745 	}
16746 
16747 	put_unaligned_le64(qword, dsn);
16748 
16749 	bp->flags |= BNXT_FLAG_DSN_VALID;
16750 	return 0;
16751 }
16752 
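/* Map the doorbell BAR (BAR 2) using the doorbell size learned from
 * firmware; if db_size was never set, there is nothing to map.
 */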
16753 static int bnxt_map_db_bar(struct bnxt *bp)
16754 {
16755 	if (!bp->db_size)
16756 		return -ENODEV;
16757 	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16758 	if (!bp->bar1)
16759 		return -ENOMEM;
16760 	return 0;
16761 }
16762 
16763 void bnxt_print_device_info(struct bnxt *bp)
16764 {
16765 	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16766 		    board_info[bp->board_idx].name,
16767 		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16768 
16769 	pcie_print_link_status(bp->pdev);
16770 }
16771 
16772 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16773 {
16774 	struct bnxt_hw_resc *hw_resc;
16775 	struct net_device *dev;
16776 	struct bnxt *bp;
16777 	int rc, max_irqs;
16778 
16779 	if (pci_is_bridge(pdev))
16780 		return -ENODEV;
16781 
16782 	if (!pdev->msix_cap) {
16783 		dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16784 		return -ENODEV;
16785 	}
16786 
16787 	/* Clear any DMA transactions left pending by the crashed kernel
16788 	 * while loading the driver in the capture (kdump) kernel.
16789 	 */
16790 	if (is_kdump_kernel()) {
16791 		pci_clear_master(pdev);
16792 		pcie_flr(pdev);
16793 	}
16794 
16795 	max_irqs = bnxt_get_max_irq(pdev);
16796 	dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16797 				 max_irqs);
16798 	if (!dev)
16799 		return -ENOMEM;
16800 
16801 	bp = netdev_priv(dev);
16802 	bp->board_idx = ent->driver_data;
16803 	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16804 	bnxt_set_max_func_irqs(bp, max_irqs);
16805 
16806 	if (bnxt_vf_pciid(bp->board_idx))
16807 		bp->flags |= BNXT_FLAG_VF;
16808 
16809 	/* No devlink port registration in case of a VF */
16810 	if (BNXT_PF(bp))
16811 		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16812 
16813 	rc = bnxt_init_board(pdev, dev);
16814 	if (rc < 0)
16815 		goto init_err_free;
16816 
16817 	dev->netdev_ops = &bnxt_netdev_ops;
16818 	dev->stat_ops = &bnxt_stat_ops;
16819 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16820 	dev->ethtool_ops = &bnxt_ethtool_ops;
16821 	pci_set_drvdata(pdev, dev);
16822 
16823 	rc = bnxt_alloc_hwrm_resources(bp);
16824 	if (rc)
16825 		goto init_err_pci_clean;
16826 
16827 	mutex_init(&bp->hwrm_cmd_lock);
16828 	mutex_init(&bp->link_lock);
16829 
16830 	rc = bnxt_fw_init_one_p1(bp);
16831 	if (rc)
16832 		goto init_err_pci_clean;
16833 
16834 	if (BNXT_PF(bp))
16835 		bnxt_vpd_read_info(bp);
16836 
16837 	if (BNXT_CHIP_P5_PLUS(bp)) {
16838 		bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16839 		if (BNXT_CHIP_P7(bp))
16840 			bp->flags |= BNXT_FLAG_CHIP_P7;
16841 	}
16842 
16843 	rc = bnxt_alloc_rss_indir_tbl(bp);
16844 	if (rc)
16845 		goto init_err_pci_clean;
16846 
16847 	rc = bnxt_fw_init_one_p2(bp);
16848 	if (rc)
16849 		goto init_err_pci_clean;
16850 
16851 	rc = bnxt_map_db_bar(bp);
16852 	if (rc) {
16853 		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16854 			rc);
16855 		goto init_err_pci_clean;
16856 	}
16857 
16858 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16859 			   NETIF_F_TSO | NETIF_F_TSO6 |
16860 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16861 			   NETIF_F_GSO_IPXIP4 |
16862 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16863 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16864 			   NETIF_F_RXCSUM | NETIF_F_GRO;
16865 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16866 		dev->hw_features |= NETIF_F_GSO_UDP_L4;
16867 
16868 	if (BNXT_SUPPORTS_TPA(bp))
16869 		dev->hw_features |= NETIF_F_LRO;
16870 
16871 	dev->hw_enc_features =
16872 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16873 			NETIF_F_TSO | NETIF_F_TSO6 |
16874 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16875 			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16876 			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16877 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16878 		dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16879 	if (bp->flags & BNXT_FLAG_CHIP_P7)
16880 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16881 	else
16882 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16883 
16884 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16885 				    NETIF_F_GSO_GRE_CSUM;
16886 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16887 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16888 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16889 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16890 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16891 	if (BNXT_SUPPORTS_TPA(bp))
16892 		dev->hw_features |= NETIF_F_GRO_HW;
16893 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16894 	if (dev->features & NETIF_F_GRO_HW)
16895 		dev->features &= ~NETIF_F_LRO;
16896 	dev->priv_flags |= IFF_UNICAST_FLT;
16897 
16898 	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16899 	if (bp->tso_max_segs)
16900 		netif_set_tso_max_segs(dev, bp->tso_max_segs);
16901 
16902 	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16903 			    NETDEV_XDP_ACT_RX_SG;
16904 
16905 #ifdef CONFIG_BNXT_SRIOV
16906 	init_waitqueue_head(&bp->sriov_cfg_wait);
16907 #endif
16908 	if (BNXT_SUPPORTS_TPA(bp)) {
16909 		bp->gro_func = bnxt_gro_func_5730x;
16910 		if (BNXT_CHIP_P4(bp))
16911 			bp->gro_func = bnxt_gro_func_5731x;
16912 		else if (BNXT_CHIP_P5_PLUS(bp))
16913 			bp->gro_func = bnxt_gro_func_5750x;
16914 	}
16915 	if (!BNXT_CHIP_P4_PLUS(bp))
16916 		bp->flags |= BNXT_FLAG_DOUBLE_DB;
16917 
16918 	rc = bnxt_init_mac_addr(bp);
16919 	if (rc) {
16920 		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16921 		rc = -EADDRNOTAVAIL;
16922 		goto init_err_pci_clean;
16923 	}
16924 
16925 	if (BNXT_PF(bp)) {
16926 		/* Read the adapter's DSN to use as the eswitch switch_id */
16927 		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16928 	}
16929 
16930 	/* MTU range: 60 - FW-defined max */
16931 	dev->min_mtu = ETH_ZLEN;
16932 	dev->max_mtu = bp->max_mtu;
16933 
16934 	rc = bnxt_probe_phy(bp, true);
16935 	if (rc)
16936 		goto init_err_pci_clean;
16937 
16938 	hw_resc = &bp->hw_resc;
16939 	bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16940 		       BNXT_L2_FLTR_MAX_FLTR;
16941 	/* Older firmware may not report these filters properly */
16942 	if (bp->max_fltr < BNXT_MAX_FLTR)
16943 		bp->max_fltr = BNXT_MAX_FLTR;
16944 	bnxt_init_l2_fltr_tbl(bp);
16945 	__bnxt_set_rx_skb_mode(bp, false);
16946 	bnxt_set_tpa_flags(bp);
16947 	bnxt_init_ring_params(bp);
16948 	bnxt_set_ring_params(bp);
16949 	bnxt_rdma_aux_device_init(bp);
16950 	rc = bnxt_set_dflt_rings(bp, true);
16951 	if (rc) {
16952 		if (BNXT_VF(bp) && rc == -ENODEV) {
16953 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16954 		} else {
16955 			netdev_err(bp->dev, "Not enough rings available.\n");
16956 			rc = -ENOMEM;
16957 		}
16958 		goto init_err_pci_clean;
16959 	}
16960 
16961 	bnxt_fw_init_one_p3(bp);
16962 
16963 	bnxt_init_dflt_coal(bp);
16964 
16965 	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16966 		bp->flags |= BNXT_FLAG_STRIP_VLAN;
16967 
16968 	rc = bnxt_init_int_mode(bp);
16969 	if (rc)
16970 		goto init_err_pci_clean;
16971 
16972 	/* No TC has been set yet and rings may have been trimmed due to
16973 	 * limited MSI-X vectors, so we re-initialize the TX rings per TC.
16974 	 */
16975 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16976 
16977 	if (BNXT_PF(bp)) {
16978 		if (!bnxt_pf_wq) {
16979 			bnxt_pf_wq =
16980 				create_singlethread_workqueue("bnxt_pf_wq");
16981 			if (!bnxt_pf_wq) {
16982 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
16983 				rc = -ENOMEM;
16984 				goto init_err_pci_clean;
16985 			}
16986 		}
16987 		rc = bnxt_init_tc(bp);
16988 		if (rc)
16989 			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16990 				   rc);
16991 	}
16992 
16993 	bnxt_inv_fw_health_reg(bp);
16994 	rc = bnxt_dl_register(bp);
16995 	if (rc)
16996 		goto init_err_dl;
16997 
16998 	INIT_LIST_HEAD(&bp->usr_fltr_list);
16999 
17000 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
17001 		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
17002 
17003 	dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
17004 	if (BNXT_SUPPORTS_QUEUE_API(bp))
17005 		dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
17006 	dev->netmem_tx = true;
17007 
17008 	rc = register_netdev(dev);
17009 	if (rc)
17010 		goto init_err_cleanup;
17011 
17012 	bnxt_dl_fw_reporters_create(bp);
17013 
17014 	bnxt_rdma_aux_device_add(bp);
17015 
17016 	bnxt_print_device_info(bp);
17017 
17018 	pci_save_state(pdev);
17019 
17020 	return 0;
17021 init_err_cleanup:
17022 	bnxt_rdma_aux_device_uninit(bp);
17023 	bnxt_dl_unregister(bp);
17024 init_err_dl:
17025 	bnxt_shutdown_tc(bp);
17026 	bnxt_clear_int_mode(bp);
17027 
17028 init_err_pci_clean:
17029 	bnxt_hwrm_func_drv_unrgtr(bp);
17030 	bnxt_ptp_clear(bp);
17031 	kfree(bp->ptp_cfg);
17032 	bp->ptp_cfg = NULL;
17033 	bnxt_free_hwrm_resources(bp);
17034 	bnxt_hwmon_uninit(bp);
17035 	bnxt_ethtool_free(bp);
17036 	kfree(bp->fw_health);
17037 	bp->fw_health = NULL;
17038 	bnxt_cleanup_pci(bp);
17039 	bnxt_free_ctx_mem(bp, true);
17040 	bnxt_free_crash_dump_mem(bp);
17041 	kfree(bp->rss_indir_tbl);
17042 	bp->rss_indir_tbl = NULL;
17043 
17044 init_err_free:
17045 	free_netdev(dev);
17046 	return rc;
17047 }
17048 
17049 static void bnxt_shutdown(struct pci_dev *pdev)
17050 {
17051 	struct net_device *dev = pci_get_drvdata(pdev);
17052 	struct bnxt *bp;
17053 
17054 	if (!dev)
17055 		return;
17056 
17057 	rtnl_lock();
17058 	netdev_lock(dev);
17059 	bp = netdev_priv(dev);
17060 	if (!bp)
17061 		goto shutdown_exit;
17062 
17063 	if (netif_running(dev))
17064 		netif_close(dev);
17065 
17066 	if (bnxt_hwrm_func_drv_unrgtr(bp)) {
17067 		pcie_flr(pdev);
17068 		goto shutdown_exit;
17069 	}
17070 	bnxt_ptp_clear(bp);
17071 	bnxt_clear_int_mode(bp);
17072 	pci_disable_device(pdev);
17073 
17074 	if (system_state == SYSTEM_POWER_OFF) {
17075 		pci_wake_from_d3(pdev, bp->wol);
17076 		pci_set_power_state(pdev, PCI_D3hot);
17077 	}
17078 
17079 shutdown_exit:
17080 	netdev_unlock(dev);
17081 	rtnl_unlock();
17082 }
17083 
17084 #ifdef CONFIG_PM_SLEEP
17085 static int bnxt_suspend(struct device *device)
17086 {
17087 	struct net_device *dev = dev_get_drvdata(device);
17088 	struct bnxt *bp = netdev_priv(dev);
17089 	int rc = 0;
17090 
17091 	bnxt_ulp_stop(bp);
17092 
17093 	netdev_lock(dev);
17094 	if (netif_running(dev)) {
17095 		netif_device_detach(dev);
17096 		rc = bnxt_close(dev);
17097 	}
17098 	bnxt_hwrm_func_drv_unrgtr(bp);
17099 	bnxt_ptp_clear(bp);
17100 	pci_disable_device(bp->pdev);
17101 	bnxt_free_ctx_mem(bp, false);
17102 	netdev_unlock(dev);
17103 	return rc;
17104 }
17105 
17106 static int bnxt_resume(struct device *device)
17107 {
17108 	struct net_device *dev = dev_get_drvdata(device);
17109 	struct bnxt *bp = netdev_priv(dev);
17110 	int rc = 0;
17111 
17112 	netdev_lock(dev);
17113 	rc = pci_enable_device(bp->pdev);
17114 	if (rc) {
17115 		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
17116 			   rc);
17117 		goto resume_exit;
17118 	}
17119 	pci_set_master(bp->pdev);
17120 	if (bnxt_hwrm_ver_get(bp)) {
17121 		rc = -ENODEV;
17122 		goto resume_exit;
17123 	}
17124 	rc = bnxt_hwrm_func_reset(bp);
17125 	if (rc) {
17126 		rc = -EBUSY;
17127 		goto resume_exit;
17128 	}
17129 
17130 	rc = bnxt_hwrm_func_qcaps(bp);
17131 	if (rc)
17132 		goto resume_exit;
17133 
17134 	bnxt_clear_reservations(bp, true);
17135 
17136 	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
17137 		rc = -ENODEV;
17138 		goto resume_exit;
17139 	}
17140 	if (bp->fw_crash_mem)
17141 		bnxt_hwrm_crash_dump_mem_cfg(bp);
17142 
17143 	if (bnxt_ptp_init(bp)) {
17144 		kfree(bp->ptp_cfg);
17145 		bp->ptp_cfg = NULL;
17146 	}
17147 	bnxt_get_wol_settings(bp);
17148 	if (netif_running(dev)) {
17149 		rc = bnxt_open(dev);
17150 		if (!rc)
17151 			netif_device_attach(dev);
17152 	}
17153 
17154 resume_exit:
17155 	netdev_unlock(bp->dev);
17156 	bnxt_ulp_start(bp, rc);
17157 	if (!rc)
17158 		bnxt_reenable_sriov(bp);
17159 	return rc;
17160 }
17161 
17162 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
17163 #define BNXT_PM_OPS (&bnxt_pm_ops)
17164 
17165 #else
17166 
17167 #define BNXT_PM_OPS NULL
17168 
17169 #endif /* CONFIG_PM_SLEEP */
17170 
17171 /**
17172  * bnxt_io_error_detected - called when PCI error is detected
17173  * @pdev: Pointer to PCI device
17174  * @state: The current pci connection state
17175  *
17176  * This function is called after a PCI bus error affecting
17177  * this device has been detected.
17178  */
17179 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
17180 					       pci_channel_state_t state)
17181 {
17182 	struct net_device *netdev = pci_get_drvdata(pdev);
17183 	struct bnxt *bp = netdev_priv(netdev);
17184 	bool abort = false;
17185 
17186 	netdev_info(netdev, "PCI I/O error detected\n");
17187 
17188 	bnxt_ulp_stop(bp);
17189 
17190 	netdev_lock(netdev);
17191 	netif_device_detach(netdev);
17192 
17193 	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
17194 		netdev_err(bp->dev, "Firmware reset already in progress\n");
17195 		abort = true;
17196 	}
17197 
17198 	if (abort || state == pci_channel_io_perm_failure) {
17199 		netdev_unlock(netdev);
17200 		return PCI_ERS_RESULT_DISCONNECT;
17201 	}
17202 
17203 	/* The link is no longer reliable if the state is pci_channel_io_frozen,
17204 	 * so disable bus mastering to prevent any potential bad DMAs before
17205 	 * freeing kernel memory.
17206 	 */
17207 	if (state == pci_channel_io_frozen) {
17208 		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
17209 		bnxt_fw_fatal_close(bp);
17210 	}
17211 
17212 	if (netif_running(netdev))
17213 		__bnxt_close_nic(bp, true, true);
17214 
17215 	if (pci_is_enabled(pdev))
17216 		pci_disable_device(pdev);
17217 	bnxt_free_ctx_mem(bp, false);
17218 	netdev_unlock(netdev);
17219 
17220 	/* Request a slot reset. */
17221 	return PCI_ERS_RESULT_NEED_RESET;
17222 }
17223 
17224 /**
17225  * bnxt_io_slot_reset - called after the pci bus has been reset.
17226  * @pdev: Pointer to PCI device
17227  *
17228  * Restart the card from scratch, as if from a cold-boot.
17229  * Restart the card from scratch, as if from a cold boot.
17230  * followed by fixups by BIOS, and has its config space
17231  * set up identically to what it was at cold boot.
17232  */
17233 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
17234 {
17235 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17236 	struct net_device *netdev = pci_get_drvdata(pdev);
17237 	struct bnxt *bp = netdev_priv(netdev);
17238 	int retry = 0;
17239 	int err = 0;
17240 	int off;
17241 
17242 	netdev_info(bp->dev, "PCI Slot Reset\n");
17243 
17244 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17245 	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17246 		msleep(900);
17247 
17248 	netdev_lock(netdev);
17249 
17250 	if (pci_enable_device(pdev)) {
17251 		dev_err(&pdev->dev,
17252 			"Cannot re-enable PCI device after reset.\n");
17253 	} else {
17254 		pci_set_master(pdev);
17255 		/* Upon a fatal error, the device's internal logic that latches
17256 		 * the BAR values is reset and is restored only by rewriting
17257 		 * the BARs.
17258 		 *
17259 		 * Since pci_restore_state() does not rewrite a BAR whose value
17260 		 * matches the previously saved value, the driver must write the
17261 		 * BARs to 0 to force the restore after a fatal error.
17262 		 */
17263 		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17264 				       &bp->state)) {
17265 			for (off = PCI_BASE_ADDRESS_0;
17266 			     off <= PCI_BASE_ADDRESS_5; off += 4)
17267 				pci_write_config_dword(bp->pdev, off, 0);
17268 		}
17269 		pci_restore_state(pdev);
17270 		pci_save_state(pdev);
17271 
17272 		bnxt_inv_fw_health_reg(bp);
17273 		bnxt_try_map_fw_health_reg(bp);
17274 
17275 		/* In some PCIe AER scenarios, the firmware may take up to
17276 		 * 10 seconds to become ready.
17277 		 */
17278 		do {
17279 			err = bnxt_try_recover_fw(bp);
17280 			if (!err)
17281 				break;
17282 			retry++;
17283 		} while (retry < BNXT_FW_SLOT_RESET_RETRY);
17284 
17285 		if (err) {
17286 			dev_err(&pdev->dev, "Firmware not ready\n");
17287 			goto reset_exit;
17288 		}
17289 
17290 		err = bnxt_hwrm_func_reset(bp);
17291 		if (!err)
17292 			result = PCI_ERS_RESULT_RECOVERED;
17293 
17294 		/* IRQ will be initialized later in bnxt_io_resume */
17295 		bnxt_ulp_irq_stop(bp);
17296 		bnxt_clear_int_mode(bp);
17297 	}
17298 
17299 reset_exit:
17300 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17301 	bnxt_clear_reservations(bp, true);
17302 	netdev_unlock(netdev);
17303 
17304 	return result;
17305 }
17306 
17307 /**
17308  * bnxt_io_resume - called when traffic can start flowing again.
17309  * @pdev: Pointer to PCI device
17310  *
17311  * This callback is called when the error recovery driver tells
17312  * us that it's OK to resume normal operation.
17313  */
17314 static void bnxt_io_resume(struct pci_dev *pdev)
17315 {
17316 	struct net_device *netdev = pci_get_drvdata(pdev);
17317 	struct bnxt *bp = netdev_priv(netdev);
17318 	int err;
17319 
17320 	netdev_info(bp->dev, "PCI Slot Resume\n");
17321 	netdev_lock(netdev);
17322 
17323 	err = bnxt_hwrm_func_qcaps(bp);
17324 	if (!err) {
17325 		if (netif_running(netdev)) {
17326 			err = bnxt_open(netdev);
17327 		} else {
17328 			err = bnxt_reserve_rings(bp, true);
17329 			if (!err)
17330 				err = bnxt_init_int_mode(bp);
17331 		}
17332 	}
17333 
17334 	if (!err)
17335 		netif_device_attach(netdev);
17336 
17337 	netdev_unlock(netdev);
17338 	bnxt_ulp_start(bp, err);
17339 	if (!err)
17340 		bnxt_reenable_sriov(bp);
17341 }
17342 
17343 static const struct pci_error_handlers bnxt_err_handler = {
17344 	.error_detected	= bnxt_io_error_detected,
17345 	.slot_reset	= bnxt_io_slot_reset,
17346 	.resume		= bnxt_io_resume
17347 };
17348 
17349 static struct pci_driver bnxt_pci_driver = {
17350 	.name		= DRV_MODULE_NAME,
17351 	.id_table	= bnxt_pci_tbl,
17352 	.probe		= bnxt_init_one,
17353 	.remove		= bnxt_remove_one,
17354 	.shutdown	= bnxt_shutdown,
17355 	.driver.pm	= BNXT_PM_OPS,
17356 	.err_handler	= &bnxt_err_handler,
17357 #if defined(CONFIG_BNXT_SRIOV)
17358 	.sriov_configure = bnxt_sriov_configure,
17359 #endif
17360 };
17361 
17362 static int __init bnxt_init(void)
17363 {
17364 	int err;
17365 
17366 	bnxt_debug_init();
17367 	err = pci_register_driver(&bnxt_pci_driver);
17368 	if (err) {
17369 		bnxt_debug_exit();
17370 		return err;
17371 	}
17372 
17373 	return 0;
17374 }
17375 
17376 static void __exit bnxt_exit(void)
17377 {
17378 	pci_unregister_driver(&bnxt_pci_driver);
17379 	if (bnxt_pf_wq)
17380 		destroy_workqueue(bnxt_pf_wq);
17381 	bnxt_debug_exit();
17382 }
17383 
17384 module_init(bnxt_init);
17385 module_exit(bnxt_exit);
17386