xref: /linux/drivers/net/ethernet/broadcom/bnxt/bnxt.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62 
63 #include "bnxt.h"
64 #include "bnxt_hwrm.h"
65 #include "bnxt_ulp.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77 
78 #define BNXT_TX_TIMEOUT		(5 * HZ)
79 #define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
80 				 NETIF_MSG_TX_ERR)
81 
82 MODULE_IMPORT_NS("NETDEV_INTERNAL");
83 MODULE_LICENSE("GPL");
84 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
85 
86 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
87 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
88 
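/* Maximum packet length (in bytes) eligible for the inline "push" TX path,
 * where the whole packet is copied into the push buffer and written through
 * the doorbell instead of being DMA mapped (see bnxt_start_xmit()).
 */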
89 #define BNXT_TX_PUSH_THRESH 164
90 
91 /* indexed by enum board_idx */
92 static const struct {
93 	char *name;
94 } board_info[] = {
95 	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
96 	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
97 	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
98 	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
99 	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
100 	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
101 	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
102 	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
103 	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
104 	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
105 	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
106 	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
107 	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
108 	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
109 	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
110 	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
111 	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
112 	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
113 	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
114 	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
115 	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
116 	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
117 	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
118 	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
119 	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
120 	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
121 	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
122 	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
123 	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
124 	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
126 	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
127 	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
128 	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
129 	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
130 	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
131 	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
132 	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
133 	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
134 	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
135 	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
136 	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
137 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
138 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
139 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
140 	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
141 	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
142 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
143 	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
144 	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
145 	[NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
146 };
147 
148 static const struct pci_device_id bnxt_pci_tbl[] = {
149 	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
150 	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
151 	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
152 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
153 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
154 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
155 	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
156 	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
157 	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
158 	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
159 	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
160 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
161 	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
162 	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
163 	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
164 	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
165 	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
166 	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
167 	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
168 	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
169 	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
170 	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
171 	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
172 	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
173 	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
174 	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
175 	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
176 	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
177 	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
178 	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
179 	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
180 	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
181 	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
182 	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
183 	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
184 	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
185 	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
186 	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
187 	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
188 	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
189 	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
190 	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
191 	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
192 	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
193 	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
194 	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
195 	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
196 	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
197 	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
198 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
199 #ifdef CONFIG_BNXT_SRIOV
200 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
201 	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
202 	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
203 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
204 	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
205 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
206 	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
207 	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
208 	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
209 	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
210 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215 	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
216 	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
217 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
218 	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
219 	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
220 	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
221 	{ PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
222 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
223 #endif
224 	{ 0 }
225 };
226 
227 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
228 
229 static const u16 bnxt_vf_req_snif[] = {
230 	HWRM_FUNC_CFG,
231 	HWRM_FUNC_VF_CFG,
232 	HWRM_PORT_PHY_QCFG,
233 	HWRM_CFA_L2_FILTER_ALLOC,
234 };
235 
236 static const u16 bnxt_async_events_arr[] = {
237 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
238 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
239 	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
240 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
241 	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
242 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
243 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
244 	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
245 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
246 	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
247 	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
248 	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
249 	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
250 	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
251 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
252 	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
253 	ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
254 };
255 
256 const u16 bnxt_bstore_to_trace[] = {
257 	[BNXT_CTX_SRT]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
258 	[BNXT_CTX_SRT2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
259 	[BNXT_CTX_CRT]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
260 	[BNXT_CTX_CRT2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
261 	[BNXT_CTX_RIGP0]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
262 	[BNXT_CTX_L2HWRM]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
263 	[BNXT_CTX_REHWRM]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
264 	[BNXT_CTX_CA0]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
265 	[BNXT_CTX_CA1]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
266 	[BNXT_CTX_CA2]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
267 	[BNXT_CTX_RIGP1]	= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
268 	[BNXT_CTX_KONG]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
269 	[BNXT_CTX_QPC]		= DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
270 };
271 
272 static struct workqueue_struct *bnxt_pf_wq;
273 
274 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
275 			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
276 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
277 
278 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
279 	.ports = {
280 		.src = 0,
281 		.dst = 0,
282 	},
283 	.addrs = {
284 		.v6addrs = {
285 			.src = BNXT_IPV6_MASK_NONE,
286 			.dst = BNXT_IPV6_MASK_NONE,
287 		},
288 	},
289 };
290 
291 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
292 	.ports = {
293 		.src = cpu_to_be16(0xffff),
294 		.dst = cpu_to_be16(0xffff),
295 	},
296 	.addrs = {
297 		.v6addrs = {
298 			.src = BNXT_IPV6_MASK_ALL,
299 			.dst = BNXT_IPV6_MASK_ALL,
300 		},
301 	},
302 };
303 
304 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
305 	.ports = {
306 		.src = cpu_to_be16(0xffff),
307 		.dst = cpu_to_be16(0xffff),
308 	},
309 	.addrs = {
310 		.v4addrs = {
311 			.src = cpu_to_be32(0xffffffff),
312 			.dst = cpu_to_be32(0xffffffff),
313 		},
314 	},
315 };
316 
317 static bool bnxt_vf_pciid(enum board_idx idx)
318 {
319 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
320 		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
321 		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
322 		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
323 		idx == NETXTREME_E_P7_VF_HV);
324 }
325 
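/* Doorbell helpers: legacy chips use 32-bit completion-ring doorbells
 * written with writel(), while P5+ and P7 chips use 64-bit NQ/CQ doorbells
 * written with bnxt_writeq().
 */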
326 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
327 #define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
328 
329 #define BNXT_DB_CQ(db, idx)						\
330 	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
331 
332 #define BNXT_DB_NQ_P5(db, idx)						\
333 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
334 		    (db)->doorbell)
335 
336 #define BNXT_DB_NQ_P7(db, idx)						\
337 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
338 		    DB_RING_IDX(db, idx), (db)->doorbell)
339 
340 #define BNXT_DB_CQ_ARM(db, idx)						\
341 	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
342 
343 #define BNXT_DB_NQ_ARM_P5(db, idx)					\
344 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
345 		    DB_RING_IDX(db, idx), (db)->doorbell)
346 
347 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
348 {
349 	if (bp->flags & BNXT_FLAG_CHIP_P7)
350 		BNXT_DB_NQ_P7(db, idx);
351 	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
352 		BNXT_DB_NQ_P5(db, idx);
353 	else
354 		BNXT_DB_CQ(db, idx);
355 }
356 
357 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
358 {
359 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
360 		BNXT_DB_NQ_ARM_P5(db, idx);
361 	else
362 		BNXT_DB_CQ_ARM(db, idx);
363 }
364 
365 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
366 {
367 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
368 		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
369 			    DB_RING_IDX(db, idx), db->doorbell);
370 	else
371 		BNXT_DB_CQ(db, idx);
372 }
373 
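/* Slow-path and FW reset work is queued on the dedicated bnxt_pf_wq
 * workqueue for PFs; VFs use the system workqueue.
 */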
374 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
375 {
376 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
377 		return;
378 
379 	if (BNXT_PF(bp))
380 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
381 	else
382 		schedule_delayed_work(&bp->fw_reset_task, delay);
383 }
384 
385 static void __bnxt_queue_sp_work(struct bnxt *bp)
386 {
387 	if (BNXT_PF(bp))
388 		queue_work(bnxt_pf_wq, &bp->sp_task);
389 	else
390 		schedule_work(&bp->sp_task);
391 }
392 
393 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
394 {
395 	set_bit(event, &bp->sp_event);
396 	__bnxt_queue_sp_work(bp);
397 }
398 
399 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
400 {
401 	if (!rxr->bnapi->in_reset) {
402 		rxr->bnapi->in_reset = true;
403 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
404 			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
405 		else
406 			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
407 		__bnxt_queue_sp_work(bp);
408 	}
409 	rxr->rx_next_cons = 0xffff;
410 }
411 
412 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
413 			  u16 curr)
414 {
415 	struct bnxt_napi *bnapi = txr->bnapi;
416 
417 	if (bnapi->tx_fault)
418 		return;
419 
420 	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
421 		   txr->txq_index, txr->tx_hw_cons,
422 		   txr->tx_cons, txr->tx_prod, curr);
423 	WARN_ON_ONCE(1);
424 	bnapi->tx_fault = 1;
425 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
426 }
427 
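/* TX length hint flags, indexed by the packet length in 512-byte units
 * (bnxt_start_xmit() looks this up with length >> 9).
 */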
428 const u16 bnxt_lhint_arr[] = {
429 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
430 	TX_BD_FLAGS_LHINT_512_TO_1023,
431 	TX_BD_FLAGS_LHINT_1024_TO_2047,
432 	TX_BD_FLAGS_LHINT_1024_TO_2047,
433 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
444 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
445 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
446 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
447 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
448 };
449 
450 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
451 {
452 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
453 
454 	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
455 		return 0;
456 
457 	return md_dst->u.port_info.port_id;
458 }
459 
460 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
461 			     u16 prod)
462 {
463 	/* Sync BD data before updating doorbell */
464 	wmb();
465 	bnxt_db_write(bp, &txr->tx_db, prod);
466 	txr->kick_pending = 0;
467 }
468 
469 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
470 {
471 	struct bnxt *bp = netdev_priv(dev);
472 	struct tx_bd *txbd, *txbd0;
473 	struct tx_bd_ext *txbd1;
474 	struct netdev_queue *txq;
475 	int i;
476 	dma_addr_t mapping;
477 	unsigned int length, pad = 0;
478 	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
479 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
480 	struct pci_dev *pdev = bp->pdev;
481 	u16 prod, last_frag, txts_prod;
482 	struct bnxt_tx_ring_info *txr;
483 	struct bnxt_sw_tx_bd *tx_buf;
484 	__le32 lflags = 0;
485 	skb_frag_t *frag;
486 
487 	i = skb_get_queue_mapping(skb);
488 	if (unlikely(i >= bp->tx_nr_rings)) {
489 		dev_kfree_skb_any(skb);
490 		dev_core_stats_tx_dropped_inc(dev);
491 		return NETDEV_TX_OK;
492 	}
493 
494 	txq = netdev_get_tx_queue(dev, i);
495 	txr = &bp->tx_ring[bp->tx_ring_map[i]];
496 	prod = txr->tx_prod;
497 
498 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
499 	if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
500 		netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d.  SKB will be linearized.\n",
501 				 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
502 		if (skb_linearize(skb)) {
503 			dev_kfree_skb_any(skb);
504 			dev_core_stats_tx_dropped_inc(dev);
505 			return NETDEV_TX_OK;
506 		}
507 	}
508 #endif
509 	free_size = bnxt_tx_avail(bp, txr);
510 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
511 		/* We must have raced with NAPI cleanup */
512 		if (net_ratelimit() && txr->kick_pending)
513 			netif_warn(bp, tx_err, dev,
514 				   "bnxt: ring busy w/ flush pending!\n");
515 		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
516 					bp->tx_wake_thresh))
517 			return NETDEV_TX_BUSY;
518 	}
519 
520 	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
521 		goto tx_free;
522 
523 	length = skb->len;
524 	len = skb_headlen(skb);
525 	last_frag = skb_shinfo(skb)->nr_frags;
526 
527 	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
528 
529 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
530 	tx_buf->skb = skb;
531 	tx_buf->nr_frags = last_frag;
532 
533 	vlan_tag_flags = 0;
534 	cfa_action = bnxt_xmit_get_cfa_action(skb);
535 	if (skb_vlan_tag_present(skb)) {
536 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
537 				 skb_vlan_tag_get(skb);
538 		/* Currently supports 802.1Q and 802.1AD VLAN offloads;
539 		 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
540 		 */
541 		if (skb->vlan_proto == htons(ETH_P_8021Q))
542 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
543 	}
544 
545 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
546 	    ptp->tx_tstamp_en) {
547 		if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
548 			lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
549 			tx_buf->is_ts_pkt = 1;
550 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
551 		} else if (!skb_is_gso(skb)) {
552 			u16 seq_id, hdr_off;
553 
554 			if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
555 			    !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
556 				if (vlan_tag_flags)
557 					hdr_off += VLAN_HLEN;
558 				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
559 				tx_buf->is_ts_pkt = 1;
560 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
561 
562 				ptp->txts_req[txts_prod].tx_seqid = seq_id;
563 				ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
564 				tx_buf->txts_prod = txts_prod;
565 			}
566 		}
567 	}
568 	if (unlikely(skb->no_fcs))
569 		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
570 
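	/* Push path: when the ring is empty, the packet is within the push
	 * threshold, and no special lflags are needed, copy the whole packet
	 * into the push buffer and write it directly through the doorbell.
	 */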
571 	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
572 	    skb_frags_readable(skb) && !lflags) {
573 		struct tx_push_buffer *tx_push_buf = txr->tx_push;
574 		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
575 		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
576 		void __iomem *db = txr->tx_db.doorbell;
577 		void *pdata = tx_push_buf->data;
578 		u64 *end;
579 		int j, push_len;
580 
581 		/* Set COAL_NOW to be ready quickly for the next push */
582 		tx_push->tx_bd_len_flags_type =
583 			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
584 					TX_BD_TYPE_LONG_TX_BD |
585 					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
586 					TX_BD_FLAGS_COAL_NOW |
587 					TX_BD_FLAGS_PACKET_END |
588 					TX_BD_CNT(2));
589 
590 		if (skb->ip_summed == CHECKSUM_PARTIAL)
591 			tx_push1->tx_bd_hsize_lflags =
592 					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
593 		else
594 			tx_push1->tx_bd_hsize_lflags = 0;
595 
596 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
597 		tx_push1->tx_bd_cfa_action =
598 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
599 
600 		end = pdata + length;
601 		end = PTR_ALIGN(end, 8) - 1;
602 		*end = 0;
603 
604 		skb_copy_from_linear_data(skb, pdata, len);
605 		pdata += len;
606 		for (j = 0; j < last_frag; j++) {
607 			void *fptr;
608 
609 			frag = &skb_shinfo(skb)->frags[j];
610 			fptr = skb_frag_address_safe(frag);
611 			if (!fptr)
612 				goto normal_tx;
613 
614 			memcpy(pdata, fptr, skb_frag_size(frag));
615 			pdata += skb_frag_size(frag);
616 		}
617 
618 		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
619 		txbd->tx_bd_haddr = txr->data_mapping;
620 		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
621 		prod = NEXT_TX(prod);
622 		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
623 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
624 		memcpy(txbd, tx_push1, sizeof(*txbd));
625 		prod = NEXT_TX(prod);
626 		tx_push->doorbell =
627 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
628 				    DB_RING_IDX(&txr->tx_db, prod));
629 		WRITE_ONCE(txr->tx_prod, prod);
630 
631 		tx_buf->is_push = 1;
632 		netdev_tx_sent_queue(txq, skb->len);
633 		wmb();	/* Sync is_push and byte queue before pushing data */
634 
635 		push_len = (length + sizeof(*tx_push) + 7) / 8;
636 		if (push_len > 16) {
637 			__iowrite64_copy(db, tx_push_buf, 16);
638 			__iowrite32_copy(db + 4, tx_push_buf + 1,
639 					 (push_len - 16) << 1);
640 		} else {
641 			__iowrite64_copy(db, tx_push_buf, push_len);
642 		}
643 
644 		goto tx_done;
645 	}
646 
647 normal_tx:
648 	if (length < BNXT_MIN_PKT_SIZE) {
649 		pad = BNXT_MIN_PKT_SIZE - length;
650 		if (skb_pad(skb, pad))
651 			/* SKB already freed. */
652 			goto tx_kick_pending;
653 		length = BNXT_MIN_PKT_SIZE;
654 	}
655 
656 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
657 
658 	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
659 		goto tx_free;
660 
661 	dma_unmap_addr_set(tx_buf, mapping, mapping);
662 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
663 		TX_BD_CNT(last_frag + 2);
664 
665 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
666 	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
667 
668 	prod = NEXT_TX(prod);
669 	txbd1 = (struct tx_bd_ext *)
670 		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
671 
672 	txbd1->tx_bd_hsize_lflags = lflags;
673 	if (skb_is_gso(skb)) {
674 		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
675 		u32 hdr_len;
676 
677 		if (skb->encapsulation) {
678 			if (udp_gso)
679 				hdr_len = skb_inner_transport_offset(skb) +
680 					  sizeof(struct udphdr);
681 			else
682 				hdr_len = skb_inner_tcp_all_headers(skb);
683 		} else if (udp_gso) {
684 			hdr_len = skb_transport_offset(skb) +
685 				  sizeof(struct udphdr);
686 		} else {
687 			hdr_len = skb_tcp_all_headers(skb);
688 		}
689 
690 		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
691 					TX_BD_FLAGS_T_IPID |
692 					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
693 		length = skb_shinfo(skb)->gso_size;
694 		txbd1->tx_bd_mss = cpu_to_le32(length);
695 		length += hdr_len;
696 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
697 		txbd1->tx_bd_hsize_lflags |=
698 			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
699 		txbd1->tx_bd_mss = 0;
700 	}
701 
702 	length >>= 9;
703 	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
704 		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
705 				     skb->len);
706 		i = 0;
707 		goto tx_dma_error;
708 	}
709 	flags |= bnxt_lhint_arr[length];
710 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
711 
712 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
713 	txbd1->tx_bd_cfa_action =
714 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
715 	txbd0 = txbd;
716 	for (i = 0; i < last_frag; i++) {
717 		frag = &skb_shinfo(skb)->frags[i];
718 		prod = NEXT_TX(prod);
719 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
720 
721 		len = skb_frag_size(frag);
722 		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
723 					   DMA_TO_DEVICE);
724 
725 		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
726 			goto tx_dma_error;
727 
728 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
729 		netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
730 					  mapping, mapping);
731 
732 		txbd->tx_bd_haddr = cpu_to_le64(mapping);
733 
734 		flags = len << TX_BD_LEN_SHIFT;
735 		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
736 	}
737 
738 	flags &= ~TX_BD_LEN;
739 	txbd->tx_bd_len_flags_type =
740 		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
741 			    TX_BD_FLAGS_PACKET_END);
742 
743 	netdev_tx_sent_queue(txq, skb->len);
744 
745 	skb_tx_timestamp(skb);
746 
747 	prod = NEXT_TX(prod);
748 	WRITE_ONCE(txr->tx_prod, prod);
749 
750 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
751 		bnxt_txr_db_kick(bp, txr, prod);
752 	} else {
753 		if (free_size >= bp->tx_wake_thresh)
754 			txbd0->tx_bd_len_flags_type |=
755 				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
756 		txr->kick_pending = 1;
757 	}
758 
759 tx_done:
760 
761 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
762 		if (netdev_xmit_more() && !tx_buf->is_push) {
763 			txbd0->tx_bd_len_flags_type &=
764 				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
765 			bnxt_txr_db_kick(bp, txr, prod);
766 		}
767 
768 		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
769 				   bp->tx_wake_thresh);
770 	}
771 	return NETDEV_TX_OK;
772 
773 tx_dma_error:
774 	last_frag = i;
775 
776 	/* start back at beginning and unmap skb */
777 	prod = txr->tx_prod;
778 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
779 	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
780 			 skb_headlen(skb), DMA_TO_DEVICE);
781 	prod = NEXT_TX(prod);
782 
783 	/* unmap remaining mapped pages */
784 	for (i = 0; i < last_frag; i++) {
785 		prod = NEXT_TX(prod);
786 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
787 		frag = &skb_shinfo(skb)->frags[i];
788 		netmem_dma_unmap_page_attrs(&pdev->dev,
789 					    dma_unmap_addr(tx_buf, mapping),
790 					    skb_frag_size(frag),
791 					    DMA_TO_DEVICE, 0);
792 	}
793 
794 tx_free:
795 	dev_kfree_skb_any(skb);
796 tx_kick_pending:
797 	if (BNXT_TX_PTP_IS_SET(lflags)) {
798 		txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
799 		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
800 		if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
801 			/* set SKB to err so PTP worker will clean up */
802 			ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
803 	}
804 	if (txr->kick_pending)
805 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
806 	txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
807 	dev_core_stats_tx_dropped_inc(dev);
808 	return NETDEV_TX_OK;
809 }
810 
811 /* Returns true if some TX packets remain unprocessed. */
812 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
813 			  int budget)
814 {
815 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
816 	struct pci_dev *pdev = bp->pdev;
817 	u16 hw_cons = txr->tx_hw_cons;
818 	unsigned int tx_bytes = 0;
819 	u16 cons = txr->tx_cons;
820 	skb_frag_t *frag;
821 	int tx_pkts = 0;
822 	bool rc = false;
823 
824 	while (RING_TX(bp, cons) != hw_cons) {
825 		struct bnxt_sw_tx_bd *tx_buf;
826 		struct sk_buff *skb;
827 		bool is_ts_pkt;
828 		int j, last;
829 
830 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
831 		skb = tx_buf->skb;
832 
833 		if (unlikely(!skb)) {
834 			bnxt_sched_reset_txr(bp, txr, cons);
835 			return rc;
836 		}
837 
838 		is_ts_pkt = tx_buf->is_ts_pkt;
839 		if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
840 			rc = true;
841 			break;
842 		}
843 
844 		cons = NEXT_TX(cons);
845 		tx_pkts++;
846 		tx_bytes += skb->len;
847 		tx_buf->skb = NULL;
848 		tx_buf->is_ts_pkt = 0;
849 
850 		if (tx_buf->is_push) {
851 			tx_buf->is_push = 0;
852 			goto next_tx_int;
853 		}
854 
855 		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
856 				 skb_headlen(skb), DMA_TO_DEVICE);
857 		last = tx_buf->nr_frags;
858 
859 		for (j = 0; j < last; j++) {
860 			frag = &skb_shinfo(skb)->frags[j];
861 			cons = NEXT_TX(cons);
862 			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
863 			netmem_dma_unmap_page_attrs(&pdev->dev,
864 						    dma_unmap_addr(tx_buf,
865 								   mapping),
866 						    skb_frag_size(frag),
867 						    DMA_TO_DEVICE, 0);
868 		}
869 		if (unlikely(is_ts_pkt)) {
870 			if (BNXT_CHIP_P5(bp)) {
871 				/* PTP worker takes ownership of the skb */
872 				bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
873 				skb = NULL;
874 			}
875 		}
876 
877 next_tx_int:
878 		cons = NEXT_TX(cons);
879 
880 		napi_consume_skb(skb, budget);
881 	}
882 
883 	WRITE_ONCE(txr->tx_cons, cons);
884 
885 	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
886 				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
887 				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
888 
889 	return rc;
890 }
891 
892 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
893 {
894 	struct bnxt_tx_ring_info *txr;
895 	bool more = false;
896 	int i;
897 
898 	bnxt_for_each_napi_tx(i, bnapi, txr) {
899 		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
900 			more |= __bnxt_tx_int(bp, txr, budget);
901 	}
902 	if (!more)
903 		bnapi->events &= ~BNXT_TX_CMP_EVENT;
904 }
905 
906 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
907 {
908 	return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
909 }
910 
911 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
912 					 struct bnxt_rx_ring_info *rxr,
913 					 unsigned int *offset,
914 					 gfp_t gfp)
915 {
916 	struct page *page;
917 
918 	if (rxr->rx_page_size < PAGE_SIZE) {
919 		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
920 						rxr->rx_page_size);
921 	} else {
922 		page = page_pool_dev_alloc_pages(rxr->page_pool);
923 		*offset = 0;
924 	}
925 	if (!page)
926 		return NULL;
927 
928 	*mapping = page_pool_get_dma_addr(page) + *offset;
929 	return page;
930 }
931 
932 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
933 					 struct bnxt_rx_ring_info *rxr,
934 					 unsigned int *offset,
935 					 gfp_t gfp)
936 {
937 	netmem_ref netmem;
938 
939 	if (rxr->rx_page_size < PAGE_SIZE) {
940 		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
941 						     rxr->rx_page_size, gfp);
942 	} else {
943 		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
944 		*offset = 0;
945 	}
946 	if (!netmem)
947 		return 0;
948 
949 	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
950 	return netmem;
951 }
952 
953 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
954 				       struct bnxt_rx_ring_info *rxr,
955 				       gfp_t gfp)
956 {
957 	unsigned int offset;
958 	struct page *page;
959 
960 	page = page_pool_alloc_frag(rxr->head_pool, &offset,
961 				    bp->rx_buf_size, gfp);
962 	if (!page)
963 		return NULL;
964 
965 	*mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
966 	return page_address(page) + offset;
967 }
968 
969 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
970 		       u16 prod, gfp_t gfp)
971 {
972 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
973 	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
974 	dma_addr_t mapping;
975 
976 	if (BNXT_RX_PAGE_MODE(bp)) {
977 		unsigned int offset;
978 		struct page *page =
979 			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
980 
981 		if (!page)
982 			return -ENOMEM;
983 
984 		mapping += bp->rx_dma_offset;
985 		rx_buf->data = page;
986 		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
987 	} else {
988 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
989 
990 		if (!data)
991 			return -ENOMEM;
992 
993 		rx_buf->data = data;
994 		rx_buf->data_ptr = data + bp->rx_offset;
995 	}
996 	rx_buf->mapping = mapping;
997 
998 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
999 	return 0;
1000 }
1001 
1002 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
1003 {
1004 	u16 prod = rxr->rx_prod;
1005 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1006 	struct bnxt *bp = rxr->bnapi->bp;
1007 	struct rx_bd *cons_bd, *prod_bd;
1008 
1009 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1010 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1011 
1012 	prod_rx_buf->data = data;
1013 	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1014 
1015 	prod_rx_buf->mapping = cons_rx_buf->mapping;
1016 
1017 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1018 	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1019 
1020 	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1021 }
1022 
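/* Find the next free slot in the aggregation buffer bitmap, wrapping back
 * to the start of the bitmap if necessary.
 */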
1023 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1024 {
1025 	u16 next, max = rxr->rx_agg_bmap_size;
1026 
1027 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1028 	if (next >= max)
1029 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1030 	return next;
1031 }
1032 
1033 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1034 				u16 prod, gfp_t gfp)
1035 {
1036 	struct rx_bd *rxbd =
1037 		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1038 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1039 	u16 sw_prod = rxr->rx_sw_agg_prod;
1040 	unsigned int offset = 0;
1041 	dma_addr_t mapping;
1042 	netmem_ref netmem;
1043 
1044 	netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
1045 	if (!netmem)
1046 		return -ENOMEM;
1047 
1048 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1049 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1050 
1051 	__set_bit(sw_prod, rxr->rx_agg_bmap);
1052 	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1053 	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1054 
1055 	rx_agg_buf->netmem = netmem;
1056 	rx_agg_buf->offset = offset;
1057 	rx_agg_buf->mapping = mapping;
1058 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1059 	rxbd->rx_bd_opaque = sw_prod;
1060 	return 0;
1061 }
1062 
1063 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1064 				       struct bnxt_cp_ring_info *cpr,
1065 				       u16 cp_cons, u16 curr)
1066 {
1067 	struct rx_agg_cmp *agg;
1068 
1069 	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1070 	agg = (struct rx_agg_cmp *)
1071 		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1072 	return agg;
1073 }
1074 
1075 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1076 					      struct bnxt_rx_ring_info *rxr,
1077 					      u16 agg_id, u16 curr)
1078 {
1079 	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1080 
1081 	return &tpa_info->agg_arr[curr];
1082 }
1083 
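/* Recycle the aggregation buffers referenced by a completion back onto the
 * aggregation ring (used on error/discard paths and when aborting TPA).
 */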
1084 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1085 				   u16 start, u32 agg_bufs, bool tpa)
1086 {
1087 	struct bnxt_napi *bnapi = cpr->bnapi;
1088 	struct bnxt *bp = bnapi->bp;
1089 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1090 	u16 prod = rxr->rx_agg_prod;
1091 	u16 sw_prod = rxr->rx_sw_agg_prod;
1092 	bool p5_tpa = false;
1093 	u32 i;
1094 
1095 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1096 		p5_tpa = true;
1097 
1098 	for (i = 0; i < agg_bufs; i++) {
1099 		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1100 		struct rx_agg_cmp *agg;
1101 		struct rx_bd *prod_bd;
1102 		netmem_ref netmem;
1103 		u16 cons;
1104 
1105 		if (p5_tpa)
1106 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1107 		else
1108 			agg = bnxt_get_agg(bp, cpr, idx, start + i);
1109 		cons = agg->rx_agg_cmp_opaque;
1110 		__clear_bit(cons, rxr->rx_agg_bmap);
1111 
1112 		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1113 			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1114 
1115 		__set_bit(sw_prod, rxr->rx_agg_bmap);
1116 		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1117 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1118 
1119 		/* It is possible for sw_prod to be equal to cons, so
1120 		 * set cons_rx_buf->netmem to 0 first.
1121 		 */
1122 		netmem = cons_rx_buf->netmem;
1123 		cons_rx_buf->netmem = 0;
1124 		prod_rx_buf->netmem = netmem;
1125 		prod_rx_buf->offset = cons_rx_buf->offset;
1126 
1127 		prod_rx_buf->mapping = cons_rx_buf->mapping;
1128 
1129 		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1130 
1131 		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1132 		prod_bd->rx_bd_opaque = sw_prod;
1133 
1134 		prod = NEXT_RX_AGG(prod);
1135 		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1136 	}
1137 	rxr->rx_agg_prod = prod;
1138 	rxr->rx_sw_agg_prod = sw_prod;
1139 }
1140 
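/* Build an skb directly around the receive page buffer; bnxt_rx_page_skb()
 * below instead copies the packet headers into a new skb and attaches the
 * page as a fragment.
 */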
1141 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1142 					      struct bnxt_rx_ring_info *rxr,
1143 					      u16 cons, void *data, u8 *data_ptr,
1144 					      dma_addr_t dma_addr,
1145 					      unsigned int offset_and_len)
1146 {
1147 	unsigned int len = offset_and_len & 0xffff;
1148 	struct page *page = data;
1149 	u16 prod = rxr->rx_prod;
1150 	struct sk_buff *skb;
1151 	int err;
1152 
1153 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1154 	if (unlikely(err)) {
1155 		bnxt_reuse_rx_data(rxr, cons, data);
1156 		return NULL;
1157 	}
1158 	dma_addr -= bp->rx_dma_offset;
1159 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1160 				bp->rx_dir);
1161 	skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
1162 	if (!skb) {
1163 		page_pool_recycle_direct(rxr->page_pool, page);
1164 		return NULL;
1165 	}
1166 	skb_mark_for_recycle(skb);
1167 	skb_reserve(skb, bp->rx_offset);
1168 	__skb_put(skb, len);
1169 
1170 	return skb;
1171 }
1172 
1173 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1174 					struct bnxt_rx_ring_info *rxr,
1175 					u16 cons, void *data, u8 *data_ptr,
1176 					dma_addr_t dma_addr,
1177 					unsigned int offset_and_len)
1178 {
1179 	unsigned int payload = offset_and_len >> 16;
1180 	unsigned int len = offset_and_len & 0xffff;
1181 	skb_frag_t *frag;
1182 	struct page *page = data;
1183 	u16 prod = rxr->rx_prod;
1184 	struct sk_buff *skb;
1185 	int off, err;
1186 
1187 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1188 	if (unlikely(err)) {
1189 		bnxt_reuse_rx_data(rxr, cons, data);
1190 		return NULL;
1191 	}
1192 	dma_addr -= bp->rx_dma_offset;
1193 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
1194 				bp->rx_dir);
1195 
1196 	if (unlikely(!payload))
1197 		payload = eth_get_headlen(bp->dev, data_ptr, len);
1198 
1199 	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1200 	if (!skb) {
1201 		page_pool_recycle_direct(rxr->page_pool, page);
1202 		return NULL;
1203 	}
1204 
1205 	skb_mark_for_recycle(skb);
1206 	off = (void *)data_ptr - page_address(page);
1207 	skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
1208 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1209 	       payload + NET_IP_ALIGN);
1210 
1211 	frag = &skb_shinfo(skb)->frags[0];
1212 	skb_frag_size_sub(frag, payload);
1213 	skb_frag_off_add(frag, payload);
1214 	skb->data_len -= payload;
1215 	skb->tail += payload;
1216 
1217 	return skb;
1218 }
1219 
1220 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1221 				   struct bnxt_rx_ring_info *rxr, u16 cons,
1222 				   void *data, u8 *data_ptr,
1223 				   dma_addr_t dma_addr,
1224 				   unsigned int offset_and_len)
1225 {
1226 	u16 prod = rxr->rx_prod;
1227 	struct sk_buff *skb;
1228 	int err;
1229 
1230 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1231 	if (unlikely(err)) {
1232 		bnxt_reuse_rx_data(rxr, cons, data);
1233 		return NULL;
1234 	}
1235 
1236 	skb = napi_build_skb(data, bp->rx_buf_size);
1237 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1238 				bp->rx_dir);
1239 	if (!skb) {
1240 		page_pool_free_va(rxr->head_pool, data, true);
1241 		return NULL;
1242 	}
1243 
1244 	skb_mark_for_recycle(skb);
1245 	skb_reserve(skb, bp->rx_offset);
1246 	skb_put(skb, offset_and_len & 0xffff);
1247 	return skb;
1248 }
1249 
1250 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
1251 				 struct bnxt_cp_ring_info *cpr,
1252 				 u16 idx, u32 agg_bufs, bool tpa,
1253 				 struct sk_buff *skb,
1254 				 struct xdp_buff *xdp)
1255 {
1256 	struct bnxt_napi *bnapi = cpr->bnapi;
1257 	struct skb_shared_info *shinfo;
1258 	struct bnxt_rx_ring_info *rxr;
1259 	u32 i, total_frag_len = 0;
1260 	bool p5_tpa = false;
1261 	u16 prod;
1262 
1263 	rxr = bnapi->rx_ring;
1264 	prod = rxr->rx_agg_prod;
1265 
1266 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1267 		p5_tpa = true;
1268 
1269 	if (skb)
1270 		shinfo = skb_shinfo(skb);
1271 	else
1272 		shinfo = xdp_get_shared_info_from_buff(xdp);
1273 
1274 	for (i = 0; i < agg_bufs; i++) {
1275 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1276 		struct rx_agg_cmp *agg;
1277 		u16 cons, frag_len;
1278 		netmem_ref netmem;
1279 
1280 		if (p5_tpa)
1281 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1282 		else
1283 			agg = bnxt_get_agg(bp, cpr, idx, i);
1284 		cons = agg->rx_agg_cmp_opaque;
1285 		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1286 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1287 
1288 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1289 		if (skb) {
1290 			skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1291 					       cons_rx_buf->offset,
1292 					       frag_len, rxr->rx_page_size);
1293 		} else {
1294 			skb_frag_t *frag = &shinfo->frags[i];
1295 
1296 			skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1297 						  cons_rx_buf->offset,
1298 						  frag_len);
1299 			shinfo->nr_frags = i + 1;
1300 		}
1301 		__clear_bit(cons, rxr->rx_agg_bmap);
1302 
1303 		/* It is possible for bnxt_alloc_rx_netmem() to allocate
1304 		 * a sw_prod index that equals the cons index, so we
1305 		 * need to clear the cons entry now.
1306 		 */
1307 		netmem = cons_rx_buf->netmem;
1308 		cons_rx_buf->netmem = 0;
1309 
1310 		if (xdp && netmem_is_pfmemalloc(netmem))
1311 			xdp_buff_set_frag_pfmemalloc(xdp);
1312 
1313 		if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1314 			if (skb) {
1315 				skb->len -= frag_len;
1316 				skb->data_len -= frag_len;
1317 				skb->truesize -= rxr->rx_page_size;
1318 			}
1319 
1320 			--shinfo->nr_frags;
1321 			cons_rx_buf->netmem = netmem;
1322 
1323 			/* Update prod since some netmems may already have been
1324 			 * allocated.
1325 			 */
1326 			rxr->rx_agg_prod = prod;
1327 			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1328 			return 0;
1329 		}
1330 
1331 		page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1332 						  rxr->rx_page_size);
1333 
1334 		total_frag_len += frag_len;
1335 		prod = NEXT_RX_AGG(prod);
1336 	}
1337 	rxr->rx_agg_prod = prod;
1338 	return total_frag_len;
1339 }
1340 
1341 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
1342 					       struct bnxt_cp_ring_info *cpr,
1343 					       struct sk_buff *skb, u16 idx,
1344 					       u32 agg_bufs, bool tpa)
1345 {
1346 	u32 total_frag_len = 0;
1347 
1348 	total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1349 					       skb, NULL);
1350 	if (!total_frag_len) {
1351 		skb_mark_for_recycle(skb);
1352 		dev_kfree_skb(skb);
1353 		return NULL;
1354 	}
1355 
1356 	return skb;
1357 }
1358 
1359 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
1360 				   struct bnxt_cp_ring_info *cpr,
1361 				   struct xdp_buff *xdp, u16 idx,
1362 				   u32 agg_bufs, bool tpa)
1363 {
1364 	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1365 	u32 total_frag_len = 0;
1366 
1367 	if (!xdp_buff_has_frags(xdp))
1368 		shinfo->nr_frags = 0;
1369 
1370 	total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1371 					       NULL, xdp);
1372 	if (total_frag_len) {
1373 		xdp_buff_set_frags_flag(xdp);
1374 		shinfo->nr_frags = agg_bufs;
1375 		shinfo->xdp_frags_size = total_frag_len;
1376 	}
1377 	return total_frag_len;
1378 }
1379 
1380 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1381 			       u8 agg_bufs, u32 *raw_cons)
1382 {
1383 	u16 last;
1384 	struct rx_agg_cmp *agg;
1385 
1386 	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1387 	last = RING_CMP(*raw_cons);
1388 	agg = (struct rx_agg_cmp *)
1389 		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1390 	return RX_AGG_CMP_VALID(agg, *raw_cons);
1391 }
1392 
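/* Copy a small packet out of the receive buffer into a freshly allocated
 * skb so the original buffer can stay on the ring (rx_copybreak path).
 */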
1393 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1394 				      unsigned int len,
1395 				      dma_addr_t mapping)
1396 {
1397 	struct bnxt *bp = bnapi->bp;
1398 	struct pci_dev *pdev = bp->pdev;
1399 	struct sk_buff *skb;
1400 
1401 	skb = napi_alloc_skb(&bnapi->napi, len);
1402 	if (!skb)
1403 		return NULL;
1404 
1405 	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1406 				bp->rx_dir);
1407 
1408 	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1409 	       len + NET_IP_ALIGN);
1410 
1411 	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1412 				   bp->rx_dir);
1413 
1414 	skb_put(skb, len);
1415 
1416 	return skb;
1417 }
1418 
1419 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1420 				     unsigned int len,
1421 				     dma_addr_t mapping)
1422 {
1423 	return bnxt_copy_data(bnapi, data, len, mapping);
1424 }
1425 
1426 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1427 				     struct xdp_buff *xdp,
1428 				     unsigned int len,
1429 				     dma_addr_t mapping)
1430 {
1431 	unsigned int metasize = 0;
1432 	u8 *data = xdp->data;
1433 	struct sk_buff *skb;
1434 
1435 	len = xdp->data_end - xdp->data_meta;
1436 	metasize = xdp->data - xdp->data_meta;
1437 	data = xdp->data_meta;
1438 
1439 	skb = bnxt_copy_data(bnapi, data, len, mapping);
1440 	if (!skb)
1441 		return skb;
1442 
1443 	if (metasize) {
1444 		skb_metadata_set(skb, metasize);
1445 		__skb_pull(skb, metasize);
1446 	}
1447 
1448 	return skb;
1449 }
1450 
1451 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1452 			   u32 *raw_cons, void *cmp)
1453 {
1454 	struct rx_cmp *rxcmp = cmp;
1455 	u32 tmp_raw_cons = *raw_cons;
1456 	u8 cmp_type, agg_bufs = 0;
1457 
1458 	cmp_type = RX_CMP_TYPE(rxcmp);
1459 
1460 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1461 		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1462 			    RX_CMP_AGG_BUFS) >>
1463 			   RX_CMP_AGG_BUFS_SHIFT;
1464 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1465 		struct rx_tpa_end_cmp *tpa_end = cmp;
1466 
1467 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1468 			return 0;
1469 
1470 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1471 	}
1472 
1473 	if (agg_bufs) {
1474 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1475 			return -EBUSY;
1476 	}
1477 	*raw_cons = tmp_raw_cons;
1478 	return 0;
1479 }
1480 
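/* Map the hardware-supplied aggregation ID to a free software TPA slot and
 * remember the mapping so later TPA completions can find it.
 */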
1481 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1482 {
1483 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1484 	u16 idx = agg_id & MAX_TPA_P5_MASK;
1485 
1486 	if (test_bit(idx, map->agg_idx_bmap)) {
1487 		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
1488 		if (idx >= MAX_TPA_P5)
1489 			return INVALID_HW_RING_ID;
1490 	}
1491 	__set_bit(idx, map->agg_idx_bmap);
1492 	map->agg_id_tbl[agg_id] = idx;
1493 	return idx;
1494 }
1495 
1496 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1497 {
1498 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1499 
1500 	__clear_bit(idx, map->agg_idx_bmap);
1501 }
1502 
1503 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1504 {
1505 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1506 
1507 	return map->agg_id_tbl[agg_id];
1508 }
1509 
1510 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1511 			      struct rx_tpa_start_cmp *tpa_start,
1512 			      struct rx_tpa_start_cmp_ext *tpa_start1)
1513 {
1514 	tpa_info->cfa_code_valid = 1;
1515 	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1516 	tpa_info->vlan_valid = 0;
1517 	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1518 		tpa_info->vlan_valid = 1;
1519 		tpa_info->metadata =
1520 			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1521 	}
1522 }
1523 
1524 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1525 				 struct rx_tpa_start_cmp *tpa_start,
1526 				 struct rx_tpa_start_cmp_ext *tpa_start1)
1527 {
1528 	tpa_info->vlan_valid = 0;
1529 	if (TPA_START_VLAN_VALID(tpa_start)) {
1530 		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1531 		u32 vlan_proto = ETH_P_8021Q;
1532 
1533 		tpa_info->vlan_valid = 1;
1534 		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1535 			vlan_proto = ETH_P_8021AD;
1536 		tpa_info->metadata = vlan_proto << 16 |
1537 				     TPA_START_METADATA0_TCI(tpa_start1);
1538 	}
1539 }
1540 
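/* Handle a TPA_START completion: move the current RX buffer into the
 * per-aggregation tpa_info, give the ring slot a replacement buffer, and
 * record the hash and metadata needed when the aggregation ends.
 */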
1541 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1542 			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1543 			   struct rx_tpa_start_cmp_ext *tpa_start1)
1544 {
1545 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1546 	struct bnxt_tpa_info *tpa_info;
1547 	u16 cons, prod, agg_id;
1548 	struct rx_bd *prod_bd;
1549 	dma_addr_t mapping;
1550 
1551 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1552 		agg_id = TPA_START_AGG_ID_P5(tpa_start);
1553 		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1554 		if (unlikely(agg_id == INVALID_HW_RING_ID)) {
1555 			netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
1556 				    rxr->bnapi->index,
1557 				    TPA_START_AGG_ID_P5(tpa_start));
1558 			bnxt_sched_reset_rxr(bp, rxr);
1559 			return;
1560 		}
1561 	} else {
1562 		agg_id = TPA_START_AGG_ID(tpa_start);
1563 	}
1564 	cons = tpa_start->rx_tpa_start_cmp_opaque;
1565 	prod = rxr->rx_prod;
1566 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1567 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1568 	tpa_info = &rxr->rx_tpa[agg_id];
1569 
1570 	if (unlikely(cons != rxr->rx_next_cons ||
1571 		     TPA_START_ERROR(tpa_start))) {
1572 		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1573 			    cons, rxr->rx_next_cons,
1574 			    TPA_START_ERROR_CODE(tpa_start1));
1575 		bnxt_sched_reset_rxr(bp, rxr);
1576 		return;
1577 	}
1578 	prod_rx_buf->data = tpa_info->data;
1579 	prod_rx_buf->data_ptr = tpa_info->data_ptr;
1580 
1581 	mapping = tpa_info->mapping;
1582 	prod_rx_buf->mapping = mapping;
1583 
1584 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1585 
1586 	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1587 
1588 	tpa_info->data = cons_rx_buf->data;
1589 	tpa_info->data_ptr = cons_rx_buf->data_ptr;
1590 	cons_rx_buf->data = NULL;
1591 	tpa_info->mapping = cons_rx_buf->mapping;
1592 
1593 	tpa_info->len =
1594 		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1595 				RX_TPA_START_CMP_LEN_SHIFT;
1596 	if (likely(TPA_START_HASH_VALID(tpa_start))) {
1597 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
1598 		tpa_info->gso_type = SKB_GSO_TCPV4;
1599 		if (TPA_START_IS_IPV6(tpa_start1))
1600 			tpa_info->gso_type = SKB_GSO_TCPV6;
1601 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1602 		else if (!BNXT_CHIP_P4_PLUS(bp) &&
1603 			 TPA_START_HASH_TYPE(tpa_start) == 3)
1604 			tpa_info->gso_type = SKB_GSO_TCPV6;
1605 		tpa_info->rss_hash =
1606 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1607 	} else {
1608 		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1609 		tpa_info->gso_type = 0;
1610 		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1611 	}
1612 	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1613 	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1614 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1615 		bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1616 	else
1617 		bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1618 	tpa_info->agg_count = 0;
1619 
1620 	rxr->rx_prod = NEXT_RX(prod);
1621 	cons = RING_RX(bp, NEXT_RX(cons));
1622 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1623 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1624 
1625 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1626 	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1627 	cons_rx_buf->data = NULL;
1628 }
1629 
1630 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1631 {
1632 	if (agg_bufs)
1633 		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1634 }
1635 
1636 #ifdef CONFIG_INET
1637 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1638 {
1639 	struct udphdr *uh = NULL;
1640 
1641 	if (ip_proto == htons(ETH_P_IP)) {
1642 		struct iphdr *iph = (struct iphdr *)skb->data;
1643 
1644 		if (iph->protocol == IPPROTO_UDP)
1645 			uh = (struct udphdr *)(iph + 1);
1646 	} else {
1647 		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1648 
1649 		if (iph->nexthdr == IPPROTO_UDP)
1650 			uh = (struct udphdr *)(iph + 1);
1651 	}
1652 	if (uh) {
1653 		if (uh->check)
1654 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1655 		else
1656 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1657 	}
1658 }
1659 #endif
1660 
1661 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1662 					   int payload_off, int tcp_ts,
1663 					   struct sk_buff *skb)
1664 {
1665 #ifdef CONFIG_INET
1666 	struct tcphdr *th;
1667 	int len, nw_off;
1668 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1669 	u32 hdr_info = tpa_info->hdr_info;
1670 	bool loopback = false;
1671 
1672 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1673 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1674 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1675 
1676 	/* If the packet is an internal loopback packet, the offsets will
1677 	 * have an extra 4 bytes.
1678 	 */
1679 	if (inner_mac_off == 4) {
1680 		loopback = true;
1681 	} else if (inner_mac_off > 4) {
1682 		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1683 					    ETH_HLEN - 2));
1684 
1685 		/* We only support inner IPv4/IPv6.  If we don't see the
1686 		 * correct protocol ID, it must be a loopback packet where
1687 		 * the offsets are off by 4.
1688 		 */
1689 		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1690 			loopback = true;
1691 	}
1692 	if (loopback) {
1693 		/* internal loopback packet, subtract 4 from all offsets */
1694 		inner_ip_off -= 4;
1695 		inner_mac_off -= 4;
1696 		outer_ip_off -= 4;
1697 	}
1698 
1699 	nw_off = inner_ip_off - ETH_HLEN;
1700 	skb_set_network_header(skb, nw_off);
1701 	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1702 		struct ipv6hdr *iph = ipv6_hdr(skb);
1703 
1704 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1705 		len = skb->len - skb_transport_offset(skb);
1706 		th = tcp_hdr(skb);
1707 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1708 	} else {
1709 		struct iphdr *iph = ip_hdr(skb);
1710 
1711 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1712 		len = skb->len - skb_transport_offset(skb);
1713 		th = tcp_hdr(skb);
1714 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1715 	}
1716 
1717 	if (inner_mac_off) { /* tunnel */
1718 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1719 					    ETH_HLEN - 2));
1720 
1721 		bnxt_gro_tunnel(skb, proto);
1722 	}
1723 #endif
1724 	return skb;
1725 }
1726 
1727 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1728 					   int payload_off, int tcp_ts,
1729 					   struct sk_buff *skb)
1730 {
1731 #ifdef CONFIG_INET
1732 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1733 	u32 hdr_info = tpa_info->hdr_info;
1734 	int iphdr_len, nw_off;
1735 
1736 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1737 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1738 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1739 
1740 	nw_off = inner_ip_off - ETH_HLEN;
1741 	skb_set_network_header(skb, nw_off);
1742 	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1743 		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1744 	skb_set_transport_header(skb, nw_off + iphdr_len);
1745 
1746 	if (inner_mac_off) { /* tunnel */
1747 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1748 					    ETH_HLEN - 2));
1749 
1750 		bnxt_gro_tunnel(skb, proto);
1751 	}
1752 #endif
1753 	return skb;
1754 }
1755 
1756 #define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
1757 #define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1758 
1759 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1760 					   int payload_off, int tcp_ts,
1761 					   struct sk_buff *skb)
1762 {
1763 #ifdef CONFIG_INET
1764 	struct tcphdr *th;
1765 	int len, nw_off, tcp_opt_len = 0;
1766 
1767 	if (tcp_ts)
1768 		tcp_opt_len = 12;
1769 
1770 	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1771 		struct iphdr *iph;
1772 
1773 		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1774 			 ETH_HLEN;
1775 		skb_set_network_header(skb, nw_off);
1776 		iph = ip_hdr(skb);
1777 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1778 		len = skb->len - skb_transport_offset(skb);
1779 		th = tcp_hdr(skb);
1780 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1781 	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1782 		struct ipv6hdr *iph;
1783 
1784 		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1785 			 ETH_HLEN;
1786 		skb_set_network_header(skb, nw_off);
1787 		iph = ipv6_hdr(skb);
1788 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1789 		len = skb->len - skb_transport_offset(skb);
1790 		th = tcp_hdr(skb);
1791 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1792 	} else {
1793 		dev_kfree_skb_any(skb);
1794 		return NULL;
1795 	}
1796 
1797 	if (nw_off) /* tunnel */
1798 		bnxt_gro_tunnel(skb, skb->protocol);
1799 #endif
1800 	return skb;
1801 }
1802 
1803 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1804 					   struct bnxt_tpa_info *tpa_info,
1805 					   struct rx_tpa_end_cmp *tpa_end,
1806 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1807 					   struct sk_buff *skb)
1808 {
1809 #ifdef CONFIG_INET
1810 	int payload_off;
1811 	u16 segs;
1812 
1813 	segs = TPA_END_TPA_SEGS(tpa_end);
1814 	if (segs == 1)
1815 		return skb;
1816 
1817 	NAPI_GRO_CB(skb)->count = segs;
1818 	skb_shinfo(skb)->gso_size =
1819 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1820 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1821 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1822 		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1823 	else
1824 		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1825 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1826 	if (likely(skb))
1827 		tcp_gro_complete(skb);
1828 #endif
1829 	return skb;
1830 }
1831 
1832 /* Given the cfa_code of a received packet, determine which
1833  * netdev (vf-rep or PF) the packet is destined for.
1834  */
1835 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1836 {
1837 	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1838 
1839 	/* if vf-rep dev is NULL, it must belong to the PF */
1840 	return dev ? dev : bp->dev;
1841 }
1842 
1843 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1844 					   struct bnxt_cp_ring_info *cpr,
1845 					   u32 *raw_cons,
1846 					   struct rx_tpa_end_cmp *tpa_end,
1847 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1848 					   u8 *event)
1849 {
1850 	struct bnxt_napi *bnapi = cpr->bnapi;
1851 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1852 	struct net_device *dev = bp->dev;
1853 	u8 *data_ptr, agg_bufs;
1854 	unsigned int len;
1855 	struct bnxt_tpa_info *tpa_info;
1856 	dma_addr_t mapping;
1857 	struct sk_buff *skb;
1858 	u16 idx = 0, agg_id;
1859 	void *data;
1860 	bool gro;
1861 
1862 	if (unlikely(bnapi->in_reset)) {
1863 		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1864 
1865 		if (rc < 0)
1866 			return ERR_PTR(-EBUSY);
1867 		return NULL;
1868 	}
1869 
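	/* On P5_PLUS chips the aggregation completions were accumulated in
	 * tpa_info by bnxt_tpa_agg(); on older chips the agg buffers follow
	 * the TPA_END entry on the completion ring itself.
	 */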
1870 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1871 		agg_id = TPA_END_AGG_ID_P5(tpa_end);
1872 		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1873 		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1874 		tpa_info = &rxr->rx_tpa[agg_id];
1875 		if (unlikely(agg_bufs != tpa_info->agg_count)) {
1876 			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1877 				    agg_bufs, tpa_info->agg_count);
1878 			agg_bufs = tpa_info->agg_count;
1879 		}
1880 		tpa_info->agg_count = 0;
1881 		*event |= BNXT_AGG_EVENT;
1882 		bnxt_free_agg_idx(rxr, agg_id);
1883 		idx = agg_id;
1884 		gro = !!(bp->flags & BNXT_FLAG_GRO);
1885 	} else {
1886 		agg_id = TPA_END_AGG_ID(tpa_end);
1887 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1888 		tpa_info = &rxr->rx_tpa[agg_id];
1889 		idx = RING_CMP(*raw_cons);
1890 		if (agg_bufs) {
1891 			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1892 				return ERR_PTR(-EBUSY);
1893 
1894 			*event |= BNXT_AGG_EVENT;
1895 			idx = NEXT_CMP(idx);
1896 		}
1897 		gro = !!TPA_END_GRO(tpa_end);
1898 	}
1899 	data = tpa_info->data;
1900 	data_ptr = tpa_info->data_ptr;
1901 	prefetch(data_ptr);
1902 	len = tpa_info->len;
1903 	mapping = tpa_info->mapping;
1904 
1905 	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1906 		bnxt_abort_tpa(cpr, idx, agg_bufs);
1907 		if (agg_bufs > MAX_SKB_FRAGS)
1908 			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1909 				    agg_bufs, (int)MAX_SKB_FRAGS);
1910 		return NULL;
1911 	}
1912 
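	/* Small packets are copied into a new skb and the TPA buffer is kept;
	 * larger packets take ownership of the TPA buffer and a replacement
	 * fragment is allocated for the ring.
	 */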
1913 	if (len <= bp->rx_copybreak) {
1914 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1915 		if (!skb) {
1916 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1917 			cpr->sw_stats->rx.rx_oom_discards += 1;
1918 			return NULL;
1919 		}
1920 	} else {
1921 		u8 *new_data;
1922 		dma_addr_t new_mapping;
1923 
1924 		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1925 						GFP_ATOMIC);
1926 		if (!new_data) {
1927 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1928 			cpr->sw_stats->rx.rx_oom_discards += 1;
1929 			return NULL;
1930 		}
1931 
1932 		tpa_info->data = new_data;
1933 		tpa_info->data_ptr = new_data + bp->rx_offset;
1934 		tpa_info->mapping = new_mapping;
1935 
1936 		skb = napi_build_skb(data, bp->rx_buf_size);
1937 		dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1938 					bp->rx_buf_use_size, bp->rx_dir);
1939 
1940 		if (!skb) {
1941 			page_pool_free_va(rxr->head_pool, data, true);
1942 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1943 			cpr->sw_stats->rx.rx_oom_discards += 1;
1944 			return NULL;
1945 		}
1946 		skb_mark_for_recycle(skb);
1947 		skb_reserve(skb, bp->rx_offset);
1948 		skb_put(skb, len);
1949 	}
1950 
1951 	if (agg_bufs) {
1952 		skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1953 					      true);
1954 		if (!skb) {
1955 			/* Page reuse already handled by bnxt_rx_agg_netmems_skb(). */
1956 			cpr->sw_stats->rx.rx_oom_discards += 1;
1957 			return NULL;
1958 		}
1959 	}
1960 
1961 	if (tpa_info->cfa_code_valid)
1962 		dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1963 	skb->protocol = eth_type_trans(skb, dev);
1964 
1965 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1966 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1967 
1968 	if (tpa_info->vlan_valid &&
1969 	    (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1970 		__be16 vlan_proto = htons(tpa_info->metadata >>
1971 					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
1972 		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1973 
1974 		if (eth_type_vlan(vlan_proto)) {
1975 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1976 		} else {
1977 			dev_kfree_skb(skb);
1978 			return NULL;
1979 		}
1980 	}
1981 
1982 	skb_checksum_none_assert(skb);
1983 	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1984 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1985 		skb->csum_level =
1986 			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1987 	}
1988 
1989 	if (gro)
1990 		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1991 
1992 	return skb;
1993 }
1994 
1995 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1996 			 struct rx_agg_cmp *rx_agg)
1997 {
1998 	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1999 	struct bnxt_tpa_info *tpa_info;
2000 
2001 	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
2002 	tpa_info = &rxr->rx_tpa[agg_id];
2003 	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
2004 	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
2005 }
2006 
2007 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
2008 			     struct sk_buff *skb)
2009 {
2010 	skb_mark_for_recycle(skb);
2011 
2012 	if (skb->dev != bp->dev) {
2013 		/* this packet belongs to a vf-rep */
2014 		bnxt_vf_rep_rx(bp, skb);
2015 		return;
2016 	}
2017 	skb_record_rx_queue(skb, bnapi->index);
2018 	napi_gro_receive(&bnapi->napi, skb);
2019 }
2020 
2021 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2022 			     struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2023 {
2024 	u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2025 
2026 	if (BNXT_PTP_RX_TS_VALID(flags))
2027 		goto ts_valid;
2028 	if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2029 		return false;
2030 
2031 ts_valid:
2032 	*cmpl_ts = ts;
2033 	return true;
2034 }
2035 
2036 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2037 				    struct rx_cmp *rxcmp,
2038 				    struct rx_cmp_ext *rxcmp1)
2039 {
2040 	__be16 vlan_proto;
2041 	u16 vtag;
2042 
2043 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2044 		__le32 flags2 = rxcmp1->rx_cmp_flags2;
2045 		u32 meta_data;
2046 
2047 		if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2048 			return skb;
2049 
2050 		meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2051 		vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2052 		vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2053 		if (eth_type_vlan(vlan_proto))
2054 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2055 		else
2056 			goto vlan_err;
2057 	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2058 		if (RX_CMP_VLAN_VALID(rxcmp)) {
2059 			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2060 
2061 			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2062 				vlan_proto = htons(ETH_P_8021Q);
2063 			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2064 				vlan_proto = htons(ETH_P_8021AD);
2065 			else
2066 				goto vlan_err;
2067 			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2068 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2069 		}
2070 	}
2071 	return skb;
2072 vlan_err:
2073 	skb_mark_for_recycle(skb);
2074 	dev_kfree_skb(skb);
2075 	return NULL;
2076 }
2077 
2078 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2079 					   struct rx_cmp *rxcmp)
2080 {
2081 	u8 ext_op;
2082 
2083 	ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2084 	switch (ext_op) {
2085 	case EXT_OP_INNER_4:
2086 	case EXT_OP_OUTER_4:
2087 	case EXT_OP_INNFL_3:
2088 	case EXT_OP_OUTFL_3:
2089 		return PKT_HASH_TYPE_L4;
2090 	default:
2091 		return PKT_HASH_TYPE_L3;
2092 	}
2093 }
2094 
2095 /* returns the following:
2096  * 1       - 1 packet successfully received
2097  * 0       - successful TPA_START, packet not completed yet
2098  * -EBUSY  - completion ring does not have all the agg buffers yet
2099  * -ENOMEM - packet aborted due to out of memory
2100  * -EIO    - packet aborted due to hw error indicated in BD
2101  */
2102 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2103 		       u32 *raw_cons, u8 *event)
2104 {
2105 	struct bnxt_napi *bnapi = cpr->bnapi;
2106 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2107 	struct net_device *dev = bp->dev;
2108 	struct rx_cmp *rxcmp;
2109 	struct rx_cmp_ext *rxcmp1;
2110 	u32 tmp_raw_cons = *raw_cons;
2111 	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2112 	struct skb_shared_info *sinfo;
2113 	struct bnxt_sw_rx_bd *rx_buf;
2114 	unsigned int len;
2115 	u8 *data_ptr, agg_bufs, cmp_type;
2116 	bool xdp_active = false;
2117 	dma_addr_t dma_addr;
2118 	struct sk_buff *skb;
2119 	struct xdp_buff xdp;
2120 	u32 flags, misc;
2121 	u32 cmpl_ts;
2122 	void *data;
2123 	int rc = 0;
2124 
2125 	rxcmp = (struct rx_cmp *)
2126 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2127 
2128 	cmp_type = RX_CMP_TYPE(rxcmp);
2129 
2130 	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2131 		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2132 		goto next_rx_no_prod_no_len;
2133 	}
2134 
2135 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2136 	cp_cons = RING_CMP(tmp_raw_cons);
2137 	rxcmp1 = (struct rx_cmp_ext *)
2138 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2139 
2140 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2141 		return -EBUSY;
2142 
2143 	/* The valid test of the entry must be done first before
2144 	 * reading any further.
2145 	 */
2146 	dma_rmb();
2147 	prod = rxr->rx_prod;
2148 
2149 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2150 	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2151 		bnxt_tpa_start(bp, rxr, cmp_type,
2152 			       (struct rx_tpa_start_cmp *)rxcmp,
2153 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
2154 
2155 		*event |= BNXT_RX_EVENT;
2156 		goto next_rx_no_prod_no_len;
2157 
2158 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2159 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2160 				   (struct rx_tpa_end_cmp *)rxcmp,
2161 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2162 
2163 		if (IS_ERR(skb))
2164 			return -EBUSY;
2165 
2166 		rc = -ENOMEM;
2167 		if (likely(skb)) {
2168 			bnxt_deliver_skb(bp, bnapi, skb);
2169 			rc = 1;
2170 		}
2171 		*event |= BNXT_RX_EVENT;
2172 		goto next_rx_no_prod_no_len;
2173 	}
2174 
2175 	cons = rxcmp->rx_cmp_opaque;
2176 	if (unlikely(cons != rxr->rx_next_cons)) {
2177 		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2178 
2179 		/* 0xffff is forced error, don't print it */
2180 		if (rxr->rx_next_cons != 0xffff)
2181 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2182 				    cons, rxr->rx_next_cons);
2183 		bnxt_sched_reset_rxr(bp, rxr);
2184 		if (rc1)
2185 			return rc1;
2186 		goto next_rx_no_prod_no_len;
2187 	}
2188 	rx_buf = &rxr->rx_buf_ring[cons];
2189 	data = rx_buf->data;
2190 	data_ptr = rx_buf->data_ptr;
2191 	prefetch(data_ptr);
2192 
2193 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2194 	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2195 
2196 	if (agg_bufs) {
2197 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2198 			return -EBUSY;
2199 
2200 		cp_cons = NEXT_CMP(cp_cons);
2201 		*event |= BNXT_AGG_EVENT;
2202 	}
2203 	*event |= BNXT_RX_EVENT;
2204 
2205 	rx_buf->data = NULL;
2206 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2207 		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2208 
2209 		bnxt_reuse_rx_data(rxr, cons, data);
2210 		if (agg_bufs)
2211 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2212 					       false);
2213 
2214 		rc = -EIO;
2215 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2216 			bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2217 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2218 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2219 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
2220 						 rx_err);
2221 				bnxt_sched_reset_rxr(bp, rxr);
2222 			}
2223 		}
2224 		goto next_rx_no_len;
2225 	}
2226 
2227 	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2228 	len = flags >> RX_CMP_LEN_SHIFT;
2229 	dma_addr = rx_buf->mapping;
2230 
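	/* With an XDP program attached, build an xdp_buff over the packet
	 * (plus any aggregation buffers) before deciding whether to hand it
	 * to the stack.
	 */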
2231 	if (bnxt_xdp_attached(bp, rxr)) {
2232 		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2233 		if (agg_bufs) {
2234 			u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
2235 							       cp_cons,
2236 							       agg_bufs,
2237 							       false);
2238 			if (!frag_len)
2239 				goto oom_next_rx;
2240 
2241 		}
2242 		xdp_active = true;
2243 	}
2244 
2245 	if (xdp_active) {
2246 		if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2247 			rc = 1;
2248 			goto next_rx;
2249 		}
2250 		if (xdp_buff_has_frags(&xdp)) {
2251 			sinfo = xdp_get_shared_info_from_buff(&xdp);
2252 			agg_bufs = sinfo->nr_frags;
2253 		} else {
2254 			agg_bufs = 0;
2255 		}
2256 	}
2257 
2258 	if (len <= bp->rx_copybreak) {
2259 		if (!xdp_active)
2260 			skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2261 		else
2262 			skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2263 		bnxt_reuse_rx_data(rxr, cons, data);
2264 		if (!skb) {
2265 			if (agg_bufs) {
2266 				if (!xdp_active)
2267 					bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2268 							       agg_bufs, false);
2269 				else
2270 					bnxt_xdp_buff_frags_free(rxr, &xdp);
2271 			}
2272 			goto oom_next_rx;
2273 		}
2274 	} else {
2275 		u32 payload;
2276 
2277 		if (rx_buf->data_ptr == data_ptr)
2278 			payload = misc & RX_CMP_PAYLOAD_OFFSET;
2279 		else
2280 			payload = 0;
2281 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2282 				      payload | len);
2283 		if (!skb)
2284 			goto oom_next_rx;
2285 	}
2286 
2287 	if (agg_bufs) {
2288 		if (!xdp_active) {
2289 			skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2290 						      agg_bufs, false);
2291 			if (!skb)
2292 				goto oom_next_rx;
2293 		} else {
2294 			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
2295 			if (!skb) {
2296 				/* we should be able to free the old skb here */
2297 				bnxt_xdp_buff_frags_free(rxr, &xdp);
2298 				goto oom_next_rx;
2299 			}
2300 		}
2301 	}
2302 
2303 	if (RX_CMP_HASH_VALID(rxcmp)) {
2304 		enum pkt_hash_types type;
2305 
2306 		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2307 			type = bnxt_rss_ext_op(bp, rxcmp);
2308 		} else {
2309 			u32 itypes = RX_CMP_ITYPES(rxcmp);
2310 
2311 			if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2312 			    itypes == RX_CMP_FLAGS_ITYPE_UDP)
2313 				type = PKT_HASH_TYPE_L4;
2314 			else
2315 				type = PKT_HASH_TYPE_L3;
2316 		}
2317 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2318 	}
2319 
2320 	if (cmp_type == CMP_TYPE_RX_L2_CMP)
2321 		dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2322 	skb->protocol = eth_type_trans(skb, dev);
2323 
2324 	if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2325 		skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2326 		if (!skb)
2327 			goto next_rx;
2328 	}
2329 
2330 	skb_checksum_none_assert(skb);
2331 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
2332 		if (dev->features & NETIF_F_RXCSUM) {
2333 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2334 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2335 		}
2336 	} else {
2337 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2338 			if (dev->features & NETIF_F_RXCSUM)
2339 				bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2340 		}
2341 	}
2342 
2343 	if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2344 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2345 			u64 ns, ts;
2346 
2347 			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2348 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2349 
2350 				ns = bnxt_timecounter_cyc2time(ptp, ts);
2351 				memset(skb_hwtstamps(skb), 0,
2352 				       sizeof(*skb_hwtstamps(skb)));
2353 				skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2354 			}
2355 		}
2356 	}
2357 	bnxt_deliver_skb(bp, bnapi, skb);
2358 	rc = 1;
2359 
2360 next_rx:
2361 	cpr->rx_packets += 1;
2362 	cpr->rx_bytes += len;
2363 
2364 next_rx_no_len:
2365 	rxr->rx_prod = NEXT_RX(prod);
2366 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2367 
2368 next_rx_no_prod_no_len:
2369 	*raw_cons = tmp_raw_cons;
2370 
2371 	return rc;
2372 
2373 oom_next_rx:
2374 	cpr->sw_stats->rx.rx_oom_discards += 1;
2375 	rc = -ENOMEM;
2376 	goto next_rx;
2377 }
2378 
2379 /* In netpoll mode, if we are using a combined completion ring, we need to
2380  * discard the rx packets and recycle the buffers.
2381  */
2382 static int bnxt_force_rx_discard(struct bnxt *bp,
2383 				 struct bnxt_cp_ring_info *cpr,
2384 				 u32 *raw_cons, u8 *event)
2385 {
2386 	u32 tmp_raw_cons = *raw_cons;
2387 	struct rx_cmp_ext *rxcmp1;
2388 	struct rx_cmp *rxcmp;
2389 	u16 cp_cons;
2390 	u8 cmp_type;
2391 	int rc;
2392 
2393 	cp_cons = RING_CMP(tmp_raw_cons);
2394 	rxcmp = (struct rx_cmp *)
2395 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2396 
2397 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2398 	cp_cons = RING_CMP(tmp_raw_cons);
2399 	rxcmp1 = (struct rx_cmp_ext *)
2400 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2401 
2402 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2403 		return -EBUSY;
2404 
2405 	/* The valid test of the entry must be done first before
2406 	 * reading any further.
2407 	 */
2408 	dma_rmb();
2409 	cmp_type = RX_CMP_TYPE(rxcmp);
2410 	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2411 	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2412 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2413 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2414 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2415 		struct rx_tpa_end_cmp_ext *tpa_end1;
2416 
2417 		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2418 		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2419 			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2420 	}
2421 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2422 	if (rc && rc != -EBUSY)
2423 		cpr->sw_stats->rx.rx_netpoll_discards += 1;
2424 	return rc;
2425 }
2426 
2427 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2428 {
2429 	struct bnxt_fw_health *fw_health = bp->fw_health;
2430 	u32 reg = fw_health->regs[reg_idx];
2431 	u32 reg_type, reg_off, val = 0;
2432 
2433 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2434 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2435 	switch (reg_type) {
2436 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
2437 		pci_read_config_dword(bp->pdev, reg_off, &val);
2438 		break;
2439 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
2440 		reg_off = fw_health->mapped_regs[reg_idx];
2441 		fallthrough;
2442 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2443 		val = readl(bp->bar0 + reg_off);
2444 		break;
2445 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2446 		val = readl(bp->bar1 + reg_off);
2447 		break;
2448 	}
2449 	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2450 		val &= fw_health->fw_reset_inprog_reg_mask;
2451 	return val;
2452 }
2453 
2454 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2455 {
2456 	int i;
2457 
2458 	for (i = 0; i < bp->rx_nr_rings; i++) {
2459 		u16 grp_idx = bp->rx_ring[i].bnapi->index;
2460 		struct bnxt_ring_grp_info *grp_info;
2461 
2462 		grp_info = &bp->grp_info[grp_idx];
2463 		if (grp_info->agg_fw_ring_id == ring_id)
2464 			return grp_idx;
2465 	}
2466 	return INVALID_HW_RING_ID;
2467 }
2468 
2469 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2470 {
2471 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2472 
2473 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2474 		return link_info->force_link_speed2;
2475 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2476 		return link_info->force_pam4_link_speed;
2477 	return link_info->force_link_speed;
2478 }
2479 
2480 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2481 {
2482 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2483 
2484 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2485 		link_info->req_link_speed = link_info->force_link_speed2;
2486 		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2487 		switch (link_info->req_link_speed) {
2488 		case BNXT_LINK_SPEED_50GB_PAM4:
2489 		case BNXT_LINK_SPEED_100GB_PAM4:
2490 		case BNXT_LINK_SPEED_200GB_PAM4:
2491 		case BNXT_LINK_SPEED_400GB_PAM4:
2492 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2493 			break;
2494 		case BNXT_LINK_SPEED_100GB_PAM4_112:
2495 		case BNXT_LINK_SPEED_200GB_PAM4_112:
2496 		case BNXT_LINK_SPEED_400GB_PAM4_112:
2497 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2498 			break;
2499 		default:
2500 			link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2501 		}
2502 		return;
2503 	}
2504 	link_info->req_link_speed = link_info->force_link_speed;
2505 	link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2506 	if (link_info->force_pam4_link_speed) {
2507 		link_info->req_link_speed = link_info->force_pam4_link_speed;
2508 		link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2509 	}
2510 }
2511 
2512 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2513 {
2514 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2515 
2516 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2517 		link_info->advertising = link_info->auto_link_speeds2;
2518 		return;
2519 	}
2520 	link_info->advertising = link_info->auto_link_speeds;
2521 	link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2522 }
2523 
2524 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2525 {
2526 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2527 
2528 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2529 		if (link_info->req_link_speed != link_info->force_link_speed2)
2530 			return true;
2531 		return false;
2532 	}
2533 	if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2534 	    link_info->req_link_speed != link_info->force_link_speed)
2535 		return true;
2536 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2537 	    link_info->req_link_speed != link_info->force_pam4_link_speed)
2538 		return true;
2539 	return false;
2540 }
2541 
2542 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2543 {
2544 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2545 
2546 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2547 		if (link_info->advertising != link_info->auto_link_speeds2)
2548 			return true;
2549 		return false;
2550 	}
2551 	if (link_info->advertising != link_info->auto_link_speeds ||
2552 	    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2553 		return true;
2554 	return false;
2555 }
2556 
2557 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2558 {
2559 	u32 flags = bp->ctx->ctx_arr[type].flags;
2560 
2561 	return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2562 		((flags & BNXT_CTX_MEM_FW_TRACE) ||
2563 		 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2564 }
2565 
2566 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2567 {
2568 	u32 mem_size, pages, rem_bytes, magic_byte_offset;
2569 	u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2570 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2571 	struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2572 	struct bnxt_bs_trace_info *bs_trace;
2573 	int last_pg;
2574 
2575 	if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2576 		return;
2577 
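	/* Stamp the last byte of the trace buffer with a magic value so that
	 * bnxt_bs_trace_check_wrap() can later detect a firmware wrap-around.
	 */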
2578 	mem_size = ctxm->max_entries * ctxm->entry_size;
2579 	rem_bytes = mem_size % BNXT_PAGE_SIZE;
2580 	pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2581 
2582 	last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2583 	magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2584 
2585 	rmem = &ctx_pg[0].ring_mem;
2586 	bs_trace = &bp->bs_trace[trace_type];
2587 	bs_trace->ctx_type = ctxm->type;
2588 	bs_trace->trace_type = trace_type;
2589 	if (pages > MAX_CTX_PAGES) {
2590 		int last_pg_dir = rmem->nr_pages - 1;
2591 
2592 		rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2593 		bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2594 	} else {
2595 		bs_trace->magic_byte = rmem->pg_arr[last_pg];
2596 	}
2597 	bs_trace->magic_byte += magic_byte_offset;
2598 	*bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2599 }
2600 
2601 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1)				\
2602 	(((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2603 	 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2604 
2605 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2)				\
2606 	(((data2) &							\
2607 	  ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2608 	 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2609 
2610 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2)				\
2611 	((data2) &							\
2612 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2613 
2614 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)			\
2615 	(((data2) &							\
2616 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2617 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2618 
2619 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)			\
2620 	((data1) &							\
2621 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2622 
2623 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)		\
2624 	(((data1) &							\
2625 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2626 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2627 
2628 /* Return true if the workqueue has to be scheduled */
2629 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2630 {
2631 	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2632 
2633 	switch (err_type) {
2634 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2635 		netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2636 			   BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2637 		break;
2638 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2639 		netdev_warn(bp->dev, "Pause Storm detected!\n");
2640 		break;
2641 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2642 		netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2643 		break;
2644 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2645 		u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2646 		char *threshold_type;
2647 		bool notify = false;
2648 		char *dir_str;
2649 
2650 		switch (type) {
2651 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2652 			threshold_type = "warning";
2653 			break;
2654 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2655 			threshold_type = "critical";
2656 			break;
2657 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2658 			threshold_type = "fatal";
2659 			break;
2660 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2661 			threshold_type = "shutdown";
2662 			break;
2663 		default:
2664 			netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2665 			return false;
2666 		}
2667 		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2668 			dir_str = "above";
2669 			notify = true;
2670 		} else {
2671 			dir_str = "below";
2672 		}
2673 		netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2674 			    dir_str, threshold_type);
2675 		netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2676 			    BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2677 			    BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2678 		if (notify) {
2679 			bp->thermal_threshold_type = type;
2680 			set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2681 			return true;
2682 		}
2683 		return false;
2684 	}
2685 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2686 		netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2687 		break;
2688 	default:
2689 		netdev_err(bp->dev, "FW reported unknown error type %u\n",
2690 			   err_type);
2691 		break;
2692 	}
2693 	return false;
2694 }
2695 
2696 #define BNXT_GET_EVENT_PORT(data)	\
2697 	((data) &			\
2698 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2699 
2700 #define BNXT_EVENT_RING_TYPE(data2)	\
2701 	((data2) &			\
2702 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2703 
2704 #define BNXT_EVENT_RING_TYPE_RX(data2)	\
2705 	(BNXT_EVENT_RING_TYPE(data2) ==	\
2706 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2707 
2708 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)	\
2709 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2710 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2711 
2712 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)	\
2713 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2714 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2715 
2716 #define BNXT_PHC_BITS	48
2717 
2718 static int bnxt_async_event_process(struct bnxt *bp,
2719 				    struct hwrm_async_event_cmpl *cmpl)
2720 {
2721 	u16 event_id = le16_to_cpu(cmpl->event_id);
2722 	u32 data1 = le32_to_cpu(cmpl->event_data1);
2723 	u32 data2 = le32_to_cpu(cmpl->event_data2);
2724 
2725 	netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2726 		   event_id, data1, data2);
2727 
2728 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
2729 	switch (event_id) {
2730 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2731 		struct bnxt_link_info *link_info = &bp->link_info;
2732 
2733 		if (BNXT_VF(bp))
2734 			goto async_event_process_exit;
2735 
2736 		/* print unsupported speed warning in forced speed mode only */
2737 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2738 		    (data1 & 0x20000)) {
2739 			u16 fw_speed = bnxt_get_force_speed(link_info);
2740 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2741 
2742 			if (speed != SPEED_UNKNOWN)
2743 				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2744 					    speed);
2745 		}
2746 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2747 	}
2748 		fallthrough;
2749 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2750 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2751 		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2752 		fallthrough;
2753 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2754 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2755 		break;
2756 	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2757 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2758 		break;
2759 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2760 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
2761 
2762 		if (BNXT_VF(bp))
2763 			break;
2764 
2765 		if (bp->pf.port_id != port_id)
2766 			break;
2767 
2768 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2769 		break;
2770 	}
2771 	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2772 		if (BNXT_PF(bp))
2773 			goto async_event_process_exit;
2774 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2775 		break;
2776 	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2777 		char *type_str = "Solicited";
2778 
2779 		if (!bp->fw_health)
2780 			goto async_event_process_exit;
2781 
2782 		bp->fw_reset_timestamp = jiffies;
2783 		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2784 		if (!bp->fw_reset_min_dsecs)
2785 			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2786 		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2787 		if (!bp->fw_reset_max_dsecs)
2788 			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2789 		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2790 			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2791 		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2792 			type_str = "Fatal";
2793 			bp->fw_health->fatalities++;
2794 			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2795 		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2796 			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2797 			type_str = "Non-fatal";
2798 			bp->fw_health->survivals++;
2799 			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2800 		}
2801 		netif_warn(bp, hw, bp->dev,
2802 			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2803 			   type_str, data1, data2,
2804 			   bp->fw_reset_min_dsecs * 100,
2805 			   bp->fw_reset_max_dsecs * 100);
2806 		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2807 		break;
2808 	}
2809 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2810 		struct bnxt_fw_health *fw_health = bp->fw_health;
2811 		char *status_desc = "healthy";
2812 		u32 status;
2813 
2814 		if (!fw_health)
2815 			goto async_event_process_exit;
2816 
2817 		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2818 			fw_health->enabled = false;
2819 			netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2820 			break;
2821 		}
2822 		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2823 		fw_health->tmr_multiplier =
2824 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2825 				     bp->current_interval * 10);
2826 		fw_health->tmr_counter = fw_health->tmr_multiplier;
2827 		if (!fw_health->enabled)
2828 			fw_health->last_fw_heartbeat =
2829 				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2830 		fw_health->last_fw_reset_cnt =
2831 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2832 		status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2833 		if (status != BNXT_FW_STATUS_HEALTHY)
2834 			status_desc = "unhealthy";
2835 		netif_info(bp, drv, bp->dev,
2836 			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2837 			   fw_health->primary ? "primary" : "backup", status,
2838 			   status_desc, fw_health->last_fw_reset_cnt);
2839 		if (!fw_health->enabled) {
2840 			/* Make sure tmr_counter is set and visible to
2841 			 * bnxt_health_check() before setting enabled to true.
2842 			 */
2843 			smp_wmb();
2844 			fw_health->enabled = true;
2845 		}
2846 		goto async_event_process_exit;
2847 	}
2848 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2849 		netif_notice(bp, hw, bp->dev,
2850 			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2851 			     data1, data2);
2852 		goto async_event_process_exit;
2853 	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2854 		struct bnxt_rx_ring_info *rxr;
2855 		u16 grp_idx;
2856 
2857 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2858 			goto async_event_process_exit;
2859 
2860 		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2861 			    BNXT_EVENT_RING_TYPE(data2), data1);
2862 		if (!BNXT_EVENT_RING_TYPE_RX(data2))
2863 			goto async_event_process_exit;
2864 
2865 		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2866 		if (grp_idx == INVALID_HW_RING_ID) {
2867 			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2868 				    data1);
2869 			goto async_event_process_exit;
2870 		}
2871 		rxr = bp->bnapi[grp_idx]->rx_ring;
2872 		bnxt_sched_reset_rxr(bp, rxr);
2873 		goto async_event_process_exit;
2874 	}
2875 	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2876 		struct bnxt_fw_health *fw_health = bp->fw_health;
2877 
2878 		netif_notice(bp, hw, bp->dev,
2879 			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2880 			     data1, data2);
2881 		if (fw_health) {
2882 			fw_health->echo_req_data1 = data1;
2883 			fw_health->echo_req_data2 = data2;
2884 			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2885 			break;
2886 		}
2887 		goto async_event_process_exit;
2888 	}
2889 	case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2890 		bnxt_ptp_pps_event(bp, data1, data2);
2891 		goto async_event_process_exit;
2892 	}
2893 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2894 		if (bnxt_event_error_report(bp, data1, data2))
2895 			break;
2896 		goto async_event_process_exit;
2897 	}
2898 	case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2899 		switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2900 		case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2901 			if (BNXT_PTP_USE_RTC(bp)) {
2902 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2903 				unsigned long flags;
2904 				u64 ns;
2905 
2906 				if (!ptp)
2907 					goto async_event_process_exit;
2908 
2909 				bnxt_ptp_update_current_time(bp);
2910 				ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2911 				       BNXT_PHC_BITS) | ptp->current_time);
2912 				write_seqlock_irqsave(&ptp->ptp_lock, flags);
2913 				bnxt_ptp_rtc_timecounter_init(ptp, ns);
2914 				write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2915 			}
2916 			break;
2917 		}
2918 		goto async_event_process_exit;
2919 	}
2920 	case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2921 		u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2922 
2923 		hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2924 		goto async_event_process_exit;
2925 	}
2926 	case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2927 		u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2928 		u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2929 
2930 		bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2931 		goto async_event_process_exit;
2932 	}
2933 	default:
2934 		goto async_event_process_exit;
2935 	}
2936 	__bnxt_queue_sp_work(bp);
2937 async_event_process_exit:
2938 	bnxt_ulp_async_events(bp, cmpl);
2939 	return 0;
2940 }
2941 
2942 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2943 {
2944 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2945 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2946 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2947 				(struct hwrm_fwd_req_cmpl *)txcmp;
2948 
2949 	switch (cmpl_type) {
2950 	case CMPL_BASE_TYPE_HWRM_DONE:
2951 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
2952 		hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2953 		break;
2954 
2955 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2956 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2957 
2958 		if ((vf_id < bp->pf.first_vf_id) ||
2959 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2960 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2961 				   vf_id);
2962 			return -EINVAL;
2963 		}
2964 
2965 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2966 		bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2967 		break;
2968 
2969 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2970 		bnxt_async_event_process(bp,
2971 					 (struct hwrm_async_event_cmpl *)txcmp);
2972 		break;
2973 
2974 	default:
2975 		break;
2976 	}
2977 
2978 	return 0;
2979 }
2980 
2981 static bool bnxt_vnic_is_active(struct bnxt *bp)
2982 {
2983 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2984 
2985 	return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2986 }
2987 
2988 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2989 {
2990 	struct bnxt_napi *bnapi = dev_instance;
2991 	struct bnxt *bp = bnapi->bp;
2992 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2993 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2994 
2995 	cpr->event_ctr++;
2996 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2997 	napi_schedule(&bnapi->napi);
2998 	return IRQ_HANDLED;
2999 }
3000 
3001 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
3002 {
3003 	u32 raw_cons = cpr->cp_raw_cons;
3004 	u16 cons = RING_CMP(raw_cons);
3005 	struct tx_cmp *txcmp;
3006 
3007 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3008 
3009 	return TX_CMP_VALID(txcmp, raw_cons);
3010 }
3011 
3012 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3013 			    int budget)
3014 {
3015 	struct bnxt_napi *bnapi = cpr->bnapi;
3016 	u32 raw_cons = cpr->cp_raw_cons;
3017 	bool flush_xdp = false;
3018 	u32 cons;
3019 	int rx_pkts = 0;
3020 	u8 event = 0;
3021 	struct tx_cmp *txcmp;
3022 
3023 	cpr->has_more_work = 0;
3024 	cpr->had_work_done = 1;
3025 	while (1) {
3026 		u8 cmp_type;
3027 		int rc;
3028 
3029 		cons = RING_CMP(raw_cons);
3030 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3031 
3032 		if (!TX_CMP_VALID(txcmp, raw_cons))
3033 			break;
3034 
3035 		/* The valid test of the entry must be done first before
3036 		 * reading any further.
3037 		 */
3038 		dma_rmb();
3039 		cmp_type = TX_CMP_TYPE(txcmp);
3040 		if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3041 		    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3042 			u32 opaque = txcmp->tx_cmp_opaque;
3043 			struct bnxt_tx_ring_info *txr;
3044 			u16 tx_freed;
3045 
3046 			txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3047 			event |= BNXT_TX_CMP_EVENT;
3048 			if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3049 				txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3050 			else
3051 				txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3052 			tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3053 				   bp->tx_ring_mask;
3054 			/* return full budget so NAPI will complete. */
3055 			if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3056 				rx_pkts = budget;
3057 				raw_cons = NEXT_RAW_CMP(raw_cons);
3058 				if (budget)
3059 					cpr->has_more_work = 1;
3060 				break;
3061 			}
3062 		} else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3063 			bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3064 		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3065 			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3066 			if (likely(budget))
3067 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3068 			else
3069 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3070 							   &event);
3071 			if (event & BNXT_REDIRECT_EVENT)
3072 				flush_xdp = true;
3073 			if (likely(rc >= 0))
3074 				rx_pkts += rc;
3075 			/* Increment rx_pkts when rc is -ENOMEM to count towards
3076 			 * the NAPI budget.  Otherwise, we may potentially loop
3077 			 * here forever if we consistently cannot allocate
3078 			 * buffers.
3079 			 */
3080 			else if (rc == -ENOMEM && budget)
3081 				rx_pkts++;
3082 			else if (rc == -EBUSY)	/* partial completion */
3083 				break;
3084 		} else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3085 				    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3086 				    cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3087 			bnxt_hwrm_handler(bp, txcmp);
3088 		}
3089 		raw_cons = NEXT_RAW_CMP(raw_cons);
3090 
3091 		if (rx_pkts && rx_pkts == budget) {
3092 			cpr->has_more_work = 1;
3093 			break;
3094 		}
3095 	}
3096 
3097 	if (flush_xdp) {
3098 		xdp_do_flush();
3099 		event &= ~BNXT_REDIRECT_EVENT;
3100 	}
3101 
3102 	if (event & BNXT_TX_EVENT) {
3103 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3104 		u16 prod = txr->tx_prod;
3105 
3106 		/* Sync BD data before updating doorbell */
3107 		wmb();
3108 
3109 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3110 		event &= ~BNXT_TX_EVENT;
3111 	}
3112 
3113 	cpr->cp_raw_cons = raw_cons;
3114 	bnapi->events |= event;
3115 	return rx_pkts;
3116 }
3117 
3118 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3119 				  int budget)
3120 {
3121 	if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3122 		bnapi->tx_int(bp, bnapi, budget);
3123 
3124 	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3125 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3126 
3127 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3128 		bnapi->events &= ~BNXT_RX_EVENT;
3129 	}
3130 	if (bnapi->events & BNXT_AGG_EVENT) {
3131 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3132 
3133 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3134 		bnapi->events &= ~BNXT_AGG_EVENT;
3135 	}
3136 }
3137 
3138 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3139 			  int budget)
3140 {
3141 	struct bnxt_napi *bnapi = cpr->bnapi;
3142 	int rx_pkts;
3143 
3144 	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3145 
3146 	/* ACK completion ring before freeing tx ring and producing new
3147 	 * buffers in rx/agg rings to prevent overflowing the completion
3148 	 * ring.
3149 	 */
3150 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3151 
3152 	__bnxt_poll_work_done(bp, bnapi, budget);
3153 	return rx_pkts;
3154 }
3155 
3156 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3157 {
3158 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3159 	struct bnxt *bp = bnapi->bp;
3160 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3161 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3162 	struct tx_cmp *txcmp;
3163 	struct rx_cmp_ext *rxcmp1;
3164 	u32 cp_cons, tmp_raw_cons;
3165 	u32 raw_cons = cpr->cp_raw_cons;
3166 	bool flush_xdp = false;
3167 	u32 rx_pkts = 0;
3168 	u8 event = 0;
3169 
3170 	while (1) {
3171 		int rc;
3172 
3173 		cp_cons = RING_CMP(raw_cons);
3174 		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3175 
3176 		if (!TX_CMP_VALID(txcmp, raw_cons))
3177 			break;
3178 
3179 		/* The valid test of the entry must be done first before
3180 		 * reading any further.
3181 		 */
3182 		dma_rmb();
3183 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3184 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3185 			cp_cons = RING_CMP(tmp_raw_cons);
3186 			rxcmp1 = (struct rx_cmp_ext *)
3187 			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3188 
3189 			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3190 				break;
3191 
3192 			/* force an error to recycle the buffer */
3193 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3194 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3195 
3196 			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3197 			if (likely(rc == -EIO) && budget)
3198 				rx_pkts++;
3199 			else if (rc == -EBUSY)	/* partial completion */
3200 				break;
3201 			if (event & BNXT_REDIRECT_EVENT)
3202 				flush_xdp = true;
3203 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
3204 				    CMPL_BASE_TYPE_HWRM_DONE)) {
3205 			bnxt_hwrm_handler(bp, txcmp);
3206 		} else {
3207 			netdev_err(bp->dev,
3208 				   "Invalid completion received on special ring\n");
3209 		}
3210 		raw_cons = NEXT_RAW_CMP(raw_cons);
3211 
3212 		if (rx_pkts == budget)
3213 			break;
3214 	}
3215 
3216 	cpr->cp_raw_cons = raw_cons;
3217 	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3218 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3219 
3220 	if (event & BNXT_AGG_EVENT)
3221 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3222 	if (flush_xdp)
3223 		xdp_do_flush();
3224 
3225 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3226 		napi_complete_done(napi, rx_pkts);
3227 		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3228 	}
3229 	return rx_pkts;
3230 }
3231 
3232 static int bnxt_poll(struct napi_struct *napi, int budget)
3233 {
3234 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3235 	struct bnxt *bp = bnapi->bp;
3236 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3237 	int work_done = 0;
3238 
3239 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3240 		napi_complete(napi);
3241 		return 0;
3242 	}
3243 	while (1) {
3244 		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3245 
3246 		if (work_done >= budget) {
3247 			if (!budget)
3248 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3249 			break;
3250 		}
3251 
3252 		if (!bnxt_has_work(bp, cpr)) {
3253 			if (napi_complete_done(napi, work_done))
3254 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3255 			break;
3256 		}
3257 	}
3258 	if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3259 		struct dim_sample dim_sample = {};
3260 
3261 		dim_update_sample(cpr->event_ctr,
3262 				  cpr->rx_packets,
3263 				  cpr->rx_bytes,
3264 				  &dim_sample);
3265 		net_dim(&cpr->dim, &dim_sample);
3266 	}
3267 	return work_done;
3268 }
3269 
3270 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3271 {
3272 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3273 	int i, work_done = 0;
3274 
3275 	for (i = 0; i < cpr->cp_ring_count; i++) {
3276 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3277 
3278 		if (cpr2->had_nqe_notify) {
3279 			work_done += __bnxt_poll_work(bp, cpr2,
3280 						      budget - work_done);
3281 			cpr->has_more_work |= cpr2->has_more_work;
3282 		}
3283 	}
3284 	return work_done;
3285 }
3286 
3287 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3288 				 u64 dbr_type, int budget)
3289 {
3290 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3291 	int i;
3292 
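	/* Ack every child completion ring that did work; DBR_TYPE_CQ_ARMALL
	 * additionally re-arms the ring for a new notification.
	 */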
3293 	for (i = 0; i < cpr->cp_ring_count; i++) {
3294 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3295 		struct bnxt_db_info *db;
3296 
3297 		if (cpr2->had_work_done) {
3298 			u32 tgl = 0;
3299 
3300 			if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3301 				cpr2->had_nqe_notify = 0;
3302 				tgl = cpr2->toggle;
3303 			}
3304 			db = &cpr2->cp_db;
3305 			bnxt_writeq(bp,
3306 				    db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3307 				    DB_RING_IDX(db, cpr2->cp_raw_cons),
3308 				    db->doorbell);
3309 			cpr2->had_work_done = 0;
3310 		}
3311 	}
3312 	__bnxt_poll_work_done(bp, bnapi, budget);
3313 }
3314 
3315 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3316 {
3317 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3318 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3319 	struct bnxt_cp_ring_info *cpr_rx;
3320 	u32 raw_cons = cpr->cp_raw_cons;
3321 	struct bnxt *bp = bnapi->bp;
3322 	struct nqe_cn *nqcmp;
3323 	int work_done = 0;
3324 	u32 cons;
3325 
3326 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3327 		napi_complete(napi);
3328 		return 0;
3329 	}
3330 	if (cpr->has_more_work) {
3331 		cpr->has_more_work = 0;
3332 		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3333 	}
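	/* Walk the notification queue; each CQ_NOTIFICATION entry points to a
	 * child completion ring with new work.
	 */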
3334 	while (1) {
3335 		u16 type;
3336 
3337 		cons = RING_CMP(raw_cons);
3338 		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3339 
3340 		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3341 			if (cpr->has_more_work)
3342 				break;
3343 
3344 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3345 					     budget);
3346 			cpr->cp_raw_cons = raw_cons;
3347 			if (napi_complete_done(napi, work_done))
3348 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3349 						  cpr->cp_raw_cons);
3350 			goto poll_done;
3351 		}
3352 
3353 		/* The validity of the entry must be checked before
3354 		 * reading any further.
3355 		 */
3356 		dma_rmb();
3357 
3358 		type = le16_to_cpu(nqcmp->type);
3359 		if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3360 			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3361 			u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3362 			struct bnxt_cp_ring_info *cpr2;
3363 
3364 			/* No more budget for RX work */
3365 			if (budget && work_done >= budget &&
3366 			    cq_type == BNXT_NQ_HDL_TYPE_RX)
3367 				break;
3368 
3369 			idx = BNXT_NQ_HDL_IDX(idx);
3370 			cpr2 = &cpr->cp_ring_arr[idx];
3371 			cpr2->had_nqe_notify = 1;
3372 			cpr2->toggle = NQE_CN_TOGGLE(type);
3373 			work_done += __bnxt_poll_work(bp, cpr2,
3374 						      budget - work_done);
3375 			cpr->has_more_work |= cpr2->has_more_work;
3376 		} else {
3377 			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3378 		}
3379 		raw_cons = NEXT_RAW_CMP(raw_cons);
3380 	}
3381 	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3382 	if (raw_cons != cpr->cp_raw_cons) {
3383 		cpr->cp_raw_cons = raw_cons;
3384 		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3385 	}
3386 poll_done:
3387 	cpr_rx = &cpr->cp_ring_arr[0];
3388 	if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3389 	    (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3390 		struct dim_sample dim_sample = {};
3391 
3392 		dim_update_sample(cpr->event_ctr,
3393 				  cpr_rx->rx_packets,
3394 				  cpr_rx->rx_bytes,
3395 				  &dim_sample);
3396 		net_dim(&cpr->dim, &dim_sample);
3397 	}
3398 	return work_done;
3399 }
3400 
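/* Editor's note (illustrative, not part of the original source): the free
 * loop below mirrors how descriptors were consumed at transmit time.  As the
 * accounting in this function suggests, a normal packet occupies one BD for
 * the linear part plus one long BD (hence "i += 2") and then one BD per page
 * fragment, while "push" packets and XDP_REDIRECT frames are handled
 * separately.  For example, an SKB with 3 fragments releases 2 + 3 = 5 ring
 * slots.
 */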
3401 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3402 				       struct bnxt_tx_ring_info *txr, int idx)
3403 {
3404 	int i, max_idx;
3405 	struct pci_dev *pdev = bp->pdev;
3406 
3407 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3408 
3409 	for (i = 0; i < max_idx;) {
3410 		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3411 		struct sk_buff *skb;
3412 		int j, last;
3413 
3414 		if (idx < bp->tx_nr_rings_xdp &&
3415 		    tx_buf->action == XDP_REDIRECT) {
3416 			dma_unmap_single(&pdev->dev,
3417 					 dma_unmap_addr(tx_buf, mapping),
3418 					 dma_unmap_len(tx_buf, len),
3419 					 DMA_TO_DEVICE);
3420 			xdp_return_frame(tx_buf->xdpf);
3421 			tx_buf->action = 0;
3422 			tx_buf->xdpf = NULL;
3423 			i++;
3424 			continue;
3425 		}
3426 
3427 		skb = tx_buf->skb;
3428 		if (!skb) {
3429 			i++;
3430 			continue;
3431 		}
3432 
3433 		tx_buf->skb = NULL;
3434 
3435 		if (tx_buf->is_push) {
3436 			dev_kfree_skb(skb);
3437 			i += 2;
3438 			continue;
3439 		}
3440 
3441 		dma_unmap_single(&pdev->dev,
3442 				 dma_unmap_addr(tx_buf, mapping),
3443 				 skb_headlen(skb),
3444 				 DMA_TO_DEVICE);
3445 
3446 		last = tx_buf->nr_frags;
3447 		i += 2;
3448 		for (j = 0; j < last; j++, i++) {
3449 			int ring_idx = i & bp->tx_ring_mask;
3450 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3451 
3452 			tx_buf = &txr->tx_buf_ring[ring_idx];
3453 			netmem_dma_unmap_page_attrs(&pdev->dev,
3454 						    dma_unmap_addr(tx_buf,
3455 								   mapping),
3456 						    skb_frag_size(frag),
3457 						    DMA_TO_DEVICE, 0);
3458 		}
3459 		dev_kfree_skb(skb);
3460 	}
3461 	netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3462 }
3463 
3464 static void bnxt_free_tx_skbs(struct bnxt *bp)
3465 {
3466 	int i;
3467 
3468 	if (!bp->tx_ring)
3469 		return;
3470 
3471 	for (i = 0; i < bp->tx_nr_rings; i++) {
3472 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3473 
3474 		if (!txr->tx_buf_ring)
3475 			continue;
3476 
3477 		bnxt_free_one_tx_ring_skbs(bp, txr, i);
3478 	}
3479 
3480 	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3481 		bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3482 }
3483 
3484 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3485 {
3486 	int i, max_idx;
3487 
3488 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3489 
3490 	for (i = 0; i < max_idx; i++) {
3491 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3492 		void *data = rx_buf->data;
3493 
3494 		if (!data)
3495 			continue;
3496 
3497 		rx_buf->data = NULL;
3498 		if (BNXT_RX_PAGE_MODE(bp))
3499 			page_pool_recycle_direct(rxr->page_pool, data);
3500 		else
3501 			page_pool_free_va(rxr->head_pool, data, true);
3502 	}
3503 }
3504 
3505 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3506 {
3507 	int i, max_idx;
3508 
3509 	max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3510 
3511 	for (i = 0; i < max_idx; i++) {
3512 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3513 		netmem_ref netmem = rx_agg_buf->netmem;
3514 
3515 		if (!netmem)
3516 			continue;
3517 
3518 		rx_agg_buf->netmem = 0;
3519 		__clear_bit(i, rxr->rx_agg_bmap);
3520 
3521 		page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3522 	}
3523 }
3524 
3525 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3526 					struct bnxt_rx_ring_info *rxr)
3527 {
3528 	int i;
3529 
3530 	for (i = 0; i < bp->max_tpa; i++) {
3531 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3532 		u8 *data = tpa_info->data;
3533 
3534 		if (!data)
3535 			continue;
3536 
3537 		tpa_info->data = NULL;
3538 		page_pool_free_va(rxr->head_pool, data, false);
3539 	}
3540 }
3541 
3542 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3543 				       struct bnxt_rx_ring_info *rxr)
3544 {
3545 	struct bnxt_tpa_idx_map *map;
3546 
3547 	if (!rxr->rx_tpa)
3548 		goto skip_rx_tpa_free;
3549 
3550 	bnxt_free_one_tpa_info_data(bp, rxr);
3551 
3552 skip_rx_tpa_free:
3553 	if (!rxr->rx_buf_ring)
3554 		goto skip_rx_buf_free;
3555 
3556 	bnxt_free_one_rx_ring(bp, rxr);
3557 
3558 skip_rx_buf_free:
3559 	if (!rxr->rx_agg_ring)
3560 		goto skip_rx_agg_free;
3561 
3562 	bnxt_free_one_rx_agg_ring(bp, rxr);
3563 
3564 skip_rx_agg_free:
3565 	map = rxr->rx_tpa_idx_map;
3566 	if (map)
3567 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3568 }
3569 
3570 static void bnxt_free_rx_skbs(struct bnxt *bp)
3571 {
3572 	int i;
3573 
3574 	if (!bp->rx_ring)
3575 		return;
3576 
3577 	for (i = 0; i < bp->rx_nr_rings; i++)
3578 		bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3579 }
3580 
3581 static void bnxt_free_skbs(struct bnxt *bp)
3582 {
3583 	bnxt_free_tx_skbs(bp);
3584 	bnxt_free_rx_skbs(bp);
3585 }
3586 
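/* Editor's note (illustrative, not part of the original source): context
 * memory may require a firmware-specified init value, written either across
 * the whole block (init_offset == BNXT_CTX_INIT_INVALID_OFFSET) or at a
 * fixed offset inside every entry.  For example, with entry_size = 64,
 * init_offset = 8 and init_value = 0xff, byte 8 of each 64-byte entry in the
 * page is set to 0xff by the helper below.
 */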
3587 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3588 {
3589 	u8 init_val = ctxm->init_value;
3590 	u16 offset = ctxm->init_offset;
3591 	u8 *p2 = p;
3592 	int i;
3593 
3594 	if (!init_val)
3595 		return;
3596 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3597 		memset(p, init_val, len);
3598 		return;
3599 	}
3600 	for (i = 0; i < len; i += ctxm->entry_size)
3601 		*(p2 + i + offset) = init_val;
3602 }
3603 
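/* Editor's note (illustrative, not part of the original source):
 * __bnxt_copy_ring() copies the [head, tail) window of a paged ring into a
 * flat buffer, treating the offsets as a circular byte stream
 * (MAX_CTX_BYTES_MASK) and clamping the copy to the pages available from the
 * starting page onward.  For example, with page_size = 4096 and head = 6000,
 * the copy starts at page 1, byte offset 1904, and walks page by page until
 * rem_len is exhausted.
 */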
3604 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3605 			       void *buf, size_t offset, size_t head,
3606 			       size_t tail)
3607 {
3608 	int i, head_page, start_idx, source_offset;
3609 	size_t len, rem_len, total_len, max_bytes;
3610 
3611 	head_page = head / rmem->page_size;
3612 	source_offset = head % rmem->page_size;
3613 	total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3614 	if (!total_len)
3615 		total_len = MAX_CTX_BYTES;
3616 	start_idx = head_page % MAX_CTX_PAGES;
3617 	max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3618 		    source_offset;
3619 	total_len = min(total_len, max_bytes);
3620 	rem_len = total_len;
3621 
3622 	for (i = start_idx; rem_len; i++, source_offset = 0) {
3623 		len = min((size_t)(rmem->page_size - source_offset), rem_len);
3624 		if (buf)
3625 			memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3626 			       len);
3627 		offset += len;
3628 		rem_len -= len;
3629 	}
3630 	return total_len;
3631 }
3632 
3633 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3634 {
3635 	struct pci_dev *pdev = bp->pdev;
3636 	int i;
3637 
3638 	if (!rmem->pg_arr)
3639 		goto skip_pages;
3640 
3641 	for (i = 0; i < rmem->nr_pages; i++) {
3642 		if (!rmem->pg_arr[i])
3643 			continue;
3644 
3645 		dma_free_coherent(&pdev->dev, rmem->page_size,
3646 				  rmem->pg_arr[i], rmem->dma_arr[i]);
3647 
3648 		rmem->pg_arr[i] = NULL;
3649 	}
3650 skip_pages:
3651 	if (rmem->pg_tbl) {
3652 		size_t pg_tbl_size = rmem->nr_pages * 8;
3653 
3654 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3655 			pg_tbl_size = rmem->page_size;
3656 		dma_free_coherent(&pdev->dev, pg_tbl_size,
3657 				  rmem->pg_tbl, rmem->pg_tbl_map);
3658 		rmem->pg_tbl = NULL;
3659 	}
3660 	if (rmem->vmem_size && *rmem->vmem) {
3661 		vfree(*rmem->vmem);
3662 		*rmem->vmem = NULL;
3663 	}
3664 }
3665 
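/* Editor's note (illustrative, not part of the original source): for
 * multi-page rings the hardware walks an indirection page table rather than
 * the data pages directly.  bnxt_alloc_ring() below fills one little-endian
 * 64-bit PTE per data page; for ring-type memory (BNXT_RMEM_RING_PTE_FLAG)
 * the second-to-last and last entries additionally carry
 * PTU_PTE_NEXT_TO_LAST and PTU_PTE_LAST so the chip knows where the ring
 * wraps.
 */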
3666 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3667 {
3668 	struct pci_dev *pdev = bp->pdev;
3669 	u64 valid_bit = 0;
3670 	int i;
3671 
3672 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3673 		valid_bit = PTU_PTE_VALID;
3674 	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3675 		size_t pg_tbl_size = rmem->nr_pages * 8;
3676 
3677 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3678 			pg_tbl_size = rmem->page_size;
3679 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3680 						  &rmem->pg_tbl_map,
3681 						  GFP_KERNEL);
3682 		if (!rmem->pg_tbl)
3683 			return -ENOMEM;
3684 	}
3685 
3686 	for (i = 0; i < rmem->nr_pages; i++) {
3687 		u64 extra_bits = valid_bit;
3688 
3689 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3690 						     rmem->page_size,
3691 						     &rmem->dma_arr[i],
3692 						     GFP_KERNEL);
3693 		if (!rmem->pg_arr[i])
3694 			return -ENOMEM;
3695 
3696 		if (rmem->ctx_mem)
3697 			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3698 					  rmem->page_size);
3699 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
3700 			if (i == rmem->nr_pages - 2 &&
3701 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3702 				extra_bits |= PTU_PTE_NEXT_TO_LAST;
3703 			else if (i == rmem->nr_pages - 1 &&
3704 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3705 				extra_bits |= PTU_PTE_LAST;
3706 			rmem->pg_tbl[i] =
3707 				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3708 		}
3709 	}
3710 
3711 	if (rmem->vmem_size) {
3712 		*rmem->vmem = vzalloc(rmem->vmem_size);
3713 		if (!(*rmem->vmem))
3714 			return -ENOMEM;
3715 	}
3716 	return 0;
3717 }
3718 
3719 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3720 				   struct bnxt_rx_ring_info *rxr)
3721 {
3722 	int i;
3723 
3724 	kfree(rxr->rx_tpa_idx_map);
3725 	rxr->rx_tpa_idx_map = NULL;
3726 	if (rxr->rx_tpa) {
3727 		for (i = 0; i < bp->max_tpa; i++) {
3728 			kfree(rxr->rx_tpa[i].agg_arr);
3729 			rxr->rx_tpa[i].agg_arr = NULL;
3730 		}
3731 	}
3732 	kfree(rxr->rx_tpa);
3733 	rxr->rx_tpa = NULL;
3734 }
3735 
3736 static void bnxt_free_tpa_info(struct bnxt *bp)
3737 {
3738 	int i;
3739 
3740 	for (i = 0; i < bp->rx_nr_rings; i++) {
3741 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3742 
3743 		bnxt_free_one_tpa_info(bp, rxr);
3744 	}
3745 }
3746 
3747 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3748 				   struct bnxt_rx_ring_info *rxr)
3749 {
3750 	struct rx_agg_cmp *agg;
3751 	int i;
3752 
3753 	rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3754 			      GFP_KERNEL);
3755 	if (!rxr->rx_tpa)
3756 		return -ENOMEM;
3757 
3758 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3759 		return 0;
3760 	for (i = 0; i < bp->max_tpa; i++) {
3761 		agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3762 		if (!agg)
3763 			return -ENOMEM;
3764 		rxr->rx_tpa[i].agg_arr = agg;
3765 	}
3766 	rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3767 				      GFP_KERNEL);
3768 	if (!rxr->rx_tpa_idx_map)
3769 		return -ENOMEM;
3770 
3771 	return 0;
3772 }
3773 
3774 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3775 {
3776 	int i, rc;
3777 
3778 	bp->max_tpa = MAX_TPA;
3779 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3780 		if (!bp->max_tpa_v2)
3781 			return 0;
3782 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3783 	}
3784 
3785 	for (i = 0; i < bp->rx_nr_rings; i++) {
3786 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3787 
3788 		rc = bnxt_alloc_one_tpa_info(bp, rxr);
3789 		if (rc)
3790 			return rc;
3791 	}
3792 	return 0;
3793 }
3794 
3795 static void bnxt_free_rx_rings(struct bnxt *bp)
3796 {
3797 	int i;
3798 
3799 	if (!bp->rx_ring)
3800 		return;
3801 
3802 	bnxt_free_tpa_info(bp);
3803 	for (i = 0; i < bp->rx_nr_rings; i++) {
3804 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3805 		struct bnxt_ring_struct *ring;
3806 
3807 		if (rxr->xdp_prog)
3808 			bpf_prog_put(rxr->xdp_prog);
3809 
3810 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3811 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3812 
3813 		page_pool_destroy(rxr->page_pool);
3814 		page_pool_destroy(rxr->head_pool);
3815 		rxr->page_pool = rxr->head_pool = NULL;
3816 
3817 		kfree(rxr->rx_agg_bmap);
3818 		rxr->rx_agg_bmap = NULL;
3819 
3820 		ring = &rxr->rx_ring_struct;
3821 		bnxt_free_ring(bp, &ring->ring_mem);
3822 
3823 		ring = &rxr->rx_agg_ring_struct;
3824 		bnxt_free_ring(bp, &ring->ring_mem);
3825 	}
3826 }
3827 
3828 static int bnxt_rx_agg_ring_fill_level(struct bnxt *bp,
3829 				       struct bnxt_rx_ring_info *rxr)
3830 {
3831 	/* The user may have chosen an rx_page_size larger than the default.
3832 	 * We keep the ring sizes uniform and also want a uniform number of
3833 	 * bytes consumed per ring, so cap how much of the ring we fill.
3834 	 */
3835 	int fill_level = bp->rx_agg_ring_size;
3836 
3837 	if (rxr->rx_page_size > BNXT_RX_PAGE_SIZE)
3838 		fill_level /= rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3839 
3840 	return fill_level;
3841 }
3842 
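/* Editor's note (illustrative, not part of the original source): each RX
 * ring uses up to two page pools.  rxr->page_pool backs the data and
 * aggregation buffers and may hand out unreadable netmem
 * (PP_FLAG_ALLOW_UNREADABLE_NETMEM) or high-order pages; in either case the
 * header/linear buffers still need plain, order-0 readable pages, so a
 * separate rxr->head_pool is created.  Otherwise head_pool simply takes an
 * extra reference on the same pool.
 */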
3843 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3844 				   struct bnxt_rx_ring_info *rxr,
3845 				   int numa_node)
3846 {
3847 	unsigned int agg_size_fac = rxr->rx_page_size / BNXT_RX_PAGE_SIZE;
3848 	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3849 	struct page_pool_params pp = { 0 };
3850 	struct page_pool *pool;
3851 
3852 	pp.pool_size = bnxt_rx_agg_ring_fill_level(bp, rxr) / agg_size_fac;
3853 	if (BNXT_RX_PAGE_MODE(bp))
3854 		pp.pool_size += bp->rx_ring_size / rx_size_fac;
3855 
3856 	pp.order = get_order(rxr->rx_page_size);
3857 	pp.nid = numa_node;
3858 	pp.netdev = bp->dev;
3859 	pp.dev = &bp->pdev->dev;
3860 	pp.dma_dir = bp->rx_dir;
3861 	pp.max_len = PAGE_SIZE << pp.order;
3862 	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3863 		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3864 	pp.queue_idx = rxr->bnapi->index;
3865 
3866 	pool = page_pool_create(&pp);
3867 	if (IS_ERR(pool))
3868 		return PTR_ERR(pool);
3869 	rxr->page_pool = pool;
3870 
3871 	rxr->need_head_pool = page_pool_is_unreadable(pool);
3872 	rxr->need_head_pool |= !!pp.order;
3873 	if (bnxt_separate_head_pool(rxr)) {
3874 		pp.order = 0;
3875 		pp.max_len = PAGE_SIZE;
3876 		pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3877 		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3878 		pool = page_pool_create(&pp);
3879 		if (IS_ERR(pool))
3880 			goto err_destroy_pp;
3881 	} else {
3882 		page_pool_get(pool);
3883 	}
3884 	rxr->head_pool = pool;
3885 
3886 	return 0;
3887 
3888 err_destroy_pp:
3889 	page_pool_destroy(rxr->page_pool);
3890 	rxr->page_pool = NULL;
3891 	return PTR_ERR(pool);
3892 }
3893 
3894 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3895 {
3896 	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3897 	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3898 }
3899 
3900 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3901 {
3902 	u16 mem_size;
3903 
3904 	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3905 	mem_size = rxr->rx_agg_bmap_size / 8;
3906 	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3907 	if (!rxr->rx_agg_bmap)
3908 		return -ENOMEM;
3909 
3910 	return 0;
3911 }
3912 
3913 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3914 {
3915 	int numa_node = dev_to_node(&bp->pdev->dev);
3916 	int i, rc = 0, agg_rings = 0, cpu;
3917 
3918 	if (!bp->rx_ring)
3919 		return -ENOMEM;
3920 
3921 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
3922 		agg_rings = 1;
3923 
3924 	for (i = 0; i < bp->rx_nr_rings; i++) {
3925 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3926 		struct bnxt_ring_struct *ring;
3927 		int cpu_node;
3928 
3929 		ring = &rxr->rx_ring_struct;
3930 
3931 		cpu = cpumask_local_spread(i, numa_node);
3932 		cpu_node = cpu_to_node(cpu);
3933 		netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3934 			   i, cpu_node);
3935 		rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3936 		if (rc)
3937 			return rc;
3938 		bnxt_enable_rx_page_pool(rxr);
3939 
3940 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3941 		if (rc < 0)
3942 			return rc;
3943 
3944 		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3945 						MEM_TYPE_PAGE_POOL,
3946 						rxr->page_pool);
3947 		if (rc) {
3948 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3949 			return rc;
3950 		}
3951 
3952 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3953 		if (rc)
3954 			return rc;
3955 
3956 		ring->grp_idx = i;
3957 		if (agg_rings) {
3958 			ring = &rxr->rx_agg_ring_struct;
3959 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3960 			if (rc)
3961 				return rc;
3962 
3963 			ring->grp_idx = i;
3964 			rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3965 			if (rc)
3966 				return rc;
3967 		}
3968 	}
3969 	if (bp->flags & BNXT_FLAG_TPA)
3970 		rc = bnxt_alloc_tpa_info(bp);
3971 	return rc;
3972 }
3973 
3974 static void bnxt_free_tx_rings(struct bnxt *bp)
3975 {
3976 	int i;
3977 	struct pci_dev *pdev = bp->pdev;
3978 
3979 	if (!bp->tx_ring)
3980 		return;
3981 
3982 	for (i = 0; i < bp->tx_nr_rings; i++) {
3983 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3984 		struct bnxt_ring_struct *ring;
3985 
3986 		if (txr->tx_push) {
3987 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
3988 					  txr->tx_push, txr->tx_push_mapping);
3989 			txr->tx_push = NULL;
3990 		}
3991 
3992 		ring = &txr->tx_ring_struct;
3993 
3994 		bnxt_free_ring(bp, &ring->ring_mem);
3995 	}
3996 }
3997 
3998 #define BNXT_TC_TO_RING_BASE(bp, tc)	\
3999 	((tc) * (bp)->tx_nr_rings_per_tc)
4000 
4001 #define BNXT_RING_TO_TC_OFF(bp, tx)	\
4002 	((tx) % (bp)->tx_nr_rings_per_tc)
4003 
4004 #define BNXT_RING_TO_TC(bp, tx)		\
4005 	((tx) / (bp)->tx_nr_rings_per_tc)
4006 
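/* Editor's note (illustrative, not part of the original source): the three
 * macros above map between a global TX ring index and its (traffic class,
 * offset) pair.  For example, with tx_nr_rings_per_tc = 4, TC 1 starts at
 * ring BNXT_TC_TO_RING_BASE(bp, 1) = 4, and ring 6 resolves to
 * BNXT_RING_TO_TC(bp, 6) = 1 with BNXT_RING_TO_TC_OFF(bp, 6) = 2.
 */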
4007 static int bnxt_alloc_tx_rings(struct bnxt *bp)
4008 {
4009 	int i, j, rc;
4010 	struct pci_dev *pdev = bp->pdev;
4011 
4012 	bp->tx_push_size = 0;
4013 	if (bp->tx_push_thresh) {
4014 		int push_size;
4015 
4016 		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
4017 					bp->tx_push_thresh);
4018 
4019 		if (push_size > 256) {
4020 			push_size = 0;
4021 			bp->tx_push_thresh = 0;
4022 		}
4023 
4024 		bp->tx_push_size = push_size;
4025 	}
4026 
4027 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
4028 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4029 		struct bnxt_ring_struct *ring;
4030 		u8 qidx;
4031 
4032 		ring = &txr->tx_ring_struct;
4033 
4034 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4035 		if (rc)
4036 			return rc;
4037 
4038 		ring->grp_idx = txr->bnapi->index;
4039 		if (bp->tx_push_size) {
4040 			dma_addr_t mapping;
4041 
4042 			/* One pre-allocated DMA buffer to back up the
4043 			 * TX push operation
4044 			 */
4045 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
4046 						bp->tx_push_size,
4047 						&txr->tx_push_mapping,
4048 						GFP_KERNEL);
4049 
4050 			if (!txr->tx_push)
4051 				return -ENOMEM;
4052 
4053 			mapping = txr->tx_push_mapping +
4054 				sizeof(struct tx_push_bd);
4055 			txr->data_mapping = cpu_to_le64(mapping);
4056 		}
4057 		qidx = bp->tc_to_qidx[j];
4058 		ring->queue_id = bp->q_info[qidx].queue_id;
4059 		spin_lock_init(&txr->xdp_tx_lock);
4060 		if (i < bp->tx_nr_rings_xdp)
4061 			continue;
4062 		if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4063 			j++;
4064 	}
4065 	return 0;
4066 }
4067 
4068 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4069 {
4070 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4071 
4072 	kfree(cpr->cp_desc_ring);
4073 	cpr->cp_desc_ring = NULL;
4074 	ring->ring_mem.pg_arr = NULL;
4075 	kfree(cpr->cp_desc_mapping);
4076 	cpr->cp_desc_mapping = NULL;
4077 	ring->ring_mem.dma_arr = NULL;
4078 }
4079 
4080 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4081 {
4082 	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
4083 	if (!cpr->cp_desc_ring)
4084 		return -ENOMEM;
4085 	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
4086 				       GFP_KERNEL);
4087 	if (!cpr->cp_desc_mapping)
4088 		return -ENOMEM;
4089 	return 0;
4090 }
4091 
4092 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4093 {
4094 	int i;
4095 
4096 	if (!bp->bnapi)
4097 		return;
4098 	for (i = 0; i < bp->cp_nr_rings; i++) {
4099 		struct bnxt_napi *bnapi = bp->bnapi[i];
4100 
4101 		if (!bnapi)
4102 			continue;
4103 		bnxt_free_cp_arrays(&bnapi->cp_ring);
4104 	}
4105 }
4106 
4107 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4108 {
4109 	int i, n = bp->cp_nr_pages;
4110 
4111 	for (i = 0; i < bp->cp_nr_rings; i++) {
4112 		struct bnxt_napi *bnapi = bp->bnapi[i];
4113 		int rc;
4114 
4115 		if (!bnapi)
4116 			continue;
4117 		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4118 		if (rc)
4119 			return rc;
4120 	}
4121 	return 0;
4122 }
4123 
4124 static void bnxt_free_cp_rings(struct bnxt *bp)
4125 {
4126 	int i;
4127 
4128 	if (!bp->bnapi)
4129 		return;
4130 
4131 	for (i = 0; i < bp->cp_nr_rings; i++) {
4132 		struct bnxt_napi *bnapi = bp->bnapi[i];
4133 		struct bnxt_cp_ring_info *cpr;
4134 		struct bnxt_ring_struct *ring;
4135 		int j;
4136 
4137 		if (!bnapi)
4138 			continue;
4139 
4140 		cpr = &bnapi->cp_ring;
4141 		ring = &cpr->cp_ring_struct;
4142 
4143 		bnxt_free_ring(bp, &ring->ring_mem);
4144 
4145 		if (!cpr->cp_ring_arr)
4146 			continue;
4147 
4148 		for (j = 0; j < cpr->cp_ring_count; j++) {
4149 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4150 
4151 			ring = &cpr2->cp_ring_struct;
4152 			bnxt_free_ring(bp, &ring->ring_mem);
4153 			bnxt_free_cp_arrays(cpr2);
4154 		}
4155 		kfree(cpr->cp_ring_arr);
4156 		cpr->cp_ring_arr = NULL;
4157 		cpr->cp_ring_count = 0;
4158 	}
4159 }
4160 
4161 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4162 				  struct bnxt_cp_ring_info *cpr)
4163 {
4164 	struct bnxt_ring_mem_info *rmem;
4165 	struct bnxt_ring_struct *ring;
4166 	int rc;
4167 
4168 	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4169 	if (rc) {
4170 		bnxt_free_cp_arrays(cpr);
4171 		return -ENOMEM;
4172 	}
4173 	ring = &cpr->cp_ring_struct;
4174 	rmem = &ring->ring_mem;
4175 	rmem->nr_pages = bp->cp_nr_pages;
4176 	rmem->page_size = HW_CMPD_RING_SIZE;
4177 	rmem->pg_arr = (void **)cpr->cp_desc_ring;
4178 	rmem->dma_arr = cpr->cp_desc_mapping;
4179 	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4180 	rc = bnxt_alloc_ring(bp, rmem);
4181 	if (rc) {
4182 		bnxt_free_ring(bp, rmem);
4183 		bnxt_free_cp_arrays(cpr);
4184 	}
4185 	return rc;
4186 }
4187 
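/* Editor's note (illustrative, not part of the original source): on P5+
 * chips each NAPI instance owns a notification queue (the cp_ring allocated
 * first in the loop below) plus an array of per-purpose completion
 * sub-rings: one for RX if the index has an RX ring, and either a single one
 * for an XDP TX ring or one per traffic class for regular TX.  That is what
 * the cp_count accounting below computes before cp_ring_arr is allocated.
 */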
4188 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4189 {
4190 	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4191 	int i, j, rc, ulp_msix;
4192 	int tcs = bp->num_tc;
4193 
4194 	if (!tcs)
4195 		tcs = 1;
4196 	ulp_msix = bnxt_get_ulp_msix_num(bp);
4197 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4198 		struct bnxt_napi *bnapi = bp->bnapi[i];
4199 		struct bnxt_cp_ring_info *cpr, *cpr2;
4200 		struct bnxt_ring_struct *ring;
4201 		int cp_count = 0, k;
4202 		int rx = 0, tx = 0;
4203 
4204 		if (!bnapi)
4205 			continue;
4206 
4207 		cpr = &bnapi->cp_ring;
4208 		cpr->bnapi = bnapi;
4209 		ring = &cpr->cp_ring_struct;
4210 
4211 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4212 		if (rc)
4213 			return rc;
4214 
4215 		ring->map_idx = ulp_msix + i;
4216 
4217 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4218 			continue;
4219 
4220 		if (i < bp->rx_nr_rings) {
4221 			cp_count++;
4222 			rx = 1;
4223 		}
4224 		if (i < bp->tx_nr_rings_xdp) {
4225 			cp_count++;
4226 			tx = 1;
4227 		} else if ((sh && i < bp->tx_nr_rings) ||
4228 			 (!sh && i >= bp->rx_nr_rings)) {
4229 			cp_count += tcs;
4230 			tx = 1;
4231 		}
4232 
4233 		cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
4234 					   GFP_KERNEL);
4235 		if (!cpr->cp_ring_arr)
4236 			return -ENOMEM;
4237 		cpr->cp_ring_count = cp_count;
4238 
4239 		for (k = 0; k < cp_count; k++) {
4240 			cpr2 = &cpr->cp_ring_arr[k];
4241 			rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4242 			if (rc)
4243 				return rc;
4244 			cpr2->bnapi = bnapi;
4245 			cpr2->sw_stats = cpr->sw_stats;
4246 			cpr2->cp_idx = k;
4247 			if (!k && rx) {
4248 				bp->rx_ring[i].rx_cpr = cpr2;
4249 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4250 			} else {
4251 				int n, tc = k - rx;
4252 
4253 				n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4254 				bp->tx_ring[n].tx_cpr = cpr2;
4255 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4256 			}
4257 		}
4258 		if (tx)
4259 			j++;
4260 	}
4261 	return 0;
4262 }
4263 
4264 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4265 				     struct bnxt_rx_ring_info *rxr)
4266 {
4267 	struct bnxt_ring_mem_info *rmem;
4268 	struct bnxt_ring_struct *ring;
4269 
4270 	ring = &rxr->rx_ring_struct;
4271 	rmem = &ring->ring_mem;
4272 	rmem->nr_pages = bp->rx_nr_pages;
4273 	rmem->page_size = HW_RXBD_RING_SIZE;
4274 	rmem->pg_arr = (void **)rxr->rx_desc_ring;
4275 	rmem->dma_arr = rxr->rx_desc_mapping;
4276 	rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4277 	rmem->vmem = (void **)&rxr->rx_buf_ring;
4278 
4279 	ring = &rxr->rx_agg_ring_struct;
4280 	rmem = &ring->ring_mem;
4281 	rmem->nr_pages = bp->rx_agg_nr_pages;
4282 	rmem->page_size = HW_RXBD_RING_SIZE;
4283 	rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4284 	rmem->dma_arr = rxr->rx_agg_desc_mapping;
4285 	rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4286 	rmem->vmem = (void **)&rxr->rx_agg_ring;
4287 }
4288 
4289 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4290 				      struct bnxt_rx_ring_info *rxr)
4291 {
4292 	struct bnxt_ring_mem_info *rmem;
4293 	struct bnxt_ring_struct *ring;
4294 	int i;
4295 
4296 	rxr->page_pool->p.napi = NULL;
4297 	rxr->page_pool = NULL;
4298 	rxr->head_pool->p.napi = NULL;
4299 	rxr->head_pool = NULL;
4300 	memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4301 
4302 	ring = &rxr->rx_ring_struct;
4303 	rmem = &ring->ring_mem;
4304 	rmem->pg_tbl = NULL;
4305 	rmem->pg_tbl_map = 0;
4306 	for (i = 0; i < rmem->nr_pages; i++) {
4307 		rmem->pg_arr[i] = NULL;
4308 		rmem->dma_arr[i] = 0;
4309 	}
4310 	*rmem->vmem = NULL;
4311 
4312 	ring = &rxr->rx_agg_ring_struct;
4313 	rmem = &ring->ring_mem;
4314 	rmem->pg_tbl = NULL;
4315 	rmem->pg_tbl_map = 0;
4316 	for (i = 0; i < rmem->nr_pages; i++) {
4317 		rmem->pg_arr[i] = NULL;
4318 		rmem->dma_arr[i] = 0;
4319 	}
4320 	*rmem->vmem = NULL;
4321 }
4322 
4323 static void bnxt_init_ring_struct(struct bnxt *bp)
4324 {
4325 	int i, j;
4326 
4327 	for (i = 0; i < bp->cp_nr_rings; i++) {
4328 		struct bnxt_napi *bnapi = bp->bnapi[i];
4329 		struct netdev_queue_config qcfg;
4330 		struct bnxt_ring_mem_info *rmem;
4331 		struct bnxt_cp_ring_info *cpr;
4332 		struct bnxt_rx_ring_info *rxr;
4333 		struct bnxt_tx_ring_info *txr;
4334 		struct bnxt_ring_struct *ring;
4335 
4336 		if (!bnapi)
4337 			continue;
4338 
4339 		cpr = &bnapi->cp_ring;
4340 		ring = &cpr->cp_ring_struct;
4341 		rmem = &ring->ring_mem;
4342 		rmem->nr_pages = bp->cp_nr_pages;
4343 		rmem->page_size = HW_CMPD_RING_SIZE;
4344 		rmem->pg_arr = (void **)cpr->cp_desc_ring;
4345 		rmem->dma_arr = cpr->cp_desc_mapping;
4346 		rmem->vmem_size = 0;
4347 
4348 		rxr = bnapi->rx_ring;
4349 		if (!rxr)
4350 			goto skip_rx;
4351 
4352 		netdev_queue_config(bp->dev, i, &qcfg);
4353 		rxr->rx_page_size = qcfg.rx_page_size;
4354 
4355 		ring = &rxr->rx_ring_struct;
4356 		rmem = &ring->ring_mem;
4357 		rmem->nr_pages = bp->rx_nr_pages;
4358 		rmem->page_size = HW_RXBD_RING_SIZE;
4359 		rmem->pg_arr = (void **)rxr->rx_desc_ring;
4360 		rmem->dma_arr = rxr->rx_desc_mapping;
4361 		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4362 		rmem->vmem = (void **)&rxr->rx_buf_ring;
4363 
4364 		ring = &rxr->rx_agg_ring_struct;
4365 		rmem = &ring->ring_mem;
4366 		rmem->nr_pages = bp->rx_agg_nr_pages;
4367 		rmem->page_size = HW_RXBD_RING_SIZE;
4368 		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4369 		rmem->dma_arr = rxr->rx_agg_desc_mapping;
4370 		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4371 		rmem->vmem = (void **)&rxr->rx_agg_ring;
4372 
4373 skip_rx:
4374 		bnxt_for_each_napi_tx(j, bnapi, txr) {
4375 			ring = &txr->tx_ring_struct;
4376 			rmem = &ring->ring_mem;
4377 			rmem->nr_pages = bp->tx_nr_pages;
4378 			rmem->page_size = HW_TXBD_RING_SIZE;
4379 			rmem->pg_arr = (void **)txr->tx_desc_ring;
4380 			rmem->dma_arr = txr->tx_desc_mapping;
4381 			rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4382 			rmem->vmem = (void **)&txr->tx_buf_ring;
4383 		}
4384 	}
4385 }
4386 
4387 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4388 {
4389 	int i;
4390 	u32 prod;
4391 	struct rx_bd **rx_buf_ring;
4392 
4393 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4394 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4395 		int j;
4396 		struct rx_bd *rxbd;
4397 
4398 		rxbd = rx_buf_ring[i];
4399 		if (!rxbd)
4400 			continue;
4401 
4402 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4403 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4404 			rxbd->rx_bd_opaque = prod;
4405 		}
4406 	}
4407 }
4408 
4409 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4410 				       struct bnxt_rx_ring_info *rxr,
4411 				       int ring_nr)
4412 {
4413 	u32 prod;
4414 	int i;
4415 
4416 	prod = rxr->rx_prod;
4417 	for (i = 0; i < bp->rx_ring_size; i++) {
4418 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4419 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4420 				    ring_nr, i, bp->rx_ring_size);
4421 			break;
4422 		}
4423 		prod = NEXT_RX(prod);
4424 	}
4425 	rxr->rx_prod = prod;
4426 }
4427 
4428 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4429 					  struct bnxt_rx_ring_info *rxr,
4430 					  int ring_nr)
4431 {
4432 	int fill_level, i;
4433 	u32 prod;
4434 
4435 	fill_level = bnxt_rx_agg_ring_fill_level(bp, rxr);
4436 
4437 	prod = rxr->rx_agg_prod;
4438 	for (i = 0; i < fill_level; i++) {
4439 		if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4440 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4441 				    ring_nr, i, bp->rx_agg_ring_size);
4442 			break;
4443 		}
4444 		prod = NEXT_RX_AGG(prod);
4445 	}
4446 	rxr->rx_agg_prod = prod;
4447 }
4448 
4449 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4450 					struct bnxt_rx_ring_info *rxr)
4451 {
4452 	dma_addr_t mapping;
4453 	u8 *data;
4454 	int i;
4455 
4456 	for (i = 0; i < bp->max_tpa; i++) {
4457 		data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4458 					    GFP_KERNEL);
4459 		if (!data)
4460 			return -ENOMEM;
4461 
4462 		rxr->rx_tpa[i].data = data;
4463 		rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4464 		rxr->rx_tpa[i].mapping = mapping;
4465 	}
4466 
4467 	return 0;
4468 }
4469 
4470 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4471 {
4472 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4473 	int rc;
4474 
4475 	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4476 
4477 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4478 		return 0;
4479 
4480 	bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4481 
4482 	if (rxr->rx_tpa) {
4483 		rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4484 		if (rc)
4485 			return rc;
4486 	}
4487 	return 0;
4488 }
4489 
4490 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4491 				       struct bnxt_rx_ring_info *rxr)
4492 {
4493 	struct bnxt_ring_struct *ring;
4494 	u32 type;
4495 
4496 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4497 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4498 
4499 	if (NET_IP_ALIGN == 2)
4500 		type |= RX_BD_FLAGS_SOP;
4501 
4502 	ring = &rxr->rx_ring_struct;
4503 	bnxt_init_rxbd_pages(ring, type);
4504 	ring->fw_ring_id = INVALID_HW_RING_ID;
4505 }
4506 
4507 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4508 					   struct bnxt_rx_ring_info *rxr)
4509 {
4510 	struct bnxt_ring_struct *ring;
4511 	u32 type;
4512 
4513 	ring = &rxr->rx_agg_ring_struct;
4514 	ring->fw_ring_id = INVALID_HW_RING_ID;
4515 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4516 		type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
4517 			RX_BD_TYPE_RX_AGG_BD;
4518 
4519 		/* On P7, setting EOP will cause the chip to disable
4520 		 * Relaxed Ordering (RO) for TPA data.  Disable EOP for
4521 		 * potentially higher performance with RO.
4522 		 */
4523 		if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
4524 			type |= RX_BD_FLAGS_AGG_EOP;
4525 
4526 		bnxt_init_rxbd_pages(ring, type);
4527 	}
4528 }
4529 
4530 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4531 {
4532 	struct bnxt_rx_ring_info *rxr;
4533 
4534 	rxr = &bp->rx_ring[ring_nr];
4535 	bnxt_init_one_rx_ring_rxbd(bp, rxr);
4536 
4537 	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4538 			     &rxr->bnapi->napi);
4539 
4540 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4541 		bpf_prog_add(bp->xdp_prog, 1);
4542 		rxr->xdp_prog = bp->xdp_prog;
4543 	}
4544 
4545 	bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4546 
4547 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
4548 }
4549 
4550 static void bnxt_init_cp_rings(struct bnxt *bp)
4551 {
4552 	int i, j;
4553 
4554 	for (i = 0; i < bp->cp_nr_rings; i++) {
4555 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4556 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4557 
4558 		ring->fw_ring_id = INVALID_HW_RING_ID;
4559 		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4560 		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4561 		if (!cpr->cp_ring_arr)
4562 			continue;
4563 		for (j = 0; j < cpr->cp_ring_count; j++) {
4564 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4565 
4566 			ring = &cpr2->cp_ring_struct;
4567 			ring->fw_ring_id = INVALID_HW_RING_ID;
4568 			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4569 			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4570 		}
4571 	}
4572 }
4573 
4574 static int bnxt_init_rx_rings(struct bnxt *bp)
4575 {
4576 	int i, rc = 0;
4577 
4578 	if (BNXT_RX_PAGE_MODE(bp)) {
4579 		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4580 		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4581 	} else {
4582 		bp->rx_offset = BNXT_RX_OFFSET;
4583 		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4584 	}
4585 
4586 	for (i = 0; i < bp->rx_nr_rings; i++) {
4587 		rc = bnxt_init_one_rx_ring(bp, i);
4588 		if (rc)
4589 			break;
4590 	}
4591 
4592 	return rc;
4593 }
4594 
4595 static int bnxt_init_tx_rings(struct bnxt *bp)
4596 {
4597 	u16 i;
4598 
4599 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4600 				   BNXT_MIN_TX_DESC_CNT);
4601 
4602 	for (i = 0; i < bp->tx_nr_rings; i++) {
4603 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4604 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4605 
4606 		ring->fw_ring_id = INVALID_HW_RING_ID;
4607 
4608 		if (i >= bp->tx_nr_rings_xdp)
4609 			netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4610 					     NETDEV_QUEUE_TYPE_TX,
4611 					     &txr->bnapi->napi);
4612 	}
4613 
4614 	return 0;
4615 }
4616 
4617 static void bnxt_free_ring_grps(struct bnxt *bp)
4618 {
4619 	kfree(bp->grp_info);
4620 	bp->grp_info = NULL;
4621 }
4622 
4623 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4624 {
4625 	int i;
4626 
4627 	if (irq_re_init) {
4628 		bp->grp_info = kcalloc(bp->cp_nr_rings,
4629 				       sizeof(struct bnxt_ring_grp_info),
4630 				       GFP_KERNEL);
4631 		if (!bp->grp_info)
4632 			return -ENOMEM;
4633 	}
4634 	for (i = 0; i < bp->cp_nr_rings; i++) {
4635 		if (irq_re_init)
4636 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4637 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4638 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4639 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4640 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4641 	}
4642 	return 0;
4643 }
4644 
4645 static void bnxt_free_vnics(struct bnxt *bp)
4646 {
4647 	kfree(bp->vnic_info);
4648 	bp->vnic_info = NULL;
4649 	bp->nr_vnics = 0;
4650 }
4651 
4652 static int bnxt_alloc_vnics(struct bnxt *bp)
4653 {
4654 	int num_vnics = 1;
4655 
4656 #ifdef CONFIG_RFS_ACCEL
4657 	if (bp->flags & BNXT_FLAG_RFS) {
4658 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4659 			num_vnics++;
4660 		else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4661 			num_vnics += bp->rx_nr_rings;
4662 	}
4663 #endif
4664 
4665 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4666 		num_vnics++;
4667 
4668 	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4669 				GFP_KERNEL);
4670 	if (!bp->vnic_info)
4671 		return -ENOMEM;
4672 
4673 	bp->nr_vnics = num_vnics;
4674 	return 0;
4675 }
4676 
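/* Editor's note (illustrative, not part of the original source): in
 * bnxt_init_vnics() below, the default VNIC generates (or re-uses) the RSS
 * Toeplitz hash key and caches its first eight bytes, packed big-endian,
 * in bp->toeplitz_prefix; secondary VNICs simply copy the default VNIC's
 * key so that all rings hash consistently.
 */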
4677 static void bnxt_init_vnics(struct bnxt *bp)
4678 {
4679 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4680 	int i;
4681 
4682 	for (i = 0; i < bp->nr_vnics; i++) {
4683 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4684 		int j;
4685 
4686 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
4687 		vnic->vnic_id = i;
4688 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4689 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4690 
4691 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4692 
4693 		if (bp->vnic_info[i].rss_hash_key) {
4694 			if (i == BNXT_VNIC_DEFAULT) {
4695 				u8 *key = (void *)vnic->rss_hash_key;
4696 				int k;
4697 
4698 				if (!bp->rss_hash_key_valid &&
4699 				    !bp->rss_hash_key_updated) {
4700 					get_random_bytes(bp->rss_hash_key,
4701 							 HW_HASH_KEY_SIZE);
4702 					bp->rss_hash_key_updated = true;
4703 				}
4704 
4705 				memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4706 				       HW_HASH_KEY_SIZE);
4707 
4708 				if (!bp->rss_hash_key_updated)
4709 					continue;
4710 
4711 				bp->rss_hash_key_updated = false;
4712 				bp->rss_hash_key_valid = true;
4713 
4714 				bp->toeplitz_prefix = 0;
4715 				for (k = 0; k < 8; k++) {
4716 					bp->toeplitz_prefix <<= 8;
4717 					bp->toeplitz_prefix |= key[k];
4718 				}
4719 			} else {
4720 				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4721 				       HW_HASH_KEY_SIZE);
4722 			}
4723 		}
4724 	}
4725 }
4726 
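/* Editor's note (illustrative, not part of the original source): the helper
 * below returns the number of descriptor pages for a ring, rounded up to a
 * power of two so the ring-mask arithmetic used elsewhere works.  The page
 * count is bumped by one before rounding, which suits the driver's usual
 * "power of two minus one" ring sizes.  For example, ring_size = 511 with
 * 64 descriptors per page gives 511 / 64 = 7, then 8 after the increment:
 * 8 pages, 512 descriptors, ring mask 511.
 */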
4727 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4728 {
4729 	int pages;
4730 
4731 	pages = ring_size / desc_per_pg;
4732 
4733 	if (!pages)
4734 		return 1;
4735 
4736 	pages++;
4737 
4738 	while (pages & (pages - 1))
4739 		pages++;
4740 
4741 	return pages;
4742 }
4743 
4744 void bnxt_set_tpa_flags(struct bnxt *bp)
4745 {
4746 	bp->flags &= ~BNXT_FLAG_TPA;
4747 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4748 		return;
4749 	if (bp->dev->features & NETIF_F_LRO)
4750 		bp->flags |= BNXT_FLAG_LRO;
4751 	else if (bp->dev->features & NETIF_F_GRO_HW)
4752 		bp->flags |= BNXT_FLAG_GRO;
4753 }
4754 
4755 static void bnxt_init_ring_params(struct bnxt *bp)
4756 {
4757 	unsigned int rx_size;
4758 
4759 	bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4760 	/* Try to fit 4 chunks into a 4k page */
4761 	rx_size = SZ_1K -
4762 		NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4763 	bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4764 }
4765 
4766 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4767  * be set on entry.
4768  */
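/* Editor's note (illustrative, not part of the original source): a rough
 * worked example of the rx_size/rx_space math below, assuming a 1500-byte
 * MTU, NET_IP_ALIGN == 2, 64-byte SKB_DATA_ALIGN and
 * XDP_PACKET_HEADROOM == 256 (all of these vary by architecture/config):
 *
 *	rx_size  = SKB_DATA_ALIGN(1500 + 14 + 2 + 8)  = 1536
 *	rx_space = 1536 + 256 +
 *		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * i.e. each RX buffer ends up a little over 2 KiB on a typical 64-bit build.
 */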
4769 void bnxt_set_ring_params(struct bnxt *bp)
4770 {
4771 	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4772 	u32 agg_factor = 0, agg_ring_size = 0;
4773 
4774 	/* 8 for CRC and VLAN */
4775 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4776 
4777 	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4778 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4779 
4780 	ring_size = bp->rx_ring_size;
4781 	bp->rx_agg_ring_size = 0;
4782 	bp->rx_agg_nr_pages = 0;
4783 
4784 	if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4785 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4786 
4787 	bp->flags &= ~BNXT_FLAG_JUMBO;
4788 	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4789 		u32 jumbo_factor;
4790 
4791 		bp->flags |= BNXT_FLAG_JUMBO;
4792 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4793 		if (jumbo_factor > agg_factor)
4794 			agg_factor = jumbo_factor;
4795 	}
4796 	if (agg_factor) {
4797 		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4798 			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4799 			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4800 				    bp->rx_ring_size, ring_size);
4801 			bp->rx_ring_size = ring_size;
4802 		}
4803 		agg_ring_size = ring_size * agg_factor;
4804 
4805 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4806 							RX_DESC_CNT);
4807 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4808 			u32 tmp = agg_ring_size;
4809 
4810 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4811 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4812 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4813 				    tmp, agg_ring_size);
4814 		}
4815 		bp->rx_agg_ring_size = agg_ring_size;
4816 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4817 
4818 		if (BNXT_RX_PAGE_MODE(bp)) {
4819 			rx_space = PAGE_SIZE;
4820 			rx_size = PAGE_SIZE -
4821 				  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4822 				  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4823 		} else {
4824 			rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4825 				       bp->rx_copybreak,
4826 				       bp->dev->cfg_pending->hds_thresh);
4827 			rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4828 			rx_space = rx_size + NET_SKB_PAD +
4829 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4830 		}
4831 	}
4832 
4833 	bp->rx_buf_use_size = rx_size;
4834 	bp->rx_buf_size = rx_space;
4835 
4836 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4837 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4838 
4839 	ring_size = bp->tx_ring_size;
4840 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4841 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4842 
4843 	max_rx_cmpl = bp->rx_ring_size;
4844 	/* MAX TPA needs to be added because TPA_START completions are
4845 	 * immediately recycled, so the TPA completions are not bound by
4846 	 * the RX ring size.
4847 	 */
4848 	if (bp->flags & BNXT_FLAG_TPA)
4849 		max_rx_cmpl += bp->max_tpa;
4850 	/* RX and TPA completions are 32-byte, all others are 16-byte */
4851 	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4852 	bp->cp_ring_size = ring_size;
4853 
4854 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4855 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
4856 		bp->cp_nr_pages = MAX_CP_PAGES;
4857 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4858 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4859 			    ring_size, bp->cp_ring_size);
4860 	}
4861 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4862 	bp->cp_ring_mask = bp->cp_bit - 1;
4863 }
4864 
4865 /* Changing allocation mode of RX rings.
4866  * TODO: Update when extending xdp_rxq_info to support allocation modes.
4867  */
4868 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4869 {
4870 	struct net_device *dev = bp->dev;
4871 
4872 	if (page_mode) {
4873 		bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4874 		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4875 
4876 		if (bp->xdp_prog->aux->xdp_has_frags)
4877 			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4878 		else
4879 			dev->max_mtu =
4880 				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4881 		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4882 			bp->flags |= BNXT_FLAG_JUMBO;
4883 			bp->rx_skb_func = bnxt_rx_multi_page_skb;
4884 		} else {
4885 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4886 			bp->rx_skb_func = bnxt_rx_page_skb;
4887 		}
4888 		bp->rx_dir = DMA_BIDIRECTIONAL;
4889 	} else {
4890 		dev->max_mtu = bp->max_mtu;
4891 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4892 		bp->rx_dir = DMA_FROM_DEVICE;
4893 		bp->rx_skb_func = bnxt_rx_skb;
4894 	}
4895 }
4896 
4897 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4898 {
4899 	__bnxt_set_rx_skb_mode(bp, page_mode);
4900 
4901 	if (!page_mode) {
4902 		int rx, tx;
4903 
4904 		bnxt_get_max_rings(bp, &rx, &tx, true);
4905 		if (rx > 1) {
4906 			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4907 			bp->dev->hw_features |= NETIF_F_LRO;
4908 		}
4909 	}
4910 
4911 	/* Update LRO and GRO_HW availability */
4912 	netdev_update_features(bp->dev);
4913 }
4914 
4915 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4916 {
4917 	int i;
4918 	struct bnxt_vnic_info *vnic;
4919 	struct pci_dev *pdev = bp->pdev;
4920 
4921 	if (!bp->vnic_info)
4922 		return;
4923 
4924 	for (i = 0; i < bp->nr_vnics; i++) {
4925 		vnic = &bp->vnic_info[i];
4926 
4927 		kfree(vnic->fw_grp_ids);
4928 		vnic->fw_grp_ids = NULL;
4929 
4930 		kfree(vnic->uc_list);
4931 		vnic->uc_list = NULL;
4932 
4933 		if (vnic->mc_list) {
4934 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4935 					  vnic->mc_list, vnic->mc_list_mapping);
4936 			vnic->mc_list = NULL;
4937 		}
4938 
4939 		if (vnic->rss_table) {
4940 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4941 					  vnic->rss_table,
4942 					  vnic->rss_table_dma_addr);
4943 			vnic->rss_table = NULL;
4944 		}
4945 
4946 		vnic->rss_hash_key = NULL;
4947 		vnic->flags = 0;
4948 	}
4949 }
4950 
4951 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4952 {
4953 	int i, rc = 0, size;
4954 	struct bnxt_vnic_info *vnic;
4955 	struct pci_dev *pdev = bp->pdev;
4956 	int max_rings;
4957 
4958 	for (i = 0; i < bp->nr_vnics; i++) {
4959 		vnic = &bp->vnic_info[i];
4960 
4961 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4962 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4963 
4964 			if (mem_size > 0) {
4965 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4966 				if (!vnic->uc_list) {
4967 					rc = -ENOMEM;
4968 					goto out;
4969 				}
4970 			}
4971 		}
4972 
4973 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4974 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4975 			vnic->mc_list =
4976 				dma_alloc_coherent(&pdev->dev,
4977 						   vnic->mc_list_size,
4978 						   &vnic->mc_list_mapping,
4979 						   GFP_KERNEL);
4980 			if (!vnic->mc_list) {
4981 				rc = -ENOMEM;
4982 				goto out;
4983 			}
4984 		}
4985 
4986 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4987 			goto vnic_skip_grps;
4988 
4989 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4990 			max_rings = bp->rx_nr_rings;
4991 		else
4992 			max_rings = 1;
4993 
4994 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4995 		if (!vnic->fw_grp_ids) {
4996 			rc = -ENOMEM;
4997 			goto out;
4998 		}
4999 vnic_skip_grps:
5000 		if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
5001 		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
5002 			continue;
5003 
5004 		/* Allocate rss table and hash key */
5005 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
5006 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5007 			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
5008 
5009 		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
5010 		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
5011 						     vnic->rss_table_size,
5012 						     &vnic->rss_table_dma_addr,
5013 						     GFP_KERNEL);
5014 		if (!vnic->rss_table) {
5015 			rc = -ENOMEM;
5016 			goto out;
5017 		}
5018 
5019 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
5020 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
5021 	}
5022 	return 0;
5023 
5024 out:
5025 	return rc;
5026 }
5027 
5028 static void bnxt_free_hwrm_resources(struct bnxt *bp)
5029 {
5030 	struct bnxt_hwrm_wait_token *token;
5031 
5032 	dma_pool_destroy(bp->hwrm_dma_pool);
5033 	bp->hwrm_dma_pool = NULL;
5034 
5035 	rcu_read_lock();
5036 	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
5037 		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
5038 	rcu_read_unlock();
5039 }
5040 
5041 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5042 {
5043 	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5044 					    BNXT_HWRM_DMA_SIZE,
5045 					    BNXT_HWRM_DMA_ALIGN, 0);
5046 	if (!bp->hwrm_dma_pool)
5047 		return -ENOMEM;
5048 
5049 	INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5050 
5051 	return 0;
5052 }
5053 
5054 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5055 {
5056 	kfree(stats->hw_masks);
5057 	stats->hw_masks = NULL;
5058 	kfree(stats->sw_stats);
5059 	stats->sw_stats = NULL;
5060 	if (stats->hw_stats) {
5061 		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5062 				  stats->hw_stats_map);
5063 		stats->hw_stats = NULL;
5064 	}
5065 }
5066 
5067 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5068 				bool alloc_masks)
5069 {
5070 	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5071 					     &stats->hw_stats_map, GFP_KERNEL);
5072 	if (!stats->hw_stats)
5073 		return -ENOMEM;
5074 
5075 	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5076 	if (!stats->sw_stats)
5077 		goto stats_mem_err;
5078 
5079 	if (alloc_masks) {
5080 		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5081 		if (!stats->hw_masks)
5082 			goto stats_mem_err;
5083 	}
5084 	return 0;
5085 
5086 stats_mem_err:
5087 	bnxt_free_stats_mem(bp, stats);
5088 	return -ENOMEM;
5089 }
5090 
5091 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5092 {
5093 	int i;
5094 
5095 	for (i = 0; i < count; i++)
5096 		mask_arr[i] = mask;
5097 }
5098 
5099 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5100 {
5101 	int i;
5102 
5103 	for (i = 0; i < count; i++)
5104 		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5105 }
5106 
5107 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5108 				    struct bnxt_stats_mem *stats)
5109 {
5110 	struct hwrm_func_qstats_ext_output *resp;
5111 	struct hwrm_func_qstats_ext_input *req;
5112 	__le64 *hw_masks;
5113 	int rc;
5114 
5115 	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5116 	    !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5117 		return -EOPNOTSUPP;
5118 
5119 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5120 	if (rc)
5121 		return rc;
5122 
5123 	req->fid = cpu_to_le16(0xffff);
5124 	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5125 
5126 	resp = hwrm_req_hold(bp, req);
5127 	rc = hwrm_req_send(bp, req);
5128 	if (!rc) {
5129 		hw_masks = &resp->rx_ucast_pkts;
5130 		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5131 	}
5132 	hwrm_req_drop(bp, req);
5133 	return rc;
5134 }
5135 
5136 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5137 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5138 
5139 static void bnxt_init_stats(struct bnxt *bp)
5140 {
5141 	struct bnxt_napi *bnapi = bp->bnapi[0];
5142 	struct bnxt_cp_ring_info *cpr;
5143 	struct bnxt_stats_mem *stats;
5144 	__le64 *rx_stats, *tx_stats;
5145 	int rc, rx_count, tx_count;
5146 	u64 *rx_masks, *tx_masks;
5147 	u64 mask;
5148 	u8 flags;
5149 
5150 	cpr = &bnapi->cp_ring;
5151 	stats = &cpr->stats;
5152 	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5153 	if (rc) {
5154 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5155 			mask = (1ULL << 48) - 1;
5156 		else
5157 			mask = -1ULL;
5158 		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5159 	}
5160 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
5161 		stats = &bp->port_stats;
5162 		rx_stats = stats->hw_stats;
5163 		rx_masks = stats->hw_masks;
5164 		rx_count = sizeof(struct rx_port_stats) / 8;
5165 		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5166 		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5167 		tx_count = sizeof(struct tx_port_stats) / 8;
5168 
5169 		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5170 		rc = bnxt_hwrm_port_qstats(bp, flags);
5171 		if (rc) {
5172 			mask = (1ULL << 40) - 1;
5173 
5174 			bnxt_fill_masks(rx_masks, mask, rx_count);
5175 			bnxt_fill_masks(tx_masks, mask, tx_count);
5176 		} else {
5177 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5178 			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5179 			bnxt_hwrm_port_qstats(bp, 0);
5180 		}
5181 	}
5182 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5183 		stats = &bp->rx_port_stats_ext;
5184 		rx_stats = stats->hw_stats;
5185 		rx_masks = stats->hw_masks;
5186 		rx_count = sizeof(struct rx_port_stats_ext) / 8;
5187 		stats = &bp->tx_port_stats_ext;
5188 		tx_stats = stats->hw_stats;
5189 		tx_masks = stats->hw_masks;
5190 		tx_count = sizeof(struct tx_port_stats_ext) / 8;
5191 
5192 		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5193 		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5194 		if (rc) {
5195 			mask = (1ULL << 40) - 1;
5196 
5197 			bnxt_fill_masks(rx_masks, mask, rx_count);
5198 			if (tx_stats)
5199 				bnxt_fill_masks(tx_masks, mask, tx_count);
5200 		} else {
5201 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5202 			if (tx_stats)
5203 				bnxt_copy_hw_masks(tx_masks, tx_stats,
5204 						   tx_count);
5205 			bnxt_hwrm_port_qstats_ext(bp, 0);
5206 		}
5207 	}
5208 }
5209 
5210 static void bnxt_free_port_stats(struct bnxt *bp)
5211 {
5212 	bp->flags &= ~BNXT_FLAG_PORT_STATS;
5213 	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5214 
5215 	bnxt_free_stats_mem(bp, &bp->port_stats);
5216 	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5217 	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5218 }
5219 
5220 static void bnxt_free_ring_stats(struct bnxt *bp)
5221 {
5222 	int i;
5223 
5224 	if (!bp->bnapi)
5225 		return;
5226 
5227 	for (i = 0; i < bp->cp_nr_rings; i++) {
5228 		struct bnxt_napi *bnapi = bp->bnapi[i];
5229 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5230 
5231 		bnxt_free_stats_mem(bp, &cpr->stats);
5232 
5233 		kfree(cpr->sw_stats);
5234 		cpr->sw_stats = NULL;
5235 	}
5236 }
5237 
5238 static int bnxt_alloc_stats(struct bnxt *bp)
5239 {
5240 	u32 size, i;
5241 	int rc;
5242 
5243 	size = bp->hw_ring_stats_size;
5244 
5245 	for (i = 0; i < bp->cp_nr_rings; i++) {
5246 		struct bnxt_napi *bnapi = bp->bnapi[i];
5247 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5248 
5249 		cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
5250 		if (!cpr->sw_stats)
5251 			return -ENOMEM;
5252 
5253 		cpr->stats.len = size;
5254 		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5255 		if (rc)
5256 			return rc;
5257 
5258 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5259 	}
5260 
5261 	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5262 		return 0;
5263 
5264 	if (bp->port_stats.hw_stats)
5265 		goto alloc_ext_stats;
5266 
5267 	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5268 	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5269 	if (rc)
5270 		return rc;
5271 
5272 	bp->flags |= BNXT_FLAG_PORT_STATS;
5273 
5274 alloc_ext_stats:
5275 	/* Display extended statistics only if FW supports it */
5276 	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5277 		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5278 			return 0;
5279 
5280 	if (bp->rx_port_stats_ext.hw_stats)
5281 		goto alloc_tx_ext_stats;
5282 
5283 	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5284 	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5285 	/* Extended stats are optional */
5286 	if (rc)
5287 		return 0;
5288 
5289 alloc_tx_ext_stats:
5290 	if (bp->tx_port_stats_ext.hw_stats)
5291 		return 0;
5292 
5293 	if (bp->hwrm_spec_code >= 0x10902 ||
5294 	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5295 		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5296 		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5297 		/* Extended stats are optional */
5298 		if (rc)
5299 			return 0;
5300 	}
5301 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5302 	return 0;
5303 }
5304 
5305 static void bnxt_clear_ring_indices(struct bnxt *bp)
5306 {
5307 	int i, j;
5308 
5309 	if (!bp->bnapi)
5310 		return;
5311 
5312 	for (i = 0; i < bp->cp_nr_rings; i++) {
5313 		struct bnxt_napi *bnapi = bp->bnapi[i];
5314 		struct bnxt_cp_ring_info *cpr;
5315 		struct bnxt_rx_ring_info *rxr;
5316 		struct bnxt_tx_ring_info *txr;
5317 
5318 		if (!bnapi)
5319 			continue;
5320 
5321 		cpr = &bnapi->cp_ring;
5322 		cpr->cp_raw_cons = 0;
5323 
5324 		bnxt_for_each_napi_tx(j, bnapi, txr) {
5325 			txr->tx_prod = 0;
5326 			txr->tx_cons = 0;
5327 			txr->tx_hw_cons = 0;
5328 		}
5329 
5330 		rxr = bnapi->rx_ring;
5331 		if (rxr) {
5332 			rxr->rx_prod = 0;
5333 			rxr->rx_agg_prod = 0;
5334 			rxr->rx_sw_agg_prod = 0;
5335 			rxr->rx_next_cons = 0;
5336 		}
5337 		bnapi->events = 0;
5338 	}
5339 }
5340 
5341 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5342 {
5343 	u8 type = fltr->type, flags = fltr->flags;
5344 
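	/* Only user-created filters are tracked on usr_fltr_list: L2 filters
	 * that steer to a specific ring and ntuple filters marked no-aging.
	 */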
5345 	INIT_LIST_HEAD(&fltr->list);
5346 	if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5347 	    (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5348 		list_add_tail(&fltr->list, &bp->usr_fltr_list);
5349 }
5350 
5351 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5352 {
5353 	if (!list_empty(&fltr->list))
5354 		list_del_init(&fltr->list);
5355 }
5356 
5357 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5358 {
5359 	struct bnxt_filter_base *usr_fltr, *tmp;
5360 
5361 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5362 		if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5363 			continue;
5364 		bnxt_del_one_usr_fltr(bp, usr_fltr);
5365 	}
5366 }
5367 
5368 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5369 {
5370 	hlist_del(&fltr->hash);
5371 	bnxt_del_one_usr_fltr(bp, fltr);
5372 	if (fltr->flags) {
5373 		clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5374 		bp->ntp_fltr_count--;
5375 	}
5376 	kfree(fltr);
5377 }
5378 
5379 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5380 {
5381 	int i;
5382 
5383 	netdev_assert_locked_or_invisible(bp->dev);
5384 
5385 	/* We hold the netdev instance lock and all our NAPIs have been
5386 	 * disabled, so it is safe to delete the hash table.
5387 	 */
5388 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5389 		struct hlist_head *head;
5390 		struct hlist_node *tmp;
5391 		struct bnxt_ntuple_filter *fltr;
5392 
5393 		head = &bp->ntp_fltr_hash_tbl[i];
5394 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5395 			bnxt_del_l2_filter(bp, fltr->l2_fltr);
5396 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5397 				     !list_empty(&fltr->base.list)))
5398 				continue;
5399 			bnxt_del_fltr(bp, &fltr->base);
5400 		}
5401 	}
5402 	if (!all)
5403 		return;
5404 
5405 	bitmap_free(bp->ntp_fltr_bmap);
5406 	bp->ntp_fltr_bmap = NULL;
5407 	bp->ntp_fltr_count = 0;
5408 }
5409 
5410 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5411 {
5412 	int i, rc = 0;
5413 
5414 	if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5415 		return 0;
5416 
5417 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5418 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5419 
5420 	bp->ntp_fltr_count = 0;
5421 	bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5422 
5423 	if (!bp->ntp_fltr_bmap)
5424 		rc = -ENOMEM;
5425 
5426 	return rc;
5427 }
5428 
5429 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5430 {
5431 	int i;
5432 
5433 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5434 		struct hlist_head *head;
5435 		struct hlist_node *tmp;
5436 		struct bnxt_l2_filter *fltr;
5437 
5438 		head = &bp->l2_fltr_hash_tbl[i];
5439 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5440 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5441 				     !list_empty(&fltr->base.list)))
5442 				continue;
5443 			bnxt_del_fltr(bp, &fltr->base);
5444 		}
5445 	}
5446 }
5447 
5448 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5449 {
5450 	int i;
5451 
5452 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5453 		INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5454 	get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5455 }
5456 
5457 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5458 {
5459 	bnxt_free_vnic_attributes(bp);
5460 	bnxt_free_tx_rings(bp);
5461 	bnxt_free_rx_rings(bp);
5462 	bnxt_free_cp_rings(bp);
5463 	bnxt_free_all_cp_arrays(bp);
5464 	bnxt_free_ntp_fltrs(bp, false);
5465 	bnxt_free_l2_filters(bp, false);
5466 	if (irq_re_init) {
5467 		bnxt_free_ring_stats(bp);
5468 		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5469 		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5470 			bnxt_free_port_stats(bp);
5471 		bnxt_free_ring_grps(bp);
5472 		bnxt_free_vnics(bp);
5473 		kfree(bp->tx_ring_map);
5474 		bp->tx_ring_map = NULL;
5475 		kfree(bp->tx_ring);
5476 		bp->tx_ring = NULL;
5477 		kfree(bp->rx_ring);
5478 		bp->rx_ring = NULL;
5479 		kfree(bp->bnapi);
5480 		bp->bnapi = NULL;
5481 	} else {
5482 		bnxt_clear_ring_indices(bp);
5483 	}
5484 }
5485 
5486 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5487 {
5488 	int i, j, rc, size, arr_size;
5489 	void *bnapi;
5490 
5491 	if (irq_re_init) {
5492 		/* Allocate bnapi mem pointer array and mem block for
5493 		 * all queues
5494 		 */
5495 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5496 				bp->cp_nr_rings);
5497 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5498 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5499 		if (!bnapi)
5500 			return -ENOMEM;
5501 
5502 		bp->bnapi = bnapi;
5503 		bnapi += arr_size;
5504 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5505 			bp->bnapi[i] = bnapi;
5506 			bp->bnapi[i]->index = i;
5507 			bp->bnapi[i]->bp = bp;
5508 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5509 				struct bnxt_cp_ring_info *cpr =
5510 					&bp->bnapi[i]->cp_ring;
5511 
5512 				cpr->cp_ring_struct.ring_mem.flags =
5513 					BNXT_RMEM_RING_PTE_FLAG;
5514 			}
5515 		}
5516 
5517 		bp->rx_ring = kcalloc(bp->rx_nr_rings,
5518 				      sizeof(struct bnxt_rx_ring_info),
5519 				      GFP_KERNEL);
5520 		if (!bp->rx_ring)
5521 			return -ENOMEM;
5522 
5523 		for (i = 0; i < bp->rx_nr_rings; i++) {
5524 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5525 
5526 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5527 				rxr->rx_ring_struct.ring_mem.flags =
5528 					BNXT_RMEM_RING_PTE_FLAG;
5529 				rxr->rx_agg_ring_struct.ring_mem.flags =
5530 					BNXT_RMEM_RING_PTE_FLAG;
5531 			} else {
5532 				rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5533 			}
5534 			rxr->bnapi = bp->bnapi[i];
5535 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5536 		}
5537 
5538 		bp->tx_ring = kcalloc(bp->tx_nr_rings,
5539 				      sizeof(struct bnxt_tx_ring_info),
5540 				      GFP_KERNEL);
5541 		if (!bp->tx_ring)
5542 			return -ENOMEM;
5543 
5544 		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5545 					  GFP_KERNEL);
5546 
5547 		if (!bp->tx_ring_map)
5548 			return -ENOMEM;
5549 
5550 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5551 			j = 0;
5552 		else
5553 			j = bp->rx_nr_rings;
5554 
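		/* Pair each TX ring with a NAPI.  XDP TX rings (index below
		 * tx_nr_rings_xdp) take the NAPIs starting at j, one each;
		 * regular TX rings use NAPI j plus a per-TC offset.
		 */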
5555 		for (i = 0; i < bp->tx_nr_rings; i++) {
5556 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5557 			struct bnxt_napi *bnapi2;
5558 
5559 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5560 				txr->tx_ring_struct.ring_mem.flags =
5561 					BNXT_RMEM_RING_PTE_FLAG;
5562 			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5563 			if (i >= bp->tx_nr_rings_xdp) {
5564 				int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5565 
5566 				bnapi2 = bp->bnapi[k];
5567 				txr->txq_index = i - bp->tx_nr_rings_xdp;
5568 				txr->tx_napi_idx =
5569 					BNXT_RING_TO_TC(bp, txr->txq_index);
5570 				bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5571 				bnapi2->tx_int = bnxt_tx_int;
5572 			} else {
5573 				bnapi2 = bp->bnapi[j];
5574 				bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5575 				bnapi2->tx_ring[0] = txr;
5576 				bnapi2->tx_int = bnxt_tx_int_xdp;
5577 				j++;
5578 			}
5579 			txr->bnapi = bnapi2;
5580 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5581 				txr->tx_cpr = &bnapi2->cp_ring;
5582 		}
5583 
5584 		rc = bnxt_alloc_stats(bp);
5585 		if (rc)
5586 			goto alloc_mem_err;
5587 		bnxt_init_stats(bp);
5588 
5589 		rc = bnxt_alloc_ntp_fltrs(bp);
5590 		if (rc)
5591 			goto alloc_mem_err;
5592 
5593 		rc = bnxt_alloc_vnics(bp);
5594 		if (rc)
5595 			goto alloc_mem_err;
5596 	}
5597 
5598 	rc = bnxt_alloc_all_cp_arrays(bp);
5599 	if (rc)
5600 		goto alloc_mem_err;
5601 
5602 	bnxt_init_ring_struct(bp);
5603 
5604 	rc = bnxt_alloc_rx_rings(bp);
5605 	if (rc)
5606 		goto alloc_mem_err;
5607 
5608 	rc = bnxt_alloc_tx_rings(bp);
5609 	if (rc)
5610 		goto alloc_mem_err;
5611 
5612 	rc = bnxt_alloc_cp_rings(bp);
5613 	if (rc)
5614 		goto alloc_mem_err;
5615 
5616 	bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5617 						  BNXT_VNIC_MCAST_FLAG |
5618 						  BNXT_VNIC_UCAST_FLAG;
5619 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5620 		bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5621 			BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5622 
5623 	rc = bnxt_alloc_vnic_attributes(bp);
5624 	if (rc)
5625 		goto alloc_mem_err;
5626 	return 0;
5627 
5628 alloc_mem_err:
5629 	bnxt_free_mem(bp, true);
5630 	return rc;
5631 }
5632 
5633 static void bnxt_disable_int(struct bnxt *bp)
5634 {
5635 	int i;
5636 
5637 	if (!bp->bnapi)
5638 		return;
5639 
5640 	for (i = 0; i < bp->cp_nr_rings; i++) {
5641 		struct bnxt_napi *bnapi = bp->bnapi[i];
5642 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5643 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5644 
5645 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
5646 			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5647 	}
5648 }
5649 
5650 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5651 {
5652 	struct bnxt_napi *bnapi = bp->bnapi[n];
5653 	struct bnxt_cp_ring_info *cpr;
5654 
5655 	cpr = &bnapi->cp_ring;
5656 	return cpr->cp_ring_struct.map_idx;
5657 }
5658 
5659 static void bnxt_disable_int_sync(struct bnxt *bp)
5660 {
5661 	int i;
5662 
5663 	if (!bp->irq_tbl)
5664 		return;
5665 
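	/* Raise intr_sem to flag that interrupts are being disabled, then
	 * synchronize each vector so no handler is still in flight.
	 */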
5666 	atomic_inc(&bp->intr_sem);
5667 
5668 	bnxt_disable_int(bp);
5669 	for (i = 0; i < bp->cp_nr_rings; i++) {
5670 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5671 
5672 		synchronize_irq(bp->irq_tbl[map_idx].vector);
5673 	}
5674 }
5675 
5676 static void bnxt_enable_int(struct bnxt *bp)
5677 {
5678 	int i;
5679 
5680 	atomic_set(&bp->intr_sem, 0);
5681 	for (i = 0; i < bp->cp_nr_rings; i++) {
5682 		struct bnxt_napi *bnapi = bp->bnapi[i];
5683 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5684 
5685 		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5686 	}
5687 }
5688 
5689 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5690 			    bool async_only)
5691 {
5692 	DECLARE_BITMAP(async_events_bmap, 256);
5693 	u32 *events = (u32 *)async_events_bmap;
5694 	struct hwrm_func_drv_rgtr_output *resp;
5695 	struct hwrm_func_drv_rgtr_input *req;
5696 	u32 flags;
5697 	int rc, i;
5698 
5699 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5700 	if (rc)
5701 		return rc;
5702 
5703 	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5704 				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
5705 				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5706 
5707 	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5708 	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5709 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5710 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5711 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5712 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5713 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5714 	if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5715 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5716 	req->flags = cpu_to_le32(flags);
5717 	req->ver_maj_8b = DRV_VER_MAJ;
5718 	req->ver_min_8b = DRV_VER_MIN;
5719 	req->ver_upd_8b = DRV_VER_UPD;
5720 	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5721 	req->ver_min = cpu_to_le16(DRV_VER_MIN);
5722 	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5723 
5724 	if (BNXT_PF(bp)) {
5725 		u32 data[8];
5726 		int i;
5727 
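		/* Build a 256-bit bitmap of HWRM command IDs to be forwarded
		 * from VFs to the PF: bit (cmd % 32) of 32-bit word (cmd / 32).
		 */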
5728 		memset(data, 0, sizeof(data));
5729 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5730 			u16 cmd = bnxt_vf_req_snif[i];
5731 			unsigned int bit, idx;
5732 
5733 			if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
5734 			    cmd == HWRM_PORT_PHY_QCFG)
5735 				continue;
5736 
5737 			idx = cmd / 32;
5738 			bit = cmd % 32;
5739 			data[idx] |= 1 << bit;
5740 		}
5741 
5742 		for (i = 0; i < 8; i++)
5743 			req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5744 
5745 		req->enables |=
5746 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5747 	}
5748 
5749 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5750 		req->flags |= cpu_to_le32(
5751 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5752 
5753 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
5754 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5755 		u16 event_id = bnxt_async_events_arr[i];
5756 
5757 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5758 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5759 			continue;
5760 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5761 		    !bp->ptp_cfg)
5762 			continue;
5763 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
5764 	}
5765 	if (bmap && bmap_size) {
5766 		for (i = 0; i < bmap_size; i++) {
5767 			if (test_bit(i, bmap))
5768 				__set_bit(i, async_events_bmap);
5769 		}
5770 	}
5771 	for (i = 0; i < 8; i++)
5772 		req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5773 
5774 	if (async_only)
5775 		req->enables =
5776 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5777 
5778 	resp = hwrm_req_hold(bp, req);
5779 	rc = hwrm_req_send(bp, req);
5780 	if (!rc) {
5781 		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5782 		if (resp->flags &
5783 		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5784 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5785 	}
5786 	hwrm_req_drop(bp, req);
5787 	return rc;
5788 }
5789 
5790 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5791 {
5792 	struct hwrm_func_drv_unrgtr_input *req;
5793 	int rc;
5794 
5795 	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5796 		return 0;
5797 
5798 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5799 	if (rc)
5800 		return rc;
5801 	return hwrm_req_send(bp, req);
5802 }
5803 
5804 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5805 
5806 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5807 {
5808 	struct hwrm_tunnel_dst_port_free_input *req;
5809 	int rc;
5810 
5811 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5812 	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5813 		return 0;
5814 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5815 	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5816 		return 0;
5817 
5818 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5819 	if (rc)
5820 		return rc;
5821 
5822 	req->tunnel_type = tunnel_type;
5823 
5824 	switch (tunnel_type) {
5825 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5826 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5827 		bp->vxlan_port = 0;
5828 		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5829 		break;
5830 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5831 		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5832 		bp->nge_port = 0;
5833 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5834 		break;
5835 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5836 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5837 		bp->vxlan_gpe_port = 0;
5838 		bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5839 		break;
5840 	default:
5841 		break;
5842 	}
5843 
5844 	rc = hwrm_req_send(bp, req);
5845 	if (rc)
5846 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5847 			   rc);
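	/* Re-apply TPA so the tunnel TPA enable bitmap can pick up the
	 * updated tunnel port state (see bnxt_hwrm_vnic_update_tunl_tpa()).
	 */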
5848 	if (bp->flags & BNXT_FLAG_TPA)
5849 		bnxt_set_tpa(bp, true);
5850 	return rc;
5851 }
5852 
5853 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5854 					   u8 tunnel_type)
5855 {
5856 	struct hwrm_tunnel_dst_port_alloc_output *resp;
5857 	struct hwrm_tunnel_dst_port_alloc_input *req;
5858 	int rc;
5859 
5860 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5861 	if (rc)
5862 		return rc;
5863 
5864 	req->tunnel_type = tunnel_type;
5865 	req->tunnel_dst_port_val = port;
5866 
5867 	resp = hwrm_req_hold(bp, req);
5868 	rc = hwrm_req_send(bp, req);
5869 	if (rc) {
5870 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5871 			   rc);
5872 		goto err_out;
5873 	}
5874 
5875 	switch (tunnel_type) {
5876 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5877 		bp->vxlan_port = port;
5878 		bp->vxlan_fw_dst_port_id =
5879 			le16_to_cpu(resp->tunnel_dst_port_id);
5880 		break;
5881 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5882 		bp->nge_port = port;
5883 		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5884 		break;
5885 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5886 		bp->vxlan_gpe_port = port;
5887 		bp->vxlan_gpe_fw_dst_port_id =
5888 			le16_to_cpu(resp->tunnel_dst_port_id);
5889 		break;
5890 	default:
5891 		break;
5892 	}
5893 	if (bp->flags & BNXT_FLAG_TPA)
5894 		bnxt_set_tpa(bp, true);
5895 
5896 err_out:
5897 	hwrm_req_drop(bp, req);
5898 	return rc;
5899 }
5900 
5901 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5902 {
5903 	struct hwrm_cfa_l2_set_rx_mask_input *req;
5904 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5905 	int rc;
5906 
5907 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5908 	if (rc)
5909 		return rc;
5910 
5911 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5912 	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5913 		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5914 		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5915 	}
5916 	req->mask = cpu_to_le32(vnic->rx_mask);
5917 	return hwrm_req_send_silent(bp, req);
5918 }
5919 
5920 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5921 {
5922 	if (!atomic_dec_and_test(&fltr->refcnt))
5923 		return;
5924 	spin_lock_bh(&bp->ntp_fltr_lock);
5925 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5926 		spin_unlock_bh(&bp->ntp_fltr_lock);
5927 		return;
5928 	}
5929 	hlist_del_rcu(&fltr->base.hash);
5930 	bnxt_del_one_usr_fltr(bp, &fltr->base);
5931 	if (fltr->base.flags) {
5932 		clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5933 		bp->ntp_fltr_count--;
5934 	}
5935 	spin_unlock_bh(&bp->ntp_fltr_lock);
5936 	kfree_rcu(fltr, base.rcu);
5937 }
5938 
5939 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5940 						      struct bnxt_l2_key *key,
5941 						      u32 idx)
5942 {
5943 	struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5944 	struct bnxt_l2_filter *fltr;
5945 
5946 	hlist_for_each_entry_rcu(fltr, head, base.hash) {
5947 		struct bnxt_l2_key *l2_key = &fltr->l2_key;
5948 
5949 		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5950 		    l2_key->vlan == key->vlan)
5951 			return fltr;
5952 	}
5953 	return NULL;
5954 }
5955 
5956 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5957 						    struct bnxt_l2_key *key,
5958 						    u32 idx)
5959 {
5960 	struct bnxt_l2_filter *fltr = NULL;
5961 
5962 	rcu_read_lock();
5963 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5964 	if (fltr)
5965 		atomic_inc(&fltr->refcnt);
5966 	rcu_read_unlock();
5967 	return fltr;
5968 }
5969 
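/* True when 4-tuple (addresses + ports) RSS hashing is enabled for the
 * flow's L4 protocol (TCP or UDP).
 */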
5970 #define BNXT_IPV4_4TUPLE(bp, fkeys)					\
5971 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5972 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||	\
5973 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5974 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5975 
5976 #define BNXT_IPV6_4TUPLE(bp, fkeys)					\
5977 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5978 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||	\
5979 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5980 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5981 
5982 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5983 {
5984 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5985 		if (BNXT_IPV4_4TUPLE(bp, fkeys))
5986 			return sizeof(fkeys->addrs.v4addrs) +
5987 			       sizeof(fkeys->ports);
5988 
5989 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5990 			return sizeof(fkeys->addrs.v4addrs);
5991 	}
5992 
5993 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5994 		if (BNXT_IPV6_4TUPLE(bp, fkeys))
5995 			return sizeof(fkeys->addrs.v6addrs) +
5996 			       sizeof(fkeys->ports);
5997 
5998 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5999 			return sizeof(fkeys->addrs.v6addrs);
6000 	}
6001 
6002 	return 0;
6003 }
6004 
6005 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
6006 			 const unsigned char *key)
6007 {
6008 	u64 prefix = bp->toeplitz_prefix, hash = 0;
6009 	struct bnxt_ipv4_tuple tuple4;
6010 	struct bnxt_ipv6_tuple tuple6;
6011 	int i, j, len = 0;
6012 	u8 *four_tuple;
6013 
6014 	len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
6015 	if (!len)
6016 		return 0;
6017 
6018 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6019 		tuple4.v4addrs = fkeys->addrs.v4addrs;
6020 		tuple4.ports = fkeys->ports;
6021 		four_tuple = (unsigned char *)&tuple4;
6022 	} else {
6023 		tuple6.v6addrs = fkeys->addrs.v6addrs;
6024 		tuple6.ports = fkeys->ports;
6025 		four_tuple = (unsigned char *)&tuple6;
6026 	}
6027 
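	/* Bitwise Toeplitz: for each input bit that is set, XOR the current
	 * 64-bit window of the hash key into the accumulator, shifting the
	 * window left one bit per input bit and refilling its low byte from
	 * the key after every 8 bits.
	 */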
6028 	for (i = 0, j = 8; i < len; i++, j++) {
6029 		u8 byte = four_tuple[i];
6030 		int bit;
6031 
6032 		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
6033 			if (byte & 0x80)
6034 				hash ^= prefix;
6035 		}
6036 		prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
6037 	}
6038 
6039 	/* The valid part of the hash is in the upper 32 bits. */
6040 	return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
6041 }
6042 
6043 #ifdef CONFIG_RFS_ACCEL
6044 static struct bnxt_l2_filter *
6045 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
6046 {
6047 	struct bnxt_l2_filter *fltr;
6048 	u32 idx;
6049 
6050 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6051 	      BNXT_L2_FLTR_HASH_MASK;
6052 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
6053 	return fltr;
6054 }
6055 #endif
6056 
6057 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6058 			       struct bnxt_l2_key *key, u32 idx)
6059 {
6060 	struct hlist_head *head;
6061 
6062 	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6063 	fltr->l2_key.vlan = key->vlan;
6064 	fltr->base.type = BNXT_FLTR_TYPE_L2;
6065 	if (fltr->base.flags) {
6066 		int bit_id;
6067 
6068 		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6069 						 bp->max_fltr, 0);
6070 		if (bit_id < 0)
6071 			return -ENOMEM;
6072 		fltr->base.sw_id = (u16)bit_id;
6073 		bp->ntp_fltr_count++;
6074 	}
6075 	head = &bp->l2_fltr_hash_tbl[idx];
6076 	hlist_add_head_rcu(&fltr->base.hash, head);
6077 	bnxt_insert_usr_fltr(bp, &fltr->base);
6078 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6079 	atomic_set(&fltr->refcnt, 1);
6080 	return 0;
6081 }
6082 
6083 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6084 						   struct bnxt_l2_key *key,
6085 						   gfp_t gfp)
6086 {
6087 	struct bnxt_l2_filter *fltr;
6088 	u32 idx;
6089 	int rc;
6090 
6091 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6092 	      BNXT_L2_FLTR_HASH_MASK;
6093 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
6094 	if (fltr)
6095 		return fltr;
6096 
6097 	fltr = kzalloc(sizeof(*fltr), gfp);
6098 	if (!fltr)
6099 		return ERR_PTR(-ENOMEM);
6100 	spin_lock_bh(&bp->ntp_fltr_lock);
6101 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6102 	spin_unlock_bh(&bp->ntp_fltr_lock);
6103 	if (rc) {
6104 		bnxt_del_l2_filter(bp, fltr);
6105 		fltr = ERR_PTR(rc);
6106 	}
6107 	return fltr;
6108 }
6109 
6110 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6111 						struct bnxt_l2_key *key,
6112 						u16 flags)
6113 {
6114 	struct bnxt_l2_filter *fltr;
6115 	u32 idx;
6116 	int rc;
6117 
6118 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6119 	      BNXT_L2_FLTR_HASH_MASK;
6120 	spin_lock_bh(&bp->ntp_fltr_lock);
6121 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6122 	if (fltr) {
6123 		fltr = ERR_PTR(-EEXIST);
6124 		goto l2_filter_exit;
6125 	}
6126 	fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
6127 	if (!fltr) {
6128 		fltr = ERR_PTR(-ENOMEM);
6129 		goto l2_filter_exit;
6130 	}
6131 	fltr->base.flags = flags;
6132 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6133 	if (rc) {
6134 		spin_unlock_bh(&bp->ntp_fltr_lock);
6135 		bnxt_del_l2_filter(bp, fltr);
6136 		return ERR_PTR(rc);
6137 	}
6138 
6139 l2_filter_exit:
6140 	spin_unlock_bh(&bp->ntp_fltr_lock);
6141 	return fltr;
6142 }
6143 
6144 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6145 {
6146 #ifdef CONFIG_BNXT_SRIOV
6147 	struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6148 
6149 	return vf->fw_fid;
6150 #else
6151 	return INVALID_HW_RING_ID;
6152 #endif
6153 }
6154 
6155 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6156 {
6157 	struct hwrm_cfa_l2_filter_free_input *req;
6158 	u16 target_id = 0xffff;
6159 	int rc;
6160 
6161 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6162 		struct bnxt_pf_info *pf = &bp->pf;
6163 
6164 		if (fltr->base.vf_idx >= pf->active_vfs)
6165 			return -EINVAL;
6166 
6167 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6168 		if (target_id == INVALID_HW_RING_ID)
6169 			return -EINVAL;
6170 	}
6171 
6172 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6173 	if (rc)
6174 		return rc;
6175 
6176 	req->target_id = cpu_to_le16(target_id);
6177 	req->l2_filter_id = fltr->base.filter_id;
6178 	return hwrm_req_send(bp, req);
6179 }
6180 
6181 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6182 {
6183 	struct hwrm_cfa_l2_filter_alloc_output *resp;
6184 	struct hwrm_cfa_l2_filter_alloc_input *req;
6185 	u16 target_id = 0xffff;
6186 	int rc;
6187 
6188 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6189 		struct bnxt_pf_info *pf = &bp->pf;
6190 
6191 		if (fltr->base.vf_idx >= pf->active_vfs)
6192 			return -EINVAL;
6193 
6194 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6195 	}
6196 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6197 	if (rc)
6198 		return rc;
6199 
6200 	req->target_id = cpu_to_le16(target_id);
6201 	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6202 
6203 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6204 		req->flags |=
6205 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6206 	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6207 	req->enables =
6208 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6209 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6210 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6211 	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6212 	eth_broadcast_addr(req->l2_addr_mask);
6213 
6214 	if (fltr->l2_key.vlan) {
6215 		req->enables |=
6216 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6217 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6218 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6219 		req->num_vlans = 1;
6220 		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6221 		req->l2_ivlan_mask = cpu_to_le16(0xfff);
6222 	}
6223 
6224 	resp = hwrm_req_hold(bp, req);
6225 	rc = hwrm_req_send(bp, req);
6226 	if (!rc) {
6227 		fltr->base.filter_id = resp->l2_filter_id;
6228 		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6229 	}
6230 	hwrm_req_drop(bp, req);
6231 	return rc;
6232 }
6233 
6234 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6235 				     struct bnxt_ntuple_filter *fltr)
6236 {
6237 	struct hwrm_cfa_ntuple_filter_free_input *req;
6238 	int rc;
6239 
6240 	set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6241 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6242 	if (rc)
6243 		return rc;
6244 
6245 	req->ntuple_filter_id = fltr->base.filter_id;
6246 	return hwrm_req_send(bp, req);
6247 }
6248 
6249 #define BNXT_NTP_FLTR_FLAGS					\
6250 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
6251 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
6252 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
6253 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
6254 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
6255 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
6256 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
6257 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
6258 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
6259 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
6260 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
6261 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
6262 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6263 
6264 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
6265 		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6266 
6267 void bnxt_fill_ipv6_mask(__be32 mask[4])
6268 {
6269 	int i;
6270 
6271 	for (i = 0; i < 4; i++)
6272 		mask[i] = cpu_to_be32(~0);
6273 }
6274 
6275 static void
6276 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6277 			  struct hwrm_cfa_ntuple_filter_alloc_input *req,
6278 			  struct bnxt_ntuple_filter *fltr)
6279 {
6280 	u16 rxq = fltr->base.rxq;
6281 
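	/* Pick the filter destination: the VNIC of a user RSS context, the
	 * dedicated ntuple VNIC plus an RFS ring table index, or the RX ring
	 * index directly when the ntuple VNIC is not supported.
	 */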
6282 	if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6283 		struct ethtool_rxfh_context *ctx;
6284 		struct bnxt_rss_ctx *rss_ctx;
6285 		struct bnxt_vnic_info *vnic;
6286 
6287 		ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6288 			      fltr->base.fw_vnic_id);
6289 		if (ctx) {
6290 			rss_ctx = ethtool_rxfh_context_priv(ctx);
6291 			vnic = &rss_ctx->vnic;
6292 
6293 			req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6294 		}
6295 		return;
6296 	}
6297 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6298 		struct bnxt_vnic_info *vnic;
6299 		u32 enables;
6300 
6301 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6302 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6303 		enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6304 		req->enables |= cpu_to_le32(enables);
6305 		req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6306 	} else {
6307 		u32 flags;
6308 
6309 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6310 		req->flags |= cpu_to_le32(flags);
6311 		req->dst_id = cpu_to_le16(rxq);
6312 	}
6313 }
6314 
6315 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6316 				      struct bnxt_ntuple_filter *fltr)
6317 {
6318 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6319 	struct hwrm_cfa_ntuple_filter_alloc_input *req;
6320 	struct bnxt_flow_masks *masks = &fltr->fmasks;
6321 	struct flow_keys *keys = &fltr->fkeys;
6322 	struct bnxt_l2_filter *l2_fltr;
6323 	struct bnxt_vnic_info *vnic;
6324 	int rc;
6325 
6326 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6327 	if (rc)
6328 		return rc;
6329 
6330 	l2_fltr = fltr->l2_fltr;
6331 	req->l2_filter_id = l2_fltr->base.filter_id;
6332 
6333 	if (fltr->base.flags & BNXT_ACT_DROP) {
6334 		req->flags =
6335 			cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6336 	} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6337 		bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6338 	} else {
6339 		vnic = &bp->vnic_info[fltr->base.rxq + 1];
6340 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6341 	}
6342 	req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6343 
6344 	req->ethertype = htons(ETH_P_IP);
6345 	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6346 	req->ip_protocol = keys->basic.ip_proto;
6347 
6348 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6349 		req->ethertype = htons(ETH_P_IPV6);
6350 		req->ip_addr_type =
6351 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6352 		*(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6353 		*(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6354 		*(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6355 		*(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6356 	} else {
6357 		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6358 		req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6359 		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6360 		req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6361 	}
6362 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6363 		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6364 		req->tunnel_type =
6365 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6366 	}
6367 
6368 	req->src_port = keys->ports.src;
6369 	req->src_port_mask = masks->ports.src;
6370 	req->dst_port = keys->ports.dst;
6371 	req->dst_port_mask = masks->ports.dst;
6372 
6373 	resp = hwrm_req_hold(bp, req);
6374 	rc = hwrm_req_send(bp, req);
6375 	if (!rc)
6376 		fltr->base.filter_id = resp->ntuple_filter_id;
6377 	hwrm_req_drop(bp, req);
6378 	return rc;
6379 }
6380 
6381 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6382 				     const u8 *mac_addr)
6383 {
6384 	struct bnxt_l2_filter *fltr;
6385 	struct bnxt_l2_key key;
6386 	int rc;
6387 
6388 	ether_addr_copy(key.dst_mac_addr, mac_addr);
6389 	key.vlan = 0;
6390 	fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6391 	if (IS_ERR(fltr))
6392 		return PTR_ERR(fltr);
6393 
6394 	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6395 	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6396 	if (rc)
6397 		bnxt_del_l2_filter(bp, fltr);
6398 	else
6399 		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6400 	return rc;
6401 }
6402 
6403 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6404 {
6405 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6406 
6407 	/* Any associated ntuple filters will also be cleared by firmware. */
6408 	for (i = 0; i < num_of_vnics; i++) {
6409 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6410 
6411 		for (j = 0; j < vnic->uc_filter_count; j++) {
6412 			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6413 
6414 			bnxt_hwrm_l2_filter_free(bp, fltr);
6415 			bnxt_del_l2_filter(bp, fltr);
6416 		}
6417 		vnic->uc_filter_count = 0;
6418 	}
6419 }
6420 
6421 #define BNXT_DFLT_TUNL_TPA_BMAP				\
6422 	(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |	\
6423 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |	\
6424 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6425 
6426 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6427 					   struct hwrm_vnic_tpa_cfg_input *req)
6428 {
6429 	u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6430 
6431 	if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6432 		return;
6433 
6434 	if (bp->vxlan_port)
6435 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6436 	if (bp->vxlan_gpe_port)
6437 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6438 	if (bp->nge_port)
6439 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6440 
6441 	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6442 	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6443 }
6444 
6445 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6446 			   u32 tpa_flags)
6447 {
6448 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6449 	struct hwrm_vnic_tpa_cfg_input *req;
6450 	int rc;
6451 
6452 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6453 		return 0;
6454 
6455 	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6456 	if (rc)
6457 		return rc;
6458 
6459 	if (tpa_flags) {
6460 		u16 mss = bp->dev->mtu - 40;
6461 		u32 nsegs, n, segs = 0, flags;
6462 
6463 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6464 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6465 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6466 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6467 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6468 		if (tpa_flags & BNXT_FLAG_GRO)
6469 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6470 
6471 		req->flags = cpu_to_le32(flags);
6472 
6473 		req->enables =
6474 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6475 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6476 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6477 
6478 		/* The number of segs is in log2 units, and the first packet
6479 		 * is not included as part of these units.
6480 		 */
6481 		if (mss <= BNXT_RX_PAGE_SIZE) {
6482 			n = BNXT_RX_PAGE_SIZE / mss;
6483 			nsegs = (MAX_SKB_FRAGS - 1) * n;
6484 		} else {
6485 			n = mss / BNXT_RX_PAGE_SIZE;
6486 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
6487 				n++;
6488 			nsegs = (MAX_SKB_FRAGS - n) / n;
6489 		}
6490 
6491 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6492 			segs = MAX_TPA_SEGS_P5;
6493 			max_aggs = bp->max_tpa;
6494 		} else {
6495 			segs = ilog2(nsegs);
6496 		}
6497 		req->max_agg_segs = cpu_to_le16(segs);
6498 		req->max_aggs = cpu_to_le16(max_aggs);
6499 
6500 		req->min_agg_len = cpu_to_le32(512);
6501 		bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6502 	}
6503 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6504 
6505 	return hwrm_req_send(bp, req);
6506 }
6507 
6508 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6509 {
6510 	struct bnxt_ring_grp_info *grp_info;
6511 
6512 	grp_info = &bp->grp_info[ring->grp_idx];
6513 	return grp_info->cp_fw_ring_id;
6514 }
6515 
6516 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6517 {
6518 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6519 		return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6520 	else
6521 		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6522 }
6523 
6524 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6525 {
6526 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6527 		return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6528 	else
6529 		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6530 }
6531 
6532 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6533 {
6534 	int entries;
6535 
6536 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6537 		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6538 	else
6539 		entries = HW_HASH_INDEX_SIZE;
6540 
6541 	bp->rss_indir_tbl_entries = entries;
6542 	bp->rss_indir_tbl =
6543 		kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6544 	if (!bp->rss_indir_tbl)
6545 		return -ENOMEM;
6546 
6547 	return 0;
6548 }
6549 
6550 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6551 				 struct ethtool_rxfh_context *rss_ctx)
6552 {
6553 	u16 max_rings, max_entries, pad, i;
6554 	u32 *rss_indir_tbl;
6555 
6556 	if (!bp->rx_nr_rings)
6557 		return;
6558 
6559 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6560 		max_rings = bp->rx_nr_rings - 1;
6561 	else
6562 		max_rings = bp->rx_nr_rings;
6563 
6564 	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6565 	if (rss_ctx)
6566 		rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6567 	else
6568 		rss_indir_tbl = &bp->rss_indir_tbl[0];
6569 
6570 	for (i = 0; i < max_entries; i++)
6571 		rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6572 
6573 	pad = bp->rss_indir_tbl_entries - max_entries;
6574 	if (pad)
6575 		memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6576 }
6577 
6578 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6579 {
6580 	u32 i, tbl_size, max_ring = 0;
6581 
6582 	if (!bp->rss_indir_tbl)
6583 		return 0;
6584 
6585 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6586 	for (i = 0; i < tbl_size; i++)
6587 		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6588 	return max_ring;
6589 }
6590 
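/* Return the number of RSS contexts needed for @rx_rings RX rings.  On P5+
 * chips one context covers BNXT_RSS_TABLE_ENTRIES_P5 ring table entries
 * (chips with large RSS contexts always use the maximum); Nitro A0 needs two
 * contexts and all other chips need one.
 */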
6591 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6592 {
6593 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6594 		if (!rx_rings)
6595 			return 0;
6596 		if (bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX)
6597 			return BNXT_RSS_TABLE_MAX_TBL_P5;
6598 
6599 		return bnxt_calc_nr_ring_pages(rx_rings - 1,
6600 					       BNXT_RSS_TABLE_ENTRIES_P5);
6601 	}
6602 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6603 		return 2;
6604 	return 1;
6605 }
6606 
6607 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6608 {
6609 	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6610 	u16 i, j;
6611 
6612 	/* Fill the RSS indirection table with ring group ids */
6613 	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6614 		if (!no_rss)
6615 			j = bp->rss_indir_tbl[i];
6616 		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6617 	}
6618 }
6619 
6620 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6621 				    struct bnxt_vnic_info *vnic)
6622 {
6623 	__le16 *ring_tbl = vnic->rss_table;
6624 	struct bnxt_rx_ring_info *rxr;
6625 	u16 tbl_size, i;
6626 
6627 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6628 
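	/* On P5+ chips each indirection table slot expands to a pair of
	 * entries: the RX ring's FW ring id followed by the id of its
	 * companion completion ring.
	 */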
6629 	for (i = 0; i < tbl_size; i++) {
6630 		u16 ring_id, j;
6631 
6632 		if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6633 			j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6634 		else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6635 			j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6636 		else
6637 			j = bp->rss_indir_tbl[i];
6638 		rxr = &bp->rx_ring[j];
6639 
6640 		ring_id = rxr->rx_ring_struct.fw_ring_id;
6641 		*ring_tbl++ = cpu_to_le16(ring_id);
6642 		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6643 		*ring_tbl++ = cpu_to_le16(ring_id);
6644 	}
6645 }
6646 
6647 static void
6648 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6649 			 struct bnxt_vnic_info *vnic)
6650 {
6651 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6652 		bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6653 		if (bp->flags & BNXT_FLAG_CHIP_P7)
6654 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6655 	} else {
6656 		bnxt_fill_hw_rss_tbl(bp, vnic);
6657 	}
6658 
6659 	if (bp->rss_hash_delta) {
6660 		req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6661 		if (bp->rss_hash_cfg & bp->rss_hash_delta)
6662 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6663 		else
6664 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6665 	} else {
6666 		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6667 	}
6668 	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6669 	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6670 	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6671 }
6672 
6673 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6674 				  bool set_rss)
6675 {
6676 	struct hwrm_vnic_rss_cfg_input *req;
6677 	int rc;
6678 
6679 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6680 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6681 		return 0;
6682 
6683 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6684 	if (rc)
6685 		return rc;
6686 
6687 	if (set_rss)
6688 		__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6689 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6690 	return hwrm_req_send(bp, req);
6691 }
6692 
6693 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6694 				     struct bnxt_vnic_info *vnic, bool set_rss)
6695 {
6696 	struct hwrm_vnic_rss_cfg_input *req;
6697 	dma_addr_t ring_tbl_map;
6698 	u32 i, nr_ctxs;
6699 	int rc;
6700 
6701 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6702 	if (rc)
6703 		return rc;
6704 
6705 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6706 	if (!set_rss)
6707 		return hwrm_req_send(bp, req);
6708 
6709 	__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6710 	ring_tbl_map = vnic->rss_table_dma_addr;
6711 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6712 
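	/* Program the ring table one RSS context at a time; each context
	 * covers BNXT_RSS_TABLE_SIZE_P5 bytes of the table.
	 */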
6713 	hwrm_req_hold(bp, req);
6714 	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6715 		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6716 		req->ring_table_pair_index = i;
6717 		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6718 		rc = hwrm_req_send(bp, req);
6719 		if (rc)
6720 			goto exit;
6721 	}
6722 
6723 exit:
6724 	hwrm_req_drop(bp, req);
6725 	return rc;
6726 }
6727 
6728 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6729 {
6730 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6731 	struct hwrm_vnic_rss_qcfg_output *resp;
6732 	struct hwrm_vnic_rss_qcfg_input *req;
6733 
6734 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6735 		return;
6736 
6737 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6738 	/* All contexts use the same hash_type; context 0 always exists */
6739 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6740 	resp = hwrm_req_hold(bp, req);
6741 	if (!hwrm_req_send(bp, req)) {
6742 		bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6743 		bp->rss_hash_delta = 0;
6744 	}
6745 	hwrm_req_drop(bp, req);
6746 }
6747 
6748 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6749 {
6750 	u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6751 	struct hwrm_vnic_plcmodes_cfg_input *req;
6752 	int rc;
6753 
6754 	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6755 	if (rc)
6756 		return rc;
6757 
6758 	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6759 	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6760 	req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6761 
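	/* Enable IPv4/IPv6 header-data split only when aggregation rings are
	 * in use and the device is not in RX page mode (XDP).
	 */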
6762 	if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6763 		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6764 					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6765 		req->enables |=
6766 			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6767 		req->hds_threshold = cpu_to_le16(hds_thresh);
6768 	}
6769 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6770 	return hwrm_req_send(bp, req);
6771 }
6772 
6773 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6774 					struct bnxt_vnic_info *vnic,
6775 					u16 ctx_idx)
6776 {
6777 	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6778 
6779 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6780 		return;
6781 
6782 	req->rss_cos_lb_ctx_id =
6783 		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6784 
6785 	hwrm_req_send(bp, req);
6786 	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6787 }
6788 
6789 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6790 {
6791 	int i, j;
6792 
6793 	for (i = 0; i < bp->nr_vnics; i++) {
6794 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6795 
6796 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6797 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6798 				bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6799 		}
6800 	}
6801 	bp->rsscos_nr_ctxs = 0;
6802 }
6803 
6804 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6805 				    struct bnxt_vnic_info *vnic, u16 ctx_idx)
6806 {
6807 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6808 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6809 	int rc;
6810 
6811 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6812 	if (rc)
6813 		return rc;
6814 
6815 	resp = hwrm_req_hold(bp, req);
6816 	rc = hwrm_req_send(bp, req);
6817 	if (!rc)
6818 		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6819 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
6820 	hwrm_req_drop(bp, req);
6821 
6822 	return rc;
6823 }
6824 
6825 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6826 {
6827 	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6828 		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6829 	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6830 }
6831 
6832 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6833 {
6834 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6835 	struct hwrm_vnic_cfg_input *req;
6836 	unsigned int ring = 0, grp_idx;
6837 	u16 def_vlan = 0;
6838 	int rc;
6839 
6840 	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6841 	if (rc)
6842 		return rc;
6843 
6844 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6845 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6846 
6847 		req->default_rx_ring_id =
6848 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6849 		req->default_cmpl_ring_id =
6850 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6851 		req->enables =
6852 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6853 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6854 		goto vnic_mru;
6855 	}
6856 	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6857 	/* Only RSS is supported for now; TBD: COS & LB */
6858 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6859 		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6860 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6861 					   VNIC_CFG_REQ_ENABLES_MRU);
6862 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6863 		req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6864 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6865 					   VNIC_CFG_REQ_ENABLES_MRU);
6866 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6867 	} else {
6868 		req->rss_rule = cpu_to_le16(0xffff);
6869 	}
6870 
6871 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6872 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6873 		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6874 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6875 	} else {
6876 		req->cos_rule = cpu_to_le16(0xffff);
6877 	}
6878 
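	/* Pick the default ring group for this VNIC: RSS VNICs use ring 0,
	 * per-ring RFS VNICs map to their own RX ring, and the extra Nitro
	 * A0 VNIC uses the last RX ring.
	 */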
6879 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6880 		ring = 0;
6881 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6882 		ring = vnic->vnic_id - 1;
6883 	else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6884 		ring = bp->rx_nr_rings - 1;
6885 
6886 	grp_idx = bp->rx_ring[ring].bnapi->index;
6887 	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6888 	req->lb_rule = cpu_to_le16(0xffff);
6889 vnic_mru:
6890 	vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
6891 	req->mru = cpu_to_le16(vnic->mru);
6892 
6893 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6894 #ifdef CONFIG_BNXT_SRIOV
6895 	if (BNXT_VF(bp))
6896 		def_vlan = bp->vf.vlan;
6897 #endif
6898 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6899 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6900 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6901 		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6902 
6903 	return hwrm_req_send(bp, req);
6904 }
6905 
6906 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6907 				    struct bnxt_vnic_info *vnic)
6908 {
6909 	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6910 		struct hwrm_vnic_free_input *req;
6911 
6912 		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6913 			return;
6914 
6915 		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6916 
6917 		hwrm_req_send(bp, req);
6918 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
6919 	}
6920 }
6921 
6922 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6923 {
6924 	u16 i;
6925 
6926 	for (i = 0; i < bp->nr_vnics; i++)
6927 		bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6928 }
6929 
6930 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6931 			 unsigned int start_rx_ring_idx,
6932 			 unsigned int nr_rings)
6933 {
6934 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6935 	struct hwrm_vnic_alloc_output *resp;
6936 	struct hwrm_vnic_alloc_input *req;
6937 	int rc;
6938 
6939 	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6940 	if (rc)
6941 		return rc;
6942 
6943 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6944 		goto vnic_no_ring_grps;
6945 
6946 	/* map ring groups to this vnic */
6947 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6948 		grp_idx = bp->rx_ring[i].bnapi->index;
6949 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6950 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6951 				   j, nr_rings);
6952 			break;
6953 		}
6954 		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6955 	}
6956 
6957 vnic_no_ring_grps:
6958 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6959 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6960 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6961 		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6962 
6963 	resp = hwrm_req_hold(bp, req);
6964 	rc = hwrm_req_send(bp, req);
6965 	if (!rc)
6966 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6967 	hwrm_req_drop(bp, req);
6968 	return rc;
6969 }
6970 
6971 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6972 {
6973 	struct hwrm_vnic_qcaps_output *resp;
6974 	struct hwrm_vnic_qcaps_input *req;
6975 	int rc;
6976 
6977 	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6978 	bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6979 	bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6980 	if (bp->hwrm_spec_code < 0x10600)
6981 		return 0;
6982 
6983 	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6984 	if (rc)
6985 		return rc;
6986 
6987 	resp = hwrm_req_hold(bp, req);
6988 	rc = hwrm_req_send(bp, req);
6989 	if (!rc) {
6990 		u32 flags = le32_to_cpu(resp->flags);
6991 
6992 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6993 		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6994 			bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6995 		if (flags &
6996 		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6997 			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6998 
6999 		/* Older P5 fw before EXT_HW_STATS support did not set
7000 		 * VLAN_STRIP_CAP properly.
7001 		 */
7002 		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
7003 		    (BNXT_CHIP_P5(bp) &&
7004 		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
7005 			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
7006 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
7007 			bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
7008 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
7009 			bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
7010 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
7011 		if (bp->max_tpa_v2) {
7012 			if (BNXT_CHIP_P5(bp))
7013 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
7014 			else
7015 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
7016 		}
7017 		if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
7018 			bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
7019 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
7020 			bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
7021 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
7022 			bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
7023 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
7024 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
7025 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
7026 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
7027 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
7028 			bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
7029 		if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
7030 			bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
7031 	}
7032 	hwrm_req_drop(bp, req);
7033 	return rc;
7034 }
7035 
7036 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
7037 {
7038 	struct hwrm_ring_grp_alloc_output *resp;
7039 	struct hwrm_ring_grp_alloc_input *req;
7040 	int rc;
7041 	u16 i;
7042 
7043 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7044 		return 0;
7045 
7046 	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
7047 	if (rc)
7048 		return rc;
7049 
7050 	resp = hwrm_req_hold(bp, req);
7051 	for (i = 0; i < bp->rx_nr_rings; i++) {
7052 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7053 
7054 		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7055 		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7056 		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7057 		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7058 
7059 		rc = hwrm_req_send(bp, req);
7060 
7061 		if (rc)
7062 			break;
7063 
7064 		bp->grp_info[grp_idx].fw_grp_id =
7065 			le32_to_cpu(resp->ring_group_id);
7066 	}
7067 	hwrm_req_drop(bp, req);
7068 	return rc;
7069 }
7070 
7071 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7072 {
7073 	struct hwrm_ring_grp_free_input *req;
7074 	u16 i;
7075 
7076 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7077 		return;
7078 
7079 	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7080 		return;
7081 
7082 	hwrm_req_hold(bp, req);
7083 	for (i = 0; i < bp->cp_nr_rings; i++) {
7084 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7085 			continue;
7086 		req->ring_group_id =
7087 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
7088 
7089 		hwrm_req_send(bp, req);
7090 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7091 	}
7092 	hwrm_req_drop(bp, req);
7093 }
7094 
7095 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7096 				       struct hwrm_ring_alloc_input *req,
7097 				       struct bnxt_rx_ring_info *rxr,
7098 				       struct bnxt_ring_struct *ring)
7099 {
7100 	struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7101 	u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7102 		      RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7103 
7104 	if (ring_type == HWRM_RING_ALLOC_AGG) {
7105 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7106 		req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7107 		req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
7108 		enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7109 	} else {
7110 		req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7111 		if (NET_IP_ALIGN == 2)
7112 			req->flags =
7113 				cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7114 	}
7115 	req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7116 	req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7117 	req->enables |= cpu_to_le32(enables);
7118 }
7119 
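/* Build and send a HWRM_RING_ALLOC request for any ring type (TX, RX,
 * RX agg, completion or NQ) and save the firmware ring ID returned in
 * the response.
 */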
7120 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7121 				    struct bnxt_rx_ring_info *rxr,
7122 				    struct bnxt_ring_struct *ring,
7123 				    u32 ring_type, u32 map_index)
7124 {
7125 	struct hwrm_ring_alloc_output *resp;
7126 	struct hwrm_ring_alloc_input *req;
7127 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7128 	struct bnxt_ring_grp_info *grp_info;
7129 	int rc, err = 0;
7130 	u16 ring_id;
7131 
7132 	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7133 	if (rc)
7134 		goto exit;
7135 
7136 	req->enables = 0;
7137 	if (rmem->nr_pages > 1) {
7138 		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7139 		/* Page size is in log2 units */
7140 		req->page_size = BNXT_PAGE_SHIFT;
7141 		req->page_tbl_depth = 1;
7142 	} else {
7143 		req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7144 	}
7145 	req->fbo = 0;
7146 	/* Association of ring index with doorbell index and MSIX number */
7147 	req->logical_id = cpu_to_le16(map_index);
7148 
7149 	switch (ring_type) {
7150 	case HWRM_RING_ALLOC_TX: {
7151 		struct bnxt_tx_ring_info *txr;
7152 		u16 flags = 0;
7153 
7154 		txr = container_of(ring, struct bnxt_tx_ring_info,
7155 				   tx_ring_struct);
7156 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7157 		/* Association of transmit ring with completion ring */
7158 		grp_info = &bp->grp_info[ring->grp_idx];
7159 		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7160 		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7161 		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7162 		req->queue_id = cpu_to_le16(ring->queue_id);
7163 		if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7164 			req->cmpl_coal_cnt =
7165 				RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7166 		if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7167 			flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7168 		req->flags = cpu_to_le16(flags);
7169 		break;
7170 	}
7171 	case HWRM_RING_ALLOC_RX:
7172 	case HWRM_RING_ALLOC_AGG:
7173 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7174 		req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7175 			      cpu_to_le32(bp->rx_ring_mask + 1) :
7176 			      cpu_to_le32(bp->rx_agg_ring_mask + 1);
7177 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7178 			bnxt_set_rx_ring_params_p5(bp, ring_type, req,
7179 						   rxr, ring);
7180 		break;
7181 	case HWRM_RING_ALLOC_CMPL:
7182 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7183 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7184 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7185 			/* Association of cp ring with nq */
7186 			grp_info = &bp->grp_info[map_index];
7187 			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7188 			req->cq_handle = cpu_to_le64(ring->handle);
7189 			req->enables |= cpu_to_le32(
7190 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7191 		} else {
7192 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7193 		}
7194 		break;
7195 	case HWRM_RING_ALLOC_NQ:
7196 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7197 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7198 		req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7199 		break;
7200 	default:
7201 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7202 			   ring_type);
7203 		return -EINVAL;
7204 	}
7205 
7206 	resp = hwrm_req_hold(bp, req);
7207 	rc = hwrm_req_send(bp, req);
7208 	err = le16_to_cpu(resp->error_code);
7209 	ring_id = le16_to_cpu(resp->ring_id);
7210 	hwrm_req_drop(bp, req);
7211 
7212 exit:
7213 	if (rc || err) {
7214 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7215 			   ring_type, rc, err);
7216 		return -EIO;
7217 	}
7218 	ring->fw_ring_id = ring_id;
7219 	return rc;
7220 }
7221 
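/* Tell the firmware which completion ring should receive asynchronous
 * event notifications.  The PF uses HWRM_FUNC_CFG; a VF uses
 * HWRM_FUNC_VF_CFG.
 */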
7222 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7223 {
7224 	int rc;
7225 
7226 	if (BNXT_PF(bp)) {
7227 		struct hwrm_func_cfg_input *req;
7228 
7229 		rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7230 		if (rc)
7231 			return rc;
7232 
7233 		req->fid = cpu_to_le16(0xffff);
7234 		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7235 		req->async_event_cr = cpu_to_le16(idx);
7236 		return hwrm_req_send(bp, req);
7237 	} else {
7238 		struct hwrm_func_vf_cfg_input *req;
7239 
7240 		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7241 		if (rc)
7242 			return rc;
7243 
7244 		req->enables =
7245 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7246 		req->async_event_cr = cpu_to_le16(idx);
7247 		return hwrm_req_send(bp, req);
7248 	}
7249 }
7250 
7251 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7252 			     u32 ring_type)
7253 {
7254 	switch (ring_type) {
7255 	case HWRM_RING_ALLOC_TX:
7256 		db->db_ring_mask = bp->tx_ring_mask;
7257 		break;
7258 	case HWRM_RING_ALLOC_RX:
7259 		db->db_ring_mask = bp->rx_ring_mask;
7260 		break;
7261 	case HWRM_RING_ALLOC_AGG:
7262 		db->db_ring_mask = bp->rx_agg_ring_mask;
7263 		break;
7264 	case HWRM_RING_ALLOC_CMPL:
7265 	case HWRM_RING_ALLOC_NQ:
7266 		db->db_ring_mask = bp->cp_ring_mask;
7267 		break;
7268 	}
7269 	if (bp->flags & BNXT_FLAG_CHIP_P7) {
7270 		db->db_epoch_mask = db->db_ring_mask + 1;
7271 		db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7272 	}
7273 }
7274 
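/* Set up the doorbell address and key for a newly allocated ring.
 * P5_PLUS chips use 64-bit keyed doorbells at a common BAR offset;
 * older chips use 32-bit doorbells spaced 0x80 bytes apart.
 */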
7275 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7276 			u32 map_idx, u32 xid)
7277 {
7278 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7279 		switch (ring_type) {
7280 		case HWRM_RING_ALLOC_TX:
7281 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7282 			break;
7283 		case HWRM_RING_ALLOC_RX:
7284 		case HWRM_RING_ALLOC_AGG:
7285 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7286 			break;
7287 		case HWRM_RING_ALLOC_CMPL:
7288 			db->db_key64 = DBR_PATH_L2;
7289 			break;
7290 		case HWRM_RING_ALLOC_NQ:
7291 			db->db_key64 = DBR_PATH_L2;
7292 			break;
7293 		}
7294 		db->db_key64 |= (u64)xid << DBR_XID_SFT;
7295 
7296 		if (bp->flags & BNXT_FLAG_CHIP_P7)
7297 			db->db_key64 |= DBR_VALID;
7298 
7299 		db->doorbell = bp->bar1 + bp->db_offset;
7300 	} else {
7301 		db->doorbell = bp->bar1 + map_idx * 0x80;
7302 		switch (ring_type) {
7303 		case HWRM_RING_ALLOC_TX:
7304 			db->db_key32 = DB_KEY_TX;
7305 			break;
7306 		case HWRM_RING_ALLOC_RX:
7307 		case HWRM_RING_ALLOC_AGG:
7308 			db->db_key32 = DB_KEY_RX;
7309 			break;
7310 		case HWRM_RING_ALLOC_CMPL:
7311 			db->db_key32 = DB_KEY_CP;
7312 			break;
7313 		}
7314 	}
7315 	bnxt_set_db_mask(bp, db, ring_type);
7316 }
7317 
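/* Allocate one RX ring in firmware, set up its doorbell and record the
 * firmware ring ID in the ring group info.
 */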
7318 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7319 				   struct bnxt_rx_ring_info *rxr)
7320 {
7321 	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7322 	struct bnxt_napi *bnapi = rxr->bnapi;
7323 	u32 type = HWRM_RING_ALLOC_RX;
7324 	u32 map_idx = bnapi->index;
7325 	int rc;
7326 
7327 	rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7328 	if (rc)
7329 		return rc;
7330 
7331 	bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7332 	bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7333 
7334 	return 0;
7335 }
7336 
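/* Allocate the RX aggregation ring, then write both RX doorbells to
 * publish the producer indexes of the buffers posted earlier.
 */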
7337 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7338 				       struct bnxt_rx_ring_info *rxr)
7339 {
7340 	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7341 	u32 type = HWRM_RING_ALLOC_AGG;
7342 	u32 grp_idx = ring->grp_idx;
7343 	u32 map_idx;
7344 	int rc;
7345 
7346 	map_idx = grp_idx + bp->rx_nr_rings;
7347 	rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
7348 	if (rc)
7349 		return rc;
7350 
7351 	bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7352 		    ring->fw_ring_id);
7353 	bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7354 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7355 	bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7356 
7357 	return 0;
7358 }
7359 
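/* On P5_PLUS chips each TX and RX ring has a dedicated completion ring
 * linked to the NAPI's notification queue.  Allocate it and write its
 * initial doorbell.
 */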
7360 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7361 				      struct bnxt_cp_ring_info *cpr)
7362 {
7363 	const u32 type = HWRM_RING_ALLOC_CMPL;
7364 	struct bnxt_napi *bnapi = cpr->bnapi;
7365 	struct bnxt_ring_struct *ring;
7366 	u32 map_idx = bnapi->index;
7367 	int rc;
7368 
7369 	ring = &cpr->cp_ring_struct;
7370 	ring->handle = BNXT_SET_NQ_HDL(cpr);
7371 	rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7372 	if (rc)
7373 		return rc;
7374 	bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7375 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7376 	return 0;
7377 }
7378 
7379 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7380 				   struct bnxt_tx_ring_info *txr, u32 tx_idx)
7381 {
7382 	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7383 	const u32 type = HWRM_RING_ALLOC_TX;
7384 	int rc;
7385 
7386 	rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
7387 	if (rc)
7388 		return rc;
7389 	bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7390 	return 0;
7391 }
7392 
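/* Allocate all firmware rings in dependency order: NQ/completion rings
 * first (with the IRQ masked while each doorbell is being set up), then
 * TX, RX and aggregation rings.  The first completion ring is also
 * registered as the async event ring.
 */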
7393 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7394 {
7395 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7396 	int i, rc = 0;
7397 	u32 type;
7398 
7399 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7400 		type = HWRM_RING_ALLOC_NQ;
7401 	else
7402 		type = HWRM_RING_ALLOC_CMPL;
7403 	for (i = 0; i < bp->cp_nr_rings; i++) {
7404 		struct bnxt_napi *bnapi = bp->bnapi[i];
7405 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7406 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7407 		u32 map_idx = ring->map_idx;
7408 		unsigned int vector;
7409 
7410 		vector = bp->irq_tbl[map_idx].vector;
7411 		disable_irq_nosync(vector);
7412 		rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
7413 		if (rc) {
7414 			enable_irq(vector);
7415 			goto err_out;
7416 		}
7417 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7418 		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7419 		enable_irq(vector);
7420 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7421 
7422 		if (!i) {
7423 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7424 			if (rc)
7425 				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7426 		}
7427 	}
7428 
7429 	for (i = 0; i < bp->tx_nr_rings; i++) {
7430 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7431 
7432 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7433 			rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7434 			if (rc)
7435 				goto err_out;
7436 		}
7437 		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7438 		if (rc)
7439 			goto err_out;
7440 	}
7441 
7442 	for (i = 0; i < bp->rx_nr_rings; i++) {
7443 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7444 
7445 		rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7446 		if (rc)
7447 			goto err_out;
7448 		/* If we have agg rings, post agg buffers first. */
7449 		if (!agg_rings)
7450 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7451 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7452 			rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7453 			if (rc)
7454 				goto err_out;
7455 		}
7456 	}
7457 
7458 	if (agg_rings) {
7459 		for (i = 0; i < bp->rx_nr_rings; i++) {
7460 			rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7461 			if (rc)
7462 				goto err_out;
7463 		}
7464 	}
7465 err_out:
7466 	return rc;
7467 }
7468 
7469 static void bnxt_cancel_dim(struct bnxt *bp)
7470 {
7471 	int i;
7472 
7473 	/* DIM work is initialized in bnxt_enable_napi().  Proceed only
7474 	 * if NAPI is enabled.
7475 	 */
7476 	if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7477 		return;
7478 
7479 	/* Make sure NAPI sees that the VNIC is disabled */
7480 	synchronize_net();
7481 	for (i = 0; i < bp->rx_nr_rings; i++) {
7482 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7483 		struct bnxt_napi *bnapi = rxr->bnapi;
7484 
7485 		cancel_work_sync(&bnapi->cp_ring.dim.work);
7486 	}
7487 }
7488 
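/* Send HWRM_RING_FREE for a single ring.  Skipped entirely when the
 * firmware is not accessible.
 */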
7489 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7490 				   struct bnxt_ring_struct *ring,
7491 				   u32 ring_type, int cmpl_ring_id)
7492 {
7493 	struct hwrm_ring_free_output *resp;
7494 	struct hwrm_ring_free_input *req;
7495 	u16 error_code = 0;
7496 	int rc;
7497 
7498 	if (BNXT_NO_FW_ACCESS(bp))
7499 		return 0;
7500 
7501 	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7502 	if (rc)
7503 		goto exit;
7504 
7505 	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7506 	req->ring_type = ring_type;
7507 	req->ring_id = cpu_to_le16(ring->fw_ring_id);
7508 
7509 	resp = hwrm_req_hold(bp, req);
7510 	rc = hwrm_req_send(bp, req);
7511 	error_code = le16_to_cpu(resp->error_code);
7512 	hwrm_req_drop(bp, req);
7513 exit:
7514 	if (rc || error_code) {
7515 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7516 			   ring_type, rc, error_code);
7517 		return -EIO;
7518 	}
7519 	return 0;
7520 }
7521 
7522 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7523 				   struct bnxt_tx_ring_info *txr,
7524 				   bool close_path)
7525 {
7526 	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7527 	u32 cmpl_ring_id;
7528 
7529 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7530 		return;
7531 
7532 	cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7533 		       INVALID_HW_RING_ID;
7534 	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7535 				cmpl_ring_id);
7536 	ring->fw_ring_id = INVALID_HW_RING_ID;
7537 }
7538 
7539 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7540 				   struct bnxt_rx_ring_info *rxr,
7541 				   bool close_path)
7542 {
7543 	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7544 	u32 grp_idx = rxr->bnapi->index;
7545 	u32 cmpl_ring_id;
7546 
7547 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7548 		return;
7549 
7550 	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7551 	hwrm_ring_free_send_msg(bp, ring,
7552 				RING_FREE_REQ_RING_TYPE_RX,
7553 				close_path ? cmpl_ring_id :
7554 				INVALID_HW_RING_ID);
7555 	ring->fw_ring_id = INVALID_HW_RING_ID;
7556 	bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7557 }
7558 
7559 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7560 				       struct bnxt_rx_ring_info *rxr,
7561 				       bool close_path)
7562 {
7563 	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7564 	u32 grp_idx = rxr->bnapi->index;
7565 	u32 type, cmpl_ring_id;
7566 
7567 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7568 		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7569 	else
7570 		type = RING_FREE_REQ_RING_TYPE_RX;
7571 
7572 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7573 		return;
7574 
7575 	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7576 	hwrm_ring_free_send_msg(bp, ring, type,
7577 				close_path ? cmpl_ring_id :
7578 				INVALID_HW_RING_ID);
7579 	ring->fw_ring_id = INVALID_HW_RING_ID;
7580 	bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7581 }
7582 
7583 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7584 				   struct bnxt_cp_ring_info *cpr)
7585 {
7586 	struct bnxt_ring_struct *ring;
7587 
7588 	ring = &cpr->cp_ring_struct;
7589 	if (ring->fw_ring_id == INVALID_HW_RING_ID)
7590 		return;
7591 
7592 	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7593 				INVALID_HW_RING_ID);
7594 	ring->fw_ring_id = INVALID_HW_RING_ID;
7595 }
7596 
7597 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7598 {
7599 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7600 	int i, size = ring->ring_mem.page_size;
7601 
7602 	cpr->cp_raw_cons = 0;
7603 	cpr->toggle = 0;
7604 
7605 	for (i = 0; i < bp->cp_nr_pages; i++)
7606 		if (cpr->cp_desc_ring[i])
7607 			memset(cpr->cp_desc_ring[i], 0, size);
7608 }
7609 
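/* Free all firmware rings: TX first, then RX and aggregation rings, and
 * finally the completion/notification rings after interrupts have been
 * disabled.
 */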
7610 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7611 {
7612 	u32 type;
7613 	int i;
7614 
7615 	if (!bp->bnapi)
7616 		return;
7617 
7618 	for (i = 0; i < bp->tx_nr_rings; i++)
7619 		bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7620 
7621 	bnxt_cancel_dim(bp);
7622 	for (i = 0; i < bp->rx_nr_rings; i++) {
7623 		bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7624 		bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7625 	}
7626 
7627 	/* The completion rings are about to be freed.  After that the
7628 	 * IRQ doorbell will not work anymore.  So we need to disable
7629 	 * IRQ here.
7630 	 */
7631 	bnxt_disable_int_sync(bp);
7632 
7633 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7634 		type = RING_FREE_REQ_RING_TYPE_NQ;
7635 	else
7636 		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7637 	for (i = 0; i < bp->cp_nr_rings; i++) {
7638 		struct bnxt_napi *bnapi = bp->bnapi[i];
7639 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7640 		struct bnxt_ring_struct *ring;
7641 		int j;
7642 
7643 		for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7644 			bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7645 
7646 		ring = &cpr->cp_ring_struct;
7647 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7648 			hwrm_ring_free_send_msg(bp, ring, type,
7649 						INVALID_HW_RING_ID);
7650 			ring->fw_ring_id = INVALID_HW_RING_ID;
7651 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7652 		}
7653 	}
7654 }
7655 
7656 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7657 			     bool shared);
7658 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7659 			   bool shared);
7660 
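/* Query the resources currently reserved for this function and cache
 * them in bp->hw_resc.  On P5_PLUS chips the reserved RX/TX counts are
 * trimmed if they exceed the reserved completion rings.
 */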
7661 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7662 {
7663 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7664 	struct hwrm_func_qcfg_output *resp;
7665 	struct hwrm_func_qcfg_input *req;
7666 	int rc;
7667 
7668 	if (bp->hwrm_spec_code < 0x10601)
7669 		return 0;
7670 
7671 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7672 	if (rc)
7673 		return rc;
7674 
7675 	req->fid = cpu_to_le16(0xffff);
7676 	resp = hwrm_req_hold(bp, req);
7677 	rc = hwrm_req_send(bp, req);
7678 	if (rc) {
7679 		hwrm_req_drop(bp, req);
7680 		return rc;
7681 	}
7682 
7683 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7684 	if (BNXT_NEW_RM(bp)) {
7685 		u16 cp, stats;
7686 
7687 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7688 		hw_resc->resv_hw_ring_grps =
7689 			le32_to_cpu(resp->alloc_hw_ring_grps);
7690 		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7691 		hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7692 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
7693 		stats = le16_to_cpu(resp->alloc_stat_ctx);
7694 		hw_resc->resv_irqs = cp;
7695 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7696 			int rx = hw_resc->resv_rx_rings;
7697 			int tx = hw_resc->resv_tx_rings;
7698 
7699 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
7700 				rx >>= 1;
7701 			if (cp < (rx + tx)) {
7702 				rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7703 				if (rc)
7704 					goto get_rings_exit;
7705 				if (bp->flags & BNXT_FLAG_AGG_RINGS)
7706 					rx <<= 1;
7707 				hw_resc->resv_rx_rings = rx;
7708 				hw_resc->resv_tx_rings = tx;
7709 			}
7710 			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7711 			hw_resc->resv_hw_ring_grps = rx;
7712 		}
7713 		hw_resc->resv_cp_rings = cp;
7714 		hw_resc->resv_stat_ctxs = stats;
7715 	}
7716 get_rings_exit:
7717 	hwrm_req_drop(bp, req);
7718 	return rc;
7719 }
7720 
7721 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7722 {
7723 	struct hwrm_func_qcfg_output *resp;
7724 	struct hwrm_func_qcfg_input *req;
7725 	int rc;
7726 
7727 	if (bp->hwrm_spec_code < 0x10601)
7728 		return 0;
7729 
7730 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7731 	if (rc)
7732 		return rc;
7733 
7734 	req->fid = cpu_to_le16(fid);
7735 	resp = hwrm_req_hold(bp, req);
7736 	rc = hwrm_req_send(bp, req);
7737 	if (!rc)
7738 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7739 
7740 	hwrm_req_drop(bp, req);
7741 	return rc;
7742 }
7743 
7744 static bool bnxt_rfs_supported(struct bnxt *bp);
7745 
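/* Build (but do not send) a FUNC_CFG request that reserves the ring and
 * context counts in *hwr for the PF.  The caller sends or drops the
 * request.
 */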
7746 static struct hwrm_func_cfg_input *
7747 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7748 {
7749 	struct hwrm_func_cfg_input *req;
7750 	u32 enables = 0;
7751 
7752 	if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7753 		return NULL;
7754 
7755 	req->fid = cpu_to_le16(0xffff);
7756 	enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7757 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7758 	if (BNXT_NEW_RM(bp)) {
7759 		enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7760 		enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7761 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7762 			enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7763 			enables |= hwr->cp_p5 ?
7764 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7765 		} else {
7766 			enables |= hwr->cp ?
7767 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7768 			enables |= hwr->grp ?
7769 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7770 		}
7771 		enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7772 		enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7773 					  0;
7774 		req->num_rx_rings = cpu_to_le16(hwr->rx);
7775 		req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7776 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7777 			req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7778 			req->num_msix = cpu_to_le16(hwr->cp);
7779 		} else {
7780 			req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7781 			req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7782 		}
7783 		req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7784 		req->num_vnics = cpu_to_le16(hwr->vnic);
7785 	}
7786 	req->enables = cpu_to_le32(enables);
7787 	return req;
7788 }
7789 
7790 static struct hwrm_func_vf_cfg_input *
7791 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7792 {
7793 	struct hwrm_func_vf_cfg_input *req;
7794 	u32 enables = 0;
7795 
7796 	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7797 		return NULL;
7798 
7799 	enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7800 	enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7801 			     FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7802 	enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7803 	enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7804 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7805 		enables |= hwr->cp_p5 ?
7806 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7807 	} else {
7808 		enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7809 		enables |= hwr->grp ?
7810 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7811 	}
7812 	enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7813 	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7814 
7815 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7816 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7817 	req->num_rx_rings = cpu_to_le16(hwr->rx);
7818 	req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7819 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7820 		req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7821 	} else {
7822 		req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7823 		req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7824 	}
7825 	req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7826 	req->num_vnics = cpu_to_le16(hwr->vnic);
7827 
7828 	req->enables = cpu_to_le32(enables);
7829 	return req;
7830 }
7831 
7832 static int
7833 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7834 {
7835 	struct hwrm_func_cfg_input *req;
7836 	int rc;
7837 
7838 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7839 	if (!req)
7840 		return -ENOMEM;
7841 
7842 	if (!req->enables) {
7843 		hwrm_req_drop(bp, req);
7844 		return 0;
7845 	}
7846 
7847 	rc = hwrm_req_send(bp, req);
7848 	if (rc)
7849 		return rc;
7850 
7851 	if (bp->hwrm_spec_code < 0x10601)
7852 		bp->hw_resc.resv_tx_rings = hwr->tx;
7853 
7854 	return bnxt_hwrm_get_rings(bp);
7855 }
7856 
7857 static int
7858 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7859 {
7860 	struct hwrm_func_vf_cfg_input *req;
7861 	int rc;
7862 
7863 	if (!BNXT_NEW_RM(bp)) {
7864 		bp->hw_resc.resv_tx_rings = hwr->tx;
7865 		return 0;
7866 	}
7867 
7868 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7869 	if (!req)
7870 		return -ENOMEM;
7871 
7872 	rc = hwrm_req_send(bp, req);
7873 	if (rc)
7874 		return rc;
7875 
7876 	return bnxt_hwrm_get_rings(bp);
7877 }
7878 
7879 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7880 {
7881 	if (BNXT_PF(bp))
7882 		return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7883 	else
7884 		return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7885 }
7886 
7887 int bnxt_nq_rings_in_use(struct bnxt *bp)
7888 {
7889 	return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7890 }
7891 
7892 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7893 {
7894 	int cp;
7895 
7896 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7897 		return bnxt_nq_rings_in_use(bp);
7898 
7899 	cp = bp->tx_nr_rings + bp->rx_nr_rings;
7900 	return cp;
7901 }
7902 
7903 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7904 {
7905 	return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7906 }
7907 
7908 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7909 {
7910 	if (!hwr->grp)
7911 		return 0;
7912 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7913 		int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7914 
7915 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7916 			rss_ctx *= hwr->vnic;
7917 		return rss_ctx;
7918 	}
7919 	if (BNXT_VF(bp))
7920 		return BNXT_VF_MAX_RSS_CTX;
7921 	if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7922 		return hwr->grp + 1;
7923 	return 1;
7924 }
7925 
7926 /* Check if a default RSS map needs to be set up.  This function is only
7927  * used on older firmware that does not require reserving RX rings.
7928  */
7929 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7930 {
7931 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7932 
7933 	/* The RSS map is only valid while rx_nr_rings matches resv_rx_rings */
7934 	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7935 		hw_resc->resv_rx_rings = bp->rx_nr_rings;
7936 		if (!netif_is_rxfh_configured(bp->dev))
7937 			bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7938 	}
7939 }
7940 
7941 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7942 {
7943 	if (bp->flags & BNXT_FLAG_RFS) {
7944 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7945 			return 2 + bp->num_rss_ctx;
7946 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7947 			return rx_rings + 1;
7948 	}
7949 	return 1;
7950 }
7951 
7952 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7953 {
7954 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7955 	int cp = bnxt_cp_rings_in_use(bp);
7956 	int nq = bnxt_nq_rings_in_use(bp);
7957 	int rx = bp->rx_nr_rings, stat;
7958 	int vnic, grp = rx;
7959 
7960 	/* Old firmware does not need RX ring reservations but we still
7961 	 * need to set up a default RSS map when needed.  With new firmware
7962 	 * we go through RX ring reservations first and then set up the
7963 	 * RSS map for the successfully reserved RX rings when needed.
7964 	 */
7965 	if (!BNXT_NEW_RM(bp))
7966 		bnxt_check_rss_tbl_no_rmgr(bp);
7967 
7968 	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7969 	    bp->hwrm_spec_code >= 0x10601)
7970 		return true;
7971 
7972 	if (!BNXT_NEW_RM(bp))
7973 		return false;
7974 
7975 	vnic = bnxt_get_total_vnics(bp, rx);
7976 
7977 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7978 		rx <<= 1;
7979 	stat = bnxt_get_func_stat_ctxs(bp);
7980 	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7981 	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7982 	    (hw_resc->resv_hw_ring_grps != grp &&
7983 	     !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7984 		return true;
7985 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7986 	    hw_resc->resv_irqs != nq)
7987 		return true;
7988 	return false;
7989 }
7990 
7991 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7992 {
7993 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7994 
7995 	hwr->tx = hw_resc->resv_tx_rings;
7996 	if (BNXT_NEW_RM(bp)) {
7997 		hwr->rx = hw_resc->resv_rx_rings;
7998 		hwr->cp = hw_resc->resv_irqs;
7999 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8000 			hwr->cp_p5 = hw_resc->resv_cp_rings;
8001 		hwr->grp = hw_resc->resv_hw_ring_grps;
8002 		hwr->vnic = hw_resc->resv_vnics;
8003 		hwr->stat = hw_resc->resv_stat_ctxs;
8004 		hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
8005 	}
8006 }
8007 
8008 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8009 {
8010 	return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
8011 	       hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
8012 }
8013 
8014 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
8015 
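/* Reserve rings, stat contexts, VNICs and MSI-X with the firmware based
 * on the current ring counts, then trim bp->*_nr_rings, the RSS map and
 * the ULP resources down to what the firmware actually granted.
 */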
8016 static int __bnxt_reserve_rings(struct bnxt *bp)
8017 {
8018 	struct bnxt_hw_rings hwr = {0};
8019 	int rx_rings, old_rx_rings, rc;
8020 	int cp = bp->cp_nr_rings;
8021 	int ulp_msix = 0;
8022 	bool sh = false;
8023 	int tx_cp;
8024 
8025 	if (!bnxt_need_reserve_rings(bp))
8026 		return 0;
8027 
8028 	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
8029 		ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
8030 		if (!ulp_msix)
8031 			bnxt_set_ulp_stat_ctxs(bp, 0);
8032 
8033 		if (ulp_msix > bp->ulp_num_msix_want)
8034 			ulp_msix = bp->ulp_num_msix_want;
8035 		hwr.cp = cp + ulp_msix;
8036 	} else {
8037 		hwr.cp = bnxt_nq_rings_in_use(bp);
8038 	}
8039 
8040 	hwr.tx = bp->tx_nr_rings;
8041 	hwr.rx = bp->rx_nr_rings;
8042 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8043 		sh = true;
8044 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8045 		hwr.cp_p5 = hwr.rx + hwr.tx;
8046 
8047 	hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
8048 
8049 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
8050 		hwr.rx <<= 1;
8051 	hwr.grp = bp->rx_nr_rings;
8052 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8053 	hwr.stat = bnxt_get_func_stat_ctxs(bp);
8054 	old_rx_rings = bp->hw_resc.resv_rx_rings;
8055 
8056 	rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8057 	if (rc)
8058 		return rc;
8059 
8060 	bnxt_copy_reserved_rings(bp, &hwr);
8061 
8062 	rx_rings = hwr.rx;
8063 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8064 		if (hwr.rx >= 2) {
8065 			rx_rings = hwr.rx >> 1;
8066 		} else {
8067 			if (netif_running(bp->dev))
8068 				return -ENOMEM;
8069 
8070 			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8071 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8072 			bp->dev->hw_features &= ~NETIF_F_LRO;
8073 			bp->dev->features &= ~NETIF_F_LRO;
8074 			bnxt_set_ring_params(bp);
8075 		}
8076 	}
8077 	rx_rings = min_t(int, rx_rings, hwr.grp);
8078 	hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8079 	if (bnxt_ulp_registered(bp->edev) &&
8080 	    hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8081 		hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8082 	hwr.cp = min_t(int, hwr.cp, hwr.stat);
8083 	rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8084 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
8085 		hwr.rx = rx_rings << 1;
8086 	tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8087 	hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8088 	if (hwr.tx != bp->tx_nr_rings) {
8089 		netdev_warn(bp->dev,
8090 			    "Able to reserve only %d out of %d requested TX rings\n",
8091 			    hwr.tx, bp->tx_nr_rings);
8092 	}
8093 	bp->tx_nr_rings = hwr.tx;
8094 
8095 	/* If we cannot reserve all the RX rings, reset the RSS map only
8096 	 * if absolutely necessary
8097 	 * if absolutely necessary.
8098 	if (rx_rings != bp->rx_nr_rings) {
8099 		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8100 			    rx_rings, bp->rx_nr_rings);
8101 		if (netif_is_rxfh_configured(bp->dev) &&
8102 		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8103 		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8104 		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8105 			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
8106 			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
8107 		}
8108 	}
8109 	bp->rx_nr_rings = rx_rings;
8110 	bp->cp_nr_rings = hwr.cp;
8111 
8112 	/* Fall back if we cannot reserve enough HW RSS contexts */
8113 	if ((bp->rss_cap & BNXT_RSS_CAP_LARGE_RSS_CTX) &&
8114 	    hwr.rss_ctx < bnxt_get_total_rss_ctxs(bp, &hwr))
8115 		bp->rss_cap &= ~BNXT_RSS_CAP_LARGE_RSS_CTX;
8116 
8117 	if (!bnxt_rings_ok(bp, &hwr))
8118 		return -ENOMEM;
8119 
8120 	if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8121 	    !netif_is_rxfh_configured(bp->dev))
8122 		bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8123 
8124 	if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8125 		int resv_msix, resv_ctx, ulp_ctxs;
8126 		struct bnxt_hw_resc *hw_resc;
8127 
8128 		hw_resc = &bp->hw_resc;
8129 		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8130 		ulp_msix = min_t(int, resv_msix, ulp_msix);
8131 		bnxt_set_ulp_msix_num(bp, ulp_msix);
8132 		resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8133 		ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8134 		bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8135 	}
8136 
8137 	return rc;
8138 }
8139 
8140 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8141 {
8142 	struct hwrm_func_vf_cfg_input *req;
8143 	u32 flags;
8144 
8145 	if (!BNXT_NEW_RM(bp))
8146 		return 0;
8147 
8148 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8149 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8150 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8151 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8152 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8153 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8154 		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8155 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8156 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8157 
8158 	req->flags = cpu_to_le32(flags);
8159 	return hwrm_req_send_silent(bp, req);
8160 }
8161 
8162 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8163 {
8164 	struct hwrm_func_cfg_input *req;
8165 	u32 flags;
8166 
8167 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8168 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8169 	if (BNXT_NEW_RM(bp)) {
8170 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8171 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8172 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8173 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8174 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8175 			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8176 				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8177 		else
8178 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8179 	}
8180 
8181 	req->flags = cpu_to_le32(flags);
8182 	return hwrm_req_send_silent(bp, req);
8183 }
8184 
8185 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8186 {
8187 	if (bp->hwrm_spec_code < 0x10801)
8188 		return 0;
8189 
8190 	if (BNXT_PF(bp))
8191 		return bnxt_hwrm_check_pf_rings(bp, hwr);
8192 
8193 	return bnxt_hwrm_check_vf_rings(bp, hwr);
8194 }
8195 
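/* Query the interrupt coalescing parameters and limits supported by the
 * firmware, falling back to fixed legacy defaults on older firmware.
 */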
8196 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8197 {
8198 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8199 	struct hwrm_ring_aggint_qcaps_output *resp;
8200 	struct hwrm_ring_aggint_qcaps_input *req;
8201 	int rc;
8202 
8203 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8204 	coal_cap->num_cmpl_dma_aggr_max = 63;
8205 	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8206 	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8207 	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8208 	coal_cap->int_lat_tmr_min_max = 65535;
8209 	coal_cap->int_lat_tmr_max_max = 65535;
8210 	coal_cap->num_cmpl_aggr_int_max = 65535;
8211 	coal_cap->timer_units = 80;
8212 
8213 	if (bp->hwrm_spec_code < 0x10902)
8214 		return;
8215 
8216 	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8217 		return;
8218 
8219 	resp = hwrm_req_hold(bp, req);
8220 	rc = hwrm_req_send_silent(bp, req);
8221 	if (!rc) {
8222 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8223 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8224 		coal_cap->num_cmpl_dma_aggr_max =
8225 			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8226 		coal_cap->num_cmpl_dma_aggr_during_int_max =
8227 			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8228 		coal_cap->cmpl_aggr_dma_tmr_max =
8229 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8230 		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8231 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8232 		coal_cap->int_lat_tmr_min_max =
8233 			le16_to_cpu(resp->int_lat_tmr_min_max);
8234 		coal_cap->int_lat_tmr_max_max =
8235 			le16_to_cpu(resp->int_lat_tmr_max_max);
8236 		coal_cap->num_cmpl_aggr_int_max =
8237 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
8238 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8239 	}
8240 	hwrm_req_drop(bp, req);
8241 }
8242 
8243 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8244 {
8245 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8246 
8247 	return usec * 1000 / coal_cap->timer_units;
8248 }
8249 
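/* Translate the generic coalescing settings in *hw_coal into the fields
 * of a RING_CMPL_RING_CFG_AGGINT_PARAMS request, clamping each value to
 * the limits reported by the firmware.
 */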
8250 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8251 	struct bnxt_coal *hw_coal,
8252 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8253 {
8254 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8255 	u16 val, tmr, max, flags = hw_coal->flags;
8256 	u32 cmpl_params = coal_cap->cmpl_params;
8257 
8258 	max = hw_coal->bufs_per_record * 128;
8259 	if (hw_coal->budget)
8260 		max = hw_coal->bufs_per_record * hw_coal->budget;
8261 	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8262 
8263 	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8264 	req->num_cmpl_aggr_int = cpu_to_le16(val);
8265 
8266 	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8267 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
8268 
8269 	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8270 		      coal_cap->num_cmpl_dma_aggr_during_int_max);
8271 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8272 
8273 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8274 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8275 	req->int_lat_tmr_max = cpu_to_le16(tmr);
8276 
8277 	/* min timer set to 1/2 of interrupt timer */
8278 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8279 		val = tmr / 2;
8280 		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8281 		req->int_lat_tmr_min = cpu_to_le16(val);
8282 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8283 	}
8284 
8285 	/* buf timer set to 1/4 of interrupt timer */
8286 	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8287 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8288 
8289 	if (cmpl_params &
8290 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8291 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8292 		val = clamp_t(u16, tmr, 1,
8293 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8294 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8295 		req->enables |=
8296 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8297 	}
8298 
8299 	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8300 	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8301 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8302 	req->flags = cpu_to_le16(flags);
8303 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8304 }
8305 
8306 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8307 				   struct bnxt_coal *hw_coal)
8308 {
8309 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8310 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8311 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8312 	u32 nq_params = coal_cap->nq_params;
8313 	u16 tmr;
8314 	int rc;
8315 
8316 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8317 		return 0;
8318 
8319 	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8320 	if (rc)
8321 		return rc;
8322 
8323 	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8324 	req->flags =
8325 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8326 
8327 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8328 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8329 	req->int_lat_tmr_min = cpu_to_le16(tmr);
8330 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8331 	return hwrm_req_send(bp, req);
8332 }
8333 
8334 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8335 {
8336 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8337 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8338 	struct bnxt_coal coal;
8339 	int rc;
8340 
8341 	/* Tick values in microseconds.
8342 	 * 1 coal_buf x bufs_per_record = 1 completion record.
8343 	 */
8344 	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8345 
8346 	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8347 	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8348 
8349 	if (!bnapi->rx_ring)
8350 		return -ENODEV;
8351 
8352 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8353 	if (rc)
8354 		return rc;
8355 
8356 	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8357 
8358 	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8359 
8360 	return hwrm_req_send(bp, req_rx);
8361 }
8362 
8363 static int
8364 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8365 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8366 {
8367 	u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8368 
8369 	req->ring_id = cpu_to_le16(ring_id);
8370 	return hwrm_req_send(bp, req);
8371 }
8372 
8373 static int
8374 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8375 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8376 {
8377 	struct bnxt_tx_ring_info *txr;
8378 	int i, rc;
8379 
8380 	bnxt_for_each_napi_tx(i, bnapi, txr) {
8381 		u16 ring_id;
8382 
8383 		ring_id = bnxt_cp_ring_for_tx(bp, txr);
8384 		req->ring_id = cpu_to_le16(ring_id);
8385 		rc = hwrm_req_send(bp, req);
8386 		if (rc)
8387 			return rc;
8388 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8389 			return 0;
8390 	}
8391 	return 0;
8392 }
8393 
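/* Program RX and TX coalescing on every completion ring.  On P5_PLUS
 * chips a NAPI that has both RX and TX rings gets both settings, and the
 * notification queue timer is updated as well.
 */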
8394 int bnxt_hwrm_set_coal(struct bnxt *bp)
8395 {
8396 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8397 	int i, rc;
8398 
8399 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8400 	if (rc)
8401 		return rc;
8402 
8403 	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8404 	if (rc) {
8405 		hwrm_req_drop(bp, req_rx);
8406 		return rc;
8407 	}
8408 
8409 	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8410 	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8411 
8412 	hwrm_req_hold(bp, req_rx);
8413 	hwrm_req_hold(bp, req_tx);
8414 	for (i = 0; i < bp->cp_nr_rings; i++) {
8415 		struct bnxt_napi *bnapi = bp->bnapi[i];
8416 		struct bnxt_coal *hw_coal;
8417 
8418 		if (!bnapi->rx_ring)
8419 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8420 		else
8421 			rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8422 		if (rc)
8423 			break;
8424 
8425 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8426 			continue;
8427 
8428 		if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8429 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8430 			if (rc)
8431 				break;
8432 		}
8433 		if (bnapi->rx_ring)
8434 			hw_coal = &bp->rx_coal;
8435 		else
8436 			hw_coal = &bp->tx_coal;
8437 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8438 	}
8439 	hwrm_req_drop(bp, req_rx);
8440 	hwrm_req_drop(bp, req_tx);
8441 	return rc;
8442 }
8443 
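/* Free all statistics contexts.  On older firmware (major version <= 20)
 * each context's counters are explicitly cleared before it is freed.
 */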
8444 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8445 {
8446 	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8447 	struct hwrm_stat_ctx_free_input *req;
8448 	int i;
8449 
8450 	if (!bp->bnapi)
8451 		return;
8452 
8453 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8454 		return;
8455 
8456 	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8457 		return;
8458 	if (BNXT_FW_MAJ(bp) <= 20) {
8459 		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8460 			hwrm_req_drop(bp, req);
8461 			return;
8462 		}
8463 		hwrm_req_hold(bp, req0);
8464 	}
8465 	hwrm_req_hold(bp, req);
8466 	for (i = 0; i < bp->cp_nr_rings; i++) {
8467 		struct bnxt_napi *bnapi = bp->bnapi[i];
8468 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8469 
8470 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8471 			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8472 			if (req0) {
8473 				req0->stat_ctx_id = req->stat_ctx_id;
8474 				hwrm_req_send(bp, req0);
8475 			}
8476 			hwrm_req_send(bp, req);
8477 
8478 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8479 		}
8480 	}
8481 	hwrm_req_drop(bp, req);
8482 	if (req0)
8483 		hwrm_req_drop(bp, req0);
8484 }
8485 
8486 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8487 {
8488 	struct hwrm_stat_ctx_alloc_output *resp;
8489 	struct hwrm_stat_ctx_alloc_input *req;
8490 	int rc, i;
8491 
8492 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8493 		return 0;
8494 
8495 	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8496 	if (rc)
8497 		return rc;
8498 
8499 	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8500 	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8501 
8502 	resp = hwrm_req_hold(bp, req);
8503 	for (i = 0; i < bp->cp_nr_rings; i++) {
8504 		struct bnxt_napi *bnapi = bp->bnapi[i];
8505 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8506 
8507 		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8508 
8509 		rc = hwrm_req_send(bp, req);
8510 		if (rc)
8511 			break;
8512 
8513 		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8514 
8515 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8516 	}
8517 	hwrm_req_drop(bp, req);
8518 	return rc;
8519 }
8520 
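/* Query the current function configuration: VF VLAN and trust settings,
 * DCBX/LLDP agent and multi-host flags, bridge mode, maximum MTU and the
 * doorbell BAR layout.
 */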
8521 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8522 {
8523 	struct hwrm_func_qcfg_output *resp;
8524 	struct hwrm_func_qcfg_input *req;
8525 	u16 flags;
8526 	int rc;
8527 
8528 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8529 	if (rc)
8530 		return rc;
8531 
8532 	req->fid = cpu_to_le16(0xffff);
8533 	resp = hwrm_req_hold(bp, req);
8534 	rc = hwrm_req_send(bp, req);
8535 	if (rc)
8536 		goto func_qcfg_exit;
8537 
8538 	flags = le16_to_cpu(resp->flags);
8539 #ifdef CONFIG_BNXT_SRIOV
8540 	if (BNXT_VF(bp)) {
8541 		struct bnxt_vf_info *vf = &bp->vf;
8542 
8543 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8544 		if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8545 			vf->flags |= BNXT_VF_TRUST;
8546 		else
8547 			vf->flags &= ~BNXT_VF_TRUST;
8548 	} else {
8549 		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8550 	}
8551 #endif
8552 	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8553 		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8554 		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8555 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8556 			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8557 	}
8558 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8559 		bp->flags |= BNXT_FLAG_MULTI_HOST;
8560 
8561 	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8562 		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8563 
8564 	if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8565 		bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8566 	if (resp->roce_bidi_opt_mode &
8567 	    FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
8568 		bp->cos0_cos1_shared = 1;
8569 	else
8570 		bp->cos0_cos1_shared = 0;
8571 
8572 	switch (resp->port_partition_type) {
8573 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8574 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8575 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8576 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8577 		bp->port_partition_type = resp->port_partition_type;
8578 		break;
8579 	}
8580 	if (bp->hwrm_spec_code < 0x10707 ||
8581 	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8582 		bp->br_mode = BRIDGE_MODE_VEB;
8583 	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8584 		bp->br_mode = BRIDGE_MODE_VEPA;
8585 	else
8586 		bp->br_mode = BRIDGE_MODE_UNDEF;
8587 
8588 	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8589 	if (!bp->max_mtu)
8590 		bp->max_mtu = BNXT_MAX_MTU;
8591 
8592 	if (bp->db_size)
8593 		goto func_qcfg_exit;
8594 
8595 	bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8596 	if (BNXT_CHIP_P5(bp)) {
8597 		if (BNXT_PF(bp))
8598 			bp->db_offset = DB_PF_OFFSET_P5;
8599 		else
8600 			bp->db_offset = DB_VF_OFFSET_P5;
8601 	}
8602 	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8603 				 1024);
8604 	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8605 	    bp->db_size <= bp->db_offset)
8606 		bp->db_size = pci_resource_len(bp->pdev, 2);
8607 
8608 func_qcfg_exit:
8609 	hwrm_req_drop(bp, req);
8610 	return rc;
8611 }
8612 
8613 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8614 				      u8 init_val, u8 init_offset,
8615 				      bool init_mask_set)
8616 {
8617 	ctxm->init_value = init_val;
8618 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8619 	if (init_mask_set)
8620 		ctxm->init_offset = init_offset * 4;
8621 	else
8622 		ctxm->init_value = 0;
8623 }
8624 
8625 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8626 {
8627 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8628 	u16 type;
8629 
8630 	for (type = 0; type < ctx_max; type++) {
8631 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8632 		int n = 1;
8633 
8634 		if (!ctxm->max_entries || ctxm->pg_info)
8635 			continue;
8636 
8637 		if (ctxm->instance_bmap)
8638 			n = hweight32(ctxm->instance_bmap);
8639 		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8640 		if (!ctxm->pg_info)
8641 			return -ENOMEM;
8642 	}
8643 	return 0;
8644 }
8645 
8646 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8647 				  struct bnxt_ctx_mem_type *ctxm, bool force);
8648 
8649 #define BNXT_CTX_INIT_VALID(flags)	\
8650 	(!!((flags) &			\
8651 	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8652 
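/* Walk the V2 backing store capability records, one HWRM request per
 * context type, caching the entry sizes, limits and initialization
 * parameters used later to size the context memory.
 */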
8653 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8654 {
8655 	struct hwrm_func_backing_store_qcaps_v2_output *resp;
8656 	struct hwrm_func_backing_store_qcaps_v2_input *req;
8657 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8658 	u16 type;
8659 	int rc;
8660 
8661 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8662 	if (rc)
8663 		return rc;
8664 
8665 	if (!ctx) {
8666 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8667 		if (!ctx)
8668 			return -ENOMEM;
8669 		bp->ctx = ctx;
8670 	}
8671 
8672 	resp = hwrm_req_hold(bp, req);
8673 
8674 	for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8675 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8676 		u8 init_val, init_off, i;
8677 		u32 max_entries;
8678 		u16 entry_size;
8679 		__le32 *p;
8680 		u32 flags;
8681 
8682 		req->type = cpu_to_le16(type);
8683 		rc = hwrm_req_send(bp, req);
8684 		if (rc)
8685 			goto ctx_done;
8686 		flags = le32_to_cpu(resp->flags);
8687 		type = le16_to_cpu(resp->next_valid_type);
8688 		if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8689 			bnxt_free_one_ctx_mem(bp, ctxm, true);
8690 			continue;
8691 		}
8692 		entry_size = le16_to_cpu(resp->entry_size);
8693 		max_entries = le32_to_cpu(resp->max_num_entries);
8694 		if (ctxm->mem_valid) {
8695 			if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8696 			    ctxm->entry_size != entry_size ||
8697 			    ctxm->max_entries != max_entries)
8698 				bnxt_free_one_ctx_mem(bp, ctxm, true);
8699 			else
8700 				continue;
8701 		}
8702 		ctxm->type = le16_to_cpu(resp->type);
8703 		ctxm->entry_size = entry_size;
8704 		ctxm->flags = flags;
8705 		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8706 		ctxm->entry_multiple = resp->entry_multiple;
8707 		ctxm->max_entries = max_entries;
8708 		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8709 		init_val = resp->ctx_init_value;
8710 		init_off = resp->ctx_init_offset;
8711 		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8712 					  BNXT_CTX_INIT_VALID(flags));
8713 		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8714 					      BNXT_MAX_SPLIT_ENTRY);
8715 		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8716 		     i++, p++)
8717 			ctxm->split[i] = le32_to_cpu(*p);
8718 	}
8719 	rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8720 
8721 ctx_done:
8722 	hwrm_req_drop(bp, req);
8723 	return rc;
8724 }
8725 
8726 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8727 {
8728 	struct hwrm_func_backing_store_qcaps_output *resp;
8729 	struct hwrm_func_backing_store_qcaps_input *req;
8730 	int rc;
8731 
8732 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8733 	    (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8734 		return 0;
8735 
8736 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8737 		return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8738 
8739 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8740 	if (rc)
8741 		return rc;
8742 
8743 	resp = hwrm_req_hold(bp, req);
8744 	rc = hwrm_req_send_silent(bp, req);
8745 	if (!rc) {
8746 		struct bnxt_ctx_mem_type *ctxm;
8747 		struct bnxt_ctx_mem_info *ctx;
8748 		u8 init_val, init_idx = 0;
8749 		u16 init_mask;
8750 
8751 		ctx = bp->ctx;
8752 		if (!ctx) {
8753 			ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8754 			if (!ctx) {
8755 				rc = -ENOMEM;
8756 				goto ctx_err;
8757 			}
8758 			bp->ctx = ctx;
8759 		}
8760 		init_val = resp->ctx_kind_initializer;
8761 		init_mask = le16_to_cpu(resp->ctx_init_mask);
8762 
8763 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8764 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8765 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8766 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8767 		ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8768 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8769 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8770 					  (init_mask & (1 << init_idx++)) != 0);
8771 
8772 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8773 		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8774 		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8775 		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8776 		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8777 					  (init_mask & (1 << init_idx++)) != 0);
8778 
8779 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8780 		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8781 		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8782 		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8783 		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8784 					  (init_mask & (1 << init_idx++)) != 0);
8785 
8786 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8787 		ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8788 		ctxm->max_entries = ctxm->vnic_entries +
8789 			le16_to_cpu(resp->vnic_max_ring_table_entries);
8790 		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8791 		bnxt_init_ctx_initializer(ctxm, init_val,
8792 					  resp->vnic_init_offset,
8793 					  (init_mask & (1 << init_idx++)) != 0);
8794 
8795 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8796 		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8797 		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8798 		bnxt_init_ctx_initializer(ctxm, init_val,
8799 					  resp->stat_init_offset,
8800 					  (init_mask & (1 << init_idx++)) != 0);
8801 
8802 		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8803 		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8804 		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8805 		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8806 		ctxm->entry_multiple = resp->tqm_entries_multiple;
8807 		if (!ctxm->entry_multiple)
8808 			ctxm->entry_multiple = 1;
8809 
8810 		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8811 
8812 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8813 		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8814 		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8815 		ctxm->mrav_num_entries_units =
8816 			le16_to_cpu(resp->mrav_num_entries_units);
8817 		bnxt_init_ctx_initializer(ctxm, init_val,
8818 					  resp->mrav_init_offset,
8819 					  (init_mask & (1 << init_idx++)) != 0);
8820 
8821 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8822 		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8823 		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8824 
8825 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8826 		if (!ctx->tqm_fp_rings_count)
8827 			ctx->tqm_fp_rings_count = bp->max_q;
8828 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8829 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8830 
8831 		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8832 		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8833 		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8834 
8835 		rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8836 	} else {
8837 		rc = 0;
8838 	}
8839 ctx_err:
8840 	hwrm_req_drop(bp, req);
8841 	return rc;
8842 }
8843 
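/* Encode the ring's page size and page-table depth into @pg_attr and point
 * @pg_dir at the page table (depth >= 1) or at the single data page (depth 0).
 */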
8844 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8845 				  __le64 *pg_dir)
8846 {
8847 	if (!rmem->nr_pages)
8848 		return;
8849 
8850 	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8851 	if (rmem->depth >= 1) {
8852 		if (rmem->depth == 2)
8853 			*pg_attr |= 2;
8854 		else
8855 			*pg_attr |= 1;
8856 		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8857 	} else {
8858 		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8859 	}
8860 }
8861 
8862 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
8863 	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
8864 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
8865 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
8866 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
8867 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8868 
8869 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8870 {
8871 	struct hwrm_func_backing_store_cfg_input *req;
8872 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8873 	struct bnxt_ctx_pg_info *ctx_pg;
8874 	struct bnxt_ctx_mem_type *ctxm;
8875 	void **__req = (void **)&req;
8876 	u32 req_len = sizeof(*req);
8877 	__le32 *num_entries;
8878 	__le64 *pg_dir;
8879 	u32 flags = 0;
8880 	u8 *pg_attr;
8881 	u32 ena;
8882 	int rc;
8883 	int i;
8884 
8885 	if (!ctx)
8886 		return 0;
8887 
8888 	if (req_len > bp->hwrm_max_ext_req_len)
8889 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8890 	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8891 	if (rc)
8892 		return rc;
8893 
8894 	req->enables = cpu_to_le32(enables);
8895 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8896 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8897 		ctx_pg = ctxm->pg_info;
8898 		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8899 		req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8900 		req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8901 		req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8902 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8903 				      &req->qpc_pg_size_qpc_lvl,
8904 				      &req->qpc_page_dir);
8905 
8906 		if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8907 			req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8908 	}
8909 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8910 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8911 		ctx_pg = ctxm->pg_info;
8912 		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8913 		req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8914 		req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8915 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8916 				      &req->srq_pg_size_srq_lvl,
8917 				      &req->srq_page_dir);
8918 	}
8919 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8920 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8921 		ctx_pg = ctxm->pg_info;
8922 		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8923 		req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8924 		req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8925 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8926 				      &req->cq_pg_size_cq_lvl,
8927 				      &req->cq_page_dir);
8928 	}
8929 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8930 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8931 		ctx_pg = ctxm->pg_info;
8932 		req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8933 		req->vnic_num_ring_table_entries =
8934 			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8935 		req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8936 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8937 				      &req->vnic_pg_size_vnic_lvl,
8938 				      &req->vnic_page_dir);
8939 	}
8940 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8941 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8942 		ctx_pg = ctxm->pg_info;
8943 		req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8944 		req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8945 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8946 				      &req->stat_pg_size_stat_lvl,
8947 				      &req->stat_page_dir);
8948 	}
8949 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8950 		u32 units;
8951 
8952 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8953 		ctx_pg = ctxm->pg_info;
8954 		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8955 		units = ctxm->mrav_num_entries_units;
8956 		if (units) {
8957 			u32 num_mr, num_ah = ctxm->mrav_av_entries;
8958 			u32 entries;
8959 
8960 			num_mr = ctx_pg->entries - num_ah;
8961 			entries = ((num_mr / units) << 16) | (num_ah / units);
8962 			req->mrav_num_entries = cpu_to_le32(entries);
8963 			flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8964 		}
8965 		req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8966 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8967 				      &req->mrav_pg_size_mrav_lvl,
8968 				      &req->mrav_page_dir);
8969 	}
8970 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8971 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8972 		ctx_pg = ctxm->pg_info;
8973 		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8974 		req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8975 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8976 				      &req->tim_pg_size_tim_lvl,
8977 				      &req->tim_page_dir);
8978 	}
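	/* The first loop iteration programs the slow-path TQM ring (TQM_SP);
	 * each subsequent iteration programs one fast-path TQM ring, with the
	 * request fields and the enable bit advancing together.
	 */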
8979 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8980 	for (i = 0, num_entries = &req->tqm_sp_num_entries,
8981 	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8982 	     pg_dir = &req->tqm_sp_page_dir,
8983 	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8984 	     ctx_pg = ctxm->pg_info;
8985 	     i < BNXT_MAX_TQM_RINGS;
8986 	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8987 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8988 		if (!(enables & ena))
8989 			continue;
8990 
8991 		req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8992 		*num_entries = cpu_to_le32(ctx_pg->entries);
8993 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8994 	}
8995 	req->flags = cpu_to_le32(flags);
8996 	return hwrm_req_send(bp, req);
8997 }
8998 
8999 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
9000 				  struct bnxt_ctx_pg_info *ctx_pg)
9001 {
9002 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9003 
9004 	rmem->page_size = BNXT_PAGE_SIZE;
9005 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
9006 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
9007 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
9008 	if (rmem->depth >= 1)
9009 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
9010 	return bnxt_alloc_ring(bp, rmem);
9011 }
9012 
9013 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
9014 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
9015 				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
9016 {
9017 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9018 	int rc;
9019 
9020 	if (!mem_size)
9021 		return -EINVAL;
9022 
9023 	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9024 	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
9025 		ctx_pg->nr_pages = 0;
9026 		return -EINVAL;
9027 	}
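	/* A single level of indirection covers at most MAX_CTX_PAGES pages;
	 * larger regions (or an explicit depth > 1 request) use a two-level
	 * page table, one level-1 table per MAX_CTX_PAGES data pages.
	 */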
9028 	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
9029 		int nr_tbls, i;
9030 
9031 		rmem->depth = 2;
9032 		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
9033 					     GFP_KERNEL);
9034 		if (!ctx_pg->ctx_pg_tbl)
9035 			return -ENOMEM;
9036 		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
9037 		rmem->nr_pages = nr_tbls;
9038 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9039 		if (rc)
9040 			return rc;
9041 		for (i = 0; i < nr_tbls; i++) {
9042 			struct bnxt_ctx_pg_info *pg_tbl;
9043 
9044 			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
9045 			if (!pg_tbl)
9046 				return -ENOMEM;
9047 			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
9048 			rmem = &pg_tbl->ring_mem;
9049 			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
9050 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
9051 			rmem->depth = 1;
9052 			rmem->nr_pages = MAX_CTX_PAGES;
9053 			rmem->ctx_mem = ctxm;
9054 			if (i == (nr_tbls - 1)) {
9055 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
9056 
9057 				if (rem)
9058 					rmem->nr_pages = rem;
9059 			}
9060 			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
9061 			if (rc)
9062 				break;
9063 		}
9064 	} else {
9065 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9066 		if (rmem->nr_pages > 1 || depth)
9067 			rmem->depth = 1;
9068 		rmem->ctx_mem = ctxm;
9069 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9070 	}
9071 	return rc;
9072 }
9073 
9074 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9075 				    struct bnxt_ctx_pg_info *ctx_pg,
9076 				    void *buf, size_t offset, size_t head,
9077 				    size_t tail)
9078 {
9079 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9080 	size_t nr_pages = ctx_pg->nr_pages;
9081 	int page_size = rmem->page_size;
9082 	size_t len = 0, total_len = 0;
9083 	u16 depth = rmem->depth;
9084 
9085 	tail %= nr_pages * page_size;
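	/* Treat the context memory as a circular buffer of
	 * nr_pages * page_size bytes: copy from head toward tail, wrapping
	 * back to offset 0 at the end of the region.
	 */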
9086 	do {
9087 		if (depth > 1) {
9088 			int i = head / (page_size * MAX_CTX_PAGES);
9089 			struct bnxt_ctx_pg_info *pg_tbl;
9090 
9091 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
9092 			rmem = &pg_tbl->ring_mem;
9093 		}
9094 		len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9095 		head += len;
9096 		offset += len;
9097 		total_len += len;
9098 		if (head >= nr_pages * page_size)
9099 			head = 0;
9100 	} while (head != tail);
9101 	return total_len;
9102 }
9103 
9104 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9105 				  struct bnxt_ctx_pg_info *ctx_pg)
9106 {
9107 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9108 
9109 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9110 	    ctx_pg->ctx_pg_tbl) {
9111 		int i, nr_tbls = rmem->nr_pages;
9112 
9113 		for (i = 0; i < nr_tbls; i++) {
9114 			struct bnxt_ctx_pg_info *pg_tbl;
9115 			struct bnxt_ring_mem_info *rmem2;
9116 
9117 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
9118 			if (!pg_tbl)
9119 				continue;
9120 			rmem2 = &pg_tbl->ring_mem;
9121 			bnxt_free_ring(bp, rmem2);
9122 			ctx_pg->ctx_pg_arr[i] = NULL;
9123 			kfree(pg_tbl);
9124 			ctx_pg->ctx_pg_tbl[i] = NULL;
9125 		}
9126 		kfree(ctx_pg->ctx_pg_tbl);
9127 		ctx_pg->ctx_pg_tbl = NULL;
9128 	}
9129 	bnxt_free_ring(bp, rmem);
9130 	ctx_pg->nr_pages = 0;
9131 }
9132 
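/* Round the requested entry count to the type's multiple, clamp it to the
 * supported range, and allocate page tables for each instance of the type.
 */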
9133 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9134 				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
9135 				   u8 pg_lvl)
9136 {
9137 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9138 	int i, rc = 0, n = 1;
9139 	u32 mem_size;
9140 
9141 	if (!ctxm->entry_size || !ctx_pg)
9142 		return -EINVAL;
9143 	if (ctxm->instance_bmap)
9144 		n = hweight32(ctxm->instance_bmap);
9145 	if (ctxm->entry_multiple)
9146 		entries = roundup(entries, ctxm->entry_multiple);
9147 	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9148 	mem_size = entries * ctxm->entry_size;
9149 	for (i = 0; i < n && !rc; i++) {
9150 		ctx_pg[i].entries = entries;
9151 		rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9152 					    ctxm->init_value ? ctxm : NULL);
9153 	}
9154 	if (!rc)
9155 		ctxm->mem_valid = 1;
9156 	return rc;
9157 }
9158 
9159 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9160 					       struct bnxt_ctx_mem_type *ctxm,
9161 					       bool last)
9162 {
9163 	struct hwrm_func_backing_store_cfg_v2_input *req;
9164 	u32 instance_bmap = ctxm->instance_bmap;
9165 	int i, j, rc = 0, n = 1;
9166 	__le32 *p;
9167 
9168 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9169 		return 0;
9170 
9171 	if (instance_bmap)
9172 		n = hweight32(ctxm->instance_bmap);
9173 	else
9174 		instance_bmap = 1;
9175 
9176 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9177 	if (rc)
9178 		return rc;
9179 	hwrm_req_hold(bp, req);
9180 	req->type = cpu_to_le16(ctxm->type);
9181 	req->entry_size = cpu_to_le16(ctxm->entry_size);
9182 	if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9183 	    bnxt_bs_trace_avail(bp, ctxm->type)) {
9184 		struct bnxt_bs_trace_info *bs_trace;
9185 		u32 enables;
9186 
9187 		enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9188 		req->enables = cpu_to_le32(enables);
9189 		bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9190 		req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9191 	}
9192 	req->subtype_valid_cnt = ctxm->split_entry_cnt;
9193 	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9194 		p[i] = cpu_to_le32(ctxm->split[i]);
9195 	for (i = 0, j = 0; j < n && !rc; i++) {
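	/* Send one CFG_V2 request per instance set in instance_bmap; the
	 * final request for the last context type sets BS_CFG_ALL_DONE.
	 */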
9196 		struct bnxt_ctx_pg_info *ctx_pg;
9197 
9198 		if (!(instance_bmap & (1 << i)))
9199 			continue;
9200 		req->instance = cpu_to_le16(i);
9201 		ctx_pg = &ctxm->pg_info[j++];
9202 		if (!ctx_pg->entries)
9203 			continue;
9204 		req->num_entries = cpu_to_le32(ctx_pg->entries);
9205 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9206 				      &req->page_size_pbl_level,
9207 				      &req->page_dir);
9208 		if (last && j == n)
9209 			req->flags =
9210 				cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9211 		rc = hwrm_req_send(bp, req);
9212 	}
9213 	hwrm_req_drop(bp, req);
9214 	return rc;
9215 }
9216 
9217 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9218 {
9219 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
9220 	struct bnxt_ctx_mem_type *ctxm;
9221 	u16 last_type = BNXT_CTX_INV;
9222 	int rc = 0;
9223 	u16 type;
9224 
9225 	for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
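	/* Allocate backing store for any supported FW trace types first and
	 * remember the last type configured so that it can carry the
	 * BS_CFG_ALL_DONE flag.
	 */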
9226 		ctxm = &ctx->ctx_arr[type];
9227 		if (!bnxt_bs_trace_avail(bp, type))
9228 			continue;
9229 		if (!ctxm->mem_valid) {
9230 			rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9231 						     ctxm->max_entries, 1);
9232 			if (rc) {
9233 				netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9234 					    type);
9235 				continue;
9236 			}
9237 			bnxt_bs_trace_init(bp, ctxm);
9238 		}
9239 		last_type = type;
9240 	}
9241 
9242 	if (last_type == BNXT_CTX_INV) {
9243 		for (type = 0; type < BNXT_CTX_MAX; type++) {
9244 			ctxm = &ctx->ctx_arr[type];
9245 			if (ctxm->mem_valid)
9246 				last_type = type;
9247 		}
9248 		if (last_type == BNXT_CTX_INV)
9249 			return 0;
9250 	}
9251 	ctx->ctx_arr[last_type].last = 1;
9252 
9253 	for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
9254 		ctxm = &ctx->ctx_arr[type];
9255 
9256 		if (!ctxm->mem_valid)
9257 			continue;
9258 		rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9259 		if (rc)
9260 			return rc;
9261 	}
9262 	return 0;
9263 }
9264 
9265 /**
9266  * __bnxt_copy_ctx_mem - copy host context memory
9267  * @bp: The driver context
9268  * @ctxm: The pointer to the context memory type
9269  * @buf: The destination buffer or NULL to just obtain the length
9270  * @offset: The buffer offset to copy the data to
9271  * @head: The head offset of context memory to copy from
9272  * @tail: The tail offset (last byte + 1) of context memory to end the copy
9273  *
9274  * This function is called for debugging purposes to dump the host context
9275  * used by the chip.
9276  *
9277  * Return: Length of memory copied
9278  */
9279 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9280 				  struct bnxt_ctx_mem_type *ctxm, void *buf,
9281 				  size_t offset, size_t head, size_t tail)
9282 {
9283 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9284 	size_t len = 0, total_len = 0;
9285 	int i, n = 1;
9286 
9287 	if (!ctx_pg)
9288 		return 0;
9289 
9290 	if (ctxm->instance_bmap)
9291 		n = hweight32(ctxm->instance_bmap);
9292 	for (i = 0; i < n; i++) {
9293 		len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9294 					    tail);
9295 		offset += len;
9296 		total_len += len;
9297 	}
9298 	return total_len;
9299 }
9300 
9301 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9302 			 void *buf, size_t offset)
9303 {
9304 	size_t tail = ctxm->max_entries * ctxm->entry_size;
9305 
9306 	return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9307 }
9308 
9309 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9310 				  struct bnxt_ctx_mem_type *ctxm, bool force)
9311 {
9312 	struct bnxt_ctx_pg_info *ctx_pg;
9313 	int i, n = 1;
9314 
9315 	ctxm->last = 0;
9316 
9317 	if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9318 		return;
9319 
9320 	ctx_pg = ctxm->pg_info;
9321 	if (ctx_pg) {
9322 		if (ctxm->instance_bmap)
9323 			n = hweight32(ctxm->instance_bmap);
9324 		for (i = 0; i < n; i++)
9325 			bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9326 
9327 		kfree(ctx_pg);
9328 		ctxm->pg_info = NULL;
9329 		ctxm->mem_valid = 0;
9330 	}
9331 	memset(ctxm, 0, sizeof(*ctxm));
9332 }
9333 
9334 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9335 {
9336 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
9337 	u16 type;
9338 
9339 	if (!ctx)
9340 		return;
9341 
9342 	for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9343 		bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9344 
9345 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9346 	if (force) {
9347 		kfree(ctx);
9348 		bp->ctx = NULL;
9349 	}
9350 }
9351 
9352 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9353 {
9354 	struct bnxt_ctx_mem_type *ctxm;
9355 	struct bnxt_ctx_mem_info *ctx;
9356 	u32 l2_qps, qp1_qps, max_qps;
9357 	u32 ena, entries_sp, entries;
9358 	u32 srqs, max_srqs, min;
9359 	u32 num_mr, num_ah;
9360 	u32 extra_srqs = 0;
9361 	u32 extra_qps = 0;
9362 	u32 fast_qpmd_qps;
9363 	u8 pg_lvl = 1;
9364 	int i, rc;
9365 
9366 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9367 	if (rc) {
9368 		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9369 			   rc);
9370 		return rc;
9371 	}
9372 	ctx = bp->ctx;
9373 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9374 		return 0;
9375 
9376 	ena = 0;
9377 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9378 		goto skip_legacy;
9379 
9380 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9381 	l2_qps = ctxm->qp_l2_entries;
9382 	qp1_qps = ctxm->qp_qp1_entries;
9383 	fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9384 	max_qps = ctxm->max_entries;
9385 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9386 	srqs = ctxm->srq_l2_entries;
9387 	max_srqs = ctxm->max_entries;
9388 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
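	/* With RoCE support, reserve extra QP and SRQ context beyond the L2
	 * needs; cap the extras at 64K QPs and 8K SRQs unless SW resource
	 * limits are in effect, in which case the full reported maximums
	 * are used.
	 */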
9389 		pg_lvl = 2;
9390 		if (BNXT_SW_RES_LMT(bp)) {
9391 			extra_qps = max_qps - l2_qps - qp1_qps;
9392 			extra_srqs = max_srqs - srqs;
9393 		} else {
9394 			extra_qps = min_t(u32, 65536,
9395 					  max_qps - l2_qps - qp1_qps);
9396 			/* allocate extra QPs if the fw supports the RoCE
9397 			 * fast QP destroy feature
9398 			 */
9399 			extra_qps += fast_qpmd_qps;
9400 			extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9401 		}
9402 		if (fast_qpmd_qps)
9403 			ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9404 	}
9405 
9406 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9407 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9408 				     pg_lvl);
9409 	if (rc)
9410 		return rc;
9411 
9412 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9413 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9414 	if (rc)
9415 		return rc;
9416 
9417 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9418 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9419 				     extra_qps * 2, pg_lvl);
9420 	if (rc)
9421 		return rc;
9422 
9423 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9424 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9425 	if (rc)
9426 		return rc;
9427 
9428 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9429 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9430 	if (rc)
9431 		return rc;
9432 
9433 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9434 		goto skip_rdma;
9435 
9436 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9437 	if (BNXT_SW_RES_LMT(bp) &&
9438 	    ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9439 		num_ah = ctxm->mrav_av_entries;
9440 		num_mr = ctxm->max_entries - num_ah;
9441 	} else {
9442 		/* 128K extra is needed to accommodate static AH context
9443 		 * allocation by f/w.
9444 		 */
9445 		num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9446 		num_ah = min_t(u32, num_mr, 1024 * 128);
9447 		ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9448 		if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9449 			ctxm->mrav_av_entries = num_ah;
9450 	}
9451 
9452 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9453 	if (rc)
9454 		return rc;
9455 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9456 
9457 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9458 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9459 	if (rc)
9460 		return rc;
9461 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9462 
9463 skip_rdma:
9464 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9465 	min = ctxm->min_entries;
9466 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9467 		     2 * (extra_qps + qp1_qps) + min;
9468 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9469 	if (rc)
9470 		return rc;
9471 
9472 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9473 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
9474 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9475 	if (rc)
9476 		return rc;
9477 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9478 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9479 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9480 
9481 skip_legacy:
9482 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9483 		rc = bnxt_backing_store_cfg_v2(bp);
9484 	else
9485 		rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9486 	if (rc) {
9487 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9488 			   rc);
9489 		return rc;
9490 	}
9491 	ctx->flags |= BNXT_CTX_FLAG_INITED;
9492 	return 0;
9493 }
9494 
9495 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9496 {
9497 	struct hwrm_dbg_crashdump_medium_cfg_input *req;
9498 	u16 page_attr;
9499 	int rc;
9500 
9501 	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9502 		return 0;
9503 
9504 	rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9505 	if (rc)
9506 		return rc;
9507 
9508 	if (BNXT_PAGE_SIZE == 0x2000)
9509 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9510 	else if (BNXT_PAGE_SIZE == 0x10000)
9511 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9512 	else
9513 		page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9514 	req->pg_size_lvl = cpu_to_le16(page_attr |
9515 				       bp->fw_crash_mem->ring_mem.depth);
9516 	req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9517 	req->size = cpu_to_le32(bp->fw_crash_len);
9518 	req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9519 	return hwrm_req_send(bp, req);
9520 }
9521 
9522 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9523 {
9524 	if (bp->fw_crash_mem) {
9525 		bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9526 		kfree(bp->fw_crash_mem);
9527 		bp->fw_crash_mem = NULL;
9528 	}
9529 }
9530 
9531 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9532 {
9533 	u32 mem_size = 0;
9534 	int rc;
9535 
9536 	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9537 		return 0;
9538 
9539 	rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9540 	if (rc)
9541 		return rc;
9542 
9543 	mem_size = round_up(mem_size, 4);
9544 
9545 	/* keep and use the existing pages */
9546 	if (bp->fw_crash_mem &&
9547 	    mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9548 		goto alloc_done;
9549 
9550 	if (bp->fw_crash_mem)
9551 		bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9552 	else
9553 		bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
9554 					   GFP_KERNEL);
9555 	if (!bp->fw_crash_mem)
9556 		return -ENOMEM;
9557 
9558 	rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9559 	if (rc) {
9560 		bnxt_free_crash_dump_mem(bp);
9561 		return rc;
9562 	}
9563 
9564 alloc_done:
9565 	bp->fw_crash_len = mem_size;
9566 	return 0;
9567 }
9568 
9569 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9570 {
9571 	struct hwrm_func_resource_qcaps_output *resp;
9572 	struct hwrm_func_resource_qcaps_input *req;
9573 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9574 	int rc;
9575 
9576 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9577 	if (rc)
9578 		return rc;
9579 
9580 	req->fid = cpu_to_le16(0xffff);
9581 	resp = hwrm_req_hold(bp, req);
9582 	rc = hwrm_req_send_silent(bp, req);
9583 	if (rc)
9584 		goto hwrm_func_resc_qcaps_exit;
9585 
9586 	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9587 	if (!all)
9588 		goto hwrm_func_resc_qcaps_exit;
9589 
9590 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9591 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9592 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9593 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9594 	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9595 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9596 	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9597 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9598 	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9599 	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9600 	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9601 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9602 	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9603 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9604 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9605 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9606 
9607 	if (hw_resc->max_rsscos_ctxs >=
9608 	    hw_resc->max_vnics * BNXT_LARGE_RSS_TO_VNIC_RATIO)
9609 		bp->rss_cap |= BNXT_RSS_CAP_LARGE_RSS_CTX;
9610 
9611 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9612 		u16 max_msix = le16_to_cpu(resp->max_msix);
9613 
9614 		hw_resc->max_nqs = max_msix;
9615 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9616 	}
9617 
9618 	if (BNXT_PF(bp)) {
9619 		struct bnxt_pf_info *pf = &bp->pf;
9620 
9621 		pf->vf_resv_strategy =
9622 			le16_to_cpu(resp->vf_reservation_strategy);
9623 		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9624 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9625 	}
9626 hwrm_func_resc_qcaps_exit:
9627 	hwrm_req_drop(bp, req);
9628 	return rc;
9629 }
9630 
9631 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9632 {
9633 	struct hwrm_port_mac_ptp_qcfg_output *resp;
9634 	struct hwrm_port_mac_ptp_qcfg_input *req;
9635 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9636 	u8 flags;
9637 	int rc;
9638 
9639 	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9640 		rc = -ENODEV;
9641 		goto no_ptp;
9642 	}
9643 
9644 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9645 	if (rc)
9646 		goto no_ptp;
9647 
9648 	req->port_id = cpu_to_le16(bp->pf.port_id);
9649 	resp = hwrm_req_hold(bp, req);
9650 	rc = hwrm_req_send(bp, req);
9651 	if (rc)
9652 		goto exit;
9653 
9654 	flags = resp->flags;
9655 	if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9656 	    !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9657 		rc = -ENODEV;
9658 		goto exit;
9659 	}
9660 	if (!ptp) {
9661 		ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
9662 		if (!ptp) {
9663 			rc = -ENOMEM;
9664 			goto exit;
9665 		}
9666 		ptp->bp = bp;
9667 		bp->ptp_cfg = ptp;
9668 	}
9669 
9670 	if (flags &
9671 	    (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9672 	     PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9673 		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9674 		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9675 	} else if (BNXT_CHIP_P5(bp)) {
9676 		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9677 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9678 	} else {
9679 		rc = -ENODEV;
9680 		goto exit;
9681 	}
9682 	ptp->rtc_configured =
9683 		(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9684 	rc = bnxt_ptp_init(bp);
9685 	if (rc)
9686 		netdev_warn(bp->dev, "PTP initialization failed.\n");
9687 exit:
9688 	hwrm_req_drop(bp, req);
9689 	if (!rc)
9690 		return 0;
9691 
9692 no_ptp:
9693 	bnxt_ptp_clear(bp);
9694 	kfree(ptp);
9695 	bp->ptp_cfg = NULL;
9696 	return rc;
9697 }
9698 
9699 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9700 {
9701 	u32 flags, flags_ext, flags_ext2, flags_ext3;
9702 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9703 	struct hwrm_func_qcaps_output *resp;
9704 	struct hwrm_func_qcaps_input *req;
9705 	int rc;
9706 
9707 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9708 	if (rc)
9709 		return rc;
9710 
9711 	req->fid = cpu_to_le16(0xffff);
9712 	resp = hwrm_req_hold(bp, req);
9713 	rc = hwrm_req_send(bp, req);
9714 	if (rc)
9715 		goto hwrm_func_qcaps_exit;
9716 
9717 	flags = le32_to_cpu(resp->flags);
9718 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9719 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9720 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9721 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9722 	if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
9723 		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
9724 	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9725 		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9726 	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9727 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9728 	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9729 		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9730 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9731 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9732 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9733 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9734 	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9735 		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9736 	if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9737 		bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9738 
9739 	flags_ext = le32_to_cpu(resp->flags_ext);
9740 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9741 		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9742 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9743 		bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9744 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED)
9745 		bp->fw_cap |= BNXT_FW_CAP_PTP_PTM;
9746 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9747 		bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9748 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9749 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9750 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9751 		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9752 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9753 		bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9754 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9755 		bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9756 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9757 		bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9758 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9759 		bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9760 
9761 	flags_ext2 = le32_to_cpu(resp->flags_ext2);
9762 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9763 		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9764 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9765 		bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9766 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9767 		bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9768 	if (flags_ext2 &
9769 	    FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9770 		bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9771 	if (BNXT_PF(bp) &&
9772 	    (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9773 		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9774 
9775 	flags_ext3 = le32_to_cpu(resp->flags_ext3);
9776 	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9777 		bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9778 	if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9779 		bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9780 
9781 	bp->tx_push_thresh = 0;
9782 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9783 	    BNXT_FW_MAJ(bp) > 217)
9784 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9785 
9786 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9787 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9788 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9789 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9790 	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9791 	if (!hw_resc->max_hw_ring_grps)
9792 		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9793 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9794 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9795 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9796 
9797 	hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9798 	hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9799 	hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9800 	hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9801 	hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9802 	hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9803 
9804 	if (BNXT_PF(bp)) {
9805 		struct bnxt_pf_info *pf = &bp->pf;
9806 
9807 		pf->fw_fid = le16_to_cpu(resp->fid);
9808 		pf->port_id = le16_to_cpu(resp->port_id);
9809 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9810 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9811 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
9812 		bp->flags &= ~BNXT_FLAG_WOL_CAP;
9813 		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9814 			bp->flags |= BNXT_FLAG_WOL_CAP;
9815 		if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9816 			bp->fw_cap |= BNXT_FW_CAP_PTP;
9817 		} else {
9818 			bnxt_ptp_clear(bp);
9819 			kfree(bp->ptp_cfg);
9820 			bp->ptp_cfg = NULL;
9821 		}
9822 	} else {
9823 #ifdef CONFIG_BNXT_SRIOV
9824 		struct bnxt_vf_info *vf = &bp->vf;
9825 
9826 		vf->fw_fid = le16_to_cpu(resp->fid);
9827 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9828 #endif
9829 	}
9830 	bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9831 
9832 hwrm_func_qcaps_exit:
9833 	hwrm_req_drop(bp, req);
9834 	return rc;
9835 }
9836 
9837 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9838 {
9839 	struct hwrm_dbg_qcaps_output *resp;
9840 	struct hwrm_dbg_qcaps_input *req;
9841 	int rc;
9842 
9843 	bp->fw_dbg_cap = 0;
9844 	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9845 		return;
9846 
9847 	rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9848 	if (rc)
9849 		return;
9850 
9851 	req->fid = cpu_to_le16(0xffff);
9852 	resp = hwrm_req_hold(bp, req);
9853 	rc = hwrm_req_send(bp, req);
9854 	if (rc)
9855 		goto hwrm_dbg_qcaps_exit;
9856 
9857 	bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9858 
9859 hwrm_dbg_qcaps_exit:
9860 	hwrm_req_drop(bp, req);
9861 }
9862 
9863 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9864 
9865 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9866 {
9867 	int rc;
9868 
9869 	rc = __bnxt_hwrm_func_qcaps(bp);
9870 	if (rc)
9871 		return rc;
9872 
9873 	bnxt_hwrm_dbg_qcaps(bp);
9874 
9875 	rc = bnxt_hwrm_queue_qportcfg(bp);
9876 	if (rc) {
9877 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9878 		return rc;
9879 	}
9880 	if (bp->hwrm_spec_code >= 0x10803) {
9881 		rc = bnxt_alloc_ctx_mem(bp);
9882 		if (rc)
9883 			return rc;
9884 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9885 		if (!rc)
9886 			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9887 	}
9888 	return 0;
9889 }
9890 
9891 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9892 {
9893 	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9894 	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9895 	u32 flags;
9896 	int rc;
9897 
9898 	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9899 		return 0;
9900 
9901 	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9902 	if (rc)
9903 		return rc;
9904 
9905 	resp = hwrm_req_hold(bp, req);
9906 	rc = hwrm_req_send(bp, req);
9907 	if (rc)
9908 		goto hwrm_cfa_adv_qcaps_exit;
9909 
9910 	flags = le32_to_cpu(resp->flags);
9911 	if (flags &
9912 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9913 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9914 
9915 	if (flags &
9916 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9917 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9918 
9919 	if (flags &
9920 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9921 		bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9922 
9923 hwrm_cfa_adv_qcaps_exit:
9924 	hwrm_req_drop(bp, req);
9925 	return rc;
9926 }
9927 
9928 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9929 {
9930 	if (bp->fw_health)
9931 		return 0;
9932 
9933 	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9934 	if (!bp->fw_health)
9935 		return -ENOMEM;
9936 
9937 	mutex_init(&bp->fw_health->lock);
9938 	return 0;
9939 }
9940 
9941 static int bnxt_alloc_fw_health(struct bnxt *bp)
9942 {
9943 	int rc;
9944 
9945 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9946 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9947 		return 0;
9948 
9949 	rc = __bnxt_alloc_fw_health(bp);
9950 	if (rc) {
9951 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9952 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9953 		return rc;
9954 	}
9955 
9956 	return 0;
9957 }
9958 
9959 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9960 {
9961 	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9962 					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9963 					 BNXT_FW_HEALTH_WIN_MAP_OFF);
9964 }
9965 
9966 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9967 {
9968 	struct bnxt_fw_health *fw_health = bp->fw_health;
9969 	u32 reg_type;
9970 
9971 	if (!fw_health)
9972 		return;
9973 
9974 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9975 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9976 		fw_health->status_reliable = false;
9977 
9978 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9979 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9980 		fw_health->resets_reliable = false;
9981 }
9982 
9983 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9984 {
9985 	void __iomem *hs;
9986 	u32 status_loc;
9987 	u32 reg_type;
9988 	u32 sig;
9989 
9990 	if (bp->fw_health)
9991 		bp->fw_health->status_reliable = false;
9992 
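	/* Look for the hcomm status structure by its signature; if it is not
	 * present, fall back to the fixed GRC status register on P5+ chips.
	 */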
9993 	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9994 	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9995 
9996 	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9997 	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9998 		if (!bp->chip_num) {
9999 			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
10000 			bp->chip_num = readl(bp->bar0 +
10001 					     BNXT_FW_HEALTH_WIN_BASE +
10002 					     BNXT_GRC_REG_CHIP_NUM);
10003 		}
10004 		if (!BNXT_CHIP_P5_PLUS(bp))
10005 			return;
10006 
10007 		status_loc = BNXT_GRC_REG_STATUS_P5 |
10008 			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
10009 	} else {
10010 		status_loc = readl(hs + offsetof(struct hcomm_status,
10011 						 fw_status_loc));
10012 	}
10013 
10014 	if (__bnxt_alloc_fw_health(bp)) {
10015 		netdev_warn(bp->dev, "no memory for firmware status checks\n");
10016 		return;
10017 	}
10018 
10019 	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
10020 	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
10021 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
10022 		__bnxt_map_fw_health_reg(bp, status_loc);
10023 		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
10024 			BNXT_FW_HEALTH_WIN_OFF(status_loc);
10025 	}
10026 
10027 	bp->fw_health->status_reliable = true;
10028 }
10029 
10030 static int bnxt_map_fw_health_regs(struct bnxt *bp)
10031 {
10032 	struct bnxt_fw_health *fw_health = bp->fw_health;
10033 	u32 reg_base = 0xffffffff;
10034 	int i;
10035 
10036 	bp->fw_health->status_reliable = false;
10037 	bp->fw_health->resets_reliable = false;
10038 	/* Only pre-map the monitoring GRC registers using window 3 */
10039 	for (i = 0; i < 4; i++) {
10040 		u32 reg = fw_health->regs[i];
10041 
10042 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
10043 			continue;
10044 		if (reg_base == 0xffffffff)
10045 			reg_base = reg & BNXT_GRC_BASE_MASK;
10046 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
10047 			return -ERANGE;
10048 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
10049 	}
10050 	bp->fw_health->status_reliable = true;
10051 	bp->fw_health->resets_reliable = true;
10052 	if (reg_base == 0xffffffff)
10053 		return 0;
10054 
10055 	__bnxt_map_fw_health_reg(bp, reg_base);
10056 	return 0;
10057 }
10058 
10059 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
10060 {
10061 	if (!bp->fw_health)
10062 		return;
10063 
10064 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
10065 		bp->fw_health->status_reliable = true;
10066 		bp->fw_health->resets_reliable = true;
10067 	} else {
10068 		bnxt_try_map_fw_health_reg(bp);
10069 	}
10070 }
10071 
10072 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10073 {
10074 	struct bnxt_fw_health *fw_health = bp->fw_health;
10075 	struct hwrm_error_recovery_qcfg_output *resp;
10076 	struct hwrm_error_recovery_qcfg_input *req;
10077 	int rc, i;
10078 
10079 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10080 		return 0;
10081 
10082 	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10083 	if (rc)
10084 		return rc;
10085 
10086 	resp = hwrm_req_hold(bp, req);
10087 	rc = hwrm_req_send(bp, req);
10088 	if (rc)
10089 		goto err_recovery_out;
10090 	fw_health->flags = le32_to_cpu(resp->flags);
10091 	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10092 	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10093 		rc = -EINVAL;
10094 		goto err_recovery_out;
10095 	}
10096 	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10097 	fw_health->master_func_wait_dsecs =
10098 		le32_to_cpu(resp->master_func_wait_period);
10099 	fw_health->normal_func_wait_dsecs =
10100 		le32_to_cpu(resp->normal_func_wait_period);
10101 	fw_health->post_reset_wait_dsecs =
10102 		le32_to_cpu(resp->master_func_wait_period_after_reset);
10103 	fw_health->post_reset_max_wait_dsecs =
10104 		le32_to_cpu(resp->max_bailout_time_after_reset);
10105 	fw_health->regs[BNXT_FW_HEALTH_REG] =
10106 		le32_to_cpu(resp->fw_health_status_reg);
10107 	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10108 		le32_to_cpu(resp->fw_heartbeat_reg);
10109 	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10110 		le32_to_cpu(resp->fw_reset_cnt_reg);
10111 	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10112 		le32_to_cpu(resp->reset_inprogress_reg);
10113 	fw_health->fw_reset_inprog_reg_mask =
10114 		le32_to_cpu(resp->reset_inprogress_reg_mask);
10115 	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10116 	if (fw_health->fw_reset_seq_cnt >= 16) {
10117 		rc = -EINVAL;
10118 		goto err_recovery_out;
10119 	}
10120 	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10121 		fw_health->fw_reset_seq_regs[i] =
10122 			le32_to_cpu(resp->reset_reg[i]);
10123 		fw_health->fw_reset_seq_vals[i] =
10124 			le32_to_cpu(resp->reset_reg_val[i]);
10125 		fw_health->fw_reset_seq_delay_msec[i] =
10126 			resp->delay_after_reset[i];
10127 	}
10128 err_recovery_out:
10129 	hwrm_req_drop(bp, req);
10130 	if (!rc)
10131 		rc = bnxt_map_fw_health_regs(bp);
10132 	if (rc)
10133 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10134 	return rc;
10135 }
10136 
10137 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10138 {
10139 	struct hwrm_func_reset_input *req;
10140 	int rc;
10141 
10142 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10143 	if (rc)
10144 		return rc;
10145 
10146 	req->enables = 0;
10147 	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10148 	return hwrm_req_send(bp, req);
10149 }
10150 
10151 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10152 {
10153 	struct hwrm_nvm_get_dev_info_output nvm_info;
10154 
10155 	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10156 		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10157 			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10158 			 nvm_info.nvm_cfg_ver_upd);
10159 }
10160 
10161 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10162 {
10163 	struct hwrm_queue_qportcfg_output *resp;
10164 	struct hwrm_queue_qportcfg_input *req;
10165 	u8 i, j, *qptr;
10166 	bool no_rdma;
10167 	int rc = 0;
10168 
10169 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10170 	if (rc)
10171 		return rc;
10172 
10173 	resp = hwrm_req_hold(bp, req);
10174 	rc = hwrm_req_send(bp, req);
10175 	if (rc)
10176 		goto qportcfg_exit;
10177 
10178 	if (!resp->max_configurable_queues) {
10179 		rc = -EINVAL;
10180 		goto qportcfg_exit;
10181 	}
10182 	bp->max_tc = resp->max_configurable_queues;
10183 	bp->max_lltc = resp->max_configurable_lossless_queues;
10184 	if (bp->max_tc > BNXT_MAX_QUEUE)
10185 		bp->max_tc = BNXT_MAX_QUEUE;
10186 
10187 	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10188 	qptr = &resp->queue_id0;
10189 	for (i = 0, j = 0; i < bp->max_tc; i++) {
10190 		bp->q_info[j].queue_id = *qptr;
10191 		bp->q_ids[i] = *qptr++;
10192 		bp->q_info[j].queue_profile = *qptr++;
10193 		bp->tc_to_qidx[j] = j;
10194 		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10195 		    (no_rdma && BNXT_PF(bp)))
10196 			j++;
10197 	}
10198 	bp->max_q = bp->max_tc;
10199 	bp->max_tc = max_t(u8, j, 1);
10200 
10201 	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10202 		bp->max_tc = 1;
10203 
10204 	if (bp->max_lltc > bp->max_tc)
10205 		bp->max_lltc = bp->max_tc;
10206 
10207 qportcfg_exit:
10208 	hwrm_req_drop(bp, req);
10209 	return rc;
10210 }
10211 
10212 static int bnxt_hwrm_poll(struct bnxt *bp)
10213 {
10214 	struct hwrm_ver_get_input *req;
10215 	int rc;
10216 
10217 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10218 	if (rc)
10219 		return rc;
10220 
10221 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10222 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
10223 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10224 
10225 	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10226 	rc = hwrm_req_send(bp, req);
10227 	return rc;
10228 }
10229 
10230 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10231 {
10232 	struct hwrm_ver_get_output *resp;
10233 	struct hwrm_ver_get_input *req;
10234 	u16 fw_maj, fw_min, fw_bld, fw_rsv;
10235 	u32 dev_caps_cfg, hwrm_ver;
10236 	int rc, len, max_tmo_secs;
10237 
10238 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10239 	if (rc)
10240 		return rc;
10241 
10242 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10243 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10244 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10245 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
10246 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10247 
10248 	resp = hwrm_req_hold(bp, req);
10249 	rc = hwrm_req_send(bp, req);
10250 	if (rc)
10251 		goto hwrm_ver_get_exit;
10252 
10253 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10254 
10255 	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10256 			     resp->hwrm_intf_min_8b << 8 |
10257 			     resp->hwrm_intf_upd_8b;
10258 	if (resp->hwrm_intf_maj_8b < 1) {
10259 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10260 			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10261 			    resp->hwrm_intf_upd_8b);
10262 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10263 	}
10264 
10265 	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10266 			HWRM_VERSION_UPDATE;
10267 
10268 	if (bp->hwrm_spec_code > hwrm_ver)
10269 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10270 			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10271 			 HWRM_VERSION_UPDATE);
10272 	else
10273 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10274 			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10275 			 resp->hwrm_intf_upd_8b);
10276 
10277 	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10278 	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10279 		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10280 		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10281 		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10282 		len = FW_VER_STR_LEN;
10283 	} else {
10284 		fw_maj = resp->hwrm_fw_maj_8b;
10285 		fw_min = resp->hwrm_fw_min_8b;
10286 		fw_bld = resp->hwrm_fw_bld_8b;
10287 		fw_rsv = resp->hwrm_fw_rsvd_8b;
10288 		len = BC_HWRM_STR_LEN;
10289 	}
10290 	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10291 	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10292 		 fw_rsv);
10293 
10294 	if (strlen(resp->active_pkg_name)) {
10295 		int fw_ver_len = strlen(bp->fw_ver_str);
10296 
10297 		snprintf(bp->fw_ver_str + fw_ver_len,
10298 			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10299 			 resp->active_pkg_name);
10300 		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10301 	}
10302 
10303 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10304 	if (!bp->hwrm_cmd_timeout)
10305 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10306 	bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10307 	if (!bp->hwrm_cmd_max_timeout)
10308 		bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10309 	max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10310 #ifdef CONFIG_DETECT_HUNG_TASK
10311 	if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10312 	    max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10313 		netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10314 			    max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10315 	}
10316 #endif
10317 
10318 	if (resp->hwrm_intf_maj_8b >= 1) {
10319 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10320 		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10321 	}
10322 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10323 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10324 
10325 	bp->chip_num = le16_to_cpu(resp->chip_num);
10326 	bp->chip_rev = resp->chip_rev;
10327 	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10328 	    !resp->chip_metal)
10329 		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10330 
10331 	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10332 	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10333 	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10334 		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10335 
10336 	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10337 		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10338 
10339 	if (dev_caps_cfg &
10340 	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10341 		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10342 
10343 	if (dev_caps_cfg &
10344 	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10345 		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10346 
10347 	if (dev_caps_cfg &
10348 	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10349 		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10350 
10351 hwrm_ver_get_exit:
10352 	hwrm_req_drop(bp, req);
10353 	return rc;
10354 }
10355 
10356 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10357 {
10358 	struct hwrm_fw_set_time_input *req;
10359 	struct tm tm;
10360 	time64_t now = ktime_get_real_seconds();
10361 	int rc;
10362 
10363 	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10364 	    bp->hwrm_spec_code < 0x10400)
10365 		return -EOPNOTSUPP;
10366 
10367 	time64_to_tm(now, 0, &tm);
10368 	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10369 	if (rc)
10370 		return rc;
10371 
10372 	req->year = cpu_to_le16(1900 + tm.tm_year);
10373 	req->month = 1 + tm.tm_mon;
10374 	req->day = tm.tm_mday;
10375 	req->hour = tm.tm_hour;
10376 	req->minute = tm.tm_min;
10377 	req->second = tm.tm_sec;
10378 	return hwrm_req_send(bp, req);
10379 }
10380 
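/* Fold a hardware counter of width @mask into a 64-bit software counter,
 * adding (mask + 1) whenever the hardware value has wrapped around.
 */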
10381 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10382 {
10383 	u64 sw_tmp;
10384 
10385 	hw &= mask;
10386 	sw_tmp = (*sw & ~mask) | hw;
10387 	if (hw < (*sw & mask))
10388 		sw_tmp += mask + 1;
10389 	WRITE_ONCE(*sw, sw_tmp);
10390 }
10391 
10392 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10393 				    int count, bool ignore_zero)
10394 {
10395 	int i;
10396 
10397 	for (i = 0; i < count; i++) {
10398 		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10399 
10400 		if (ignore_zero && !hw)
10401 			continue;
10402 
10403 		if (masks[i] == -1ULL)
10404 			sw_stats[i] = hw;
10405 		else
10406 			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10407 	}
10408 }
10409 
10410 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10411 {
10412 	if (!stats->hw_stats)
10413 		return;
10414 
10415 	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10416 				stats->hw_masks, stats->len / 8, false);
10417 }
10418 
10419 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10420 {
10421 	struct bnxt_stats_mem *ring0_stats;
10422 	bool ignore_zero = false;
10423 	int i;
10424 
10425 	/* Chip bug.  Counter intermittently becomes 0. */
10426 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10427 		ignore_zero = true;
10428 
10429 	for (i = 0; i < bp->cp_nr_rings; i++) {
10430 		struct bnxt_napi *bnapi = bp->bnapi[i];
10431 		struct bnxt_cp_ring_info *cpr;
10432 		struct bnxt_stats_mem *stats;
10433 
10434 		cpr = &bnapi->cp_ring;
10435 		stats = &cpr->stats;
10436 		if (!i)
10437 			ring0_stats = stats;
10438 		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10439 					ring0_stats->hw_masks,
10440 					ring0_stats->len / 8, ignore_zero);
10441 	}
10442 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
10443 		struct bnxt_stats_mem *stats = &bp->port_stats;
10444 		__le64 *hw_stats = stats->hw_stats;
10445 		u64 *sw_stats = stats->sw_stats;
10446 		u64 *masks = stats->hw_masks;
10447 		int cnt;
10448 
10449 		cnt = sizeof(struct rx_port_stats) / 8;
10450 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10451 
10452 		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10453 		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10454 		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10455 		cnt = sizeof(struct tx_port_stats) / 8;
10456 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10457 	}
10458 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10459 		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10460 		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10461 	}
10462 }
10463 
10464 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10465 {
10466 	struct hwrm_port_qstats_input *req;
10467 	struct bnxt_pf_info *pf = &bp->pf;
10468 	int rc;
10469 
10470 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10471 		return 0;
10472 
10473 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10474 		return -EOPNOTSUPP;
10475 
10476 	rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10477 	if (rc)
10478 		return rc;
10479 
10480 	req->flags = flags;
10481 	req->port_id = cpu_to_le16(pf->port_id);
10482 	req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10483 					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
10484 	req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10485 	return hwrm_req_send(bp, req);
10486 }
10487 
10488 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10489 {
10490 	struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10491 	struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10492 	struct hwrm_port_qstats_ext_output *resp_qs;
10493 	struct hwrm_port_qstats_ext_input *req_qs;
10494 	struct bnxt_pf_info *pf = &bp->pf;
10495 	u32 tx_stat_size;
10496 	int rc;
10497 
10498 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10499 		return 0;
10500 
10501 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10502 		return -EOPNOTSUPP;
10503 
10504 	rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10505 	if (rc)
10506 		return rc;
10507 
10508 	req_qs->flags = flags;
10509 	req_qs->port_id = cpu_to_le16(pf->port_id);
10510 	req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10511 	req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10512 	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10513 		       sizeof(struct tx_port_stats_ext) : 0;
10514 	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10515 	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10516 	resp_qs = hwrm_req_hold(bp, req_qs);
10517 	rc = hwrm_req_send(bp, req_qs);
10518 	if (!rc) {
10519 		bp->fw_rx_stats_ext_size =
10520 			le16_to_cpu(resp_qs->rx_stat_size) / 8;
10521 		if (BNXT_FW_MAJ(bp) < 220 &&
10522 		    bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10523 			bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10524 
10525 		bp->fw_tx_stats_ext_size = tx_stat_size ?
10526 			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10527 	} else {
10528 		bp->fw_rx_stats_ext_size = 0;
10529 		bp->fw_tx_stats_ext_size = 0;
10530 	}
10531 	hwrm_req_drop(bp, req_qs);
10532 
10533 	if (flags)
10534 		return rc;
10535 
10536 	if (bp->fw_tx_stats_ext_size <=
10537 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10538 		bp->pri2cos_valid = 0;
10539 		return rc;
10540 	}
10541 
10542 	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10543 	if (rc)
10544 		return rc;
10545 
10546 	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10547 
10548 	resp_qc = hwrm_req_hold(bp, req_qc);
10549 	rc = hwrm_req_send(bp, req_qc);
10550 	if (!rc) {
10551 		u8 *pri2cos;
10552 		int i, j;
10553 
10554 		pri2cos = &resp_qc->pri0_cos_queue_id;
10555 		for (i = 0; i < 8; i++) {
10556 			u8 queue_id = pri2cos[i];
10557 			u8 queue_idx;
10558 
10559 			/* Per port queue IDs start from 0, 10, 20, etc */
10560 			queue_idx = queue_id % 10;
10561 			if (queue_idx > BNXT_MAX_QUEUE) {
10562 				bp->pri2cos_valid = false;
10563 				hwrm_req_drop(bp, req_qc);
10564 				return rc;
10565 			}
10566 			for (j = 0; j < bp->max_q; j++) {
10567 				if (bp->q_ids[j] == queue_id)
10568 					bp->pri2cos_idx[i] = queue_idx;
10569 			}
10570 		}
10571 		bp->pri2cos_valid = true;
10572 	}
10573 	hwrm_req_drop(bp, req_qc);
10574 
10575 	return rc;
10576 }
10577 
10578 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10579 {
10580 	bnxt_hwrm_tunnel_dst_port_free(bp,
10581 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10582 	bnxt_hwrm_tunnel_dst_port_free(bp,
10583 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10584 }
10585 
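/* Enable or disable TPA on all VNICs. */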
10586 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10587 {
10588 	int rc, i;
10589 	u32 tpa_flags = 0;
10590 
10591 	if (set_tpa)
10592 		tpa_flags = bp->flags & BNXT_FLAG_TPA;
10593 	else if (BNXT_NO_FW_ACCESS(bp))
10594 		return 0;
10595 	for (i = 0; i < bp->nr_vnics; i++) {
10596 		rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10597 		if (rc) {
10598 			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10599 				   i, rc);
10600 			return rc;
10601 		}
10602 	}
10603 	return 0;
10604 }
10605 
10606 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10607 {
10608 	int i;
10609 
10610 	for (i = 0; i < bp->nr_vnics; i++)
10611 		bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10612 }
10613 
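/* Tear down L2 filters, RSS contexts and TPA settings, then free all VNICs. */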
10614 static void bnxt_clear_vnic(struct bnxt *bp)
10615 {
10616 	if (!bp->vnic_info)
10617 		return;
10618 
10619 	bnxt_hwrm_clear_vnic_filter(bp);
10620 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10621 		/* clear all RSS settings before freeing the vnic contexts */
10622 		bnxt_hwrm_clear_vnic_rss(bp);
10623 		bnxt_hwrm_vnic_ctx_free(bp);
10624 	}
10625 	/* undo the vnic TPA settings before freeing the vnic */
10626 	if (bp->flags & BNXT_FLAG_TPA)
10627 		bnxt_set_tpa(bp, false);
10628 	bnxt_hwrm_vnic_free(bp);
10629 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10630 		bnxt_hwrm_vnic_ctx_free(bp);
10631 }
10632 
10633 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10634 				    bool irq_re_init)
10635 {
10636 	bnxt_clear_vnic(bp);
10637 	bnxt_hwrm_ring_free(bp, close_path);
10638 	bnxt_hwrm_ring_grp_free(bp);
10639 	if (irq_re_init) {
10640 		bnxt_hwrm_stat_ctx_free(bp);
10641 		bnxt_hwrm_free_tunnel_ports(bp);
10642 	}
10643 }
10644 
10645 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10646 {
10647 	struct hwrm_func_cfg_input *req;
10648 	u8 evb_mode;
10649 	int rc;
10650 
10651 	if (br_mode == BRIDGE_MODE_VEB)
10652 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10653 	else if (br_mode == BRIDGE_MODE_VEPA)
10654 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10655 	else
10656 		return -EINVAL;
10657 
10658 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10659 	if (rc)
10660 		return rc;
10661 
10662 	req->fid = cpu_to_le16(0xffff);
10663 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10664 	req->evb_mode = evb_mode;
10665 	return hwrm_req_send(bp, req);
10666 }
10667 
10668 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10669 {
10670 	struct hwrm_func_cfg_input *req;
10671 	int rc;
10672 
10673 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10674 		return 0;
10675 
10676 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10677 	if (rc)
10678 		return rc;
10679 
10680 	req->fid = cpu_to_le16(0xffff);
10681 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10682 	req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10683 	if (size == 128)
10684 		req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10685 
10686 	return hwrm_req_send(bp, req);
10687 }
10688 
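/* Set up a VNIC on pre-P5 chips: allocate the RSS (and CoS) contexts,
 * configure the VNIC, and enable RSS hashing (and HDS when aggregation
 * rings are in use).
 */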
10689 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10690 {
10691 	int rc;
10692 
10693 	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10694 		goto skip_rss_ctx;
10695 
10696 	/* allocate context for vnic */
10697 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10698 	if (rc) {
10699 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10700 			   vnic->vnic_id, rc);
10701 		goto vnic_setup_err;
10702 	}
10703 	bp->rsscos_nr_ctxs++;
10704 
10705 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10706 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10707 		if (rc) {
10708 			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10709 				   vnic->vnic_id, rc);
10710 			goto vnic_setup_err;
10711 		}
10712 		bp->rsscos_nr_ctxs++;
10713 	}
10714 
10715 skip_rss_ctx:
10716 	/* configure default vnic, ring grp */
10717 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10718 	if (rc) {
10719 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10720 			   vnic->vnic_id, rc);
10721 		goto vnic_setup_err;
10722 	}
10723 
10724 	/* Enable RSS hashing on vnic */
10725 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10726 	if (rc) {
10727 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10728 			   vnic->vnic_id, rc);
10729 		goto vnic_setup_err;
10730 	}
10731 
10732 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10733 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10734 		if (rc) {
10735 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10736 				   vnic->vnic_id, rc);
10737 		}
10738 	}
10739 
10740 vnic_setup_err:
10741 	return rc;
10742 }
10743 
10744 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10745 			  u8 valid)
10746 {
10747 	struct hwrm_vnic_update_input *req;
10748 	int rc;
10749 
10750 	rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10751 	if (rc)
10752 		return rc;
10753 
10754 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10755 
10756 	if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10757 		req->mru = cpu_to_le16(vnic->mru);
10758 
10759 	req->enables = cpu_to_le32(valid);
10760 
10761 	return hwrm_req_send(bp, req);
10762 }
10763 
10764 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10765 {
10766 	int rc;
10767 
10768 	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10769 	if (rc) {
10770 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10771 			   vnic->vnic_id, rc);
10772 		return rc;
10773 	}
10774 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10775 	if (rc)
10776 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10777 			   vnic->vnic_id, rc);
10778 	return rc;
10779 }
10780 
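/* Set up a VNIC on P5_PLUS chips: allocate the required RSS contexts,
 * then configure RSS and the VNIC (and HDS when aggregation rings are in
 * use).
 */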
10781 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10782 {
10783 	int rc, i, nr_ctxs;
10784 
10785 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10786 	for (i = 0; i < nr_ctxs; i++) {
10787 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10788 		if (rc) {
10789 			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10790 				   vnic->vnic_id, i, rc);
10791 			break;
10792 		}
10793 		bp->rsscos_nr_ctxs++;
10794 	}
10795 	if (i < nr_ctxs)
10796 		return -ENOMEM;
10797 
10798 	rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10799 	if (rc)
10800 		return rc;
10801 
10802 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10803 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10804 		if (rc) {
10805 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10806 				   vnic->vnic_id, rc);
10807 		}
10808 	}
10809 	return rc;
10810 }
10811 
10812 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10813 {
10814 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10815 		return __bnxt_setup_vnic_p5(bp, vnic);
10816 	else
10817 		return __bnxt_setup_vnic(bp, vnic);
10818 }
10819 
10820 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10821 				     struct bnxt_vnic_info *vnic,
10822 				     u16 start_rx_ring_idx, int rx_rings)
10823 {
10824 	int rc;
10825 
10826 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10827 	if (rc) {
10828 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10829 			   vnic->vnic_id, rc);
10830 		return rc;
10831 	}
10832 	return bnxt_setup_vnic(bp, vnic);
10833 }
10834 
10835 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10836 {
10837 	struct bnxt_vnic_info *vnic;
10838 	int i, rc = 0;
10839 
10840 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10841 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10842 		return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10843 	}
10844 
10845 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10846 		return 0;
10847 
10848 	for (i = 0; i < bp->rx_nr_rings; i++) {
10849 		u16 vnic_id = i + 1;
10850 		u16 ring_id = i;
10851 
10852 		if (vnic_id >= bp->nr_vnics)
10853 			break;
10854 
10855 		vnic = &bp->vnic_info[vnic_id];
10856 		vnic->flags |= BNXT_VNIC_RFS_FLAG;
10857 		if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10858 			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10859 		if (bnxt_alloc_and_setup_vnic(bp, vnic, ring_id, 1))
10860 			break;
10861 	}
10862 	return rc;
10863 }
10864 
10865 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10866 			  bool all)
10867 {
10868 	struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10869 	struct bnxt_filter_base *usr_fltr, *tmp;
10870 	struct bnxt_ntuple_filter *ntp_fltr;
10871 	int i;
10872 
10873 	if (netif_running(bp->dev)) {
10874 		bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10875 		for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10876 			if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10877 				bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10878 		}
10879 	}
10880 	if (!all)
10881 		return;
10882 
10883 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10884 		if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10885 		    usr_fltr->fw_vnic_id == rss_ctx->index) {
10886 			ntp_fltr = container_of(usr_fltr,
10887 						struct bnxt_ntuple_filter,
10888 						base);
10889 			bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10890 			bnxt_del_ntp_filter(bp, ntp_fltr);
10891 			bnxt_del_one_usr_fltr(bp, usr_fltr);
10892 		}
10893 	}
10894 
10895 	if (vnic->rss_table)
10896 		dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10897 				  vnic->rss_table,
10898 				  vnic->rss_table_dma_addr);
10899 	bp->num_rss_ctx--;
10900 }
10901 
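/* Return true if @vnic can receive packets from rx ring @rxr_id, i.e. the
 * ring appears in its RSS indirection table (the ntuple VNIC always uses
 * all rx rings).
 */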
10902 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10903 				  int rxr_id)
10904 {
10905 	u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10906 	int i, vnic_rx;
10907 
10908 	/* The ntuple VNIC always has all the rx rings, so any ring id
10909 	 * change must be applied to it because a future filter may use it.
10910 	 */
10911 	if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10912 		return true;
10913 
10914 	for (i = 0; i < tbl_size; i++) {
10915 		if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10916 			vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10917 		else
10918 			vnic_rx = bp->rss_indir_tbl[i];
10919 
10920 		if (rxr_id == vnic_rx)
10921 			return true;
10922 	}
10923 
10924 	return false;
10925 }
10926 
10927 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10928 				u16 mru, int rxr_id)
10929 {
10930 	int rc;
10931 
10932 	if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10933 		return 0;
10934 
10935 	if (mru) {
10936 		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10937 		if (rc) {
10938 			netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10939 				   vnic->vnic_id, rc);
10940 			return rc;
10941 		}
10942 	}
10943 	vnic->mru = mru;
10944 	bnxt_hwrm_vnic_update(bp, vnic,
10945 			      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10946 
10947 	return 0;
10948 }
10949 
10950 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10951 {
10952 	struct ethtool_rxfh_context *ctx;
10953 	unsigned long context;
10954 	int rc;
10955 
10956 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10957 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10958 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10959 
10960 		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10961 		if (rc)
10962 			return rc;
10963 	}
10964 
10965 	return 0;
10966 }
10967 
10968 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10969 {
10970 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10971 	struct ethtool_rxfh_context *ctx;
10972 	unsigned long context;
10973 
10974 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10975 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10976 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10977 
10978 		if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10979 		    bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10980 		    __bnxt_setup_vnic_p5(bp, vnic)) {
10981 			netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10982 				   rss_ctx->index);
10983 			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10984 			ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10985 		}
10986 	}
10987 }
10988 
10989 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10990 {
10991 	struct ethtool_rxfh_context *ctx;
10992 	unsigned long context;
10993 
10994 	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10995 		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10996 
10997 		bnxt_del_one_rss_ctx(bp, rss_ctx, false);
10998 	}
10999 }
11000 
11001 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
11002 static bool bnxt_promisc_ok(struct bnxt *bp)
11003 {
11004 #ifdef CONFIG_BNXT_SRIOV
11005 	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
11006 		return false;
11007 #endif
11008 	return true;
11009 }
11010 
11011 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
11012 {
11013 	struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
11014 	unsigned int rc = 0;
11015 
11016 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
11017 	if (rc) {
11018 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
11019 			   rc);
11020 		return rc;
11021 	}
11022 
11023 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
11024 	if (rc) {
11025 		netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
11026 			   rc);
11027 		return rc;
11028 	}
11029 	return rc;
11030 }
11031 
11032 static int bnxt_cfg_rx_mode(struct bnxt *);
11033 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
11034 
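/* Allocate and configure all firmware resources for the data path: stat
 * contexts, rings, ring groups, VNICs, L2 filters, coalescing and the rx
 * mode.
 */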
11035 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
11036 {
11037 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
11038 	int rc = 0;
11039 	unsigned int rx_nr_rings = bp->rx_nr_rings;
11040 
11041 	if (irq_re_init) {
11042 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
11043 		if (rc) {
11044 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
11045 				   rc);
11046 			goto err_out;
11047 		}
11048 	}
11049 
11050 	rc = bnxt_hwrm_ring_alloc(bp);
11051 	if (rc) {
11052 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
11053 		goto err_out;
11054 	}
11055 
11056 	rc = bnxt_hwrm_ring_grp_alloc(bp);
11057 	if (rc) {
11058 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
11059 		goto err_out;
11060 	}
11061 
11062 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11063 		rx_nr_rings--;
11064 
11065 	/* default vnic 0 */
11066 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
11067 	if (rc) {
11068 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
11069 		goto err_out;
11070 	}
11071 
11072 	if (BNXT_VF(bp))
11073 		bnxt_hwrm_func_qcfg(bp);
11074 
11075 	rc = bnxt_setup_vnic(bp, vnic);
11076 	if (rc)
11077 		goto err_out;
11078 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11079 		bnxt_hwrm_update_rss_hash_cfg(bp);
11080 
11081 	if (bp->flags & BNXT_FLAG_RFS) {
11082 		rc = bnxt_alloc_rfs_vnics(bp);
11083 		if (rc)
11084 			goto err_out;
11085 	}
11086 
11087 	if (bp->flags & BNXT_FLAG_TPA) {
11088 		rc = bnxt_set_tpa(bp, true);
11089 		if (rc)
11090 			goto err_out;
11091 	}
11092 
11093 	if (BNXT_VF(bp))
11094 		bnxt_update_vf_mac(bp);
11095 
11096 	/* Filter for default vnic 0 */
11097 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11098 	if (rc) {
11099 		if (BNXT_VF(bp) && rc == -ENODEV)
11100 			netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11101 		else
11102 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11103 		goto err_out;
11104 	}
11105 	vnic->uc_filter_count = 1;
11106 
11107 	vnic->rx_mask = 0;
11108 	if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11109 		goto skip_rx_mask;
11110 
11111 	if (bp->dev->flags & IFF_BROADCAST)
11112 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11113 
11114 	if (bp->dev->flags & IFF_PROMISC)
11115 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11116 
11117 	if (bp->dev->flags & IFF_ALLMULTI) {
11118 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11119 		vnic->mc_list_count = 0;
11120 	} else if (bp->dev->flags & IFF_MULTICAST) {
11121 		u32 mask = 0;
11122 
11123 		bnxt_mc_list_updated(bp, &mask);
11124 		vnic->rx_mask |= mask;
11125 	}
11126 
11127 	rc = bnxt_cfg_rx_mode(bp);
11128 	if (rc)
11129 		goto err_out;
11130 
11131 skip_rx_mask:
11132 	rc = bnxt_hwrm_set_coal(bp);
11133 	if (rc)
11134 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11135 				rc);
11136 
11137 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11138 		rc = bnxt_setup_nitroa0_vnic(bp);
11139 		if (rc)
11140 			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11141 				   rc);
11142 	}
11143 
11144 	if (BNXT_VF(bp)) {
11145 		bnxt_hwrm_func_qcfg(bp);
11146 		netdev_update_features(bp->dev);
11147 	}
11148 
11149 	return 0;
11150 
11151 err_out:
11152 	bnxt_hwrm_resource_free(bp, 0, true);
11153 
11154 	return rc;
11155 }
11156 
11157 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11158 {
11159 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11160 	return 0;
11161 }
11162 
11163 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11164 {
11165 	bnxt_init_cp_rings(bp);
11166 	bnxt_init_rx_rings(bp);
11167 	bnxt_init_tx_rings(bp);
11168 	bnxt_init_ring_grps(bp, irq_re_init);
11169 	bnxt_init_vnics(bp);
11170 
11171 	return bnxt_init_chip(bp, irq_re_init);
11172 }
11173 
11174 static int bnxt_set_real_num_queues(struct bnxt *bp)
11175 {
11176 	int rc;
11177 	struct net_device *dev = bp->dev;
11178 
11179 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11180 					  bp->tx_nr_rings_xdp);
11181 	if (rc)
11182 		return rc;
11183 
11184 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11185 	if (rc)
11186 		return rc;
11187 
11188 #ifdef CONFIG_RFS_ACCEL
11189 	if (bp->flags & BNXT_FLAG_RFS)
11190 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11191 #endif
11192 
11193 	return rc;
11194 }
11195 
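/* Fit *rx and *tx within @max completion rings: cap both when the rings
 * are shared, otherwise shrink them until rx + tx <= max.
 */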
11196 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11197 			     bool shared)
11198 {
11199 	int _rx = *rx, _tx = *tx;
11200 
11201 	if (shared) {
11202 		*rx = min_t(int, _rx, max);
11203 		*tx = min_t(int, _tx, max);
11204 	} else {
11205 		if (max < 2)
11206 			return -ENOMEM;
11207 
11208 		while (_rx + _tx > max) {
11209 			if (_rx > _tx && _rx > 1)
11210 				_rx--;
11211 			else if (_tx > 1)
11212 				_tx--;
11213 		}
11214 		*rx = _rx;
11215 		*tx = _tx;
11216 	}
11217 	return 0;
11218 }
11219 
11220 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11221 {
11222 	return (tx - tx_xdp) / tx_sets + tx_xdp;
11223 }
11224 
11225 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11226 {
11227 	int tcs = bp->num_tc;
11228 
11229 	if (!tcs)
11230 		tcs = 1;
11231 	return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11232 }
11233 
11234 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11235 {
11236 	int tcs = bp->num_tc;
11237 
11238 	return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11239 	       bp->tx_nr_rings_xdp;
11240 }
11241 
11242 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11243 			   bool sh)
11244 {
11245 	int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11246 
11247 	if (tx_cp != *tx) {
11248 		int tx_saved = tx_cp, rc;
11249 
11250 		rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11251 		if (rc)
11252 			return rc;
11253 		if (tx_cp != tx_saved)
11254 			*tx = bnxt_num_cp_to_tx(bp, tx_cp);
11255 		return 0;
11256 	}
11257 	return __bnxt_trim_rings(bp, rx, tx, max, sh);
11258 }
11259 
11260 static void bnxt_setup_msix(struct bnxt *bp)
11261 {
11262 	const int len = sizeof(bp->irq_tbl[0].name);
11263 	struct net_device *dev = bp->dev;
11264 	int tcs, i;
11265 
11266 	tcs = bp->num_tc;
11267 	if (tcs) {
11268 		int i, off, count;
11269 
11270 		for (i = 0; i < tcs; i++) {
11271 			count = bp->tx_nr_rings_per_tc;
11272 			off = BNXT_TC_TO_RING_BASE(bp, i);
11273 			netdev_set_tc_queue(dev, i, count, off);
11274 		}
11275 	}
11276 
11277 	for (i = 0; i < bp->cp_nr_rings; i++) {
11278 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11279 		char *attr;
11280 
11281 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11282 			attr = "TxRx";
11283 		else if (i < bp->rx_nr_rings)
11284 			attr = "rx";
11285 		else
11286 			attr = "tx";
11287 
11288 		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11289 			 attr, i);
11290 		bp->irq_tbl[map_idx].handler = bnxt_msix;
11291 	}
11292 }
11293 
11294 static int bnxt_init_int_mode(struct bnxt *bp);
11295 
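/* Grow or shrink the dynamically allocated MSI-X vectors to @total and
 * return the resulting number of vectors.
 */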
11296 static int bnxt_change_msix(struct bnxt *bp, int total)
11297 {
11298 	struct msi_map map;
11299 	int i;
11300 
11301 	/* add MSIX to the end if needed */
11302 	for (i = bp->total_irqs; i < total; i++) {
11303 		map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11304 		if (map.index < 0)
11305 			return bp->total_irqs;
11306 		bp->irq_tbl[i].vector = map.virq;
11307 		bp->total_irqs++;
11308 	}
11309 
11310 	/* trim MSIX from the end if needed */
11311 	for (i = bp->total_irqs; i > total; i--) {
11312 		map.index = i - 1;
11313 		map.virq = bp->irq_tbl[i - 1].vector;
11314 		pci_msix_free_irq(bp->pdev, map);
11315 		bp->total_irqs--;
11316 	}
11317 	return bp->total_irqs;
11318 }
11319 
11320 static int bnxt_setup_int_mode(struct bnxt *bp)
11321 {
11322 	int rc;
11323 
11324 	if (!bp->irq_tbl) {
11325 		rc = bnxt_init_int_mode(bp);
11326 		if (rc || !bp->irq_tbl)
11327 			return rc ?: -ENODEV;
11328 	}
11329 
11330 	bnxt_setup_msix(bp);
11331 
11332 	rc = bnxt_set_real_num_queues(bp);
11333 	return rc;
11334 }
11335 
11336 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11337 {
11338 	return bp->hw_resc.max_rsscos_ctxs;
11339 }
11340 
11341 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11342 {
11343 	return bp->hw_resc.max_vnics;
11344 }
11345 
11346 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11347 {
11348 	return bp->hw_resc.max_stat_ctxs;
11349 }
11350 
11351 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11352 {
11353 	return bp->hw_resc.max_cp_rings;
11354 }
11355 
11356 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11357 {
11358 	unsigned int cp = bp->hw_resc.max_cp_rings;
11359 
11360 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11361 		cp -= bnxt_get_ulp_msix_num(bp);
11362 
11363 	return cp;
11364 }
11365 
11366 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11367 {
11368 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11369 
11370 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11371 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11372 
11373 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11374 }
11375 
11376 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11377 {
11378 	bp->hw_resc.max_irqs = max_irqs;
11379 }
11380 
11381 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11382 {
11383 	unsigned int cp;
11384 
11385 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
11386 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11387 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11388 	else
11389 		return cp - bp->cp_nr_rings;
11390 }
11391 
11392 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11393 {
11394 	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11395 }
11396 
11397 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11398 {
11399 	int max_irq = bnxt_get_max_func_irqs(bp);
11400 	int total_req = bp->cp_nr_rings + num;
11401 
11402 	if (max_irq < total_req) {
11403 		num = max_irq - bp->cp_nr_rings;
11404 		if (num <= 0)
11405 			return 0;
11406 	}
11407 	return num;
11408 }
11409 
11410 static int bnxt_get_num_msix(struct bnxt *bp)
11411 {
11412 	if (!BNXT_NEW_RM(bp))
11413 		return bnxt_get_max_func_irqs(bp);
11414 
11415 	return bnxt_nq_rings_in_use(bp);
11416 }
11417 
11418 static int bnxt_init_int_mode(struct bnxt *bp)
11419 {
11420 	int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11421 
11422 	total_vecs = bnxt_get_num_msix(bp);
11423 	max = bnxt_get_max_func_irqs(bp);
11424 	if (total_vecs > max)
11425 		total_vecs = max;
11426 
11427 	if (!total_vecs)
11428 		return 0;
11429 
11430 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11431 		min = 2;
11432 
11433 	total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11434 					   PCI_IRQ_MSIX);
11435 	ulp_msix = bnxt_get_ulp_msix_num(bp);
11436 	if (total_vecs < 0 || total_vecs < ulp_msix) {
11437 		rc = -ENODEV;
11438 		goto msix_setup_exit;
11439 	}
11440 
11441 	tbl_size = total_vecs;
11442 	if (pci_msix_can_alloc_dyn(bp->pdev))
11443 		tbl_size = max;
11444 	bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11445 	if (bp->irq_tbl) {
11446 		for (i = 0; i < total_vecs; i++)
11447 			bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11448 
11449 		bp->total_irqs = total_vecs;
11450 		/* Trim rings based on the number of vectors allocated */
11451 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11452 				     total_vecs - ulp_msix, min == 1);
11453 		if (rc)
11454 			goto msix_setup_exit;
11455 
11456 		tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11457 		bp->cp_nr_rings = (min == 1) ?
11458 				  max_t(int, tx_cp, bp->rx_nr_rings) :
11459 				  tx_cp + bp->rx_nr_rings;
11460 
11461 	} else {
11462 		rc = -ENOMEM;
11463 		goto msix_setup_exit;
11464 	}
11465 	return 0;
11466 
11467 msix_setup_exit:
11468 	netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11469 	kfree(bp->irq_tbl);
11470 	bp->irq_tbl = NULL;
11471 	pci_free_irq_vectors(bp->pdev);
11472 	return rc;
11473 }
11474 
11475 static void bnxt_clear_int_mode(struct bnxt *bp)
11476 {
11477 	pci_free_irq_vectors(bp->pdev);
11478 
11479 	kfree(bp->irq_tbl);
11480 	bp->irq_tbl = NULL;
11481 }
11482 
11483 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11484 {
11485 	bool irq_cleared = false;
11486 	bool irq_change = false;
11487 	int tcs = bp->num_tc;
11488 	int irqs_required;
11489 	int rc;
11490 
11491 	if (!bnxt_need_reserve_rings(bp))
11492 		return 0;
11493 
11494 	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11495 		int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11496 
11497 		if (ulp_msix > bp->ulp_num_msix_want)
11498 			ulp_msix = bp->ulp_num_msix_want;
11499 		irqs_required = ulp_msix + bp->cp_nr_rings;
11500 	} else {
11501 		irqs_required = bnxt_get_num_msix(bp);
11502 	}
11503 
11504 	if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11505 		irq_change = true;
11506 		if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11507 			bnxt_ulp_irq_stop(bp);
11508 			bnxt_clear_int_mode(bp);
11509 			irq_cleared = true;
11510 		}
11511 	}
11512 	rc = __bnxt_reserve_rings(bp);
11513 	if (irq_cleared) {
11514 		if (!rc)
11515 			rc = bnxt_init_int_mode(bp);
11516 		bnxt_ulp_irq_restart(bp, rc);
11517 	} else if (irq_change && !rc) {
11518 		if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11519 			rc = -ENOSPC;
11520 	}
11521 	if (rc) {
11522 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11523 		return rc;
11524 	}
11525 	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11526 		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11527 		netdev_err(bp->dev, "tx ring reservation failure\n");
11528 		netdev_reset_tc(bp->dev);
11529 		bp->num_tc = 0;
11530 		if (bp->tx_nr_rings_xdp)
11531 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11532 		else
11533 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11534 		return -ENOMEM;
11535 	}
11536 	return 0;
11537 }
11538 
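/* Stop the TX rings of one NAPI instance; in TPH mode also free the TX and
 * completion rings so they can be reallocated on queue restart.
 */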
11539 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11540 {
11541 	struct bnxt_tx_ring_info *txr;
11542 	struct netdev_queue *txq;
11543 	struct bnxt_napi *bnapi;
11544 	int i;
11545 
11546 	bnapi = bp->bnapi[idx];
11547 	bnxt_for_each_napi_tx(i, bnapi, txr) {
11548 		WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11549 		synchronize_net();
11550 
11551 		if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11552 			txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11553 			if (txq) {
11554 				__netif_tx_lock_bh(txq);
11555 				netif_tx_stop_queue(txq);
11556 				__netif_tx_unlock_bh(txq);
11557 			}
11558 		}
11559 
11560 		if (!bp->tph_mode)
11561 			continue;
11562 
11563 		bnxt_hwrm_tx_ring_free(bp, txr, true);
11564 		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11565 		bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11566 		bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11567 	}
11568 }
11569 
11570 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11571 {
11572 	struct bnxt_tx_ring_info *txr;
11573 	struct netdev_queue *txq;
11574 	struct bnxt_napi *bnapi;
11575 	int rc, i;
11576 
11577 	bnapi = bp->bnapi[idx];
11578 	/* All rings have been reserved and previously allocated.
11579 	 * Reallocating with the same parameters should never fail.
11580 	 */
11581 	bnxt_for_each_napi_tx(i, bnapi, txr) {
11582 		if (!bp->tph_mode)
11583 			goto start_tx;
11584 
11585 		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11586 		if (rc)
11587 			return rc;
11588 
11589 		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11590 		if (rc)
11591 			return rc;
11592 
11593 		txr->tx_prod = 0;
11594 		txr->tx_cons = 0;
11595 		txr->tx_hw_cons = 0;
11596 start_tx:
11597 		WRITE_ONCE(txr->dev_state, 0);
11598 		synchronize_net();
11599 
11600 		if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11601 			continue;
11602 
11603 		txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11604 		if (txq)
11605 			netif_tx_start_queue(txq);
11606 	}
11607 
11608 	return 0;
11609 }
11610 
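/* IRQ affinity notifier: update the TPH steering tag for the new CPU and
 * restart the rx queue so the new tag takes effect.
 */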
11611 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11612 				     const cpumask_t *mask)
11613 {
11614 	struct bnxt_irq *irq;
11615 	u16 tag;
11616 	int err;
11617 
11618 	irq = container_of(notify, struct bnxt_irq, affinity_notify);
11619 
11620 	if (!irq->bp->tph_mode)
11621 		return;
11622 
11623 	cpumask_copy(irq->cpu_mask, mask);
11624 
11625 	if (irq->ring_nr >= irq->bp->rx_nr_rings)
11626 		return;
11627 
11628 	if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11629 				cpumask_first(irq->cpu_mask), &tag))
11630 		return;
11631 
11632 	if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11633 		return;
11634 
11635 	netdev_lock(irq->bp->dev);
11636 	if (netif_running(irq->bp->dev)) {
11637 		err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11638 		if (err)
11639 			netdev_err(irq->bp->dev,
11640 				   "RX queue restart failed: err=%d\n", err);
11641 	}
11642 	netdev_unlock(irq->bp->dev);
11643 }
11644 
11645 static void bnxt_irq_affinity_release(struct kref *ref)
11646 {
11647 	struct irq_affinity_notify *notify =
11648 		container_of(ref, struct irq_affinity_notify, kref);
11649 	struct bnxt_irq *irq;
11650 
11651 	irq = container_of(notify, struct bnxt_irq, affinity_notify);
11652 
11653 	if (!irq->bp->tph_mode)
11654 		return;
11655 
11656 	if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11657 		netdev_err(irq->bp->dev,
11658 			   "Setting ST=0 for MSIX entry %d failed\n",
11659 			   irq->msix_nr);
11660 		return;
11661 	}
11662 }
11663 
11664 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11665 {
11666 	irq_set_affinity_notifier(irq->vector, NULL);
11667 }
11668 
11669 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11670 {
11671 	struct irq_affinity_notify *notify;
11672 
11673 	irq->bp = bp;
11674 
11675 	/* Nothing to do if TPH is not enabled */
11676 	if (!bp->tph_mode)
11677 		return;
11678 
11679 	/* Register IRQ affinity notifier */
11680 	notify = &irq->affinity_notify;
11681 	notify->irq = irq->vector;
11682 	notify->notify = bnxt_irq_affinity_notify;
11683 	notify->release = bnxt_irq_affinity_release;
11684 
11685 	irq_set_affinity_notifier(irq->vector, notify);
11686 }
11687 
11688 static void bnxt_free_irq(struct bnxt *bp)
11689 {
11690 	struct bnxt_irq *irq;
11691 	int i;
11692 
11693 #ifdef CONFIG_RFS_ACCEL
11694 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11695 	bp->dev->rx_cpu_rmap = NULL;
11696 #endif
11697 	if (!bp->irq_tbl || !bp->bnapi)
11698 		return;
11699 
11700 	for (i = 0; i < bp->cp_nr_rings; i++) {
11701 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11702 
11703 		irq = &bp->irq_tbl[map_idx];
11704 		if (irq->requested) {
11705 			if (irq->have_cpumask) {
11706 				irq_update_affinity_hint(irq->vector, NULL);
11707 				free_cpumask_var(irq->cpu_mask);
11708 				irq->have_cpumask = 0;
11709 			}
11710 
11711 			bnxt_release_irq_notifier(irq);
11712 
11713 			free_irq(irq->vector, bp->bnapi[i]);
11714 		}
11715 
11716 		irq->requested = 0;
11717 	}
11718 
11719 	/* Disable TPH support */
11720 	pcie_disable_tph(bp->pdev);
11721 	bp->tph_mode = 0;
11722 }
11723 
11724 static int bnxt_request_irq(struct bnxt *bp)
11725 {
11726 	struct cpu_rmap *rmap = NULL;
11727 	int i, j, rc = 0;
11728 	unsigned long flags = 0;
11729 
11730 	rc = bnxt_setup_int_mode(bp);
11731 	if (rc) {
11732 		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11733 			   rc);
11734 		return rc;
11735 	}
11736 #ifdef CONFIG_RFS_ACCEL
11737 	rmap = bp->dev->rx_cpu_rmap;
11738 #endif
11739 
11740 	/* Enable TPH support as part of IRQ request */
11741 	rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11742 	if (!rc)
11743 		bp->tph_mode = PCI_TPH_ST_IV_MODE;
11744 
11745 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11746 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11747 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11748 
11749 		if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11750 		    rmap && bp->bnapi[i]->rx_ring) {
11751 			rc = irq_cpu_rmap_add(rmap, irq->vector);
11752 			if (rc)
11753 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11754 					    j);
11755 			j++;
11756 		}
11757 
11758 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11759 				 bp->bnapi[i]);
11760 		if (rc)
11761 			break;
11762 
11763 		netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11764 		irq->requested = 1;
11765 
11766 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11767 			int numa_node = dev_to_node(&bp->pdev->dev);
11768 			u16 tag;
11769 
11770 			irq->have_cpumask = 1;
11771 			irq->msix_nr = map_idx;
11772 			irq->ring_nr = i;
11773 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11774 					irq->cpu_mask);
11775 			rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11776 			if (rc) {
11777 				netdev_warn(bp->dev,
11778 					    "Update affinity hint failed, IRQ = %d\n",
11779 					    irq->vector);
11780 				break;
11781 			}
11782 
11783 			bnxt_register_irq_notifier(bp, irq);
11784 
11785 			/* Init ST table entry */
11786 			if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11787 						cpumask_first(irq->cpu_mask),
11788 						&tag))
11789 				continue;
11790 
11791 			pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11792 		}
11793 	}
11794 	return rc;
11795 }
11796 
11797 static void bnxt_del_napi(struct bnxt *bp)
11798 {
11799 	int i;
11800 
11801 	if (!bp->bnapi)
11802 		return;
11803 
11804 	for (i = 0; i < bp->rx_nr_rings; i++)
11805 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11806 	for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11807 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11808 
11809 	for (i = 0; i < bp->cp_nr_rings; i++) {
11810 		struct bnxt_napi *bnapi = bp->bnapi[i];
11811 
11812 		__netif_napi_del_locked(&bnapi->napi);
11813 	}
11814 	/* We called __netif_napi_del_locked(), so we need to respect an
11815 	 * RCU grace period before freeing the napi structures.
11816 	 */
11817 	synchronize_net();
11818 }
11819 
11820 static void bnxt_init_napi(struct bnxt *bp)
11821 {
11822 	int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11823 	unsigned int cp_nr_rings = bp->cp_nr_rings;
11824 	struct bnxt_napi *bnapi;
11825 	int i;
11826 
11827 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11828 		poll_fn = bnxt_poll_p5;
11829 	else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11830 		cp_nr_rings--;
11831 
11832 	set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11833 
11834 	for (i = 0; i < cp_nr_rings; i++) {
11835 		bnapi = bp->bnapi[i];
11836 		netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11837 					     bnapi->index);
11838 	}
11839 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11840 		bnapi = bp->bnapi[cp_nr_rings];
11841 		netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11842 	}
11843 }
11844 
11845 static void bnxt_disable_napi(struct bnxt *bp)
11846 {
11847 	int i;
11848 
11849 	if (!bp->bnapi ||
11850 	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11851 		return;
11852 
11853 	for (i = 0; i < bp->cp_nr_rings; i++) {
11854 		struct bnxt_napi *bnapi = bp->bnapi[i];
11855 		struct bnxt_cp_ring_info *cpr;
11856 
11857 		cpr = &bnapi->cp_ring;
11858 		if (bnapi->tx_fault)
11859 			cpr->sw_stats->tx.tx_resets++;
11860 		if (bnapi->in_reset)
11861 			cpr->sw_stats->rx.rx_resets++;
11862 		napi_disable_locked(&bnapi->napi);
11863 	}
11864 }
11865 
11866 static void bnxt_enable_napi(struct bnxt *bp)
11867 {
11868 	int i;
11869 
11870 	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11871 	for (i = 0; i < bp->cp_nr_rings; i++) {
11872 		struct bnxt_napi *bnapi = bp->bnapi[i];
11873 		struct bnxt_cp_ring_info *cpr;
11874 
11875 		bnapi->tx_fault = 0;
11876 
11877 		cpr = &bnapi->cp_ring;
11878 		bnapi->in_reset = false;
11879 
11880 		if (bnapi->rx_ring) {
11881 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11882 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11883 		}
11884 		napi_enable_locked(&bnapi->napi);
11885 	}
11886 }
11887 
11888 void bnxt_tx_disable(struct bnxt *bp)
11889 {
11890 	int i;
11891 	struct bnxt_tx_ring_info *txr;
11892 
11893 	if (bp->tx_ring) {
11894 		for (i = 0; i < bp->tx_nr_rings; i++) {
11895 			txr = &bp->tx_ring[i];
11896 			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11897 		}
11898 	}
11899 	/* Make sure napi polls see @dev_state change */
11900 	synchronize_net();
11901 	/* Drop carrier first to prevent TX timeout */
11902 	netif_carrier_off(bp->dev);
11903 	/* Stop all TX queues */
11904 	netif_tx_disable(bp->dev);
11905 }
11906 
11907 void bnxt_tx_enable(struct bnxt *bp)
11908 {
11909 	int i;
11910 	struct bnxt_tx_ring_info *txr;
11911 
11912 	for (i = 0; i < bp->tx_nr_rings; i++) {
11913 		txr = &bp->tx_ring[i];
11914 		WRITE_ONCE(txr->dev_state, 0);
11915 	}
11916 	/* Make sure napi polls see @dev_state change */
11917 	synchronize_net();
11918 	netif_tx_wake_all_queues(bp->dev);
11919 	if (BNXT_LINK_IS_UP(bp))
11920 		netif_carrier_on(bp->dev);
11921 }
11922 
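/* Translate the active FEC mode into a human readable string for logging. */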
11923 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11924 {
11925 	u8 active_fec = link_info->active_fec_sig_mode &
11926 			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11927 
11928 	switch (active_fec) {
11929 	default:
11930 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11931 		return "None";
11932 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11933 		return "Clause 74 BaseR";
11934 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11935 		return "Clause 91 RS(528,514)";
11936 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11937 		return "Clause 91 RS544_1XN";
11938 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11939 		return "Clause 91 RS(544,514)";
11940 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11941 		return "Clause 91 RS272_1XN";
11942 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11943 		return "Clause 91 RS(272,257)";
11944 	}
11945 }
11946 
11947 static char *bnxt_link_down_reason(struct bnxt_link_info *link_info)
11948 {
11949 	u8 reason = link_info->link_down_reason;
11950 
11951 	/* Multiple bits can be set, we report 1 bit only in order of
11952 	 * priority.
11953 	 */
11954 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF)
11955 		return "(Remote fault)";
11956 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION)
11957 		return "(OTP Speed limit violation)";
11958 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED)
11959 		return "(Cable removed)";
11960 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT)
11961 		return "(Module fault)";
11962 	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST)
11963 		return "(BMC request down)";
11964 	return "";
11965 }
11966 
11967 void bnxt_report_link(struct bnxt *bp)
11968 {
11969 	if (BNXT_LINK_IS_UP(bp)) {
11970 		const char *signal = "";
11971 		const char *flow_ctrl;
11972 		const char *duplex;
11973 		u32 speed;
11974 		u16 fec;
11975 
11976 		netif_carrier_on(bp->dev);
11977 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11978 		if (speed == SPEED_UNKNOWN) {
11979 			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11980 			return;
11981 		}
11982 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11983 			duplex = "full";
11984 		else
11985 			duplex = "half";
11986 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11987 			flow_ctrl = "ON - receive & transmit";
11988 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11989 			flow_ctrl = "ON - transmit";
11990 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
11991 			flow_ctrl = "ON - receive";
11992 		else
11993 			flow_ctrl = "none";
11994 		if (bp->link_info.phy_qcfg_resp.option_flags &
11995 		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
11996 			u8 sig_mode = bp->link_info.active_fec_sig_mode &
11997 				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
11998 			switch (sig_mode) {
11999 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
12000 				signal = "(NRZ) ";
12001 				break;
12002 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
12003 				signal = "(PAM4 56Gbps) ";
12004 				break;
12005 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
12006 				signal = "(PAM4 112Gbps) ";
12007 				break;
12008 			default:
12009 				break;
12010 			}
12011 		}
12012 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
12013 			    speed, signal, duplex, flow_ctrl);
12014 		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
12015 			netdev_info(bp->dev, "EEE is %s\n",
12016 				    bp->eee.eee_active ? "active" :
12017 							 "not active");
12018 		fec = bp->link_info.fec_cfg;
12019 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
12020 			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
12021 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
12022 				    bnxt_report_fec(&bp->link_info));
12023 	} else {
12024 		char *str = bnxt_link_down_reason(&bp->link_info);
12025 
12026 		netif_carrier_off(bp->dev);
12027 		netdev_err(bp->dev, "NIC Link is Down %s\n", str);
12028 	}
12029 }
12030 
12031 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
12032 {
12033 	if (!resp->supported_speeds_auto_mode &&
12034 	    !resp->supported_speeds_force_mode &&
12035 	    !resp->supported_pam4_speeds_auto_mode &&
12036 	    !resp->supported_pam4_speeds_force_mode &&
12037 	    !resp->supported_speeds2_auto_mode &&
12038 	    !resp->supported_speeds2_force_mode)
12039 		return true;
12040 	return false;
12041 }
12042 
12043 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
12044 {
12045 	struct bnxt_link_info *link_info = &bp->link_info;
12046 	struct hwrm_port_phy_qcaps_output *resp;
12047 	struct hwrm_port_phy_qcaps_input *req;
12048 	int rc = 0;
12049 
12050 	if (bp->hwrm_spec_code < 0x10201)
12051 		return 0;
12052 
12053 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
12054 	if (rc)
12055 		return rc;
12056 
12057 	resp = hwrm_req_hold(bp, req);
12058 	rc = hwrm_req_send(bp, req);
12059 	if (rc)
12060 		goto hwrm_phy_qcaps_exit;
12061 
12062 	bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
12063 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
12064 		struct ethtool_keee *eee = &bp->eee;
12065 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
12066 
12067 		_bnxt_fw_to_linkmode(eee->supported, fw_speeds);
12068 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
12069 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
12070 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
12071 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
12072 	}
12073 
12074 	if (bp->hwrm_spec_code >= 0x10a01) {
12075 		if (bnxt_phy_qcaps_no_speed(resp)) {
12076 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
12077 			netdev_warn(bp->dev, "Ethernet link disabled\n");
12078 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
12079 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
12080 			netdev_info(bp->dev, "Ethernet link enabled\n");
12081 			/* Phy re-enabled, reprobe the speeds */
12082 			link_info->support_auto_speeds = 0;
12083 			link_info->support_pam4_auto_speeds = 0;
12084 			link_info->support_auto_speeds2 = 0;
12085 		}
12086 	}
12087 	if (resp->supported_speeds_auto_mode)
12088 		link_info->support_auto_speeds =
12089 			le16_to_cpu(resp->supported_speeds_auto_mode);
12090 	if (resp->supported_pam4_speeds_auto_mode)
12091 		link_info->support_pam4_auto_speeds =
12092 			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12093 	if (resp->supported_speeds2_auto_mode)
12094 		link_info->support_auto_speeds2 =
12095 			le16_to_cpu(resp->supported_speeds2_auto_mode);
12096 
12097 	bp->port_count = resp->port_cnt;
12098 
12099 hwrm_phy_qcaps_exit:
12100 	hwrm_req_drop(bp, req);
12101 	return rc;
12102 }
12103 
12104 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12105 {
12106 	struct hwrm_port_mac_qcaps_output *resp;
12107 	struct hwrm_port_mac_qcaps_input *req;
12108 	int rc;
12109 
12110 	if (bp->hwrm_spec_code < 0x10a03)
12111 		return;
12112 
12113 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12114 	if (rc)
12115 		return;
12116 
12117 	resp = hwrm_req_hold(bp, req);
12118 	rc = hwrm_req_send_silent(bp, req);
12119 	if (!rc)
12120 		bp->mac_flags = resp->flags;
12121 	hwrm_req_drop(bp, req);
12122 }
12123 
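/* Return true if @advertising has any speed bit that is not set in
 * @supported.
 */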
12124 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12125 {
12126 	u16 diff = advertising ^ supported;
12127 
12128 	return ((supported | diff) != supported);
12129 }
12130 
12131 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12132 {
12133 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12134 
12135 	/* Check if any advertised speeds are no longer supported. The caller
12136 	 * holds the link_lock mutex, so we can modify link_info settings.
12137 	 */
12138 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12139 		if (bnxt_support_dropped(link_info->advertising,
12140 					 link_info->support_auto_speeds2)) {
12141 			link_info->advertising = link_info->support_auto_speeds2;
12142 			return true;
12143 		}
12144 		return false;
12145 	}
12146 	if (bnxt_support_dropped(link_info->advertising,
12147 				 link_info->support_auto_speeds)) {
12148 		link_info->advertising = link_info->support_auto_speeds;
12149 		return true;
12150 	}
12151 	if (bnxt_support_dropped(link_info->advertising_pam4,
12152 				 link_info->support_pam4_auto_speeds)) {
12153 		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12154 		return true;
12155 	}
12156 	return false;
12157 }
12158 
12159 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12160 {
12161 	struct bnxt_link_info *link_info = &bp->link_info;
12162 	struct hwrm_port_phy_qcfg_output *resp;
12163 	struct hwrm_port_phy_qcfg_input *req;
12164 	u8 link_state = link_info->link_state;
12165 	bool support_changed;
12166 	int rc;
12167 
12168 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12169 	if (rc)
12170 		return rc;
12171 
12172 	resp = hwrm_req_hold(bp, req);
12173 	rc = hwrm_req_send(bp, req);
12174 	if (rc) {
12175 		hwrm_req_drop(bp, req);
12176 		if (BNXT_VF(bp) && rc == -ENODEV) {
12177 			netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12178 			rc = 0;
12179 		}
12180 		return rc;
12181 	}
12182 
12183 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12184 	link_info->phy_link_status = resp->link;
12185 	link_info->duplex = resp->duplex_cfg;
12186 	if (bp->hwrm_spec_code >= 0x10800)
12187 		link_info->duplex = resp->duplex_state;
12188 	link_info->pause = resp->pause;
12189 	link_info->auto_mode = resp->auto_mode;
12190 	link_info->auto_pause_setting = resp->auto_pause;
12191 	link_info->lp_pause = resp->link_partner_adv_pause;
12192 	link_info->force_pause_setting = resp->force_pause;
12193 	link_info->duplex_setting = resp->duplex_cfg;
12194 	if (link_info->phy_link_status == BNXT_LINK_LINK) {
12195 		link_info->link_speed = le16_to_cpu(resp->link_speed);
12196 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12197 			link_info->active_lanes = resp->active_lanes;
12198 	} else {
12199 		link_info->link_speed = 0;
12200 		link_info->active_lanes = 0;
12201 	}
12202 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12203 	link_info->force_pam4_link_speed =
12204 		le16_to_cpu(resp->force_pam4_link_speed);
12205 	link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12206 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12207 	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12208 	link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12209 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12210 	link_info->auto_pam4_link_speeds =
12211 		le16_to_cpu(resp->auto_pam4_link_speed_mask);
12212 	link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12213 	link_info->lp_auto_link_speeds =
12214 		le16_to_cpu(resp->link_partner_adv_speeds);
12215 	link_info->lp_auto_pam4_link_speeds =
12216 		resp->link_partner_pam4_adv_speeds;
12217 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12218 	link_info->phy_ver[0] = resp->phy_maj;
12219 	link_info->phy_ver[1] = resp->phy_min;
12220 	link_info->phy_ver[2] = resp->phy_bld;
12221 	link_info->media_type = resp->media_type;
12222 	link_info->phy_type = resp->phy_type;
12223 	link_info->transceiver = resp->xcvr_pkg_type;
12224 	link_info->phy_addr = resp->eee_config_phy_addr &
12225 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12226 	link_info->module_status = resp->module_status;
12227 	link_info->link_down_reason = resp->link_down_reason;
12228 
12229 	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12230 		struct ethtool_keee *eee = &bp->eee;
12231 		u16 fw_speeds;
12232 
12233 		eee->eee_active = 0;
12234 		if (resp->eee_config_phy_addr &
12235 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12236 			eee->eee_active = 1;
12237 			fw_speeds = le16_to_cpu(
12238 				resp->link_partner_adv_eee_link_speed_mask);
12239 			_bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12240 		}
12241 
12242 		/* Pull initial EEE config */
12243 		if (!chng_link_state) {
12244 			if (resp->eee_config_phy_addr &
12245 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12246 				eee->eee_enabled = 1;
12247 
12248 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12249 			_bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12250 
12251 			if (resp->eee_config_phy_addr &
12252 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12253 				__le32 tmr;
12254 
12255 				eee->tx_lpi_enabled = 1;
12256 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12257 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
12258 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12259 			}
12260 		}
12261 	}
12262 
12263 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12264 	if (bp->hwrm_spec_code >= 0x10504) {
12265 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12266 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12267 	}
12268 	/* TODO: need to add more logic to report VF link */
12269 	if (chng_link_state) {
12270 		if (link_info->phy_link_status == BNXT_LINK_LINK)
12271 			link_info->link_state = BNXT_LINK_STATE_UP;
12272 		else
12273 			link_info->link_state = BNXT_LINK_STATE_DOWN;
12274 		if (link_state != link_info->link_state)
12275 			bnxt_report_link(bp);
12276 	} else {
12277 		/* always report link down if not required to update the link state */
12278 		link_info->link_state = BNXT_LINK_STATE_DOWN;
12279 	}
12280 	hwrm_req_drop(bp, req);
12281 
12282 	if (!BNXT_PHY_CFG_ABLE(bp))
12283 		return 0;
12284 
12285 	support_changed = bnxt_support_speed_dropped(link_info);
12286 	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12287 		bnxt_hwrm_set_link_setting(bp, true, false);
12288 	return 0;
12289 }
12290 
12291 static void bnxt_get_port_module_status(struct bnxt *bp)
12292 {
12293 	struct bnxt_link_info *link_info = &bp->link_info;
12294 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12295 	u8 module_status;
12296 
12297 	if (bnxt_update_link(bp, true))
12298 		return;
12299 
12300 	module_status = link_info->module_status;
12301 	switch (module_status) {
12302 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12303 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12304 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12305 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12306 			    bp->pf.port_id);
12307 		if (bp->hwrm_spec_code >= 0x10201) {
12308 			netdev_warn(bp->dev, "Module part number %s\n",
12309 				    resp->phy_vendor_partnumber);
12310 		}
12311 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12312 			netdev_warn(bp->dev, "TX is disabled\n");
12313 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12314 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12315 	}
12316 }
12317 
12318 static void
12319 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12320 {
12321 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12322 		if (bp->hwrm_spec_code >= 0x10201)
12323 			req->auto_pause =
12324 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12325 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12326 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12327 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12328 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12329 		req->enables |=
12330 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12331 	} else {
12332 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12333 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12334 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12335 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12336 		req->enables |=
12337 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12338 		if (bp->hwrm_spec_code >= 0x10201) {
12339 			req->auto_pause = req->force_pause;
12340 			req->enables |= cpu_to_le32(
12341 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12342 		}
12343 	}
12344 }
12345 
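/* Fill in the speed fields of a PORT_PHY_CFG request, either advertising
 * the configured speed masks with an autoneg restart or forcing the
 * requested link speed.
 */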
12346 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12347 {
12348 	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12349 		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12350 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12351 			req->enables |=
12352 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12353 			req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12354 		} else if (bp->link_info.advertising) {
12355 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12356 			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12357 		}
12358 		if (bp->link_info.advertising_pam4) {
12359 			req->enables |=
12360 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12361 			req->auto_link_pam4_speed_mask =
12362 				cpu_to_le16(bp->link_info.advertising_pam4);
12363 		}
12364 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12365 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12366 	} else {
12367 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12368 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12369 			req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12370 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12371 			netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12372 				   (u32)bp->link_info.req_link_speed);
12373 		} else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12374 			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12375 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12376 		} else {
12377 			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12378 		}
12379 	}
12380 
12381 	/* tell chimp that the setting takes effect immediately */
12382 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12383 }
12384 
12385 int bnxt_hwrm_set_pause(struct bnxt *bp)
12386 {
12387 	struct hwrm_port_phy_cfg_input *req;
12388 	int rc;
12389 
12390 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12391 	if (rc)
12392 		return rc;
12393 
12394 	bnxt_hwrm_set_pause_common(bp, req);
12395 
12396 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12397 	    bp->link_info.force_link_chng)
12398 		bnxt_hwrm_set_link_common(bp, req);
12399 
12400 	rc = hwrm_req_send(bp, req);
12401 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12402 		/* Since changing the pause setting doesn't trigger any link
12403 		 * change event, the driver needs to update the current pause
12404 		 * result upon successful return of the phy_cfg command.
12405 		 */
12406 		bp->link_info.pause =
12407 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12408 		bp->link_info.auto_pause_setting = 0;
12409 		if (!bp->link_info.force_link_chng)
12410 			bnxt_report_link(bp);
12411 	}
12412 	bp->link_info.force_link_chng = false;
12413 	return rc;
12414 }
12415 
12416 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12417 			      struct hwrm_port_phy_cfg_input *req)
12418 {
12419 	struct ethtool_keee *eee = &bp->eee;
12420 
12421 	if (eee->eee_enabled) {
12422 		u16 eee_speeds;
12423 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12424 
12425 		if (eee->tx_lpi_enabled)
12426 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12427 		else
12428 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12429 
12430 		req->flags |= cpu_to_le32(flags);
12431 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12432 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12433 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12434 	} else {
12435 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12436 	}
12437 }
12438 
12439 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12440 {
12441 	struct hwrm_port_phy_cfg_input *req;
12442 	int rc;
12443 
12444 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12445 	if (rc)
12446 		return rc;
12447 
12448 	if (set_pause)
12449 		bnxt_hwrm_set_pause_common(bp, req);
12450 
12451 	bnxt_hwrm_set_link_common(bp, req);
12452 
12453 	if (set_eee)
12454 		bnxt_hwrm_set_eee(bp, req);
12455 	return hwrm_req_send(bp, req);
12456 }
12457 
12458 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12459 {
12460 	struct hwrm_port_phy_cfg_input *req;
12461 	int rc;
12462 
12463 	if (!BNXT_SINGLE_PF(bp))
12464 		return 0;
12465 
12466 	if (pci_num_vf(bp->pdev) &&
12467 	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12468 		return 0;
12469 
12470 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12471 	if (rc)
12472 		return rc;
12473 
12474 	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12475 	rc = hwrm_req_send(bp, req);
12476 	if (!rc) {
12477 		mutex_lock(&bp->link_lock);
12478 		/* The device is not obliged to bring the link down in certain
12479 		 * scenarios, even when forced.  Setting the state to unknown is
12480 		 * consistent with driver startup and will force the link state to
12481 		 * be reported during subsequent open based on PORT_PHY_QCFG.
12482 		 */
12483 		bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12484 		mutex_unlock(&bp->link_lock);
12485 	}
12486 	return rc;
12487 }
12488 
12489 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12490 {
12491 #ifdef CONFIG_TEE_BNXT_FW
12492 	int rc = tee_bnxt_fw_load();
12493 
12494 	if (rc)
12495 		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12496 
12497 	return rc;
12498 #else
12499 	netdev_err(bp->dev, "OP-TEE not supported\n");
12500 	return -ENODEV;
12501 #endif
12502 }
12503 
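/* Poll the firmware status register while the firmware is booting or
 * recovering.  If the firmware has crashed with no master function,
 * attempt to reset it via OP-TEE.
 */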
12504 static int bnxt_try_recover_fw(struct bnxt *bp)
12505 {
12506 	if (bp->fw_health && bp->fw_health->status_reliable) {
12507 		int retry = 0, rc;
12508 		u32 sts;
12509 
12510 		do {
12511 			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12512 			rc = bnxt_hwrm_poll(bp);
12513 			if (!BNXT_FW_IS_BOOTING(sts) &&
12514 			    !BNXT_FW_IS_RECOVERING(sts))
12515 				break;
12516 			retry++;
12517 		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12518 
12519 		if (!BNXT_FW_IS_HEALTHY(sts)) {
12520 			netdev_err(bp->dev,
12521 				   "Firmware not responding, status: 0x%x\n",
12522 				   sts);
12523 			rc = -ENODEV;
12524 		}
12525 		if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12526 			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12527 			return bnxt_fw_reset_via_optee(bp);
12528 		}
12529 		return rc;
12530 	}
12531 
12532 	return -ENODEV;
12533 }
12534 
12535 void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12536 {
12537 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12538 
12539 	if (!BNXT_NEW_RM(bp))
12540 		return; /* no resource reservations required */
12541 
12542 	hw_resc->resv_cp_rings = 0;
12543 	hw_resc->resv_stat_ctxs = 0;
12544 	hw_resc->resv_irqs = 0;
12545 	hw_resc->resv_tx_rings = 0;
12546 	hw_resc->resv_rx_rings = 0;
12547 	hw_resc->resv_hw_ring_grps = 0;
12548 	hw_resc->resv_vnics = 0;
12549 	hw_resc->resv_rsscos_ctxs = 0;
12550 	if (!fw_reset) {
12551 		bp->tx_nr_rings = 0;
12552 		bp->rx_nr_rings = 0;
12553 	}
12554 }
12555 
12556 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12557 {
12558 	int rc;
12559 
12560 	if (!BNXT_NEW_RM(bp))
12561 		return 0; /* no resource reservations required */
12562 
12563 	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12564 	if (rc)
12565 		netdev_err(bp->dev, "resc_qcaps failed\n");
12566 
12567 	bnxt_clear_reservations(bp, fw_reset);
12568 
12569 	return rc;
12570 }
12571 
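/* Notify the firmware of an interface up/down transition with
 * FUNC_DRV_IF_CHANGE.  On the up transition, the response flags tell us
 * whether the firmware has reset or its resources/capabilities have
 * changed, in which case firmware state is re-initialized and resource
 * reservations are cancelled.
 */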
12572 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12573 {
12574 	struct hwrm_func_drv_if_change_output *resp;
12575 	struct hwrm_func_drv_if_change_input *req;
12576 	bool resc_reinit = false;
12577 	bool caps_change = false;
12578 	int rc, retry = 0;
12579 	bool fw_reset;
12580 	u32 flags = 0;
12581 
12582 	fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12583 	bp->fw_reset_state = 0;
12584 
12585 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12586 		return 0;
12587 
12588 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12589 	if (rc)
12590 		return rc;
12591 
12592 	if (up)
12593 		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12594 	resp = hwrm_req_hold(bp, req);
12595 
12596 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12597 	while (retry < BNXT_FW_IF_RETRY) {
12598 		rc = hwrm_req_send(bp, req);
12599 		if (rc != -EAGAIN)
12600 			break;
12601 
12602 		msleep(50);
12603 		retry++;
12604 	}
12605 
12606 	if (rc == -EAGAIN) {
12607 		hwrm_req_drop(bp, req);
12608 		return rc;
12609 	} else if (!rc) {
12610 		flags = le32_to_cpu(resp->flags);
12611 	} else if (up) {
12612 		rc = bnxt_try_recover_fw(bp);
12613 		fw_reset = true;
12614 	}
12615 	hwrm_req_drop(bp, req);
12616 	if (rc)
12617 		return rc;
12618 
12619 	if (!up) {
12620 		bnxt_inv_fw_health_reg(bp);
12621 		return 0;
12622 	}
12623 
12624 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12625 		resc_reinit = true;
12626 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12627 	    test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12628 		fw_reset = true;
12629 	else
12630 		bnxt_remap_fw_health_regs(bp);
12631 
12632 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12633 		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12634 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12635 		return -ENODEV;
12636 	}
12637 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12638 		caps_change = true;
12639 
12640 	if (resc_reinit || fw_reset || caps_change) {
12641 		if (fw_reset || caps_change) {
12642 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12643 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12644 				bnxt_ulp_irq_stop(bp);
12645 			bnxt_free_ctx_mem(bp, false);
12646 			bnxt_dcb_free(bp);
12647 			rc = bnxt_fw_init_one(bp);
12648 			if (rc) {
12649 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12650 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12651 				return rc;
12652 			}
12653 			/* IRQ will be initialized later in bnxt_request_irq()*/
12654 			/* IRQs will be initialized later in bnxt_request_irq() */
12655 		}
12656 		rc = bnxt_cancel_reservations(bp, fw_reset);
12657 	}
12658 	return rc;
12659 }
12660 
12661 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12662 {
12663 	struct hwrm_port_led_qcaps_output *resp;
12664 	struct hwrm_port_led_qcaps_input *req;
12665 	struct bnxt_pf_info *pf = &bp->pf;
12666 	int rc;
12667 
12668 	bp->num_leds = 0;
12669 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12670 		return 0;
12671 
12672 	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12673 	if (rc)
12674 		return rc;
12675 
12676 	req->port_id = cpu_to_le16(pf->port_id);
12677 	resp = hwrm_req_hold(bp, req);
12678 	rc = hwrm_req_send(bp, req);
12679 	if (rc) {
12680 		hwrm_req_drop(bp, req);
12681 		return rc;
12682 	}
12683 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12684 		int i;
12685 
12686 		bp->num_leds = resp->num_leds;
12687 		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12688 						 bp->num_leds);
12689 		for (i = 0; i < bp->num_leds; i++) {
12690 			struct bnxt_led_info *led = &bp->leds[i];
12691 			__le16 caps = led->led_state_caps;
12692 
12693 			if (!led->led_group_id ||
12694 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
12695 				bp->num_leds = 0;
12696 				break;
12697 			}
12698 		}
12699 	}
12700 	hwrm_req_drop(bp, req);
12701 	return 0;
12702 }
12703 
12704 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12705 {
12706 	struct hwrm_wol_filter_alloc_output *resp;
12707 	struct hwrm_wol_filter_alloc_input *req;
12708 	int rc;
12709 
12710 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12711 	if (rc)
12712 		return rc;
12713 
12714 	req->port_id = cpu_to_le16(bp->pf.port_id);
12715 	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12716 	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12717 	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12718 
12719 	resp = hwrm_req_hold(bp, req);
12720 	rc = hwrm_req_send(bp, req);
12721 	if (!rc)
12722 		bp->wol_filter_id = resp->wol_filter_id;
12723 	hwrm_req_drop(bp, req);
12724 	return rc;
12725 }
12726 
12727 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12728 {
12729 	struct hwrm_wol_filter_free_input *req;
12730 	int rc;
12731 
12732 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12733 	if (rc)
12734 		return rc;
12735 
12736 	req->port_id = cpu_to_le16(bp->pf.port_id);
12737 	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12738 	req->wol_filter_id = bp->wol_filter_id;
12739 
12740 	return hwrm_req_send(bp, req);
12741 }
12742 
12743 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12744 {
12745 	struct hwrm_wol_filter_qcfg_output *resp;
12746 	struct hwrm_wol_filter_qcfg_input *req;
12747 	u16 next_handle = 0;
12748 	int rc;
12749 
12750 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12751 	if (rc)
12752 		return rc;
12753 
12754 	req->port_id = cpu_to_le16(bp->pf.port_id);
12755 	req->handle = cpu_to_le16(handle);
12756 	resp = hwrm_req_hold(bp, req);
12757 	rc = hwrm_req_send(bp, req);
12758 	if (!rc) {
12759 		next_handle = le16_to_cpu(resp->next_handle);
12760 		if (next_handle != 0) {
12761 			if (resp->wol_type ==
12762 			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12763 				bp->wol = 1;
12764 				bp->wol_filter_id = resp->wol_filter_id;
12765 			}
12766 		}
12767 	}
12768 	hwrm_req_drop(bp, req);
12769 	return next_handle;
12770 }
12771 
12772 static void bnxt_get_wol_settings(struct bnxt *bp)
12773 {
12774 	u16 handle = 0;
12775 
12776 	bp->wol = 0;
12777 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12778 		return;
12779 
12780 	do {
12781 		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12782 	} while (handle && handle != 0xffff);
12783 }
12784 
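/* Validate the EEE settings against the current autoneg configuration,
 * disabling EEE if speed autoneg is off and trimming the EEE advertisement
 * to the advertised link modes.  Returns false if anything was changed.
 */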
12785 static bool bnxt_eee_config_ok(struct bnxt *bp)
12786 {
12787 	struct ethtool_keee *eee = &bp->eee;
12788 	struct bnxt_link_info *link_info = &bp->link_info;
12789 
12790 	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12791 		return true;
12792 
12793 	if (eee->eee_enabled) {
12794 		__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12795 		__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12796 
12797 		_bnxt_fw_to_linkmode(advertising, link_info->advertising);
12798 
12799 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12800 			eee->eee_enabled = 0;
12801 			return false;
12802 		}
12803 		if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12804 			linkmode_and(eee->advertised, advertising,
12805 				     eee->supported);
12806 			return false;
12807 		}
12808 	}
12809 	return true;
12810 }
12811 
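/* Query the current link state from firmware and reconfigure the PHY if
 * the requested pause, speed, duplex, or EEE settings differ from what is
 * currently configured.
 */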
12812 static int bnxt_update_phy_setting(struct bnxt *bp)
12813 {
12814 	int rc;
12815 	bool update_link = false;
12816 	bool update_pause = false;
12817 	bool update_eee = false;
12818 	struct bnxt_link_info *link_info = &bp->link_info;
12819 
12820 	rc = bnxt_update_link(bp, true);
12821 	if (rc) {
12822 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12823 			   rc);
12824 		return rc;
12825 	}
12826 	if (!BNXT_SINGLE_PF(bp))
12827 		return 0;
12828 
12829 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12830 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12831 	    link_info->req_flow_ctrl)
12832 		update_pause = true;
12833 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12834 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
12835 		update_pause = true;
12836 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12837 		if (BNXT_AUTO_MODE(link_info->auto_mode))
12838 			update_link = true;
12839 		if (bnxt_force_speed_updated(link_info))
12840 			update_link = true;
12841 		if (link_info->req_duplex != link_info->duplex_setting)
12842 			update_link = true;
12843 	} else {
12844 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12845 			update_link = true;
12846 		if (bnxt_auto_speed_updated(link_info))
12847 			update_link = true;
12848 	}
12849 
12850 	/* The last close may have shutdown the link, so need to call
12851 	/* The last close may have shut down the link, so we need to call
12852 	 */
12853 	if (!BNXT_LINK_IS_UP(bp))
12854 		update_link = true;
12855 
12856 	if (!bnxt_eee_config_ok(bp))
12857 		update_eee = true;
12858 
12859 	if (update_link)
12860 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12861 	else if (update_pause)
12862 		rc = bnxt_hwrm_set_pause(bp);
12863 	if (rc) {
12864 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12865 			   rc);
12866 		return rc;
12867 	}
12868 
12869 	return rc;
12870 }
12871 
12872 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12873 
12874 static int bnxt_reinit_after_abort(struct bnxt *bp)
12875 {
12876 	int rc;
12877 
12878 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12879 		return -EBUSY;
12880 
12881 	if (bp->dev->reg_state == NETREG_UNREGISTERED)
12882 		return -ENODEV;
12883 
12884 	rc = bnxt_fw_init_one(bp);
12885 	if (!rc) {
12886 		bnxt_clear_int_mode(bp);
12887 		rc = bnxt_init_int_mode(bp);
12888 		if (!rc) {
12889 			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12890 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12891 		}
12892 	}
12893 	return rc;
12894 }
12895 
12896 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12897 {
12898 	struct bnxt_ntuple_filter *ntp_fltr;
12899 	struct bnxt_l2_filter *l2_fltr;
12900 
12901 	if (list_empty(&fltr->list))
12902 		return;
12903 
12904 	if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12905 		ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12906 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12907 		atomic_inc(&l2_fltr->refcnt);
12908 		ntp_fltr->l2_fltr = l2_fltr;
12909 		if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12910 			bnxt_del_ntp_filter(bp, ntp_fltr);
12911 			netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12912 				   fltr->sw_id);
12913 		}
12914 	} else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12915 		l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12916 		if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12917 			bnxt_del_l2_filter(bp, l2_fltr);
12918 			netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12919 				   fltr->sw_id);
12920 		}
12921 	}
12922 }
12923 
12924 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12925 {
12926 	struct bnxt_filter_base *usr_fltr, *tmp;
12927 
12928 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12929 		bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12930 }
12931 
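/* Build a CPU mask per TX ring index by spreading online CPUs (preferring
 * the device's NUMA node) across the rings of each traffic class, then
 * register the masks with XPS.
 */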
12932 static int bnxt_set_xps_mapping(struct bnxt *bp)
12933 {
12934 	int numa_node = dev_to_node(&bp->pdev->dev);
12935 	unsigned int q_idx, map_idx, cpu, i;
12936 	const struct cpumask *cpu_mask_ptr;
12937 	int nr_cpus = num_online_cpus();
12938 	cpumask_t *q_map;
12939 	int rc = 0;
12940 
12941 	q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12942 	if (!q_map)
12943 		return -ENOMEM;
12944 
12945 	/* Create CPU mask for all TX queues across MQPRIO traffic classes.
12946 	 * Each TC has the same number of TX queues. The nth TX queue for each
12947 	 * TC will have the same CPU mask.
12948 	 */
12949 	for (i = 0; i < nr_cpus; i++) {
12950 		map_idx = i % bp->tx_nr_rings_per_tc;
12951 		cpu = cpumask_local_spread(i, numa_node);
12952 		cpu_mask_ptr = get_cpu_mask(cpu);
12953 		cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12954 	}
12955 
12956 	/* Register CPU mask for each TX queue except the ones marked for XDP */
12957 	for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12958 		map_idx = q_idx % bp->tx_nr_rings_per_tc;
12959 		rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12960 		if (rc) {
12961 			netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12962 				    q_idx);
12963 			break;
12964 		}
12965 	}
12966 
12967 	kfree(q_map);
12968 
12969 	return rc;
12970 }
12971 
12972 static int bnxt_tx_nr_rings(struct bnxt *bp)
12973 {
12974 	return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
12975 			    bp->tx_nr_rings_per_tc;
12976 }
12977 
12978 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
12979 {
12980 	return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
12981 }
12982 
12983 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12984 {
12985 	int rc = 0;
12986 
12987 	netif_carrier_off(bp->dev);
12988 	if (irq_re_init) {
12989 		/* Reserve rings now if none were reserved at driver probe. */
12990 		rc = bnxt_init_dflt_ring_mode(bp);
12991 		if (rc) {
12992 			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
12993 			return rc;
12994 		}
12995 	}
12996 	rc = bnxt_reserve_rings(bp, irq_re_init);
12997 	if (rc)
12998 		return rc;
12999 
13000 	/* Make adjustments if reserved TX rings are less than requested */
13001 	bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
13002 	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
13003 	if (bp->tx_nr_rings_xdp) {
13004 		bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
13005 		bp->tx_nr_rings += bp->tx_nr_rings_xdp;
13006 	}
13007 	rc = bnxt_alloc_mem(bp, irq_re_init);
13008 	if (rc) {
13009 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13010 		goto open_err_free_mem;
13011 	}
13012 
13013 	if (irq_re_init) {
13014 		bnxt_init_napi(bp);
13015 		rc = bnxt_request_irq(bp);
13016 		if (rc) {
13017 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
13018 			goto open_err_irq;
13019 		}
13020 	}
13021 
13022 	rc = bnxt_init_nic(bp, irq_re_init);
13023 	if (rc) {
13024 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13025 		goto open_err_irq;
13026 	}
13027 
13028 	bnxt_enable_napi(bp);
13029 	bnxt_debug_dev_init(bp);
13030 
13031 	if (link_re_init) {
13032 		mutex_lock(&bp->link_lock);
13033 		rc = bnxt_update_phy_setting(bp);
13034 		mutex_unlock(&bp->link_lock);
13035 		if (rc) {
13036 			netdev_warn(bp->dev, "failed to update phy settings\n");
13037 			if (BNXT_SINGLE_PF(bp)) {
13038 				bp->link_info.phy_retry = true;
13039 				bp->link_info.phy_retry_expires =
13040 					jiffies + 5 * HZ;
13041 			}
13042 		}
13043 	}
13044 
13045 	if (irq_re_init) {
13046 		udp_tunnel_nic_reset_ntf(bp->dev);
13047 		rc = bnxt_set_xps_mapping(bp);
13048 		if (rc)
13049 			netdev_warn(bp->dev, "failed to set xps mapping\n");
13050 	}
13051 
13052 	if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
13053 		if (!static_key_enabled(&bnxt_xdp_locking_key))
13054 			static_branch_enable(&bnxt_xdp_locking_key);
13055 	} else if (static_key_enabled(&bnxt_xdp_locking_key)) {
13056 		static_branch_disable(&bnxt_xdp_locking_key);
13057 	}
13058 	set_bit(BNXT_STATE_OPEN, &bp->state);
13059 	bnxt_enable_int(bp);
13060 	/* Enable TX queues */
13061 	bnxt_tx_enable(bp);
13062 	mod_timer(&bp->timer, jiffies + bp->current_interval);
13063 	/* Poll link status and check SFP+ module status */
13064 	mutex_lock(&bp->link_lock);
13065 	bnxt_get_port_module_status(bp);
13066 	mutex_unlock(&bp->link_lock);
13067 
13068 	/* VF-reps may need to be re-opened after the PF is re-opened */
13069 	if (BNXT_PF(bp))
13070 		bnxt_vf_reps_open(bp);
13071 	bnxt_ptp_init_rtc(bp, true);
13072 	bnxt_ptp_cfg_tstamp_filters(bp);
13073 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13074 		bnxt_hwrm_realloc_rss_ctx_vnic(bp);
13075 	bnxt_cfg_usr_fltrs(bp);
13076 	return 0;
13077 
13078 open_err_irq:
13079 	bnxt_del_napi(bp);
13080 
13081 open_err_free_mem:
13082 	bnxt_free_skbs(bp);
13083 	bnxt_free_irq(bp);
13084 	bnxt_free_mem(bp, true);
13085 	return rc;
13086 }
13087 
13088 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13089 {
13090 	int rc = 0;
13091 
13092 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13093 		rc = -EIO;
13094 	if (!rc)
13095 		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13096 	if (rc) {
13097 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13098 		netif_close(bp->dev);
13099 	}
13100 	return rc;
13101 }
13102 
13103 /* netdev instance lock held, open the NIC halfway by allocating all
13104  * resources, but NAPI, IRQ, and TX are not enabled.  This is mainly used
13105  * for offline self-tests.
13106  */
13107 int bnxt_half_open_nic(struct bnxt *bp)
13108 {
13109 	int rc = 0;
13110 
13111 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13112 		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13113 		rc = -ENODEV;
13114 		goto half_open_err;
13115 	}
13116 
13117 	rc = bnxt_alloc_mem(bp, true);
13118 	if (rc) {
13119 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13120 		goto half_open_err;
13121 	}
13122 	bnxt_init_napi(bp);
13123 	set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13124 	rc = bnxt_init_nic(bp, true);
13125 	if (rc) {
13126 		clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13127 		bnxt_del_napi(bp);
13128 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13129 		goto half_open_err;
13130 	}
13131 	return 0;
13132 
13133 half_open_err:
13134 	bnxt_free_skbs(bp);
13135 	bnxt_free_mem(bp, true);
13136 	netif_close(bp->dev);
13137 	return rc;
13138 }
13139 
13140 /* netdev instance lock held, this call can only be made after a previous
13141  * successful call to bnxt_half_open_nic().
13142  */
13143 void bnxt_half_close_nic(struct bnxt *bp)
13144 {
13145 	bnxt_hwrm_resource_free(bp, false, true);
13146 	bnxt_del_napi(bp);
13147 	bnxt_free_skbs(bp);
13148 	bnxt_free_mem(bp, true);
13149 	clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13150 }
13151 
13152 void bnxt_reenable_sriov(struct bnxt *bp)
13153 {
13154 	if (BNXT_PF(bp)) {
13155 		struct bnxt_pf_info *pf = &bp->pf;
13156 		int n = pf->active_vfs;
13157 
13158 		if (n)
13159 			bnxt_cfg_hw_sriov(bp, &n, true);
13160 	}
13161 }
13162 
13163 static int bnxt_open(struct net_device *dev)
13164 {
13165 	struct bnxt *bp = netdev_priv(dev);
13166 	int rc;
13167 
13168 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13169 		rc = bnxt_reinit_after_abort(bp);
13170 		if (rc) {
13171 			if (rc == -EBUSY)
13172 				netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13173 			else
13174 				netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13175 			return -ENODEV;
13176 		}
13177 	}
13178 
13179 	rc = bnxt_hwrm_if_change(bp, true);
13180 	if (rc)
13181 		return rc;
13182 
13183 	rc = __bnxt_open_nic(bp, true, true);
13184 	if (rc) {
13185 		bnxt_hwrm_if_change(bp, false);
13186 	} else {
13187 		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13188 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13189 				bnxt_queue_sp_work(bp,
13190 						   BNXT_RESTART_ULP_SP_EVENT);
13191 		}
13192 	}
13193 
13194 	return rc;
13195 }
13196 
13197 static bool bnxt_drv_busy(struct bnxt *bp)
13198 {
13199 	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13200 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
13201 }
13202 
13203 static void bnxt_get_ring_stats(struct bnxt *bp,
13204 				struct rtnl_link_stats64 *stats);
13205 
13206 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13207 			     bool link_re_init)
13208 {
13209 	/* Close the VF-reps before closing PF */
13210 	if (BNXT_PF(bp))
13211 		bnxt_vf_reps_close(bp);
13212 
13213 	/* Change device state to avoid TX queue wake-ups */
13214 	bnxt_tx_disable(bp);
13215 
13216 	clear_bit(BNXT_STATE_OPEN, &bp->state);
13217 	smp_mb__after_atomic();
13218 	while (bnxt_drv_busy(bp))
13219 		msleep(20);
13220 
13221 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13222 		bnxt_clear_rss_ctxs(bp);
13223 	/* Flush rings and disable interrupts */
13224 	bnxt_shutdown_nic(bp, irq_re_init);
13225 
13226 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13227 
13228 	bnxt_debug_dev_exit(bp);
13229 	bnxt_disable_napi(bp);
13230 	timer_delete_sync(&bp->timer);
13231 	bnxt_free_skbs(bp);
13232 
13233 	/* Save ring stats before shutdown */
13234 	if (bp->bnapi && irq_re_init) {
13235 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13236 		bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
13237 	}
13238 	if (irq_re_init) {
13239 		bnxt_free_irq(bp);
13240 		bnxt_del_napi(bp);
13241 	}
13242 	bnxt_free_mem(bp, irq_re_init);
13243 }
13244 
13245 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13246 {
13247 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13248 		/* If we get here, it means firmware reset is in progress
13249 		 * while we are trying to close.  We can safely proceed with
13250 		 * the close because we are holding the netdev instance lock.
13251 		 * Some firmware messages may fail as we proceed to close.
13252 		 * We set the ABORT_ERR flag here so that the FW reset thread
13253 		 * will later abort when it gets the netdev instance lock
13254 		 * and sees the flag.
13255 		 */
13256 		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13257 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13258 	}
13259 
13260 #ifdef CONFIG_BNXT_SRIOV
13261 	if (bp->sriov_cfg) {
13262 		int rc;
13263 
13264 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13265 						      !bp->sriov_cfg,
13266 						      BNXT_SRIOV_CFG_WAIT_TMO);
13267 		if (!rc)
13268 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13269 		else if (rc < 0)
13270 			netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13271 	}
13272 #endif
13273 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
13274 }
13275 
13276 static int bnxt_close(struct net_device *dev)
13277 {
13278 	struct bnxt *bp = netdev_priv(dev);
13279 
13280 	bnxt_close_nic(bp, true, true);
13281 	bnxt_hwrm_shutdown_link(bp);
13282 	bnxt_hwrm_if_change(bp, false);
13283 	return 0;
13284 }
13285 
13286 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13287 				   u16 *val)
13288 {
13289 	struct hwrm_port_phy_mdio_read_output *resp;
13290 	struct hwrm_port_phy_mdio_read_input *req;
13291 	int rc;
13292 
13293 	if (bp->hwrm_spec_code < 0x10a00)
13294 		return -EOPNOTSUPP;
13295 
13296 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13297 	if (rc)
13298 		return rc;
13299 
13300 	req->port_id = cpu_to_le16(bp->pf.port_id);
13301 	req->phy_addr = phy_addr;
13302 	req->reg_addr = cpu_to_le16(reg & 0x1f);
13303 	if (mdio_phy_id_is_c45(phy_addr)) {
13304 		req->cl45_mdio = 1;
13305 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
13306 		req->dev_addr = mdio_phy_id_devad(phy_addr);
13307 		req->reg_addr = cpu_to_le16(reg);
13308 	}
13309 
13310 	resp = hwrm_req_hold(bp, req);
13311 	rc = hwrm_req_send(bp, req);
13312 	if (!rc)
13313 		*val = le16_to_cpu(resp->reg_data);
13314 	hwrm_req_drop(bp, req);
13315 	return rc;
13316 }
13317 
13318 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13319 				    u16 val)
13320 {
13321 	struct hwrm_port_phy_mdio_write_input *req;
13322 	int rc;
13323 
13324 	if (bp->hwrm_spec_code < 0x10a00)
13325 		return -EOPNOTSUPP;
13326 
13327 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13328 	if (rc)
13329 		return rc;
13330 
13331 	req->port_id = cpu_to_le16(bp->pf.port_id);
13332 	req->phy_addr = phy_addr;
13333 	req->reg_addr = cpu_to_le16(reg & 0x1f);
13334 	if (mdio_phy_id_is_c45(phy_addr)) {
13335 		req->cl45_mdio = 1;
13336 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
13337 		req->dev_addr = mdio_phy_id_devad(phy_addr);
13338 		req->reg_addr = cpu_to_le16(reg);
13339 	}
13340 	req->reg_data = cpu_to_le16(val);
13341 
13342 	return hwrm_req_send(bp, req);
13343 }
13344 
13345 /* netdev instance lock held */
13346 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13347 {
13348 	struct mii_ioctl_data *mdio = if_mii(ifr);
13349 	struct bnxt *bp = netdev_priv(dev);
13350 	int rc;
13351 
13352 	switch (cmd) {
13353 	case SIOCGMIIPHY:
13354 		mdio->phy_id = bp->link_info.phy_addr;
13355 
13356 		fallthrough;
13357 	case SIOCGMIIREG: {
13358 		u16 mii_regval = 0;
13359 
13360 		if (!netif_running(dev))
13361 			return -EAGAIN;
13362 
13363 		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13364 					     &mii_regval);
13365 		mdio->val_out = mii_regval;
13366 		return rc;
13367 	}
13368 
13369 	case SIOCSMIIREG:
13370 		if (!netif_running(dev))
13371 			return -EAGAIN;
13372 
13373 		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13374 						mdio->val_in);
13375 
13376 	default:
13377 		/* do nothing */
13378 		break;
13379 	}
13380 	return -EOPNOTSUPP;
13381 }
13382 
13383 static void bnxt_get_ring_stats(struct bnxt *bp,
13384 				struct rtnl_link_stats64 *stats)
13385 {
13386 	int i;
13387 
13388 	for (i = 0; i < bp->cp_nr_rings; i++) {
13389 		struct bnxt_napi *bnapi = bp->bnapi[i];
13390 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13391 		u64 *sw = cpr->stats.sw_stats;
13392 
13393 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13394 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13395 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13396 
13397 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13398 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13399 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13400 
13401 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13402 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13403 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13404 
13405 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13406 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13407 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13408 
13409 		stats->rx_missed_errors +=
13410 			BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13411 
13412 		stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13413 
13414 		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13415 
13416 		stats->rx_dropped +=
13417 			cpr->sw_stats->rx.rx_netpoll_discards +
13418 			cpr->sw_stats->rx.rx_oom_discards;
13419 	}
13420 }
13421 
13422 static void bnxt_add_prev_stats(struct bnxt *bp,
13423 				struct rtnl_link_stats64 *stats)
13424 {
13425 	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13426 
13427 	stats->rx_packets += prev_stats->rx_packets;
13428 	stats->tx_packets += prev_stats->tx_packets;
13429 	stats->rx_bytes += prev_stats->rx_bytes;
13430 	stats->tx_bytes += prev_stats->tx_bytes;
13431 	stats->rx_missed_errors += prev_stats->rx_missed_errors;
13432 	stats->multicast += prev_stats->multicast;
13433 	stats->rx_dropped += prev_stats->rx_dropped;
13434 	stats->tx_dropped += prev_stats->tx_dropped;
13435 }
13436 
13437 static void
13438 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13439 {
13440 	struct bnxt *bp = netdev_priv(dev);
13441 
13442 	set_bit(BNXT_STATE_READ_STATS, &bp->state);
13443 	/* Make sure bnxt_close_nic() sees that we are reading stats before
13444 	 * we check the BNXT_STATE_OPEN flag.
13445 	 */
13446 	smp_mb__after_atomic();
13447 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13448 		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13449 		*stats = bp->net_stats_prev;
13450 		return;
13451 	}
13452 
13453 	bnxt_get_ring_stats(bp, stats);
13454 	bnxt_add_prev_stats(bp, stats);
13455 
13456 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
13457 		u64 *rx = bp->port_stats.sw_stats;
13458 		u64 *tx = bp->port_stats.sw_stats +
13459 			  BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13460 
13461 		stats->rx_crc_errors =
13462 			BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13463 		stats->rx_frame_errors =
13464 			BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13465 		stats->rx_length_errors =
13466 			BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13467 			BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13468 			BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13469 		stats->rx_errors =
13470 			BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13471 			BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13472 		stats->collisions =
13473 			BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13474 		stats->tx_fifo_errors =
13475 			BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13476 		stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13477 	}
13478 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13479 }
13480 
13481 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
13482 					struct bnxt_total_ring_err_stats *stats,
13483 					struct bnxt_cp_ring_info *cpr)
13484 {
13485 	struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13486 	u64 *hw_stats = cpr->stats.sw_stats;
13487 
13488 	stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13489 	stats->rx_total_resets += sw_stats->rx.rx_resets;
13490 	stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13491 	stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13492 	stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13493 	stats->rx_total_ring_discards +=
13494 		BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13495 	stats->tx_total_resets += sw_stats->tx.tx_resets;
13496 	stats->tx_total_ring_discards +=
13497 		BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13498 	stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13499 }
13500 
13501 void bnxt_get_ring_err_stats(struct bnxt *bp,
13502 			     struct bnxt_total_ring_err_stats *stats)
13503 {
13504 	int i;
13505 
13506 	for (i = 0; i < bp->cp_nr_rings; i++)
13507 		bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13508 }
13509 
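/* Copy the netdev multicast list into the default VNIC and report whether
 * it changed.  Falls back to ALL_MCAST if the list exceeds
 * BNXT_MAX_MC_ADDRS.
 */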
13510 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13511 {
13512 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13513 	struct net_device *dev = bp->dev;
13514 	struct netdev_hw_addr *ha;
13515 	u8 *haddr;
13516 	int mc_count = 0;
13517 	bool update = false;
13518 	int off = 0;
13519 
13520 	netdev_for_each_mc_addr(ha, dev) {
13521 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
13522 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13523 			vnic->mc_list_count = 0;
13524 			return false;
13525 		}
13526 		haddr = ha->addr;
13527 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13528 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13529 			update = true;
13530 		}
13531 		off += ETH_ALEN;
13532 		mc_count++;
13533 	}
13534 	if (mc_count)
13535 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13536 
13537 	if (mc_count != vnic->mc_list_count) {
13538 		vnic->mc_list_count = mc_count;
13539 		update = true;
13540 	}
13541 	return update;
13542 }
13543 
13544 static bool bnxt_uc_list_updated(struct bnxt *bp)
13545 {
13546 	struct net_device *dev = bp->dev;
13547 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13548 	struct netdev_hw_addr *ha;
13549 	int off = 0;
13550 
13551 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13552 		return true;
13553 
13554 	netdev_for_each_uc_addr(ha, dev) {
13555 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13556 			return true;
13557 
13558 		off += ETH_ALEN;
13559 	}
13560 	return false;
13561 }
13562 
13563 static void bnxt_set_rx_mode(struct net_device *dev)
13564 {
13565 	struct bnxt *bp = netdev_priv(dev);
13566 	struct bnxt_vnic_info *vnic;
13567 	bool mc_update = false;
13568 	bool uc_update;
13569 	u32 mask;
13570 
13571 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13572 		return;
13573 
13574 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13575 	mask = vnic->rx_mask;
13576 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13577 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13578 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13579 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13580 
13581 	if (dev->flags & IFF_PROMISC)
13582 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13583 
13584 	uc_update = bnxt_uc_list_updated(bp);
13585 
13586 	if (dev->flags & IFF_BROADCAST)
13587 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13588 	if (dev->flags & IFF_ALLMULTI) {
13589 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13590 		vnic->mc_list_count = 0;
13591 	} else if (dev->flags & IFF_MULTICAST) {
13592 		mc_update = bnxt_mc_list_updated(bp, &mask);
13593 	}
13594 
13595 	if (mask != vnic->rx_mask || uc_update || mc_update) {
13596 		vnic->rx_mask = mask;
13597 
13598 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13599 	}
13600 }
13601 
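/* Program the unicast filter list and RX mask of the default VNIC into
 * the hardware, falling back to promiscuous mode if there are too many
 * unicast addresses and to ALL_MCAST if the multicast update fails.
 */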
13602 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13603 {
13604 	struct net_device *dev = bp->dev;
13605 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13606 	struct netdev_hw_addr *ha;
13607 	int i, off = 0, rc;
13608 	bool uc_update;
13609 
13610 	netif_addr_lock_bh(dev);
13611 	uc_update = bnxt_uc_list_updated(bp);
13612 	netif_addr_unlock_bh(dev);
13613 
13614 	if (!uc_update)
13615 		goto skip_uc;
13616 
13617 	for (i = 1; i < vnic->uc_filter_count; i++) {
13618 		struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13619 
13620 		bnxt_hwrm_l2_filter_free(bp, fltr);
13621 		bnxt_del_l2_filter(bp, fltr);
13622 	}
13623 
13624 	vnic->uc_filter_count = 1;
13625 
13626 	netif_addr_lock_bh(dev);
13627 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13628 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13629 	} else {
13630 		netdev_for_each_uc_addr(ha, dev) {
13631 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13632 			off += ETH_ALEN;
13633 			vnic->uc_filter_count++;
13634 		}
13635 	}
13636 	netif_addr_unlock_bh(dev);
13637 
13638 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13639 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13640 		if (rc) {
13641 			if (BNXT_VF(bp) && rc == -ENODEV) {
13642 				if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13643 					netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13644 				else
13645 					netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13646 				rc = 0;
13647 			} else {
13648 				netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13649 			}
13650 			vnic->uc_filter_count = i;
13651 			return rc;
13652 		}
13653 	}
13654 	if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13655 		netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13656 
13657 skip_uc:
13658 	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13659 	    !bnxt_promisc_ok(bp))
13660 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13661 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13662 	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13663 		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13664 			    rc);
13665 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13666 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13667 		vnic->mc_list_count = 0;
13668 		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13669 	}
13670 	if (rc)
13671 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13672 			   rc);
13673 
13674 	return rc;
13675 }
13676 
13677 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13678 {
13679 #ifdef CONFIG_BNXT_SRIOV
13680 	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13681 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13682 
13683 		/* If no minimum rings were provisioned by the PF, don't
13684 		 * reserve rings by default when the device is down.
13685 		 */
13686 		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13687 			return true;
13688 
13689 		if (!netif_running(bp->dev))
13690 			return false;
13691 	}
13692 #endif
13693 	return true;
13694 }
13695 
13696 /* If the chip and firmware support RFS */
13697 static bool bnxt_rfs_supported(struct bnxt *bp)
13698 {
13699 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13700 		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13701 			return true;
13702 		return false;
13703 	}
13704 	/* 212 firmware is broken for aRFS */
13705 	if (BNXT_FW_MAJ(bp) == 212)
13706 		return false;
13707 	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13708 		return true;
13709 	if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13710 		return true;
13711 	return false;
13712 }
13713 
13714 /* If runtime conditions support RFS */
13715 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13716 {
13717 	struct bnxt_hw_rings hwr = {0};
13718 	int max_vnics, max_rss_ctxs;
13719 
13720 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13721 	    !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13722 		return bnxt_rfs_supported(bp);
13723 
13724 	if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13725 		return false;
13726 
13727 	hwr.grp = bp->rx_nr_rings;
13728 	hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13729 	if (new_rss_ctx)
13730 		hwr.vnic++;
13731 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13732 	max_vnics = bnxt_get_max_func_vnics(bp);
13733 	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13734 
13735 	if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13736 		if (bp->rx_nr_rings > 1)
13737 			netdev_warn(bp->dev,
13738 				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13739 				    min(max_rss_ctxs - 1, max_vnics - 1));
13740 		return false;
13741 	}
13742 
13743 	if (!BNXT_NEW_RM(bp))
13744 		return true;
13745 
13746 	/* Do not reduce VNIC and RSS ctx reservations.  There is a FW
13747 	 * issue that will mess up the default VNIC if we reduce the
13748 	 * reservations.
13749 	 */
13750 	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13751 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13752 		return true;
13753 
13754 	bnxt_hwrm_reserve_rings(bp, &hwr);
13755 	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13756 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13757 		return true;
13758 
13759 	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13760 	hwr.vnic = 1;
13761 	hwr.rss_ctx = 0;
13762 	bnxt_hwrm_reserve_rings(bp, &hwr);
13763 	return false;
13764 }
13765 
13766 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13767 					   netdev_features_t features)
13768 {
13769 	struct bnxt *bp = netdev_priv(dev);
13770 	netdev_features_t vlan_features;
13771 
13772 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13773 		features &= ~NETIF_F_NTUPLE;
13774 
13775 	if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13776 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13777 
13778 	if (!(features & NETIF_F_GRO))
13779 		features &= ~NETIF_F_GRO_HW;
13780 
13781 	if (features & NETIF_F_GRO_HW)
13782 		features &= ~NETIF_F_LRO;
13783 
13784 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
13785 	 * turned on or off together.
13786 	 */
13787 	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13788 	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13789 		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13790 			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13791 		else if (vlan_features)
13792 			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13793 	}
13794 #ifdef CONFIG_BNXT_SRIOV
13795 	if (BNXT_VF(bp) && bp->vf.vlan)
13796 		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13797 #endif
13798 	return features;
13799 }
13800 
13801 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13802 				bool link_re_init, u32 flags, bool update_tpa)
13803 {
13804 	bnxt_close_nic(bp, irq_re_init, link_re_init);
13805 	bp->flags = flags;
13806 	if (update_tpa)
13807 		bnxt_set_ring_params(bp);
13808 	return bnxt_open_nic(bp, irq_re_init, link_re_init);
13809 }
13810 
13811 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13812 {
13813 	bool update_tpa = false, update_ntuple = false;
13814 	struct bnxt *bp = netdev_priv(dev);
13815 	u32 flags = bp->flags;
13816 	u32 changes;
13817 	int rc = 0;
13818 	bool re_init = false;
13819 
13820 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13821 	if (features & NETIF_F_GRO_HW)
13822 		flags |= BNXT_FLAG_GRO;
13823 	else if (features & NETIF_F_LRO)
13824 		flags |= BNXT_FLAG_LRO;
13825 
13826 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13827 		flags &= ~BNXT_FLAG_TPA;
13828 
13829 	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13830 		flags |= BNXT_FLAG_STRIP_VLAN;
13831 
13832 	if (features & NETIF_F_NTUPLE)
13833 		flags |= BNXT_FLAG_RFS;
13834 	else
13835 		bnxt_clear_usr_fltrs(bp, true);
13836 
13837 	changes = flags ^ bp->flags;
13838 	if (changes & BNXT_FLAG_TPA) {
13839 		update_tpa = true;
13840 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13841 		    (flags & BNXT_FLAG_TPA) == 0 ||
13842 		    (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13843 			re_init = true;
13844 	}
13845 
13846 	if (changes & ~BNXT_FLAG_TPA)
13847 		re_init = true;
13848 
13849 	if (changes & BNXT_FLAG_RFS)
13850 		update_ntuple = true;
13851 
13852 	if (flags != bp->flags) {
13853 		u32 old_flags = bp->flags;
13854 
13855 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13856 			bp->flags = flags;
13857 			if (update_tpa)
13858 				bnxt_set_ring_params(bp);
13859 			return rc;
13860 		}
13861 
13862 		if (update_ntuple)
13863 			return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13864 
13865 		if (re_init)
13866 			return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13867 
13868 		if (update_tpa) {
13869 			bp->flags = flags;
13870 			rc = bnxt_set_tpa(bp,
13871 					  (flags & BNXT_FLAG_TPA) ?
13872 					  true : false);
13873 			if (rc)
13874 				bp->flags = old_flags;
13875 		}
13876 	}
13877 	return rc;
13878 }
13879 
13880 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13881 			      u8 **nextp)
13882 {
13883 	struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13884 	struct hop_jumbo_hdr *jhdr;
13885 	int hdr_count = 0;
13886 	u8 *nexthdr;
13887 	int start;
13888 
13889 	/* Check that there are at most 2 IPv6 extension headers, no
13890 	 * fragment header, and each is <= 64 bytes.
13891 	 */
13892 	start = nw_off + sizeof(*ip6h);
13893 	nexthdr = &ip6h->nexthdr;
13894 	while (ipv6_ext_hdr(*nexthdr)) {
13895 		struct ipv6_opt_hdr *hp;
13896 		int hdrlen;
13897 
13898 		if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13899 		    *nexthdr == NEXTHDR_FRAGMENT)
13900 			return false;
13901 		hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13902 					  skb_headlen(skb), NULL);
13903 		if (!hp)
13904 			return false;
13905 		if (*nexthdr == NEXTHDR_AUTH)
13906 			hdrlen = ipv6_authlen(hp);
13907 		else
13908 			hdrlen = ipv6_optlen(hp);
13909 
13910 		if (hdrlen > 64)
13911 			return false;
13912 
13913 		/* The ext header may be a hop-by-hop header inserted for
13914 		 * big TCP purposes.  This will be removed before sending
13915 		 * from the NIC, so do not count it.
13916 		 */
13917 		if (*nexthdr == NEXTHDR_HOP) {
13918 			if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
13919 				goto increment_hdr;
13920 
13921 			jhdr = (struct hop_jumbo_hdr *)hp;
13922 			if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
13923 			    jhdr->nexthdr != IPPROTO_TCP)
13924 				goto increment_hdr;
13925 
13926 			goto next_hdr;
13927 		}
13928 increment_hdr:
13929 		hdr_count++;
13930 next_hdr:
13931 		nexthdr = &hp->nexthdr;
13932 		start += hdrlen;
13933 	}
13934 	if (nextp) {
13935 		/* Caller will check inner protocol */
13936 		if (skb->encapsulation) {
13937 			*nextp = nexthdr;
13938 			return true;
13939 		}
13940 		*nextp = NULL;
13941 	}
13942 	/* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13943 	return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13944 }
13945 
13946 /* For UDP, we can only handle one VXLAN port, one VXLAN-GPE port, and one Geneve port. */
13947 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13948 {
13949 	struct udphdr *uh = udp_hdr(skb);
13950 	__be16 udp_port = uh->dest;
13951 
13952 	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13953 	    udp_port != bp->vxlan_gpe_port)
13954 		return false;
13955 	if (skb->inner_protocol == htons(ETH_P_TEB)) {
13956 		struct ethhdr *eh = inner_eth_hdr(skb);
13957 
13958 		switch (eh->h_proto) {
13959 		case htons(ETH_P_IP):
13960 			return true;
13961 		case htons(ETH_P_IPV6):
13962 			return bnxt_exthdr_check(bp, skb,
13963 						 skb_inner_network_offset(skb),
13964 						 NULL);
13965 		}
13966 	} else if (skb->inner_protocol == htons(ETH_P_IP)) {
13967 		return true;
13968 	} else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13969 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13970 					 NULL);
13971 	}
13972 	return false;
13973 }
13974 
13975 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13976 {
13977 	switch (l4_proto) {
13978 	case IPPROTO_UDP:
13979 		return bnxt_udp_tunl_check(bp, skb);
13980 	case IPPROTO_IPIP:
13981 		return true;
13982 	case IPPROTO_GRE: {
13983 		switch (skb->inner_protocol) {
13984 		default:
13985 			return false;
13986 		case htons(ETH_P_IP):
13987 			return true;
13988 		case htons(ETH_P_IPV6):
13989 			fallthrough;
13990 		}
13991 	}
13992 	case IPPROTO_IPV6:
13993 		/* Check ext headers of inner ipv6 */
13994 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13995 					 NULL);
13996 	}
13997 	return false;
13998 }
13999 
14000 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
14001 					     struct net_device *dev,
14002 					     netdev_features_t features)
14003 {
14004 	struct bnxt *bp = netdev_priv(dev);
14005 	u8 *l4_proto;
14006 
14007 	features = vlan_features_check(skb, features);
14008 	switch (vlan_get_protocol(skb)) {
14009 	case htons(ETH_P_IP):
14010 		if (!skb->encapsulation)
14011 			return features;
14012 		l4_proto = &ip_hdr(skb)->protocol;
14013 		if (bnxt_tunl_check(bp, skb, *l4_proto))
14014 			return features;
14015 		break;
14016 	case htons(ETH_P_IPV6):
14017 		if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
14018 				       &l4_proto))
14019 			break;
14020 		if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
14021 			return features;
14022 		break;
14023 	}
14024 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
14025 }
14026 
14027 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
14028 			 u32 *reg_buf)
14029 {
14030 	struct hwrm_dbg_read_direct_output *resp;
14031 	struct hwrm_dbg_read_direct_input *req;
14032 	__le32 *dbg_reg_buf;
14033 	dma_addr_t mapping;
14034 	int rc, i;
14035 
14036 	rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
14037 	if (rc)
14038 		return rc;
14039 
14040 	dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
14041 					 &mapping);
14042 	if (!dbg_reg_buf) {
14043 		rc = -ENOMEM;
14044 		goto dbg_rd_reg_exit;
14045 	}
14046 
14047 	req->host_dest_addr = cpu_to_le64(mapping);
14048 
14049 	resp = hwrm_req_hold(bp, req);
14050 	req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
14051 	req->read_len32 = cpu_to_le32(num_words);
14052 
14053 	rc = hwrm_req_send(bp, req);
14054 	if (rc || resp->error_code) {
14055 		rc = -EIO;
14056 		goto dbg_rd_reg_exit;
14057 	}
14058 	for (i = 0; i < num_words; i++)
14059 		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
14060 
14061 dbg_rd_reg_exit:
14062 	hwrm_req_drop(bp, req);
14063 	return rc;
14064 }
14065 
14066 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
14067 				       u32 ring_id, u32 *prod, u32 *cons)
14068 {
14069 	struct hwrm_dbg_ring_info_get_output *resp;
14070 	struct hwrm_dbg_ring_info_get_input *req;
14071 	int rc;
14072 
14073 	rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
14074 	if (rc)
14075 		return rc;
14076 
14077 	req->ring_type = ring_type;
14078 	req->fw_ring_id = cpu_to_le32(ring_id);
14079 	resp = hwrm_req_hold(bp, req);
14080 	rc = hwrm_req_send(bp, req);
14081 	if (!rc) {
14082 		*prod = le32_to_cpu(resp->producer_index);
14083 		*cons = le32_to_cpu(resp->consumer_index);
14084 	}
14085 	hwrm_req_drop(bp, req);
14086 	return rc;
14087 }
14088 
14089 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
14090 {
14091 	struct bnxt_tx_ring_info *txr;
14092 	int i = bnapi->index, j;
14093 
14094 	bnxt_for_each_napi_tx(j, bnapi, txr)
14095 		netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14096 			    i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14097 			    txr->tx_cons);
14098 }
14099 
14100 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14101 {
14102 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14103 	int i = bnapi->index;
14104 
14105 	if (!rxr)
14106 		return;
14107 
14108 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14109 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14110 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14111 		    rxr->rx_sw_agg_prod);
14112 }
14113 
14114 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14115 {
14116 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
14117 	int i = bnapi->index, j;
14118 
14119 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14120 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14121 	for (j = 0; j < cpr->cp_ring_count; j++) {
14122 		cpr2 = &cpr->cp_ring_arr[j];
14123 		if (!cpr2->bnapi)
14124 			continue;
14125 		netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
14126 			    i, j, cpr2->cp_ring_struct.fw_ring_id,
14127 			    cpr2->cp_raw_cons);
14128 	}
14129 }
14130 
14131 static void bnxt_dbg_dump_states(struct bnxt *bp)
14132 {
14133 	int i;
14134 	struct bnxt_napi *bnapi;
14135 
14136 	for (i = 0; i < bp->cp_nr_rings; i++) {
14137 		bnapi = bp->bnapi[i];
14138 		if (netif_msg_drv(bp)) {
14139 			bnxt_dump_tx_sw_state(bnapi);
14140 			bnxt_dump_rx_sw_state(bnapi);
14141 			bnxt_dump_cp_sw_state(bnapi);
14142 		}
14143 	}
14144 }
14145 
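/* Ask firmware to reset the RX ring group for ring @ring_nr.  The request is
 * issued on that ring's completion ring and sent silently so the caller can
 * fall back to a full reset if the command is not supported.
 */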
14146 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14147 {
14148 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14149 	struct hwrm_ring_reset_input *req;
14150 	struct bnxt_napi *bnapi = rxr->bnapi;
14151 	struct bnxt_cp_ring_info *cpr;
14152 	u16 cp_ring_id;
14153 	int rc;
14154 
14155 	rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14156 	if (rc)
14157 		return rc;
14158 
14159 	cpr = &bnapi->cp_ring;
14160 	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14161 	req->cmpl_ring = cpu_to_le16(cp_ring_id);
14162 	req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14163 	req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14164 	return hwrm_req_send_silent(bp, req);
14165 }
14166 
14167 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14168 {
14169 	if (!silent)
14170 		bnxt_dbg_dump_states(bp);
14171 	if (netif_running(bp->dev)) {
14172 		bnxt_close_nic(bp, !silent, false);
14173 		bnxt_open_nic(bp, !silent, false);
14174 	}
14175 }
14176 
14177 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14178 {
14179 	struct bnxt *bp = netdev_priv(dev);
14180 
14181 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14182 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14183 }
14184 
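/* Periodic firmware health poll, called from bnxt_timer().  Once the
 * tmr_counter interval expires, a stalled heartbeat register or a changed
 * reset counter (while the PCI device is still present) schedules
 * BNXT_FW_EXCEPTION_SP_EVENT to start error recovery.
 */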
14185 static void bnxt_fw_health_check(struct bnxt *bp)
14186 {
14187 	struct bnxt_fw_health *fw_health = bp->fw_health;
14188 	struct pci_dev *pdev = bp->pdev;
14189 	u32 val;
14190 
14191 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14192 		return;
14193 
14194 	/* Make sure fw_health->enabled is read before checking the tmr_counter. */
14195 	smp_rmb();
14196 	if (fw_health->tmr_counter) {
14197 		fw_health->tmr_counter--;
14198 		return;
14199 	}
14200 
14201 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14202 	if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14203 		fw_health->arrests++;
14204 		goto fw_reset;
14205 	}
14206 
14207 	fw_health->last_fw_heartbeat = val;
14208 
14209 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14210 	if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14211 		fw_health->discoveries++;
14212 		goto fw_reset;
14213 	}
14214 
14215 	fw_health->tmr_counter = fw_health->tmr_multiplier;
14216 	return;
14217 
14218 fw_reset:
14219 	bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14220 }
14221 
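/* Per-device timer: schedules slow-path work (stats, firmware health checks,
 * PHY retries, NTUPLE filter aging) while the device is open, then re-arms
 * itself at bp->current_interval.
 */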
14222 static void bnxt_timer(struct timer_list *t)
14223 {
14224 	struct bnxt *bp = timer_container_of(bp, t, timer);
14225 	struct net_device *dev = bp->dev;
14226 
14227 	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14228 		return;
14229 
14230 	if (atomic_read(&bp->intr_sem) != 0)
14231 		goto bnxt_restart_timer;
14232 
14233 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14234 		bnxt_fw_health_check(bp);
14235 
14236 	if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14237 		bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14238 
14239 	if (bnxt_tc_flower_enabled(bp))
14240 		bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14241 
14242 #ifdef CONFIG_RFS_ACCEL
14243 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14244 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14245 #endif /* CONFIG_RFS_ACCEL */
14246 
14247 	if (bp->link_info.phy_retry) {
14248 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14249 			bp->link_info.phy_retry = false;
14250 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14251 		} else {
14252 			bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14253 		}
14254 	}
14255 
14256 	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14257 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14258 
14259 	if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14260 		bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14261 
14262 bnxt_restart_timer:
14263 	mod_timer(&bp->timer, jiffies + bp->current_interval);
14264 }
14265 
14266 static void bnxt_lock_sp(struct bnxt *bp)
14267 {
14268 	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14269 	 * set.  If the device is being closed, bnxt_close() may be holding
14270 	 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14271 	 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14272 	 * instance lock.
14273 	 */
14274 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14275 	netdev_lock(bp->dev);
14276 }
14277 
14278 static void bnxt_unlock_sp(struct bnxt *bp)
14279 {
14280 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14281 	netdev_unlock(bp->dev);
14282 }
14283 
14284 /* Only called from bnxt_sp_task() */
14285 static void bnxt_reset(struct bnxt *bp, bool silent)
14286 {
14287 	bnxt_lock_sp(bp);
14288 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
14289 		bnxt_reset_task(bp, silent);
14290 	bnxt_unlock_sp(bp);
14291 }
14292 
14293 /* Only called from bnxt_sp_task() */
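/* Reset only the RX rings whose NAPI instance is marked in_reset.  TPA is
 * disabled around the operation, and any failure of the per-ring firmware
 * reset falls back to a full (silent) reset of the NIC.
 */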
14294 static void bnxt_rx_ring_reset(struct bnxt *bp)
14295 {
14296 	int i;
14297 
14298 	bnxt_lock_sp(bp);
14299 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14300 		bnxt_unlock_sp(bp);
14301 		return;
14302 	}
14303 	/* Disable and flush TPA before resetting the RX ring */
14304 	if (bp->flags & BNXT_FLAG_TPA)
14305 		bnxt_set_tpa(bp, false);
14306 	for (i = 0; i < bp->rx_nr_rings; i++) {
14307 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14308 		struct bnxt_cp_ring_info *cpr;
14309 		int rc;
14310 
14311 		if (!rxr->bnapi->in_reset)
14312 			continue;
14313 
14314 		rc = bnxt_hwrm_rx_ring_reset(bp, i);
14315 		if (rc) {
14316 			if (rc == -EINVAL || rc == -EOPNOTSUPP)
14317 				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14318 			else
14319 				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14320 					    rc);
14321 			bnxt_reset_task(bp, true);
14322 			break;
14323 		}
14324 		bnxt_free_one_rx_ring_skbs(bp, rxr);
14325 		rxr->rx_prod = 0;
14326 		rxr->rx_agg_prod = 0;
14327 		rxr->rx_sw_agg_prod = 0;
14328 		rxr->rx_next_cons = 0;
14329 		rxr->bnapi->in_reset = false;
14330 		bnxt_alloc_one_rx_ring(bp, i);
14331 		cpr = &rxr->bnapi->cp_ring;
14332 		cpr->sw_stats->rx.rx_resets++;
14333 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
14334 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14335 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14336 	}
14337 	if (bp->flags & BNXT_FLAG_TPA)
14338 		bnxt_set_tpa(bp, true);
14339 	bnxt_unlock_sp(bp);
14340 }
14341 
14342 static void bnxt_fw_fatal_close(struct bnxt *bp)
14343 {
14344 	bnxt_tx_disable(bp);
14345 	bnxt_disable_napi(bp);
14346 	bnxt_disable_int_sync(bp);
14347 	bnxt_free_irq(bp);
14348 	bnxt_clear_int_mode(bp);
14349 	pci_disable_device(bp->pdev);
14350 }
14351 
14352 static void bnxt_fw_reset_close(struct bnxt *bp)
14353 {
14354 	/* When firmware is in fatal state, quiesce device and disable
14355 	 * bus master to prevent any potential bad DMAs before freeing
14356 	 * kernel memory.
14357 	 */
14358 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14359 		u16 val = 0;
14360 
14361 		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14362 		if (val == 0xffff)
14363 			bp->fw_reset_min_dsecs = 0;
14364 		bnxt_fw_fatal_close(bp);
14365 	}
14366 	__bnxt_close_nic(bp, true, false);
14367 	bnxt_vf_reps_free(bp);
14368 	bnxt_clear_int_mode(bp);
14369 	bnxt_hwrm_func_drv_unrgtr(bp);
14370 	if (pci_is_enabled(bp->pdev))
14371 		pci_disable_device(bp->pdev);
14372 	bnxt_free_ctx_mem(bp, false);
14373 }
14374 
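/* Firmware is considered OK only if its heartbeat register is still
 * advancing and the reset counter shows it has already completed a
 * self-recovery since the last check.
 */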
14375 static bool is_bnxt_fw_ok(struct bnxt *bp)
14376 {
14377 	struct bnxt_fw_health *fw_health = bp->fw_health;
14378 	bool no_heartbeat = false, has_reset = false;
14379 	u32 val;
14380 
14381 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14382 	if (val == fw_health->last_fw_heartbeat)
14383 		no_heartbeat = true;
14384 
14385 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14386 	if (val != fw_health->last_fw_reset_cnt)
14387 		has_reset = true;
14388 
14389 	if (!no_heartbeat && has_reset)
14390 		return true;
14391 
14392 	return false;
14393 }
14394 
14395 /* netdev instance lock is acquired before calling this function */
14396 static void bnxt_force_fw_reset(struct bnxt *bp)
14397 {
14398 	struct bnxt_fw_health *fw_health = bp->fw_health;
14399 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14400 	u32 wait_dsecs;
14401 
14402 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14403 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14404 		return;
14405 
14406 	/* We have to serialize with bnxt_refclk_read() */
14407 	if (ptp) {
14408 		unsigned long flags;
14409 
14410 		write_seqlock_irqsave(&ptp->ptp_lock, flags);
14411 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14412 		write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14413 	} else {
14414 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14415 	}
14416 	bnxt_fw_reset_close(bp);
14417 	wait_dsecs = fw_health->master_func_wait_dsecs;
14418 	if (fw_health->primary) {
14419 		if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14420 			wait_dsecs = 0;
14421 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14422 	} else {
14423 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14424 		wait_dsecs = fw_health->normal_func_wait_dsecs;
14425 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14426 	}
14427 
14428 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14429 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14430 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14431 }
14432 
14433 void bnxt_fw_exception(struct bnxt *bp)
14434 {
14435 	netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14436 	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14437 	bnxt_ulp_stop(bp);
14438 	bnxt_lock_sp(bp);
14439 	bnxt_force_fw_reset(bp);
14440 	bnxt_unlock_sp(bp);
14441 }
14442 
14443 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14444  * < 0 on error.
14445  */
14446 static int bnxt_get_registered_vfs(struct bnxt *bp)
14447 {
14448 #ifdef CONFIG_BNXT_SRIOV
14449 	int rc;
14450 
14451 	if (!BNXT_PF(bp))
14452 		return 0;
14453 
14454 	rc = bnxt_hwrm_func_qcfg(bp);
14455 	if (rc) {
14456 		netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14457 		return rc;
14458 	}
14459 	if (bp->pf.registered_vfs)
14460 		return bp->pf.registered_vfs;
14461 	if (bp->sriov_cfg)
14462 		return 1;
14463 #endif
14464 	return 0;
14465 }
14466 
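/* Start a firmware reset.  If VFs are still registered, poll for them to
 * unload first (BNXT_FW_RESET_STATE_POLL_VF); otherwise close the device and
 * hand the rest of the sequence to bnxt_fw_reset_task().
 */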
14467 void bnxt_fw_reset(struct bnxt *bp)
14468 {
14469 	bnxt_ulp_stop(bp);
14470 	bnxt_lock_sp(bp);
14471 	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14472 	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14473 		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14474 		int n = 0, tmo;
14475 
14476 		/* We have to serialize with bnxt_refclk_read() */
14477 		if (ptp) {
14478 			unsigned long flags;
14479 
14480 			write_seqlock_irqsave(&ptp->ptp_lock, flags);
14481 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14482 			write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14483 		} else {
14484 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14485 		}
14486 		if (bp->pf.active_vfs &&
14487 		    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14488 			n = bnxt_get_registered_vfs(bp);
14489 		if (n < 0) {
14490 			netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14491 				   n);
14492 			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14493 			netif_close(bp->dev);
14494 			goto fw_reset_exit;
14495 		} else if (n > 0) {
14496 			u16 vf_tmo_dsecs = n * 10;
14497 
14498 			if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14499 				bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14500 			bp->fw_reset_state =
14501 				BNXT_FW_RESET_STATE_POLL_VF;
14502 			bnxt_queue_fw_reset_work(bp, HZ / 10);
14503 			goto fw_reset_exit;
14504 		}
14505 		bnxt_fw_reset_close(bp);
14506 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14507 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14508 			tmo = HZ / 10;
14509 		} else {
14510 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14511 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
14512 		}
14513 		bnxt_queue_fw_reset_work(bp, tmo);
14514 	}
14515 fw_reset_exit:
14516 	bnxt_unlock_sp(bp);
14517 }
14518 
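/* P5+ chips only: look for completion rings that have work pending but whose
 * consumer index has not moved since the previous check, query the ring state
 * from firmware for debug, and count the event as a missed IRQ.
 */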
14519 static void bnxt_chk_missed_irq(struct bnxt *bp)
14520 {
14521 	int i;
14522 
14523 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14524 		return;
14525 
14526 	for (i = 0; i < bp->cp_nr_rings; i++) {
14527 		struct bnxt_napi *bnapi = bp->bnapi[i];
14528 		struct bnxt_cp_ring_info *cpr;
14529 		u32 fw_ring_id;
14530 		int j;
14531 
14532 		if (!bnapi)
14533 			continue;
14534 
14535 		cpr = &bnapi->cp_ring;
14536 		for (j = 0; j < cpr->cp_ring_count; j++) {
14537 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14538 			u32 val[2];
14539 
14540 			if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14541 				continue;
14542 
14543 			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14544 				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14545 				continue;
14546 			}
14547 			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14548 			bnxt_dbg_hwrm_ring_info_get(bp,
14549 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14550 				fw_ring_id, &val[0], &val[1]);
14551 			cpr->sw_stats->cmn.missed_irqs++;
14552 		}
14553 	}
14554 }
14555 
14556 static void bnxt_cfg_ntp_filters(struct bnxt *);
14557 
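/* Seed the requested link settings (autoneg vs. forced speed, duplex, flow
 * control) from the PHY configuration last reported by firmware.
 */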
14558 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14559 {
14560 	struct bnxt_link_info *link_info = &bp->link_info;
14561 
14562 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14563 		link_info->autoneg = BNXT_AUTONEG_SPEED;
14564 		if (bp->hwrm_spec_code >= 0x10201) {
14565 			if (link_info->auto_pause_setting &
14566 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14567 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14568 		} else {
14569 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14570 		}
14571 		bnxt_set_auto_speed(link_info);
14572 	} else {
14573 		bnxt_set_force_speed(link_info);
14574 		link_info->req_duplex = link_info->duplex_setting;
14575 	}
14576 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14577 		link_info->req_flow_ctrl =
14578 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14579 	else
14580 		link_info->req_flow_ctrl = link_info->force_pause_setting;
14581 }
14582 
14583 static void bnxt_fw_echo_reply(struct bnxt *bp)
14584 {
14585 	struct bnxt_fw_health *fw_health = bp->fw_health;
14586 	struct hwrm_func_echo_response_input *req;
14587 	int rc;
14588 
14589 	rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14590 	if (rc)
14591 		return;
14592 	req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14593 	req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14594 	hwrm_req_send(bp, req);
14595 }
14596 
14597 static void bnxt_ulp_restart(struct bnxt *bp)
14598 {
14599 	bnxt_ulp_stop(bp);
14600 	bnxt_ulp_start(bp, 0);
14601 }
14602 
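/* Slow-path workqueue handler.  Each bit in bp->sp_event requests one piece
 * of deferred work; BNXT_STATE_IN_SP_TASK is held for the duration so that
 * bnxt_close() can wait for the task to finish.
 */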
14603 static void bnxt_sp_task(struct work_struct *work)
14604 {
14605 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14606 
14607 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14608 	smp_mb__after_atomic();
14609 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14610 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14611 		return;
14612 	}
14613 
14614 	if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14615 		bnxt_ulp_restart(bp);
14616 		bnxt_reenable_sriov(bp);
14617 	}
14618 
14619 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14620 		bnxt_cfg_rx_mode(bp);
14621 
14622 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14623 		bnxt_cfg_ntp_filters(bp);
14624 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14625 		bnxt_hwrm_exec_fwd_req(bp);
14626 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14627 		netdev_info(bp->dev, "Received PF driver unload event!\n");
14628 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14629 		bnxt_hwrm_port_qstats(bp, 0);
14630 		bnxt_hwrm_port_qstats_ext(bp, 0);
14631 		bnxt_accumulate_all_stats(bp);
14632 	}
14633 
14634 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14635 		int rc;
14636 
14637 		mutex_lock(&bp->link_lock);
14638 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14639 				       &bp->sp_event))
14640 			bnxt_hwrm_phy_qcaps(bp);
14641 
14642 		rc = bnxt_update_link(bp, true);
14643 		if (rc)
14644 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14645 				   rc);
14646 
14647 		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14648 				       &bp->sp_event))
14649 			bnxt_init_ethtool_link_settings(bp);
14650 		mutex_unlock(&bp->link_lock);
14651 	}
14652 	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14653 		int rc;
14654 
14655 		mutex_lock(&bp->link_lock);
14656 		rc = bnxt_update_phy_setting(bp);
14657 		mutex_unlock(&bp->link_lock);
14658 		if (rc) {
14659 			netdev_warn(bp->dev, "update phy settings retry failed\n");
14660 		} else {
14661 			bp->link_info.phy_retry = false;
14662 			netdev_info(bp->dev, "update phy settings retry succeeded\n");
14663 		}
14664 	}
14665 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14666 		mutex_lock(&bp->link_lock);
14667 		bnxt_get_port_module_status(bp);
14668 		mutex_unlock(&bp->link_lock);
14669 	}
14670 
14671 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14672 		bnxt_tc_flow_stats_work(bp);
14673 
14674 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14675 		bnxt_chk_missed_irq(bp);
14676 
14677 	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14678 		bnxt_fw_echo_reply(bp);
14679 
14680 	if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14681 		bnxt_hwmon_notify_event(bp);
14682 
14683 	/* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
14684 	 * must be the last functions called before exiting.
14685 	 */
14686 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14687 		bnxt_reset(bp, false);
14688 
14689 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14690 		bnxt_reset(bp, true);
14691 
14692 	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14693 		bnxt_rx_ring_reset(bp);
14694 
14695 	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14696 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14697 		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14698 			bnxt_devlink_health_fw_report(bp);
14699 		else
14700 			bnxt_fw_reset(bp);
14701 	}
14702 
14703 	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14704 		if (!is_bnxt_fw_ok(bp))
14705 			bnxt_devlink_health_fw_report(bp);
14706 	}
14707 
14708 	smp_mb__before_atomic();
14709 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14710 }
14711 
14712 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14713 				int *max_cp);
14714 
14715 /* Under netdev instance lock */
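/* Check whether the requested TX/RX/TC/XDP ring counts can be satisfied by
 * the hardware resources (rings, completion rings, VNICs, stats contexts)
 * and, when dynamic MSI-X allocation is available, by the IRQ budget.
 */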
14716 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14717 		     int tx_xdp)
14718 {
14719 	int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14720 	struct bnxt_hw_rings hwr = {0};
14721 	int rx_rings = rx;
14722 	int rc;
14723 
14724 	if (tcs)
14725 		tx_sets = tcs;
14726 
14727 	_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14728 
14729 	if (max_rx < rx_rings)
14730 		return -ENOMEM;
14731 
14732 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
14733 		rx_rings <<= 1;
14734 
14735 	hwr.rx = rx_rings;
14736 	hwr.tx = tx * tx_sets + tx_xdp;
14737 	if (max_tx < hwr.tx)
14738 		return -ENOMEM;
14739 
14740 	hwr.vnic = bnxt_get_total_vnics(bp, rx);
14741 
14742 	tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14743 	hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14744 	if (max_cp < hwr.cp)
14745 		return -ENOMEM;
14746 	hwr.stat = hwr.cp;
14747 	if (BNXT_NEW_RM(bp)) {
14748 		hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14749 		hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14750 		hwr.grp = rx;
14751 		hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14752 	}
14753 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14754 		hwr.cp_p5 = hwr.tx + rx;
14755 	rc = bnxt_hwrm_check_rings(bp, &hwr);
14756 	if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14757 		if (!bnxt_ulp_registered(bp->edev)) {
14758 			hwr.cp += bnxt_get_ulp_msix_num(bp);
14759 			hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14760 		}
14761 		if (hwr.cp > bp->total_irqs) {
14762 			int total_msix = bnxt_change_msix(bp, hwr.cp);
14763 
14764 			if (total_msix < hwr.cp) {
14765 				netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14766 					    hwr.cp, total_msix);
14767 				rc = -ENOSPC;
14768 			}
14769 		}
14770 	}
14771 	return rc;
14772 }
14773 
14774 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14775 {
14776 	if (bp->bar2) {
14777 		pci_iounmap(pdev, bp->bar2);
14778 		bp->bar2 = NULL;
14779 	}
14780 
14781 	if (bp->bar1) {
14782 		pci_iounmap(pdev, bp->bar1);
14783 		bp->bar1 = NULL;
14784 	}
14785 
14786 	if (bp->bar0) {
14787 		pci_iounmap(pdev, bp->bar0);
14788 		bp->bar0 = NULL;
14789 	}
14790 }
14791 
14792 static void bnxt_cleanup_pci(struct bnxt *bp)
14793 {
14794 	bnxt_unmap_bars(bp, bp->pdev);
14795 	pci_release_regions(bp->pdev);
14796 	if (pci_is_enabled(bp->pdev))
14797 		pci_disable_device(bp->pdev);
14798 }
14799 
14800 static void bnxt_init_dflt_coal(struct bnxt *bp)
14801 {
14802 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14803 	struct bnxt_coal *coal;
14804 	u16 flags = 0;
14805 
14806 	if (coal_cap->cmpl_params &
14807 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14808 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14809 
14810 	/* Tick values in microseconds.
14811 	 * 1 coal_buf x bufs_per_record = 1 completion record.
14812 	 */
14813 	coal = &bp->rx_coal;
14814 	coal->coal_ticks = 10;
14815 	coal->coal_bufs = 30;
14816 	coal->coal_ticks_irq = 1;
14817 	coal->coal_bufs_irq = 2;
14818 	coal->idle_thresh = 50;
14819 	coal->bufs_per_record = 2;
14820 	coal->budget = 64;		/* NAPI budget */
14821 	coal->flags = flags;
14822 
14823 	coal = &bp->tx_coal;
14824 	coal->coal_ticks = 28;
14825 	coal->coal_bufs = 30;
14826 	coal->coal_ticks_irq = 2;
14827 	coal->coal_bufs_irq = 2;
14828 	coal->bufs_per_record = 1;
14829 	coal->flags = flags;
14830 
14831 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14832 }
14833 
14834 /* Returns true for FW versions that pre-reserve 1 VNIC per function */
14835 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14836 {
14837 	u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14838 
14839 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14840 	    (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14841 		return true;
14842 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14843 	    (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14844 		return true;
14845 	return false;
14846 }
14847 
14848 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14849 {
14850 	struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14851 	struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14852 	int rc;
14853 
14854 	bp->max_pfcwd_tmo_ms = 0;
14855 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14856 	if (rc)
14857 		return;
14858 	resp = hwrm_req_hold(bp, req);
14859 	rc = hwrm_req_send_silent(bp, req);
14860 	if (!rc)
14861 		bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14862 	hwrm_req_drop(bp, req);
14863 }
14864 
14865 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14866 {
14867 	int rc;
14868 
14869 	bp->fw_cap = 0;
14870 	rc = bnxt_hwrm_ver_get(bp);
14871 	/* FW may be unresponsive after FLR. FLR must complete within 100 msec
14872 	 * so wait before continuing with recovery.
14873 	 */
14874 	if (rc)
14875 		msleep(100);
14876 	bnxt_try_map_fw_health_reg(bp);
14877 	if (rc) {
14878 		rc = bnxt_try_recover_fw(bp);
14879 		if (rc)
14880 			return rc;
14881 		rc = bnxt_hwrm_ver_get(bp);
14882 		if (rc)
14883 			return rc;
14884 	}
14885 
14886 	bnxt_nvm_cfg_ver_get(bp);
14887 
14888 	rc = bnxt_hwrm_func_reset(bp);
14889 	if (rc)
14890 		return -ENODEV;
14891 
14892 	bnxt_hwrm_fw_set_time(bp);
14893 	return 0;
14894 }
14895 
14896 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14897 {
14898 	int rc;
14899 
14900 	/* Get the MAX capabilities for this function */
14901 	rc = bnxt_hwrm_func_qcaps(bp);
14902 	if (rc) {
14903 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14904 			   rc);
14905 		return -ENODEV;
14906 	}
14907 
14908 	rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14909 	if (rc)
14910 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14911 			    rc);
14912 
14913 	if (bnxt_alloc_fw_health(bp)) {
14914 		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14915 	} else {
14916 		rc = bnxt_hwrm_error_recovery_qcfg(bp);
14917 		if (rc)
14918 			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14919 				    rc);
14920 	}
14921 
14922 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14923 	if (rc)
14924 		return -ENODEV;
14925 
14926 	rc = bnxt_alloc_crash_dump_mem(bp);
14927 	if (rc)
14928 		netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14929 			    rc);
14930 	if (!rc) {
14931 		rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14932 		if (rc) {
14933 			bnxt_free_crash_dump_mem(bp);
14934 			netdev_warn(bp->dev,
14935 				    "hwrm crash dump mem failure rc: %d\n", rc);
14936 		}
14937 	}
14938 
14939 	if (bnxt_fw_pre_resv_vnics(bp))
14940 		bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14941 
14942 	bnxt_hwrm_pfcwd_qcaps(bp);
14943 	bnxt_hwrm_func_qcfg(bp);
14944 	bnxt_hwrm_vnic_qcaps(bp);
14945 	bnxt_hwrm_port_led_qcaps(bp);
14946 	bnxt_ethtool_init(bp);
14947 	if (bp->fw_cap & BNXT_FW_CAP_PTP)
14948 		__bnxt_hwrm_ptp_qcfg(bp);
14949 	bnxt_dcb_init(bp);
14950 	bnxt_hwmon_init(bp);
14951 	return 0;
14952 }
14953 
14954 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14955 {
14956 	bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14957 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14958 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14959 			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14960 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14961 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14962 		bp->rss_hash_delta = bp->rss_hash_cfg;
14963 	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14964 		bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14965 		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14966 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14967 	}
14968 }
14969 
14970 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14971 {
14972 	struct net_device *dev = bp->dev;
14973 
14974 	dev->hw_features &= ~NETIF_F_NTUPLE;
14975 	dev->features &= ~NETIF_F_NTUPLE;
14976 	bp->flags &= ~BNXT_FLAG_RFS;
14977 	if (bnxt_rfs_supported(bp)) {
14978 		dev->hw_features |= NETIF_F_NTUPLE;
14979 		if (bnxt_rfs_capable(bp, false)) {
14980 			bp->flags |= BNXT_FLAG_RFS;
14981 			dev->features |= NETIF_F_NTUPLE;
14982 		}
14983 	}
14984 }
14985 
14986 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14987 {
14988 	struct pci_dev *pdev = bp->pdev;
14989 
14990 	bnxt_set_dflt_rss_hash_type(bp);
14991 	bnxt_set_dflt_rfs(bp);
14992 
14993 	bnxt_get_wol_settings(bp);
14994 	if (bp->flags & BNXT_FLAG_WOL_CAP)
14995 		device_set_wakeup_enable(&pdev->dev, bp->wol);
14996 	else
14997 		device_set_wakeup_capable(&pdev->dev, false);
14998 
14999 	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
15000 	bnxt_hwrm_coal_params_qcaps(bp);
15001 }
15002 
15003 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
15004 
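/* Run the three firmware init phases, probe the PHY, and approve the current
 * MAC address with firmware.
 */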
15005 int bnxt_fw_init_one(struct bnxt *bp)
15006 {
15007 	int rc;
15008 
15009 	rc = bnxt_fw_init_one_p1(bp);
15010 	if (rc) {
15011 		netdev_err(bp->dev, "Firmware init phase 1 failed\n");
15012 		return rc;
15013 	}
15014 	rc = bnxt_fw_init_one_p2(bp);
15015 	if (rc) {
15016 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
15017 		return rc;
15018 	}
15019 	rc = bnxt_probe_phy(bp, false);
15020 	if (rc)
15021 		return rc;
15022 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
15023 	if (rc)
15024 		return rc;
15025 
15026 	bnxt_fw_init_one_p3(bp);
15027 	return 0;
15028 }
15029 
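/* Execute one step of the firmware-provided reset sequence: write the value
 * for @reg_idx to PCI config space, a GRC window, or a BAR offset depending
 * on the register type, then optionally flush and delay.
 */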
15030 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
15031 {
15032 	struct bnxt_fw_health *fw_health = bp->fw_health;
15033 	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
15034 	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
15035 	u32 reg_type, reg_off, delay_msecs;
15036 
15037 	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
15038 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
15039 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
15040 	switch (reg_type) {
15041 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
15042 		pci_write_config_dword(bp->pdev, reg_off, val);
15043 		break;
15044 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
15045 		writel(reg_off & BNXT_GRC_BASE_MASK,
15046 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
15047 		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
15048 		fallthrough;
15049 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
15050 		writel(val, bp->bar0 + reg_off);
15051 		break;
15052 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
15053 		writel(val, bp->bar1 + reg_off);
15054 		break;
15055 	}
15056 	if (delay_msecs) {
15057 		pci_read_config_dword(bp->pdev, 0, &val);
15058 		msleep(delay_msecs);
15059 	}
15060 }
15061 
15062 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
15063 {
15064 	struct hwrm_func_qcfg_output *resp;
15065 	struct hwrm_func_qcfg_input *req;
15066 	bool result = true; /* firmware will enforce if unknown */
15067 
15068 	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
15069 		return result;
15070 
15071 	if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
15072 		return result;
15073 
15074 	req->fid = cpu_to_le16(0xffff);
15075 	resp = hwrm_req_hold(bp, req);
15076 	if (!hwrm_req_send(bp, req))
15077 		result = !!(le16_to_cpu(resp->flags) &
15078 			    FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
15079 	hwrm_req_drop(bp, req);
15080 	return result;
15081 }
15082 
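/* Perform the actual firmware reset: via OP-TEE when the FW supports
 * error-recovery reload, via the host register write sequence, or by sending
 * HWRM_FW_RESET to the co-processor, depending on the recovery flags.
 */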
15083 static void bnxt_reset_all(struct bnxt *bp)
15084 {
15085 	struct bnxt_fw_health *fw_health = bp->fw_health;
15086 	int i, rc;
15087 
15088 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15089 		bnxt_fw_reset_via_optee(bp);
15090 		bp->fw_reset_timestamp = jiffies;
15091 		return;
15092 	}
15093 
15094 	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
15095 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
15096 			bnxt_fw_reset_writel(bp, i);
15097 	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
15098 		struct hwrm_fw_reset_input *req;
15099 
15100 		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15101 		if (!rc) {
15102 			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15103 			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15104 			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15105 			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15106 			rc = hwrm_req_send(bp, req);
15107 		}
15108 		if (rc != -ENODEV)
15109 			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15110 	}
15111 	bp->fw_reset_timestamp = jiffies;
15112 }
15113 
15114 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15115 {
15116 	return time_after(jiffies, bp->fw_reset_timestamp +
15117 			  (bp->fw_reset_max_dsecs * HZ / 10));
15118 }
15119 
15120 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15121 {
15122 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15123 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15124 		bnxt_dl_health_fw_status_update(bp, false);
15125 	bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15126 	netif_close(bp->dev);
15127 }
15128 
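/* Delayed work that drives the firmware reset state machine:
 * POLL_VF -> [POLL_FW_DOWN] -> RESET_FW -> ENABLE_DEV -> POLL_FW -> OPENING.
 * The work re-queues itself between states and aborts the reset (closing the
 * device) if firmware or the PCI device fails to come back in time.
 */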
15129 static void bnxt_fw_reset_task(struct work_struct *work)
15130 {
15131 	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15132 	int rc = 0;
15133 
15134 	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15135 		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15136 		return;
15137 	}
15138 
15139 	switch (bp->fw_reset_state) {
15140 	case BNXT_FW_RESET_STATE_POLL_VF: {
15141 		int n = bnxt_get_registered_vfs(bp);
15142 		int tmo;
15143 
15144 		if (n < 0) {
15145 			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15146 				   n, jiffies_to_msecs(jiffies -
15147 				   bp->fw_reset_timestamp));
15148 			goto fw_reset_abort;
15149 		} else if (n > 0) {
15150 			if (bnxt_fw_reset_timeout(bp)) {
15151 				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15152 				bp->fw_reset_state = 0;
15153 				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15154 					   n);
15155 				goto ulp_start;
15156 			}
15157 			bnxt_queue_fw_reset_work(bp, HZ / 10);
15158 			return;
15159 		}
15160 		bp->fw_reset_timestamp = jiffies;
15161 		netdev_lock(bp->dev);
15162 		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15163 			bnxt_fw_reset_abort(bp, rc);
15164 			netdev_unlock(bp->dev);
15165 			goto ulp_start;
15166 		}
15167 		bnxt_fw_reset_close(bp);
15168 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15169 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15170 			tmo = HZ / 10;
15171 		} else {
15172 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15173 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
15174 		}
15175 		netdev_unlock(bp->dev);
15176 		bnxt_queue_fw_reset_work(bp, tmo);
15177 		return;
15178 	}
15179 	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15180 		u32 val;
15181 
15182 		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15183 		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15184 		    !bnxt_fw_reset_timeout(bp)) {
15185 			bnxt_queue_fw_reset_work(bp, HZ / 5);
15186 			return;
15187 		}
15188 
15189 		if (!bp->fw_health->primary) {
15190 			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15191 
15192 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15193 			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15194 			return;
15195 		}
15196 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15197 	}
15198 		fallthrough;
15199 	case BNXT_FW_RESET_STATE_RESET_FW:
15200 		bnxt_reset_all(bp);
15201 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15202 		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15203 		return;
15204 	case BNXT_FW_RESET_STATE_ENABLE_DEV:
15205 		bnxt_inv_fw_health_reg(bp);
15206 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15207 		    !bp->fw_reset_min_dsecs) {
15208 			u16 val;
15209 
15210 			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15211 			if (val == 0xffff) {
15212 				if (bnxt_fw_reset_timeout(bp)) {
15213 					netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15214 					rc = -ETIMEDOUT;
15215 					goto fw_reset_abort;
15216 				}
15217 				bnxt_queue_fw_reset_work(bp, HZ / 1000);
15218 				return;
15219 			}
15220 		}
15221 		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15222 		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15223 		if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15224 		    !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15225 			bnxt_dl_remote_reload(bp);
15226 		if (pci_enable_device(bp->pdev)) {
15227 			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15228 			rc = -ENODEV;
15229 			goto fw_reset_abort;
15230 		}
15231 		pci_set_master(bp->pdev);
15232 		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15233 		fallthrough;
15234 	case BNXT_FW_RESET_STATE_POLL_FW:
15235 		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15236 		rc = bnxt_hwrm_poll(bp);
15237 		if (rc) {
15238 			if (bnxt_fw_reset_timeout(bp)) {
15239 				netdev_err(bp->dev, "Firmware reset aborted\n");
15240 				goto fw_reset_abort_status;
15241 			}
15242 			bnxt_queue_fw_reset_work(bp, HZ / 5);
15243 			return;
15244 		}
15245 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15246 		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15247 		fallthrough;
15248 	case BNXT_FW_RESET_STATE_OPENING:
15249 		while (!netdev_trylock(bp->dev)) {
15250 			bnxt_queue_fw_reset_work(bp, HZ / 10);
15251 			return;
15252 		}
15253 		rc = bnxt_open(bp->dev);
15254 		if (rc) {
15255 			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15256 			bnxt_fw_reset_abort(bp, rc);
15257 			netdev_unlock(bp->dev);
15258 			goto ulp_start;
15259 		}
15260 
15261 		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15262 		    bp->fw_health->enabled) {
15263 			bp->fw_health->last_fw_reset_cnt =
15264 				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15265 		}
15266 		bp->fw_reset_state = 0;
15267 		/* Make sure fw_reset_state is 0 before clearing the flag */
15268 		smp_mb__before_atomic();
15269 		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15270 		bnxt_ptp_reapply_pps(bp);
15271 		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15272 		if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15273 			bnxt_dl_health_fw_recovery_done(bp);
15274 			bnxt_dl_health_fw_status_update(bp, true);
15275 		}
15276 		netdev_unlock(bp->dev);
15277 		bnxt_ulp_start(bp, 0);
15278 		bnxt_reenable_sriov(bp);
15279 		netdev_lock(bp->dev);
15280 		bnxt_vf_reps_alloc(bp);
15281 		bnxt_vf_reps_open(bp);
15282 		netdev_unlock(bp->dev);
15283 		break;
15284 	}
15285 	return;
15286 
15287 fw_reset_abort_status:
15288 	if (bp->fw_health->status_reliable ||
15289 	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15290 		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15291 
15292 		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15293 	}
15294 fw_reset_abort:
15295 	netdev_lock(bp->dev);
15296 	bnxt_fw_reset_abort(bp, rc);
15297 	netdev_unlock(bp->dev);
15298 ulp_start:
15299 	bnxt_ulp_start(bp, rc);
15300 }
15301 
15302 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15303 {
15304 	int rc;
15305 	struct bnxt *bp = netdev_priv(dev);
15306 
15307 	SET_NETDEV_DEV(dev, &pdev->dev);
15308 
15309 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
15310 	rc = pci_enable_device(pdev);
15311 	if (rc) {
15312 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15313 		goto init_err;
15314 	}
15315 
15316 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15317 		dev_err(&pdev->dev,
15318 			"Cannot find PCI device base address, aborting\n");
15319 		rc = -ENODEV;
15320 		goto init_err_disable;
15321 	}
15322 
15323 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15324 	if (rc) {
15325 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15326 		goto init_err_disable;
15327 	}
15328 
15329 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15330 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15331 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15332 		rc = -EIO;
15333 		goto init_err_release;
15334 	}
15335 
15336 	pci_set_master(pdev);
15337 
15338 	bp->dev = dev;
15339 	bp->pdev = pdev;
15340 
15341 	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15342 	 * determines the BAR size.
15343 	 */
15344 	bp->bar0 = pci_ioremap_bar(pdev, 0);
15345 	if (!bp->bar0) {
15346 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15347 		rc = -ENOMEM;
15348 		goto init_err_release;
15349 	}
15350 
15351 	bp->bar2 = pci_ioremap_bar(pdev, 4);
15352 	if (!bp->bar2) {
15353 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15354 		rc = -ENOMEM;
15355 		goto init_err_release;
15356 	}
15357 
15358 	INIT_WORK(&bp->sp_task, bnxt_sp_task);
15359 	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15360 
15361 	spin_lock_init(&bp->ntp_fltr_lock);
15362 #if BITS_PER_LONG == 32
15363 	spin_lock_init(&bp->db_lock);
15364 #endif
15365 
15366 	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15367 	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15368 
15369 	timer_setup(&bp->timer, bnxt_timer, 0);
15370 	bp->current_interval = BNXT_TIMER_INTERVAL;
15371 
15372 	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15373 	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15374 
15375 	clear_bit(BNXT_STATE_OPEN, &bp->state);
15376 	return 0;
15377 
15378 init_err_release:
15379 	bnxt_unmap_bars(bp, pdev);
15380 	pci_release_regions(pdev);
15381 
15382 init_err_disable:
15383 	pci_disable_device(pdev);
15384 
15385 init_err:
15386 	return rc;
15387 }
15388 
15389 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15390 {
15391 	struct sockaddr *addr = p;
15392 	struct bnxt *bp = netdev_priv(dev);
15393 	int rc = 0;
15394 
15395 	netdev_assert_locked(dev);
15396 
15397 	if (!is_valid_ether_addr(addr->sa_data))
15398 		return -EADDRNOTAVAIL;
15399 
15400 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15401 		return 0;
15402 
15403 	rc = bnxt_approve_mac(bp, addr->sa_data, true);
15404 	if (rc)
15405 		return rc;
15406 
15407 	eth_hw_addr_set(dev, addr->sa_data);
15408 	bnxt_clear_usr_fltrs(bp, true);
15409 	if (netif_running(dev)) {
15410 		bnxt_close_nic(bp, false, false);
15411 		rc = bnxt_open_nic(bp, false, false);
15412 	}
15413 
15414 	return rc;
15415 }
15416 
15417 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15418 {
15419 	struct bnxt *bp = netdev_priv(dev);
15420 
15421 	netdev_assert_locked(dev);
15422 
15423 	if (netif_running(dev))
15424 		bnxt_close_nic(bp, true, false);
15425 
15426 	WRITE_ONCE(dev->mtu, new_mtu);
15427 
15428 	/* MTU change may change the AGG ring settings if an XDP multi-buffer
15429 	 * program is attached.  We need to set the AGG ring settings and
15430 	 * rx_skb_func accordingly.
15431 	 */
15432 	if (READ_ONCE(bp->xdp_prog))
15433 		bnxt_set_rx_skb_mode(bp, true);
15434 
15435 	bnxt_set_ring_params(bp);
15436 
15437 	if (netif_running(dev))
15438 		return bnxt_open_nic(bp, true, false);
15439 
15440 	return 0;
15441 }
15442 
15443 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15444 {
15445 	struct bnxt *bp = netdev_priv(dev);
15446 	bool sh = false;
15447 	int rc, tx_cp;
15448 
15449 	if (tc > bp->max_tc) {
15450 		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15451 			   tc, bp->max_tc);
15452 		return -EINVAL;
15453 	}
15454 
15455 	if (bp->num_tc == tc)
15456 		return 0;
15457 
15458 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15459 		sh = true;
15460 
15461 	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15462 			      sh, tc, bp->tx_nr_rings_xdp);
15463 	if (rc)
15464 		return rc;
15465 
15466 	/* Need to close the device and redo HW resource allocations */
15467 	if (netif_running(bp->dev))
15468 		bnxt_close_nic(bp, true, false);
15469 
15470 	if (tc) {
15471 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15472 		netdev_set_num_tc(dev, tc);
15473 		bp->num_tc = tc;
15474 	} else {
15475 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15476 		netdev_reset_tc(dev);
15477 		bp->num_tc = 0;
15478 	}
15479 	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15480 	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15481 	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15482 			       tx_cp + bp->rx_nr_rings;
15483 
15484 	if (netif_running(bp->dev))
15485 		return bnxt_open_nic(bp, true, false);
15486 
15487 	return 0;
15488 }
15489 
15490 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15491 				  void *cb_priv)
15492 {
15493 	struct bnxt *bp = cb_priv;
15494 
15495 	if (!bnxt_tc_flower_enabled(bp) ||
15496 	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15497 		return -EOPNOTSUPP;
15498 
15499 	switch (type) {
15500 	case TC_SETUP_CLSFLOWER:
15501 		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15502 	default:
15503 		return -EOPNOTSUPP;
15504 	}
15505 }
15506 
15507 LIST_HEAD(bnxt_block_cb_list);
15508 
15509 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15510 			 void *type_data)
15511 {
15512 	struct bnxt *bp = netdev_priv(dev);
15513 
15514 	switch (type) {
15515 	case TC_SETUP_BLOCK:
15516 		return flow_block_cb_setup_simple(type_data,
15517 						  &bnxt_block_cb_list,
15518 						  bnxt_setup_tc_block_cb,
15519 						  bp, bp, true);
15520 	case TC_SETUP_QDISC_MQPRIO: {
15521 		struct tc_mqprio_qopt *mqprio = type_data;
15522 
15523 		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15524 
15525 		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15526 	}
15527 	default:
15528 		return -EOPNOTSUPP;
15529 	}
15530 }
15531 
15532 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15533 			    const struct sk_buff *skb)
15534 {
15535 	struct bnxt_vnic_info *vnic;
15536 
15537 	if (skb)
15538 		return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15539 
15540 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15541 	return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15542 }
15543 
15544 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15545 			   u32 idx)
15546 {
15547 	struct hlist_head *head;
15548 	int bit_id;
15549 
15550 	spin_lock_bh(&bp->ntp_fltr_lock);
15551 	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15552 	if (bit_id < 0) {
15553 		spin_unlock_bh(&bp->ntp_fltr_lock);
15554 		return -ENOMEM;
15555 	}
15556 
15557 	fltr->base.sw_id = (u16)bit_id;
15558 	fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15559 	fltr->base.flags |= BNXT_ACT_RING_DST;
15560 	head = &bp->ntp_fltr_hash_tbl[idx];
15561 	hlist_add_head_rcu(&fltr->base.hash, head);
15562 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15563 	bnxt_insert_usr_fltr(bp, &fltr->base);
15564 	bp->ntp_fltr_count++;
15565 	spin_unlock_bh(&bp->ntp_fltr_lock);
15566 	return 0;
15567 }
15568 
15569 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15570 			    struct bnxt_ntuple_filter *f2)
15571 {
15572 	struct bnxt_flow_masks *masks1 = &f1->fmasks;
15573 	struct bnxt_flow_masks *masks2 = &f2->fmasks;
15574 	struct flow_keys *keys1 = &f1->fkeys;
15575 	struct flow_keys *keys2 = &f2->fkeys;
15576 
15577 	if (keys1->basic.n_proto != keys2->basic.n_proto ||
15578 	    keys1->basic.ip_proto != keys2->basic.ip_proto)
15579 		return false;
15580 
15581 	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15582 		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15583 		    masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15584 		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15585 		    masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15586 			return false;
15587 	} else {
15588 		if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15589 				     &keys2->addrs.v6addrs.src) ||
15590 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15591 				     &masks2->addrs.v6addrs.src) ||
15592 		    !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15593 				     &keys2->addrs.v6addrs.dst) ||
15594 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15595 				     &masks2->addrs.v6addrs.dst))
15596 			return false;
15597 	}
15598 
15599 	return keys1->ports.src == keys2->ports.src &&
15600 	       masks1->ports.src == masks2->ports.src &&
15601 	       keys1->ports.dst == keys2->ports.dst &&
15602 	       masks1->ports.dst == masks2->ports.dst &&
15603 	       keys1->control.flags == keys2->control.flags &&
15604 	       f1->l2_fltr == f2->l2_fltr;
15605 }
15606 
15607 struct bnxt_ntuple_filter *
15608 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15609 				struct bnxt_ntuple_filter *fltr, u32 idx)
15610 {
15611 	struct bnxt_ntuple_filter *f;
15612 	struct hlist_head *head;
15613 
15614 	head = &bp->ntp_fltr_hash_tbl[idx];
15615 	hlist_for_each_entry_rcu(f, head, base.hash) {
15616 		if (bnxt_fltr_match(f, fltr))
15617 			return f;
15618 	}
15619 	return NULL;
15620 }
15621 
15622 #ifdef CONFIG_RFS_ACCEL
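/* ndo_rx_flow_steer() handler for aRFS: dissect the flow, reuse a matching
 * n-tuple filter if one exists, otherwise insert a new one steering the flow
 * to @rxq_index and let the sp task program it into hardware.
 */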
15623 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15624 			      u16 rxq_index, u32 flow_id)
15625 {
15626 	struct bnxt *bp = netdev_priv(dev);
15627 	struct bnxt_ntuple_filter *fltr, *new_fltr;
15628 	struct flow_keys *fkeys;
15629 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15630 	struct bnxt_l2_filter *l2_fltr;
15631 	int rc = 0, idx;
15632 	u32 flags;
15633 
15634 	if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15635 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15636 		atomic_inc(&l2_fltr->refcnt);
15637 	} else {
15638 		struct bnxt_l2_key key;
15639 
15640 		ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15641 		key.vlan = 0;
15642 		l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15643 		if (!l2_fltr)
15644 			return -EINVAL;
15645 		if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15646 			bnxt_del_l2_filter(bp, l2_fltr);
15647 			return -EINVAL;
15648 		}
15649 	}
15650 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15651 	if (!new_fltr) {
15652 		bnxt_del_l2_filter(bp, l2_fltr);
15653 		return -ENOMEM;
15654 	}
15655 
15656 	fkeys = &new_fltr->fkeys;
15657 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15658 		rc = -EPROTONOSUPPORT;
15659 		goto err_free;
15660 	}
15661 
15662 	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15663 	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15664 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15665 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15666 		rc = -EPROTONOSUPPORT;
15667 		goto err_free;
15668 	}
15669 	new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15670 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15671 		if (bp->hwrm_spec_code < 0x10601) {
15672 			rc = -EPROTONOSUPPORT;
15673 			goto err_free;
15674 		}
15675 		new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15676 	}
15677 	flags = fkeys->control.flags;
15678 	if (((flags & FLOW_DIS_ENCAPSULATION) &&
15679 	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15680 		rc = -EPROTONOSUPPORT;
15681 		goto err_free;
15682 	}
15683 	new_fltr->l2_fltr = l2_fltr;
15684 
15685 	idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15686 	rcu_read_lock();
15687 	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15688 	if (fltr) {
15689 		rc = fltr->base.sw_id;
15690 		rcu_read_unlock();
15691 		goto err_free;
15692 	}
15693 	rcu_read_unlock();
15694 
15695 	new_fltr->flow_id = flow_id;
15696 	new_fltr->base.rxq = rxq_index;
15697 	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15698 	if (!rc) {
15699 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15700 		return new_fltr->base.sw_id;
15701 	}
15702 
15703 err_free:
15704 	bnxt_del_l2_filter(bp, l2_fltr);
15705 	kfree(new_fltr);
15706 	return rc;
15707 }
15708 #endif
15709 
15710 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15711 {
15712 	spin_lock_bh(&bp->ntp_fltr_lock);
15713 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15714 		spin_unlock_bh(&bp->ntp_fltr_lock);
15715 		return;
15716 	}
15717 	hlist_del_rcu(&fltr->base.hash);
15718 	bnxt_del_one_usr_fltr(bp, &fltr->base);
15719 	bp->ntp_fltr_count--;
15720 	spin_unlock_bh(&bp->ntp_fltr_lock);
15721 	bnxt_del_l2_filter(bp, fltr->l2_fltr);
15722 	clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15723 	kfree_rcu(fltr, base.rcu);
15724 }
15725 
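/* Walk the n-tuple filter hash table: program filters that are not yet in
 * hardware and free the ones that the stack reports as expired.
 */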
15726 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15727 {
15728 #ifdef CONFIG_RFS_ACCEL
15729 	int i;
15730 
15731 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15732 		struct hlist_head *head;
15733 		struct hlist_node *tmp;
15734 		struct bnxt_ntuple_filter *fltr;
15735 		int rc;
15736 
15737 		head = &bp->ntp_fltr_hash_tbl[i];
15738 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15739 			bool del = false;
15740 
15741 			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15742 				if (fltr->base.flags & BNXT_ACT_NO_AGING)
15743 					continue;
15744 				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15745 							fltr->flow_id,
15746 							fltr->base.sw_id)) {
15747 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
15748 									 fltr);
15749 					del = true;
15750 				}
15751 			} else {
15752 				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15753 								       fltr);
15754 				if (rc)
15755 					del = true;
15756 				else
15757 					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15758 			}
15759 
15760 			if (del)
15761 				bnxt_del_ntp_filter(bp, fltr);
15762 		}
15763 	}
15764 #endif
15765 }
15766 
15767 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15768 				    unsigned int entry, struct udp_tunnel_info *ti)
15769 {
15770 	struct bnxt *bp = netdev_priv(netdev);
15771 	unsigned int cmd;
15772 
15773 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15774 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15775 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15776 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15777 	else
15778 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15779 
15780 	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15781 }
15782 
15783 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15784 				      unsigned int entry, struct udp_tunnel_info *ti)
15785 {
15786 	struct bnxt *bp = netdev_priv(netdev);
15787 	unsigned int cmd;
15788 
15789 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15790 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15791 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15792 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15793 	else
15794 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15795 
15796 	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15797 }
15798 
15799 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15800 	.set_port	= bnxt_udp_tunnel_set_port,
15801 	.unset_port	= bnxt_udp_tunnel_unset_port,
15802 	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15803 	.tables		= {
15804 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
15805 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15806 	},
15807 }, bnxt_udp_tunnels_p7 = {
15808 	.set_port	= bnxt_udp_tunnel_set_port,
15809 	.unset_port	= bnxt_udp_tunnel_unset_port,
15810 	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15811 	.tables		= {
15812 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
15813 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15814 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15815 	},
15816 };
15817 
15818 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15819 			       struct net_device *dev, u32 filter_mask,
15820 			       int nlflags)
15821 {
15822 	struct bnxt *bp = netdev_priv(dev);
15823 
15824 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15825 				       nlflags, filter_mask, NULL);
15826 }
15827 
15828 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15829 			       u16 flags, struct netlink_ext_ack *extack)
15830 {
15831 	struct bnxt *bp = netdev_priv(dev);
15832 	struct nlattr *attr, *br_spec;
15833 	int rem, rc = 0;
15834 
15835 	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15836 		return -EOPNOTSUPP;
15837 
15838 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15839 	if (!br_spec)
15840 		return -EINVAL;
15841 
15842 	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15843 		u16 mode;
15844 
15845 		mode = nla_get_u16(attr);
15846 		if (mode == bp->br_mode)
15847 			break;
15848 
15849 		rc = bnxt_hwrm_set_br_mode(bp, mode);
15850 		if (!rc)
15851 			bp->br_mode = mode;
15852 		break;
15853 	}
15854 	return rc;
15855 }
15856 
15857 int bnxt_get_port_parent_id(struct net_device *dev,
15858 			    struct netdev_phys_item_id *ppid)
15859 {
15860 	struct bnxt *bp = netdev_priv(dev);
15861 
15862 	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15863 		return -EOPNOTSUPP;
15864 
15865 	/* The PF and its VF reps only support the switchdev framework */
15866 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15867 		return -EOPNOTSUPP;
15868 
15869 	ppid->id_len = sizeof(bp->dsn);
15870 	memcpy(ppid->id, bp->dsn, ppid->id_len);
15871 
15872 	return 0;
15873 }
15874 
15875 static const struct net_device_ops bnxt_netdev_ops = {
15876 	.ndo_open		= bnxt_open,
15877 	.ndo_start_xmit		= bnxt_start_xmit,
15878 	.ndo_stop		= bnxt_close,
15879 	.ndo_get_stats64	= bnxt_get_stats64,
15880 	.ndo_set_rx_mode	= bnxt_set_rx_mode,
15881 	.ndo_eth_ioctl		= bnxt_ioctl,
15882 	.ndo_validate_addr	= eth_validate_addr,
15883 	.ndo_set_mac_address	= bnxt_change_mac_addr,
15884 	.ndo_change_mtu		= bnxt_change_mtu,
15885 	.ndo_fix_features	= bnxt_fix_features,
15886 	.ndo_set_features	= bnxt_set_features,
15887 	.ndo_features_check	= bnxt_features_check,
15888 	.ndo_tx_timeout		= bnxt_tx_timeout,
15889 #ifdef CONFIG_BNXT_SRIOV
15890 	.ndo_get_vf_config	= bnxt_get_vf_config,
15891 	.ndo_set_vf_mac		= bnxt_set_vf_mac,
15892 	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
15893 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
15894 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
15895 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
15896 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
15897 #endif
15898 	.ndo_setup_tc           = bnxt_setup_tc,
15899 #ifdef CONFIG_RFS_ACCEL
15900 	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
15901 #endif
15902 	.ndo_bpf		= bnxt_xdp,
15903 	.ndo_xdp_xmit		= bnxt_xdp_xmit,
15904 	.ndo_bridge_getlink	= bnxt_bridge_getlink,
15905 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
15906 	.ndo_hwtstamp_get	= bnxt_hwtstamp_get,
15907 	.ndo_hwtstamp_set	= bnxt_hwtstamp_set,
15908 };
15909 
15910 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15911 				    struct netdev_queue_stats_rx *stats)
15912 {
15913 	struct bnxt *bp = netdev_priv(dev);
15914 	struct bnxt_cp_ring_info *cpr;
15915 	u64 *sw;
15916 
15917 	if (!bp->bnapi)
15918 		return;
15919 
15920 	cpr = &bp->bnapi[i]->cp_ring;
15921 	sw = cpr->stats.sw_stats;
15922 
15923 	stats->packets = 0;
15924 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15925 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15926 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15927 
15928 	stats->bytes = 0;
15929 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15930 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15931 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15932 
15933 	stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15934 }
15935 
15936 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15937 				    struct netdev_queue_stats_tx *stats)
15938 {
15939 	struct bnxt *bp = netdev_priv(dev);
15940 	struct bnxt_napi *bnapi;
15941 	u64 *sw;
15942 
15943 	if (!bp->tx_ring)
15944 		return;
15945 
15946 	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15947 	sw = bnapi->cp_ring.stats.sw_stats;
15948 
15949 	stats->packets = 0;
15950 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15951 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15952 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15953 
15954 	stats->bytes = 0;
15955 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15956 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15957 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15958 }
15959 
15960 static void bnxt_get_base_stats(struct net_device *dev,
15961 				struct netdev_queue_stats_rx *rx,
15962 				struct netdev_queue_stats_tx *tx)
15963 {
15964 	struct bnxt *bp = netdev_priv(dev);
15965 
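	/* Base stats are the counters accumulated before the current rings
	 * were created (saved in the *_prev fields); the per-queue stats
	 * reported by the callbacks above are added on top by the core.
	 */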
15966 	rx->packets = bp->net_stats_prev.rx_packets;
15967 	rx->bytes = bp->net_stats_prev.rx_bytes;
15968 	rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15969 
15970 	tx->packets = bp->net_stats_prev.tx_packets;
15971 	tx->bytes = bp->net_stats_prev.tx_bytes;
15972 }
15973 
15974 static const struct netdev_stat_ops bnxt_stat_ops = {
15975 	.get_queue_stats_rx	= bnxt_get_queue_stats_rx,
15976 	.get_queue_stats_tx	= bnxt_get_queue_stats_tx,
15977 	.get_base_stats		= bnxt_get_base_stats,
15978 };
15979 
15980 static void bnxt_queue_default_qcfg(struct net_device *dev,
15981 				    struct netdev_queue_config *qcfg)
15982 {
15983 	qcfg->rx_page_size = BNXT_RX_PAGE_SIZE;
15984 }
15985 
15986 static int bnxt_validate_qcfg(struct net_device *dev,
15987 			      struct netdev_queue_config *qcfg,
15988 			      struct netlink_ext_ack *extack)
15989 {
15990 	struct bnxt *bp = netdev_priv(dev);
15991 
15992 	/* Older chips need MSS calculation, so a non-default rx_page_size is not supported */
15993 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
15994 	    qcfg->rx_page_size != BNXT_RX_PAGE_SIZE)
15995 		return -EINVAL;
15996 
15997 	if (!is_power_of_2(qcfg->rx_page_size))
15998 		return -ERANGE;
15999 
16000 	if (qcfg->rx_page_size < BNXT_RX_PAGE_SIZE ||
16001 	    qcfg->rx_page_size > BNXT_MAX_RX_PAGE_SIZE)
16002 		return -ERANGE;
16003 
16004 	return 0;
16005 }
16006 
16007 static int bnxt_queue_mem_alloc(struct net_device *dev,
16008 				struct netdev_queue_config *qcfg,
16009 				void *qmem, int idx)
16010 {
16011 	struct bnxt_rx_ring_info *rxr, *clone;
16012 	struct bnxt *bp = netdev_priv(dev);
16013 	struct bnxt_ring_struct *ring;
16014 	int rc;
16015 
16016 	if (!bp->rx_ring)
16017 		return -ENETDOWN;
16018 
16019 	rxr = &bp->rx_ring[idx];
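	/* Build a shadow copy of the RX ring in qmem and give it its own
	 * page pools, ring memory and TPA state.  The live ring is switched
	 * over to these resources in bnxt_queue_start().
	 */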
16020 	clone = qmem;
16021 	memcpy(clone, rxr, sizeof(*rxr));
16022 	bnxt_init_rx_ring_struct(bp, clone);
16023 	bnxt_reset_rx_ring_struct(bp, clone);
16024 
16025 	clone->rx_prod = 0;
16026 	clone->rx_agg_prod = 0;
16027 	clone->rx_sw_agg_prod = 0;
16028 	clone->rx_next_cons = 0;
16029 	clone->need_head_pool = false;
16030 	clone->rx_page_size = qcfg->rx_page_size;
16031 
16032 	rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
16033 	if (rc)
16034 		return rc;
16035 
16036 	rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
16037 	if (rc < 0)
16038 		goto err_page_pool_destroy;
16039 
16040 	rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
16041 					MEM_TYPE_PAGE_POOL,
16042 					clone->page_pool);
16043 	if (rc)
16044 		goto err_rxq_info_unreg;
16045 
16046 	ring = &clone->rx_ring_struct;
16047 	rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16048 	if (rc)
16049 		goto err_free_rx_ring;
16050 
16051 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
16052 		ring = &clone->rx_agg_ring_struct;
16053 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
16054 		if (rc)
16055 			goto err_free_rx_agg_ring;
16056 
16057 		rc = bnxt_alloc_rx_agg_bmap(bp, clone);
16058 		if (rc)
16059 			goto err_free_rx_agg_ring;
16060 	}
16061 
16062 	if (bp->flags & BNXT_FLAG_TPA) {
16063 		rc = bnxt_alloc_one_tpa_info(bp, clone);
16064 		if (rc)
16065 			goto err_free_tpa_info;
16066 	}
16067 
16068 	bnxt_init_one_rx_ring_rxbd(bp, clone);
16069 	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
16070 
16071 	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
16072 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16073 		bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
16074 	if (bp->flags & BNXT_FLAG_TPA)
16075 		bnxt_alloc_one_tpa_info_data(bp, clone);
16076 
16077 	return 0;
16078 
16079 err_free_tpa_info:
16080 	bnxt_free_one_tpa_info(bp, clone);
16081 err_free_rx_agg_ring:
16082 	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
16083 err_free_rx_ring:
16084 	bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
16085 err_rxq_info_unreg:
16086 	xdp_rxq_info_unreg(&clone->xdp_rxq);
16087 err_page_pool_destroy:
16088 	page_pool_destroy(clone->page_pool);
16089 	page_pool_destroy(clone->head_pool);
16090 	clone->page_pool = NULL;
16091 	clone->head_pool = NULL;
16092 	return rc;
16093 }
16094 
16095 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
16096 {
16097 	struct bnxt_rx_ring_info *rxr = qmem;
16098 	struct bnxt *bp = netdev_priv(dev);
16099 	struct bnxt_ring_struct *ring;
16100 
16101 	bnxt_free_one_rx_ring_skbs(bp, rxr);
16102 	bnxt_free_one_tpa_info(bp, rxr);
16103 
16104 	xdp_rxq_info_unreg(&rxr->xdp_rxq);
16105 
16106 	page_pool_destroy(rxr->page_pool);
16107 	page_pool_destroy(rxr->head_pool);
16108 	rxr->page_pool = NULL;
16109 	rxr->head_pool = NULL;
16110 
16111 	ring = &rxr->rx_ring_struct;
16112 	bnxt_free_ring(bp, &ring->ring_mem);
16113 
16114 	ring = &rxr->rx_agg_ring_struct;
16115 	bnxt_free_ring(bp, &ring->ring_mem);
16116 
16117 	kfree(rxr->rx_agg_bmap);
16118 	rxr->rx_agg_bmap = NULL;
16119 }
16120 
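/* Copy the ring memory page table, vmem and aggregation bitmap pointers
 * from @src (the clone built in bnxt_queue_mem_alloc()) to @dst (the live
 * ring) after checking that both rings have the same geometry.
 */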
16121 static void bnxt_copy_rx_ring(struct bnxt *bp,
16122 			      struct bnxt_rx_ring_info *dst,
16123 			      struct bnxt_rx_ring_info *src)
16124 {
16125 	struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
16126 	struct bnxt_ring_struct *dst_ring, *src_ring;
16127 	int i;
16128 
16129 	dst_ring = &dst->rx_ring_struct;
16130 	dst_rmem = &dst_ring->ring_mem;
16131 	src_ring = &src->rx_ring_struct;
16132 	src_rmem = &src_ring->ring_mem;
16133 
16134 	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16135 	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16136 	WARN_ON(dst_rmem->flags != src_rmem->flags);
16137 	WARN_ON(dst_rmem->depth != src_rmem->depth);
16138 	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16139 	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16140 
16141 	dst_rmem->pg_tbl = src_rmem->pg_tbl;
16142 	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16143 	*dst_rmem->vmem = *src_rmem->vmem;
16144 	for (i = 0; i < dst_rmem->nr_pages; i++) {
16145 		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16146 		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16147 	}
16148 
16149 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
16150 		return;
16151 
16152 	dst_ring = &dst->rx_agg_ring_struct;
16153 	dst_rmem = &dst_ring->ring_mem;
16154 	src_ring = &src->rx_agg_ring_struct;
16155 	src_rmem = &src_ring->ring_mem;
16156 
16157 	dst->rx_page_size = src->rx_page_size;
16158 
16159 	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16160 	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16161 	WARN_ON(dst_rmem->flags != src_rmem->flags);
16162 	WARN_ON(dst_rmem->depth != src_rmem->depth);
16163 	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16164 	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16165 	WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16166 
16167 	dst_rmem->pg_tbl = src_rmem->pg_tbl;
16168 	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16169 	*dst_rmem->vmem = *src_rmem->vmem;
16170 	for (i = 0; i < dst_rmem->nr_pages; i++) {
16171 		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16172 		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16173 	}
16174 
16175 	dst->rx_agg_bmap = src->rx_agg_bmap;
16176 }
16177 
16178 static int bnxt_queue_start(struct net_device *dev,
16179 			    struct netdev_queue_config *qcfg,
16180 			    void *qmem, int idx)
16181 {
16182 	struct bnxt *bp = netdev_priv(dev);
16183 	struct bnxt_rx_ring_info *rxr, *clone;
16184 	struct bnxt_cp_ring_info *cpr;
16185 	struct bnxt_vnic_info *vnic;
16186 	struct bnxt_napi *bnapi;
16187 	int i, rc;
16188 	u16 mru;
16189 
16190 	rxr = &bp->rx_ring[idx];
16191 	clone = qmem;
16192 
16193 	rxr->rx_prod = clone->rx_prod;
16194 	rxr->rx_agg_prod = clone->rx_agg_prod;
16195 	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16196 	rxr->rx_next_cons = clone->rx_next_cons;
16197 	rxr->rx_tpa = clone->rx_tpa;
16198 	rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16199 	rxr->page_pool = clone->page_pool;
16200 	rxr->head_pool = clone->head_pool;
16201 	rxr->xdp_rxq = clone->xdp_rxq;
16202 	rxr->need_head_pool = clone->need_head_pool;
16203 
16204 	bnxt_copy_rx_ring(bp, rxr, clone);
16205 
16206 	bnapi = rxr->bnapi;
16207 	cpr = &bnapi->cp_ring;
16208 
16209 	/* All rings have been reserved and previously allocated.
16210 	 * Reallocating with the same parameters should never fail.
16211 	 */
16212 	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16213 	if (rc)
16214 		goto err_reset;
16215 
16216 	if (bp->tph_mode) {
16217 		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16218 		if (rc)
16219 			goto err_reset;
16220 	}
16221 
16222 	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16223 	if (rc)
16224 		goto err_reset;
16225 
16226 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16227 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16228 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16229 
16230 	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16231 		rc = bnxt_tx_queue_start(bp, idx);
16232 		if (rc)
16233 			goto err_reset;
16234 	}
16235 
16236 	bnxt_enable_rx_page_pool(rxr);
16237 	napi_enable_locked(&bnapi->napi);
16238 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16239 
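	/* Re-enable RX traffic to this ring by restoring the VNIC MRU that
	 * was cleared in bnxt_queue_stop().
	 */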
16240 	mru = bp->dev->mtu + VLAN_ETH_HLEN;
16241 	for (i = 0; i < bp->nr_vnics; i++) {
16242 		vnic = &bp->vnic_info[i];
16243 
16244 		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16245 		if (rc)
16246 			return rc;
16247 	}
16248 	return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16249 
16250 err_reset:
16251 	netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16252 		   rc);
16253 	napi_enable_locked(&bnapi->napi);
16254 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16255 	bnxt_reset_task(bp, true);
16256 	return rc;
16257 }
16258 
16259 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16260 {
16261 	struct bnxt *bp = netdev_priv(dev);
16262 	struct bnxt_rx_ring_info *rxr;
16263 	struct bnxt_cp_ring_info *cpr;
16264 	struct bnxt_vnic_info *vnic;
16265 	struct bnxt_napi *bnapi;
16266 	int i;
16267 
16268 	for (i = 0; i < bp->nr_vnics; i++) {
16269 		vnic = &bp->vnic_info[i];
16270 
16271 		bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16272 	}
16273 	bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16274 	/* Make sure NAPI sees that the VNIC is disabled */
16275 	synchronize_net();
16276 	rxr = &bp->rx_ring[idx];
16277 	bnapi = rxr->bnapi;
16278 	cpr = &bnapi->cp_ring;
16279 	cancel_work_sync(&cpr->dim.work);
16280 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
16281 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16282 	page_pool_disable_direct_recycling(rxr->page_pool);
16283 	if (bnxt_separate_head_pool(rxr))
16284 		page_pool_disable_direct_recycling(rxr->head_pool);
16285 
16286 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16287 		bnxt_tx_queue_stop(bp, idx);
16288 
16289 	/* Disable NAPI now after freeing the rings because HWRM_RING_FREE
16290 	 * completion is handled in NAPI to guarantee no more DMA on that ring
16291 	 * after seeing the completion.
16292 	 */
16293 	napi_disable_locked(&bnapi->napi);
16294 
16295 	if (bp->tph_mode) {
16296 		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16297 		bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16298 	}
16299 	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16300 
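	/* Hand the stopped ring's state back through qmem; its buffers and
	 * ring memory are released later in bnxt_queue_mem_free().
	 */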
16301 	memcpy(qmem, rxr, sizeof(*rxr));
16302 	bnxt_init_rx_ring_struct(bp, qmem);
16303 
16304 	return 0;
16305 }
16306 
16307 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16308 	.ndo_queue_mem_size	= sizeof(struct bnxt_rx_ring_info),
16309 	.ndo_queue_mem_alloc	= bnxt_queue_mem_alloc,
16310 	.ndo_queue_mem_free	= bnxt_queue_mem_free,
16311 	.ndo_queue_start	= bnxt_queue_start,
16312 	.ndo_queue_stop		= bnxt_queue_stop,
16313 	.ndo_default_qcfg	= bnxt_queue_default_qcfg,
16314 	.ndo_validate_qcfg	= bnxt_validate_qcfg,
16315 	.supported_params	= QCFG_RX_PAGE_SIZE,
16316 };
16317 
16318 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops_unsupp = {
16319 	.ndo_default_qcfg	= bnxt_queue_default_qcfg,
16320 };
16321 
16322 static void bnxt_remove_one(struct pci_dev *pdev)
16323 {
16324 	struct net_device *dev = pci_get_drvdata(pdev);
16325 	struct bnxt *bp = netdev_priv(dev);
16326 
16327 	if (BNXT_PF(bp))
16328 		__bnxt_sriov_disable(bp);
16329 
16330 	bnxt_rdma_aux_device_del(bp);
16331 
16332 	unregister_netdev(dev);
16333 	bnxt_ptp_clear(bp);
16334 
16335 	bnxt_rdma_aux_device_uninit(bp);
16336 
16337 	bnxt_free_l2_filters(bp, true);
16338 	bnxt_free_ntp_fltrs(bp, true);
16339 	WARN_ON(bp->num_rss_ctx);
16340 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16341 	/* Flush any pending tasks */
16342 	cancel_work_sync(&bp->sp_task);
16343 	cancel_delayed_work_sync(&bp->fw_reset_task);
16344 	bp->sp_event = 0;
16345 
16346 	bnxt_dl_fw_reporters_destroy(bp);
16347 	bnxt_dl_unregister(bp);
16348 	bnxt_shutdown_tc(bp);
16349 
16350 	bnxt_clear_int_mode(bp);
16351 	bnxt_hwrm_func_drv_unrgtr(bp);
16352 	bnxt_free_hwrm_resources(bp);
16353 	bnxt_hwmon_uninit(bp);
16354 	bnxt_ethtool_free(bp);
16355 	bnxt_dcb_free(bp);
16356 	kfree(bp->ptp_cfg);
16357 	bp->ptp_cfg = NULL;
16358 	kfree(bp->fw_health);
16359 	bp->fw_health = NULL;
16360 	bnxt_cleanup_pci(bp);
16361 	bnxt_free_ctx_mem(bp, true);
16362 	bnxt_free_crash_dump_mem(bp);
16363 	kfree(bp->rss_indir_tbl);
16364 	bp->rss_indir_tbl = NULL;
16365 	bnxt_free_port_stats(bp);
16366 	free_netdev(dev);
16367 }
16368 
16369 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16370 {
16371 	int rc = 0;
16372 	struct bnxt_link_info *link_info = &bp->link_info;
16373 
16374 	bp->phy_flags = 0;
16375 	rc = bnxt_hwrm_phy_qcaps(bp);
16376 	if (rc) {
16377 		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16378 			   rc);
16379 		return rc;
16380 	}
16381 	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16382 		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16383 	else
16384 		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16385 
16386 	bp->mac_flags = 0;
16387 	bnxt_hwrm_mac_qcaps(bp);
16388 
16389 	if (!fw_dflt)
16390 		return 0;
16391 
16392 	mutex_lock(&bp->link_lock);
16393 	rc = bnxt_update_link(bp, false);
16394 	if (rc) {
16395 		mutex_unlock(&bp->link_lock);
16396 		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16397 			   rc);
16398 		return rc;
16399 	}
16400 
16401 	/* Older firmware does not have supported_auto_speeds, so assume
16402 	 * that all supported speeds can be autonegotiated.
16403 	 */
16404 	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16405 		link_info->support_auto_speeds = link_info->support_speeds;
16406 
16407 	bnxt_init_ethtool_link_settings(bp);
16408 	mutex_unlock(&bp->link_lock);
16409 	return 0;
16410 }
16411 
16412 static int bnxt_get_max_irq(struct pci_dev *pdev)
16413 {
16414 	u16 ctrl;
16415 
16416 	if (!pdev->msix_cap)
16417 		return 1;
16418 
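	/* The MSI-X table size field encodes the number of entries minus 1 */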
16419 	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16420 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16421 }
16422 
16423 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16424 				int *max_cp)
16425 {
16426 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16427 	int max_ring_grps = 0, max_irq;
16428 
16429 	*max_tx = hw_resc->max_tx_rings;
16430 	*max_rx = hw_resc->max_rx_rings;
16431 	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16432 	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16433 			bnxt_get_ulp_msix_num_in_use(bp),
16434 			hw_resc->max_stat_ctxs -
16435 			bnxt_get_ulp_stat_ctxs_in_use(bp));
16436 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16437 		*max_cp = min_t(int, *max_cp, max_irq);
16438 	max_ring_grps = hw_resc->max_hw_ring_grps;
16439 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16440 		*max_cp -= 1;
16441 		*max_rx -= 2;
16442 	}
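	/* With aggregation rings enabled, each RX queue uses two HW rings */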
16443 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
16444 		*max_rx >>= 1;
16445 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16446 		int rc;
16447 
16448 		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16449 		if (rc) {
16450 			*max_rx = 0;
16451 			*max_tx = 0;
16452 		}
16453 		/* On P5+ chips, the max_cp output param is the number of available NQs */
16454 		*max_cp = max_irq;
16455 	}
16456 	*max_rx = min_t(int, *max_rx, max_ring_grps);
16457 }
16458 
16459 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16460 {
16461 	int rx, tx, cp;
16462 
16463 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
16464 	*max_rx = rx;
16465 	*max_tx = tx;
16466 	if (!rx || !tx || !cp)
16467 		return -ENOMEM;
16468 
16469 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16470 }
16471 
16472 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16473 			       bool shared)
16474 {
16475 	int rc;
16476 
16477 	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16478 	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16479 		/* Not enough rings, try disabling agg rings. */
16480 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16481 		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16482 		if (rc) {
16483 			/* set BNXT_FLAG_AGG_RINGS back for consistency */
16484 			bp->flags |= BNXT_FLAG_AGG_RINGS;
16485 			return rc;
16486 		}
16487 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16488 		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16489 		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16490 		bnxt_set_ring_params(bp);
16491 	}
16492 
16493 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16494 		int max_cp, max_stat, max_irq;
16495 
16496 		/* Reserve minimum resources for RoCE */
16497 		max_cp = bnxt_get_max_func_cp_rings(bp);
16498 		max_stat = bnxt_get_max_func_stat_ctxs(bp);
16499 		max_irq = bnxt_get_max_func_irqs(bp);
16500 		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16501 		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16502 		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16503 			return 0;
16504 
16505 		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16506 		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16507 		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16508 		max_cp = min_t(int, max_cp, max_irq);
16509 		max_cp = min_t(int, max_cp, max_stat);
16510 		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16511 		if (rc)
16512 			rc = 0;
16513 	}
16514 	return rc;
16515 }
16516 
16517 /* In the initial default shared ring setting, each shared ring must have
16518  * an RX/TX ring pair.
16519  */
16520 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16521 {
16522 	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16523 	bp->rx_nr_rings = bp->cp_nr_rings;
16524 	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16525 	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16526 }
16527 
16528 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16529 {
16530 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
16531 	int avail_msix;
16532 
16533 	if (!bnxt_can_reserve_rings(bp))
16534 		return 0;
16535 
16536 	if (sh)
16537 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
16538 	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16539 	/* Reduce default rings on multi-port cards so that total default
16540 	 * rings do not exceed CPU count.
16541 	 */
16542 	if (bp->port_count > 1) {
16543 		int max_rings =
16544 			max_t(int, num_online_cpus() / bp->port_count, 1);
16545 
16546 		dflt_rings = min_t(int, dflt_rings, max_rings);
16547 	}
16548 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16549 	if (rc)
16550 		return rc;
16551 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16552 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16553 	if (sh)
16554 		bnxt_trim_dflt_sh_rings(bp);
16555 	else
16556 		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16557 	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16558 
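	/* Give any leftover MSI-X vectors to the RoCE ULP if enough remain */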
16559 	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16560 	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16561 		int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16562 
16563 		bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16564 		bnxt_set_dflt_ulp_stat_ctxs(bp);
16565 	}
16566 
16567 	rc = __bnxt_reserve_rings(bp);
16568 	if (rc && rc != -ENODEV)
16569 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16570 	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16571 	if (sh)
16572 		bnxt_trim_dflt_sh_rings(bp);
16573 
16574 	/* Rings may have been trimmed, re-reserve the trimmed rings. */
16575 	if (bnxt_need_reserve_rings(bp)) {
16576 		rc = __bnxt_reserve_rings(bp);
16577 		if (rc && rc != -ENODEV)
16578 			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16579 		bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16580 	}
16581 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16582 		bp->rx_nr_rings++;
16583 		bp->cp_nr_rings++;
16584 	}
16585 	if (rc) {
16586 		bp->tx_nr_rings = 0;
16587 		bp->rx_nr_rings = 0;
16588 	}
16589 	return rc;
16590 }
16591 
16592 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16593 {
16594 	int rc;
16595 
16596 	if (bp->tx_nr_rings)
16597 		return 0;
16598 
16599 	bnxt_ulp_irq_stop(bp);
16600 	bnxt_clear_int_mode(bp);
16601 	rc = bnxt_set_dflt_rings(bp, true);
16602 	if (rc) {
16603 		if (BNXT_VF(bp) && rc == -ENODEV)
16604 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16605 		else
16606 			netdev_err(bp->dev, "Not enough rings available.\n");
16607 		goto init_dflt_ring_err;
16608 	}
16609 	rc = bnxt_init_int_mode(bp);
16610 	if (rc)
16611 		goto init_dflt_ring_err;
16612 
16613 	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16614 
16615 	bnxt_set_dflt_rfs(bp);
16616 
16617 init_dflt_ring_err:
16618 	bnxt_ulp_irq_restart(bp, rc);
16619 	return rc;
16620 }
16621 
16622 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16623 {
16624 	int rc;
16625 
16626 	netdev_ops_assert_locked(bp->dev);
16627 	bnxt_hwrm_func_qcaps(bp);
16628 
16629 	if (netif_running(bp->dev))
16630 		__bnxt_close_nic(bp, true, false);
16631 
16632 	bnxt_ulp_irq_stop(bp);
16633 	bnxt_clear_int_mode(bp);
16634 	rc = bnxt_init_int_mode(bp);
16635 	bnxt_ulp_irq_restart(bp, rc);
16636 
16637 	if (netif_running(bp->dev)) {
16638 		if (rc)
16639 			netif_close(bp->dev);
16640 		else
16641 			rc = bnxt_open_nic(bp, true, false);
16642 	}
16643 
16644 	return rc;
16645 }
16646 
16647 static int bnxt_init_mac_addr(struct bnxt *bp)
16648 {
16649 	int rc = 0;
16650 
16651 	if (BNXT_PF(bp)) {
16652 		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16653 	} else {
16654 #ifdef CONFIG_BNXT_SRIOV
16655 		struct bnxt_vf_info *vf = &bp->vf;
16656 		bool strict_approval = true;
16657 
16658 		if (is_valid_ether_addr(vf->mac_addr)) {
16659 			/* overwrite netdev dev_addr with admin VF MAC */
16660 			eth_hw_addr_set(bp->dev, vf->mac_addr);
16661 			/* Older PF driver or firmware may not approve this
16662 			 * correctly.
16663 			 */
16664 			strict_approval = false;
16665 		} else {
16666 			eth_hw_addr_random(bp->dev);
16667 		}
16668 		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16669 #endif
16670 	}
16671 	return rc;
16672 }
16673 
16674 static void bnxt_vpd_read_info(struct bnxt *bp)
16675 {
16676 	struct pci_dev *pdev = bp->pdev;
16677 	unsigned int vpd_size, kw_len;
16678 	int pos, size;
16679 	u8 *vpd_data;
16680 
16681 	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16682 	if (IS_ERR(vpd_data)) {
16683 		pci_warn(pdev, "Unable to read VPD\n");
16684 		return;
16685 	}
16686 
16687 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16688 					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16689 	if (pos < 0)
16690 		goto read_sn;
16691 
16692 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16693 	memcpy(bp->board_partno, &vpd_data[pos], size);
16694 
16695 read_sn:
16696 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16697 					   PCI_VPD_RO_KEYWORD_SERIALNO,
16698 					   &kw_len);
16699 	if (pos < 0)
16700 		goto exit;
16701 
16702 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16703 	memcpy(bp->board_serialno, &vpd_data[pos], size);
16704 exit:
16705 	kfree(vpd_data);
16706 }
16707 
16708 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16709 {
16710 	struct pci_dev *pdev = bp->pdev;
16711 	u64 qword;
16712 
16713 	qword = pci_get_dsn(pdev);
16714 	if (!qword) {
16715 		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16716 		return -EOPNOTSUPP;
16717 	}
16718 
16719 	put_unaligned_le64(qword, dsn);
16720 
16721 	bp->flags |= BNXT_FLAG_DSN_VALID;
16722 	return 0;
16723 }
16724 
16725 static int bnxt_map_db_bar(struct bnxt *bp)
16726 {
16727 	if (!bp->db_size)
16728 		return -ENODEV;
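	/* BAR 2 holds the doorbell registers; map only db_size bytes of it */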
16729 	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16730 	if (!bp->bar1)
16731 		return -ENOMEM;
16732 	return 0;
16733 }
16734 
16735 void bnxt_print_device_info(struct bnxt *bp)
16736 {
16737 	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16738 		    board_info[bp->board_idx].name,
16739 		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16740 
16741 	pcie_print_link_status(bp->pdev);
16742 }
16743 
16744 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16745 {
16746 	struct bnxt_hw_resc *hw_resc;
16747 	struct net_device *dev;
16748 	struct bnxt *bp;
16749 	int rc, max_irqs;
16750 
16751 	if (pci_is_bridge(pdev))
16752 		return -ENODEV;
16753 
16754 	if (!pdev->msix_cap) {
16755 		dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16756 		return -ENODEV;
16757 	}
16758 
16759 	/* Clear any pending DMA transactions from the crashed kernel
16760 	 * while loading the driver in the kdump capture kernel.
16761 	 */
16762 	if (is_kdump_kernel()) {
16763 		pci_clear_master(pdev);
16764 		pcie_flr(pdev);
16765 	}
16766 
16767 	max_irqs = bnxt_get_max_irq(pdev);
16768 	dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16769 				 max_irqs);
16770 	if (!dev)
16771 		return -ENOMEM;
16772 
16773 	bp = netdev_priv(dev);
16774 	bp->board_idx = ent->driver_data;
16775 	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16776 	bnxt_set_max_func_irqs(bp, max_irqs);
16777 
16778 	if (bnxt_vf_pciid(bp->board_idx))
16779 		bp->flags |= BNXT_FLAG_VF;
16780 
16781 	/* No devlink port registration in case of a VF */
16782 	if (BNXT_PF(bp))
16783 		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16784 
16785 	rc = bnxt_init_board(pdev, dev);
16786 	if (rc < 0)
16787 		goto init_err_free;
16788 
16789 	dev->netdev_ops = &bnxt_netdev_ops;
16790 	dev->stat_ops = &bnxt_stat_ops;
16791 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16792 	dev->ethtool_ops = &bnxt_ethtool_ops;
16793 	pci_set_drvdata(pdev, dev);
16794 
16795 	rc = bnxt_alloc_hwrm_resources(bp);
16796 	if (rc)
16797 		goto init_err_pci_clean;
16798 
16799 	mutex_init(&bp->hwrm_cmd_lock);
16800 	mutex_init(&bp->link_lock);
16801 
16802 	rc = bnxt_fw_init_one_p1(bp);
16803 	if (rc)
16804 		goto init_err_pci_clean;
16805 
16806 	if (BNXT_PF(bp))
16807 		bnxt_vpd_read_info(bp);
16808 
16809 	if (BNXT_CHIP_P5_PLUS(bp)) {
16810 		bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16811 		if (BNXT_CHIP_P7(bp))
16812 			bp->flags |= BNXT_FLAG_CHIP_P7;
16813 	}
16814 
16815 	rc = bnxt_alloc_rss_indir_tbl(bp);
16816 	if (rc)
16817 		goto init_err_pci_clean;
16818 
16819 	rc = bnxt_fw_init_one_p2(bp);
16820 	if (rc)
16821 		goto init_err_pci_clean;
16822 
16823 	rc = bnxt_map_db_bar(bp);
16824 	if (rc) {
16825 		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16826 			rc);
16827 		goto init_err_pci_clean;
16828 	}
16829 
16830 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16831 			   NETIF_F_TSO | NETIF_F_TSO6 |
16832 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16833 			   NETIF_F_GSO_IPXIP4 |
16834 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16835 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16836 			   NETIF_F_RXCSUM | NETIF_F_GRO;
16837 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16838 		dev->hw_features |= NETIF_F_GSO_UDP_L4;
16839 
16840 	if (BNXT_SUPPORTS_TPA(bp))
16841 		dev->hw_features |= NETIF_F_LRO;
16842 
16843 	dev->hw_enc_features =
16844 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16845 			NETIF_F_TSO | NETIF_F_TSO6 |
16846 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16847 			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16848 			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16849 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16850 		dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16851 	if (bp->flags & BNXT_FLAG_CHIP_P7)
16852 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16853 	else
16854 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16855 
16856 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16857 				    NETIF_F_GSO_GRE_CSUM;
16858 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16859 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16860 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16861 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16862 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16863 	if (BNXT_SUPPORTS_TPA(bp))
16864 		dev->hw_features |= NETIF_F_GRO_HW;
16865 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
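	/* Hardware GRO and LRO are mutually exclusive; default to HW GRO */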
16866 	if (dev->features & NETIF_F_GRO_HW)
16867 		dev->features &= ~NETIF_F_LRO;
16868 	dev->priv_flags |= IFF_UNICAST_FLT;
16869 
16870 	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16871 	if (bp->tso_max_segs)
16872 		netif_set_tso_max_segs(dev, bp->tso_max_segs);
16873 
16874 	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16875 			    NETDEV_XDP_ACT_RX_SG;
16876 
16877 #ifdef CONFIG_BNXT_SRIOV
16878 	init_waitqueue_head(&bp->sriov_cfg_wait);
16879 #endif
16880 	if (BNXT_SUPPORTS_TPA(bp)) {
16881 		bp->gro_func = bnxt_gro_func_5730x;
16882 		if (BNXT_CHIP_P4(bp))
16883 			bp->gro_func = bnxt_gro_func_5731x;
16884 		else if (BNXT_CHIP_P5_PLUS(bp))
16885 			bp->gro_func = bnxt_gro_func_5750x;
16886 	}
16887 	if (!BNXT_CHIP_P4_PLUS(bp))
16888 		bp->flags |= BNXT_FLAG_DOUBLE_DB;
16889 
16890 	rc = bnxt_init_mac_addr(bp);
16891 	if (rc) {
16892 		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16893 		rc = -EADDRNOTAVAIL;
16894 		goto init_err_pci_clean;
16895 	}
16896 
16897 	if (BNXT_PF(bp)) {
16898 		/* Read the adapter's DSN to use as the eswitch switch_id */
16899 		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16900 	}
16901 
16902 	/* MTU range: 60 - FW defined max */
16903 	dev->min_mtu = ETH_ZLEN;
16904 	dev->max_mtu = bp->max_mtu;
16905 
16906 	rc = bnxt_probe_phy(bp, true);
16907 	if (rc)
16908 		goto init_err_pci_clean;
16909 
16910 	hw_resc = &bp->hw_resc;
16911 	bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16912 		       BNXT_L2_FLTR_MAX_FLTR;
16913 	/* Older firmware may not report these filters properly */
16914 	if (bp->max_fltr < BNXT_MAX_FLTR)
16915 		bp->max_fltr = BNXT_MAX_FLTR;
16916 	bnxt_init_l2_fltr_tbl(bp);
16917 	__bnxt_set_rx_skb_mode(bp, false);
16918 	bnxt_set_tpa_flags(bp);
16919 	bnxt_init_ring_params(bp);
16920 	bnxt_set_ring_params(bp);
16921 	bnxt_rdma_aux_device_init(bp);
16922 	rc = bnxt_set_dflt_rings(bp, true);
16923 	if (rc) {
16924 		if (BNXT_VF(bp) && rc == -ENODEV) {
16925 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16926 		} else {
16927 			netdev_err(bp->dev, "Not enough rings available.\n");
16928 			rc = -ENOMEM;
16929 		}
16930 		goto init_err_pci_clean;
16931 	}
16932 
16933 	bnxt_fw_init_one_p3(bp);
16934 
16935 	bnxt_init_dflt_coal(bp);
16936 
16937 	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16938 		bp->flags |= BNXT_FLAG_STRIP_VLAN;
16939 
16940 	rc = bnxt_init_int_mode(bp);
16941 	if (rc)
16942 		goto init_err_pci_clean;
16943 
16944 	/* No TC has been set yet and rings may have been trimmed due to
16945 	 * limited MSIX, so we re-initialize the TX rings per TC.
16946 	 */
16947 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16948 
16949 	if (BNXT_PF(bp)) {
16950 		if (!bnxt_pf_wq) {
16951 			bnxt_pf_wq =
16952 				create_singlethread_workqueue("bnxt_pf_wq");
16953 			if (!bnxt_pf_wq) {
16954 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
16955 				rc = -ENOMEM;
16956 				goto init_err_pci_clean;
16957 			}
16958 		}
16959 		rc = bnxt_init_tc(bp);
16960 		if (rc)
16961 			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16962 				   rc);
16963 	}
16964 
16965 	bnxt_inv_fw_health_reg(bp);
16966 	rc = bnxt_dl_register(bp);
16967 	if (rc)
16968 		goto init_err_dl;
16969 
16970 	INIT_LIST_HEAD(&bp->usr_fltr_list);
16971 
16972 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16973 		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16974 
16975 	dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops_unsupp;
16976 	if (BNXT_SUPPORTS_QUEUE_API(bp))
16977 		dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16978 	dev->netmem_tx = true;
16979 
16980 	rc = register_netdev(dev);
16981 	if (rc)
16982 		goto init_err_cleanup;
16983 
16984 	bnxt_dl_fw_reporters_create(bp);
16985 
16986 	bnxt_rdma_aux_device_add(bp);
16987 
16988 	bnxt_print_device_info(bp);
16989 
16990 	pci_save_state(pdev);
16991 
16992 	return 0;
16993 init_err_cleanup:
16994 	bnxt_rdma_aux_device_uninit(bp);
16995 	bnxt_dl_unregister(bp);
16996 init_err_dl:
16997 	bnxt_shutdown_tc(bp);
16998 	bnxt_clear_int_mode(bp);
16999 
17000 init_err_pci_clean:
17001 	bnxt_hwrm_func_drv_unrgtr(bp);
17002 	bnxt_ptp_clear(bp);
17003 	kfree(bp->ptp_cfg);
17004 	bp->ptp_cfg = NULL;
17005 	bnxt_free_hwrm_resources(bp);
17006 	bnxt_hwmon_uninit(bp);
17007 	bnxt_ethtool_free(bp);
17008 	kfree(bp->fw_health);
17009 	bp->fw_health = NULL;
17010 	bnxt_cleanup_pci(bp);
17011 	bnxt_free_ctx_mem(bp, true);
17012 	bnxt_free_crash_dump_mem(bp);
17013 	kfree(bp->rss_indir_tbl);
17014 	bp->rss_indir_tbl = NULL;
17015 
17016 init_err_free:
17017 	free_netdev(dev);
17018 	return rc;
17019 }
17020 
17021 static void bnxt_shutdown(struct pci_dev *pdev)
17022 {
17023 	struct net_device *dev = pci_get_drvdata(pdev);
17024 	struct bnxt *bp;
17025 
17026 	if (!dev)
17027 		return;
17028 
17029 	rtnl_lock();
17030 	netdev_lock(dev);
17031 	bp = netdev_priv(dev);
17032 	if (!bp)
17033 		goto shutdown_exit;
17034 
17035 	if (netif_running(dev))
17036 		netif_close(dev);
17037 
17038 	if (bnxt_hwrm_func_drv_unrgtr(bp)) {
17039 		pcie_flr(pdev);
17040 		goto shutdown_exit;
17041 	}
17042 	bnxt_ptp_clear(bp);
17043 	bnxt_clear_int_mode(bp);
17044 	pci_disable_device(pdev);
17045 
17046 	if (system_state == SYSTEM_POWER_OFF) {
17047 		pci_wake_from_d3(pdev, bp->wol);
17048 		pci_set_power_state(pdev, PCI_D3hot);
17049 	}
17050 
17051 shutdown_exit:
17052 	netdev_unlock(dev);
17053 	rtnl_unlock();
17054 }
17055 
17056 #ifdef CONFIG_PM_SLEEP
17057 static int bnxt_suspend(struct device *device)
17058 {
17059 	struct net_device *dev = dev_get_drvdata(device);
17060 	struct bnxt *bp = netdev_priv(dev);
17061 	int rc = 0;
17062 
17063 	bnxt_ulp_stop(bp);
17064 
17065 	netdev_lock(dev);
17066 	if (netif_running(dev)) {
17067 		netif_device_detach(dev);
17068 		rc = bnxt_close(dev);
17069 	}
17070 	bnxt_hwrm_func_drv_unrgtr(bp);
17071 	bnxt_ptp_clear(bp);
17072 	pci_disable_device(bp->pdev);
17073 	bnxt_free_ctx_mem(bp, false);
17074 	netdev_unlock(dev);
17075 	return rc;
17076 }
17077 
17078 static int bnxt_resume(struct device *device)
17079 {
17080 	struct net_device *dev = dev_get_drvdata(device);
17081 	struct bnxt *bp = netdev_priv(dev);
17082 	int rc = 0;
17083 
17084 	netdev_lock(dev);
17085 	rc = pci_enable_device(bp->pdev);
17086 	if (rc) {
17087 		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
17088 			   rc);
17089 		goto resume_exit;
17090 	}
17091 	pci_set_master(bp->pdev);
17092 	if (bnxt_hwrm_ver_get(bp)) {
17093 		rc = -ENODEV;
17094 		goto resume_exit;
17095 	}
17096 	rc = bnxt_hwrm_func_reset(bp);
17097 	if (rc) {
17098 		rc = -EBUSY;
17099 		goto resume_exit;
17100 	}
17101 
17102 	rc = bnxt_hwrm_func_qcaps(bp);
17103 	if (rc)
17104 		goto resume_exit;
17105 
17106 	bnxt_clear_reservations(bp, true);
17107 
17108 	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
17109 		rc = -ENODEV;
17110 		goto resume_exit;
17111 	}
17112 	if (bp->fw_crash_mem)
17113 		bnxt_hwrm_crash_dump_mem_cfg(bp);
17114 
17115 	if (bnxt_ptp_init(bp)) {
17116 		kfree(bp->ptp_cfg);
17117 		bp->ptp_cfg = NULL;
17118 	}
17119 	bnxt_get_wol_settings(bp);
17120 	if (netif_running(dev)) {
17121 		rc = bnxt_open(dev);
17122 		if (!rc)
17123 			netif_device_attach(dev);
17124 	}
17125 
17126 resume_exit:
17127 	netdev_unlock(bp->dev);
17128 	bnxt_ulp_start(bp, rc);
17129 	if (!rc)
17130 		bnxt_reenable_sriov(bp);
17131 	return rc;
17132 }
17133 
17134 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
17135 #define BNXT_PM_OPS (&bnxt_pm_ops)
17136 
17137 #else
17138 
17139 #define BNXT_PM_OPS NULL
17140 
17141 #endif /* CONFIG_PM_SLEEP */
17142 
17143 /**
17144  * bnxt_io_error_detected - called when PCI error is detected
17145  * @pdev: Pointer to PCI device
17146  * @state: The current pci connection state
17147  *
17148  * This function is called after a PCI bus error affecting
17149  * this device has been detected.
17150  */
17151 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
17152 					       pci_channel_state_t state)
17153 {
17154 	struct net_device *netdev = pci_get_drvdata(pdev);
17155 	struct bnxt *bp = netdev_priv(netdev);
17156 	bool abort = false;
17157 
17158 	netdev_info(netdev, "PCI I/O error detected\n");
17159 
17160 	bnxt_ulp_stop(bp);
17161 
17162 	netdev_lock(netdev);
17163 	netif_device_detach(netdev);
17164 
17165 	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
17166 		netdev_err(bp->dev, "Firmware reset already in progress\n");
17167 		abort = true;
17168 	}
17169 
17170 	if (abort || state == pci_channel_io_perm_failure) {
17171 		netdev_unlock(netdev);
17172 		return PCI_ERS_RESULT_DISCONNECT;
17173 	}
17174 
17175 	/* The link is no longer reliable if the state is pci_channel_io_frozen,
17176 	 * so disable bus mastering to prevent any potential bad DMAs before
17177 	 * freeing kernel memory.
17178 	 */
17179 	if (state == pci_channel_io_frozen) {
17180 		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
17181 		bnxt_fw_fatal_close(bp);
17182 	}
17183 
17184 	if (netif_running(netdev))
17185 		__bnxt_close_nic(bp, true, true);
17186 
17187 	if (pci_is_enabled(pdev))
17188 		pci_disable_device(pdev);
17189 	bnxt_free_ctx_mem(bp, false);
17190 	netdev_unlock(netdev);
17191 
17192 	/* Request a slot reset. */
17193 	return PCI_ERS_RESULT_NEED_RESET;
17194 }
17195 
17196 /**
17197  * bnxt_io_slot_reset - called after the pci bus has been reset.
17198  * @pdev: Pointer to PCI device
17199  *
17200  * Restart the card from scratch, as if from a cold-boot.
17201  * At this point, the card has experienced a hard reset,
17202  * followed by fixups by BIOS, and has its config space
17203  * set up identically to what it was at cold boot.
17204  */
17205 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
17206 {
17207 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17208 	struct net_device *netdev = pci_get_drvdata(pdev);
17209 	struct bnxt *bp = netdev_priv(netdev);
17210 	int retry = 0;
17211 	int err = 0;
17212 	int off;
17213 
17214 	netdev_info(bp->dev, "PCI Slot Reset\n");
17215 
17216 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17217 	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17218 		msleep(900);
17219 
17220 	netdev_lock(netdev);
17221 
17222 	if (pci_enable_device(pdev)) {
17223 		dev_err(&pdev->dev,
17224 			"Cannot re-enable PCI device after reset.\n");
17225 	} else {
17226 		pci_set_master(pdev);
17227 		/* Upon a fatal error, the device's internal logic that latches
17228 		 * the BAR values is reset and is only restored by rewriting
17229 		 * the BARs.
17230 		 *
17231 		 * Since pci_restore_state() does not rewrite a BAR whose value
17232 		 * matches the previously saved value, the driver writes the
17233 		 * BARs to 0 first to force a restore after a fatal error.
17234 		 */
17235 		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17236 				       &bp->state)) {
17237 			for (off = PCI_BASE_ADDRESS_0;
17238 			     off <= PCI_BASE_ADDRESS_5; off += 4)
17239 				pci_write_config_dword(bp->pdev, off, 0);
17240 		}
17241 		pci_restore_state(pdev);
17242 		pci_save_state(pdev);
17243 
17244 		bnxt_inv_fw_health_reg(bp);
17245 		bnxt_try_map_fw_health_reg(bp);
17246 
17247 		/* In some PCIe AER scenarios, firmware may take up to
17248 		 * 10 seconds to become ready.
17249 		 */
17250 		do {
17251 			err = bnxt_try_recover_fw(bp);
17252 			if (!err)
17253 				break;
17254 			retry++;
17255 		} while (retry < BNXT_FW_SLOT_RESET_RETRY);
17256 
17257 		if (err) {
17258 			dev_err(&pdev->dev, "Firmware not ready\n");
17259 			goto reset_exit;
17260 		}
17261 
17262 		err = bnxt_hwrm_func_reset(bp);
17263 		if (!err)
17264 			result = PCI_ERS_RESULT_RECOVERED;
17265 
17266 		/* IRQs will be initialized later in bnxt_io_resume */
17267 		bnxt_ulp_irq_stop(bp);
17268 		bnxt_clear_int_mode(bp);
17269 	}
17270 
17271 reset_exit:
17272 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17273 	bnxt_clear_reservations(bp, true);
17274 	netdev_unlock(netdev);
17275 
17276 	return result;
17277 }
17278 
17279 /**
17280  * bnxt_io_resume - called when traffic can start flowing again.
17281  * @pdev: Pointer to PCI device
17282  *
17283  * This callback is called when the error recovery driver tells
17284  * us that it's OK to resume normal operation.
17285  */
17286 static void bnxt_io_resume(struct pci_dev *pdev)
17287 {
17288 	struct net_device *netdev = pci_get_drvdata(pdev);
17289 	struct bnxt *bp = netdev_priv(netdev);
17290 	int err;
17291 
17292 	netdev_info(bp->dev, "PCI Slot Resume\n");
17293 	netdev_lock(netdev);
17294 
17295 	err = bnxt_hwrm_func_qcaps(bp);
17296 	if (!err) {
17297 		if (netif_running(netdev)) {
17298 			err = bnxt_open(netdev);
17299 		} else {
17300 			err = bnxt_reserve_rings(bp, true);
17301 			if (!err)
17302 				err = bnxt_init_int_mode(bp);
17303 		}
17304 	}
17305 
17306 	if (!err)
17307 		netif_device_attach(netdev);
17308 
17309 	netdev_unlock(netdev);
17310 	bnxt_ulp_start(bp, err);
17311 	if (!err)
17312 		bnxt_reenable_sriov(bp);
17313 }
17314 
17315 static const struct pci_error_handlers bnxt_err_handler = {
17316 	.error_detected	= bnxt_io_error_detected,
17317 	.slot_reset	= bnxt_io_slot_reset,
17318 	.resume		= bnxt_io_resume
17319 };
17320 
17321 static struct pci_driver bnxt_pci_driver = {
17322 	.name		= DRV_MODULE_NAME,
17323 	.id_table	= bnxt_pci_tbl,
17324 	.probe		= bnxt_init_one,
17325 	.remove		= bnxt_remove_one,
17326 	.shutdown	= bnxt_shutdown,
17327 	.driver.pm	= BNXT_PM_OPS,
17328 	.err_handler	= &bnxt_err_handler,
17329 #if defined(CONFIG_BNXT_SRIOV)
17330 	.sriov_configure = bnxt_sriov_configure,
17331 #endif
17332 };
17333 
17334 static int __init bnxt_init(void)
17335 {
17336 	int err;
17337 
17338 	bnxt_debug_init();
17339 	err = pci_register_driver(&bnxt_pci_driver);
17340 	if (err) {
17341 		bnxt_debug_exit();
17342 		return err;
17343 	}
17344 
17345 	return 0;
17346 }
17347 
17348 static void __exit bnxt_exit(void)
17349 {
17350 	pci_unregister_driver(&bnxt_pci_driver);
17351 	if (bnxt_pf_wq)
17352 		destroy_workqueue(bnxt_pf_wq);
17353 	bnxt_debug_exit();
17354 }
17355 
17356 module_init(bnxt_init);
17357 module_exit(bnxt_exit);
17358