xref: /linux/drivers/net/ethernet/broadcom/bnxt/bnxt.c (revision 2845f512232de9e436b9e3b5529e906e62414013)
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_queues.h>
58 
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_hwrm.h"
62 #include "bnxt_ulp.h"
63 #include "bnxt_sriov.h"
64 #include "bnxt_ethtool.h"
65 #include "bnxt_dcb.h"
66 #include "bnxt_xdp.h"
67 #include "bnxt_ptp.h"
68 #include "bnxt_vfr.h"
69 #include "bnxt_tc.h"
70 #include "bnxt_devlink.h"
71 #include "bnxt_debugfs.h"
72 #include "bnxt_hwmon.h"
73 
74 #define BNXT_TX_TIMEOUT		(5 * HZ)
75 #define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
76 				 NETIF_MSG_TX_ERR)
77 
78 MODULE_LICENSE("GPL");
79 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
80 
81 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
82 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
83 #define BNXT_RX_COPY_THRESH 256
84 
85 #define BNXT_TX_PUSH_THRESH 164
86 
87 /* indexed by enum board_idx */
88 static const struct {
89 	char *name;
90 } board_info[] = {
91 	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
92 	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
93 	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
94 	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
95 	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
96 	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
97 	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
98 	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
99 	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
100 	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
101 	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
102 	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
103 	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
104 	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
105 	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
106 	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
107 	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
108 	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
109 	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
110 	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
111 	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
112 	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
113 	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
114 	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
115 	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
116 	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
117 	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
118 	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
119 	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
120 	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
121 	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122 	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
123 	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
124 	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
126 	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
127 	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
128 	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
129 	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
130 	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
131 	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
132 	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
133 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
134 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
135 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
136 	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
137 	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
138 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
139 	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
140 	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
141 };
142 
143 static const struct pci_device_id bnxt_pci_tbl[] = {
144 	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
145 	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
146 	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
147 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
148 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
149 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
150 	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
151 	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
152 	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
153 	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
154 	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
155 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
156 	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
157 	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
158 	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
159 	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
160 	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
161 	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
162 	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
163 	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
164 	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
165 	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
166 	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
167 	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
168 	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
169 	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
170 	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
171 	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
172 	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
173 	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
174 	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
175 	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
176 	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
177 	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
178 	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
179 	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
180 	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
181 	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
182 	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
183 	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
184 	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
185 	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
186 	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
187 	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
188 	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
189 	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
190 	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
191 	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
192 	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
193 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
194 #ifdef CONFIG_BNXT_SRIOV
195 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
196 	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
197 	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
198 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
199 	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
200 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
201 	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
202 	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
203 	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
204 	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
205 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
206 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
207 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
208 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
209 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
210 	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
211 	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
212 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
213 	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
214 	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
215 	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
216 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
217 #endif
218 	{ 0 }
219 };
220 
221 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
222 
223 static const u16 bnxt_vf_req_snif[] = {
224 	HWRM_FUNC_CFG,
225 	HWRM_FUNC_VF_CFG,
226 	HWRM_PORT_PHY_QCFG,
227 	HWRM_CFA_L2_FILTER_ALLOC,
228 };
229 
230 static const u16 bnxt_async_events_arr[] = {
231 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
232 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
233 	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
234 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
235 	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
236 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
237 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
238 	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
239 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
240 	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
241 	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
242 	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
243 	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
244 	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
245 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
246 	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
247 };
248 
249 static struct workqueue_struct *bnxt_pf_wq;
250 
251 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
252 			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
253 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
254 
255 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
256 	.ports = {
257 		.src = 0,
258 		.dst = 0,
259 	},
260 	.addrs = {
261 		.v6addrs = {
262 			.src = BNXT_IPV6_MASK_NONE,
263 			.dst = BNXT_IPV6_MASK_NONE,
264 		},
265 	},
266 };
267 
268 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
269 	.ports = {
270 		.src = cpu_to_be16(0xffff),
271 		.dst = cpu_to_be16(0xffff),
272 	},
273 	.addrs = {
274 		.v6addrs = {
275 			.src = BNXT_IPV6_MASK_ALL,
276 			.dst = BNXT_IPV6_MASK_ALL,
277 		},
278 	},
279 };
280 
281 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
282 	.ports = {
283 		.src = cpu_to_be16(0xffff),
284 		.dst = cpu_to_be16(0xffff),
285 	},
286 	.addrs = {
287 		.v4addrs = {
288 			.src = cpu_to_be32(0xffffffff),
289 			.dst = cpu_to_be32(0xffffffff),
290 		},
291 	},
292 };
293 
294 static bool bnxt_vf_pciid(enum board_idx idx)
295 {
296 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
297 		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
298 		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
299 		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
300 }
301 
302 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
303 #define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
304 #define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
305 
306 #define BNXT_CP_DB_IRQ_DIS(db)						\
307 		writel(DB_CP_IRQ_DIS_FLAGS, db)
308 
309 #define BNXT_DB_CQ(db, idx)						\
310 	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
311 
312 #define BNXT_DB_NQ_P5(db, idx)						\
313 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
314 		    (db)->doorbell)
315 
316 #define BNXT_DB_NQ_P7(db, idx)						\
317 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
318 		    DB_RING_IDX(db, idx), (db)->doorbell)
319 
320 #define BNXT_DB_CQ_ARM(db, idx)						\
321 	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
322 
323 #define BNXT_DB_NQ_ARM_P5(db, idx)					\
324 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
325 		    DB_RING_IDX(db, idx), (db)->doorbell)
326 
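/* Ring the NQ doorbell for the given ring index, using the doorbell
 * format required by the chip generation (P7, P5+, or the legacy
 * CQ-style doorbell on older chips).
 */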
327 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
328 {
329 	if (bp->flags & BNXT_FLAG_CHIP_P7)
330 		BNXT_DB_NQ_P7(db, idx);
331 	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
332 		BNXT_DB_NQ_P5(db, idx);
333 	else
334 		BNXT_DB_CQ(db, idx);
335 }
336 
337 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
338 {
339 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
340 		BNXT_DB_NQ_ARM_P5(db, idx);
341 	else
342 		BNXT_DB_CQ_ARM(db, idx);
343 }
344 
345 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
346 {
347 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
348 		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
349 			    DB_RING_IDX(db, idx), db->doorbell);
350 	else
351 		BNXT_DB_CQ(db, idx);
352 }
353 
354 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
355 {
356 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
357 		return;
358 
359 	if (BNXT_PF(bp))
360 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
361 	else
362 		schedule_delayed_work(&bp->fw_reset_task, delay);
363 }
364 
365 static void __bnxt_queue_sp_work(struct bnxt *bp)
366 {
367 	if (BNXT_PF(bp))
368 		queue_work(bnxt_pf_wq, &bp->sp_task);
369 	else
370 		schedule_work(&bp->sp_task);
371 }
372 
373 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
374 {
375 	set_bit(event, &bp->sp_event);
376 	__bnxt_queue_sp_work(bp);
377 }
378 
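/* Schedule a reset after an RX ring error: mark the NAPI as being in
 * reset and queue the slow-path task (a full reset on P5+ chips, a
 * single ring reset otherwise).
 */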
379 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
380 {
381 	if (!rxr->bnapi->in_reset) {
382 		rxr->bnapi->in_reset = true;
383 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
384 			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
385 		else
386 			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
387 		__bnxt_queue_sp_work(bp);
388 	}
389 	rxr->rx_next_cons = 0xffff;
390 }
391 
392 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
393 			  u16 curr)
394 {
395 	struct bnxt_napi *bnapi = txr->bnapi;
396 
397 	if (bnapi->tx_fault)
398 		return;
399 
400 	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
401 		   txr->txq_index, txr->tx_hw_cons,
402 		   txr->tx_cons, txr->tx_prod, curr);
403 	WARN_ON_ONCE(1);
404 	bnapi->tx_fault = 1;
405 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
406 }
407 
408 const u16 bnxt_lhint_arr[] = {
409 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
410 	TX_BD_FLAGS_LHINT_512_TO_1023,
411 	TX_BD_FLAGS_LHINT_1024_TO_2047,
412 	TX_BD_FLAGS_LHINT_1024_TO_2047,
413 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
414 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
415 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
416 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
417 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
418 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
419 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
420 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
421 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
422 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
423 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
424 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
425 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
426 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
427 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
428 };
429 
430 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
431 {
432 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
433 
434 	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
435 		return 0;
436 
437 	return md_dst->u.port_info.port_id;
438 }
439 
440 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
441 			     u16 prod)
442 {
443 	/* Sync BD data before updating doorbell */
444 	wmb();
445 	bnxt_db_write(bp, &txr->tx_db, prod);
446 	txr->kick_pending = 0;
447 }
448 
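/* Main transmit routine.  When the TX ring is empty and the packet is
 * small enough, the BDs and data are copied directly through the
 * doorbell ("push" mode); otherwise the packet is DMA-mapped and
 * described with long TX BDs in the normal path below.
 */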
449 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
450 {
451 	struct bnxt *bp = netdev_priv(dev);
452 	struct tx_bd *txbd, *txbd0;
453 	struct tx_bd_ext *txbd1;
454 	struct netdev_queue *txq;
455 	int i;
456 	dma_addr_t mapping;
457 	unsigned int length, pad = 0;
458 	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
459 	u16 prod, last_frag;
460 	struct pci_dev *pdev = bp->pdev;
461 	struct bnxt_tx_ring_info *txr;
462 	struct bnxt_sw_tx_bd *tx_buf;
463 	__le32 lflags = 0;
464 
465 	i = skb_get_queue_mapping(skb);
466 	if (unlikely(i >= bp->tx_nr_rings)) {
467 		dev_kfree_skb_any(skb);
468 		dev_core_stats_tx_dropped_inc(dev);
469 		return NETDEV_TX_OK;
470 	}
471 
472 	txq = netdev_get_tx_queue(dev, i);
473 	txr = &bp->tx_ring[bp->tx_ring_map[i]];
474 	prod = txr->tx_prod;
475 
476 	free_size = bnxt_tx_avail(bp, txr);
477 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
478 		/* We must have raced with NAPI cleanup */
479 		if (net_ratelimit() && txr->kick_pending)
480 			netif_warn(bp, tx_err, dev,
481 				   "bnxt: ring busy w/ flush pending!\n");
482 		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
483 					bp->tx_wake_thresh))
484 			return NETDEV_TX_BUSY;
485 	}
486 
487 	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
488 		goto tx_free;
489 
490 	length = skb->len;
491 	len = skb_headlen(skb);
492 	last_frag = skb_shinfo(skb)->nr_frags;
493 
494 	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
495 
496 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
497 	tx_buf->skb = skb;
498 	tx_buf->nr_frags = last_frag;
499 
500 	vlan_tag_flags = 0;
501 	cfa_action = bnxt_xmit_get_cfa_action(skb);
502 	if (skb_vlan_tag_present(skb)) {
503 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
504 				 skb_vlan_tag_get(skb);
505 		/* Currently supports 8021Q, 8021AD vlan offloads
506 		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
507 		 */
508 		if (skb->vlan_proto == htons(ETH_P_8021Q))
509 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
510 	}
511 
512 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
513 		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
514 
515 		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
516 		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
517 			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
518 					    &ptp->tx_hdr_off)) {
519 				if (vlan_tag_flags)
520 					ptp->tx_hdr_off += VLAN_HLEN;
521 				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
522 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
523 			} else {
524 				atomic_inc(&bp->ptp_cfg->tx_avail);
525 			}
526 		}
527 	}
528 
529 	if (unlikely(skb->no_fcs))
530 		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
531 
532 	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
533 	    !lflags) {
534 		struct tx_push_buffer *tx_push_buf = txr->tx_push;
535 		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
536 		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
537 		void __iomem *db = txr->tx_db.doorbell;
538 		void *pdata = tx_push_buf->data;
539 		u64 *end;
540 		int j, push_len;
541 
542 		/* Set COAL_NOW to be ready quickly for the next push */
543 		tx_push->tx_bd_len_flags_type =
544 			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
545 					TX_BD_TYPE_LONG_TX_BD |
546 					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
547 					TX_BD_FLAGS_COAL_NOW |
548 					TX_BD_FLAGS_PACKET_END |
549 					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
550 
551 		if (skb->ip_summed == CHECKSUM_PARTIAL)
552 			tx_push1->tx_bd_hsize_lflags =
553 					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
554 		else
555 			tx_push1->tx_bd_hsize_lflags = 0;
556 
557 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
558 		tx_push1->tx_bd_cfa_action =
559 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
560 
561 		end = pdata + length;
562 		end = PTR_ALIGN(end, 8) - 1;
563 		*end = 0;
564 
565 		skb_copy_from_linear_data(skb, pdata, len);
566 		pdata += len;
567 		for (j = 0; j < last_frag; j++) {
568 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
569 			void *fptr;
570 
571 			fptr = skb_frag_address_safe(frag);
572 			if (!fptr)
573 				goto normal_tx;
574 
575 			memcpy(pdata, fptr, skb_frag_size(frag));
576 			pdata += skb_frag_size(frag);
577 		}
578 
579 		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
580 		txbd->tx_bd_haddr = txr->data_mapping;
581 		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
582 		prod = NEXT_TX(prod);
583 		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
584 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
585 		memcpy(txbd, tx_push1, sizeof(*txbd));
586 		prod = NEXT_TX(prod);
587 		tx_push->doorbell =
588 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
589 				    DB_RING_IDX(&txr->tx_db, prod));
590 		WRITE_ONCE(txr->tx_prod, prod);
591 
592 		tx_buf->is_push = 1;
593 		netdev_tx_sent_queue(txq, skb->len);
594 		wmb();	/* Sync is_push and byte queue before pushing data */
595 
596 		push_len = (length + sizeof(*tx_push) + 7) / 8;
597 		if (push_len > 16) {
598 			__iowrite64_copy(db, tx_push_buf, 16);
599 			__iowrite32_copy(db + 4, tx_push_buf + 1,
600 					 (push_len - 16) << 1);
601 		} else {
602 			__iowrite64_copy(db, tx_push_buf, push_len);
603 		}
604 
605 		goto tx_done;
606 	}
607 
608 normal_tx:
609 	if (length < BNXT_MIN_PKT_SIZE) {
610 		pad = BNXT_MIN_PKT_SIZE - length;
611 		if (skb_pad(skb, pad))
612 			/* SKB already freed. */
613 			goto tx_kick_pending;
614 		length = BNXT_MIN_PKT_SIZE;
615 	}
616 
617 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
618 
619 	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
620 		goto tx_free;
621 
622 	dma_unmap_addr_set(tx_buf, mapping, mapping);
623 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
624 		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
625 
626 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
627 	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
628 
629 	prod = NEXT_TX(prod);
630 	txbd1 = (struct tx_bd_ext *)
631 		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
632 
633 	txbd1->tx_bd_hsize_lflags = lflags;
634 	if (skb_is_gso(skb)) {
635 		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
636 		u32 hdr_len;
637 
638 		if (skb->encapsulation) {
639 			if (udp_gso)
640 				hdr_len = skb_inner_transport_offset(skb) +
641 					  sizeof(struct udphdr);
642 			else
643 				hdr_len = skb_inner_tcp_all_headers(skb);
644 		} else if (udp_gso) {
645 			hdr_len = skb_transport_offset(skb) +
646 				  sizeof(struct udphdr);
647 		} else {
648 			hdr_len = skb_tcp_all_headers(skb);
649 		}
650 
651 		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
652 					TX_BD_FLAGS_T_IPID |
653 					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
654 		length = skb_shinfo(skb)->gso_size;
655 		txbd1->tx_bd_mss = cpu_to_le32(length);
656 		length += hdr_len;
657 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
658 		txbd1->tx_bd_hsize_lflags |=
659 			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
660 		txbd1->tx_bd_mss = 0;
661 	}
662 
663 	length >>= 9;
664 	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
665 		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
666 				     skb->len);
667 		i = 0;
668 		goto tx_dma_error;
669 	}
670 	flags |= bnxt_lhint_arr[length];
671 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
672 
673 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
674 	txbd1->tx_bd_cfa_action =
675 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
676 	txbd0 = txbd;
677 	for (i = 0; i < last_frag; i++) {
678 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
679 
680 		prod = NEXT_TX(prod);
681 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
682 
683 		len = skb_frag_size(frag);
684 		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
685 					   DMA_TO_DEVICE);
686 
687 		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
688 			goto tx_dma_error;
689 
690 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
691 		dma_unmap_addr_set(tx_buf, mapping, mapping);
692 
693 		txbd->tx_bd_haddr = cpu_to_le64(mapping);
694 
695 		flags = len << TX_BD_LEN_SHIFT;
696 		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
697 	}
698 
699 	flags &= ~TX_BD_LEN;
700 	txbd->tx_bd_len_flags_type =
701 		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
702 			    TX_BD_FLAGS_PACKET_END);
703 
704 	netdev_tx_sent_queue(txq, skb->len);
705 
706 	skb_tx_timestamp(skb);
707 
708 	prod = NEXT_TX(prod);
709 	WRITE_ONCE(txr->tx_prod, prod);
710 
711 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
712 		bnxt_txr_db_kick(bp, txr, prod);
713 	} else {
714 		if (free_size >= bp->tx_wake_thresh)
715 			txbd0->tx_bd_len_flags_type |=
716 				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
717 		txr->kick_pending = 1;
718 	}
719 
720 tx_done:
721 
722 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
723 		if (netdev_xmit_more() && !tx_buf->is_push) {
724 			txbd0->tx_bd_len_flags_type &=
725 				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
726 			bnxt_txr_db_kick(bp, txr, prod);
727 		}
728 
729 		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
730 				   bp->tx_wake_thresh);
731 	}
732 	return NETDEV_TX_OK;
733 
734 tx_dma_error:
735 	last_frag = i;
736 
737 	/* start back at beginning and unmap skb */
738 	prod = txr->tx_prod;
739 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
740 	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
741 			 skb_headlen(skb), DMA_TO_DEVICE);
742 	prod = NEXT_TX(prod);
743 
744 	/* unmap remaining mapped pages */
745 	for (i = 0; i < last_frag; i++) {
746 		prod = NEXT_TX(prod);
747 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
748 		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
749 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
750 			       DMA_TO_DEVICE);
751 	}
752 
753 tx_free:
754 	dev_kfree_skb_any(skb);
755 tx_kick_pending:
756 	if (BNXT_TX_PTP_IS_SET(lflags))
757 		atomic_inc(&bp->ptp_cfg->tx_avail);
758 	if (txr->kick_pending)
759 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
760 	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
761 	dev_core_stats_tx_dropped_inc(dev);
762 	return NETDEV_TX_OK;
763 }
764 
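/* Reclaim TX descriptors up to the hardware consumer index: unmap the
 * DMA buffers, hand timestamped skbs to the PTP worker where needed,
 * free the skbs and wake the queue once enough descriptors are free.
 */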
765 static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
766 			  int budget)
767 {
768 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
769 	struct pci_dev *pdev = bp->pdev;
770 	u16 hw_cons = txr->tx_hw_cons;
771 	unsigned int tx_bytes = 0;
772 	u16 cons = txr->tx_cons;
773 	int tx_pkts = 0;
774 
775 	while (RING_TX(bp, cons) != hw_cons) {
776 		struct bnxt_sw_tx_bd *tx_buf;
777 		struct sk_buff *skb;
778 		int j, last;
779 
780 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
781 		cons = NEXT_TX(cons);
782 		skb = tx_buf->skb;
783 		tx_buf->skb = NULL;
784 
785 		if (unlikely(!skb)) {
786 			bnxt_sched_reset_txr(bp, txr, cons);
787 			return;
788 		}
789 
790 		tx_pkts++;
791 		tx_bytes += skb->len;
792 
793 		if (tx_buf->is_push) {
794 			tx_buf->is_push = 0;
795 			goto next_tx_int;
796 		}
797 
798 		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
799 				 skb_headlen(skb), DMA_TO_DEVICE);
800 		last = tx_buf->nr_frags;
801 
802 		for (j = 0; j < last; j++) {
803 			cons = NEXT_TX(cons);
804 			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
805 			dma_unmap_page(
806 				&pdev->dev,
807 				dma_unmap_addr(tx_buf, mapping),
808 				skb_frag_size(&skb_shinfo(skb)->frags[j]),
809 				DMA_TO_DEVICE);
810 		}
811 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
812 			if (BNXT_CHIP_P5(bp)) {
813 				/* PTP worker takes ownership of the skb */
814 				if (!bnxt_get_tx_ts_p5(bp, skb))
815 					skb = NULL;
816 				else
817 					atomic_inc(&bp->ptp_cfg->tx_avail);
818 			}
819 		}
820 
821 next_tx_int:
822 		cons = NEXT_TX(cons);
823 
824 		dev_consume_skb_any(skb);
825 	}
826 
827 	WRITE_ONCE(txr->tx_cons, cons);
828 
829 	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
830 				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
831 				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
832 }
833 
834 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
835 {
836 	struct bnxt_tx_ring_info *txr;
837 	int i;
838 
839 	bnxt_for_each_napi_tx(i, bnapi, txr) {
840 		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
841 			__bnxt_tx_int(bp, txr, budget);
842 	}
843 	bnapi->events &= ~BNXT_TX_CMP_EVENT;
844 }
845 
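/* Allocate an RX page from the ring's page pool, or a page fragment
 * when the system page size is larger than BNXT_RX_PAGE_SIZE, and
 * return its DMA address through @mapping.
 */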
846 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
847 					 struct bnxt_rx_ring_info *rxr,
848 					 unsigned int *offset,
849 					 gfp_t gfp)
850 {
851 	struct page *page;
852 
853 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
854 		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
855 						BNXT_RX_PAGE_SIZE);
856 	} else {
857 		page = page_pool_dev_alloc_pages(rxr->page_pool);
858 		*offset = 0;
859 	}
860 	if (!page)
861 		return NULL;
862 
863 	*mapping = page_pool_get_dma_addr(page) + *offset;
864 	return page;
865 }
866 
867 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
868 				       gfp_t gfp)
869 {
870 	u8 *data;
871 	struct pci_dev *pdev = bp->pdev;
872 
873 	if (gfp == GFP_ATOMIC)
874 		data = napi_alloc_frag(bp->rx_buf_size);
875 	else
876 		data = netdev_alloc_frag(bp->rx_buf_size);
877 	if (!data)
878 		return NULL;
879 
880 	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
881 					bp->rx_buf_use_size, bp->rx_dir,
882 					DMA_ATTR_WEAK_ORDERING);
883 
884 	if (dma_mapping_error(&pdev->dev, *mapping)) {
885 		skb_free_frag(data);
886 		data = NULL;
887 	}
888 	return data;
889 }
890 
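/* Fill the RX buffer descriptor at @prod with a new buffer: a
 * page-pool page in page mode, or a newly allocated data fragment
 * otherwise.
 */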
891 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
892 		       u16 prod, gfp_t gfp)
893 {
894 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
895 	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
896 	dma_addr_t mapping;
897 
898 	if (BNXT_RX_PAGE_MODE(bp)) {
899 		unsigned int offset;
900 		struct page *page =
901 			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
902 
903 		if (!page)
904 			return -ENOMEM;
905 
906 		mapping += bp->rx_dma_offset;
907 		rx_buf->data = page;
908 		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
909 	} else {
910 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
911 
912 		if (!data)
913 			return -ENOMEM;
914 
915 		rx_buf->data = data;
916 		rx_buf->data_ptr = data + bp->rx_offset;
917 	}
918 	rx_buf->mapping = mapping;
919 
920 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
921 	return 0;
922 }
923 
924 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
925 {
926 	u16 prod = rxr->rx_prod;
927 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
928 	struct bnxt *bp = rxr->bnapi->bp;
929 	struct rx_bd *cons_bd, *prod_bd;
930 
931 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
932 	cons_rx_buf = &rxr->rx_buf_ring[cons];
933 
934 	prod_rx_buf->data = data;
935 	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
936 
937 	prod_rx_buf->mapping = cons_rx_buf->mapping;
938 
939 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
940 	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
941 
942 	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
943 }
944 
945 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
946 {
947 	u16 next, max = rxr->rx_agg_bmap_size;
948 
949 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
950 	if (next >= max)
951 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
952 	return next;
953 }
954 
955 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
956 				     struct bnxt_rx_ring_info *rxr,
957 				     u16 prod, gfp_t gfp)
958 {
959 	struct rx_bd *rxbd =
960 		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
961 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
962 	struct page *page;
963 	dma_addr_t mapping;
964 	u16 sw_prod = rxr->rx_sw_agg_prod;
965 	unsigned int offset = 0;
966 
967 	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
968 
969 	if (!page)
970 		return -ENOMEM;
971 
972 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
973 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
974 
975 	__set_bit(sw_prod, rxr->rx_agg_bmap);
976 	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
977 	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
978 
979 	rx_agg_buf->page = page;
980 	rx_agg_buf->offset = offset;
981 	rx_agg_buf->mapping = mapping;
982 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
983 	rxbd->rx_bd_opaque = sw_prod;
984 	return 0;
985 }
986 
987 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
988 				       struct bnxt_cp_ring_info *cpr,
989 				       u16 cp_cons, u16 curr)
990 {
991 	struct rx_agg_cmp *agg;
992 
993 	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
994 	agg = (struct rx_agg_cmp *)
995 		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
996 	return agg;
997 }
998 
999 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1000 					      struct bnxt_rx_ring_info *rxr,
1001 					      u16 agg_id, u16 curr)
1002 {
1003 	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1004 
1005 	return &tpa_info->agg_arr[curr];
1006 }
1007 
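/* Return @agg_bufs aggregation buffers referenced by the completion
 * (or TPA) entries back to the aggregation ring so they can be
 * reused, e.g. after an error or an allocation failure.
 */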
1008 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1009 				   u16 start, u32 agg_bufs, bool tpa)
1010 {
1011 	struct bnxt_napi *bnapi = cpr->bnapi;
1012 	struct bnxt *bp = bnapi->bp;
1013 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1014 	u16 prod = rxr->rx_agg_prod;
1015 	u16 sw_prod = rxr->rx_sw_agg_prod;
1016 	bool p5_tpa = false;
1017 	u32 i;
1018 
1019 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1020 		p5_tpa = true;
1021 
1022 	for (i = 0; i < agg_bufs; i++) {
1023 		u16 cons;
1024 		struct rx_agg_cmp *agg;
1025 		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1026 		struct rx_bd *prod_bd;
1027 		struct page *page;
1028 
1029 		if (p5_tpa)
1030 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1031 		else
1032 			agg = bnxt_get_agg(bp, cpr, idx, start + i);
1033 		cons = agg->rx_agg_cmp_opaque;
1034 		__clear_bit(cons, rxr->rx_agg_bmap);
1035 
1036 		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1037 			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1038 
1039 		__set_bit(sw_prod, rxr->rx_agg_bmap);
1040 		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1041 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1042 
1043 		/* It is possible for sw_prod to be equal to cons, so
1044 		 * set cons_rx_buf->page to NULL first.
1045 		 */
1046 		page = cons_rx_buf->page;
1047 		cons_rx_buf->page = NULL;
1048 		prod_rx_buf->page = page;
1049 		prod_rx_buf->offset = cons_rx_buf->offset;
1050 
1051 		prod_rx_buf->mapping = cons_rx_buf->mapping;
1052 
1053 		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1054 
1055 		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1056 		prod_bd->rx_bd_opaque = sw_prod;
1057 
1058 		prod = NEXT_RX_AGG(prod);
1059 		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1060 	}
1061 	rxr->rx_agg_prod = prod;
1062 	rxr->rx_sw_agg_prod = sw_prod;
1063 }
1064 
1065 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1066 					      struct bnxt_rx_ring_info *rxr,
1067 					      u16 cons, void *data, u8 *data_ptr,
1068 					      dma_addr_t dma_addr,
1069 					      unsigned int offset_and_len)
1070 {
1071 	unsigned int len = offset_and_len & 0xffff;
1072 	struct page *page = data;
1073 	u16 prod = rxr->rx_prod;
1074 	struct sk_buff *skb;
1075 	int err;
1076 
1077 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1078 	if (unlikely(err)) {
1079 		bnxt_reuse_rx_data(rxr, cons, data);
1080 		return NULL;
1081 	}
1082 	dma_addr -= bp->rx_dma_offset;
1083 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1084 				bp->rx_dir);
1085 	skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1086 	if (!skb) {
1087 		page_pool_recycle_direct(rxr->page_pool, page);
1088 		return NULL;
1089 	}
1090 	skb_mark_for_recycle(skb);
1091 	skb_reserve(skb, bp->rx_offset);
1092 	__skb_put(skb, len);
1093 
1094 	return skb;
1095 }
1096 
1097 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1098 					struct bnxt_rx_ring_info *rxr,
1099 					u16 cons, void *data, u8 *data_ptr,
1100 					dma_addr_t dma_addr,
1101 					unsigned int offset_and_len)
1102 {
1103 	unsigned int payload = offset_and_len >> 16;
1104 	unsigned int len = offset_and_len & 0xffff;
1105 	skb_frag_t *frag;
1106 	struct page *page = data;
1107 	u16 prod = rxr->rx_prod;
1108 	struct sk_buff *skb;
1109 	int off, err;
1110 
1111 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1112 	if (unlikely(err)) {
1113 		bnxt_reuse_rx_data(rxr, cons, data);
1114 		return NULL;
1115 	}
1116 	dma_addr -= bp->rx_dma_offset;
1117 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1118 				bp->rx_dir);
1119 
1120 	if (unlikely(!payload))
1121 		payload = eth_get_headlen(bp->dev, data_ptr, len);
1122 
1123 	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1124 	if (!skb) {
1125 		page_pool_recycle_direct(rxr->page_pool, page);
1126 		return NULL;
1127 	}
1128 
1129 	skb_mark_for_recycle(skb);
1130 	off = (void *)data_ptr - page_address(page);
1131 	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1132 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1133 	       payload + NET_IP_ALIGN);
1134 
1135 	frag = &skb_shinfo(skb)->frags[0];
1136 	skb_frag_size_sub(frag, payload);
1137 	skb_frag_off_add(frag, payload);
1138 	skb->data_len -= payload;
1139 	skb->tail += payload;
1140 
1141 	return skb;
1142 }
1143 
1144 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1145 				   struct bnxt_rx_ring_info *rxr, u16 cons,
1146 				   void *data, u8 *data_ptr,
1147 				   dma_addr_t dma_addr,
1148 				   unsigned int offset_and_len)
1149 {
1150 	u16 prod = rxr->rx_prod;
1151 	struct sk_buff *skb;
1152 	int err;
1153 
1154 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1155 	if (unlikely(err)) {
1156 		bnxt_reuse_rx_data(rxr, cons, data);
1157 		return NULL;
1158 	}
1159 
1160 	skb = napi_build_skb(data, bp->rx_buf_size);
1161 	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1162 			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1163 	if (!skb) {
1164 		skb_free_frag(data);
1165 		return NULL;
1166 	}
1167 
1168 	skb_reserve(skb, bp->rx_offset);
1169 	skb_put(skb, offset_and_len & 0xffff);
1170 	return skb;
1171 }
1172 
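/* Attach the pages from @agg_bufs aggregation completions as frags of
 * an skb or xdp_buff, replenishing the aggregation ring as we go.
 * Returns the total fragment length, or 0 if replenishing fails and
 * the buffers are recycled instead.
 */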
1173 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1174 			       struct bnxt_cp_ring_info *cpr,
1175 			       struct skb_shared_info *shinfo,
1176 			       u16 idx, u32 agg_bufs, bool tpa,
1177 			       struct xdp_buff *xdp)
1178 {
1179 	struct bnxt_napi *bnapi = cpr->bnapi;
1180 	struct pci_dev *pdev = bp->pdev;
1181 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1182 	u16 prod = rxr->rx_agg_prod;
1183 	u32 i, total_frag_len = 0;
1184 	bool p5_tpa = false;
1185 
1186 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1187 		p5_tpa = true;
1188 
1189 	for (i = 0; i < agg_bufs; i++) {
1190 		skb_frag_t *frag = &shinfo->frags[i];
1191 		u16 cons, frag_len;
1192 		struct rx_agg_cmp *agg;
1193 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1194 		struct page *page;
1195 		dma_addr_t mapping;
1196 
1197 		if (p5_tpa)
1198 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1199 		else
1200 			agg = bnxt_get_agg(bp, cpr, idx, i);
1201 		cons = agg->rx_agg_cmp_opaque;
1202 		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1203 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1204 
1205 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1206 		skb_frag_fill_page_desc(frag, cons_rx_buf->page,
1207 					cons_rx_buf->offset, frag_len);
1208 		shinfo->nr_frags = i + 1;
1209 		__clear_bit(cons, rxr->rx_agg_bmap);
1210 
1211 		/* It is possible for bnxt_alloc_rx_page() to allocate
1212 		 * a sw_prod index that equals the cons index, so we
1213 		 * need to clear the cons entry now.
1214 		 */
1215 		mapping = cons_rx_buf->mapping;
1216 		page = cons_rx_buf->page;
1217 		cons_rx_buf->page = NULL;
1218 
1219 		if (xdp && page_is_pfmemalloc(page))
1220 			xdp_buff_set_frag_pfmemalloc(xdp);
1221 
1222 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1223 			--shinfo->nr_frags;
1224 			cons_rx_buf->page = page;
1225 
1226 			/* Update prod since possibly some pages have been
1227 			 * allocated already.
1228 			 */
1229 			rxr->rx_agg_prod = prod;
1230 			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1231 			return 0;
1232 		}
1233 
1234 		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1235 					bp->rx_dir);
1236 
1237 		total_frag_len += frag_len;
1238 		prod = NEXT_RX_AGG(prod);
1239 	}
1240 	rxr->rx_agg_prod = prod;
1241 	return total_frag_len;
1242 }
1243 
1244 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1245 					     struct bnxt_cp_ring_info *cpr,
1246 					     struct sk_buff *skb, u16 idx,
1247 					     u32 agg_bufs, bool tpa)
1248 {
1249 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1250 	u32 total_frag_len = 0;
1251 
1252 	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1253 					     agg_bufs, tpa, NULL);
1254 	if (!total_frag_len) {
1255 		skb_mark_for_recycle(skb);
1256 		dev_kfree_skb(skb);
1257 		return NULL;
1258 	}
1259 
1260 	skb->data_len += total_frag_len;
1261 	skb->len += total_frag_len;
1262 	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
1263 	return skb;
1264 }
1265 
1266 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1267 				 struct bnxt_cp_ring_info *cpr,
1268 				 struct xdp_buff *xdp, u16 idx,
1269 				 u32 agg_bufs, bool tpa)
1270 {
1271 	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1272 	u32 total_frag_len = 0;
1273 
1274 	if (!xdp_buff_has_frags(xdp))
1275 		shinfo->nr_frags = 0;
1276 
1277 	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1278 					     idx, agg_bufs, tpa, xdp);
1279 	if (total_frag_len) {
1280 		xdp_buff_set_frags_flag(xdp);
1281 		shinfo->nr_frags = agg_bufs;
1282 		shinfo->xdp_frags_size = total_frag_len;
1283 	}
1284 	return total_frag_len;
1285 }
1286 
1287 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1288 			       u8 agg_bufs, u32 *raw_cons)
1289 {
1290 	u16 last;
1291 	struct rx_agg_cmp *agg;
1292 
1293 	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1294 	last = RING_CMP(*raw_cons);
1295 	agg = (struct rx_agg_cmp *)
1296 		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1297 	return RX_AGG_CMP_VALID(agg, *raw_cons);
1298 }
1299 
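/* Copy a small received buffer into a new skb allocated from the NAPI
 * cache (used for the rx_copy_thresh copy-break path).
 */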
1300 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1301 				      unsigned int len,
1302 				      dma_addr_t mapping)
1303 {
1304 	struct bnxt *bp = bnapi->bp;
1305 	struct pci_dev *pdev = bp->pdev;
1306 	struct sk_buff *skb;
1307 
1308 	skb = napi_alloc_skb(&bnapi->napi, len);
1309 	if (!skb)
1310 		return NULL;
1311 
1312 	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1313 				bp->rx_dir);
1314 
1315 	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1316 	       len + NET_IP_ALIGN);
1317 
1318 	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1319 				   bp->rx_dir);
1320 
1321 	skb_put(skb, len);
1322 
1323 	return skb;
1324 }
1325 
1326 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1327 				     unsigned int len,
1328 				     dma_addr_t mapping)
1329 {
1330 	return bnxt_copy_data(bnapi, data, len, mapping);
1331 }
1332 
1333 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1334 				     struct xdp_buff *xdp,
1335 				     unsigned int len,
1336 				     dma_addr_t mapping)
1337 {
1338 	unsigned int metasize = 0;
1339 	u8 *data = xdp->data;
1340 	struct sk_buff *skb;
1341 
1342 	len = xdp->data_end - xdp->data_meta;
1343 	metasize = xdp->data - xdp->data_meta;
1344 	data = xdp->data_meta;
1345 
1346 	skb = bnxt_copy_data(bnapi, data, len, mapping);
1347 	if (!skb)
1348 		return skb;
1349 
1350 	if (metasize) {
1351 		skb_metadata_set(skb, metasize);
1352 		__skb_pull(skb, metasize);
1353 	}
1354 
1355 	return skb;
1356 }
1357 
1358 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1359 			   u32 *raw_cons, void *cmp)
1360 {
1361 	struct rx_cmp *rxcmp = cmp;
1362 	u32 tmp_raw_cons = *raw_cons;
1363 	u8 cmp_type, agg_bufs = 0;
1364 
1365 	cmp_type = RX_CMP_TYPE(rxcmp);
1366 
1367 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1368 		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1369 			    RX_CMP_AGG_BUFS) >>
1370 			   RX_CMP_AGG_BUFS_SHIFT;
1371 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1372 		struct rx_tpa_end_cmp *tpa_end = cmp;
1373 
1374 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1375 			return 0;
1376 
1377 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1378 	}
1379 
1380 	if (agg_bufs) {
1381 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1382 			return -EBUSY;
1383 	}
1384 	*raw_cons = tmp_raw_cons;
1385 	return 0;
1386 }
1387 
1388 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1389 {
1390 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1391 	u16 idx = agg_id & MAX_TPA_P5_MASK;
1392 
1393 	if (test_bit(idx, map->agg_idx_bmap))
1394 		idx = find_first_zero_bit(map->agg_idx_bmap,
1395 					  BNXT_AGG_IDX_BMAP_SIZE);
1396 	__set_bit(idx, map->agg_idx_bmap);
1397 	map->agg_id_tbl[agg_id] = idx;
1398 	return idx;
1399 }
1400 
1401 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1402 {
1403 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1404 
1405 	__clear_bit(idx, map->agg_idx_bmap);
1406 }
1407 
1408 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1409 {
1410 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1411 
1412 	return map->agg_id_tbl[agg_id];
1413 }
1414 
1415 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1416 			      struct rx_tpa_start_cmp *tpa_start,
1417 			      struct rx_tpa_start_cmp_ext *tpa_start1)
1418 {
1419 	tpa_info->cfa_code_valid = 1;
1420 	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1421 	tpa_info->vlan_valid = 0;
1422 	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1423 		tpa_info->vlan_valid = 1;
1424 		tpa_info->metadata =
1425 			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1426 	}
1427 }
1428 
1429 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1430 				 struct rx_tpa_start_cmp *tpa_start,
1431 				 struct rx_tpa_start_cmp_ext *tpa_start1)
1432 {
1433 	tpa_info->vlan_valid = 0;
1434 	if (TPA_START_VLAN_VALID(tpa_start)) {
1435 		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1436 		u32 vlan_proto = ETH_P_8021Q;
1437 
1438 		tpa_info->vlan_valid = 1;
1439 		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1440 			vlan_proto = ETH_P_8021AD;
1441 		tpa_info->metadata = vlan_proto << 16 |
1442 				     TPA_START_METADATA0_TCI(tpa_start1);
1443 	}
1444 }
1445 
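/* Handle a TPA_START completion: record the length, hash, and
 * metadata for the new aggregation in tpa_info, swap the completed
 * buffer with the one cached in tpa_info, and advance the ring.
 */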
1446 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1447 			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1448 			   struct rx_tpa_start_cmp_ext *tpa_start1)
1449 {
1450 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1451 	struct bnxt_tpa_info *tpa_info;
1452 	u16 cons, prod, agg_id;
1453 	struct rx_bd *prod_bd;
1454 	dma_addr_t mapping;
1455 
1456 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1457 		agg_id = TPA_START_AGG_ID_P5(tpa_start);
1458 		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1459 	} else {
1460 		agg_id = TPA_START_AGG_ID(tpa_start);
1461 	}
1462 	cons = tpa_start->rx_tpa_start_cmp_opaque;
1463 	prod = rxr->rx_prod;
1464 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1465 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1466 	tpa_info = &rxr->rx_tpa[agg_id];
1467 
1468 	if (unlikely(cons != rxr->rx_next_cons ||
1469 		     TPA_START_ERROR(tpa_start))) {
1470 		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1471 			    cons, rxr->rx_next_cons,
1472 			    TPA_START_ERROR_CODE(tpa_start1));
1473 		bnxt_sched_reset_rxr(bp, rxr);
1474 		return;
1475 	}
1476 	prod_rx_buf->data = tpa_info->data;
1477 	prod_rx_buf->data_ptr = tpa_info->data_ptr;
1478 
1479 	mapping = tpa_info->mapping;
1480 	prod_rx_buf->mapping = mapping;
1481 
1482 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1483 
1484 	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1485 
1486 	tpa_info->data = cons_rx_buf->data;
1487 	tpa_info->data_ptr = cons_rx_buf->data_ptr;
1488 	cons_rx_buf->data = NULL;
1489 	tpa_info->mapping = cons_rx_buf->mapping;
1490 
1491 	tpa_info->len =
1492 		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1493 				RX_TPA_START_CMP_LEN_SHIFT;
1494 	if (likely(TPA_START_HASH_VALID(tpa_start))) {
1495 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
1496 		tpa_info->gso_type = SKB_GSO_TCPV4;
1497 		if (TPA_START_IS_IPV6(tpa_start1))
1498 			tpa_info->gso_type = SKB_GSO_TCPV6;
1499 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1500 		else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
1501 			 TPA_START_HASH_TYPE(tpa_start) == 3)
1502 			tpa_info->gso_type = SKB_GSO_TCPV6;
1503 		tpa_info->rss_hash =
1504 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1505 	} else {
1506 		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1507 		tpa_info->gso_type = 0;
1508 		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1509 	}
1510 	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1511 	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1512 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1513 		bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1514 	else
1515 		bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1516 	tpa_info->agg_count = 0;
1517 
1518 	rxr->rx_prod = NEXT_RX(prod);
1519 	cons = RING_RX(bp, NEXT_RX(cons));
1520 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1521 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1522 
1523 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1524 	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1525 	cons_rx_buf->data = NULL;
1526 }
1527 
1528 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1529 {
1530 	if (agg_bufs)
1531 		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1532 }
1533 
1534 #ifdef CONFIG_INET
1535 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1536 {
1537 	struct udphdr *uh = NULL;
1538 
1539 	if (ip_proto == htons(ETH_P_IP)) {
1540 		struct iphdr *iph = (struct iphdr *)skb->data;
1541 
1542 		if (iph->protocol == IPPROTO_UDP)
1543 			uh = (struct udphdr *)(iph + 1);
1544 	} else {
1545 		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1546 
1547 		if (iph->nexthdr == IPPROTO_UDP)
1548 			uh = (struct udphdr *)(iph + 1);
1549 	}
1550 	if (uh) {
1551 		if (uh->check)
1552 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1553 		else
1554 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1555 	}
1556 }
1557 #endif
1558 
1559 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1560 					   int payload_off, int tcp_ts,
1561 					   struct sk_buff *skb)
1562 {
1563 #ifdef CONFIG_INET
1564 	struct tcphdr *th;
1565 	int len, nw_off;
1566 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1567 	u32 hdr_info = tpa_info->hdr_info;
1568 	bool loopback = false;
1569 
1570 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1571 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1572 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1573 
1574 	/* If the packet is an internal loopback packet, the offsets will
1575 	 * have an extra 4 bytes.
1576 	 */
1577 	if (inner_mac_off == 4) {
1578 		loopback = true;
1579 	} else if (inner_mac_off > 4) {
1580 		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1581 					    ETH_HLEN - 2));
1582 
1583 		/* We only support inner IPv4/IPv6.  If we don't see the
1584 		 * correct protocol ID, it must be a loopback packet where
1585 		 * the offsets are off by 4.
1586 		 */
1587 		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1588 			loopback = true;
1589 	}
1590 	if (loopback) {
1591 		/* internal loopback packet, subtract all offsets by 4 */
1592 		inner_ip_off -= 4;
1593 		inner_mac_off -= 4;
1594 		outer_ip_off -= 4;
1595 	}
1596 
1597 	nw_off = inner_ip_off - ETH_HLEN;
1598 	skb_set_network_header(skb, nw_off);
1599 	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1600 		struct ipv6hdr *iph = ipv6_hdr(skb);
1601 
1602 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1603 		len = skb->len - skb_transport_offset(skb);
1604 		th = tcp_hdr(skb);
1605 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1606 	} else {
1607 		struct iphdr *iph = ip_hdr(skb);
1608 
1609 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1610 		len = skb->len - skb_transport_offset(skb);
1611 		th = tcp_hdr(skb);
1612 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1613 	}
1614 
1615 	if (inner_mac_off) { /* tunnel */
1616 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1617 					    ETH_HLEN - 2));
1618 
1619 		bnxt_gro_tunnel(skb, proto);
1620 	}
1621 #endif
1622 	return skb;
1623 }
1624 
1625 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1626 					   int payload_off, int tcp_ts,
1627 					   struct sk_buff *skb)
1628 {
1629 #ifdef CONFIG_INET
1630 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1631 	u32 hdr_info = tpa_info->hdr_info;
1632 	int iphdr_len, nw_off;
1633 
1634 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1635 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1636 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1637 
1638 	nw_off = inner_ip_off - ETH_HLEN;
1639 	skb_set_network_header(skb, nw_off);
1640 	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1641 		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1642 	skb_set_transport_header(skb, nw_off + iphdr_len);
1643 
1644 	if (inner_mac_off) { /* tunnel */
1645 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1646 					    ETH_HLEN - 2));
1647 
1648 		bnxt_gro_tunnel(skb, proto);
1649 	}
1650 #endif
1651 	return skb;
1652 }
1653 
1654 #define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
1655 #define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1656 
1657 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1658 					   int payload_off, int tcp_ts,
1659 					   struct sk_buff *skb)
1660 {
1661 #ifdef CONFIG_INET
1662 	struct tcphdr *th;
1663 	int len, nw_off, tcp_opt_len = 0;
1664 
1665 	if (tcp_ts)
1666 		tcp_opt_len = 12;
1667 
1668 	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1669 		struct iphdr *iph;
1670 
1671 		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1672 			 ETH_HLEN;
1673 		skb_set_network_header(skb, nw_off);
1674 		iph = ip_hdr(skb);
1675 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1676 		len = skb->len - skb_transport_offset(skb);
1677 		th = tcp_hdr(skb);
1678 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1679 	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1680 		struct ipv6hdr *iph;
1681 
1682 		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1683 			 ETH_HLEN;
1684 		skb_set_network_header(skb, nw_off);
1685 		iph = ipv6_hdr(skb);
1686 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1687 		len = skb->len - skb_transport_offset(skb);
1688 		th = tcp_hdr(skb);
1689 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1690 	} else {
1691 		dev_kfree_skb_any(skb);
1692 		return NULL;
1693 	}
1694 
1695 	if (nw_off) /* tunnel */
1696 		bnxt_gro_tunnel(skb, skb->protocol);
1697 #endif
1698 	return skb;
1699 }
1700 
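/* Finish GRO processing of an aggregated TPA packet: fill in the segment
 * count, gso_size and gso_type, let the chip-specific gro_func fix up the
 * headers, then hand the skb to tcp_gro_complete().
 */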
1701 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1702 					   struct bnxt_tpa_info *tpa_info,
1703 					   struct rx_tpa_end_cmp *tpa_end,
1704 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1705 					   struct sk_buff *skb)
1706 {
1707 #ifdef CONFIG_INET
1708 	int payload_off;
1709 	u16 segs;
1710 
1711 	segs = TPA_END_TPA_SEGS(tpa_end);
1712 	if (segs == 1)
1713 		return skb;
1714 
1715 	NAPI_GRO_CB(skb)->count = segs;
1716 	skb_shinfo(skb)->gso_size =
1717 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1718 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1719 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1720 		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1721 	else
1722 		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1723 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1724 	if (likely(skb))
1725 		tcp_gro_complete(skb);
1726 #endif
1727 	return skb;
1728 }
1729 
1730 /* Given the cfa_code of a received packet, determine which
1731  * netdev (vf-rep or PF) the packet is destined to.
1732  */
1733 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1734 {
1735 	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1736 
1737 	/* if vf-rep dev is NULL, the packet must belong to the PF */
1738 	return dev ? dev : bp->dev;
1739 }
1740 
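/* Handle a TPA end completion.  Build the skb for the aggregated packet
 * (copying small packets, otherwise taking over the TPA data buffer and
 * replacing it), attach any aggregation pages, apply RSS hash, VLAN and
 * checksum metadata, and run the GRO fixups when enabled.  Returns
 * ERR_PTR(-EBUSY) if the completion ring does not yet contain all the
 * expected entries, or NULL if the packet had to be dropped.
 */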
1741 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1742 					   struct bnxt_cp_ring_info *cpr,
1743 					   u32 *raw_cons,
1744 					   struct rx_tpa_end_cmp *tpa_end,
1745 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1746 					   u8 *event)
1747 {
1748 	struct bnxt_napi *bnapi = cpr->bnapi;
1749 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1750 	struct net_device *dev = bp->dev;
1751 	u8 *data_ptr, agg_bufs;
1752 	unsigned int len;
1753 	struct bnxt_tpa_info *tpa_info;
1754 	dma_addr_t mapping;
1755 	struct sk_buff *skb;
1756 	u16 idx = 0, agg_id;
1757 	void *data;
1758 	bool gro;
1759 
1760 	if (unlikely(bnapi->in_reset)) {
1761 		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1762 
1763 		if (rc < 0)
1764 			return ERR_PTR(-EBUSY);
1765 		return NULL;
1766 	}
1767 
1768 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1769 		agg_id = TPA_END_AGG_ID_P5(tpa_end);
1770 		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1771 		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1772 		tpa_info = &rxr->rx_tpa[agg_id];
1773 		if (unlikely(agg_bufs != tpa_info->agg_count)) {
1774 			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1775 				    agg_bufs, tpa_info->agg_count);
1776 			agg_bufs = tpa_info->agg_count;
1777 		}
1778 		tpa_info->agg_count = 0;
1779 		*event |= BNXT_AGG_EVENT;
1780 		bnxt_free_agg_idx(rxr, agg_id);
1781 		idx = agg_id;
1782 		gro = !!(bp->flags & BNXT_FLAG_GRO);
1783 	} else {
1784 		agg_id = TPA_END_AGG_ID(tpa_end);
1785 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1786 		tpa_info = &rxr->rx_tpa[agg_id];
1787 		idx = RING_CMP(*raw_cons);
1788 		if (agg_bufs) {
1789 			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1790 				return ERR_PTR(-EBUSY);
1791 
1792 			*event |= BNXT_AGG_EVENT;
1793 			idx = NEXT_CMP(idx);
1794 		}
1795 		gro = !!TPA_END_GRO(tpa_end);
1796 	}
1797 	data = tpa_info->data;
1798 	data_ptr = tpa_info->data_ptr;
1799 	prefetch(data_ptr);
1800 	len = tpa_info->len;
1801 	mapping = tpa_info->mapping;
1802 
1803 	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1804 		bnxt_abort_tpa(cpr, idx, agg_bufs);
1805 		if (agg_bufs > MAX_SKB_FRAGS)
1806 			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1807 				    agg_bufs, (int)MAX_SKB_FRAGS);
1808 		return NULL;
1809 	}
1810 
1811 	if (len <= bp->rx_copy_thresh) {
1812 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1813 		if (!skb) {
1814 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1815 			cpr->sw_stats->rx.rx_oom_discards += 1;
1816 			return NULL;
1817 		}
1818 	} else {
1819 		u8 *new_data;
1820 		dma_addr_t new_mapping;
1821 
1822 		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1823 		if (!new_data) {
1824 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1825 			cpr->sw_stats->rx.rx_oom_discards += 1;
1826 			return NULL;
1827 		}
1828 
1829 		tpa_info->data = new_data;
1830 		tpa_info->data_ptr = new_data + bp->rx_offset;
1831 		tpa_info->mapping = new_mapping;
1832 
1833 		skb = napi_build_skb(data, bp->rx_buf_size);
1834 		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1835 				       bp->rx_buf_use_size, bp->rx_dir,
1836 				       DMA_ATTR_WEAK_ORDERING);
1837 
1838 		if (!skb) {
1839 			skb_free_frag(data);
1840 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1841 			cpr->sw_stats->rx.rx_oom_discards += 1;
1842 			return NULL;
1843 		}
1844 		skb_reserve(skb, bp->rx_offset);
1845 		skb_put(skb, len);
1846 	}
1847 
1848 	if (agg_bufs) {
1849 		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1850 		if (!skb) {
1851 			/* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
1852 			cpr->sw_stats->rx.rx_oom_discards += 1;
1853 			return NULL;
1854 		}
1855 	}
1856 
1857 	if (tpa_info->cfa_code_valid)
1858 		dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1859 	skb->protocol = eth_type_trans(skb, dev);
1860 
1861 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1862 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1863 
1864 	if (tpa_info->vlan_valid &&
1865 	    (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1866 		__be16 vlan_proto = htons(tpa_info->metadata >>
1867 					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
1868 		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1869 
1870 		if (eth_type_vlan(vlan_proto)) {
1871 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1872 		} else {
1873 			dev_kfree_skb(skb);
1874 			return NULL;
1875 		}
1876 	}
1877 
1878 	skb_checksum_none_assert(skb);
1879 	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1880 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1881 		skb->csum_level =
1882 			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1883 	}
1884 
1885 	if (gro)
1886 		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1887 
1888 	return skb;
1889 }
1890 
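/* Stash a TPA aggregation completion against its aggregation ID until the
 * corresponding TPA end completion is processed.
 */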
1891 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1892 			 struct rx_agg_cmp *rx_agg)
1893 {
1894 	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1895 	struct bnxt_tpa_info *tpa_info;
1896 
1897 	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1898 	tpa_info = &rxr->rx_tpa[agg_id];
1899 	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1900 	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1901 }
1902 
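/* Mark the skb for page pool recycling and deliver it, either to the
 * vf-rep netdev it belongs to or up the stack via GRO.
 */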
1903 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1904 			     struct sk_buff *skb)
1905 {
1906 	skb_mark_for_recycle(skb);
1907 
1908 	if (skb->dev != bp->dev) {
1909 		/* this packet belongs to a vf-rep */
1910 		bnxt_vf_rep_rx(bp, skb);
1911 		return;
1912 	}
1913 	skb_record_rx_queue(skb, bnapi->index);
1914 	napi_gro_receive(&bnapi->napi, skb);
1915 }
1916 
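/* Return true and fill *cmpl_ts if the rx completion carries a valid
 * hardware timestamp, either a PTP timestamp or, when timestamping of all
 * rx packets is enabled, any non-zero completion timestamp.
 */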
1917 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1918 			     struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
1919 {
1920 	u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1921 
1922 	if (BNXT_PTP_RX_TS_VALID(flags))
1923 		goto ts_valid;
1924 	if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1925 		return false;
1926 
1927 ts_valid:
1928 	*cmpl_ts = ts;
1929 	return true;
1930 }
1931 
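/* Extract the VLAN TCI and TPID from the rx completion and attach them to
 * the skb.  Frees the skb and returns NULL if the TPID is not a
 * recognized VLAN ethertype.
 */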
1932 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
1933 				    struct rx_cmp *rxcmp,
1934 				    struct rx_cmp_ext *rxcmp1)
1935 {
1936 	__be16 vlan_proto;
1937 	u16 vtag;
1938 
1939 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1940 		__le32 flags2 = rxcmp1->rx_cmp_flags2;
1941 		u32 meta_data;
1942 
1943 		if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
1944 			return skb;
1945 
1946 		meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1947 		vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1948 		vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
1949 		if (eth_type_vlan(vlan_proto))
1950 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1951 		else
1952 			goto vlan_err;
1953 	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
1954 		if (RX_CMP_VLAN_VALID(rxcmp)) {
1955 			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
1956 
1957 			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
1958 				vlan_proto = htons(ETH_P_8021Q);
1959 			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
1960 				vlan_proto = htons(ETH_P_8021AD);
1961 			else
1962 				goto vlan_err;
1963 			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
1964 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1965 		}
1966 	}
1967 	return skb;
1968 vlan_err:
1969 	dev_kfree_skb(skb);
1970 	return NULL;
1971 }
1972 
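/* Translate the RSS hash extraction op reported in a V3 rx completion
 * into an L3 or L4 packet hash type.
 */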
1973 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
1974 					   struct rx_cmp *rxcmp)
1975 {
1976 	u8 ext_op;
1977 
1978 	ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
1979 	switch (ext_op) {
1980 	case EXT_OP_INNER_4:
1981 	case EXT_OP_OUTER_4:
1982 	case EXT_OP_INNFL_3:
1983 	case EXT_OP_OUTFL_3:
1984 		return PKT_HASH_TYPE_L4;
1985 	default:
1986 		return PKT_HASH_TYPE_L3;
1987 	}
1988 }
1989 
1990 /* returns the following:
1991  * 1       - 1 packet successfully received
1992  * 0       - successful TPA_START, packet not completed yet
1993  * -EBUSY  - completion ring does not have all the agg buffers yet
1994  * -ENOMEM - packet aborted due to out of memory
1995  * -EIO    - packet aborted due to hw error indicated in BD
1996  */
1997 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1998 		       u32 *raw_cons, u8 *event)
1999 {
2000 	struct bnxt_napi *bnapi = cpr->bnapi;
2001 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2002 	struct net_device *dev = bp->dev;
2003 	struct rx_cmp *rxcmp;
2004 	struct rx_cmp_ext *rxcmp1;
2005 	u32 tmp_raw_cons = *raw_cons;
2006 	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2007 	struct bnxt_sw_rx_bd *rx_buf;
2008 	unsigned int len;
2009 	u8 *data_ptr, agg_bufs, cmp_type;
2010 	bool xdp_active = false;
2011 	dma_addr_t dma_addr;
2012 	struct sk_buff *skb;
2013 	struct xdp_buff xdp;
2014 	u32 flags, misc;
2015 	u32 cmpl_ts;
2016 	void *data;
2017 	int rc = 0;
2018 
2019 	rxcmp = (struct rx_cmp *)
2020 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2021 
2022 	cmp_type = RX_CMP_TYPE(rxcmp);
2023 
2024 	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2025 		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2026 		goto next_rx_no_prod_no_len;
2027 	}
2028 
2029 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2030 	cp_cons = RING_CMP(tmp_raw_cons);
2031 	rxcmp1 = (struct rx_cmp_ext *)
2032 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2033 
2034 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2035 		return -EBUSY;
2036 
2037 	/* The valid test of the entry must be done first before
2038 	 * reading any further.
2039 	 */
2040 	dma_rmb();
2041 	prod = rxr->rx_prod;
2042 
2043 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2044 	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2045 		bnxt_tpa_start(bp, rxr, cmp_type,
2046 			       (struct rx_tpa_start_cmp *)rxcmp,
2047 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
2048 
2049 		*event |= BNXT_RX_EVENT;
2050 		goto next_rx_no_prod_no_len;
2051 
2052 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2053 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2054 				   (struct rx_tpa_end_cmp *)rxcmp,
2055 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2056 
2057 		if (IS_ERR(skb))
2058 			return -EBUSY;
2059 
2060 		rc = -ENOMEM;
2061 		if (likely(skb)) {
2062 			bnxt_deliver_skb(bp, bnapi, skb);
2063 			rc = 1;
2064 		}
2065 		*event |= BNXT_RX_EVENT;
2066 		goto next_rx_no_prod_no_len;
2067 	}
2068 
2069 	cons = rxcmp->rx_cmp_opaque;
2070 	if (unlikely(cons != rxr->rx_next_cons)) {
2071 		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2072 
2073 		/* 0xffff is a forced error, don't print it */
2074 		if (rxr->rx_next_cons != 0xffff)
2075 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2076 				    cons, rxr->rx_next_cons);
2077 		bnxt_sched_reset_rxr(bp, rxr);
2078 		if (rc1)
2079 			return rc1;
2080 		goto next_rx_no_prod_no_len;
2081 	}
2082 	rx_buf = &rxr->rx_buf_ring[cons];
2083 	data = rx_buf->data;
2084 	data_ptr = rx_buf->data_ptr;
2085 	prefetch(data_ptr);
2086 
2087 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2088 	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2089 
2090 	if (agg_bufs) {
2091 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2092 			return -EBUSY;
2093 
2094 		cp_cons = NEXT_CMP(cp_cons);
2095 		*event |= BNXT_AGG_EVENT;
2096 	}
2097 	*event |= BNXT_RX_EVENT;
2098 
2099 	rx_buf->data = NULL;
2100 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2101 		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2102 
2103 		bnxt_reuse_rx_data(rxr, cons, data);
2104 		if (agg_bufs)
2105 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2106 					       false);
2107 
2108 		rc = -EIO;
2109 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2110 			bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2111 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2112 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2113 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
2114 						 rx_err);
2115 				bnxt_sched_reset_rxr(bp, rxr);
2116 			}
2117 		}
2118 		goto next_rx_no_len;
2119 	}
2120 
2121 	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2122 	len = flags >> RX_CMP_LEN_SHIFT;
2123 	dma_addr = rx_buf->mapping;
2124 
2125 	if (bnxt_xdp_attached(bp, rxr)) {
2126 		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2127 		if (agg_bufs) {
2128 			u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2129 							     cp_cons, agg_bufs,
2130 							     false);
2131 			if (!frag_len)
2132 				goto oom_next_rx;
2133 		}
2134 		xdp_active = true;
2135 	}
2136 
2137 	if (xdp_active) {
2138 		if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2139 			rc = 1;
2140 			goto next_rx;
2141 		}
2142 	}
2143 
2144 	if (len <= bp->rx_copy_thresh) {
2145 		if (!xdp_active)
2146 			skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2147 		else
2148 			skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2149 		bnxt_reuse_rx_data(rxr, cons, data);
2150 		if (!skb) {
2151 			if (agg_bufs) {
2152 				if (!xdp_active)
2153 					bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2154 							       agg_bufs, false);
2155 				else
2156 					bnxt_xdp_buff_frags_free(rxr, &xdp);
2157 			}
2158 			goto oom_next_rx;
2159 		}
2160 	} else {
2161 		u32 payload;
2162 
2163 		if (rx_buf->data_ptr == data_ptr)
2164 			payload = misc & RX_CMP_PAYLOAD_OFFSET;
2165 		else
2166 			payload = 0;
2167 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2168 				      payload | len);
2169 		if (!skb)
2170 			goto oom_next_rx;
2171 	}
2172 
2173 	if (agg_bufs) {
2174 		if (!xdp_active) {
2175 			skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2176 			if (!skb)
2177 				goto oom_next_rx;
2178 		} else {
2179 			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
2180 			if (!skb) {
2181 				/* we should be able to free the old skb here */
2182 				bnxt_xdp_buff_frags_free(rxr, &xdp);
2183 				goto oom_next_rx;
2184 			}
2185 		}
2186 	}
2187 
2188 	if (RX_CMP_HASH_VALID(rxcmp)) {
2189 		enum pkt_hash_types type;
2190 
2191 		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2192 			type = bnxt_rss_ext_op(bp, rxcmp);
2193 		} else {
2194 			u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
2195 
2196 			/* RSS profiles 1 and 3 with extract code 0 for inner
2197 			 * 4-tuple
2198 			 */
2199 			if (hash_type != 1 && hash_type != 3)
2200 				type = PKT_HASH_TYPE_L3;
2201 			else
2202 				type = PKT_HASH_TYPE_L4;
2203 		}
2204 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2205 	}
2206 
2207 	if (cmp_type == CMP_TYPE_RX_L2_CMP)
2208 		dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2209 	skb->protocol = eth_type_trans(skb, dev);
2210 
2211 	if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2212 		skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2213 		if (!skb)
2214 			goto next_rx;
2215 	}
2216 
2217 	skb_checksum_none_assert(skb);
2218 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
2219 		if (dev->features & NETIF_F_RXCSUM) {
2220 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2221 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2222 		}
2223 	} else {
2224 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2225 			if (dev->features & NETIF_F_RXCSUM)
2226 				bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2227 		}
2228 	}
2229 
2230 	if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2231 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2232 			u64 ns, ts;
2233 
2234 			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2235 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2236 
2237 				spin_lock_bh(&ptp->ptp_lock);
2238 				ns = timecounter_cyc2time(&ptp->tc, ts);
2239 				spin_unlock_bh(&ptp->ptp_lock);
2240 				memset(skb_hwtstamps(skb), 0,
2241 				       sizeof(*skb_hwtstamps(skb)));
2242 				skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2243 			}
2244 		}
2245 	}
2246 	bnxt_deliver_skb(bp, bnapi, skb);
2247 	rc = 1;
2248 
2249 next_rx:
2250 	cpr->rx_packets += 1;
2251 	cpr->rx_bytes += len;
2252 
2253 next_rx_no_len:
2254 	rxr->rx_prod = NEXT_RX(prod);
2255 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2256 
2257 next_rx_no_prod_no_len:
2258 	*raw_cons = tmp_raw_cons;
2259 
2260 	return rc;
2261 
2262 oom_next_rx:
2263 	cpr->sw_stats->rx.rx_oom_discards += 1;
2264 	rc = -ENOMEM;
2265 	goto next_rx;
2266 }
2267 
2268 /* In netpoll mode, if we are using a combined completion ring, we need to
2269  * discard the rx packets and recycle the buffers.
2270  */
2271 static int bnxt_force_rx_discard(struct bnxt *bp,
2272 				 struct bnxt_cp_ring_info *cpr,
2273 				 u32 *raw_cons, u8 *event)
2274 {
2275 	u32 tmp_raw_cons = *raw_cons;
2276 	struct rx_cmp_ext *rxcmp1;
2277 	struct rx_cmp *rxcmp;
2278 	u16 cp_cons;
2279 	u8 cmp_type;
2280 	int rc;
2281 
2282 	cp_cons = RING_CMP(tmp_raw_cons);
2283 	rxcmp = (struct rx_cmp *)
2284 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2285 
2286 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2287 	cp_cons = RING_CMP(tmp_raw_cons);
2288 	rxcmp1 = (struct rx_cmp_ext *)
2289 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2290 
2291 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2292 		return -EBUSY;
2293 
2294 	/* The valid test of the entry must be done first before
2295 	 * reading any further.
2296 	 */
2297 	dma_rmb();
2298 	cmp_type = RX_CMP_TYPE(rxcmp);
2299 	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2300 	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2301 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2302 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2303 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2304 		struct rx_tpa_end_cmp_ext *tpa_end1;
2305 
2306 		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2307 		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2308 			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2309 	}
2310 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2311 	if (rc && rc != -EBUSY)
2312 		cpr->sw_stats->rx.rx_netpoll_discards += 1;
2313 	return rc;
2314 }
2315 
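/* Read one of the firmware health registers.  Depending on the register
 * type, the value comes from PCI config space, a mapped GRC window, or
 * directly from BAR0/BAR1.
 */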
2316 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2317 {
2318 	struct bnxt_fw_health *fw_health = bp->fw_health;
2319 	u32 reg = fw_health->regs[reg_idx];
2320 	u32 reg_type, reg_off, val = 0;
2321 
2322 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2323 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2324 	switch (reg_type) {
2325 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
2326 		pci_read_config_dword(bp->pdev, reg_off, &val);
2327 		break;
2328 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
2329 		reg_off = fw_health->mapped_regs[reg_idx];
2330 		fallthrough;
2331 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2332 		val = readl(bp->bar0 + reg_off);
2333 		break;
2334 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2335 		val = readl(bp->bar1 + reg_off);
2336 		break;
2337 	}
2338 	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2339 		val &= fw_health->fw_reset_inprog_reg_mask;
2340 	return val;
2341 }
2342 
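/* Return the ring group index whose aggregation ring matches the given
 * firmware ring ID, or INVALID_HW_RING_ID if none does.
 */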
2343 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2344 {
2345 	int i;
2346 
2347 	for (i = 0; i < bp->rx_nr_rings; i++) {
2348 		u16 grp_idx = bp->rx_ring[i].bnapi->index;
2349 		struct bnxt_ring_grp_info *grp_info;
2350 
2351 		grp_info = &bp->grp_info[grp_idx];
2352 		if (grp_info->agg_fw_ring_id == ring_id)
2353 			return grp_idx;
2354 	}
2355 	return INVALID_HW_RING_ID;
2356 }
2357 
2358 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2359 {
2360 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2361 
2362 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2363 		return link_info->force_link_speed2;
2364 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2365 		return link_info->force_pam4_link_speed;
2366 	return link_info->force_link_speed;
2367 }
2368 
2369 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2370 {
2371 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2372 
2373 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2374 		link_info->req_link_speed = link_info->force_link_speed2;
2375 		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2376 		switch (link_info->req_link_speed) {
2377 		case BNXT_LINK_SPEED_50GB_PAM4:
2378 		case BNXT_LINK_SPEED_100GB_PAM4:
2379 		case BNXT_LINK_SPEED_200GB_PAM4:
2380 		case BNXT_LINK_SPEED_400GB_PAM4:
2381 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2382 			break;
2383 		case BNXT_LINK_SPEED_100GB_PAM4_112:
2384 		case BNXT_LINK_SPEED_200GB_PAM4_112:
2385 		case BNXT_LINK_SPEED_400GB_PAM4_112:
2386 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2387 			break;
2388 		default:
2389 			link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2390 		}
2391 		return;
2392 	}
2393 	link_info->req_link_speed = link_info->force_link_speed;
2394 	link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2395 	if (link_info->force_pam4_link_speed) {
2396 		link_info->req_link_speed = link_info->force_pam4_link_speed;
2397 		link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2398 	}
2399 }
2400 
2401 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2402 {
2403 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2404 
2405 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2406 		link_info->advertising = link_info->auto_link_speeds2;
2407 		return;
2408 	}
2409 	link_info->advertising = link_info->auto_link_speeds;
2410 	link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2411 }
2412 
2413 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2414 {
2415 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2416 
2417 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2418 		if (link_info->req_link_speed != link_info->force_link_speed2)
2419 			return true;
2420 		return false;
2421 	}
2422 	if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2423 	    link_info->req_link_speed != link_info->force_link_speed)
2424 		return true;
2425 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2426 	    link_info->req_link_speed != link_info->force_pam4_link_speed)
2427 		return true;
2428 	return false;
2429 }
2430 
2431 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2432 {
2433 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2434 
2435 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2436 		if (link_info->advertising != link_info->auto_link_speeds2)
2437 			return true;
2438 		return false;
2439 	}
2440 	if (link_info->advertising != link_info->auto_link_speeds ||
2441 	    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2442 		return true;
2443 	return false;
2444 }
2445 
2446 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2)				\
2447 	((data2) &							\
2448 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2449 
2450 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)			\
2451 	(((data2) &							\
2452 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2453 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2454 
2455 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)			\
2456 	((data1) &							\
2457 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2458 
2459 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)		\
2460 	(((data1) &							\
2461 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2462 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2463 
2464 /* Return true if the workqueue has to be scheduled */
2465 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2466 {
2467 	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2468 
2469 	switch (err_type) {
2470 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2471 		netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2472 			   BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2473 		break;
2474 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2475 		netdev_warn(bp->dev, "Pause Storm detected!\n");
2476 		break;
2477 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2478 		netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2479 		break;
2480 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2481 		u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2482 		char *threshold_type;
2483 		bool notify = false;
2484 		char *dir_str;
2485 
2486 		switch (type) {
2487 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2488 			threshold_type = "warning";
2489 			break;
2490 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2491 			threshold_type = "critical";
2492 			break;
2493 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2494 			threshold_type = "fatal";
2495 			break;
2496 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2497 			threshold_type = "shutdown";
2498 			break;
2499 		default:
2500 			netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2501 			return false;
2502 		}
2503 		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2504 			dir_str = "above";
2505 			notify = true;
2506 		} else {
2507 			dir_str = "below";
2508 		}
2509 		netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2510 			    dir_str, threshold_type);
2511 		netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2512 			    BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2513 			    BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2514 		if (notify) {
2515 			bp->thermal_threshold_type = type;
2516 			set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2517 			return true;
2518 		}
2519 		return false;
2520 	}
2521 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2522 		netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2523 		break;
2524 	default:
2525 		netdev_err(bp->dev, "FW reported unknown error type %u\n",
2526 			   err_type);
2527 		break;
2528 	}
2529 	return false;
2530 }
2531 
2532 #define BNXT_GET_EVENT_PORT(data)	\
2533 	((data) &			\
2534 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2535 
2536 #define BNXT_EVENT_RING_TYPE(data2)	\
2537 	((data2) &			\
2538 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2539 
2540 #define BNXT_EVENT_RING_TYPE_RX(data2)	\
2541 	(BNXT_EVENT_RING_TYPE(data2) ==	\
2542 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2543 
2544 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)	\
2545 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2546 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2547 
2548 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)	\
2549 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2550 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2551 
2552 #define BNXT_PHC_BITS	48
2553 
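/* Process a firmware async event completion.  Most events just set the
 * appropriate sp_event bit; the service task is kicked at the end for
 * events that need further handling.
 */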
2554 static int bnxt_async_event_process(struct bnxt *bp,
2555 				    struct hwrm_async_event_cmpl *cmpl)
2556 {
2557 	u16 event_id = le16_to_cpu(cmpl->event_id);
2558 	u32 data1 = le32_to_cpu(cmpl->event_data1);
2559 	u32 data2 = le32_to_cpu(cmpl->event_data2);
2560 
2561 	netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2562 		   event_id, data1, data2);
2563 
2564 	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2565 	switch (event_id) {
2566 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2567 		struct bnxt_link_info *link_info = &bp->link_info;
2568 
2569 		if (BNXT_VF(bp))
2570 			goto async_event_process_exit;
2571 
2572 		/* print unsupported speed warning in forced speed mode only */
2573 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2574 		    (data1 & 0x20000)) {
2575 			u16 fw_speed = bnxt_get_force_speed(link_info);
2576 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2577 
2578 			if (speed != SPEED_UNKNOWN)
2579 				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2580 					    speed);
2581 		}
2582 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2583 	}
2584 		fallthrough;
2585 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2586 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2587 		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2588 		fallthrough;
2589 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2590 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2591 		break;
2592 	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2593 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2594 		break;
2595 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2596 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
2597 
2598 		if (BNXT_VF(bp))
2599 			break;
2600 
2601 		if (bp->pf.port_id != port_id)
2602 			break;
2603 
2604 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2605 		break;
2606 	}
2607 	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2608 		if (BNXT_PF(bp))
2609 			goto async_event_process_exit;
2610 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2611 		break;
2612 	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2613 		char *type_str = "Solicited";
2614 
2615 		if (!bp->fw_health)
2616 			goto async_event_process_exit;
2617 
2618 		bp->fw_reset_timestamp = jiffies;
2619 		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2620 		if (!bp->fw_reset_min_dsecs)
2621 			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2622 		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2623 		if (!bp->fw_reset_max_dsecs)
2624 			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2625 		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2626 			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2627 		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2628 			type_str = "Fatal";
2629 			bp->fw_health->fatalities++;
2630 			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2631 		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2632 			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2633 			type_str = "Non-fatal";
2634 			bp->fw_health->survivals++;
2635 			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2636 		}
2637 		netif_warn(bp, hw, bp->dev,
2638 			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2639 			   type_str, data1, data2,
2640 			   bp->fw_reset_min_dsecs * 100,
2641 			   bp->fw_reset_max_dsecs * 100);
2642 		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2643 		break;
2644 	}
2645 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2646 		struct bnxt_fw_health *fw_health = bp->fw_health;
2647 		char *status_desc = "healthy";
2648 		u32 status;
2649 
2650 		if (!fw_health)
2651 			goto async_event_process_exit;
2652 
2653 		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2654 			fw_health->enabled = false;
2655 			netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2656 			break;
2657 		}
2658 		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2659 		fw_health->tmr_multiplier =
2660 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2661 				     bp->current_interval * 10);
2662 		fw_health->tmr_counter = fw_health->tmr_multiplier;
2663 		if (!fw_health->enabled)
2664 			fw_health->last_fw_heartbeat =
2665 				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2666 		fw_health->last_fw_reset_cnt =
2667 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2668 		status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2669 		if (status != BNXT_FW_STATUS_HEALTHY)
2670 			status_desc = "unhealthy";
2671 		netif_info(bp, drv, bp->dev,
2672 			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2673 			   fw_health->primary ? "primary" : "backup", status,
2674 			   status_desc, fw_health->last_fw_reset_cnt);
2675 		if (!fw_health->enabled) {
2676 			/* Make sure tmr_counter is set and visible to
2677 			 * bnxt_health_check() before setting enabled to true.
2678 			 */
2679 			smp_wmb();
2680 			fw_health->enabled = true;
2681 		}
2682 		goto async_event_process_exit;
2683 	}
2684 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2685 		netif_notice(bp, hw, bp->dev,
2686 			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2687 			     data1, data2);
2688 		goto async_event_process_exit;
2689 	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2690 		struct bnxt_rx_ring_info *rxr;
2691 		u16 grp_idx;
2692 
2693 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2694 			goto async_event_process_exit;
2695 
2696 		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2697 			    BNXT_EVENT_RING_TYPE(data2), data1);
2698 		if (!BNXT_EVENT_RING_TYPE_RX(data2))
2699 			goto async_event_process_exit;
2700 
2701 		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2702 		if (grp_idx == INVALID_HW_RING_ID) {
2703 			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2704 				    data1);
2705 			goto async_event_process_exit;
2706 		}
2707 		rxr = bp->bnapi[grp_idx]->rx_ring;
2708 		bnxt_sched_reset_rxr(bp, rxr);
2709 		goto async_event_process_exit;
2710 	}
2711 	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2712 		struct bnxt_fw_health *fw_health = bp->fw_health;
2713 
2714 		netif_notice(bp, hw, bp->dev,
2715 			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2716 			     data1, data2);
2717 		if (fw_health) {
2718 			fw_health->echo_req_data1 = data1;
2719 			fw_health->echo_req_data2 = data2;
2720 			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2721 			break;
2722 		}
2723 		goto async_event_process_exit;
2724 	}
2725 	case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2726 		bnxt_ptp_pps_event(bp, data1, data2);
2727 		goto async_event_process_exit;
2728 	}
2729 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2730 		if (bnxt_event_error_report(bp, data1, data2))
2731 			break;
2732 		goto async_event_process_exit;
2733 	}
2734 	case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2735 		switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2736 		case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2737 			if (BNXT_PTP_USE_RTC(bp)) {
2738 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2739 				u64 ns;
2740 
2741 				if (!ptp)
2742 					goto async_event_process_exit;
2743 
2744 				spin_lock_bh(&ptp->ptp_lock);
2745 				bnxt_ptp_update_current_time(bp);
2746 				ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2747 				       BNXT_PHC_BITS) | ptp->current_time);
2748 				bnxt_ptp_rtc_timecounter_init(ptp, ns);
2749 				spin_unlock_bh(&ptp->ptp_lock);
2750 			}
2751 			break;
2752 		}
2753 		goto async_event_process_exit;
2754 	}
2755 	case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2756 		u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2757 
2758 		hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2759 		goto async_event_process_exit;
2760 	}
2761 	default:
2762 		goto async_event_process_exit;
2763 	}
2764 	__bnxt_queue_sp_work(bp);
2765 async_event_process_exit:
2766 	return 0;
2767 }
2768 
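/* Dispatch HWRM completions: DONE completions update the pending request
 * token, forwarded VF requests are queued for the PF service task, and
 * async events go to bnxt_async_event_process().
 */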
2769 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2770 {
2771 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2772 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2773 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2774 				(struct hwrm_fwd_req_cmpl *)txcmp;
2775 
2776 	switch (cmpl_type) {
2777 	case CMPL_BASE_TYPE_HWRM_DONE:
2778 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
2779 		hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2780 		break;
2781 
2782 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2783 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2784 
2785 		if ((vf_id < bp->pf.first_vf_id) ||
2786 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2787 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2788 				   vf_id);
2789 			return -EINVAL;
2790 		}
2791 
2792 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2793 		bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2794 		break;
2795 
2796 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2797 		bnxt_async_event_process(bp,
2798 					 (struct hwrm_async_event_cmpl *)txcmp);
2799 		break;
2800 
2801 	default:
2802 		break;
2803 	}
2804 
2805 	return 0;
2806 }
2807 
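/* MSI-X interrupt handler, one vector per NAPI instance; just schedule
 * NAPI on the associated completion ring.
 */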
2808 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2809 {
2810 	struct bnxt_napi *bnapi = dev_instance;
2811 	struct bnxt *bp = bnapi->bp;
2812 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2813 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2814 
2815 	cpr->event_ctr++;
2816 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2817 	napi_schedule(&bnapi->napi);
2818 	return IRQ_HANDLED;
2819 }
2820 
2821 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2822 {
2823 	u32 raw_cons = cpr->cp_raw_cons;
2824 	u16 cons = RING_CMP(raw_cons);
2825 	struct tx_cmp *txcmp;
2826 
2827 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2828 
2829 	return TX_CMP_VALID(txcmp, raw_cons);
2830 }
2831 
2832 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2833 {
2834 	struct bnxt_napi *bnapi = dev_instance;
2835 	struct bnxt *bp = bnapi->bp;
2836 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2837 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2838 	u32 int_status;
2839 
2840 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2841 
2842 	if (!bnxt_has_work(bp, cpr)) {
2843 		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2844 		/* return if erroneous interrupt */
2845 		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2846 			return IRQ_NONE;
2847 	}
2848 
2849 	/* disable ring IRQ */
2850 	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2851 
2852 	/* Return here if interrupt is shared and is disabled. */
2853 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
2854 		return IRQ_HANDLED;
2855 
2856 	napi_schedule(&bnapi->napi);
2857 	return IRQ_HANDLED;
2858 }
2859 
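/* Core completion ring polling loop.  TX completions advance the hardware
 * consumer index of the owning TX ring, rx completions are handled by
 * bnxt_rx_pkt() (or discarded when polling with no budget), and HWRM/async
 * completions are dispatched to bnxt_hwrm_handler().  Stops once the
 * budget is consumed and records whether more work remains.
 */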
2860 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2861 			    int budget)
2862 {
2863 	struct bnxt_napi *bnapi = cpr->bnapi;
2864 	u32 raw_cons = cpr->cp_raw_cons;
2865 	u32 cons;
2866 	int rx_pkts = 0;
2867 	u8 event = 0;
2868 	struct tx_cmp *txcmp;
2869 
2870 	cpr->has_more_work = 0;
2871 	cpr->had_work_done = 1;
2872 	while (1) {
2873 		u8 cmp_type;
2874 		int rc;
2875 
2876 		cons = RING_CMP(raw_cons);
2877 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2878 
2879 		if (!TX_CMP_VALID(txcmp, raw_cons))
2880 			break;
2881 
2882 		/* The valid test of the entry must be done first before
2883 		 * reading any further.
2884 		 */
2885 		dma_rmb();
2886 		cmp_type = TX_CMP_TYPE(txcmp);
2887 		if (cmp_type == CMP_TYPE_TX_L2_CMP ||
2888 		    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
2889 			u32 opaque = txcmp->tx_cmp_opaque;
2890 			struct bnxt_tx_ring_info *txr;
2891 			u16 tx_freed;
2892 
2893 			txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
2894 			event |= BNXT_TX_CMP_EVENT;
2895 			if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
2896 				txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
2897 			else
2898 				txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2899 			tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
2900 				   bp->tx_ring_mask;
2901 			/* return full budget so NAPI will complete. */
2902 			if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
2903 				rx_pkts = budget;
2904 				raw_cons = NEXT_RAW_CMP(raw_cons);
2905 				if (budget)
2906 					cpr->has_more_work = 1;
2907 				break;
2908 			}
2909 		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
2910 			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2911 			if (likely(budget))
2912 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2913 			else
2914 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2915 							   &event);
2916 			if (likely(rc >= 0))
2917 				rx_pkts += rc;
2918 			/* Increment rx_pkts when rc is -ENOMEM to count towards
2919 			 * the NAPI budget.  Otherwise, we may potentially loop
2920 			 * here forever if we consistently cannot allocate
2921 			 * buffers.
2922 			 */
2923 			else if (rc == -ENOMEM && budget)
2924 				rx_pkts++;
2925 			else if (rc == -EBUSY)	/* partial completion */
2926 				break;
2927 		} else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
2928 				    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
2929 				    cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
2930 			bnxt_hwrm_handler(bp, txcmp);
2931 		}
2932 		raw_cons = NEXT_RAW_CMP(raw_cons);
2933 
2934 		if (rx_pkts && rx_pkts == budget) {
2935 			cpr->has_more_work = 1;
2936 			break;
2937 		}
2938 	}
2939 
2940 	if (event & BNXT_REDIRECT_EVENT)
2941 		xdp_do_flush();
2942 
2943 	if (event & BNXT_TX_EVENT) {
2944 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
2945 		u16 prod = txr->tx_prod;
2946 
2947 		/* Sync BD data before updating doorbell */
2948 		wmb();
2949 
2950 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2951 	}
2952 
2953 	cpr->cp_raw_cons = raw_cons;
2954 	bnapi->events |= event;
2955 	return rx_pkts;
2956 }
2957 
2958 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2959 				  int budget)
2960 {
2961 	if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
2962 		bnapi->tx_int(bp, bnapi, budget);
2963 
2964 	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2965 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2966 
2967 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2968 	}
2969 	if (bnapi->events & BNXT_AGG_EVENT) {
2970 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2971 
2972 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2973 	}
2974 	bnapi->events &= BNXT_TX_CMP_EVENT;
2975 }
2976 
2977 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2978 			  int budget)
2979 {
2980 	struct bnxt_napi *bnapi = cpr->bnapi;
2981 	int rx_pkts;
2982 
2983 	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2984 
2985 	/* ACK completion ring before freeing tx ring and producing new
2986 	 * buffers in rx/agg rings to prevent overflowing the completion
2987 	 * ring.
2988 	 */
2989 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2990 
2991 	__bnxt_poll_work_done(bp, bnapi, budget);
2992 	return rx_pkts;
2993 }
2994 
2995 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2996 {
2997 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2998 	struct bnxt *bp = bnapi->bp;
2999 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3000 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3001 	struct tx_cmp *txcmp;
3002 	struct rx_cmp_ext *rxcmp1;
3003 	u32 cp_cons, tmp_raw_cons;
3004 	u32 raw_cons = cpr->cp_raw_cons;
3005 	bool flush_xdp = false;
3006 	u32 rx_pkts = 0;
3007 	u8 event = 0;
3008 
3009 	while (1) {
3010 		int rc;
3011 
3012 		cp_cons = RING_CMP(raw_cons);
3013 		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3014 
3015 		if (!TX_CMP_VALID(txcmp, raw_cons))
3016 			break;
3017 
3018 		/* The valid test of the entry must be done first before
3019 		 * reading any further.
3020 		 */
3021 		dma_rmb();
3022 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3023 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3024 			cp_cons = RING_CMP(tmp_raw_cons);
3025 			rxcmp1 = (struct rx_cmp_ext *)
3026 			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3027 
3028 			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3029 				break;
3030 
3031 			/* force an error to recycle the buffer */
3032 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3033 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3034 
3035 			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3036 			if (likely(rc == -EIO) && budget)
3037 				rx_pkts++;
3038 			else if (rc == -EBUSY)	/* partial completion */
3039 				break;
3040 			if (event & BNXT_REDIRECT_EVENT)
3041 				flush_xdp = true;
3042 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
3043 				    CMPL_BASE_TYPE_HWRM_DONE)) {
3044 			bnxt_hwrm_handler(bp, txcmp);
3045 		} else {
3046 			netdev_err(bp->dev,
3047 				   "Invalid completion received on special ring\n");
3048 		}
3049 		raw_cons = NEXT_RAW_CMP(raw_cons);
3050 
3051 		if (rx_pkts == budget)
3052 			break;
3053 	}
3054 
3055 	cpr->cp_raw_cons = raw_cons;
3056 	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3057 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3058 
3059 	if (event & BNXT_AGG_EVENT)
3060 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3061 	if (flush_xdp)
3062 		xdp_do_flush();
3063 
3064 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3065 		napi_complete_done(napi, rx_pkts);
3066 		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3067 	}
3068 	return rx_pkts;
3069 }
3070 
3071 static int bnxt_poll(struct napi_struct *napi, int budget)
3072 {
3073 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3074 	struct bnxt *bp = bnapi->bp;
3075 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3076 	int work_done = 0;
3077 
3078 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3079 		napi_complete(napi);
3080 		return 0;
3081 	}
3082 	while (1) {
3083 		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3084 
3085 		if (work_done >= budget) {
3086 			if (!budget)
3087 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3088 			break;
3089 		}
3090 
3091 		if (!bnxt_has_work(bp, cpr)) {
3092 			if (napi_complete_done(napi, work_done))
3093 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3094 			break;
3095 		}
3096 	}
3097 	if (bp->flags & BNXT_FLAG_DIM) {
3098 		struct dim_sample dim_sample = {};
3099 
3100 		dim_update_sample(cpr->event_ctr,
3101 				  cpr->rx_packets,
3102 				  cpr->rx_bytes,
3103 				  &dim_sample);
3104 		net_dim(&cpr->dim, dim_sample);
3105 	}
3106 	return work_done;
3107 }
3108 
3109 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3110 {
3111 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3112 	int i, work_done = 0;
3113 
3114 	for (i = 0; i < cpr->cp_ring_count; i++) {
3115 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3116 
3117 		if (cpr2->had_nqe_notify) {
3118 			work_done += __bnxt_poll_work(bp, cpr2,
3119 						      budget - work_done);
3120 			cpr->has_more_work |= cpr2->has_more_work;
3121 		}
3122 	}
3123 	return work_done;
3124 }
3125 
3126 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3127 				 u64 dbr_type, int budget)
3128 {
3129 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3130 	int i;
3131 
3132 	for (i = 0; i < cpr->cp_ring_count; i++) {
3133 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3134 		struct bnxt_db_info *db;
3135 
3136 		if (cpr2->had_work_done) {
3137 			u32 tgl = 0;
3138 
3139 			if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3140 				cpr2->had_nqe_notify = 0;
3141 				tgl = cpr2->toggle;
3142 			}
3143 			db = &cpr2->cp_db;
3144 			bnxt_writeq(bp,
3145 				    db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3146 				    DB_RING_IDX(db, cpr2->cp_raw_cons),
3147 				    db->doorbell);
3148 			cpr2->had_work_done = 0;
3149 		}
3150 	}
3151 	__bnxt_poll_work_done(bp, bnapi, budget);
3152 }
3153 
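/* NAPI poll handler for the notification queue (NQ) based completion
 * model: each NQ entry points to a completion ring which is then polled
 * with __bnxt_poll_work().
 */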
3154 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3155 {
3156 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3157 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3158 	struct bnxt_cp_ring_info *cpr_rx;
3159 	u32 raw_cons = cpr->cp_raw_cons;
3160 	struct bnxt *bp = bnapi->bp;
3161 	struct nqe_cn *nqcmp;
3162 	int work_done = 0;
3163 	u32 cons;
3164 
3165 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3166 		napi_complete(napi);
3167 		return 0;
3168 	}
3169 	if (cpr->has_more_work) {
3170 		cpr->has_more_work = 0;
3171 		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3172 	}
3173 	while (1) {
3174 		u16 type;
3175 
3176 		cons = RING_CMP(raw_cons);
3177 		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3178 
3179 		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3180 			if (cpr->has_more_work)
3181 				break;
3182 
3183 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3184 					     budget);
3185 			cpr->cp_raw_cons = raw_cons;
3186 			if (napi_complete_done(napi, work_done))
3187 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3188 						  cpr->cp_raw_cons);
3189 			goto poll_done;
3190 		}
3191 
3192 		/* The valid test of the entry must be done first before
3193 		 * reading any further.
3194 		 */
3195 		dma_rmb();
3196 
3197 		type = le16_to_cpu(nqcmp->type);
3198 		if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3199 			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3200 			u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3201 			struct bnxt_cp_ring_info *cpr2;
3202 
3203 			/* No more budget for RX work */
3204 			if (budget && work_done >= budget &&
3205 			    cq_type == BNXT_NQ_HDL_TYPE_RX)
3206 				break;
3207 
3208 			idx = BNXT_NQ_HDL_IDX(idx);
3209 			cpr2 = &cpr->cp_ring_arr[idx];
3210 			cpr2->had_nqe_notify = 1;
3211 			cpr2->toggle = NQE_CN_TOGGLE(type);
3212 			work_done += __bnxt_poll_work(bp, cpr2,
3213 						      budget - work_done);
3214 			cpr->has_more_work |= cpr2->has_more_work;
3215 		} else {
3216 			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3217 		}
3218 		raw_cons = NEXT_RAW_CMP(raw_cons);
3219 	}
3220 	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3221 	if (raw_cons != cpr->cp_raw_cons) {
3222 		cpr->cp_raw_cons = raw_cons;
3223 		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3224 	}
3225 poll_done:
3226 	cpr_rx = &cpr->cp_ring_arr[0];
3227 	if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3228 	    (bp->flags & BNXT_FLAG_DIM)) {
3229 		struct dim_sample dim_sample = {};
3230 
3231 		dim_update_sample(cpr->event_ctr,
3232 				  cpr_rx->rx_packets,
3233 				  cpr_rx->rx_bytes,
3234 				  &dim_sample);
3235 		net_dim(&cpr->dim, dim_sample);
3236 	}
3237 	return work_done;
3238 }
3239 
3240 static void bnxt_free_tx_skbs(struct bnxt *bp)
3241 {
3242 	int i, max_idx;
3243 	struct pci_dev *pdev = bp->pdev;
3244 
3245 	if (!bp->tx_ring)
3246 		return;
3247 
3248 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3249 	for (i = 0; i < bp->tx_nr_rings; i++) {
3250 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3251 		int j;
3252 
3253 		if (!txr->tx_buf_ring)
3254 			continue;
3255 
3256 		for (j = 0; j < max_idx;) {
3257 			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
3258 			struct sk_buff *skb;
3259 			int k, last;
3260 
3261 			if (i < bp->tx_nr_rings_xdp &&
3262 			    tx_buf->action == XDP_REDIRECT) {
3263 				dma_unmap_single(&pdev->dev,
3264 					dma_unmap_addr(tx_buf, mapping),
3265 					dma_unmap_len(tx_buf, len),
3266 					DMA_TO_DEVICE);
3267 				xdp_return_frame(tx_buf->xdpf);
3268 				tx_buf->action = 0;
3269 				tx_buf->xdpf = NULL;
3270 				j++;
3271 				continue;
3272 			}
3273 
3274 			skb = tx_buf->skb;
3275 			if (!skb) {
3276 				j++;
3277 				continue;
3278 			}
3279 
3280 			tx_buf->skb = NULL;
3281 
3282 			if (tx_buf->is_push) {
3283 				dev_kfree_skb(skb);
3284 				j += 2;
3285 				continue;
3286 			}
3287 
3288 			dma_unmap_single(&pdev->dev,
3289 					 dma_unmap_addr(tx_buf, mapping),
3290 					 skb_headlen(skb),
3291 					 DMA_TO_DEVICE);
3292 
3293 			last = tx_buf->nr_frags;
3294 			j += 2;
3295 			for (k = 0; k < last; k++, j++) {
3296 				int ring_idx = j & bp->tx_ring_mask;
3297 				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
3298 
3299 				tx_buf = &txr->tx_buf_ring[ring_idx];
3300 				dma_unmap_page(
3301 					&pdev->dev,
3302 					dma_unmap_addr(tx_buf, mapping),
3303 					skb_frag_size(frag), DMA_TO_DEVICE);
3304 			}
3305 			dev_kfree_skb(skb);
3306 		}
3307 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3308 	}
3309 }
3310 
3311 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
3312 {
3313 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3314 	struct pci_dev *pdev = bp->pdev;
3315 	struct bnxt_tpa_idx_map *map;
3316 	int i, max_idx, max_agg_idx;
3317 
3318 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3319 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3320 	if (!rxr->rx_tpa)
3321 		goto skip_rx_tpa_free;
3322 
3323 	for (i = 0; i < bp->max_tpa; i++) {
3324 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3325 		u8 *data = tpa_info->data;
3326 
3327 		if (!data)
3328 			continue;
3329 
3330 		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
3331 				       bp->rx_buf_use_size, bp->rx_dir,
3332 				       DMA_ATTR_WEAK_ORDERING);
3333 
3334 		tpa_info->data = NULL;
3335 
3336 		skb_free_frag(data);
3337 	}
3338 
3339 skip_rx_tpa_free:
3340 	if (!rxr->rx_buf_ring)
3341 		goto skip_rx_buf_free;
3342 
3343 	for (i = 0; i < max_idx; i++) {
3344 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3345 		dma_addr_t mapping = rx_buf->mapping;
3346 		void *data = rx_buf->data;
3347 
3348 		if (!data)
3349 			continue;
3350 
3351 		rx_buf->data = NULL;
3352 		if (BNXT_RX_PAGE_MODE(bp)) {
3353 			page_pool_recycle_direct(rxr->page_pool, data);
3354 		} else {
3355 			dma_unmap_single_attrs(&pdev->dev, mapping,
3356 					       bp->rx_buf_use_size, bp->rx_dir,
3357 					       DMA_ATTR_WEAK_ORDERING);
3358 			skb_free_frag(data);
3359 		}
3360 	}
3361 
3362 skip_rx_buf_free:
3363 	if (!rxr->rx_agg_ring)
3364 		goto skip_rx_agg_free;
3365 
3366 	for (i = 0; i < max_agg_idx; i++) {
3367 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3368 		struct page *page = rx_agg_buf->page;
3369 
3370 		if (!page)
3371 			continue;
3372 
3373 		rx_agg_buf->page = NULL;
3374 		__clear_bit(i, rxr->rx_agg_bmap);
3375 
3376 		page_pool_recycle_direct(rxr->page_pool, page);
3377 	}
3378 
3379 skip_rx_agg_free:
3380 	map = rxr->rx_tpa_idx_map;
3381 	if (map)
3382 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3383 }
3384 
3385 static void bnxt_free_rx_skbs(struct bnxt *bp)
3386 {
3387 	int i;
3388 
3389 	if (!bp->rx_ring)
3390 		return;
3391 
3392 	for (i = 0; i < bp->rx_nr_rings; i++)
3393 		bnxt_free_one_rx_ring_skbs(bp, i);
3394 }
3395 
3396 static void bnxt_free_skbs(struct bnxt *bp)
3397 {
3398 	bnxt_free_tx_skbs(bp);
3399 	bnxt_free_rx_skbs(bp);
3400 }
3401 
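/* Pre-initialize a block of firmware context memory.  If no per-entry
 * init offset is given, fill the whole block with the init value;
 * otherwise write the value at that offset of every entry.
 */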
3402 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3403 {
3404 	u8 init_val = ctxm->init_value;
3405 	u16 offset = ctxm->init_offset;
3406 	u8 *p2 = p;
3407 	int i;
3408 
3409 	if (!init_val)
3410 		return;
3411 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3412 		memset(p, init_val, len);
3413 		return;
3414 	}
3415 	for (i = 0; i < len; i += ctxm->entry_size)
3416 		*(p2 + i + offset) = init_val;
3417 }
3418 
3419 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3420 {
3421 	struct pci_dev *pdev = bp->pdev;
3422 	int i;
3423 
3424 	if (!rmem->pg_arr)
3425 		goto skip_pages;
3426 
3427 	for (i = 0; i < rmem->nr_pages; i++) {
3428 		if (!rmem->pg_arr[i])
3429 			continue;
3430 
3431 		dma_free_coherent(&pdev->dev, rmem->page_size,
3432 				  rmem->pg_arr[i], rmem->dma_arr[i]);
3433 
3434 		rmem->pg_arr[i] = NULL;
3435 	}
3436 skip_pages:
3437 	if (rmem->pg_tbl) {
3438 		size_t pg_tbl_size = rmem->nr_pages * 8;
3439 
3440 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3441 			pg_tbl_size = rmem->page_size;
3442 		dma_free_coherent(&pdev->dev, pg_tbl_size,
3443 				  rmem->pg_tbl, rmem->pg_tbl_map);
3444 		rmem->pg_tbl = NULL;
3445 	}
3446 	if (rmem->vmem_size && *rmem->vmem) {
3447 		vfree(*rmem->vmem);
3448 		*rmem->vmem = NULL;
3449 	}
3450 }
3451 
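/* Allocate the DMA-coherent pages that back a ring.  Multi-page or
 * multi-level rings also get a page table holding each page address with
 * the PTE valid/next-to-last/last bits the hardware expects.  An optional
 * vmalloc'ed area holds the ring's software state.
 */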
3452 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3453 {
3454 	struct pci_dev *pdev = bp->pdev;
3455 	u64 valid_bit = 0;
3456 	int i;
3457 
3458 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3459 		valid_bit = PTU_PTE_VALID;
3460 	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3461 		size_t pg_tbl_size = rmem->nr_pages * 8;
3462 
3463 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3464 			pg_tbl_size = rmem->page_size;
3465 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3466 						  &rmem->pg_tbl_map,
3467 						  GFP_KERNEL);
3468 		if (!rmem->pg_tbl)
3469 			return -ENOMEM;
3470 	}
3471 
3472 	for (i = 0; i < rmem->nr_pages; i++) {
3473 		u64 extra_bits = valid_bit;
3474 
3475 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3476 						     rmem->page_size,
3477 						     &rmem->dma_arr[i],
3478 						     GFP_KERNEL);
3479 		if (!rmem->pg_arr[i])
3480 			return -ENOMEM;
3481 
3482 		if (rmem->ctx_mem)
3483 			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3484 					  rmem->page_size);
3485 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
3486 			if (i == rmem->nr_pages - 2 &&
3487 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3488 				extra_bits |= PTU_PTE_NEXT_TO_LAST;
3489 			else if (i == rmem->nr_pages - 1 &&
3490 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3491 				extra_bits |= PTU_PTE_LAST;
3492 			rmem->pg_tbl[i] =
3493 				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3494 		}
3495 	}
3496 
3497 	if (rmem->vmem_size) {
3498 		*rmem->vmem = vzalloc(rmem->vmem_size);
3499 		if (!(*rmem->vmem))
3500 			return -ENOMEM;
3501 	}
3502 	return 0;
3503 }
3504 
3505 static void bnxt_free_tpa_info(struct bnxt *bp)
3506 {
3507 	int i, j;
3508 
3509 	for (i = 0; i < bp->rx_nr_rings; i++) {
3510 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3511 
3512 		kfree(rxr->rx_tpa_idx_map);
3513 		rxr->rx_tpa_idx_map = NULL;
3514 		if (rxr->rx_tpa) {
3515 			for (j = 0; j < bp->max_tpa; j++) {
3516 				kfree(rxr->rx_tpa[j].agg_arr);
3517 				rxr->rx_tpa[j].agg_arr = NULL;
3518 			}
3519 		}
3520 		kfree(rxr->rx_tpa);
3521 		rxr->rx_tpa = NULL;
3522 	}
3523 }
3524 
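/* Allocate per-ring TPA (hardware GRO/LRO) state.  On P5+ chips this also
 * includes an aggregation completion array per TPA slot and a map used to
 * track in-flight aggregation IDs.
 */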
3525 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3526 {
3527 	int i, j;
3528 
3529 	bp->max_tpa = MAX_TPA;
3530 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3531 		if (!bp->max_tpa_v2)
3532 			return 0;
3533 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3534 	}
3535 
3536 	for (i = 0; i < bp->rx_nr_rings; i++) {
3537 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3538 		struct rx_agg_cmp *agg;
3539 
3540 		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3541 				      GFP_KERNEL);
3542 		if (!rxr->rx_tpa)
3543 			return -ENOMEM;
3544 
3545 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3546 			continue;
3547 		for (j = 0; j < bp->max_tpa; j++) {
3548 			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3549 			if (!agg)
3550 				return -ENOMEM;
3551 			rxr->rx_tpa[j].agg_arr = agg;
3552 		}
3553 		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3554 					      GFP_KERNEL);
3555 		if (!rxr->rx_tpa_idx_map)
3556 			return -ENOMEM;
3557 	}
3558 	return 0;
3559 }
3560 
3561 static void bnxt_free_rx_rings(struct bnxt *bp)
3562 {
3563 	int i;
3564 
3565 	if (!bp->rx_ring)
3566 		return;
3567 
3568 	bnxt_free_tpa_info(bp);
3569 	for (i = 0; i < bp->rx_nr_rings; i++) {
3570 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3571 		struct bnxt_ring_struct *ring;
3572 
3573 		if (rxr->xdp_prog)
3574 			bpf_prog_put(rxr->xdp_prog);
3575 
3576 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3577 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3578 
3579 		page_pool_destroy(rxr->page_pool);
3580 		rxr->page_pool = NULL;
3581 
3582 		kfree(rxr->rx_agg_bmap);
3583 		rxr->rx_agg_bmap = NULL;
3584 
3585 		ring = &rxr->rx_ring_struct;
3586 		bnxt_free_ring(bp, &ring->ring_mem);
3587 
3588 		ring = &rxr->rx_agg_ring_struct;
3589 		bnxt_free_ring(bp, &ring->ring_mem);
3590 	}
3591 }
3592 
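/* Create the page pool for one RX ring, sized for the aggregation ring
 * (plus the RX ring itself in page mode) and bound to the ring's NAPI
 * context and preferred NUMA node.
 */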
3593 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3594 				   struct bnxt_rx_ring_info *rxr,
3595 				   int numa_node)
3596 {
3597 	struct page_pool_params pp = { 0 };
3598 
3599 	pp.pool_size = bp->rx_agg_ring_size;
3600 	if (BNXT_RX_PAGE_MODE(bp))
3601 		pp.pool_size += bp->rx_ring_size;
3602 	pp.nid = numa_node;
3603 	pp.napi = &rxr->bnapi->napi;
3604 	pp.netdev = bp->dev;
3605 	pp.dev = &bp->pdev->dev;
3606 	pp.dma_dir = bp->rx_dir;
3607 	pp.max_len = PAGE_SIZE;
3608 	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3609 
3610 	rxr->page_pool = page_pool_create(&pp);
3611 	if (IS_ERR(rxr->page_pool)) {
3612 		int err = PTR_ERR(rxr->page_pool);
3613 
3614 		rxr->page_pool = NULL;
3615 		return err;
3616 	}
3617 	return 0;
3618 }
3619 
3620 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3621 {
3622 	int numa_node = dev_to_node(&bp->pdev->dev);
3623 	int i, rc = 0, agg_rings = 0, cpu;
3624 
3625 	if (!bp->rx_ring)
3626 		return -ENOMEM;
3627 
3628 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
3629 		agg_rings = 1;
3630 
3631 	for (i = 0; i < bp->rx_nr_rings; i++) {
3632 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3633 		struct bnxt_ring_struct *ring;
3634 		int cpu_node;
3635 
3636 		ring = &rxr->rx_ring_struct;
3637 
3638 		cpu = cpumask_local_spread(i, numa_node);
3639 		cpu_node = cpu_to_node(cpu);
3640 		netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3641 			   i, cpu_node);
3642 		rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3643 		if (rc)
3644 			return rc;
3645 
3646 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3647 		if (rc < 0)
3648 			return rc;
3649 
3650 		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3651 						MEM_TYPE_PAGE_POOL,
3652 						rxr->page_pool);
3653 		if (rc) {
3654 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3655 			return rc;
3656 		}
3657 
3658 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3659 		if (rc)
3660 			return rc;
3661 
3662 		ring->grp_idx = i;
3663 		if (agg_rings) {
3664 			u16 mem_size;
3665 
3666 			ring = &rxr->rx_agg_ring_struct;
3667 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3668 			if (rc)
3669 				return rc;
3670 
3671 			ring->grp_idx = i;
3672 			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3673 			mem_size = rxr->rx_agg_bmap_size / 8;
3674 			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3675 			if (!rxr->rx_agg_bmap)
3676 				return -ENOMEM;
3677 		}
3678 	}
3679 	if (bp->flags & BNXT_FLAG_TPA)
3680 		rc = bnxt_alloc_tpa_info(bp);
3681 	return rc;
3682 }
3683 
3684 static void bnxt_free_tx_rings(struct bnxt *bp)
3685 {
3686 	int i;
3687 	struct pci_dev *pdev = bp->pdev;
3688 
3689 	if (!bp->tx_ring)
3690 		return;
3691 
3692 	for (i = 0; i < bp->tx_nr_rings; i++) {
3693 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3694 		struct bnxt_ring_struct *ring;
3695 
3696 		if (txr->tx_push) {
3697 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
3698 					  txr->tx_push, txr->tx_push_mapping);
3699 			txr->tx_push = NULL;
3700 		}
3701 
3702 		ring = &txr->tx_ring_struct;
3703 
3704 		bnxt_free_ring(bp, &ring->ring_mem);
3705 	}
3706 }
3707 
3708 #define BNXT_TC_TO_RING_BASE(bp, tc)	\
3709 	((tc) * (bp)->tx_nr_rings_per_tc)
3710 
3711 #define BNXT_RING_TO_TC_OFF(bp, tx)	\
3712 	((tx) % (bp)->tx_nr_rings_per_tc)
3713 
3714 #define BNXT_RING_TO_TC(bp, tx)		\
3715 	((tx) / (bp)->tx_nr_rings_per_tc)
3716 
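/* Allocate the TX descriptor rings.  When TX push is enabled, each ring
 * also gets a small coherent buffer that backs the push operation.  Each
 * ring is mapped to its hardware queue via the TC-to-queue index table.
 */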
3717 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3718 {
3719 	int i, j, rc;
3720 	struct pci_dev *pdev = bp->pdev;
3721 
3722 	bp->tx_push_size = 0;
3723 	if (bp->tx_push_thresh) {
3724 		int push_size;
3725 
3726 		push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3727 					bp->tx_push_thresh);
3728 
3729 		if (push_size > 256) {
3730 			push_size = 0;
3731 			bp->tx_push_thresh = 0;
3732 		}
3733 
3734 		bp->tx_push_size = push_size;
3735 	}
3736 
3737 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3738 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3739 		struct bnxt_ring_struct *ring;
3740 		u8 qidx;
3741 
3742 		ring = &txr->tx_ring_struct;
3743 
3744 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3745 		if (rc)
3746 			return rc;
3747 
3748 		ring->grp_idx = txr->bnapi->index;
3749 		if (bp->tx_push_size) {
3750 			dma_addr_t mapping;
3751 
3752 			/* One pre-allocated DMA buffer to back up
3753 			 * the TX push operation
3754 			 */
3755 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
3756 						bp->tx_push_size,
3757 						&txr->tx_push_mapping,
3758 						GFP_KERNEL);
3759 
3760 			if (!txr->tx_push)
3761 				return -ENOMEM;
3762 
3763 			mapping = txr->tx_push_mapping +
3764 				sizeof(struct tx_push_bd);
3765 			txr->data_mapping = cpu_to_le64(mapping);
3766 		}
3767 		qidx = bp->tc_to_qidx[j];
3768 		ring->queue_id = bp->q_info[qidx].queue_id;
3769 		spin_lock_init(&txr->xdp_tx_lock);
3770 		if (i < bp->tx_nr_rings_xdp)
3771 			continue;
3772 		if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
3773 			j++;
3774 	}
3775 	return 0;
3776 }
3777 
3778 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3779 {
3780 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3781 
3782 	kfree(cpr->cp_desc_ring);
3783 	cpr->cp_desc_ring = NULL;
3784 	ring->ring_mem.pg_arr = NULL;
3785 	kfree(cpr->cp_desc_mapping);
3786 	cpr->cp_desc_mapping = NULL;
3787 	ring->ring_mem.dma_arr = NULL;
3788 }
3789 
3790 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3791 {
3792 	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3793 	if (!cpr->cp_desc_ring)
3794 		return -ENOMEM;
3795 	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3796 				       GFP_KERNEL);
3797 	if (!cpr->cp_desc_mapping)
3798 		return -ENOMEM;
3799 	return 0;
3800 }
3801 
3802 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3803 {
3804 	int i;
3805 
3806 	if (!bp->bnapi)
3807 		return;
3808 	for (i = 0; i < bp->cp_nr_rings; i++) {
3809 		struct bnxt_napi *bnapi = bp->bnapi[i];
3810 
3811 		if (!bnapi)
3812 			continue;
3813 		bnxt_free_cp_arrays(&bnapi->cp_ring);
3814 	}
3815 }
3816 
3817 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3818 {
3819 	int i, n = bp->cp_nr_pages;
3820 
3821 	for (i = 0; i < bp->cp_nr_rings; i++) {
3822 		struct bnxt_napi *bnapi = bp->bnapi[i];
3823 		int rc;
3824 
3825 		if (!bnapi)
3826 			continue;
3827 		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3828 		if (rc)
3829 			return rc;
3830 	}
3831 	return 0;
3832 }
3833 
3834 static void bnxt_free_cp_rings(struct bnxt *bp)
3835 {
3836 	int i;
3837 
3838 	if (!bp->bnapi)
3839 		return;
3840 
3841 	for (i = 0; i < bp->cp_nr_rings; i++) {
3842 		struct bnxt_napi *bnapi = bp->bnapi[i];
3843 		struct bnxt_cp_ring_info *cpr;
3844 		struct bnxt_ring_struct *ring;
3845 		int j;
3846 
3847 		if (!bnapi)
3848 			continue;
3849 
3850 		cpr = &bnapi->cp_ring;
3851 		ring = &cpr->cp_ring_struct;
3852 
3853 		bnxt_free_ring(bp, &ring->ring_mem);
3854 
3855 		if (!cpr->cp_ring_arr)
3856 			continue;
3857 
3858 		for (j = 0; j < cpr->cp_ring_count; j++) {
3859 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
3860 
3861 			ring = &cpr2->cp_ring_struct;
3862 			bnxt_free_ring(bp, &ring->ring_mem);
3863 			bnxt_free_cp_arrays(cpr2);
3864 		}
3865 		kfree(cpr->cp_ring_arr);
3866 		cpr->cp_ring_arr = NULL;
3867 		cpr->cp_ring_count = 0;
3868 	}
3869 }
3870 
3871 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
3872 				  struct bnxt_cp_ring_info *cpr)
3873 {
3874 	struct bnxt_ring_mem_info *rmem;
3875 	struct bnxt_ring_struct *ring;
3876 	int rc;
3877 
3878 	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3879 	if (rc) {
3880 		bnxt_free_cp_arrays(cpr);
3881 		return -ENOMEM;
3882 	}
3883 	ring = &cpr->cp_ring_struct;
3884 	rmem = &ring->ring_mem;
3885 	rmem->nr_pages = bp->cp_nr_pages;
3886 	rmem->page_size = HW_CMPD_RING_SIZE;
3887 	rmem->pg_arr = (void **)cpr->cp_desc_ring;
3888 	rmem->dma_arr = cpr->cp_desc_mapping;
3889 	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3890 	rc = bnxt_alloc_ring(bp, rmem);
3891 	if (rc) {
3892 		bnxt_free_ring(bp, rmem);
3893 		bnxt_free_cp_arrays(cpr);
3894 	}
3895 	return rc;
3896 }
3897 
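/* Allocate the completion rings.  On P5+ chips each NAPI owns a
 * notification queue plus separate completion sub-rings for RX and for
 * each TX traffic class; older chips use a single completion ring per
 * NAPI.
 */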
3898 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3899 {
3900 	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3901 	int i, j, rc, ulp_msix;
3902 	int tcs = bp->num_tc;
3903 
3904 	if (!tcs)
3905 		tcs = 1;
3906 	ulp_msix = bnxt_get_ulp_msix_num(bp);
3907 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
3908 		struct bnxt_napi *bnapi = bp->bnapi[i];
3909 		struct bnxt_cp_ring_info *cpr, *cpr2;
3910 		struct bnxt_ring_struct *ring;
3911 		int cp_count = 0, k;
3912 		int rx = 0, tx = 0;
3913 
3914 		if (!bnapi)
3915 			continue;
3916 
3917 		cpr = &bnapi->cp_ring;
3918 		cpr->bnapi = bnapi;
3919 		ring = &cpr->cp_ring_struct;
3920 
3921 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3922 		if (rc)
3923 			return rc;
3924 
3925 		ring->map_idx = ulp_msix + i;
3926 
3927 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3928 			continue;
3929 
3930 		if (i < bp->rx_nr_rings) {
3931 			cp_count++;
3932 			rx = 1;
3933 		}
3934 		if (i < bp->tx_nr_rings_xdp) {
3935 			cp_count++;
3936 			tx = 1;
3937 		} else if ((sh && i < bp->tx_nr_rings) ||
3938 			 (!sh && i >= bp->rx_nr_rings)) {
3939 			cp_count += tcs;
3940 			tx = 1;
3941 		}
3942 
3943 		cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
3944 					   GFP_KERNEL);
3945 		if (!cpr->cp_ring_arr)
3946 			return -ENOMEM;
3947 		cpr->cp_ring_count = cp_count;
3948 
3949 		for (k = 0; k < cp_count; k++) {
3950 			cpr2 = &cpr->cp_ring_arr[k];
3951 			rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
3952 			if (rc)
3953 				return rc;
3954 			cpr2->bnapi = bnapi;
3955 			cpr2->sw_stats = cpr->sw_stats;
3956 			cpr2->cp_idx = k;
3957 			if (!k && rx) {
3958 				bp->rx_ring[i].rx_cpr = cpr2;
3959 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
3960 			} else {
3961 				int n, tc = k - rx;
3962 
3963 				n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
3964 				bp->tx_ring[n].tx_cpr = cpr2;
3965 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
3966 			}
3967 		}
3968 		if (tx)
3969 			j++;
3970 	}
3971 	return 0;
3972 }
3973 
3974 static void bnxt_init_ring_struct(struct bnxt *bp)
3975 {
3976 	int i, j;
3977 
3978 	for (i = 0; i < bp->cp_nr_rings; i++) {
3979 		struct bnxt_napi *bnapi = bp->bnapi[i];
3980 		struct bnxt_ring_mem_info *rmem;
3981 		struct bnxt_cp_ring_info *cpr;
3982 		struct bnxt_rx_ring_info *rxr;
3983 		struct bnxt_tx_ring_info *txr;
3984 		struct bnxt_ring_struct *ring;
3985 
3986 		if (!bnapi)
3987 			continue;
3988 
3989 		cpr = &bnapi->cp_ring;
3990 		ring = &cpr->cp_ring_struct;
3991 		rmem = &ring->ring_mem;
3992 		rmem->nr_pages = bp->cp_nr_pages;
3993 		rmem->page_size = HW_CMPD_RING_SIZE;
3994 		rmem->pg_arr = (void **)cpr->cp_desc_ring;
3995 		rmem->dma_arr = cpr->cp_desc_mapping;
3996 		rmem->vmem_size = 0;
3997 
3998 		rxr = bnapi->rx_ring;
3999 		if (!rxr)
4000 			goto skip_rx;
4001 
4002 		ring = &rxr->rx_ring_struct;
4003 		rmem = &ring->ring_mem;
4004 		rmem->nr_pages = bp->rx_nr_pages;
4005 		rmem->page_size = HW_RXBD_RING_SIZE;
4006 		rmem->pg_arr = (void **)rxr->rx_desc_ring;
4007 		rmem->dma_arr = rxr->rx_desc_mapping;
4008 		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4009 		rmem->vmem = (void **)&rxr->rx_buf_ring;
4010 
4011 		ring = &rxr->rx_agg_ring_struct;
4012 		rmem = &ring->ring_mem;
4013 		rmem->nr_pages = bp->rx_agg_nr_pages;
4014 		rmem->page_size = HW_RXBD_RING_SIZE;
4015 		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4016 		rmem->dma_arr = rxr->rx_agg_desc_mapping;
4017 		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4018 		rmem->vmem = (void **)&rxr->rx_agg_ring;
4019 
4020 skip_rx:
4021 		bnxt_for_each_napi_tx(j, bnapi, txr) {
4022 			ring = &txr->tx_ring_struct;
4023 			rmem = &ring->ring_mem;
4024 			rmem->nr_pages = bp->tx_nr_pages;
4025 			rmem->page_size = HW_TXBD_RING_SIZE;
4026 			rmem->pg_arr = (void **)txr->tx_desc_ring;
4027 			rmem->dma_arr = txr->tx_desc_mapping;
4028 			rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4029 			rmem->vmem = (void **)&txr->tx_buf_ring;
4030 		}
4031 	}
4032 }
4033 
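/* Write the length/flags/type word and an opaque producer index into
 * every RX buffer descriptor on every page of the ring.
 */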
4034 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4035 {
4036 	int i;
4037 	u32 prod;
4038 	struct rx_bd **rx_buf_ring;
4039 
4040 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4041 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4042 		int j;
4043 		struct rx_bd *rxbd;
4044 
4045 		rxbd = rx_buf_ring[i];
4046 		if (!rxbd)
4047 			continue;
4048 
4049 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4050 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4051 			rxbd->rx_bd_opaque = prod;
4052 		}
4053 	}
4054 }
4055 
4056 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4057 {
4058 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4059 	struct net_device *dev = bp->dev;
4060 	u32 prod;
4061 	int i;
4062 
4063 	prod = rxr->rx_prod;
4064 	for (i = 0; i < bp->rx_ring_size; i++) {
4065 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4066 			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
4067 				    ring_nr, i, bp->rx_ring_size);
4068 			break;
4069 		}
4070 		prod = NEXT_RX(prod);
4071 	}
4072 	rxr->rx_prod = prod;
4073 
4074 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4075 		return 0;
4076 
4077 	prod = rxr->rx_agg_prod;
4078 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
4079 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
4080 			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
4081 				    ring_nr, i, bp->rx_agg_ring_size);
4082 			break;
4083 		}
4084 		prod = NEXT_RX_AGG(prod);
4085 	}
4086 	rxr->rx_agg_prod = prod;
4087 
4088 	if (rxr->rx_tpa) {
4089 		dma_addr_t mapping;
4090 		u8 *data;
4091 
4092 		for (i = 0; i < bp->max_tpa; i++) {
4093 			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
4094 			if (!data)
4095 				return -ENOMEM;
4096 
4097 			rxr->rx_tpa[i].data = data;
4098 			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4099 			rxr->rx_tpa[i].mapping = mapping;
4100 		}
4101 	}
4102 	return 0;
4103 }
4104 
4105 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4106 {
4107 	struct bnxt_rx_ring_info *rxr;
4108 	struct bnxt_ring_struct *ring;
4109 	u32 type;
4110 
4111 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4112 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4113 
4114 	if (NET_IP_ALIGN == 2)
4115 		type |= RX_BD_FLAGS_SOP;
4116 
4117 	rxr = &bp->rx_ring[ring_nr];
4118 	ring = &rxr->rx_ring_struct;
4119 	bnxt_init_rxbd_pages(ring, type);
4120 
4121 	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4122 			     &rxr->bnapi->napi);
4123 
4124 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4125 		bpf_prog_add(bp->xdp_prog, 1);
4126 		rxr->xdp_prog = bp->xdp_prog;
4127 	}
4128 	ring->fw_ring_id = INVALID_HW_RING_ID;
4129 
4130 	ring = &rxr->rx_agg_ring_struct;
4131 	ring->fw_ring_id = INVALID_HW_RING_ID;
4132 
4133 	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4134 		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4135 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4136 
4137 		bnxt_init_rxbd_pages(ring, type);
4138 	}
4139 
4140 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
4141 }
4142 
4143 static void bnxt_init_cp_rings(struct bnxt *bp)
4144 {
4145 	int i, j;
4146 
4147 	for (i = 0; i < bp->cp_nr_rings; i++) {
4148 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4149 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4150 
4151 		ring->fw_ring_id = INVALID_HW_RING_ID;
4152 		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4153 		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4154 		if (!cpr->cp_ring_arr)
4155 			continue;
4156 		for (j = 0; j < cpr->cp_ring_count; j++) {
4157 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4158 
4159 			ring = &cpr2->cp_ring_struct;
4160 			ring->fw_ring_id = INVALID_HW_RING_ID;
4161 			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4162 			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4163 		}
4164 	}
4165 }
4166 
4167 static int bnxt_init_rx_rings(struct bnxt *bp)
4168 {
4169 	int i, rc = 0;
4170 
4171 	if (BNXT_RX_PAGE_MODE(bp)) {
4172 		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4173 		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4174 	} else {
4175 		bp->rx_offset = BNXT_RX_OFFSET;
4176 		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4177 	}
4178 
4179 	for (i = 0; i < bp->rx_nr_rings; i++) {
4180 		rc = bnxt_init_one_rx_ring(bp, i);
4181 		if (rc)
4182 			break;
4183 	}
4184 
4185 	return rc;
4186 }
4187 
4188 static int bnxt_init_tx_rings(struct bnxt *bp)
4189 {
4190 	u16 i;
4191 
4192 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4193 				   BNXT_MIN_TX_DESC_CNT);
4194 
4195 	for (i = 0; i < bp->tx_nr_rings; i++) {
4196 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4197 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4198 
4199 		ring->fw_ring_id = INVALID_HW_RING_ID;
4200 
4201 		if (i >= bp->tx_nr_rings_xdp)
4202 			netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4203 					     NETDEV_QUEUE_TYPE_TX,
4204 					     &txr->bnapi->napi);
4205 	}
4206 
4207 	return 0;
4208 }
4209 
4210 static void bnxt_free_ring_grps(struct bnxt *bp)
4211 {
4212 	kfree(bp->grp_info);
4213 	bp->grp_info = NULL;
4214 }
4215 
4216 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4217 {
4218 	int i;
4219 
4220 	if (irq_re_init) {
4221 		bp->grp_info = kcalloc(bp->cp_nr_rings,
4222 				       sizeof(struct bnxt_ring_grp_info),
4223 				       GFP_KERNEL);
4224 		if (!bp->grp_info)
4225 			return -ENOMEM;
4226 	}
4227 	for (i = 0; i < bp->cp_nr_rings; i++) {
4228 		if (irq_re_init)
4229 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4230 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4231 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4232 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4233 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4234 	}
4235 	return 0;
4236 }
4237 
4238 static void bnxt_free_vnics(struct bnxt *bp)
4239 {
4240 	kfree(bp->vnic_info);
4241 	bp->vnic_info = NULL;
4242 	bp->nr_vnics = 0;
4243 }
4244 
4245 static int bnxt_alloc_vnics(struct bnxt *bp)
4246 {
4247 	int num_vnics = 1;
4248 
4249 #ifdef CONFIG_RFS_ACCEL
4250 	if (bp->flags & BNXT_FLAG_RFS) {
4251 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4252 			num_vnics++;
4253 		else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4254 			num_vnics += bp->rx_nr_rings;
4255 	}
4256 #endif
4257 
4258 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4259 		num_vnics++;
4260 
4261 	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4262 				GFP_KERNEL);
4263 	if (!bp->vnic_info)
4264 		return -ENOMEM;
4265 
4266 	bp->nr_vnics = num_vnics;
4267 	return 0;
4268 }
4269 
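/* Initialize all VNICs with invalid hardware IDs and set up the RSS hash
 * key: the default VNIC gets a (possibly freshly generated) random key and
 * the Toeplitz prefix derived from its first 8 bytes; the other VNICs copy
 * the default VNIC's key.
 */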
4270 static void bnxt_init_vnics(struct bnxt *bp)
4271 {
4272 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4273 	int i;
4274 
4275 	for (i = 0; i < bp->nr_vnics; i++) {
4276 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4277 		int j;
4278 
4279 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
4280 		vnic->vnic_id = i;
4281 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4282 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4283 
4284 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4285 
4286 		if (bp->vnic_info[i].rss_hash_key) {
4287 			if (i == BNXT_VNIC_DEFAULT) {
4288 				u8 *key = (void *)vnic->rss_hash_key;
4289 				int k;
4290 
4291 				if (!bp->rss_hash_key_valid &&
4292 				    !bp->rss_hash_key_updated) {
4293 					get_random_bytes(bp->rss_hash_key,
4294 							 HW_HASH_KEY_SIZE);
4295 					bp->rss_hash_key_updated = true;
4296 				}
4297 
4298 				memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4299 				       HW_HASH_KEY_SIZE);
4300 
4301 				if (!bp->rss_hash_key_updated)
4302 					continue;
4303 
4304 				bp->rss_hash_key_updated = false;
4305 				bp->rss_hash_key_valid = true;
4306 
4307 				bp->toeplitz_prefix = 0;
4308 				for (k = 0; k < 8; k++) {
4309 					bp->toeplitz_prefix <<= 8;
4310 					bp->toeplitz_prefix |= key[k];
4311 				}
4312 			} else {
4313 				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4314 				       HW_HASH_KEY_SIZE);
4315 			}
4316 		}
4317 	}
4318 }
4319 
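/* Return the number of descriptor pages used for a ring of ring_size
 * entries: ring_size / desc_per_pg plus one, rounded up to a power of two
 * (minimum of one page).
 */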
4320 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4321 {
4322 	int pages;
4323 
4324 	pages = ring_size / desc_per_pg;
4325 
4326 	if (!pages)
4327 		return 1;
4328 
4329 	pages++;
4330 
4331 	while (pages & (pages - 1))
4332 		pages++;
4333 
4334 	return pages;
4335 }
4336 
4337 void bnxt_set_tpa_flags(struct bnxt *bp)
4338 {
4339 	bp->flags &= ~BNXT_FLAG_TPA;
4340 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4341 		return;
4342 	if (bp->dev->features & NETIF_F_LRO)
4343 		bp->flags |= BNXT_FLAG_LRO;
4344 	else if (bp->dev->features & NETIF_F_GRO_HW)
4345 		bp->flags |= BNXT_FLAG_GRO;
4346 }
4347 
4348 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4349  * be set on entry.
4350  */
4351 void bnxt_set_ring_params(struct bnxt *bp)
4352 {
4353 	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4354 	u32 agg_factor = 0, agg_ring_size = 0;
4355 
4356 	/* 8 for CRC and VLAN */
4357 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4358 
4359 	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4360 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4361 
4362 	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
4363 	ring_size = bp->rx_ring_size;
4364 	bp->rx_agg_ring_size = 0;
4365 	bp->rx_agg_nr_pages = 0;
4366 
4367 	if (bp->flags & BNXT_FLAG_TPA)
4368 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4369 
4370 	bp->flags &= ~BNXT_FLAG_JUMBO;
4371 	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4372 		u32 jumbo_factor;
4373 
4374 		bp->flags |= BNXT_FLAG_JUMBO;
4375 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4376 		if (jumbo_factor > agg_factor)
4377 			agg_factor = jumbo_factor;
4378 	}
4379 	if (agg_factor) {
4380 		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4381 			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4382 			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4383 				    bp->rx_ring_size, ring_size);
4384 			bp->rx_ring_size = ring_size;
4385 		}
4386 		agg_ring_size = ring_size * agg_factor;
4387 
4388 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4389 							RX_DESC_CNT);
4390 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4391 			u32 tmp = agg_ring_size;
4392 
4393 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4394 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4395 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4396 				    tmp, agg_ring_size);
4397 		}
4398 		bp->rx_agg_ring_size = agg_ring_size;
4399 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4400 
4401 		if (BNXT_RX_PAGE_MODE(bp)) {
4402 			rx_space = PAGE_SIZE;
4403 			rx_size = PAGE_SIZE -
4404 				  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4405 				  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4406 		} else {
4407 			rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
4408 			rx_space = rx_size + NET_SKB_PAD +
4409 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4410 		}
4411 	}
4412 
4413 	bp->rx_buf_use_size = rx_size;
4414 	bp->rx_buf_size = rx_space;
4415 
4416 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4417 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4418 
4419 	ring_size = bp->tx_ring_size;
4420 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4421 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4422 
4423 	max_rx_cmpl = bp->rx_ring_size;
4424 	/* MAX TPA needs to be added because TPA_START completions are
4425 	 * immediately recycled, so the TPA completions are not bound by
4426 	 * the RX ring size.
4427 	 */
4428 	if (bp->flags & BNXT_FLAG_TPA)
4429 		max_rx_cmpl += bp->max_tpa;
4430 	/* RX and TPA completions are 32-byte, all others are 16-byte */
4431 	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4432 	bp->cp_ring_size = ring_size;
4433 
4434 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4435 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
4436 		bp->cp_nr_pages = MAX_CP_PAGES;
4437 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4438 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4439 			    ring_size, bp->cp_ring_size);
4440 	}
4441 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4442 	bp->cp_ring_mask = bp->cp_bit - 1;
4443 }
4444 
4445 /* Changing allocation mode of RX rings.
4446  * TODO: Update when extending xdp_rxq_info to support allocation modes.
4447  */
4448 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4449 {
4450 	struct net_device *dev = bp->dev;
4451 
4452 	if (page_mode) {
4453 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
4454 		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4455 
4456 		if (bp->xdp_prog->aux->xdp_has_frags)
4457 			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4458 		else
4459 			dev->max_mtu =
4460 				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4461 		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4462 			bp->flags |= BNXT_FLAG_JUMBO;
4463 			bp->rx_skb_func = bnxt_rx_multi_page_skb;
4464 		} else {
4465 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4466 			bp->rx_skb_func = bnxt_rx_page_skb;
4467 		}
4468 		bp->rx_dir = DMA_BIDIRECTIONAL;
4469 		/* Disable LRO or GRO_HW */
4470 		netdev_update_features(dev);
4471 	} else {
4472 		dev->max_mtu = bp->max_mtu;
4473 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4474 		bp->rx_dir = DMA_FROM_DEVICE;
4475 		bp->rx_skb_func = bnxt_rx_skb;
4476 	}
4477 	return 0;
4478 }
4479 
4480 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4481 {
4482 	int i;
4483 	struct bnxt_vnic_info *vnic;
4484 	struct pci_dev *pdev = bp->pdev;
4485 
4486 	if (!bp->vnic_info)
4487 		return;
4488 
4489 	for (i = 0; i < bp->nr_vnics; i++) {
4490 		vnic = &bp->vnic_info[i];
4491 
4492 		kfree(vnic->fw_grp_ids);
4493 		vnic->fw_grp_ids = NULL;
4494 
4495 		kfree(vnic->uc_list);
4496 		vnic->uc_list = NULL;
4497 
4498 		if (vnic->mc_list) {
4499 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4500 					  vnic->mc_list, vnic->mc_list_mapping);
4501 			vnic->mc_list = NULL;
4502 		}
4503 
4504 		if (vnic->rss_table) {
4505 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4506 					  vnic->rss_table,
4507 					  vnic->rss_table_dma_addr);
4508 			vnic->rss_table = NULL;
4509 		}
4510 
4511 		vnic->rss_hash_key = NULL;
4512 		vnic->flags = 0;
4513 	}
4514 }
4515 
4516 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4517 {
4518 	int i, rc = 0, size;
4519 	struct bnxt_vnic_info *vnic;
4520 	struct pci_dev *pdev = bp->pdev;
4521 	int max_rings;
4522 
4523 	for (i = 0; i < bp->nr_vnics; i++) {
4524 		vnic = &bp->vnic_info[i];
4525 
4526 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4527 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4528 
4529 			if (mem_size > 0) {
4530 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4531 				if (!vnic->uc_list) {
4532 					rc = -ENOMEM;
4533 					goto out;
4534 				}
4535 			}
4536 		}
4537 
4538 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4539 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4540 			vnic->mc_list =
4541 				dma_alloc_coherent(&pdev->dev,
4542 						   vnic->mc_list_size,
4543 						   &vnic->mc_list_mapping,
4544 						   GFP_KERNEL);
4545 			if (!vnic->mc_list) {
4546 				rc = -ENOMEM;
4547 				goto out;
4548 			}
4549 		}
4550 
4551 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4552 			goto vnic_skip_grps;
4553 
4554 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4555 			max_rings = bp->rx_nr_rings;
4556 		else
4557 			max_rings = 1;
4558 
4559 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4560 		if (!vnic->fw_grp_ids) {
4561 			rc = -ENOMEM;
4562 			goto out;
4563 		}
4564 vnic_skip_grps:
4565 		if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4566 		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4567 			continue;
4568 
4569 		/* Allocate rss table and hash key */
4570 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4571 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4572 			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4573 
4574 		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4575 		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4576 						     vnic->rss_table_size,
4577 						     &vnic->rss_table_dma_addr,
4578 						     GFP_KERNEL);
4579 		if (!vnic->rss_table) {
4580 			rc = -ENOMEM;
4581 			goto out;
4582 		}
4583 
4584 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4585 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4586 	}
4587 	return 0;
4588 
4589 out:
4590 	return rc;
4591 }
4592 
4593 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4594 {
4595 	struct bnxt_hwrm_wait_token *token;
4596 
4597 	dma_pool_destroy(bp->hwrm_dma_pool);
4598 	bp->hwrm_dma_pool = NULL;
4599 
4600 	rcu_read_lock();
4601 	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4602 		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4603 	rcu_read_unlock();
4604 }
4605 
4606 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4607 {
4608 	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4609 					    BNXT_HWRM_DMA_SIZE,
4610 					    BNXT_HWRM_DMA_ALIGN, 0);
4611 	if (!bp->hwrm_dma_pool)
4612 		return -ENOMEM;
4613 
4614 	INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4615 
4616 	return 0;
4617 }
4618 
4619 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4620 {
4621 	kfree(stats->hw_masks);
4622 	stats->hw_masks = NULL;
4623 	kfree(stats->sw_stats);
4624 	stats->sw_stats = NULL;
4625 	if (stats->hw_stats) {
4626 		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4627 				  stats->hw_stats_map);
4628 		stats->hw_stats = NULL;
4629 	}
4630 }
4631 
4632 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4633 				bool alloc_masks)
4634 {
4635 	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4636 					     &stats->hw_stats_map, GFP_KERNEL);
4637 	if (!stats->hw_stats)
4638 		return -ENOMEM;
4639 
4640 	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4641 	if (!stats->sw_stats)
4642 		goto stats_mem_err;
4643 
4644 	if (alloc_masks) {
4645 		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4646 		if (!stats->hw_masks)
4647 			goto stats_mem_err;
4648 	}
4649 	return 0;
4650 
4651 stats_mem_err:
4652 	bnxt_free_stats_mem(bp, stats);
4653 	return -ENOMEM;
4654 }
4655 
4656 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4657 {
4658 	int i;
4659 
4660 	for (i = 0; i < count; i++)
4661 		mask_arr[i] = mask;
4662 }
4663 
4664 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4665 {
4666 	int i;
4667 
4668 	for (i = 0; i < count; i++)
4669 		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4670 }
4671 
4672 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4673 				    struct bnxt_stats_mem *stats)
4674 {
4675 	struct hwrm_func_qstats_ext_output *resp;
4676 	struct hwrm_func_qstats_ext_input *req;
4677 	__le64 *hw_masks;
4678 	int rc;
4679 
4680 	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4681 	    !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4682 		return -EOPNOTSUPP;
4683 
4684 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4685 	if (rc)
4686 		return rc;
4687 
4688 	req->fid = cpu_to_le16(0xffff);
4689 	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4690 
4691 	resp = hwrm_req_hold(bp, req);
4692 	rc = hwrm_req_send(bp, req);
4693 	if (!rc) {
4694 		hw_masks = &resp->rx_ucast_pkts;
4695 		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4696 	}
4697 	hwrm_req_drop(bp, req);
4698 	return rc;
4699 }
4700 
4701 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4702 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4703 
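/* Learn the valid width of each hardware counter.  Ask the firmware for
 * the counter masks and fall back to fixed-width masks (48 or 40 bits, or
 * full 64 bits on older chips) when the query is not supported.  Done for
 * the per-ring stats and, if present, the port and extended port stats.
 */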
4704 static void bnxt_init_stats(struct bnxt *bp)
4705 {
4706 	struct bnxt_napi *bnapi = bp->bnapi[0];
4707 	struct bnxt_cp_ring_info *cpr;
4708 	struct bnxt_stats_mem *stats;
4709 	__le64 *rx_stats, *tx_stats;
4710 	int rc, rx_count, tx_count;
4711 	u64 *rx_masks, *tx_masks;
4712 	u64 mask;
4713 	u8 flags;
4714 
4715 	cpr = &bnapi->cp_ring;
4716 	stats = &cpr->stats;
4717 	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4718 	if (rc) {
4719 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4720 			mask = (1ULL << 48) - 1;
4721 		else
4722 			mask = -1ULL;
4723 		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4724 	}
4725 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
4726 		stats = &bp->port_stats;
4727 		rx_stats = stats->hw_stats;
4728 		rx_masks = stats->hw_masks;
4729 		rx_count = sizeof(struct rx_port_stats) / 8;
4730 		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4731 		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4732 		tx_count = sizeof(struct tx_port_stats) / 8;
4733 
4734 		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4735 		rc = bnxt_hwrm_port_qstats(bp, flags);
4736 		if (rc) {
4737 			mask = (1ULL << 40) - 1;
4738 
4739 			bnxt_fill_masks(rx_masks, mask, rx_count);
4740 			bnxt_fill_masks(tx_masks, mask, tx_count);
4741 		} else {
4742 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4743 			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4744 			bnxt_hwrm_port_qstats(bp, 0);
4745 		}
4746 	}
4747 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4748 		stats = &bp->rx_port_stats_ext;
4749 		rx_stats = stats->hw_stats;
4750 		rx_masks = stats->hw_masks;
4751 		rx_count = sizeof(struct rx_port_stats_ext) / 8;
4752 		stats = &bp->tx_port_stats_ext;
4753 		tx_stats = stats->hw_stats;
4754 		tx_masks = stats->hw_masks;
4755 		tx_count = sizeof(struct tx_port_stats_ext) / 8;
4756 
4757 		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4758 		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4759 		if (rc) {
4760 			mask = (1ULL << 40) - 1;
4761 
4762 			bnxt_fill_masks(rx_masks, mask, rx_count);
4763 			if (tx_stats)
4764 				bnxt_fill_masks(tx_masks, mask, tx_count);
4765 		} else {
4766 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4767 			if (tx_stats)
4768 				bnxt_copy_hw_masks(tx_masks, tx_stats,
4769 						   tx_count);
4770 			bnxt_hwrm_port_qstats_ext(bp, 0);
4771 		}
4772 	}
4773 }
4774 
4775 static void bnxt_free_port_stats(struct bnxt *bp)
4776 {
4777 	bp->flags &= ~BNXT_FLAG_PORT_STATS;
4778 	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4779 
4780 	bnxt_free_stats_mem(bp, &bp->port_stats);
4781 	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4782 	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4783 }
4784 
4785 static void bnxt_free_ring_stats(struct bnxt *bp)
4786 {
4787 	int i;
4788 
4789 	if (!bp->bnapi)
4790 		return;
4791 
4792 	for (i = 0; i < bp->cp_nr_rings; i++) {
4793 		struct bnxt_napi *bnapi = bp->bnapi[i];
4794 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4795 
4796 		bnxt_free_stats_mem(bp, &cpr->stats);
4797 
4798 		kfree(cpr->sw_stats);
4799 		cpr->sw_stats = NULL;
4800 	}
4801 }
4802 
4803 static int bnxt_alloc_stats(struct bnxt *bp)
4804 {
4805 	u32 size, i;
4806 	int rc;
4807 
4808 	size = bp->hw_ring_stats_size;
4809 
4810 	for (i = 0; i < bp->cp_nr_rings; i++) {
4811 		struct bnxt_napi *bnapi = bp->bnapi[i];
4812 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4813 
4814 		cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
4815 		if (!cpr->sw_stats)
4816 			return -ENOMEM;
4817 
4818 		cpr->stats.len = size;
4819 		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4820 		if (rc)
4821 			return rc;
4822 
4823 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4824 	}
4825 
4826 	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4827 		return 0;
4828 
4829 	if (bp->port_stats.hw_stats)
4830 		goto alloc_ext_stats;
4831 
4832 	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4833 	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4834 	if (rc)
4835 		return rc;
4836 
4837 	bp->flags |= BNXT_FLAG_PORT_STATS;
4838 
4839 alloc_ext_stats:
4840 	/* Display extended statistics only if FW supports it */
4841 	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4842 		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4843 			return 0;
4844 
4845 	if (bp->rx_port_stats_ext.hw_stats)
4846 		goto alloc_tx_ext_stats;
4847 
4848 	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4849 	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4850 	/* Extended stats are optional */
4851 	if (rc)
4852 		return 0;
4853 
4854 alloc_tx_ext_stats:
4855 	if (bp->tx_port_stats_ext.hw_stats)
4856 		return 0;
4857 
4858 	if (bp->hwrm_spec_code >= 0x10902 ||
4859 	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4860 		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4861 		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4862 		/* Extended stats are optional */
4863 		if (rc)
4864 			return 0;
4865 	}
4866 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4867 	return 0;
4868 }
4869 
4870 static void bnxt_clear_ring_indices(struct bnxt *bp)
4871 {
4872 	int i, j;
4873 
4874 	if (!bp->bnapi)
4875 		return;
4876 
4877 	for (i = 0; i < bp->cp_nr_rings; i++) {
4878 		struct bnxt_napi *bnapi = bp->bnapi[i];
4879 		struct bnxt_cp_ring_info *cpr;
4880 		struct bnxt_rx_ring_info *rxr;
4881 		struct bnxt_tx_ring_info *txr;
4882 
4883 		if (!bnapi)
4884 			continue;
4885 
4886 		cpr = &bnapi->cp_ring;
4887 		cpr->cp_raw_cons = 0;
4888 
4889 		bnxt_for_each_napi_tx(j, bnapi, txr) {
4890 			txr->tx_prod = 0;
4891 			txr->tx_cons = 0;
4892 			txr->tx_hw_cons = 0;
4893 		}
4894 
4895 		rxr = bnapi->rx_ring;
4896 		if (rxr) {
4897 			rxr->rx_prod = 0;
4898 			rxr->rx_agg_prod = 0;
4899 			rxr->rx_sw_agg_prod = 0;
4900 			rxr->rx_next_cons = 0;
4901 		}
4902 		bnapi->events = 0;
4903 	}
4904 }
4905 
4906 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4907 {
4908 	u8 type = fltr->type, flags = fltr->flags;
4909 
4910 	INIT_LIST_HEAD(&fltr->list);
4911 	if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
4912 	    (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
4913 		list_add_tail(&fltr->list, &bp->usr_fltr_list);
4914 }
4915 
4916 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4917 {
4918 	if (!list_empty(&fltr->list))
4919 		list_del_init(&fltr->list);
4920 }
4921 
4922 void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
4923 {
4924 	struct bnxt_filter_base *usr_fltr, *tmp;
4925 
4926 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
4927 		if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
4928 			continue;
4929 		bnxt_del_one_usr_fltr(bp, usr_fltr);
4930 	}
4931 }
4932 
4933 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4934 {
4935 	hlist_del(&fltr->hash);
4936 	bnxt_del_one_usr_fltr(bp, fltr);
4937 	if (fltr->flags) {
4938 		clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
4939 		bp->ntp_fltr_count--;
4940 	}
4941 	kfree(fltr);
4942 }
4943 
4944 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
4945 {
4946 	int i;
4947 
4948 	/* We are under rtnl_lock and all our NAPIs have been disabled, so
4949 	 * it is safe to delete the hash table.
4950 	 */
4951 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4952 		struct hlist_head *head;
4953 		struct hlist_node *tmp;
4954 		struct bnxt_ntuple_filter *fltr;
4955 
4956 		head = &bp->ntp_fltr_hash_tbl[i];
4957 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
4958 			bnxt_del_l2_filter(bp, fltr->l2_fltr);
4959 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
4960 				     !list_empty(&fltr->base.list)))
4961 				continue;
4962 			bnxt_del_fltr(bp, &fltr->base);
4963 		}
4964 	}
4965 	if (!all)
4966 		return;
4967 
4968 	bitmap_free(bp->ntp_fltr_bmap);
4969 	bp->ntp_fltr_bmap = NULL;
4970 	bp->ntp_fltr_count = 0;
4971 }
4972 
4973 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4974 {
4975 	int i, rc = 0;
4976 
4977 	if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
4978 		return 0;
4979 
4980 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4981 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4982 
4983 	bp->ntp_fltr_count = 0;
4984 	bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
4985 
4986 	if (!bp->ntp_fltr_bmap)
4987 		rc = -ENOMEM;
4988 
4989 	return rc;
4990 }
4991 
4992 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
4993 {
4994 	int i;
4995 
4996 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
4997 		struct hlist_head *head;
4998 		struct hlist_node *tmp;
4999 		struct bnxt_l2_filter *fltr;
5000 
5001 		head = &bp->l2_fltr_hash_tbl[i];
5002 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5003 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5004 				     !list_empty(&fltr->base.list)))
5005 				continue;
5006 			bnxt_del_fltr(bp, &fltr->base);
5007 		}
5008 	}
5009 }
5010 
5011 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5012 {
5013 	int i;
5014 
5015 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5016 		INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5017 	get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5018 }
5019 
5020 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5021 {
5022 	bnxt_free_vnic_attributes(bp);
5023 	bnxt_free_tx_rings(bp);
5024 	bnxt_free_rx_rings(bp);
5025 	bnxt_free_cp_rings(bp);
5026 	bnxt_free_all_cp_arrays(bp);
5027 	bnxt_free_ntp_fltrs(bp, false);
5028 	bnxt_free_l2_filters(bp, false);
5029 	if (irq_re_init) {
5030 		bnxt_free_ring_stats(bp);
5031 		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5032 		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5033 			bnxt_free_port_stats(bp);
5034 		bnxt_free_ring_grps(bp);
5035 		bnxt_free_vnics(bp);
5036 		kfree(bp->tx_ring_map);
5037 		bp->tx_ring_map = NULL;
5038 		kfree(bp->tx_ring);
5039 		bp->tx_ring = NULL;
5040 		kfree(bp->rx_ring);
5041 		bp->rx_ring = NULL;
5042 		kfree(bp->bnapi);
5043 		bp->bnapi = NULL;
5044 	} else {
5045 		bnxt_clear_ring_indices(bp);
5046 	}
5047 }
5048 
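/* Allocate (or re-allocate) all driver memory for the current channel
 * configuration: the bnapi structures, RX/TX ring info arrays, statistics,
 * filter tables and VNICs when IRQs are being re-initialized, followed by
 * the completion, RX and TX rings and the VNIC attributes.
 */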
5049 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5050 {
5051 	int i, j, rc, size, arr_size;
5052 	void *bnapi;
5053 
5054 	if (irq_re_init) {
5055 		/* Allocate bnapi mem pointer array and mem block for
5056 		 * all queues
5057 		 */
5058 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5059 				bp->cp_nr_rings);
5060 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5061 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5062 		if (!bnapi)
5063 			return -ENOMEM;
5064 
5065 		bp->bnapi = bnapi;
5066 		bnapi += arr_size;
5067 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5068 			bp->bnapi[i] = bnapi;
5069 			bp->bnapi[i]->index = i;
5070 			bp->bnapi[i]->bp = bp;
5071 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5072 				struct bnxt_cp_ring_info *cpr =
5073 					&bp->bnapi[i]->cp_ring;
5074 
5075 				cpr->cp_ring_struct.ring_mem.flags =
5076 					BNXT_RMEM_RING_PTE_FLAG;
5077 			}
5078 		}
5079 
5080 		bp->rx_ring = kcalloc(bp->rx_nr_rings,
5081 				      sizeof(struct bnxt_rx_ring_info),
5082 				      GFP_KERNEL);
5083 		if (!bp->rx_ring)
5084 			return -ENOMEM;
5085 
5086 		for (i = 0; i < bp->rx_nr_rings; i++) {
5087 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5088 
5089 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5090 				rxr->rx_ring_struct.ring_mem.flags =
5091 					BNXT_RMEM_RING_PTE_FLAG;
5092 				rxr->rx_agg_ring_struct.ring_mem.flags =
5093 					BNXT_RMEM_RING_PTE_FLAG;
5094 			} else {
5095 				rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5096 			}
5097 			rxr->bnapi = bp->bnapi[i];
5098 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5099 		}
5100 
5101 		bp->tx_ring = kcalloc(bp->tx_nr_rings,
5102 				      sizeof(struct bnxt_tx_ring_info),
5103 				      GFP_KERNEL);
5104 		if (!bp->tx_ring)
5105 			return -ENOMEM;
5106 
5107 		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5108 					  GFP_KERNEL);
5109 
5110 		if (!bp->tx_ring_map)
5111 			return -ENOMEM;
5112 
5113 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5114 			j = 0;
5115 		else
5116 			j = bp->rx_nr_rings;
5117 
5118 		for (i = 0; i < bp->tx_nr_rings; i++) {
5119 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5120 			struct bnxt_napi *bnapi2;
5121 
5122 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5123 				txr->tx_ring_struct.ring_mem.flags =
5124 					BNXT_RMEM_RING_PTE_FLAG;
5125 			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5126 			if (i >= bp->tx_nr_rings_xdp) {
5127 				int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5128 
5129 				bnapi2 = bp->bnapi[k];
5130 				txr->txq_index = i - bp->tx_nr_rings_xdp;
5131 				txr->tx_napi_idx =
5132 					BNXT_RING_TO_TC(bp, txr->txq_index);
5133 				bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5134 				bnapi2->tx_int = bnxt_tx_int;
5135 			} else {
5136 				bnapi2 = bp->bnapi[j];
5137 				bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5138 				bnapi2->tx_ring[0] = txr;
5139 				bnapi2->tx_int = bnxt_tx_int_xdp;
5140 				j++;
5141 			}
5142 			txr->bnapi = bnapi2;
5143 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5144 				txr->tx_cpr = &bnapi2->cp_ring;
5145 		}
5146 
5147 		rc = bnxt_alloc_stats(bp);
5148 		if (rc)
5149 			goto alloc_mem_err;
5150 		bnxt_init_stats(bp);
5151 
5152 		rc = bnxt_alloc_ntp_fltrs(bp);
5153 		if (rc)
5154 			goto alloc_mem_err;
5155 
5156 		rc = bnxt_alloc_vnics(bp);
5157 		if (rc)
5158 			goto alloc_mem_err;
5159 	}
5160 
5161 	rc = bnxt_alloc_all_cp_arrays(bp);
5162 	if (rc)
5163 		goto alloc_mem_err;
5164 
5165 	bnxt_init_ring_struct(bp);
5166 
5167 	rc = bnxt_alloc_rx_rings(bp);
5168 	if (rc)
5169 		goto alloc_mem_err;
5170 
5171 	rc = bnxt_alloc_tx_rings(bp);
5172 	if (rc)
5173 		goto alloc_mem_err;
5174 
5175 	rc = bnxt_alloc_cp_rings(bp);
5176 	if (rc)
5177 		goto alloc_mem_err;
5178 
5179 	bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5180 						  BNXT_VNIC_MCAST_FLAG |
5181 						  BNXT_VNIC_UCAST_FLAG;
5182 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5183 		bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5184 			BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5185 
5186 	rc = bnxt_alloc_vnic_attributes(bp);
5187 	if (rc)
5188 		goto alloc_mem_err;
5189 	return 0;
5190 
5191 alloc_mem_err:
5192 	bnxt_free_mem(bp, true);
5193 	return rc;
5194 }
5195 
5196 static void bnxt_disable_int(struct bnxt *bp)
5197 {
5198 	int i;
5199 
5200 	if (!bp->bnapi)
5201 		return;
5202 
5203 	for (i = 0; i < bp->cp_nr_rings; i++) {
5204 		struct bnxt_napi *bnapi = bp->bnapi[i];
5205 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5206 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5207 
5208 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
5209 			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5210 	}
5211 }
5212 
5213 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5214 {
5215 	struct bnxt_napi *bnapi = bp->bnapi[n];
5216 	struct bnxt_cp_ring_info *cpr;
5217 
5218 	cpr = &bnapi->cp_ring;
5219 	return cpr->cp_ring_struct.map_idx;
5220 }
5221 
5222 static void bnxt_disable_int_sync(struct bnxt *bp)
5223 {
5224 	int i;
5225 
5226 	if (!bp->irq_tbl)
5227 		return;
5228 
5229 	atomic_inc(&bp->intr_sem);
5230 
5231 	bnxt_disable_int(bp);
5232 	for (i = 0; i < bp->cp_nr_rings; i++) {
5233 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5234 
5235 		synchronize_irq(bp->irq_tbl[map_idx].vector);
5236 	}
5237 }
5238 
5239 static void bnxt_enable_int(struct bnxt *bp)
5240 {
5241 	int i;
5242 
5243 	atomic_set(&bp->intr_sem, 0);
5244 	for (i = 0; i < bp->cp_nr_rings; i++) {
5245 		struct bnxt_napi *bnapi = bp->bnapi[i];
5246 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5247 
5248 		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5249 	}
5250 }
5251 
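/* Register the driver with the firmware: advertise the OS type, driver
 * version and supported capabilities, forward the list of VF commands the
 * PF is willing to handle, and subscribe to the async events the driver
 * processes.
 */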
5252 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5253 			    bool async_only)
5254 {
5255 	DECLARE_BITMAP(async_events_bmap, 256);
5256 	u32 *events = (u32 *)async_events_bmap;
5257 	struct hwrm_func_drv_rgtr_output *resp;
5258 	struct hwrm_func_drv_rgtr_input *req;
5259 	u32 flags;
5260 	int rc, i;
5261 
5262 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5263 	if (rc)
5264 		return rc;
5265 
5266 	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5267 				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
5268 				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5269 
5270 	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5271 	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5272 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5273 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5274 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5275 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5276 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5277 	req->flags = cpu_to_le32(flags);
5278 	req->ver_maj_8b = DRV_VER_MAJ;
5279 	req->ver_min_8b = DRV_VER_MIN;
5280 	req->ver_upd_8b = DRV_VER_UPD;
5281 	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5282 	req->ver_min = cpu_to_le16(DRV_VER_MIN);
5283 	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5284 
5285 	if (BNXT_PF(bp)) {
5286 		u32 data[8];
5287 		int i;
5288 
5289 		memset(data, 0, sizeof(data));
5290 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5291 			u16 cmd = bnxt_vf_req_snif[i];
5292 			unsigned int bit, idx;
5293 
5294 			idx = cmd / 32;
5295 			bit = cmd % 32;
5296 			data[idx] |= 1 << bit;
5297 		}
5298 
5299 		for (i = 0; i < 8; i++)
5300 			req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5301 
5302 		req->enables |=
5303 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5304 	}
5305 
5306 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5307 		req->flags |= cpu_to_le32(
5308 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5309 
5310 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
5311 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5312 		u16 event_id = bnxt_async_events_arr[i];
5313 
5314 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5315 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5316 			continue;
5317 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5318 		    !bp->ptp_cfg)
5319 			continue;
5320 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
5321 	}
5322 	if (bmap && bmap_size) {
5323 		for (i = 0; i < bmap_size; i++) {
5324 			if (test_bit(i, bmap))
5325 				__set_bit(i, async_events_bmap);
5326 		}
5327 	}
5328 	for (i = 0; i < 8; i++)
5329 		req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5330 
5331 	if (async_only)
5332 		req->enables =
5333 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5334 
5335 	resp = hwrm_req_hold(bp, req);
5336 	rc = hwrm_req_send(bp, req);
5337 	if (!rc) {
5338 		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5339 		if (resp->flags &
5340 		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5341 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5342 	}
5343 	hwrm_req_drop(bp, req);
5344 	return rc;
5345 }
5346 
5347 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5348 {
5349 	struct hwrm_func_drv_unrgtr_input *req;
5350 	int rc;
5351 
5352 	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5353 		return 0;
5354 
5355 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5356 	if (rc)
5357 		return rc;
5358 	return hwrm_req_send(bp, req);
5359 }
5360 
5361 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5362 
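/* Tell the firmware to stop steering the given UDP tunnel type to its
 * destination port, clear the cached port and firmware ID, and re-apply
 * the TPA configuration afterwards.
 */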
5363 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5364 {
5365 	struct hwrm_tunnel_dst_port_free_input *req;
5366 	int rc;
5367 
5368 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5369 	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5370 		return 0;
5371 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5372 	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5373 		return 0;
5374 
5375 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5376 	if (rc)
5377 		return rc;
5378 
5379 	req->tunnel_type = tunnel_type;
5380 
5381 	switch (tunnel_type) {
5382 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5383 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5384 		bp->vxlan_port = 0;
5385 		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5386 		break;
5387 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5388 		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5389 		bp->nge_port = 0;
5390 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5391 		break;
5392 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5393 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5394 		bp->vxlan_gpe_port = 0;
5395 		bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5396 		break;
5397 	default:
5398 		break;
5399 	}
5400 
5401 	rc = hwrm_req_send(bp, req);
5402 	if (rc)
5403 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5404 			   rc);
5405 	if (bp->flags & BNXT_FLAG_TPA)
5406 		bnxt_set_tpa(bp, true);
5407 	return rc;
5408 }
5409 
5410 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5411 					   u8 tunnel_type)
5412 {
5413 	struct hwrm_tunnel_dst_port_alloc_output *resp;
5414 	struct hwrm_tunnel_dst_port_alloc_input *req;
5415 	int rc;
5416 
5417 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5418 	if (rc)
5419 		return rc;
5420 
5421 	req->tunnel_type = tunnel_type;
5422 	req->tunnel_dst_port_val = port;
5423 
5424 	resp = hwrm_req_hold(bp, req);
5425 	rc = hwrm_req_send(bp, req);
5426 	if (rc) {
5427 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5428 			   rc);
5429 		goto err_out;
5430 	}
5431 
5432 	switch (tunnel_type) {
5433 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5434 		bp->vxlan_port = port;
5435 		bp->vxlan_fw_dst_port_id =
5436 			le16_to_cpu(resp->tunnel_dst_port_id);
5437 		break;
5438 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5439 		bp->nge_port = port;
5440 		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5441 		break;
5442 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5443 		bp->vxlan_gpe_port = port;
5444 		bp->vxlan_gpe_fw_dst_port_id =
5445 			le16_to_cpu(resp->tunnel_dst_port_id);
5446 		break;
5447 	default:
5448 		break;
5449 	}
5450 	if (bp->flags & BNXT_FLAG_TPA)
5451 		bnxt_set_tpa(bp, true);
5452 
5453 err_out:
5454 	hwrm_req_drop(bp, req);
5455 	return rc;
5456 }
5457 
5458 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5459 {
5460 	struct hwrm_cfa_l2_set_rx_mask_input *req;
5461 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5462 	int rc;
5463 
5464 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5465 	if (rc)
5466 		return rc;
5467 
5468 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5469 	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5470 		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5471 		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5472 	}
5473 	req->mask = cpu_to_le32(vnic->rx_mask);
5474 	return hwrm_req_send_silent(bp, req);
5475 }
5476 
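/* Drop a reference on an L2 filter.  On the last reference, unlink it from
 * the hash table and the user filter list and free it after an RCU grace
 * period.
 */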
5477 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5478 {
5479 	if (!atomic_dec_and_test(&fltr->refcnt))
5480 		return;
5481 	spin_lock_bh(&bp->ntp_fltr_lock);
5482 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5483 		spin_unlock_bh(&bp->ntp_fltr_lock);
5484 		return;
5485 	}
5486 	hlist_del_rcu(&fltr->base.hash);
5487 	bnxt_del_one_usr_fltr(bp, &fltr->base);
5488 	if (fltr->base.flags) {
5489 		clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5490 		bp->ntp_fltr_count--;
5491 	}
5492 	spin_unlock_bh(&bp->ntp_fltr_lock);
5493 	kfree_rcu(fltr, base.rcu);
5494 }
5495 
5496 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5497 						      struct bnxt_l2_key *key,
5498 						      u32 idx)
5499 {
5500 	struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5501 	struct bnxt_l2_filter *fltr;
5502 
5503 	hlist_for_each_entry_rcu(fltr, head, base.hash) {
5504 		struct bnxt_l2_key *l2_key = &fltr->l2_key;
5505 
5506 		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5507 		    l2_key->vlan == key->vlan)
5508 			return fltr;
5509 	}
5510 	return NULL;
5511 }
5512 
5513 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5514 						    struct bnxt_l2_key *key,
5515 						    u32 idx)
5516 {
5517 	struct bnxt_l2_filter *fltr = NULL;
5518 
5519 	rcu_read_lock();
5520 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5521 	if (fltr)
5522 		atomic_inc(&fltr->refcnt);
5523 	rcu_read_unlock();
5524 	return fltr;
5525 }
5526 
5527 #define BNXT_IPV4_4TUPLE(bp, fkeys)					\
5528 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5529 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||	\
5530 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5531 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5532 
5533 #define BNXT_IPV6_4TUPLE(bp, fkeys)					\
5534 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5535 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||	\
5536 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5537 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5538 
5539 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5540 {
5541 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5542 		if (BNXT_IPV4_4TUPLE(bp, fkeys))
5543 			return sizeof(fkeys->addrs.v4addrs) +
5544 			       sizeof(fkeys->ports);
5545 
5546 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5547 			return sizeof(fkeys->addrs.v4addrs);
5548 	}
5549 
5550 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5551 		if (BNXT_IPV6_4TUPLE(bp, fkeys))
5552 			return sizeof(fkeys->addrs.v6addrs) +
5553 			       sizeof(fkeys->ports);
5554 
5555 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5556 			return sizeof(fkeys->addrs.v6addrs);
5557 	}
5558 
5559 	return 0;
5560 }
5561 
5562 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5563 			 const unsigned char *key)
5564 {
5565 	u64 prefix = bp->toeplitz_prefix, hash = 0;
5566 	struct bnxt_ipv4_tuple tuple4;
5567 	struct bnxt_ipv6_tuple tuple6;
5568 	int i, j, len = 0;
5569 	u8 *four_tuple;
5570 
5571 	len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5572 	if (!len)
5573 		return 0;
5574 
5575 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5576 		tuple4.v4addrs = fkeys->addrs.v4addrs;
5577 		tuple4.ports = fkeys->ports;
5578 		four_tuple = (unsigned char *)&tuple4;
5579 	} else {
5580 		tuple6.v6addrs = fkeys->addrs.v6addrs;
5581 		tuple6.ports = fkeys->ports;
5582 		four_tuple = (unsigned char *)&tuple6;
5583 	}
5584 
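	/* Standard Toeplitz hash computed in software: for every input bit
	 * that is set, XOR the current 64-bit window of the hash key into
	 * the hash.  The window shifts left one bit per input bit and is
	 * refilled from key[] starting at byte 8 (bp->toeplitz_prefix is
	 * assumed to hold the first 8 key bytes).
	 */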
5585 	for (i = 0, j = 8; i < len; i++, j++) {
5586 		u8 byte = four_tuple[i];
5587 		int bit;
5588 
5589 		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5590 			if (byte & 0x80)
5591 				hash ^= prefix;
5592 		}
5593 		prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5594 	}
5595 
5596 	/* The valid part of the hash is in the upper 32 bits. */
5597 	return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5598 }
5599 
5600 #ifdef CONFIG_RFS_ACCEL
5601 static struct bnxt_l2_filter *
5602 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5603 {
5604 	struct bnxt_l2_filter *fltr;
5605 	u32 idx;
5606 
5607 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5608 	      BNXT_L2_FLTR_HASH_MASK;
5609 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
5610 	return fltr;
5611 }
5612 #endif
5613 
5614 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5615 			       struct bnxt_l2_key *key, u32 idx)
5616 {
5617 	struct hlist_head *head;
5618 
5619 	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5620 	fltr->l2_key.vlan = key->vlan;
5621 	fltr->base.type = BNXT_FLTR_TYPE_L2;
5622 	if (fltr->base.flags) {
5623 		int bit_id;
5624 
5625 		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5626 						 bp->max_fltr, 0);
5627 		if (bit_id < 0)
5628 			return -ENOMEM;
5629 		fltr->base.sw_id = (u16)bit_id;
5630 		bp->ntp_fltr_count++;
5631 	}
5632 	head = &bp->l2_fltr_hash_tbl[idx];
5633 	hlist_add_head_rcu(&fltr->base.hash, head);
5634 	bnxt_insert_usr_fltr(bp, &fltr->base);
5635 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
5636 	atomic_set(&fltr->refcnt, 1);
5637 	return 0;
5638 }
5639 
5640 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5641 						   struct bnxt_l2_key *key,
5642 						   gfp_t gfp)
5643 {
5644 	struct bnxt_l2_filter *fltr;
5645 	u32 idx;
5646 	int rc;
5647 
5648 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5649 	      BNXT_L2_FLTR_HASH_MASK;
5650 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
5651 	if (fltr)
5652 		return fltr;
5653 
5654 	fltr = kzalloc(sizeof(*fltr), gfp);
5655 	if (!fltr)
5656 		return ERR_PTR(-ENOMEM);
5657 	spin_lock_bh(&bp->ntp_fltr_lock);
5658 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5659 	spin_unlock_bh(&bp->ntp_fltr_lock);
5660 	if (rc) {
5661 		bnxt_del_l2_filter(bp, fltr);
5662 		fltr = ERR_PTR(rc);
5663 	}
5664 	return fltr;
5665 }
5666 
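/* Allocate a new L2 filter with the given flags.  Unlike
 * bnxt_alloc_l2_filter(), an existing matching filter is treated as an
 * error (-EEXIST) instead of being reused.
 */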
5667 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
5668 						struct bnxt_l2_key *key,
5669 						u16 flags)
5670 {
5671 	struct bnxt_l2_filter *fltr;
5672 	u32 idx;
5673 	int rc;
5674 
5675 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5676 	      BNXT_L2_FLTR_HASH_MASK;
5677 	spin_lock_bh(&bp->ntp_fltr_lock);
5678 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5679 	if (fltr) {
5680 		fltr = ERR_PTR(-EEXIST);
5681 		goto l2_filter_exit;
5682 	}
5683 	fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
5684 	if (!fltr) {
5685 		fltr = ERR_PTR(-ENOMEM);
5686 		goto l2_filter_exit;
5687 	}
5688 	fltr->base.flags = flags;
5689 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5690 	if (rc) {
5691 		spin_unlock_bh(&bp->ntp_fltr_lock);
5692 		bnxt_del_l2_filter(bp, fltr);
5693 		return ERR_PTR(rc);
5694 	}
5695 
5696 l2_filter_exit:
5697 	spin_unlock_bh(&bp->ntp_fltr_lock);
5698 	return fltr;
5699 }
5700 
5701 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
5702 {
5703 #ifdef CONFIG_BNXT_SRIOV
5704 	struct bnxt_vf_info *vf = &pf->vf[vf_idx];
5705 
5706 	return vf->fw_fid;
5707 #else
5708 	return INVALID_HW_RING_ID;
5709 #endif
5710 }
5711 
5712 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5713 {
5714 	struct hwrm_cfa_l2_filter_free_input *req;
5715 	u16 target_id = 0xffff;
5716 	int rc;
5717 
5718 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5719 		struct bnxt_pf_info *pf = &bp->pf;
5720 
5721 		if (fltr->base.vf_idx >= pf->active_vfs)
5722 			return -EINVAL;
5723 
5724 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5725 		if (target_id == INVALID_HW_RING_ID)
5726 			return -EINVAL;
5727 	}
5728 
5729 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5730 	if (rc)
5731 		return rc;
5732 
5733 	req->target_id = cpu_to_le16(target_id);
5734 	req->l2_filter_id = fltr->base.filter_id;
5735 	return hwrm_req_send(bp, req);
5736 }
5737 
5738 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5739 {
5740 	struct hwrm_cfa_l2_filter_alloc_output *resp;
5741 	struct hwrm_cfa_l2_filter_alloc_input *req;
5742 	u16 target_id = 0xffff;
5743 	int rc;
5744 
5745 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5746 		struct bnxt_pf_info *pf = &bp->pf;
5747 
5748 		if (fltr->base.vf_idx >= pf->active_vfs)
5749 			return -EINVAL;
5750 
5751 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5752 	}
5753 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5754 	if (rc)
5755 		return rc;
5756 
5757 	req->target_id = cpu_to_le16(target_id);
5758 	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5759 
5760 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5761 		req->flags |=
5762 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5763 	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
5764 	req->enables =
5765 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5766 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5767 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5768 	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
5769 	eth_broadcast_addr(req->l2_addr_mask);
5770 
5771 	if (fltr->l2_key.vlan) {
5772 		req->enables |=
5773 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
5774 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
5775 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
5776 		req->num_vlans = 1;
5777 		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
5778 		req->l2_ivlan_mask = cpu_to_le16(0xfff);
5779 	}
5780 
5781 	resp = hwrm_req_hold(bp, req);
5782 	rc = hwrm_req_send(bp, req);
5783 	if (!rc) {
5784 		fltr->base.filter_id = resp->l2_filter_id;
5785 		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
5786 	}
5787 	hwrm_req_drop(bp, req);
5788 	return rc;
5789 }
5790 
5791 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
5792 				     struct bnxt_ntuple_filter *fltr)
5793 {
5794 	struct hwrm_cfa_ntuple_filter_free_input *req;
5795 	int rc;
5796 
5797 	set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
5798 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
5799 	if (rc)
5800 		return rc;
5801 
5802 	req->ntuple_filter_id = fltr->base.filter_id;
5803 	return hwrm_req_send(bp, req);
5804 }
5805 
5806 #define BNXT_NTP_FLTR_FLAGS					\
5807 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
5808 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
5809 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
5810 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
5811 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
5812 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
5813 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
5814 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
5815 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
5816 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
5817 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
5818 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
5819 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
5820 
5821 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
5822 		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
5823 
5824 void bnxt_fill_ipv6_mask(__be32 mask[4])
5825 {
5826 	int i;
5827 
5828 	for (i = 0; i < 4; i++)
5829 		mask[i] = cpu_to_be32(~0);
5830 }
5831 
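/* Select the destination for an ntuple (RFS) filter: the VNIC of a matching
 * RSS context, the dedicated ntuple VNIC plus an RX ring table index when the
 * device supports it, or the RX ring itself on older firmware.
 */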
5832 static void
5833 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
5834 			  struct hwrm_cfa_ntuple_filter_alloc_input *req,
5835 			  struct bnxt_ntuple_filter *fltr)
5836 {
5837 	struct bnxt_rss_ctx *rss_ctx, *tmp;
5838 	u16 rxq = fltr->base.rxq;
5839 
5840 	if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
5841 		list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
5842 			if (rss_ctx->index == fltr->base.fw_vnic_id) {
5843 				struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
5844 
5845 				req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5846 				break;
5847 			}
5848 		}
5849 		return;
5850 	}
5851 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
5852 		struct bnxt_vnic_info *vnic;
5853 		u32 enables;
5854 
5855 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
5856 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5857 		enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
5858 		req->enables |= cpu_to_le32(enables);
5859 		req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
5860 	} else {
5861 		u32 flags;
5862 
5863 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
5864 		req->flags |= cpu_to_le32(flags);
5865 		req->dst_id = cpu_to_le16(rxq);
5866 	}
5867 }
5868 
5869 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
5870 				      struct bnxt_ntuple_filter *fltr)
5871 {
5872 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
5873 	struct hwrm_cfa_ntuple_filter_alloc_input *req;
5874 	struct bnxt_flow_masks *masks = &fltr->fmasks;
5875 	struct flow_keys *keys = &fltr->fkeys;
5876 	struct bnxt_l2_filter *l2_fltr;
5877 	struct bnxt_vnic_info *vnic;
5878 	int rc;
5879 
5880 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
5881 	if (rc)
5882 		return rc;
5883 
5884 	l2_fltr = fltr->l2_fltr;
5885 	req->l2_filter_id = l2_fltr->base.filter_id;
5886 
5887 	if (fltr->base.flags & BNXT_ACT_DROP) {
5888 		req->flags =
5889 			cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
5890 	} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
5891 		bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
5892 	} else {
5893 		vnic = &bp->vnic_info[fltr->base.rxq + 1];
5894 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5895 	}
5896 	req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
5897 
5898 	req->ethertype = htons(ETH_P_IP);
5899 	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
5900 	req->ip_protocol = keys->basic.ip_proto;
5901 
5902 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
5903 		req->ethertype = htons(ETH_P_IPV6);
5904 		req->ip_addr_type =
5905 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
5906 		*(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
5907 		*(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
5908 		*(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
5909 		*(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
5910 	} else {
5911 		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
5912 		req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
5913 		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5914 		req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
5915 	}
5916 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
5917 		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5918 		req->tunnel_type =
5919 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5920 	}
5921 
5922 	req->src_port = keys->ports.src;
5923 	req->src_port_mask = masks->ports.src;
5924 	req->dst_port = keys->ports.dst;
5925 	req->dst_port_mask = masks->ports.dst;
5926 
5927 	resp = hwrm_req_hold(bp, req);
5928 	rc = hwrm_req_send(bp, req);
5929 	if (!rc)
5930 		fltr->base.filter_id = resp->ntuple_filter_id;
5931 	hwrm_req_drop(bp, req);
5932 	return rc;
5933 }
5934 
5935 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5936 				     const u8 *mac_addr)
5937 {
5938 	struct bnxt_l2_filter *fltr;
5939 	struct bnxt_l2_key key;
5940 	int rc;
5941 
5942 	ether_addr_copy(key.dst_mac_addr, mac_addr);
5943 	key.vlan = 0;
5944 	fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
5945 	if (IS_ERR(fltr))
5946 		return PTR_ERR(fltr);
5947 
5948 	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
5949 	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
5950 	if (rc)
5951 		bnxt_del_l2_filter(bp, fltr);
5952 	else
5953 		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
5954 	return rc;
5955 }
5956 
5957 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5958 {
5959 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5960 
5961 	/* Any associated ntuple filters will also be cleared by firmware. */
5962 	for (i = 0; i < num_of_vnics; i++) {
5963 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5964 
5965 		for (j = 0; j < vnic->uc_filter_count; j++) {
5966 			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
5967 
5968 			bnxt_hwrm_l2_filter_free(bp, fltr);
5969 			bnxt_del_l2_filter(bp, fltr);
5970 		}
5971 		vnic->uc_filter_count = 0;
5972 	}
5973 }
5974 
5975 #define BNXT_DFLT_TUNL_TPA_BMAP				\
5976 	(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |	\
5977 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |	\
5978 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
5979 
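/* Enable hardware TPA for tunneled traffic if the firmware supports it:
 * start from the default bitmap (GRE, IPv4 and IPv6 tunnel types) and add
 * VXLAN, VXLAN-GPE and Geneve when the corresponding destination ports are
 * configured.
 */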
5980 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
5981 					   struct hwrm_vnic_tpa_cfg_input *req)
5982 {
5983 	u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
5984 
5985 	if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
5986 		return;
5987 
5988 	if (bp->vxlan_port)
5989 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
5990 	if (bp->vxlan_gpe_port)
5991 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
5992 	if (bp->nge_port)
5993 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
5994 
5995 	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
5996 	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
5997 }
5998 
5999 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6000 			   u32 tpa_flags)
6001 {
6002 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6003 	struct hwrm_vnic_tpa_cfg_input *req;
6004 	int rc;
6005 
6006 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6007 		return 0;
6008 
6009 	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6010 	if (rc)
6011 		return rc;
6012 
6013 	if (tpa_flags) {
6014 		u16 mss = bp->dev->mtu - 40;
6015 		u32 nsegs, n, segs = 0, flags;
6016 
6017 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6018 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6019 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6020 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6021 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6022 		if (tpa_flags & BNXT_FLAG_GRO)
6023 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6024 
6025 		req->flags = cpu_to_le32(flags);
6026 
6027 		req->enables =
6028 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6029 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6030 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6031 
6032 		/* The maximum number of aggregation segments is in log2 units,
6033 		 * and the first packet is not counted in these units.
6034 		 */
6035 		if (mss <= BNXT_RX_PAGE_SIZE) {
6036 			n = BNXT_RX_PAGE_SIZE / mss;
6037 			nsegs = (MAX_SKB_FRAGS - 1) * n;
6038 		} else {
6039 			n = mss / BNXT_RX_PAGE_SIZE;
6040 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
6041 				n++;
6042 			nsegs = (MAX_SKB_FRAGS - n) / n;
6043 		}
6044 
6045 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6046 			segs = MAX_TPA_SEGS_P5;
6047 			max_aggs = bp->max_tpa;
6048 		} else {
6049 			segs = ilog2(nsegs);
6050 		}
6051 		req->max_agg_segs = cpu_to_le16(segs);
6052 		req->max_aggs = cpu_to_le16(max_aggs);
6053 
6054 		req->min_agg_len = cpu_to_le32(512);
6055 		bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6056 	}
6057 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6058 
6059 	return hwrm_req_send(bp, req);
6060 }
6061 
6062 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6063 {
6064 	struct bnxt_ring_grp_info *grp_info;
6065 
6066 	grp_info = &bp->grp_info[ring->grp_idx];
6067 	return grp_info->cp_fw_ring_id;
6068 }
6069 
6070 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6071 {
6072 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6073 		return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6074 	else
6075 		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6076 }
6077 
6078 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6079 {
6080 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6081 		return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6082 	else
6083 		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6084 }
6085 
6086 int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
6087 {
6088 	int entries;
6089 	u16 *tbl;
6090 
6091 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6092 		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6093 	else
6094 		entries = HW_HASH_INDEX_SIZE;
6095 
6096 	bp->rss_indir_tbl_entries = entries;
6097 	tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6098 	if (!tbl)
6099 		return -ENOMEM;
6100 
6101 	if (rss_ctx)
6102 		rss_ctx->rss_indir_tbl = tbl;
6103 	else
6104 		bp->rss_indir_tbl = tbl;
6105 
6106 	return 0;
6107 }
6108 
6109 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
6110 {
6111 	u16 max_rings, max_entries, pad, i;
6112 	u16 *rss_indir_tbl;
6113 
6114 	if (!bp->rx_nr_rings)
6115 		return;
6116 
6117 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6118 		max_rings = bp->rx_nr_rings - 1;
6119 	else
6120 		max_rings = bp->rx_nr_rings;
6121 
6122 	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6123 	if (rss_ctx)
6124 		rss_indir_tbl = &rss_ctx->rss_indir_tbl[0];
6125 	else
6126 		rss_indir_tbl = &bp->rss_indir_tbl[0];
6127 
6128 	for (i = 0; i < max_entries; i++)
6129 		rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6130 
6131 	pad = bp->rss_indir_tbl_entries - max_entries;
6132 	if (pad)
6133 		memset(&rss_indir_tbl[i], 0, pad * sizeof(u16));
6134 }
6135 
6136 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6137 {
6138 	u16 i, tbl_size, max_ring = 0;
6139 
6140 	if (!bp->rss_indir_tbl)
6141 		return 0;
6142 
6143 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6144 	for (i = 0; i < tbl_size; i++)
6145 		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6146 	return max_ring;
6147 }
6148 
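/* Return the number of RSS contexts required for the given RX ring count:
 * one per BNXT_RSS_TABLE_ENTRIES_P5 rings on P5+ chips, two on Nitro A0,
 * otherwise one.
 */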
6149 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6150 {
6151 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6152 		if (!rx_rings)
6153 			return 0;
6154 		return bnxt_calc_nr_ring_pages(rx_rings - 1,
6155 					       BNXT_RSS_TABLE_ENTRIES_P5);
6156 	}
6157 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6158 		return 2;
6159 	return 1;
6160 }
6161 
6162 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6163 {
6164 	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6165 	u16 i, j;
6166 
6167 	/* Fill the RSS indirection table with ring group ids */
6168 	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6169 		if (!no_rss)
6170 			j = bp->rss_indir_tbl[i];
6171 		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6172 	}
6173 }
6174 
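/* Fill the P5+ RSS table: each indirection table entry expands to a pair of
 * ring IDs, the RX ring followed by its associated completion ring.
 */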
6175 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6176 				    struct bnxt_vnic_info *vnic)
6177 {
6178 	__le16 *ring_tbl = vnic->rss_table;
6179 	struct bnxt_rx_ring_info *rxr;
6180 	u16 tbl_size, i;
6181 
6182 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6183 
6184 	for (i = 0; i < tbl_size; i++) {
6185 		u16 ring_id, j;
6186 
6187 		if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6188 			j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6189 		else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6190 			j = vnic->rss_ctx->rss_indir_tbl[i];
6191 		else
6192 			j = bp->rss_indir_tbl[i];
6193 		rxr = &bp->rx_ring[j];
6194 
6195 		ring_id = rxr->rx_ring_struct.fw_ring_id;
6196 		*ring_tbl++ = cpu_to_le16(ring_id);
6197 		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6198 		*ring_tbl++ = cpu_to_le16(ring_id);
6199 	}
6200 }
6201 
6202 static void
6203 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6204 			 struct bnxt_vnic_info *vnic)
6205 {
6206 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6207 		bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6208 		if (bp->flags & BNXT_FLAG_CHIP_P7)
6209 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6210 	} else {
6211 		bnxt_fill_hw_rss_tbl(bp, vnic);
6212 	}
6213 
6214 	if (bp->rss_hash_delta) {
6215 		req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6216 		if (bp->rss_hash_cfg & bp->rss_hash_delta)
6217 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6218 		else
6219 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6220 	} else {
6221 		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6222 	}
6223 	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6224 	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6225 	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6226 }
6227 
6228 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6229 				  bool set_rss)
6230 {
6231 	struct hwrm_vnic_rss_cfg_input *req;
6232 	int rc;
6233 
6234 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6235 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6236 		return 0;
6237 
6238 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6239 	if (rc)
6240 		return rc;
6241 
6242 	if (set_rss)
6243 		__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6244 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6245 	return hwrm_req_send(bp, req);
6246 }
6247 
6248 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6249 				     struct bnxt_vnic_info *vnic, bool set_rss)
6250 {
6251 	struct hwrm_vnic_rss_cfg_input *req;
6252 	dma_addr_t ring_tbl_map;
6253 	u32 i, nr_ctxs;
6254 	int rc;
6255 
6256 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6257 	if (rc)
6258 		return rc;
6259 
6260 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6261 	if (!set_rss)
6262 		return hwrm_req_send(bp, req);
6263 
6264 	__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6265 	ring_tbl_map = vnic->rss_table_dma_addr;
6266 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6267 
6268 	hwrm_req_hold(bp, req);
6269 	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6270 		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6271 		req->ring_table_pair_index = i;
6272 		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6273 		rc = hwrm_req_send(bp, req);
6274 		if (rc)
6275 			goto exit;
6276 	}
6277 
6278 exit:
6279 	hwrm_req_drop(bp, req);
6280 	return rc;
6281 }
6282 
6283 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6284 {
6285 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6286 	struct hwrm_vnic_rss_qcfg_output *resp;
6287 	struct hwrm_vnic_rss_qcfg_input *req;
6288 
6289 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6290 		return;
6291 
6292 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6293 	/* All contexts use the same hash_type; context 0 always exists */
6294 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6295 	resp = hwrm_req_hold(bp, req);
6296 	if (!hwrm_req_send(bp, req)) {
6297 		bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6298 		bp->rss_hash_delta = 0;
6299 	}
6300 	hwrm_req_drop(bp, req);
6301 }
6302 
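/* Configure VNIC buffer placement: jumbo placement is always enabled, and
 * header-data split (HDS) for IPv4/IPv6 is added when not in page mode,
 * using the RX copy threshold as both the jumbo and HDS thresholds.
 */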
6303 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6304 {
6305 	struct hwrm_vnic_plcmodes_cfg_input *req;
6306 	int rc;
6307 
6308 	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6309 	if (rc)
6310 		return rc;
6311 
6312 	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6313 	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6314 
6315 	if (BNXT_RX_PAGE_MODE(bp)) {
6316 		req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6317 	} else {
6318 		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6319 					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6320 		req->enables |=
6321 			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6322 		req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
6323 		req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
6324 	}
6325 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6326 	return hwrm_req_send(bp, req);
6327 }
6328 
6329 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6330 					struct bnxt_vnic_info *vnic,
6331 					u16 ctx_idx)
6332 {
6333 	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6334 
6335 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6336 		return;
6337 
6338 	req->rss_cos_lb_ctx_id =
6339 		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6340 
6341 	hwrm_req_send(bp, req);
6342 	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6343 }
6344 
6345 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6346 {
6347 	int i, j;
6348 
6349 	for (i = 0; i < bp->nr_vnics; i++) {
6350 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6351 
6352 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6353 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6354 				bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6355 		}
6356 	}
6357 	bp->rsscos_nr_ctxs = 0;
6358 }
6359 
6360 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6361 				    struct bnxt_vnic_info *vnic, u16 ctx_idx)
6362 {
6363 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6364 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6365 	int rc;
6366 
6367 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6368 	if (rc)
6369 		return rc;
6370 
6371 	resp = hwrm_req_hold(bp, req);
6372 	rc = hwrm_req_send(bp, req);
6373 	if (!rc)
6374 		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6375 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
6376 	hwrm_req_drop(bp, req);
6377 
6378 	return rc;
6379 }
6380 
6381 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6382 {
6383 	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6384 		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6385 	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6386 }
6387 
6388 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6389 {
6390 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6391 	struct hwrm_vnic_cfg_input *req;
6392 	unsigned int ring = 0, grp_idx;
6393 	u16 def_vlan = 0;
6394 	int rc;
6395 
6396 	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6397 	if (rc)
6398 		return rc;
6399 
6400 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6401 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6402 
6403 		req->default_rx_ring_id =
6404 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6405 		req->default_cmpl_ring_id =
6406 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6407 		req->enables =
6408 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6409 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6410 		goto vnic_mru;
6411 	}
6412 	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6413 	/* Only RSS is supported for now; COS & LB are TBD */
6414 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6415 		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6416 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6417 					   VNIC_CFG_REQ_ENABLES_MRU);
6418 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6419 		req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6420 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6421 					   VNIC_CFG_REQ_ENABLES_MRU);
6422 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6423 	} else {
6424 		req->rss_rule = cpu_to_le16(0xffff);
6425 	}
6426 
6427 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6428 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6429 		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6430 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6431 	} else {
6432 		req->cos_rule = cpu_to_le16(0xffff);
6433 	}
6434 
6435 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6436 		ring = 0;
6437 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6438 		ring = vnic->vnic_id - 1;
6439 	else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6440 		ring = bp->rx_nr_rings - 1;
6441 
6442 	grp_idx = bp->rx_ring[ring].bnapi->index;
6443 	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6444 	req->lb_rule = cpu_to_le16(0xffff);
6445 vnic_mru:
6446 	req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
6447 
6448 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6449 #ifdef CONFIG_BNXT_SRIOV
6450 	if (BNXT_VF(bp))
6451 		def_vlan = bp->vf.vlan;
6452 #endif
6453 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6454 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6455 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6456 		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6457 
6458 	return hwrm_req_send(bp, req);
6459 }
6460 
6461 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6462 				    struct bnxt_vnic_info *vnic)
6463 {
6464 	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6465 		struct hwrm_vnic_free_input *req;
6466 
6467 		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6468 			return;
6469 
6470 		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6471 
6472 		hwrm_req_send(bp, req);
6473 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
6474 	}
6475 }
6476 
6477 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6478 {
6479 	u16 i;
6480 
6481 	for (i = 0; i < bp->nr_vnics; i++)
6482 		bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6483 }
6484 
6485 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6486 			 unsigned int start_rx_ring_idx,
6487 			 unsigned int nr_rings)
6488 {
6489 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6490 	struct hwrm_vnic_alloc_output *resp;
6491 	struct hwrm_vnic_alloc_input *req;
6492 	int rc;
6493 
6494 	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6495 	if (rc)
6496 		return rc;
6497 
6498 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6499 		goto vnic_no_ring_grps;
6500 
6501 	/* map ring groups to this vnic */
6502 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6503 		grp_idx = bp->rx_ring[i].bnapi->index;
6504 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6505 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6506 				   j, nr_rings);
6507 			break;
6508 		}
6509 		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6510 	}
6511 
6512 vnic_no_ring_grps:
6513 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6514 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6515 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6516 		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6517 
6518 	resp = hwrm_req_hold(bp, req);
6519 	rc = hwrm_req_send(bp, req);
6520 	if (!rc)
6521 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6522 	hwrm_req_drop(bp, req);
6523 	return rc;
6524 }
6525 
6526 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6527 {
6528 	struct hwrm_vnic_qcaps_output *resp;
6529 	struct hwrm_vnic_qcaps_input *req;
6530 	int rc;
6531 
6532 	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6533 	bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6534 	bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6535 	if (bp->hwrm_spec_code < 0x10600)
6536 		return 0;
6537 
6538 	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6539 	if (rc)
6540 		return rc;
6541 
6542 	resp = hwrm_req_hold(bp, req);
6543 	rc = hwrm_req_send(bp, req);
6544 	if (!rc) {
6545 		u32 flags = le32_to_cpu(resp->flags);
6546 
6547 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6548 		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6549 			bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6550 		if (flags &
6551 		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6552 			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6553 
6554 		/* Older P5 fw before EXT_HW_STATS support did not set
6555 		 * VLAN_STRIP_CAP properly.
6556 		 */
6557 		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6558 		    (BNXT_CHIP_P5(bp) &&
6559 		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6560 			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6561 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6562 			bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6563 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6564 			bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6565 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6566 		if (bp->max_tpa_v2) {
6567 			if (BNXT_CHIP_P5(bp))
6568 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6569 			else
6570 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6571 		}
6572 		if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6573 			bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6574 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6575 			bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6576 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6577 			bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6578 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6579 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6580 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6581 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6582 	}
6583 	hwrm_req_drop(bp, req);
6584 	return rc;
6585 }
6586 
6587 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6588 {
6589 	struct hwrm_ring_grp_alloc_output *resp;
6590 	struct hwrm_ring_grp_alloc_input *req;
6591 	int rc;
6592 	u16 i;
6593 
6594 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6595 		return 0;
6596 
6597 	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6598 	if (rc)
6599 		return rc;
6600 
6601 	resp = hwrm_req_hold(bp, req);
6602 	for (i = 0; i < bp->rx_nr_rings; i++) {
6603 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6604 
6605 		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6606 		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6607 		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6608 		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6609 
6610 		rc = hwrm_req_send(bp, req);
6611 
6612 		if (rc)
6613 			break;
6614 
6615 		bp->grp_info[grp_idx].fw_grp_id =
6616 			le32_to_cpu(resp->ring_group_id);
6617 	}
6618 	hwrm_req_drop(bp, req);
6619 	return rc;
6620 }
6621 
6622 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
6623 {
6624 	struct hwrm_ring_grp_free_input *req;
6625 	u16 i;
6626 
6627 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
6628 		return;
6629 
6630 	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
6631 		return;
6632 
6633 	hwrm_req_hold(bp, req);
6634 	for (i = 0; i < bp->cp_nr_rings; i++) {
6635 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
6636 			continue;
6637 		req->ring_group_id =
6638 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
6639 
6640 		hwrm_req_send(bp, req);
6641 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
6642 	}
6643 	hwrm_req_drop(bp, req);
6644 }
6645 
6646 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
6647 				    struct bnxt_ring_struct *ring,
6648 				    u32 ring_type, u32 map_index)
6649 {
6650 	struct hwrm_ring_alloc_output *resp;
6651 	struct hwrm_ring_alloc_input *req;
6652 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
6653 	struct bnxt_ring_grp_info *grp_info;
6654 	int rc, err = 0;
6655 	u16 ring_id;
6656 
6657 	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6658 	if (rc)
6659 		goto exit;
6660 
6661 	req->enables = 0;
6662 	if (rmem->nr_pages > 1) {
6663 		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
6664 		/* Page size is in log2 units */
6665 		req->page_size = BNXT_PAGE_SHIFT;
6666 		req->page_tbl_depth = 1;
6667 	} else {
6668 		req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
6669 	}
6670 	req->fbo = 0;
6671 	/* Association of ring index with doorbell index and MSIX number */
6672 	req->logical_id = cpu_to_le16(map_index);
6673 
6674 	switch (ring_type) {
6675 	case HWRM_RING_ALLOC_TX: {
6676 		struct bnxt_tx_ring_info *txr;
6677 
6678 		txr = container_of(ring, struct bnxt_tx_ring_info,
6679 				   tx_ring_struct);
6680 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
6681 		/* Association of transmit ring with completion ring */
6682 		grp_info = &bp->grp_info[ring->grp_idx];
6683 		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
6684 		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
6685 		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6686 		req->queue_id = cpu_to_le16(ring->queue_id);
6687 		if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
6688 			req->cmpl_coal_cnt =
6689 				RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
6690 		break;
6691 	}
6692 	case HWRM_RING_ALLOC_RX:
6693 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6694 		req->length = cpu_to_le32(bp->rx_ring_mask + 1);
6695 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6696 			u16 flags = 0;
6697 
6698 			/* Association of rx ring with stats context */
6699 			grp_info = &bp->grp_info[ring->grp_idx];
6700 			req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
6701 			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6702 			req->enables |= cpu_to_le32(
6703 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6704 			if (NET_IP_ALIGN == 2)
6705 				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
6706 			req->flags = cpu_to_le16(flags);
6707 		}
6708 		break;
6709 	case HWRM_RING_ALLOC_AGG:
6710 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6711 			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
6712 			/* Association of agg ring with rx ring */
6713 			grp_info = &bp->grp_info[ring->grp_idx];
6714 			req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
6715 			req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
6716 			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6717 			req->enables |= cpu_to_le32(
6718 				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
6719 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6720 		} else {
6721 			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6722 		}
6723 		req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
6724 		break;
6725 	case HWRM_RING_ALLOC_CMPL:
6726 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
6727 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6728 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6729 			/* Association of cp ring with nq */
6730 			grp_info = &bp->grp_info[map_index];
6731 			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
6732 			req->cq_handle = cpu_to_le64(ring->handle);
6733 			req->enables |= cpu_to_le32(
6734 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
6735 		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
6736 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6737 		}
6738 		break;
6739 	case HWRM_RING_ALLOC_NQ:
6740 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
6741 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6742 		if (bp->flags & BNXT_FLAG_USING_MSIX)
6743 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6744 		break;
6745 	default:
6746 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
6747 			   ring_type);
6748 		return -1;
6749 	}
6750 
6751 	resp = hwrm_req_hold(bp, req);
6752 	rc = hwrm_req_send(bp, req);
6753 	err = le16_to_cpu(resp->error_code);
6754 	ring_id = le16_to_cpu(resp->ring_id);
6755 	hwrm_req_drop(bp, req);
6756 
6757 exit:
6758 	if (rc || err) {
6759 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
6760 			   ring_type, rc, err);
6761 		return -EIO;
6762 	}
6763 	ring->fw_ring_id = ring_id;
6764 	return rc;
6765 }
6766 
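/* Tell the firmware which completion ring should receive asynchronous event
 * notifications; the PF uses HWRM_FUNC_CFG and a VF uses HWRM_FUNC_VF_CFG.
 */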
6767 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
6768 {
6769 	int rc;
6770 
6771 	if (BNXT_PF(bp)) {
6772 		struct hwrm_func_cfg_input *req;
6773 
6774 		rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
6775 		if (rc)
6776 			return rc;
6777 
6778 		req->fid = cpu_to_le16(0xffff);
6779 		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6780 		req->async_event_cr = cpu_to_le16(idx);
6781 		return hwrm_req_send(bp, req);
6782 	} else {
6783 		struct hwrm_func_vf_cfg_input *req;
6784 
6785 		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
6786 		if (rc)
6787 			return rc;
6788 
6789 		req->enables =
6790 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6791 		req->async_event_cr = cpu_to_le16(idx);
6792 		return hwrm_req_send(bp, req);
6793 	}
6794 }
6795 
6796 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
6797 			     u32 ring_type)
6798 {
6799 	switch (ring_type) {
6800 	case HWRM_RING_ALLOC_TX:
6801 		db->db_ring_mask = bp->tx_ring_mask;
6802 		break;
6803 	case HWRM_RING_ALLOC_RX:
6804 		db->db_ring_mask = bp->rx_ring_mask;
6805 		break;
6806 	case HWRM_RING_ALLOC_AGG:
6807 		db->db_ring_mask = bp->rx_agg_ring_mask;
6808 		break;
6809 	case HWRM_RING_ALLOC_CMPL:
6810 	case HWRM_RING_ALLOC_NQ:
6811 		db->db_ring_mask = bp->cp_ring_mask;
6812 		break;
6813 	}
6814 	if (bp->flags & BNXT_FLAG_CHIP_P7) {
6815 		db->db_epoch_mask = db->db_ring_mask + 1;
6816 		db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
6817 	}
6818 }
6819 
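/* Set up the doorbell key and address for a ring.  P5+ chips use a 64-bit
 * key carrying the ring XID and a shared doorbell offset in BAR1, while
 * older chips use 32-bit keys with per-ring doorbells spaced 0x80 apart.
 */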
6820 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
6821 			u32 map_idx, u32 xid)
6822 {
6823 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6824 		switch (ring_type) {
6825 		case HWRM_RING_ALLOC_TX:
6826 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
6827 			break;
6828 		case HWRM_RING_ALLOC_RX:
6829 		case HWRM_RING_ALLOC_AGG:
6830 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
6831 			break;
6832 		case HWRM_RING_ALLOC_CMPL:
6833 			db->db_key64 = DBR_PATH_L2;
6834 			break;
6835 		case HWRM_RING_ALLOC_NQ:
6836 			db->db_key64 = DBR_PATH_L2;
6837 			break;
6838 		}
6839 		db->db_key64 |= (u64)xid << DBR_XID_SFT;
6840 
6841 		if (bp->flags & BNXT_FLAG_CHIP_P7)
6842 			db->db_key64 |= DBR_VALID;
6843 
6844 		db->doorbell = bp->bar1 + bp->db_offset;
6845 	} else {
6846 		db->doorbell = bp->bar1 + map_idx * 0x80;
6847 		switch (ring_type) {
6848 		case HWRM_RING_ALLOC_TX:
6849 			db->db_key32 = DB_KEY_TX;
6850 			break;
6851 		case HWRM_RING_ALLOC_RX:
6852 		case HWRM_RING_ALLOC_AGG:
6853 			db->db_key32 = DB_KEY_RX;
6854 			break;
6855 		case HWRM_RING_ALLOC_CMPL:
6856 			db->db_key32 = DB_KEY_CP;
6857 			break;
6858 		}
6859 	}
6860 	bnxt_set_db_mask(bp, db, ring_type);
6861 }
6862 
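/* Allocate all firmware rings: NQ (or legacy completion) rings first, then
 * TX rings with their per-ring completion rings on P5+ chips, then RX rings,
 * and finally aggregation rings when aggregation is enabled.  The first
 * completion ring is also registered as the async event ring.
 */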
6863 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
6864 {
6865 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
6866 	int i, rc = 0;
6867 	u32 type;
6868 
6869 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6870 		type = HWRM_RING_ALLOC_NQ;
6871 	else
6872 		type = HWRM_RING_ALLOC_CMPL;
6873 	for (i = 0; i < bp->cp_nr_rings; i++) {
6874 		struct bnxt_napi *bnapi = bp->bnapi[i];
6875 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6876 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
6877 		u32 map_idx = ring->map_idx;
6878 		unsigned int vector;
6879 
6880 		vector = bp->irq_tbl[map_idx].vector;
6881 		disable_irq_nosync(vector);
6882 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6883 		if (rc) {
6884 			enable_irq(vector);
6885 			goto err_out;
6886 		}
6887 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
6888 		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
6889 		enable_irq(vector);
6890 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
6891 
6892 		if (!i) {
6893 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
6894 			if (rc)
6895 				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
6896 		}
6897 	}
6898 
6899 	type = HWRM_RING_ALLOC_TX;
6900 	for (i = 0; i < bp->tx_nr_rings; i++) {
6901 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6902 		struct bnxt_ring_struct *ring;
6903 		u32 map_idx;
6904 
6905 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6906 			struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
6907 			struct bnxt_napi *bnapi = txr->bnapi;
6908 			u32 type2 = HWRM_RING_ALLOC_CMPL;
6909 
6910 			ring = &cpr2->cp_ring_struct;
6911 			ring->handle = BNXT_SET_NQ_HDL(cpr2);
6912 			map_idx = bnapi->index;
6913 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6914 			if (rc)
6915 				goto err_out;
6916 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6917 				    ring->fw_ring_id);
6918 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6919 		}
6920 		ring = &txr->tx_ring_struct;
6921 		map_idx = i;
6922 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6923 		if (rc)
6924 			goto err_out;
6925 		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
6926 	}
6927 
6928 	type = HWRM_RING_ALLOC_RX;
6929 	for (i = 0; i < bp->rx_nr_rings; i++) {
6930 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6931 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6932 		struct bnxt_napi *bnapi = rxr->bnapi;
6933 		u32 map_idx = bnapi->index;
6934 
6935 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6936 		if (rc)
6937 			goto err_out;
6938 		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
6939 		/* If we have agg rings, post agg buffers first. */
6940 		if (!agg_rings)
6941 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6942 		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
6943 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6944 			struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
6945 			u32 type2 = HWRM_RING_ALLOC_CMPL;
6946 
6947 			ring = &cpr2->cp_ring_struct;
6948 			ring->handle = BNXT_SET_NQ_HDL(cpr2);
6949 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6950 			if (rc)
6951 				goto err_out;
6952 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6953 				    ring->fw_ring_id);
6954 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6955 		}
6956 	}
6957 
6958 	if (agg_rings) {
6959 		type = HWRM_RING_ALLOC_AGG;
6960 		for (i = 0; i < bp->rx_nr_rings; i++) {
6961 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6962 			struct bnxt_ring_struct *ring =
6963 						&rxr->rx_agg_ring_struct;
6964 			u32 grp_idx = ring->grp_idx;
6965 			u32 map_idx = grp_idx + bp->rx_nr_rings;
6966 
6967 			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6968 			if (rc)
6969 				goto err_out;
6970 
6971 			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
6972 				    ring->fw_ring_id);
6973 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
6974 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6975 			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
6976 		}
6977 	}
6978 err_out:
6979 	return rc;
6980 }
6981 
6982 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6983 				   struct bnxt_ring_struct *ring,
6984 				   u32 ring_type, int cmpl_ring_id)
6985 {
6986 	struct hwrm_ring_free_output *resp;
6987 	struct hwrm_ring_free_input *req;
6988 	u16 error_code = 0;
6989 	int rc;
6990 
6991 	if (BNXT_NO_FW_ACCESS(bp))
6992 		return 0;
6993 
6994 	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6995 	if (rc)
6996 		goto exit;
6997 
6998 	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6999 	req->ring_type = ring_type;
7000 	req->ring_id = cpu_to_le16(ring->fw_ring_id);
7001 
7002 	resp = hwrm_req_hold(bp, req);
7003 	rc = hwrm_req_send(bp, req);
7004 	error_code = le16_to_cpu(resp->error_code);
7005 	hwrm_req_drop(bp, req);
7006 exit:
7007 	if (rc || error_code) {
7008 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7009 			   ring_type, rc, error_code);
7010 		return -EIO;
7011 	}
7012 	return 0;
7013 }
7014 
7015 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7016 {
7017 	u32 type;
7018 	int i;
7019 
7020 	if (!bp->bnapi)
7021 		return;
7022 
7023 	for (i = 0; i < bp->tx_nr_rings; i++) {
7024 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7025 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7026 
7027 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7028 			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
7029 
7030 			hwrm_ring_free_send_msg(bp, ring,
7031 						RING_FREE_REQ_RING_TYPE_TX,
7032 						close_path ? cmpl_ring_id :
7033 						INVALID_HW_RING_ID);
7034 			ring->fw_ring_id = INVALID_HW_RING_ID;
7035 		}
7036 	}
7037 
7038 	for (i = 0; i < bp->rx_nr_rings; i++) {
7039 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7040 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7041 		u32 grp_idx = rxr->bnapi->index;
7042 
7043 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7044 			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7045 
7046 			hwrm_ring_free_send_msg(bp, ring,
7047 						RING_FREE_REQ_RING_TYPE_RX,
7048 						close_path ? cmpl_ring_id :
7049 						INVALID_HW_RING_ID);
7050 			ring->fw_ring_id = INVALID_HW_RING_ID;
7051 			bp->grp_info[grp_idx].rx_fw_ring_id =
7052 				INVALID_HW_RING_ID;
7053 		}
7054 	}
7055 
7056 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7057 		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7058 	else
7059 		type = RING_FREE_REQ_RING_TYPE_RX;
7060 	for (i = 0; i < bp->rx_nr_rings; i++) {
7061 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7062 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7063 		u32 grp_idx = rxr->bnapi->index;
7064 
7065 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7066 			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7067 
7068 			hwrm_ring_free_send_msg(bp, ring, type,
7069 						close_path ? cmpl_ring_id :
7070 						INVALID_HW_RING_ID);
7071 			ring->fw_ring_id = INVALID_HW_RING_ID;
7072 			bp->grp_info[grp_idx].agg_fw_ring_id =
7073 				INVALID_HW_RING_ID;
7074 		}
7075 	}
7076 
7077 	/* The completion rings are about to be freed.  After that, the
7078 	 * IRQ doorbells will no longer work, so we need to disable
7079 	 * interrupts here.
7080 	 */
7081 	bnxt_disable_int_sync(bp);
7082 
7083 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7084 		type = RING_FREE_REQ_RING_TYPE_NQ;
7085 	else
7086 		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7087 	for (i = 0; i < bp->cp_nr_rings; i++) {
7088 		struct bnxt_napi *bnapi = bp->bnapi[i];
7089 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7090 		struct bnxt_ring_struct *ring;
7091 		int j;
7092 
7093 		for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
7094 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
7095 
7096 			ring = &cpr2->cp_ring_struct;
7097 			if (ring->fw_ring_id == INVALID_HW_RING_ID)
7098 				continue;
7099 			hwrm_ring_free_send_msg(bp, ring,
7100 						RING_FREE_REQ_RING_TYPE_L2_CMPL,
7101 						INVALID_HW_RING_ID);
7102 			ring->fw_ring_id = INVALID_HW_RING_ID;
7103 		}
7104 		ring = &cpr->cp_ring_struct;
7105 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7106 			hwrm_ring_free_send_msg(bp, ring, type,
7107 						INVALID_HW_RING_ID);
7108 			ring->fw_ring_id = INVALID_HW_RING_ID;
7109 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7110 		}
7111 	}
7112 }
7113 
7114 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7115 			     bool shared);
7116 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7117 			   bool shared);
7118 
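/* Query the firmware (HWRM_FUNC_QCFG) for the resources currently allocated
 * to this function and cache them in bp->hw_resc.  On P5+ chips, if fewer
 * completion rings were allocated than the RX + TX rings require, the cached
 * RX/TX counts are trimmed to fit.
 */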
7119 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7120 {
7121 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7122 	struct hwrm_func_qcfg_output *resp;
7123 	struct hwrm_func_qcfg_input *req;
7124 	int rc;
7125 
7126 	if (bp->hwrm_spec_code < 0x10601)
7127 		return 0;
7128 
7129 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7130 	if (rc)
7131 		return rc;
7132 
7133 	req->fid = cpu_to_le16(0xffff);
7134 	resp = hwrm_req_hold(bp, req);
7135 	rc = hwrm_req_send(bp, req);
7136 	if (rc) {
7137 		hwrm_req_drop(bp, req);
7138 		return rc;
7139 	}
7140 
7141 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7142 	if (BNXT_NEW_RM(bp)) {
7143 		u16 cp, stats;
7144 
7145 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7146 		hw_resc->resv_hw_ring_grps =
7147 			le32_to_cpu(resp->alloc_hw_ring_grps);
7148 		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7149 		hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7150 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
7151 		stats = le16_to_cpu(resp->alloc_stat_ctx);
7152 		hw_resc->resv_irqs = cp;
7153 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7154 			int rx = hw_resc->resv_rx_rings;
7155 			int tx = hw_resc->resv_tx_rings;
7156 
7157 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
7158 				rx >>= 1;
7159 			if (cp < (rx + tx)) {
7160 				rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7161 				if (rc)
7162 					goto get_rings_exit;
7163 				if (bp->flags & BNXT_FLAG_AGG_RINGS)
7164 					rx <<= 1;
7165 				hw_resc->resv_rx_rings = rx;
7166 				hw_resc->resv_tx_rings = tx;
7167 			}
7168 			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7169 			hw_resc->resv_hw_ring_grps = rx;
7170 		}
7171 		hw_resc->resv_cp_rings = cp;
7172 		hw_resc->resv_stat_ctxs = stats;
7173 	}
7174 get_rings_exit:
7175 	hwrm_req_drop(bp, req);
7176 	return rc;
7177 }
7178 
7179 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7180 {
7181 	struct hwrm_func_qcfg_output *resp;
7182 	struct hwrm_func_qcfg_input *req;
7183 	int rc;
7184 
7185 	if (bp->hwrm_spec_code < 0x10601)
7186 		return 0;
7187 
7188 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7189 	if (rc)
7190 		return rc;
7191 
7192 	req->fid = cpu_to_le16(fid);
7193 	resp = hwrm_req_hold(bp, req);
7194 	rc = hwrm_req_send(bp, req);
7195 	if (!rc)
7196 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7197 
7198 	hwrm_req_drop(bp, req);
7199 	return rc;
7200 }
7201 
7202 static bool bnxt_rfs_supported(struct bnxt *bp);
7203 
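/* Build (but do not send) a FUNC_CFG request describing the PF ring and
 * resource counts in *hwr.  An enables bit is set only for each non-zero
 * count; on P5+ chips the NQ/MSI-X count goes in num_msix while
 * num_cmpl_rings carries the completion ring count.
 */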
7204 static struct hwrm_func_cfg_input *
7205 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7206 {
7207 	struct hwrm_func_cfg_input *req;
7208 	u32 enables = 0;
7209 
7210 	if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7211 		return NULL;
7212 
7213 	req->fid = cpu_to_le16(0xffff);
7214 	enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7215 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7216 	if (BNXT_NEW_RM(bp)) {
7217 		enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7218 		enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7219 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7220 			enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7221 			enables |= hwr->cp_p5 ?
7222 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7223 		} else {
7224 			enables |= hwr->cp ?
7225 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7226 			enables |= hwr->grp ?
7227 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7228 		}
7229 		enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7230 		enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7231 					  0;
7232 		req->num_rx_rings = cpu_to_le16(hwr->rx);
7233 		req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7234 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7235 			req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7236 			req->num_msix = cpu_to_le16(hwr->cp);
7237 		} else {
7238 			req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7239 			req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7240 		}
7241 		req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7242 		req->num_vnics = cpu_to_le16(hwr->vnic);
7243 	}
7244 	req->enables = cpu_to_le32(enables);
7245 	return req;
7246 }
7247 
7248 static struct hwrm_func_vf_cfg_input *
7249 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7250 {
7251 	struct hwrm_func_vf_cfg_input *req;
7252 	u32 enables = 0;
7253 
7254 	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7255 		return NULL;
7256 
7257 	enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7258 	enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7259 			     FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7260 	enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7261 	enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7262 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7263 		enables |= hwr->cp_p5 ?
7264 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7265 	} else {
7266 		enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7267 		enables |= hwr->grp ?
7268 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7269 	}
7270 	enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7271 	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7272 
7273 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7274 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7275 	req->num_rx_rings = cpu_to_le16(hwr->rx);
7276 	req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7277 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7278 		req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7279 	} else {
7280 		req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7281 		req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7282 	}
7283 	req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7284 	req->num_vnics = cpu_to_le16(hwr->vnic);
7285 
7286 	req->enables = cpu_to_le32(enables);
7287 	return req;
7288 }
7289 
7290 static int
7291 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7292 {
7293 	struct hwrm_func_cfg_input *req;
7294 	int rc;
7295 
7296 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7297 	if (!req)
7298 		return -ENOMEM;
7299 
7300 	if (!req->enables) {
7301 		hwrm_req_drop(bp, req);
7302 		return 0;
7303 	}
7304 
7305 	rc = hwrm_req_send(bp, req);
7306 	if (rc)
7307 		return rc;
7308 
7309 	if (bp->hwrm_spec_code < 0x10601)
7310 		bp->hw_resc.resv_tx_rings = hwr->tx;
7311 
7312 	return bnxt_hwrm_get_rings(bp);
7313 }
7314 
7315 static int
7316 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7317 {
7318 	struct hwrm_func_vf_cfg_input *req;
7319 	int rc;
7320 
7321 	if (!BNXT_NEW_RM(bp)) {
7322 		bp->hw_resc.resv_tx_rings = hwr->tx;
7323 		return 0;
7324 	}
7325 
7326 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7327 	if (!req)
7328 		return -ENOMEM;
7329 
7330 	rc = hwrm_req_send(bp, req);
7331 	if (rc)
7332 		return rc;
7333 
7334 	return bnxt_hwrm_get_rings(bp);
7335 }
7336 
7337 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7338 {
7339 	if (BNXT_PF(bp))
7340 		return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7341 	else
7342 		return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7343 }
7344 
7345 int bnxt_nq_rings_in_use(struct bnxt *bp)
7346 {
7347 	return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7348 }
7349 
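/* Number of completion rings in use.  On P5+ chips every RX and TX ring has
 * its own completion ring, so this is rx + tx; on older chips it is simply
 * the NQ/MSI-X count from bnxt_nq_rings_in_use().
 */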
7350 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7351 {
7352 	int cp;
7353 
7354 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7355 		return bnxt_nq_rings_in_use(bp);
7356 
7357 	cp = bp->tx_nr_rings + bp->rx_nr_rings;
7358 	return cp;
7359 }
7360 
7361 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7362 {
7363 	return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7364 }
7365 
7366 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7367 {
7368 	if (!hwr->grp)
7369 		return 0;
7370 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7371 		int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7372 
7373 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7374 			rss_ctx *= hwr->vnic;
7375 		return rss_ctx;
7376 	}
7377 	if (BNXT_VF(bp))
7378 		return BNXT_VF_MAX_RSS_CTX;
7379 	if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7380 		return hwr->grp + 1;
7381 	return 1;
7382 }
7383 
7384 /* Check if a default RSS map needs to be set up.  This function is only
7385  * used on older firmware that does not require reserving RX rings.
7386  */
7387 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7388 {
7389 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7390 
7391 	/* The RSS map is valid for RX rings set to resv_rx_rings */
7392 	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7393 		hw_resc->resv_rx_rings = bp->rx_nr_rings;
7394 		if (!netif_is_rxfh_configured(bp->dev))
7395 			bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7396 	}
7397 }
7398 
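/* Total number of VNICs needed.  One is always required; when RFS (ntuple
 * filtering) is enabled, chips with ntuple VNIC support need
 * 2 + bp->num_rss_ctx, and pre-P5 chips need one per RX ring plus the
 * default.
 */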
7399 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7400 {
7401 	if (bp->flags & BNXT_FLAG_RFS) {
7402 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7403 			return 2 + bp->num_rss_ctx;
7404 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7405 			return rx_rings + 1;
7406 	}
7407 	return 1;
7408 }
7409 
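/* Return true if the resources currently reserved with the firmware (rings,
 * VNICs, stat contexts, ring groups, IRQs) no longer match what the driver
 * needs, i.e. __bnxt_reserve_rings() must be called to renegotiate them.
 */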
7410 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7411 {
7412 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7413 	int cp = bnxt_cp_rings_in_use(bp);
7414 	int nq = bnxt_nq_rings_in_use(bp);
7415 	int rx = bp->rx_nr_rings, stat;
7416 	int vnic, grp = rx;
7417 
7418 	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7419 	    bp->hwrm_spec_code >= 0x10601)
7420 		return true;
7421 
7422 	/* Old firmware does not need RX ring reservations but we still
7423 	 * need to set up a default RSS map when needed.  With new firmware
7424 	 * we go through RX ring reservations first and then set up the
7425 	 * RSS map for the successfully reserved RX rings when needed.
7426 	 */
7427 	if (!BNXT_NEW_RM(bp)) {
7428 		bnxt_check_rss_tbl_no_rmgr(bp);
7429 		return false;
7430 	}
7431 
7432 	vnic = bnxt_get_total_vnics(bp, rx);
7433 
7434 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7435 		rx <<= 1;
7436 	stat = bnxt_get_func_stat_ctxs(bp);
7437 	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7438 	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7439 	    (hw_resc->resv_hw_ring_grps != grp &&
7440 	     !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7441 		return true;
7442 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7443 	    hw_resc->resv_irqs != nq)
7444 		return true;
7445 	return false;
7446 }
7447 
7448 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7449 {
7450 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7451 
7452 	hwr->tx = hw_resc->resv_tx_rings;
7453 	if (BNXT_NEW_RM(bp)) {
7454 		hwr->rx = hw_resc->resv_rx_rings;
7455 		hwr->cp = hw_resc->resv_irqs;
7456 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7457 			hwr->cp_p5 = hw_resc->resv_cp_rings;
7458 		hwr->grp = hw_resc->resv_hw_ring_grps;
7459 		hwr->vnic = hw_resc->resv_vnics;
7460 		hwr->stat = hw_resc->resv_stat_ctxs;
7461 		hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7462 	}
7463 }
7464 
7465 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7466 {
7467 	return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7468 	       hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7469 }
7470 
7471 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7472 
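/* Work out the ring, VNIC, RSS context and stat context counts the driver
 * wants, reserve them with the firmware, then scale the driver's ring counts
 * down to what was actually granted.  Leftover MSI-X vectors and stat
 * contexts are set aside for the ULP (RoCE) driver via
 * bnxt_set_ulp_msix_num() and bnxt_set_ulp_stat_ctxs().
 */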
7473 static int __bnxt_reserve_rings(struct bnxt *bp)
7474 {
7475 	struct bnxt_hw_rings hwr = {0};
7476 	int cp = bp->cp_nr_rings;
7477 	int rx_rings, rc;
7478 	int ulp_msix = 0;
7479 	bool sh = false;
7480 	int tx_cp;
7481 
7482 	if (!bnxt_need_reserve_rings(bp))
7483 		return 0;
7484 
7485 	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
7486 		ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7487 		if (!ulp_msix)
7488 			bnxt_set_ulp_stat_ctxs(bp, 0);
7489 
7490 		if (ulp_msix > bp->ulp_num_msix_want)
7491 			ulp_msix = bp->ulp_num_msix_want;
7492 		hwr.cp = cp + ulp_msix;
7493 	} else {
7494 		hwr.cp = bnxt_nq_rings_in_use(bp);
7495 	}
7496 
7497 	hwr.tx = bp->tx_nr_rings;
7498 	hwr.rx = bp->rx_nr_rings;
7499 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7500 		sh = true;
7501 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7502 		hwr.cp_p5 = hwr.rx + hwr.tx;
7503 
7504 	hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7505 
7506 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7507 		hwr.rx <<= 1;
7508 	hwr.grp = bp->rx_nr_rings;
7509 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7510 	hwr.stat = bnxt_get_func_stat_ctxs(bp);
7511 
7512 	rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7513 	if (rc)
7514 		return rc;
7515 
7516 	bnxt_copy_reserved_rings(bp, &hwr);
7517 
7518 	rx_rings = hwr.rx;
7519 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7520 		if (hwr.rx >= 2) {
7521 			rx_rings = hwr.rx >> 1;
7522 		} else {
7523 			if (netif_running(bp->dev))
7524 				return -ENOMEM;
7525 
7526 			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7527 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7528 			bp->dev->hw_features &= ~NETIF_F_LRO;
7529 			bp->dev->features &= ~NETIF_F_LRO;
7530 			bnxt_set_ring_params(bp);
7531 		}
7532 	}
7533 	rx_rings = min_t(int, rx_rings, hwr.grp);
7534 	hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
7535 	if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
7536 		hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
7537 	hwr.cp = min_t(int, hwr.cp, hwr.stat);
7538 	rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
7539 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7540 		hwr.rx = rx_rings << 1;
7541 	tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
7542 	hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
7543 	bp->tx_nr_rings = hwr.tx;
7544 
7545 	/* If we cannot reserve all the RX rings, reset the RSS map only
7546 	 * if absolutely necessary
7547 	 */
7548 	if (rx_rings != bp->rx_nr_rings) {
7549 		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
7550 			    rx_rings, bp->rx_nr_rings);
7551 		if (netif_is_rxfh_configured(bp->dev) &&
7552 		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
7553 		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
7554 		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
7555 			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
7556 			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
7557 		}
7558 	}
7559 	bp->rx_nr_rings = rx_rings;
7560 	bp->cp_nr_rings = hwr.cp;
7561 
7562 	if (!bnxt_rings_ok(bp, &hwr))
7563 		return -ENOMEM;
7564 
7565 	if (!netif_is_rxfh_configured(bp->dev))
7566 		bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7567 
7568 	if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
7569 		int resv_msix, resv_ctx, ulp_ctxs;
7570 		struct bnxt_hw_resc *hw_resc;
7571 
7572 		hw_resc = &bp->hw_resc;
7573 		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
7574 		ulp_msix = min_t(int, resv_msix, ulp_msix);
7575 		bnxt_set_ulp_msix_num(bp, ulp_msix);
7576 		resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
7577 		ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
7578 		bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
7579 	}
7580 
7581 	return rc;
7582 }
7583 
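/* "Check" variant of the VF reservation request: the *_ASSETS_TEST flags are
 * set so the firmware only tests whether the resources in *hwr are available
 * rather than actually reserving them.
 */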
7584 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7585 {
7586 	struct hwrm_func_vf_cfg_input *req;
7587 	u32 flags;
7588 
7589 	if (!BNXT_NEW_RM(bp))
7590 		return 0;
7591 
7592 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7593 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
7594 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7595 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7596 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7597 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
7598 		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
7599 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7600 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7601 
7602 	req->flags = cpu_to_le32(flags);
7603 	return hwrm_req_send_silent(bp, req);
7604 }
7605 
7606 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7607 {
7608 	struct hwrm_func_cfg_input *req;
7609 	u32 flags;
7610 
7611 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7612 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
7613 	if (BNXT_NEW_RM(bp)) {
7614 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7615 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7616 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7617 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
7618 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7619 			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
7620 				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
7621 		else
7622 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7623 	}
7624 
7625 	req->flags = cpu_to_le32(flags);
7626 	return hwrm_req_send_silent(bp, req);
7627 }
7628 
7629 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7630 {
7631 	if (bp->hwrm_spec_code < 0x10801)
7632 		return 0;
7633 
7634 	if (BNXT_PF(bp))
7635 		return bnxt_hwrm_check_pf_rings(bp, hwr);
7636 
7637 	return bnxt_hwrm_check_vf_rings(bp, hwr);
7638 }
7639 
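/* Query the interrupt coalescing capabilities (HWRM_RING_AGGINT_QCAPS).
 * Conservative defaults are filled in first so the driver still works when
 * the firmware is too old (spec code < 0x10902) to support the query.
 */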
7640 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
7641 {
7642 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7643 	struct hwrm_ring_aggint_qcaps_output *resp;
7644 	struct hwrm_ring_aggint_qcaps_input *req;
7645 	int rc;
7646 
7647 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
7648 	coal_cap->num_cmpl_dma_aggr_max = 63;
7649 	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
7650 	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
7651 	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
7652 	coal_cap->int_lat_tmr_min_max = 65535;
7653 	coal_cap->int_lat_tmr_max_max = 65535;
7654 	coal_cap->num_cmpl_aggr_int_max = 65535;
7655 	coal_cap->timer_units = 80;
7656 
7657 	if (bp->hwrm_spec_code < 0x10902)
7658 		return;
7659 
7660 	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
7661 		return;
7662 
7663 	resp = hwrm_req_hold(bp, req);
7664 	rc = hwrm_req_send_silent(bp, req);
7665 	if (!rc) {
7666 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
7667 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
7668 		coal_cap->num_cmpl_dma_aggr_max =
7669 			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
7670 		coal_cap->num_cmpl_dma_aggr_during_int_max =
7671 			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
7672 		coal_cap->cmpl_aggr_dma_tmr_max =
7673 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
7674 		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
7675 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
7676 		coal_cap->int_lat_tmr_min_max =
7677 			le16_to_cpu(resp->int_lat_tmr_min_max);
7678 		coal_cap->int_lat_tmr_max_max =
7679 			le16_to_cpu(resp->int_lat_tmr_max_max);
7680 		coal_cap->num_cmpl_aggr_int_max =
7681 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
7682 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
7683 	}
7684 	hwrm_req_drop(bp, req);
7685 }
7686 
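/* Convert a coalescing time in microseconds to device timer units.
 * timer_units is effectively nanoseconds per tick (80 ns by default),
 * e.g. 100 usec -> 100 * 1000 / 80 = 1250 ticks.
 */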
7687 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
7688 {
7689 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7690 
7691 	return usec * 1000 / coal_cap->timer_units;
7692 }
7693 
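/* Fill a RING_CMPL_RING_CFG_AGGINT_PARAMS request from the given coalescing
 * settings, clamping every value to the limits cached in bp->coal_cap.  The
 * minimum interrupt timer and the DMA buffer timer are derived from the
 * interrupt timer (1/2 and 1/4 of it respectively).
 */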
7694 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
7695 	struct bnxt_coal *hw_coal,
7696 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7697 {
7698 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7699 	u16 val, tmr, max, flags = hw_coal->flags;
7700 	u32 cmpl_params = coal_cap->cmpl_params;
7701 
7702 	max = hw_coal->bufs_per_record * 128;
7703 	if (hw_coal->budget)
7704 		max = hw_coal->bufs_per_record * hw_coal->budget;
7705 	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
7706 
7707 	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
7708 	req->num_cmpl_aggr_int = cpu_to_le16(val);
7709 
7710 	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
7711 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
7712 
7713 	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
7714 		      coal_cap->num_cmpl_dma_aggr_during_int_max);
7715 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
7716 
7717 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
7718 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
7719 	req->int_lat_tmr_max = cpu_to_le16(tmr);
7720 
7721 	/* min timer set to 1/2 of interrupt timer */
7722 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
7723 		val = tmr / 2;
7724 		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
7725 		req->int_lat_tmr_min = cpu_to_le16(val);
7726 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7727 	}
7728 
7729 	/* buf timer set to 1/4 of interrupt timer */
7730 	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
7731 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
7732 
7733 	if (cmpl_params &
7734 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
7735 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
7736 		val = clamp_t(u16, tmr, 1,
7737 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
7738 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
7739 		req->enables |=
7740 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
7741 	}
7742 
7743 	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
7744 	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
7745 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
7746 	req->flags = cpu_to_le16(flags);
7747 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
7748 }
7749 
7750 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
7751 				   struct bnxt_coal *hw_coal)
7752 {
7753 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
7754 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7755 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7756 	u32 nq_params = coal_cap->nq_params;
7757 	u16 tmr;
7758 	int rc;
7759 
7760 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
7761 		return 0;
7762 
7763 	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7764 	if (rc)
7765 		return rc;
7766 
7767 	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
7768 	req->flags =
7769 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
7770 
7771 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
7772 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
7773 	req->int_lat_tmr_min = cpu_to_le16(tmr);
7774 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7775 	return hwrm_req_send(bp, req);
7776 }
7777 
7778 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
7779 {
7780 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
7781 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7782 	struct bnxt_coal coal;
7783 	int rc;
7784 
7785 	/* Tick values are in microseconds.
7786 	 * 1 coal_buf x bufs_per_record = 1 completion record.
7787 	 */
7788 	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
7789 
7790 	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
7791 	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
7792 
7793 	if (!bnapi->rx_ring)
7794 		return -ENODEV;
7795 
7796 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7797 	if (rc)
7798 		return rc;
7799 
7800 	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
7801 
7802 	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
7803 
7804 	return hwrm_req_send(bp, req_rx);
7805 }
7806 
7807 static int
7808 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7809 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7810 {
7811 	u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
7812 
7813 	req->ring_id = cpu_to_le16(ring_id);
7814 	return hwrm_req_send(bp, req);
7815 }
7816 
7817 static int
7818 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7819 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7820 {
7821 	struct bnxt_tx_ring_info *txr;
7822 	int i, rc;
7823 
7824 	bnxt_for_each_napi_tx(i, bnapi, txr) {
7825 		u16 ring_id;
7826 
7827 		ring_id = bnxt_cp_ring_for_tx(bp, txr);
7828 		req->ring_id = cpu_to_le16(ring_id);
7829 		rc = hwrm_req_send(bp, req);
7830 		if (rc)
7831 			return rc;
7832 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7833 			return 0;
7834 	}
7835 	return 0;
7836 }
7837 
7838 int bnxt_hwrm_set_coal(struct bnxt *bp)
7839 {
7840 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
7841 	int i, rc;
7842 
7843 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7844 	if (rc)
7845 		return rc;
7846 
7847 	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7848 	if (rc) {
7849 		hwrm_req_drop(bp, req_rx);
7850 		return rc;
7851 	}
7852 
7853 	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
7854 	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
7855 
7856 	hwrm_req_hold(bp, req_rx);
7857 	hwrm_req_hold(bp, req_tx);
7858 	for (i = 0; i < bp->cp_nr_rings; i++) {
7859 		struct bnxt_napi *bnapi = bp->bnapi[i];
7860 		struct bnxt_coal *hw_coal;
7861 
7862 		if (!bnapi->rx_ring)
7863 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7864 		else
7865 			rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
7866 		if (rc)
7867 			break;
7868 
7869 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7870 			continue;
7871 
7872 		if (bnapi->rx_ring && bnapi->tx_ring[0]) {
7873 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7874 			if (rc)
7875 				break;
7876 		}
7877 		if (bnapi->rx_ring)
7878 			hw_coal = &bp->rx_coal;
7879 		else
7880 			hw_coal = &bp->tx_coal;
7881 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
7882 	}
7883 	hwrm_req_drop(bp, req_rx);
7884 	hwrm_req_drop(bp, req_tx);
7885 	return rc;
7886 }
7887 
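/* Free all statistics contexts.  On older firmware (major version <= 20) a
 * STAT_CTX_CLR_STATS request is also sent for each context immediately
 * before it is freed.
 */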
7888 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
7889 {
7890 	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
7891 	struct hwrm_stat_ctx_free_input *req;
7892 	int i;
7893 
7894 	if (!bp->bnapi)
7895 		return;
7896 
7897 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7898 		return;
7899 
7900 	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
7901 		return;
7902 	if (BNXT_FW_MAJ(bp) <= 20) {
7903 		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
7904 			hwrm_req_drop(bp, req);
7905 			return;
7906 		}
7907 		hwrm_req_hold(bp, req0);
7908 	}
7909 	hwrm_req_hold(bp, req);
7910 	for (i = 0; i < bp->cp_nr_rings; i++) {
7911 		struct bnxt_napi *bnapi = bp->bnapi[i];
7912 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7913 
7914 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
7915 			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
7916 			if (req0) {
7917 				req0->stat_ctx_id = req->stat_ctx_id;
7918 				hwrm_req_send(bp, req0);
7919 			}
7920 			hwrm_req_send(bp, req);
7921 
7922 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
7923 		}
7924 	}
7925 	hwrm_req_drop(bp, req);
7926 	if (req0)
7927 		hwrm_req_drop(bp, req0);
7928 }
7929 
7930 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
7931 {
7932 	struct hwrm_stat_ctx_alloc_output *resp;
7933 	struct hwrm_stat_ctx_alloc_input *req;
7934 	int rc, i;
7935 
7936 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7937 		return 0;
7938 
7939 	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
7940 	if (rc)
7941 		return rc;
7942 
7943 	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
7944 	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
7945 
7946 	resp = hwrm_req_hold(bp, req);
7947 	for (i = 0; i < bp->cp_nr_rings; i++) {
7948 		struct bnxt_napi *bnapi = bp->bnapi[i];
7949 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7950 
7951 		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
7952 
7953 		rc = hwrm_req_send(bp, req);
7954 		if (rc)
7955 			break;
7956 
7957 		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
7958 
7959 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
7960 	}
7961 	hwrm_req_drop(bp, req);
7962 	return rc;
7963 }
7964 
7965 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
7966 {
7967 	struct hwrm_func_qcfg_output *resp;
7968 	struct hwrm_func_qcfg_input *req;
7969 	u16 flags;
7970 	int rc;
7971 
7972 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7973 	if (rc)
7974 		return rc;
7975 
7976 	req->fid = cpu_to_le16(0xffff);
7977 	resp = hwrm_req_hold(bp, req);
7978 	rc = hwrm_req_send(bp, req);
7979 	if (rc)
7980 		goto func_qcfg_exit;
7981 
7982 #ifdef CONFIG_BNXT_SRIOV
7983 	if (BNXT_VF(bp)) {
7984 		struct bnxt_vf_info *vf = &bp->vf;
7985 
7986 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
7987 	} else {
7988 		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
7989 	}
7990 #endif
7991 	flags = le16_to_cpu(resp->flags);
7992 	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
7993 		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
7994 		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
7995 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
7996 			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
7997 	}
7998 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
7999 		bp->flags |= BNXT_FLAG_MULTI_HOST;
8000 
8001 	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8002 		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8003 
8004 	switch (resp->port_partition_type) {
8005 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8006 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8007 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8008 		bp->port_partition_type = resp->port_partition_type;
8009 		break;
8010 	}
8011 	if (bp->hwrm_spec_code < 0x10707 ||
8012 	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8013 		bp->br_mode = BRIDGE_MODE_VEB;
8014 	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8015 		bp->br_mode = BRIDGE_MODE_VEPA;
8016 	else
8017 		bp->br_mode = BRIDGE_MODE_UNDEF;
8018 
8019 	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8020 	if (!bp->max_mtu)
8021 		bp->max_mtu = BNXT_MAX_MTU;
8022 
8023 	if (bp->db_size)
8024 		goto func_qcfg_exit;
8025 
8026 	bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8027 	if (BNXT_CHIP_P5(bp)) {
8028 		if (BNXT_PF(bp))
8029 			bp->db_offset = DB_PF_OFFSET_P5;
8030 		else
8031 			bp->db_offset = DB_VF_OFFSET_P5;
8032 	}
8033 	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8034 				 1024);
8035 	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8036 	    bp->db_size <= bp->db_offset)
8037 		bp->db_size = pci_resource_len(bp->pdev, 2);
8038 
8039 func_qcfg_exit:
8040 	hwrm_req_drop(bp, req);
8041 	return rc;
8042 }
8043 
8044 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8045 				      u8 init_val, u8 init_offset,
8046 				      bool init_mask_set)
8047 {
8048 	ctxm->init_value = init_val;
8049 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8050 	if (init_mask_set)
8051 		ctxm->init_offset = init_offset * 4;
8052 	else
8053 		ctxm->init_value = 0;
8054 }
8055 
8056 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8057 {
8058 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8059 	u16 type;
8060 
8061 	for (type = 0; type < ctx_max; type++) {
8062 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8063 		int n = 1;
8064 
8065 		if (!ctxm->max_entries)
8066 			continue;
8067 
8068 		if (ctxm->instance_bmap)
8069 			n = hweight32(ctxm->instance_bmap);
8070 		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8071 		if (!ctxm->pg_info)
8072 			return -ENOMEM;
8073 	}
8074 	return 0;
8075 }
8076 
8077 #define BNXT_CTX_INIT_VALID(flags)	\
8078 	(!!((flags) &			\
8079 	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8080 
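/* Discover backing store requirements one context type at a time using the
 * V2 query.  The firmware returns next_valid_type in each response, which
 * advances the loop until all supported types have been seen; the per-type
 * page info arrays are then allocated.
 */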
8081 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8082 {
8083 	struct hwrm_func_backing_store_qcaps_v2_output *resp;
8084 	struct hwrm_func_backing_store_qcaps_v2_input *req;
8085 	struct bnxt_ctx_mem_info *ctx;
8086 	u16 type;
8087 	int rc;
8088 
8089 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8090 	if (rc)
8091 		return rc;
8092 
8093 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8094 	if (!ctx)
8095 		return -ENOMEM;
8096 	bp->ctx = ctx;
8097 
8098 	resp = hwrm_req_hold(bp, req);
8099 
8100 	for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8101 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8102 		u8 init_val, init_off, i;
8103 		__le32 *p;
8104 		u32 flags;
8105 
8106 		req->type = cpu_to_le16(type);
8107 		rc = hwrm_req_send(bp, req);
8108 		if (rc)
8109 			goto ctx_done;
8110 		flags = le32_to_cpu(resp->flags);
8111 		type = le16_to_cpu(resp->next_valid_type);
8112 		if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
8113 			continue;
8114 
8115 		ctxm->type = le16_to_cpu(resp->type);
8116 		ctxm->entry_size = le16_to_cpu(resp->entry_size);
8117 		ctxm->flags = flags;
8118 		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8119 		ctxm->entry_multiple = resp->entry_multiple;
8120 		ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
8121 		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8122 		init_val = resp->ctx_init_value;
8123 		init_off = resp->ctx_init_offset;
8124 		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8125 					  BNXT_CTX_INIT_VALID(flags));
8126 		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8127 					      BNXT_MAX_SPLIT_ENTRY);
8128 		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8129 		     i++, p++)
8130 			ctxm->split[i] = le32_to_cpu(*p);
8131 	}
8132 	rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8133 
8134 ctx_done:
8135 	hwrm_req_drop(bp, req);
8136 	return rc;
8137 }
8138 
8139 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8140 {
8141 	struct hwrm_func_backing_store_qcaps_output *resp;
8142 	struct hwrm_func_backing_store_qcaps_input *req;
8143 	int rc;
8144 
8145 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
8146 		return 0;
8147 
8148 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8149 		return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8150 
8151 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8152 	if (rc)
8153 		return rc;
8154 
8155 	resp = hwrm_req_hold(bp, req);
8156 	rc = hwrm_req_send_silent(bp, req);
8157 	if (!rc) {
8158 		struct bnxt_ctx_mem_type *ctxm;
8159 		struct bnxt_ctx_mem_info *ctx;
8160 		u8 init_val, init_idx = 0;
8161 		u16 init_mask;
8162 
8163 		ctx = bp->ctx;
8164 		if (!ctx) {
8165 			ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8166 			if (!ctx) {
8167 				rc = -ENOMEM;
8168 				goto ctx_err;
8169 			}
8170 			bp->ctx = ctx;
8171 		}
8172 		init_val = resp->ctx_kind_initializer;
8173 		init_mask = le16_to_cpu(resp->ctx_init_mask);
8174 
8175 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8176 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8177 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8178 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8179 		ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8180 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8181 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8182 					  (init_mask & (1 << init_idx++)) != 0);
8183 
8184 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8185 		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8186 		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8187 		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8188 		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8189 					  (init_mask & (1 << init_idx++)) != 0);
8190 
8191 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8192 		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8193 		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8194 		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8195 		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8196 					  (init_mask & (1 << init_idx++)) != 0);
8197 
8198 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8199 		ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8200 		ctxm->max_entries = ctxm->vnic_entries +
8201 			le16_to_cpu(resp->vnic_max_ring_table_entries);
8202 		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8203 		bnxt_init_ctx_initializer(ctxm, init_val,
8204 					  resp->vnic_init_offset,
8205 					  (init_mask & (1 << init_idx++)) != 0);
8206 
8207 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8208 		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8209 		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8210 		bnxt_init_ctx_initializer(ctxm, init_val,
8211 					  resp->stat_init_offset,
8212 					  (init_mask & (1 << init_idx++)) != 0);
8213 
8214 		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8215 		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8216 		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8217 		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8218 		ctxm->entry_multiple = resp->tqm_entries_multiple;
8219 		if (!ctxm->entry_multiple)
8220 			ctxm->entry_multiple = 1;
8221 
8222 		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8223 
8224 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8225 		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8226 		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8227 		ctxm->mrav_num_entries_units =
8228 			le16_to_cpu(resp->mrav_num_entries_units);
8229 		bnxt_init_ctx_initializer(ctxm, init_val,
8230 					  resp->mrav_init_offset,
8231 					  (init_mask & (1 << init_idx++)) != 0);
8232 
8233 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8234 		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8235 		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8236 
8237 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8238 		if (!ctx->tqm_fp_rings_count)
8239 			ctx->tqm_fp_rings_count = bp->max_q;
8240 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8241 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8242 
8243 		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8244 		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8245 		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8246 
8247 		rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8248 	} else {
8249 		rc = 0;
8250 	}
8251 ctx_err:
8252 	hwrm_req_drop(bp, req);
8253 	return rc;
8254 }
8255 
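/* Encode the paging attributes of a context memory area into an HWRM
 * request: the level bits indicate how many levels of page tables sit above
 * the data pages (0, 1 or 2), and *pg_dir is set to the page table address,
 * or directly to the first data page for a flat single-level area.
 */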
8256 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8257 				  __le64 *pg_dir)
8258 {
8259 	if (!rmem->nr_pages)
8260 		return;
8261 
8262 	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8263 	if (rmem->depth >= 1) {
8264 		if (rmem->depth == 2)
8265 			*pg_attr |= 2;
8266 		else
8267 			*pg_attr |= 1;
8268 		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8269 	} else {
8270 		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8271 	}
8272 }
8273 
8274 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
8275 	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
8276 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
8277 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
8278 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
8279 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8280 
8281 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8282 {
8283 	struct hwrm_func_backing_store_cfg_input *req;
8284 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8285 	struct bnxt_ctx_pg_info *ctx_pg;
8286 	struct bnxt_ctx_mem_type *ctxm;
8287 	void **__req = (void **)&req;
8288 	u32 req_len = sizeof(*req);
8289 	__le32 *num_entries;
8290 	__le64 *pg_dir;
8291 	u32 flags = 0;
8292 	u8 *pg_attr;
8293 	u32 ena;
8294 	int rc;
8295 	int i;
8296 
8297 	if (!ctx)
8298 		return 0;
8299 
8300 	if (req_len > bp->hwrm_max_ext_req_len)
8301 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8302 	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8303 	if (rc)
8304 		return rc;
8305 
8306 	req->enables = cpu_to_le32(enables);
8307 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8308 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8309 		ctx_pg = ctxm->pg_info;
8310 		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8311 		req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8312 		req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8313 		req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8314 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8315 				      &req->qpc_pg_size_qpc_lvl,
8316 				      &req->qpc_page_dir);
8317 
8318 		if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8319 			req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8320 	}
8321 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8322 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8323 		ctx_pg = ctxm->pg_info;
8324 		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8325 		req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8326 		req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8327 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8328 				      &req->srq_pg_size_srq_lvl,
8329 				      &req->srq_page_dir);
8330 	}
8331 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8332 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8333 		ctx_pg = ctxm->pg_info;
8334 		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8335 		req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8336 		req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8337 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8338 				      &req->cq_pg_size_cq_lvl,
8339 				      &req->cq_page_dir);
8340 	}
8341 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8342 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8343 		ctx_pg = ctxm->pg_info;
8344 		req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8345 		req->vnic_num_ring_table_entries =
8346 			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8347 		req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8348 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8349 				      &req->vnic_pg_size_vnic_lvl,
8350 				      &req->vnic_page_dir);
8351 	}
8352 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8353 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8354 		ctx_pg = ctxm->pg_info;
8355 		req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8356 		req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8357 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8358 				      &req->stat_pg_size_stat_lvl,
8359 				      &req->stat_page_dir);
8360 	}
8361 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8362 		u32 units;
8363 
8364 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8365 		ctx_pg = ctxm->pg_info;
8366 		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8367 		units = ctxm->mrav_num_entries_units;
8368 		if (units) {
8369 			u32 num_mr, num_ah = ctxm->mrav_av_entries;
8370 			u32 entries;
8371 
8372 			num_mr = ctx_pg->entries - num_ah;
8373 			entries = ((num_mr / units) << 16) | (num_ah / units);
8374 			req->mrav_num_entries = cpu_to_le32(entries);
8375 			flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8376 		}
8377 		req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8378 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8379 				      &req->mrav_pg_size_mrav_lvl,
8380 				      &req->mrav_page_dir);
8381 	}
8382 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8383 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8384 		ctx_pg = ctxm->pg_info;
8385 		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8386 		req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8387 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8388 				      &req->tim_pg_size_tim_lvl,
8389 				      &req->tim_page_dir);
8390 	}
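	/* The slow-path TQM ring uses the BNXT_CTX_STQM page info; the
	 * fast-path TQM rings that follow use the per-ring BNXT_CTX_FTQM
	 * page info, with one enables bit per ring.
	 */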
8391 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8392 	for (i = 0, num_entries = &req->tqm_sp_num_entries,
8393 	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8394 	     pg_dir = &req->tqm_sp_page_dir,
8395 	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8396 	     ctx_pg = ctxm->pg_info;
8397 	     i < BNXT_MAX_TQM_RINGS;
8398 	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8399 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8400 		if (!(enables & ena))
8401 			continue;
8402 
8403 		req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8404 		*num_entries = cpu_to_le32(ctx_pg->entries);
8405 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8406 	}
8407 	req->flags = cpu_to_le32(flags);
8408 	return hwrm_req_send(bp, req);
8409 }
8410 
8411 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8412 				  struct bnxt_ctx_pg_info *ctx_pg)
8413 {
8414 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8415 
8416 	rmem->page_size = BNXT_PAGE_SIZE;
8417 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
8418 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
8419 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8420 	if (rmem->depth >= 1)
8421 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8422 	return bnxt_alloc_ring(bp, rmem);
8423 }
8424 
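/* Allocate the backing pages for one context memory area.  Areas that fit
 * within MAX_CTX_PAGES pages use a single block (at most one level of page
 * tables); larger areas, or callers requesting depth > 1, get a two-level
 * layout where each ctx_pg_tbl[] entry covers up to MAX_CTX_PAGES data
 * pages.
 */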
8425 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8426 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8427 				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
8428 {
8429 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8430 	int rc;
8431 
8432 	if (!mem_size)
8433 		return -EINVAL;
8434 
8435 	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8436 	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8437 		ctx_pg->nr_pages = 0;
8438 		return -EINVAL;
8439 	}
8440 	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8441 		int nr_tbls, i;
8442 
8443 		rmem->depth = 2;
8444 		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8445 					     GFP_KERNEL);
8446 		if (!ctx_pg->ctx_pg_tbl)
8447 			return -ENOMEM;
8448 		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8449 		rmem->nr_pages = nr_tbls;
8450 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8451 		if (rc)
8452 			return rc;
8453 		for (i = 0; i < nr_tbls; i++) {
8454 			struct bnxt_ctx_pg_info *pg_tbl;
8455 
8456 			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8457 			if (!pg_tbl)
8458 				return -ENOMEM;
8459 			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8460 			rmem = &pg_tbl->ring_mem;
8461 			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8462 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8463 			rmem->depth = 1;
8464 			rmem->nr_pages = MAX_CTX_PAGES;
8465 			rmem->ctx_mem = ctxm;
8466 			if (i == (nr_tbls - 1)) {
8467 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8468 
8469 				if (rem)
8470 					rmem->nr_pages = rem;
8471 			}
8472 			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8473 			if (rc)
8474 				break;
8475 		}
8476 	} else {
8477 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8478 		if (rmem->nr_pages > 1 || depth)
8479 			rmem->depth = 1;
8480 		rmem->ctx_mem = ctxm;
8481 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8482 	}
8483 	return rc;
8484 }
8485 
8486 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
8487 				  struct bnxt_ctx_pg_info *ctx_pg)
8488 {
8489 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8490 
8491 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
8492 	    ctx_pg->ctx_pg_tbl) {
8493 		int i, nr_tbls = rmem->nr_pages;
8494 
8495 		for (i = 0; i < nr_tbls; i++) {
8496 			struct bnxt_ctx_pg_info *pg_tbl;
8497 			struct bnxt_ring_mem_info *rmem2;
8498 
8499 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
8500 			if (!pg_tbl)
8501 				continue;
8502 			rmem2 = &pg_tbl->ring_mem;
8503 			bnxt_free_ring(bp, rmem2);
8504 			ctx_pg->ctx_pg_arr[i] = NULL;
8505 			kfree(pg_tbl);
8506 			ctx_pg->ctx_pg_tbl[i] = NULL;
8507 		}
8508 		kfree(ctx_pg->ctx_pg_tbl);
8509 		ctx_pg->ctx_pg_tbl = NULL;
8510 	}
8511 	bnxt_free_ring(bp, rmem);
8512 	ctx_pg->nr_pages = 0;
8513 }
8514 
8515 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
8516 				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
8517 				   u8 pg_lvl)
8518 {
8519 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8520 	int i, rc = 0, n = 1;
8521 	u32 mem_size;
8522 
8523 	if (!ctxm->entry_size || !ctx_pg)
8524 		return -EINVAL;
8525 	if (ctxm->instance_bmap)
8526 		n = hweight32(ctxm->instance_bmap);
8527 	if (ctxm->entry_multiple)
8528 		entries = roundup(entries, ctxm->entry_multiple);
8529 	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
8530 	mem_size = entries * ctxm->entry_size;
8531 	for (i = 0; i < n && !rc; i++) {
8532 		ctx_pg[i].entries = entries;
8533 		rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8534 					    ctxm->init_value ? ctxm : NULL);
8535 	}
8536 	return rc;
8537 }
8538 
8539 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
8540 					       struct bnxt_ctx_mem_type *ctxm,
8541 					       bool last)
8542 {
8543 	struct hwrm_func_backing_store_cfg_v2_input *req;
8544 	u32 instance_bmap = ctxm->instance_bmap;
8545 	int i, j, rc = 0, n = 1;
8546 	__le32 *p;
8547 
8548 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
8549 		return 0;
8550 
8551 	if (instance_bmap)
8552 		n = hweight32(ctxm->instance_bmap);
8553 	else
8554 		instance_bmap = 1;
8555 
8556 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8557 	if (rc)
8558 		return rc;
8559 	hwrm_req_hold(bp, req);
8560 	req->type = cpu_to_le16(ctxm->type);
8561 	req->entry_size = cpu_to_le16(ctxm->entry_size);
8562 	req->subtype_valid_cnt = ctxm->split_entry_cnt;
8563 	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
8564 		p[i] = cpu_to_le32(ctxm->split[i]);
8565 	for (i = 0, j = 0; j < n && !rc; i++) {
8566 		struct bnxt_ctx_pg_info *ctx_pg;
8567 
8568 		if (!(instance_bmap & (1 << i)))
8569 			continue;
8570 		req->instance = cpu_to_le16(i);
8571 		ctx_pg = &ctxm->pg_info[j++];
8572 		if (!ctx_pg->entries)
8573 			continue;
8574 		req->num_entries = cpu_to_le32(ctx_pg->entries);
8575 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8576 				      &req->page_size_pbl_level,
8577 				      &req->page_dir);
8578 		if (last && j == n)
8579 			req->flags =
8580 				cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
8581 		rc = hwrm_req_send(bp, req);
8582 	}
8583 	hwrm_req_drop(bp, req);
8584 	return rc;
8585 }
8586 
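/* Configure every context memory type with the firmware using the V2
 * interface.  The last applicable type is flagged so that its final request
 * carries BS_CFG_ALL_DONE, telling the firmware the backing store
 * configuration is complete.
 */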
8587 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
8588 {
8589 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8590 	struct bnxt_ctx_mem_type *ctxm;
8591 	u16 last_type;
8592 	int rc = 0;
8593 	u16 type;
8594 
8595 	if (!ena)
8596 		return 0;
8597 	else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
8598 		last_type = BNXT_CTX_MAX - 1;
8599 	else
8600 		last_type = BNXT_CTX_L2_MAX - 1;
8601 	ctx->ctx_arr[last_type].last = 1;
8602 
8603 	for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
8604 		ctxm = &ctx->ctx_arr[type];
8605 
8606 		rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
8607 		if (rc)
8608 			return rc;
8609 	}
8610 	return 0;
8611 }
8612 
8613 void bnxt_free_ctx_mem(struct bnxt *bp)
8614 {
8615 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8616 	u16 type;
8617 
8618 	if (!ctx)
8619 		return;
8620 
8621 	for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
8622 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8623 		struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8624 		int i, n = 1;
8625 
8626 		if (!ctx_pg)
8627 			continue;
8628 		if (ctxm->instance_bmap)
8629 			n = hweight32(ctxm->instance_bmap);
8630 		for (i = 0; i < n; i++)
8631 			bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
8632 
8633 		kfree(ctx_pg);
8634 		ctxm->pg_info = NULL;
8635 	}
8636 
8637 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
8638 	kfree(ctx);
8639 	bp->ctx = NULL;
8640 }
8641 
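/* Size and allocate all firmware backing store context memory (QP, SRQ, CQ,
 * VNIC, stats and TQM, plus MRAV and TIM when RoCE is supported), then hand
 * the resulting layout to the firmware through the legacy or V2 backing
 * store configuration call.
 */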
8642 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
8643 {
8644 	struct bnxt_ctx_mem_type *ctxm;
8645 	struct bnxt_ctx_mem_info *ctx;
8646 	u32 l2_qps, qp1_qps, max_qps;
8647 	u32 ena, entries_sp, entries;
8648 	u32 srqs, max_srqs, min;
8649 	u32 num_mr, num_ah;
8650 	u32 extra_srqs = 0;
8651 	u32 extra_qps = 0;
8652 	u32 fast_qpmd_qps;
8653 	u8 pg_lvl = 1;
8654 	int i, rc;
8655 
8656 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
8657 	if (rc) {
8658 		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
8659 			   rc);
8660 		return rc;
8661 	}
8662 	ctx = bp->ctx;
8663 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
8664 		return 0;
8665 
8666 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8667 	l2_qps = ctxm->qp_l2_entries;
8668 	qp1_qps = ctxm->qp_qp1_entries;
8669 	fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
8670 	max_qps = ctxm->max_entries;
8671 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8672 	srqs = ctxm->srq_l2_entries;
8673 	max_srqs = ctxm->max_entries;
8674 	ena = 0;
8675 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
8676 		pg_lvl = 2;
8677 		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
8678 		/* allocate extra qps if fw supports RoCE fast qp destroy feature */
8679 		extra_qps += fast_qpmd_qps;
8680 		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
8681 		if (fast_qpmd_qps)
8682 			ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
8683 	}
8684 
8685 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8686 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
8687 				     pg_lvl);
8688 	if (rc)
8689 		return rc;
8690 
8691 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8692 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
8693 	if (rc)
8694 		return rc;
8695 
8696 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8697 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
8698 				     extra_qps * 2, pg_lvl);
8699 	if (rc)
8700 		return rc;
8701 
8702 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8703 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8704 	if (rc)
8705 		return rc;
8706 
8707 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8708 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8709 	if (rc)
8710 		return rc;
8711 
8712 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
8713 		goto skip_rdma;
8714 
8715 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8716 	/* 128K extra is needed to accommodate static AH context
8717 	 * allocation by f/w.
8718 	 */
8719 	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
8720 	num_ah = min_t(u32, num_mr, 1024 * 128);
8721 	ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
8722 	if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
8723 		ctxm->mrav_av_entries = num_ah;
8724 
8725 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
8726 	if (rc)
8727 		return rc;
8728 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
8729 
8730 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8731 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
8732 	if (rc)
8733 		return rc;
8734 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
8735 
8736 skip_rdma:
8737 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8738 	min = ctxm->min_entries;
8739 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
8740 		     2 * (extra_qps + qp1_qps) + min;
8741 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
8742 	if (rc)
8743 		return rc;
8744 
8745 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8746 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
8747 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
8748 	if (rc)
8749 		return rc;
8750 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
8751 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
8752 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
8753 
8754 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8755 		rc = bnxt_backing_store_cfg_v2(bp, ena);
8756 	else
8757 		rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
8758 	if (rc) {
8759 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
8760 			   rc);
8761 		return rc;
8762 	}
8763 	ctx->flags |= BNXT_CTX_FLAG_INITED;
8764 	return 0;
8765 }
8766 
8767 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
8768 {
8769 	struct hwrm_func_resource_qcaps_output *resp;
8770 	struct hwrm_func_resource_qcaps_input *req;
8771 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8772 	int rc;
8773 
8774 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
8775 	if (rc)
8776 		return rc;
8777 
8778 	req->fid = cpu_to_le16(0xffff);
8779 	resp = hwrm_req_hold(bp, req);
8780 	rc = hwrm_req_send_silent(bp, req);
8781 	if (rc)
8782 		goto hwrm_func_resc_qcaps_exit;
8783 
8784 	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
8785 	if (!all)
8786 		goto hwrm_func_resc_qcaps_exit;
8787 
8788 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
8789 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
8790 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
8791 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
8792 	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
8793 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
8794 	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
8795 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
8796 	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
8797 	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
8798 	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
8799 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
8800 	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
8801 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
8802 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
8803 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
8804 
8805 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8806 		u16 max_msix = le16_to_cpu(resp->max_msix);
8807 
8808 		hw_resc->max_nqs = max_msix;
8809 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
8810 	}
8811 
8812 	if (BNXT_PF(bp)) {
8813 		struct bnxt_pf_info *pf = &bp->pf;
8814 
8815 		pf->vf_resv_strategy =
8816 			le16_to_cpu(resp->vf_reservation_strategy);
8817 		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
8818 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
8819 	}
8820 hwrm_func_resc_qcaps_exit:
8821 	hwrm_req_drop(bp, req);
8822 	return rc;
8823 }
8824 
8825 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
8826 {
8827 	struct hwrm_port_mac_ptp_qcfg_output *resp;
8828 	struct hwrm_port_mac_ptp_qcfg_input *req;
8829 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
8830 	bool phc_cfg;
8831 	u8 flags;
8832 	int rc;
8833 
8834 	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
8835 		rc = -ENODEV;
8836 		goto no_ptp;
8837 	}
8838 
8839 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
8840 	if (rc)
8841 		goto no_ptp;
8842 
8843 	req->port_id = cpu_to_le16(bp->pf.port_id);
8844 	resp = hwrm_req_hold(bp, req);
8845 	rc = hwrm_req_send(bp, req);
8846 	if (rc)
8847 		goto exit;
8848 
8849 	flags = resp->flags;
8850 	if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
8851 		rc = -ENODEV;
8852 		goto exit;
8853 	}
8854 	if (!ptp) {
8855 		ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
8856 		if (!ptp) {
8857 			rc = -ENOMEM;
8858 			goto exit;
8859 		}
8860 		ptp->bp = bp;
8861 		bp->ptp_cfg = ptp;
8862 	}
8863 	if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
8864 		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
8865 		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
8866 	} else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8867 		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
8868 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
8869 	} else {
8870 		rc = -ENODEV;
8871 		goto exit;
8872 	}
8873 	phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
8874 	rc = bnxt_ptp_init(bp, phc_cfg);
8875 	if (rc)
8876 		netdev_warn(bp->dev, "PTP initialization failed.\n");
8877 exit:
8878 	hwrm_req_drop(bp, req);
8879 	if (!rc)
8880 		return 0;
8881 
8882 no_ptp:
8883 	bnxt_ptp_clear(bp);
8884 	kfree(ptp);
8885 	bp->ptp_cfg = NULL;
8886 	return rc;
8887 }
8888 
8889 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
8890 {
8891 	struct hwrm_func_qcaps_output *resp;
8892 	struct hwrm_func_qcaps_input *req;
8893 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8894 	u32 flags, flags_ext, flags_ext2;
8895 	int rc;
8896 
8897 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
8898 	if (rc)
8899 		return rc;
8900 
8901 	req->fid = cpu_to_le16(0xffff);
8902 	resp = hwrm_req_hold(bp, req);
8903 	rc = hwrm_req_send(bp, req);
8904 	if (rc)
8905 		goto hwrm_func_qcaps_exit;
8906 
8907 	flags = le32_to_cpu(resp->flags);
8908 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
8909 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
8910 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
8911 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
8912 	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
8913 		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
8914 	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
8915 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
8916 	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
8917 		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
8918 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
8919 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
8920 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
8921 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
8922 	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
8923 		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
8924 	if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
8925 		bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
8926 
8927 	flags_ext = le32_to_cpu(resp->flags_ext);
8928 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
8929 		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
8930 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
8931 		bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
8932 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
8933 		bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
8934 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
8935 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
8936 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
8937 		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
8938 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
8939 		bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
8940 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
8941 		bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
8942 
8943 	flags_ext2 = le32_to_cpu(resp->flags_ext2);
8944 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
8945 		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
8946 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
8947 		bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
8948 
8949 	bp->tx_push_thresh = 0;
8950 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
8951 	    BNXT_FW_MAJ(bp) > 217)
8952 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
8953 
8954 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
8955 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
8956 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
8957 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
8958 	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
8959 	if (!hw_resc->max_hw_ring_grps)
8960 		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
8961 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
8962 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
8963 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
8964 
8965 	hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
8966 	hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
8967 	hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
8968 	hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
8969 	hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
8970 	hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
8971 
8972 	if (BNXT_PF(bp)) {
8973 		struct bnxt_pf_info *pf = &bp->pf;
8974 
8975 		pf->fw_fid = le16_to_cpu(resp->fid);
8976 		pf->port_id = le16_to_cpu(resp->port_id);
8977 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
8978 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
8979 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
8980 		bp->flags &= ~BNXT_FLAG_WOL_CAP;
8981 		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
8982 			bp->flags |= BNXT_FLAG_WOL_CAP;
8983 		if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
8984 			bp->fw_cap |= BNXT_FW_CAP_PTP;
8985 		} else {
8986 			bnxt_ptp_clear(bp);
8987 			kfree(bp->ptp_cfg);
8988 			bp->ptp_cfg = NULL;
8989 		}
8990 	} else {
8991 #ifdef CONFIG_BNXT_SRIOV
8992 		struct bnxt_vf_info *vf = &bp->vf;
8993 
8994 		vf->fw_fid = le16_to_cpu(resp->fid);
8995 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
8996 #endif
8997 	}
8998 	bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
8999 
9000 hwrm_func_qcaps_exit:
9001 	hwrm_req_drop(bp, req);
9002 	return rc;
9003 }
9004 
9005 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9006 {
9007 	struct hwrm_dbg_qcaps_output *resp;
9008 	struct hwrm_dbg_qcaps_input *req;
9009 	int rc;
9010 
9011 	bp->fw_dbg_cap = 0;
9012 	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9013 		return;
9014 
9015 	rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9016 	if (rc)
9017 		return;
9018 
9019 	req->fid = cpu_to_le16(0xffff);
9020 	resp = hwrm_req_hold(bp, req);
9021 	rc = hwrm_req_send(bp, req);
9022 	if (rc)
9023 		goto hwrm_dbg_qcaps_exit;
9024 
9025 	bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9026 
9027 hwrm_dbg_qcaps_exit:
9028 	hwrm_req_drop(bp, req);
9029 }
9030 
9031 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9032 
9033 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9034 {
9035 	int rc;
9036 
9037 	rc = __bnxt_hwrm_func_qcaps(bp);
9038 	if (rc)
9039 		return rc;
9040 
9041 	bnxt_hwrm_dbg_qcaps(bp);
9042 
9043 	rc = bnxt_hwrm_queue_qportcfg(bp);
9044 	if (rc) {
9045 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9046 		return rc;
9047 	}
9048 	if (bp->hwrm_spec_code >= 0x10803) {
9049 		rc = bnxt_alloc_ctx_mem(bp);
9050 		if (rc)
9051 			return rc;
9052 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9053 		if (!rc)
9054 			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9055 	}
9056 	return 0;
9057 }
9058 
9059 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9060 {
9061 	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9062 	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9063 	u32 flags;
9064 	int rc;
9065 
9066 	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9067 		return 0;
9068 
9069 	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9070 	if (rc)
9071 		return rc;
9072 
9073 	resp = hwrm_req_hold(bp, req);
9074 	rc = hwrm_req_send(bp, req);
9075 	if (rc)
9076 		goto hwrm_cfa_adv_qcaps_exit;
9077 
9078 	flags = le32_to_cpu(resp->flags);
9079 	if (flags &
9080 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9081 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9082 
9083 	if (flags &
9084 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9085 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9086 
9087 	if (flags &
9088 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9089 		bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9090 
9091 hwrm_cfa_adv_qcaps_exit:
9092 	hwrm_req_drop(bp, req);
9093 	return rc;
9094 }
9095 
9096 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9097 {
9098 	if (bp->fw_health)
9099 		return 0;
9100 
9101 	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9102 	if (!bp->fw_health)
9103 		return -ENOMEM;
9104 
9105 	mutex_init(&bp->fw_health->lock);
9106 	return 0;
9107 }
9108 
9109 static int bnxt_alloc_fw_health(struct bnxt *bp)
9110 {
9111 	int rc;
9112 
9113 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9114 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9115 		return 0;
9116 
9117 	rc = __bnxt_alloc_fw_health(bp);
9118 	if (rc) {
9119 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9120 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9121 		return rc;
9122 	}
9123 
9124 	return 0;
9125 }
9126 
9127 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
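/* Point a BAR0 GRC window at the register block containing @reg (the block
 * is selected by BNXT_GRC_BASE_MASK) so that registers in that block can be
 * read back through the fixed BNXT_FW_HEALTH_WIN_OFF() offsets.
 */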
9128 {
9129 	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9130 					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9131 					 BNXT_FW_HEALTH_WIN_MAP_OFF);
9132 }
9133 
9134 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9135 {
9136 	struct bnxt_fw_health *fw_health = bp->fw_health;
9137 	u32 reg_type;
9138 
9139 	if (!fw_health)
9140 		return;
9141 
9142 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9143 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9144 		fw_health->status_reliable = false;
9145 
9146 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9147 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9148 		fw_health->resets_reliable = false;
9149 }
9150 
9151 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9152 {
9153 	void __iomem *hs;
9154 	u32 status_loc;
9155 	u32 reg_type;
9156 	u32 sig;
9157 
9158 	if (bp->fw_health)
9159 		bp->fw_health->status_reliable = false;
9160 
9161 	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9162 	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9163 
9164 	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9165 	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9166 		if (!bp->chip_num) {
9167 			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9168 			bp->chip_num = readl(bp->bar0 +
9169 					     BNXT_FW_HEALTH_WIN_BASE +
9170 					     BNXT_GRC_REG_CHIP_NUM);
9171 		}
9172 		if (!BNXT_CHIP_P5_PLUS(bp))
9173 			return;
9174 
9175 		status_loc = BNXT_GRC_REG_STATUS_P5 |
9176 			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
9177 	} else {
9178 		status_loc = readl(hs + offsetof(struct hcomm_status,
9179 						 fw_status_loc));
9180 	}
9181 
9182 	if (__bnxt_alloc_fw_health(bp)) {
9183 		netdev_warn(bp->dev, "no memory for firmware status checks\n");
9184 		return;
9185 	}
9186 
9187 	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9188 	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9189 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9190 		__bnxt_map_fw_health_reg(bp, status_loc);
9191 		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9192 			BNXT_FW_HEALTH_WIN_OFF(status_loc);
9193 	}
9194 
9195 	bp->fw_health->status_reliable = true;
9196 }
9197 
9198 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9199 {
9200 	struct bnxt_fw_health *fw_health = bp->fw_health;
9201 	u32 reg_base = 0xffffffff;
9202 	int i;
9203 
9204 	bp->fw_health->status_reliable = false;
9205 	bp->fw_health->resets_reliable = false;
9206 	/* Only pre-map the monitoring GRC registers using window 3 */
9207 	for (i = 0; i < 4; i++) {
9208 		u32 reg = fw_health->regs[i];
9209 
9210 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9211 			continue;
9212 		if (reg_base == 0xffffffff)
9213 			reg_base = reg & BNXT_GRC_BASE_MASK;
9214 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9215 			return -ERANGE;
9216 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9217 	}
9218 	bp->fw_health->status_reliable = true;
9219 	bp->fw_health->resets_reliable = true;
9220 	if (reg_base == 0xffffffff)
9221 		return 0;
9222 
9223 	__bnxt_map_fw_health_reg(bp, reg_base);
9224 	return 0;
9225 }
9226 
9227 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9228 {
9229 	if (!bp->fw_health)
9230 		return;
9231 
9232 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9233 		bp->fw_health->status_reliable = true;
9234 		bp->fw_health->resets_reliable = true;
9235 	} else {
9236 		bnxt_try_map_fw_health_reg(bp);
9237 	}
9238 }
9239 
9240 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9241 {
9242 	struct bnxt_fw_health *fw_health = bp->fw_health;
9243 	struct hwrm_error_recovery_qcfg_output *resp;
9244 	struct hwrm_error_recovery_qcfg_input *req;
9245 	int rc, i;
9246 
9247 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9248 		return 0;
9249 
9250 	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9251 	if (rc)
9252 		return rc;
9253 
9254 	resp = hwrm_req_hold(bp, req);
9255 	rc = hwrm_req_send(bp, req);
9256 	if (rc)
9257 		goto err_recovery_out;
9258 	fw_health->flags = le32_to_cpu(resp->flags);
9259 	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
9260 	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
9261 		rc = -EINVAL;
9262 		goto err_recovery_out;
9263 	}
9264 	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
9265 	fw_health->master_func_wait_dsecs =
9266 		le32_to_cpu(resp->master_func_wait_period);
9267 	fw_health->normal_func_wait_dsecs =
9268 		le32_to_cpu(resp->normal_func_wait_period);
9269 	fw_health->post_reset_wait_dsecs =
9270 		le32_to_cpu(resp->master_func_wait_period_after_reset);
9271 	fw_health->post_reset_max_wait_dsecs =
9272 		le32_to_cpu(resp->max_bailout_time_after_reset);
9273 	fw_health->regs[BNXT_FW_HEALTH_REG] =
9274 		le32_to_cpu(resp->fw_health_status_reg);
9275 	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
9276 		le32_to_cpu(resp->fw_heartbeat_reg);
9277 	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
9278 		le32_to_cpu(resp->fw_reset_cnt_reg);
9279 	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
9280 		le32_to_cpu(resp->reset_inprogress_reg);
9281 	fw_health->fw_reset_inprog_reg_mask =
9282 		le32_to_cpu(resp->reset_inprogress_reg_mask);
9283 	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
9284 	if (fw_health->fw_reset_seq_cnt >= 16) {
9285 		rc = -EINVAL;
9286 		goto err_recovery_out;
9287 	}
9288 	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
9289 		fw_health->fw_reset_seq_regs[i] =
9290 			le32_to_cpu(resp->reset_reg[i]);
9291 		fw_health->fw_reset_seq_vals[i] =
9292 			le32_to_cpu(resp->reset_reg_val[i]);
9293 		fw_health->fw_reset_seq_delay_msec[i] =
9294 			resp->delay_after_reset[i];
9295 	}
9296 err_recovery_out:
9297 	hwrm_req_drop(bp, req);
9298 	if (!rc)
9299 		rc = bnxt_map_fw_health_regs(bp);
9300 	if (rc)
9301 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9302 	return rc;
9303 }
9304 
9305 static int bnxt_hwrm_func_reset(struct bnxt *bp)
9306 {
9307 	struct hwrm_func_reset_input *req;
9308 	int rc;
9309 
9310 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9311 	if (rc)
9312 		return rc;
9313 
9314 	req->enables = 0;
9315 	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
9316 	return hwrm_req_send(bp, req);
9317 }
9318 
9319 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
9320 {
9321 	struct hwrm_nvm_get_dev_info_output nvm_info;
9322 
9323 	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
9324 		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9325 			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
9326 			 nvm_info.nvm_cfg_ver_upd);
9327 }
9328 
9329 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
9330 {
9331 	struct hwrm_queue_qportcfg_output *resp;
9332 	struct hwrm_queue_qportcfg_input *req;
9333 	u8 i, j, *qptr;
9334 	bool no_rdma;
9335 	int rc = 0;
9336 
9337 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9338 	if (rc)
9339 		return rc;
9340 
9341 	resp = hwrm_req_hold(bp, req);
9342 	rc = hwrm_req_send(bp, req);
9343 	if (rc)
9344 		goto qportcfg_exit;
9345 
9346 	if (!resp->max_configurable_queues) {
9347 		rc = -EINVAL;
9348 		goto qportcfg_exit;
9349 	}
9350 	bp->max_tc = resp->max_configurable_queues;
9351 	bp->max_lltc = resp->max_configurable_lossless_queues;
9352 	if (bp->max_tc > BNXT_MAX_QUEUE)
9353 		bp->max_tc = BNXT_MAX_QUEUE;
9354 
9355 	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
9356 	qptr = &resp->queue_id0;
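	/* The response lays out the queue ID and service profile bytes back
	 * to back for each queue, so qptr is advanced in pairs below.
	 */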
9357 	for (i = 0, j = 0; i < bp->max_tc; i++) {
9358 		bp->q_info[j].queue_id = *qptr;
9359 		bp->q_ids[i] = *qptr++;
9360 		bp->q_info[j].queue_profile = *qptr++;
9361 		bp->tc_to_qidx[j] = j;
9362 		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
9363 		    (no_rdma && BNXT_PF(bp)))
9364 			j++;
9365 	}
9366 	bp->max_q = bp->max_tc;
9367 	bp->max_tc = max_t(u8, j, 1);
9368 
9369 	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
9370 		bp->max_tc = 1;
9371 
9372 	if (bp->max_lltc > bp->max_tc)
9373 		bp->max_lltc = bp->max_tc;
9374 
9375 qportcfg_exit:
9376 	hwrm_req_drop(bp, req);
9377 	return rc;
9378 }
9379 
9380 static int bnxt_hwrm_poll(struct bnxt *bp)
9381 {
9382 	struct hwrm_ver_get_input *req;
9383 	int rc;
9384 
9385 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9386 	if (rc)
9387 		return rc;
9388 
9389 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9390 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
9391 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9392 
9393 	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
9394 	rc = hwrm_req_send(bp, req);
9395 	return rc;
9396 }
9397 
9398 static int bnxt_hwrm_ver_get(struct bnxt *bp)
9399 {
9400 	struct hwrm_ver_get_output *resp;
9401 	struct hwrm_ver_get_input *req;
9402 	u16 fw_maj, fw_min, fw_bld, fw_rsv;
9403 	u32 dev_caps_cfg, hwrm_ver;
9404 	int rc, len;
9405 
9406 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9407 	if (rc)
9408 		return rc;
9409 
9410 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9411 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
9412 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9413 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
9414 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9415 
9416 	resp = hwrm_req_hold(bp, req);
9417 	rc = hwrm_req_send(bp, req);
9418 	if (rc)
9419 		goto hwrm_ver_get_exit;
9420 
9421 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
9422 
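	/* The spec code packs major/minor/update into one value; e.g. HWRM
	 * 1.8.1 becomes 0x10801, the form used by the hwrm_spec_code
	 * comparisons throughout the driver.
	 */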
9423 	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
9424 			     resp->hwrm_intf_min_8b << 8 |
9425 			     resp->hwrm_intf_upd_8b;
9426 	if (resp->hwrm_intf_maj_8b < 1) {
9427 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
9428 			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9429 			    resp->hwrm_intf_upd_8b);
9430 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
9431 	}
9432 
9433 	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
9434 			HWRM_VERSION_UPDATE;
9435 
9436 	if (bp->hwrm_spec_code > hwrm_ver)
9437 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9438 			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
9439 			 HWRM_VERSION_UPDATE);
9440 	else
9441 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9442 			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9443 			 resp->hwrm_intf_upd_8b);
9444 
9445 	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
9446 	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
9447 		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
9448 		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
9449 		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
9450 		len = FW_VER_STR_LEN;
9451 	} else {
9452 		fw_maj = resp->hwrm_fw_maj_8b;
9453 		fw_min = resp->hwrm_fw_min_8b;
9454 		fw_bld = resp->hwrm_fw_bld_8b;
9455 		fw_rsv = resp->hwrm_fw_rsvd_8b;
9456 		len = BC_HWRM_STR_LEN;
9457 	}
9458 	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
9459 	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
9460 		 fw_rsv);
9461 
9462 	if (strlen(resp->active_pkg_name)) {
9463 		int fw_ver_len = strlen(bp->fw_ver_str);
9464 
9465 		snprintf(bp->fw_ver_str + fw_ver_len,
9466 			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
9467 			 resp->active_pkg_name);
9468 		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
9469 	}
9470 
9471 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
9472 	if (!bp->hwrm_cmd_timeout)
9473 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
9474 	bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
9475 	if (!bp->hwrm_cmd_max_timeout)
9476 		bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
9477 	else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
9478 		netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
9479 			    bp->hwrm_cmd_max_timeout / 1000);
9480 
9481 	if (resp->hwrm_intf_maj_8b >= 1) {
9482 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
9483 		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
9484 	}
9485 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
9486 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
9487 
9488 	bp->chip_num = le16_to_cpu(resp->chip_num);
9489 	bp->chip_rev = resp->chip_rev;
9490 	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
9491 	    !resp->chip_metal)
9492 		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
9493 
9494 	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
9495 	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
9496 	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
9497 		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
9498 
9499 	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
9500 		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
9501 
9502 	if (dev_caps_cfg &
9503 	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
9504 		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
9505 
9506 	if (dev_caps_cfg &
9507 	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
9508 		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
9509 
9510 	if (dev_caps_cfg &
9511 	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
9512 		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
9513 
9514 hwrm_ver_get_exit:
9515 	hwrm_req_drop(bp, req);
9516 	return rc;
9517 }
9518 
9519 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
9520 {
9521 	struct hwrm_fw_set_time_input *req;
9522 	struct tm tm;
9523 	time64_t now = ktime_get_real_seconds();
9524 	int rc;
9525 
9526 	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
9527 	    bp->hwrm_spec_code < 0x10400)
9528 		return -EOPNOTSUPP;
9529 
9530 	time64_to_tm(now, 0, &tm);
9531 	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
9532 	if (rc)
9533 		return rc;
9534 
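	/* struct tm counts years from 1900 and months from 0, hence the
	 * offsets applied below.
	 */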
9535 	req->year = cpu_to_le16(1900 + tm.tm_year);
9536 	req->month = 1 + tm.tm_mon;
9537 	req->day = tm.tm_mday;
9538 	req->hour = tm.tm_hour;
9539 	req->minute = tm.tm_min;
9540 	req->second = tm.tm_sec;
9541 	return hwrm_req_send(bp, req);
9542 }
9543 
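/* Fold a narrow, wrapping hardware counter into a monotonic 64-bit software
 * counter.  Only the bits covered by @mask come from hardware; the upper bits
 * are carried in *sw and bumped by (mask + 1) whenever the hardware value is
 * seen to wrap.  For example, with mask 0xffffff, *sw 0x01fffffe and a new
 * hardware reading of 0x000005, the result is 0x02000005.
 */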
9544 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
9545 {
9546 	u64 sw_tmp;
9547 
9548 	hw &= mask;
9549 	sw_tmp = (*sw & ~mask) | hw;
9550 	if (hw < (*sw & mask))
9551 		sw_tmp += mask + 1;
9552 	WRITE_ONCE(*sw, sw_tmp);
9553 }
9554 
9555 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
9556 				    int count, bool ignore_zero)
9557 {
9558 	int i;
9559 
9560 	for (i = 0; i < count; i++) {
9561 		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
9562 
9563 		if (ignore_zero && !hw)
9564 			continue;
9565 
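		/* An all-ones mask marks a counter that is effectively already
		 * full width, so the hardware value is copied straight through
		 * instead of being accumulated.
		 */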
9566 		if (masks[i] == -1ULL)
9567 			sw_stats[i] = hw;
9568 		else
9569 			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
9570 	}
9571 }
9572 
9573 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
9574 {
9575 	if (!stats->hw_stats)
9576 		return;
9577 
9578 	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9579 				stats->hw_masks, stats->len / 8, false);
9580 }
9581 
9582 static void bnxt_accumulate_all_stats(struct bnxt *bp)
9583 {
9584 	struct bnxt_stats_mem *ring0_stats;
9585 	bool ignore_zero = false;
9586 	int i;
9587 
9588 	/* Chip bug.  Counter intermittently becomes 0. */
9589 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9590 		ignore_zero = true;
9591 
9592 	for (i = 0; i < bp->cp_nr_rings; i++) {
9593 		struct bnxt_napi *bnapi = bp->bnapi[i];
9594 		struct bnxt_cp_ring_info *cpr;
9595 		struct bnxt_stats_mem *stats;
9596 
9597 		cpr = &bnapi->cp_ring;
9598 		stats = &cpr->stats;
9599 		if (!i)
9600 			ring0_stats = stats;
9601 		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9602 					ring0_stats->hw_masks,
9603 					ring0_stats->len / 8, ignore_zero);
9604 	}
9605 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
9606 		struct bnxt_stats_mem *stats = &bp->port_stats;
9607 		__le64 *hw_stats = stats->hw_stats;
9608 		u64 *sw_stats = stats->sw_stats;
9609 		u64 *masks = stats->hw_masks;
9610 		int cnt;
9611 
9612 		cnt = sizeof(struct rx_port_stats) / 8;
9613 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9614 
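		/* The TX counters start BNXT_TX_PORT_STATS_BYTE_OFFSET bytes
		 * into the same stats block; the pointers step in 8-byte
		 * counters, hence the division by 8.
		 */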
9615 		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9616 		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9617 		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9618 		cnt = sizeof(struct tx_port_stats) / 8;
9619 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9620 	}
9621 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
9622 		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
9623 		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
9624 	}
9625 }
9626 
9627 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
9628 {
9629 	struct hwrm_port_qstats_input *req;
9630 	struct bnxt_pf_info *pf = &bp->pf;
9631 	int rc;
9632 
9633 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
9634 		return 0;
9635 
9636 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9637 		return -EOPNOTSUPP;
9638 
9639 	rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
9640 	if (rc)
9641 		return rc;
9642 
9643 	req->flags = flags;
9644 	req->port_id = cpu_to_le16(pf->port_id);
9645 	req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
9646 					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
9647 	req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
9648 	return hwrm_req_send(bp, req);
9649 }
9650 
9651 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
9652 {
9653 	struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
9654 	struct hwrm_queue_pri2cos_qcfg_input *req_qc;
9655 	struct hwrm_port_qstats_ext_output *resp_qs;
9656 	struct hwrm_port_qstats_ext_input *req_qs;
9657 	struct bnxt_pf_info *pf = &bp->pf;
9658 	u32 tx_stat_size;
9659 	int rc;
9660 
9661 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
9662 		return 0;
9663 
9664 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9665 		return -EOPNOTSUPP;
9666 
9667 	rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
9668 	if (rc)
9669 		return rc;
9670 
9671 	req_qs->flags = flags;
9672 	req_qs->port_id = cpu_to_le16(pf->port_id);
9673 	req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
9674 	req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
9675 	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
9676 		       sizeof(struct tx_port_stats_ext) : 0;
9677 	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
9678 	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
9679 	resp_qs = hwrm_req_hold(bp, req_qs);
9680 	rc = hwrm_req_send(bp, req_qs);
9681 	if (!rc) {
9682 		bp->fw_rx_stats_ext_size =
9683 			le16_to_cpu(resp_qs->rx_stat_size) / 8;
9684 		if (BNXT_FW_MAJ(bp) < 220 &&
9685 		    bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
9686 			bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
9687 
9688 		bp->fw_tx_stats_ext_size = tx_stat_size ?
9689 			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
9690 	} else {
9691 		bp->fw_rx_stats_ext_size = 0;
9692 		bp->fw_tx_stats_ext_size = 0;
9693 	}
9694 	hwrm_req_drop(bp, req_qs);
9695 
9696 	if (flags)
9697 		return rc;
9698 
9699 	if (bp->fw_tx_stats_ext_size <=
9700 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
9701 		bp->pri2cos_valid = 0;
9702 		return rc;
9703 	}
9704 
9705 	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
9706 	if (rc)
9707 		return rc;
9708 
9709 	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
9710 
9711 	resp_qc = hwrm_req_hold(bp, req_qc);
9712 	rc = hwrm_req_send(bp, req_qc);
9713 	if (!rc) {
9714 		u8 *pri2cos;
9715 		int i, j;
9716 
9717 		pri2cos = &resp_qc->pri0_cos_queue_id;
9718 		for (i = 0; i < 8; i++) {
9719 			u8 queue_id = pri2cos[i];
9720 			u8 queue_idx;
9721 
9722 			/* Per port queue IDs start from 0, 10, 20, etc */
9723 			queue_idx = queue_id % 10;
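			/* e.g. queue_id 23 falls in the group that starts at
			 * 20 and maps to per-port index 3.
			 */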
9724 			if (queue_idx > BNXT_MAX_QUEUE) {
9725 				bp->pri2cos_valid = false;
9726 				hwrm_req_drop(bp, req_qc);
9727 				return rc;
9728 			}
9729 			for (j = 0; j < bp->max_q; j++) {
9730 				if (bp->q_ids[j] == queue_id)
9731 					bp->pri2cos_idx[i] = queue_idx;
9732 			}
9733 		}
9734 		bp->pri2cos_valid = true;
9735 	}
9736 	hwrm_req_drop(bp, req_qc);
9737 
9738 	return rc;
9739 }
9740 
9741 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
9742 {
9743 	bnxt_hwrm_tunnel_dst_port_free(bp,
9744 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9745 	bnxt_hwrm_tunnel_dst_port_free(bp,
9746 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9747 }
9748 
9749 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
9750 {
9751 	int rc, i;
9752 	u32 tpa_flags = 0;
9753 
9754 	if (set_tpa)
9755 		tpa_flags = bp->flags & BNXT_FLAG_TPA;
9756 	else if (BNXT_NO_FW_ACCESS(bp))
9757 		return 0;
9758 	for (i = 0; i < bp->nr_vnics; i++) {
9759 		rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
9760 		if (rc) {
9761 			netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
9762 				   i, rc);
9763 			return rc;
9764 		}
9765 	}
9766 	return 0;
9767 }
9768 
9769 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
9770 {
9771 	int i;
9772 
9773 	for (i = 0; i < bp->nr_vnics; i++)
9774 		bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
9775 }
9776 
9777 static void bnxt_clear_vnic(struct bnxt *bp)
9778 {
9779 	if (!bp->vnic_info)
9780 		return;
9781 
9782 	bnxt_hwrm_clear_vnic_filter(bp);
9783 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
9784 		/* clear all RSS settings before freeing the vnic ctx */
9785 		bnxt_hwrm_clear_vnic_rss(bp);
9786 		bnxt_hwrm_vnic_ctx_free(bp);
9787 	}
9788 	/* before freeing the vnic, undo the vnic tpa settings */
9789 	if (bp->flags & BNXT_FLAG_TPA)
9790 		bnxt_set_tpa(bp, false);
9791 	bnxt_hwrm_vnic_free(bp);
9792 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9793 		bnxt_hwrm_vnic_ctx_free(bp);
9794 }
9795 
9796 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
9797 				    bool irq_re_init)
9798 {
9799 	bnxt_clear_vnic(bp);
9800 	bnxt_hwrm_ring_free(bp, close_path);
9801 	bnxt_hwrm_ring_grp_free(bp);
9802 	if (irq_re_init) {
9803 		bnxt_hwrm_stat_ctx_free(bp);
9804 		bnxt_hwrm_free_tunnel_ports(bp);
9805 	}
9806 }
9807 
9808 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
9809 {
9810 	struct hwrm_func_cfg_input *req;
9811 	u8 evb_mode;
9812 	int rc;
9813 
9814 	if (br_mode == BRIDGE_MODE_VEB)
9815 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
9816 	else if (br_mode == BRIDGE_MODE_VEPA)
9817 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
9818 	else
9819 		return -EINVAL;
9820 
9821 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9822 	if (rc)
9823 		return rc;
9824 
9825 	req->fid = cpu_to_le16(0xffff);
9826 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
9827 	req->evb_mode = evb_mode;
9828 	return hwrm_req_send(bp, req);
9829 }
9830 
9831 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
9832 {
9833 	struct hwrm_func_cfg_input *req;
9834 	int rc;
9835 
9836 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
9837 		return 0;
9838 
9839 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9840 	if (rc)
9841 		return rc;
9842 
9843 	req->fid = cpu_to_le16(0xffff);
9844 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
9845 	req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
9846 	if (size == 128)
9847 		req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
9848 
9849 	return hwrm_req_send(bp, req);
9850 }
9851 
9852 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
9853 {
9854 	int rc;
9855 
9856 	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
9857 		goto skip_rss_ctx;
9858 
9859 	/* allocate context for vnic */
9860 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
9861 	if (rc) {
9862 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9863 			   vnic->vnic_id, rc);
9864 		goto vnic_setup_err;
9865 	}
9866 	bp->rsscos_nr_ctxs++;
9867 
9868 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9869 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
9870 		if (rc) {
9871 			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
9872 				   vnic->vnic_id, rc);
9873 			goto vnic_setup_err;
9874 		}
9875 		bp->rsscos_nr_ctxs++;
9876 	}
9877 
9878 skip_rss_ctx:
9879 	/* configure default vnic, ring grp */
9880 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
9881 	if (rc) {
9882 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9883 			   vnic->vnic_id, rc);
9884 		goto vnic_setup_err;
9885 	}
9886 
9887 	/* Enable RSS hashing on vnic */
9888 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
9889 	if (rc) {
9890 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
9891 			   vnic->vnic_id, rc);
9892 		goto vnic_setup_err;
9893 	}
9894 
9895 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9896 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
9897 		if (rc) {
9898 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9899 				   vnic->vnic_id, rc);
9900 		}
9901 	}
9902 
9903 vnic_setup_err:
9904 	return rc;
9905 }
9906 
9907 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
9908 {
9909 	int rc;
9910 
9911 	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
9912 	if (rc) {
9913 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
9914 			   vnic->vnic_id, rc);
9915 		return rc;
9916 	}
9917 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
9918 	if (rc)
9919 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9920 			   vnic->vnic_id, rc);
9921 	return rc;
9922 }
9923 
9924 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
9925 {
9926 	int rc, i, nr_ctxs;
9927 
9928 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
9929 	for (i = 0; i < nr_ctxs; i++) {
9930 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
9931 		if (rc) {
9932 			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
9933 				   vnic->vnic_id, i, rc);
9934 			break;
9935 		}
9936 		bp->rsscos_nr_ctxs++;
9937 	}
9938 	if (i < nr_ctxs)
9939 		return -ENOMEM;
9940 
9941 	rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
9942 	if (rc)
9943 		return rc;
9944 
9945 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9946 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
9947 		if (rc) {
9948 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9949 				   vnic->vnic_id, rc);
9950 		}
9951 	}
9952 	return rc;
9953 }
9954 
9955 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
9956 {
9957 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9958 		return __bnxt_setup_vnic_p5(bp, vnic);
9959 	else
9960 		return __bnxt_setup_vnic(bp, vnic);
9961 }
9962 
9963 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
9964 				     struct bnxt_vnic_info *vnic,
9965 				     u16 start_rx_ring_idx, int rx_rings)
9966 {
9967 	int rc;
9968 
9969 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
9970 	if (rc) {
9971 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9972 			   vnic->vnic_id, rc);
9973 		return rc;
9974 	}
9975 	return bnxt_setup_vnic(bp, vnic);
9976 }
9977 
9978 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
9979 {
9980 	struct bnxt_vnic_info *vnic;
9981 	int i, rc = 0;
9982 
9983 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
9984 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
9985 		return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
9986 	}
9987 
9988 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9989 		return 0;
9990 
9991 	for (i = 0; i < bp->rx_nr_rings; i++) {
9992 		u16 vnic_id = i + 1;
9993 		u16 ring_id = i;
9994 
9995 		if (vnic_id >= bp->nr_vnics)
9996 			break;
9997 
9998 		vnic = &bp->vnic_info[vnic_id];
9999 		vnic->flags |= BNXT_VNIC_RFS_FLAG;
10000 		if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10001 			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10002 		if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10003 			break;
10004 	}
10005 	return rc;
10006 }
10007 
10008 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10009 			  bool all)
10010 {
10011 	struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10012 	struct bnxt_filter_base *usr_fltr, *tmp;
10013 	struct bnxt_ntuple_filter *ntp_fltr;
10014 	int i;
10015 
10016 	bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10017 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10018 		if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10019 			bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10020 	}
10021 	if (!all)
10022 		return;
10023 
10024 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10025 		if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10026 		    usr_fltr->fw_vnic_id == rss_ctx->index) {
10027 			ntp_fltr = container_of(usr_fltr,
10028 						struct bnxt_ntuple_filter,
10029 						base);
10030 			bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10031 			bnxt_del_ntp_filter(bp, ntp_fltr);
10032 			bnxt_del_one_usr_fltr(bp, usr_fltr);
10033 		}
10034 	}
10035 
10036 	if (vnic->rss_table)
10037 		dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10038 				  vnic->rss_table,
10039 				  vnic->rss_table_dma_addr);
10040 	kfree(rss_ctx->rss_indir_tbl);
10041 	list_del(&rss_ctx->list);
10042 	bp->num_rss_ctx--;
10043 	clear_bit(rss_ctx->index, bp->rss_ctx_bmap);
10044 	kfree(rss_ctx);
10045 }
10046 
10047 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10048 {
10049 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10050 	struct bnxt_rss_ctx *rss_ctx, *tmp;
10051 
10052 	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
10053 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10054 
10055 		if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10056 		    bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10057 		    __bnxt_setup_vnic_p5(bp, vnic)) {
10058 			netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10059 				   rss_ctx->index);
10060 			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10061 		}
10062 	}
10063 }
10064 
10065 struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp)
10066 {
10067 	struct bnxt_rss_ctx *rss_ctx = NULL;
10068 
10069 	rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL);
10070 	if (rss_ctx) {
10071 		rss_ctx->vnic.rss_ctx = rss_ctx;
10072 		list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
10073 		bp->num_rss_ctx++;
10074 	}
10075 	return rss_ctx;
10076 }
10077 
10078 void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all)
10079 {
10080 	struct bnxt_rss_ctx *rss_ctx, *tmp;
10081 
10082 	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
10083 		bnxt_del_one_rss_ctx(bp, rss_ctx, all);
10084 
10085 	if (all)
10086 		bitmap_free(bp->rss_ctx_bmap);
10087 }
10088 
10089 static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
10090 {
10091 	bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL);
10092 	if (bp->rss_ctx_bmap) {
10093 		/* burn index 0 since we cannot have context 0 */
10094 		__set_bit(0, bp->rss_ctx_bmap);
10095 		INIT_LIST_HEAD(&bp->rss_ctx_list);
10096 		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
10097 	}
10098 }
10099 
10100 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10101 static bool bnxt_promisc_ok(struct bnxt *bp)
10102 {
10103 #ifdef CONFIG_BNXT_SRIOV
10104 	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10105 		return false;
10106 #endif
10107 	return true;
10108 }
10109 
10110 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10111 {
10112 	struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10113 	int rc;
10114 
10115 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10116 	if (rc) {
10117 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10118 			   rc);
10119 		return rc;
10120 	}
10121 
10122 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10123 	if (rc) {
10124 		netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
10125 			   rc);
10126 		return rc;
10127 	}
10128 	return rc;
10129 }
10130 
10131 static int bnxt_cfg_rx_mode(struct bnxt *);
10132 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10133 
10134 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10135 {
10136 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10137 	int rc = 0;
10138 	unsigned int rx_nr_rings = bp->rx_nr_rings;
10139 
10140 	if (irq_re_init) {
10141 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
10142 		if (rc) {
10143 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10144 				   rc);
10145 			goto err_out;
10146 		}
10147 	}
10148 
10149 	rc = bnxt_hwrm_ring_alloc(bp);
10150 	if (rc) {
10151 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
10152 		goto err_out;
10153 	}
10154 
10155 	rc = bnxt_hwrm_ring_grp_alloc(bp);
10156 	if (rc) {
10157 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
10158 		goto err_out;
10159 	}
10160 
10161 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10162 		rx_nr_rings--;
10163 
10164 	/* default vnic 0 */
10165 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
10166 	if (rc) {
10167 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
10168 		goto err_out;
10169 	}
10170 
10171 	if (BNXT_VF(bp))
10172 		bnxt_hwrm_func_qcfg(bp);
10173 
10174 	rc = bnxt_setup_vnic(bp, vnic);
10175 	if (rc)
10176 		goto err_out;
10177 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
10178 		bnxt_hwrm_update_rss_hash_cfg(bp);
10179 
10180 	if (bp->flags & BNXT_FLAG_RFS) {
10181 		rc = bnxt_alloc_rfs_vnics(bp);
10182 		if (rc)
10183 			goto err_out;
10184 	}
10185 
10186 	if (bp->flags & BNXT_FLAG_TPA) {
10187 		rc = bnxt_set_tpa(bp, true);
10188 		if (rc)
10189 			goto err_out;
10190 	}
10191 
10192 	if (BNXT_VF(bp))
10193 		bnxt_update_vf_mac(bp);
10194 
10195 	/* Filter for default vnic 0 */
10196 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10197 	if (rc) {
10198 		if (BNXT_VF(bp) && rc == -ENODEV)
10199 			netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
10200 		else
10201 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10202 		goto err_out;
10203 	}
10204 	vnic->uc_filter_count = 1;
10205 
10206 	vnic->rx_mask = 0;
10207 	if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
10208 		goto skip_rx_mask;
10209 
10210 	if (bp->dev->flags & IFF_BROADCAST)
10211 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10212 
10213 	if (bp->dev->flags & IFF_PROMISC)
10214 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10215 
10216 	if (bp->dev->flags & IFF_ALLMULTI) {
10217 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10218 		vnic->mc_list_count = 0;
10219 	} else if (bp->dev->flags & IFF_MULTICAST) {
10220 		u32 mask = 0;
10221 
10222 		bnxt_mc_list_updated(bp, &mask);
10223 		vnic->rx_mask |= mask;
10224 	}
10225 
10226 	rc = bnxt_cfg_rx_mode(bp);
10227 	if (rc)
10228 		goto err_out;
10229 
10230 skip_rx_mask:
10231 	rc = bnxt_hwrm_set_coal(bp);
10232 	if (rc)
10233 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
10234 				rc);
10235 
10236 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10237 		rc = bnxt_setup_nitroa0_vnic(bp);
10238 		if (rc)
10239 			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10240 				   rc);
10241 	}
10242 
10243 	if (BNXT_VF(bp)) {
10244 		bnxt_hwrm_func_qcfg(bp);
10245 		netdev_update_features(bp->dev);
10246 	}
10247 
10248 	return 0;
10249 
10250 err_out:
10251 	bnxt_hwrm_resource_free(bp, 0, true);
10252 
10253 	return rc;
10254 }
10255 
10256 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
10257 {
10258 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
10259 	return 0;
10260 }
10261 
10262 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
10263 {
10264 	bnxt_init_cp_rings(bp);
10265 	bnxt_init_rx_rings(bp);
10266 	bnxt_init_tx_rings(bp);
10267 	bnxt_init_ring_grps(bp, irq_re_init);
10268 	bnxt_init_vnics(bp);
10269 
10270 	return bnxt_init_chip(bp, irq_re_init);
10271 }
10272 
10273 static int bnxt_set_real_num_queues(struct bnxt *bp)
10274 {
10275 	int rc;
10276 	struct net_device *dev = bp->dev;
10277 
10278 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
10279 					  bp->tx_nr_rings_xdp);
10280 	if (rc)
10281 		return rc;
10282 
10283 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10284 	if (rc)
10285 		return rc;
10286 
10287 #ifdef CONFIG_RFS_ACCEL
10288 	if (bp->flags & BNXT_FLAG_RFS)
10289 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
10290 #endif
10291 
10292 	return rc;
10293 }
10294 
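/* Fit *rx and *tx into @max vectors.  With shared completion rings each
 * count is simply clamped to @max; otherwise rings are dropped from the
 * larger side (TX on ties) until the sum fits.  For example, rx = 4,
 * tx = 5 and max = 6 without sharing trims to rx = 3, tx = 3.
 */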
10295 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10296 			     bool shared)
10297 {
10298 	int _rx = *rx, _tx = *tx;
10299 
10300 	if (shared) {
10301 		*rx = min_t(int, _rx, max);
10302 		*tx = min_t(int, _tx, max);
10303 	} else {
10304 		if (max < 2)
10305 			return -ENOMEM;
10306 
10307 		while (_rx + _tx > max) {
10308 			if (_rx > _tx && _rx > 1)
10309 				_rx--;
10310 			else if (_tx > 1)
10311 				_tx--;
10312 		}
10313 		*rx = _rx;
10314 		*tx = _tx;
10315 	}
10316 	return 0;
10317 }
10318 
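/* Convert a TX ring count into the number of completion rings it consumes:
 * non-XDP rings are counted once per TC set while each XDP TX ring is
 * counted individually.  For example, 20 TX rings with 2 TCs and 4 XDP
 * rings need (20 - 4) / 2 + 4 = 12 completion rings.
 */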
10319 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
10320 {
10321 	return (tx - tx_xdp) / tx_sets + tx_xdp;
10322 }
10323 
10324 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
10325 {
10326 	int tcs = bp->num_tc;
10327 
10328 	if (!tcs)
10329 		tcs = 1;
10330 	return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
10331 }
10332 
10333 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
10334 {
10335 	int tcs = bp->num_tc;
10336 
10337 	return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
10338 	       bp->tx_nr_rings_xdp;
10339 }
10340 
10341 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10342 			   bool sh)
10343 {
10344 	int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
10345 
10346 	if (tx_cp != *tx) {
10347 		int tx_saved = tx_cp, rc;
10348 
10349 		rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10350 		if (rc)
10351 			return rc;
10352 		if (tx_cp != tx_saved)
10353 			*tx = bnxt_num_cp_to_tx(bp, tx_cp);
10354 		return 0;
10355 	}
10356 	return __bnxt_trim_rings(bp, rx, tx, max, sh);
10357 }
10358 
10359 static void bnxt_setup_msix(struct bnxt *bp)
10360 {
10361 	const int len = sizeof(bp->irq_tbl[0].name);
10362 	struct net_device *dev = bp->dev;
10363 	int tcs, i;
10364 
10365 	tcs = bp->num_tc;
10366 	if (tcs) {
10367 		int i, off, count;
10368 
10369 		for (i = 0; i < tcs; i++) {
10370 			count = bp->tx_nr_rings_per_tc;
10371 			off = BNXT_TC_TO_RING_BASE(bp, i);
10372 			netdev_set_tc_queue(dev, i, count, off);
10373 		}
10374 	}
10375 
10376 	for (i = 0; i < bp->cp_nr_rings; i++) {
10377 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10378 		char *attr;
10379 
10380 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10381 			attr = "TxRx";
10382 		else if (i < bp->rx_nr_rings)
10383 			attr = "rx";
10384 		else
10385 			attr = "tx";
10386 
10387 		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
10388 			 attr, i);
10389 		bp->irq_tbl[map_idx].handler = bnxt_msix;
10390 	}
10391 }
10392 
10393 static void bnxt_setup_inta(struct bnxt *bp)
10394 {
10395 	const int len = sizeof(bp->irq_tbl[0].name);
10396 
10397 	if (bp->num_tc) {
10398 		netdev_reset_tc(bp->dev);
10399 		bp->num_tc = 0;
10400 	}
10401 
10402 	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
10403 		 0);
10404 	bp->irq_tbl[0].handler = bnxt_inta;
10405 }
10406 
10407 static int bnxt_init_int_mode(struct bnxt *bp);
10408 
10409 static int bnxt_setup_int_mode(struct bnxt *bp)
10410 {
10411 	int rc;
10412 
10413 	if (!bp->irq_tbl) {
10414 		rc = bnxt_init_int_mode(bp);
10415 		if (rc || !bp->irq_tbl)
10416 			return rc ?: -ENODEV;
10417 	}
10418 
10419 	if (bp->flags & BNXT_FLAG_USING_MSIX)
10420 		bnxt_setup_msix(bp);
10421 	else
10422 		bnxt_setup_inta(bp);
10423 
10424 	rc = bnxt_set_real_num_queues(bp);
10425 	return rc;
10426 }
10427 
10428 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
10429 {
10430 	return bp->hw_resc.max_rsscos_ctxs;
10431 }
10432 
10433 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
10434 {
10435 	return bp->hw_resc.max_vnics;
10436 }
10437 
10438 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
10439 {
10440 	return bp->hw_resc.max_stat_ctxs;
10441 }
10442 
10443 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
10444 {
10445 	return bp->hw_resc.max_cp_rings;
10446 }
10447 
10448 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
10449 {
10450 	unsigned int cp = bp->hw_resc.max_cp_rings;
10451 
10452 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10453 		cp -= bnxt_get_ulp_msix_num(bp);
10454 
10455 	return cp;
10456 }
10457 
10458 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
10459 {
10460 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10461 
10462 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10463 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
10464 
10465 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
10466 }
10467 
10468 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
10469 {
10470 	bp->hw_resc.max_irqs = max_irqs;
10471 }
10472 
10473 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
10474 {
10475 	unsigned int cp;
10476 
10477 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
10478 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10479 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
10480 	else
10481 		return cp - bp->cp_nr_rings;
10482 }
10483 
10484 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
10485 {
10486 	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
10487 }
10488 
10489 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
10490 {
10491 	int max_irq = bnxt_get_max_func_irqs(bp);
10492 	int total_req = bp->cp_nr_rings + num;
10493 
10494 	if (max_irq < total_req) {
10495 		num = max_irq - bp->cp_nr_rings;
10496 		if (num <= 0)
10497 			return 0;
10498 	}
10499 	return num;
10500 }
10501 
10502 static int bnxt_get_num_msix(struct bnxt *bp)
10503 {
10504 	if (!BNXT_NEW_RM(bp))
10505 		return bnxt_get_max_func_irqs(bp);
10506 
10507 	return bnxt_nq_rings_in_use(bp);
10508 }
10509 
10510 static int bnxt_init_msix(struct bnxt *bp)
10511 {
10512 	int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
10513 	struct msix_entry *msix_ent;
10514 
10515 	total_vecs = bnxt_get_num_msix(bp);
10516 	max = bnxt_get_max_func_irqs(bp);
10517 	if (total_vecs > max)
10518 		total_vecs = max;
10519 
10520 	if (!total_vecs)
10521 		return 0;
10522 
10523 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
10524 	if (!msix_ent)
10525 		return -ENOMEM;
10526 
10527 	for (i = 0; i < total_vecs; i++) {
10528 		msix_ent[i].entry = i;
10529 		msix_ent[i].vector = 0;
10530 	}
10531 
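	/* When RX and TX rings are not shared, a single vector is not enough,
	 * so require at least two from pci_enable_msix_range().
	 */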
10532 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
10533 		min = 2;
10534 
10535 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
10536 	ulp_msix = bnxt_get_ulp_msix_num(bp);
10537 	if (total_vecs < 0 || total_vecs < ulp_msix) {
10538 		rc = -ENODEV;
10539 		goto msix_setup_exit;
10540 	}
10541 
10542 	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
10543 	if (bp->irq_tbl) {
10544 		for (i = 0; i < total_vecs; i++)
10545 			bp->irq_tbl[i].vector = msix_ent[i].vector;
10546 
10547 		bp->total_irqs = total_vecs;
10548 		/* Trim rings based upon num of vectors allocated */
10549 		/* Trim rings based on the number of vectors allocated */
10550 				     total_vecs - ulp_msix, min == 1);
10551 		if (rc)
10552 			goto msix_setup_exit;
10553 
10554 		tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
10555 		bp->cp_nr_rings = (min == 1) ?
10556 				  max_t(int, tx_cp, bp->rx_nr_rings) :
10557 				  tx_cp + bp->rx_nr_rings;
10558 
10559 	} else {
10560 		rc = -ENOMEM;
10561 		goto msix_setup_exit;
10562 	}
10563 	bp->flags |= BNXT_FLAG_USING_MSIX;
10564 	kfree(msix_ent);
10565 	return 0;
10566 
10567 msix_setup_exit:
10568 	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
10569 	kfree(bp->irq_tbl);
10570 	bp->irq_tbl = NULL;
10571 	pci_disable_msix(bp->pdev);
10572 	kfree(msix_ent);
10573 	return rc;
10574 }
10575 
10576 static int bnxt_init_inta(struct bnxt *bp)
10577 {
10578 	bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
10579 	if (!bp->irq_tbl)
10580 		return -ENOMEM;
10581 
10582 	bp->total_irqs = 1;
10583 	bp->rx_nr_rings = 1;
10584 	bp->tx_nr_rings = 1;
10585 	bp->cp_nr_rings = 1;
10586 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
10587 	bp->irq_tbl[0].vector = bp->pdev->irq;
10588 	return 0;
10589 }
10590 
10591 static int bnxt_init_int_mode(struct bnxt *bp)
10592 {
10593 	int rc = -ENODEV;
10594 
10595 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
10596 		rc = bnxt_init_msix(bp);
10597 
10598 	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
10599 		/* fallback to INTA */
10600 		rc = bnxt_init_inta(bp);
10601 	}
10602 	return rc;
10603 }
10604 
10605 static void bnxt_clear_int_mode(struct bnxt *bp)
10606 {
10607 	if (bp->flags & BNXT_FLAG_USING_MSIX)
10608 		pci_disable_msix(bp->pdev);
10609 
10610 	kfree(bp->irq_tbl);
10611 	bp->irq_tbl = NULL;
10612 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
10613 }
10614 
10615 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
10616 {
10617 	bool irq_cleared = false;
10618 	int tcs = bp->num_tc;
10619 	int irqs_required;
10620 	int rc;
10621 
10622 	if (!bnxt_need_reserve_rings(bp))
10623 		return 0;
10624 
10625 	if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
10626 		int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
10627 
10628 		if (ulp_msix > bp->ulp_num_msix_want)
10629 			ulp_msix = bp->ulp_num_msix_want;
10630 		irqs_required = ulp_msix + bp->cp_nr_rings;
10631 	} else {
10632 		irqs_required = bnxt_get_num_msix(bp);
10633 	}
10634 
10635 	if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
10636 		bnxt_ulp_irq_stop(bp);
10637 		bnxt_clear_int_mode(bp);
10638 		irq_cleared = true;
10639 	}
10640 	rc = __bnxt_reserve_rings(bp);
10641 	if (irq_cleared) {
10642 		if (!rc)
10643 			rc = bnxt_init_int_mode(bp);
10644 		bnxt_ulp_irq_restart(bp, rc);
10645 	}
10646 	if (rc) {
10647 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
10648 		return rc;
10649 	}
10650 	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
10651 		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
10652 		netdev_err(bp->dev, "tx ring reservation failure\n");
10653 		netdev_reset_tc(bp->dev);
10654 		bp->num_tc = 0;
10655 		if (bp->tx_nr_rings_xdp)
10656 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
10657 		else
10658 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10659 		return -ENOMEM;
10660 	}
10661 	return 0;
10662 }
10663 
10664 static void bnxt_free_irq(struct bnxt *bp)
10665 {
10666 	struct bnxt_irq *irq;
10667 	int i;
10668 
10669 #ifdef CONFIG_RFS_ACCEL
10670 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
10671 	bp->dev->rx_cpu_rmap = NULL;
10672 #endif
10673 	if (!bp->irq_tbl || !bp->bnapi)
10674 		return;
10675 
10676 	for (i = 0; i < bp->cp_nr_rings; i++) {
10677 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10678 
10679 		irq = &bp->irq_tbl[map_idx];
10680 		if (irq->requested) {
10681 			if (irq->have_cpumask) {
10682 				irq_set_affinity_hint(irq->vector, NULL);
10683 				free_cpumask_var(irq->cpu_mask);
10684 				irq->have_cpumask = 0;
10685 			}
10686 			free_irq(irq->vector, bp->bnapi[i]);
10687 		}
10688 
10689 		irq->requested = 0;
10690 	}
10691 }
10692 
10693 static int bnxt_request_irq(struct bnxt *bp)
10694 {
10695 	int i, j, rc = 0;
10696 	unsigned long flags = 0;
10697 #ifdef CONFIG_RFS_ACCEL
10698 	struct cpu_rmap *rmap;
10699 #endif
10700 
10701 	rc = bnxt_setup_int_mode(bp);
10702 	if (rc) {
10703 		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
10704 			   rc);
10705 		return rc;
10706 	}
10707 #ifdef CONFIG_RFS_ACCEL
10708 	rmap = bp->dev->rx_cpu_rmap;
10709 #endif
10710 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
10711 		flags = IRQF_SHARED;
10712 
10713 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
10714 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10715 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
10716 
10717 #ifdef CONFIG_RFS_ACCEL
10718 		if (rmap && bp->bnapi[i]->rx_ring) {
10719 			rc = irq_cpu_rmap_add(rmap, irq->vector);
10720 			if (rc)
10721 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
10722 					    j);
10723 			j++;
10724 		}
10725 #endif
10726 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
10727 				 bp->bnapi[i]);
10728 		if (rc)
10729 			break;
10730 
10731 		netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
10732 		irq->requested = 1;
10733 
10734 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
10735 			int numa_node = dev_to_node(&bp->pdev->dev);
10736 
10737 			irq->have_cpumask = 1;
10738 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
10739 					irq->cpu_mask);
10740 			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
10741 			if (rc) {
10742 				netdev_warn(bp->dev,
10743 					    "Set affinity failed, IRQ = %d\n",
10744 					    irq->vector);
10745 				break;
10746 			}
10747 		}
10748 	}
10749 	return rc;
10750 }
10751 
10752 static void bnxt_del_napi(struct bnxt *bp)
10753 {
10754 	int i;
10755 
10756 	if (!bp->bnapi)
10757 		return;
10758 
10759 	for (i = 0; i < bp->rx_nr_rings; i++)
10760 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
10761 	for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
10762 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
10763 
10764 	for (i = 0; i < bp->cp_nr_rings; i++) {
10765 		struct bnxt_napi *bnapi = bp->bnapi[i];
10766 
10767 		__netif_napi_del(&bnapi->napi);
10768 	}
10769 	/* We called __netif_napi_del(), so we need to respect an RCU
10770 	 * grace period before freeing the napi structures.
10771 	 */
10772 	synchronize_net();
10773 }
10774 
10775 static void bnxt_init_napi(struct bnxt *bp)
10776 {
10777 	int i;
10778 	unsigned int cp_nr_rings = bp->cp_nr_rings;
10779 	struct bnxt_napi *bnapi;
10780 
10781 	if (bp->flags & BNXT_FLAG_USING_MSIX) {
10782 		int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
10783 
10784 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10785 			poll_fn = bnxt_poll_p5;
10786 		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10787 			cp_nr_rings--;
10788 		for (i = 0; i < cp_nr_rings; i++) {
10789 			bnapi = bp->bnapi[i];
10790 			netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
10791 		}
10792 		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10793 			bnapi = bp->bnapi[cp_nr_rings];
10794 			netif_napi_add(bp->dev, &bnapi->napi,
10795 				       bnxt_poll_nitroa0);
10796 		}
10797 	} else {
10798 		bnapi = bp->bnapi[0];
10799 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
10800 	}
10801 }
10802 
10803 static void bnxt_disable_napi(struct bnxt *bp)
10804 {
10805 	int i;
10806 
10807 	if (!bp->bnapi ||
10808 	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
10809 		return;
10810 
10811 	for (i = 0; i < bp->cp_nr_rings; i++) {
10812 		struct bnxt_napi *bnapi = bp->bnapi[i];
10813 		struct bnxt_cp_ring_info *cpr;
10814 
10815 		cpr = &bnapi->cp_ring;
10816 		if (bnapi->tx_fault)
10817 			cpr->sw_stats->tx.tx_resets++;
10818 		if (bnapi->in_reset)
10819 			cpr->sw_stats->rx.rx_resets++;
10820 		napi_disable(&bnapi->napi);
10821 		if (bnapi->rx_ring)
10822 			cancel_work_sync(&cpr->dim.work);
10823 	}
10824 }
10825 
10826 static void bnxt_enable_napi(struct bnxt *bp)
10827 {
10828 	int i;
10829 
10830 	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
10831 	for (i = 0; i < bp->cp_nr_rings; i++) {
10832 		struct bnxt_napi *bnapi = bp->bnapi[i];
10833 		struct bnxt_cp_ring_info *cpr;
10834 
10835 		bnapi->tx_fault = 0;
10836 
10837 		cpr = &bnapi->cp_ring;
10838 		bnapi->in_reset = false;
10839 
10840 		if (bnapi->rx_ring) {
10841 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
10842 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
10843 		}
10844 		napi_enable(&bnapi->napi);
10845 	}
10846 }
10847 
10848 void bnxt_tx_disable(struct bnxt *bp)
10849 {
10850 	int i;
10851 	struct bnxt_tx_ring_info *txr;
10852 
10853 	if (bp->tx_ring) {
10854 		for (i = 0; i < bp->tx_nr_rings; i++) {
10855 			txr = &bp->tx_ring[i];
10856 			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
10857 		}
10858 	}
10859 	/* Make sure napi polls see @dev_state change */
10860 	synchronize_net();
10861 	/* Drop carrier first to prevent TX timeout */
10862 	netif_carrier_off(bp->dev);
10863 	/* Stop all TX queues */
10864 	netif_tx_disable(bp->dev);
10865 }
10866 
10867 void bnxt_tx_enable(struct bnxt *bp)
10868 {
10869 	int i;
10870 	struct bnxt_tx_ring_info *txr;
10871 
10872 	for (i = 0; i < bp->tx_nr_rings; i++) {
10873 		txr = &bp->tx_ring[i];
10874 		WRITE_ONCE(txr->dev_state, 0);
10875 	}
10876 	/* Make sure napi polls see @dev_state change */
10877 	synchronize_net();
10878 	netif_tx_wake_all_queues(bp->dev);
10879 	if (BNXT_LINK_IS_UP(bp))
10880 		netif_carrier_on(bp->dev);
10881 }
10882 
10883 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
10884 {
10885 	u8 active_fec = link_info->active_fec_sig_mode &
10886 			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
10887 
10888 	switch (active_fec) {
10889 	default:
10890 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
10891 		return "None";
10892 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
10893 		return "Clause 74 BaseR";
10894 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
10895 		return "Clause 91 RS(528,514)";
10896 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
10897 		return "Clause 91 RS544_1XN";
10898 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
10899 		return "Clause 91 RS(544,514)";
10900 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
10901 		return "Clause 91 RS272_1XN";
10902 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
10903 		return "Clause 91 RS(272,257)";
10904 	}
10905 }
10906 
10907 void bnxt_report_link(struct bnxt *bp)
10908 {
10909 	if (BNXT_LINK_IS_UP(bp)) {
10910 		const char *signal = "";
10911 		const char *flow_ctrl;
10912 		const char *duplex;
10913 		u32 speed;
10914 		u16 fec;
10915 
10916 		netif_carrier_on(bp->dev);
10917 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
10918 		if (speed == SPEED_UNKNOWN) {
10919 			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
10920 			return;
10921 		}
10922 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
10923 			duplex = "full";
10924 		else
10925 			duplex = "half";
10926 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
10927 			flow_ctrl = "ON - receive & transmit";
10928 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
10929 			flow_ctrl = "ON - transmit";
10930 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
10931 			flow_ctrl = "ON - receive";
10932 		else
10933 			flow_ctrl = "none";
10934 		if (bp->link_info.phy_qcfg_resp.option_flags &
10935 		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
10936 			u8 sig_mode = bp->link_info.active_fec_sig_mode &
10937 				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
10938 			switch (sig_mode) {
10939 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
10940 				signal = "(NRZ) ";
10941 				break;
10942 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
10943 				signal = "(PAM4 56Gbps) ";
10944 				break;
10945 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
10946 				signal = "(PAM4 112Gbps) ";
10947 				break;
10948 			default:
10949 				break;
10950 			}
10951 		}
10952 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
10953 			    speed, signal, duplex, flow_ctrl);
10954 		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
10955 			netdev_info(bp->dev, "EEE is %s\n",
10956 				    bp->eee.eee_active ? "active" :
10957 							 "not active");
10958 		fec = bp->link_info.fec_cfg;
10959 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
10960 			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
10961 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
10962 				    bnxt_report_fec(&bp->link_info));
10963 	} else {
10964 		netif_carrier_off(bp->dev);
10965 		netdev_err(bp->dev, "NIC Link is Down\n");
10966 	}
10967 }
10968 
10969 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
10970 {
10971 	if (!resp->supported_speeds_auto_mode &&
10972 	    !resp->supported_speeds_force_mode &&
10973 	    !resp->supported_pam4_speeds_auto_mode &&
10974 	    !resp->supported_pam4_speeds_force_mode &&
10975 	    !resp->supported_speeds2_auto_mode &&
10976 	    !resp->supported_speeds2_force_mode)
10977 		return true;
10978 	return false;
10979 }
10980 
10981 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
10982 {
10983 	struct bnxt_link_info *link_info = &bp->link_info;
10984 	struct hwrm_port_phy_qcaps_output *resp;
10985 	struct hwrm_port_phy_qcaps_input *req;
10986 	int rc = 0;
10987 
10988 	if (bp->hwrm_spec_code < 0x10201)
10989 		return 0;
10990 
10991 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
10992 	if (rc)
10993 		return rc;
10994 
10995 	resp = hwrm_req_hold(bp, req);
10996 	rc = hwrm_req_send(bp, req);
10997 	if (rc)
10998 		goto hwrm_phy_qcaps_exit;
10999 
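	/* Pack the 8-bit flags and the 16-bit flags2 capability fields into
	 * the single phy_flags word, with flags2 shifted above the first byte.
	 */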
11000 	bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
11001 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
11002 		struct ethtool_keee *eee = &bp->eee;
11003 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
11004 
11005 		_bnxt_fw_to_linkmode(eee->supported, fw_speeds);
11006 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
11007 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
11008 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
11009 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
11010 	}
11011 
11012 	if (bp->hwrm_spec_code >= 0x10a01) {
11013 		if (bnxt_phy_qcaps_no_speed(resp)) {
11014 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
11015 			netdev_warn(bp->dev, "Ethernet link disabled\n");
11016 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
11017 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
11018 			netdev_info(bp->dev, "Ethernet link enabled\n");
11019 			/* Phy re-enabled, reprobe the speeds */
11020 			link_info->support_auto_speeds = 0;
11021 			link_info->support_pam4_auto_speeds = 0;
11022 			link_info->support_auto_speeds2 = 0;
11023 		}
11024 	}
11025 	if (resp->supported_speeds_auto_mode)
11026 		link_info->support_auto_speeds =
11027 			le16_to_cpu(resp->supported_speeds_auto_mode);
11028 	if (resp->supported_pam4_speeds_auto_mode)
11029 		link_info->support_pam4_auto_speeds =
11030 			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
11031 	if (resp->supported_speeds2_auto_mode)
11032 		link_info->support_auto_speeds2 =
11033 			le16_to_cpu(resp->supported_speeds2_auto_mode);
11034 
11035 	bp->port_count = resp->port_cnt;
11036 
11037 hwrm_phy_qcaps_exit:
11038 	hwrm_req_drop(bp, req);
11039 	return rc;
11040 }
11041 
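/* Returns true if @advertising contains any bit not present in @supported.
 * Illustrative (hypothetical) values: advertising = 0x00f0, supported = 0x0070
 * gives diff = 0x0080, and (supported | diff) != supported, so a previously
 * advertised speed has been dropped from the supported set.
 */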
11042 static bool bnxt_support_dropped(u16 advertising, u16 supported)
11043 {
11044 	u16 diff = advertising ^ supported;
11045 
11046 	return ((supported | diff) != supported);
11047 }
11048 
11049 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
11050 {
11051 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
11052 
11053 	/* Check if any advertised speeds are no longer supported. The caller
11054 	 * holds the link_lock mutex, so we can modify link_info settings.
11055 	 */
11056 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11057 		if (bnxt_support_dropped(link_info->advertising,
11058 					 link_info->support_auto_speeds2)) {
11059 			link_info->advertising = link_info->support_auto_speeds2;
11060 			return true;
11061 		}
11062 		return false;
11063 	}
11064 	if (bnxt_support_dropped(link_info->advertising,
11065 				 link_info->support_auto_speeds)) {
11066 		link_info->advertising = link_info->support_auto_speeds;
11067 		return true;
11068 	}
11069 	if (bnxt_support_dropped(link_info->advertising_pam4,
11070 				 link_info->support_pam4_auto_speeds)) {
11071 		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
11072 		return true;
11073 	}
11074 	return false;
11075 }
11076 
11077 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
11078 {
11079 	struct bnxt_link_info *link_info = &bp->link_info;
11080 	struct hwrm_port_phy_qcfg_output *resp;
11081 	struct hwrm_port_phy_qcfg_input *req;
11082 	u8 link_state = link_info->link_state;
11083 	bool support_changed;
11084 	int rc;
11085 
11086 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
11087 	if (rc)
11088 		return rc;
11089 
11090 	resp = hwrm_req_hold(bp, req);
11091 	rc = hwrm_req_send(bp, req);
11092 	if (rc) {
11093 		hwrm_req_drop(bp, req);
11094 		if (BNXT_VF(bp) && rc == -ENODEV) {
11095 			netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
11096 			rc = 0;
11097 		}
11098 		return rc;
11099 	}
11100 
11101 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
11102 	link_info->phy_link_status = resp->link;
11103 	link_info->duplex = resp->duplex_cfg;
11104 	if (bp->hwrm_spec_code >= 0x10800)
11105 		link_info->duplex = resp->duplex_state;
11106 	link_info->pause = resp->pause;
11107 	link_info->auto_mode = resp->auto_mode;
11108 	link_info->auto_pause_setting = resp->auto_pause;
11109 	link_info->lp_pause = resp->link_partner_adv_pause;
11110 	link_info->force_pause_setting = resp->force_pause;
11111 	link_info->duplex_setting = resp->duplex_cfg;
11112 	if (link_info->phy_link_status == BNXT_LINK_LINK) {
11113 		link_info->link_speed = le16_to_cpu(resp->link_speed);
11114 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
11115 			link_info->active_lanes = resp->active_lanes;
11116 	} else {
11117 		link_info->link_speed = 0;
11118 		link_info->active_lanes = 0;
11119 	}
11120 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
11121 	link_info->force_pam4_link_speed =
11122 		le16_to_cpu(resp->force_pam4_link_speed);
11123 	link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
11124 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
11125 	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
11126 	link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
11127 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
11128 	link_info->auto_pam4_link_speeds =
11129 		le16_to_cpu(resp->auto_pam4_link_speed_mask);
11130 	link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
11131 	link_info->lp_auto_link_speeds =
11132 		le16_to_cpu(resp->link_partner_adv_speeds);
11133 	link_info->lp_auto_pam4_link_speeds =
11134 		resp->link_partner_pam4_adv_speeds;
11135 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
11136 	link_info->phy_ver[0] = resp->phy_maj;
11137 	link_info->phy_ver[1] = resp->phy_min;
11138 	link_info->phy_ver[2] = resp->phy_bld;
11139 	link_info->media_type = resp->media_type;
11140 	link_info->phy_type = resp->phy_type;
11141 	link_info->transceiver = resp->xcvr_pkg_type;
11142 	link_info->phy_addr = resp->eee_config_phy_addr &
11143 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
11144 	link_info->module_status = resp->module_status;
11145 
11146 	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
11147 		struct ethtool_keee *eee = &bp->eee;
11148 		u16 fw_speeds;
11149 
11150 		eee->eee_active = 0;
11151 		if (resp->eee_config_phy_addr &
11152 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
11153 			eee->eee_active = 1;
11154 			fw_speeds = le16_to_cpu(
11155 				resp->link_partner_adv_eee_link_speed_mask);
11156 			_bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
11157 		}
11158 
11159 		/* Pull initial EEE config */
11160 		if (!chng_link_state) {
11161 			if (resp->eee_config_phy_addr &
11162 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
11163 				eee->eee_enabled = 1;
11164 
11165 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
11166 			_bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
11167 
11168 			if (resp->eee_config_phy_addr &
11169 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
11170 				__le32 tmr;
11171 
11172 				eee->tx_lpi_enabled = 1;
11173 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
11174 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
11175 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
11176 			}
11177 		}
11178 	}
11179 
11180 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
11181 	if (bp->hwrm_spec_code >= 0x10504) {
11182 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
11183 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
11184 	}
11185 	/* TODO: need to add more logic to report VF link */
11186 	if (chng_link_state) {
11187 		if (link_info->phy_link_status == BNXT_LINK_LINK)
11188 			link_info->link_state = BNXT_LINK_STATE_UP;
11189 		else
11190 			link_info->link_state = BNXT_LINK_STATE_DOWN;
11191 		if (link_state != link_info->link_state)
11192 			bnxt_report_link(bp);
11193 	} else {
11194 		/* always report link down if not required to update link state */
11195 		link_info->link_state = BNXT_LINK_STATE_DOWN;
11196 	}
11197 	hwrm_req_drop(bp, req);
11198 
11199 	if (!BNXT_PHY_CFG_ABLE(bp))
11200 		return 0;
11201 
11202 	support_changed = bnxt_support_speed_dropped(link_info);
11203 	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
11204 		bnxt_hwrm_set_link_setting(bp, true, false);
11205 	return 0;
11206 }
11207 
11208 static void bnxt_get_port_module_status(struct bnxt *bp)
11209 {
11210 	struct bnxt_link_info *link_info = &bp->link_info;
11211 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
11212 	u8 module_status;
11213 
11214 	if (bnxt_update_link(bp, true))
11215 		return;
11216 
11217 	module_status = link_info->module_status;
11218 	switch (module_status) {
11219 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
11220 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
11221 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
11222 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
11223 			    bp->pf.port_id);
11224 		if (bp->hwrm_spec_code >= 0x10201) {
11225 			netdev_warn(bp->dev, "Module part number %s\n",
11226 				    resp->phy_vendor_partnumber);
11227 		}
11228 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
11229 			netdev_warn(bp->dev, "TX is disabled\n");
11230 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
11231 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
11232 	}
11233 }
11234 
11235 static void
11236 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11237 {
11238 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
11239 		if (bp->hwrm_spec_code >= 0x10201)
11240 			req->auto_pause =
11241 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
11242 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11243 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
11244 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11245 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
11246 		req->enables |=
11247 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11248 	} else {
11249 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11250 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
11251 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11252 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
11253 		req->enables |=
11254 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
11255 		if (bp->hwrm_spec_code >= 0x10201) {
11256 			req->auto_pause = req->force_pause;
11257 			req->enables |= cpu_to_le32(
11258 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11259 		}
11260 	}
11261 }
11262 
11263 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11264 {
11265 	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
11266 		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
11267 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11268 			req->enables |=
11269 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
11270 			req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
11271 		} else if (bp->link_info.advertising) {
11272 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
11273 			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
11274 		}
11275 		if (bp->link_info.advertising_pam4) {
11276 			req->enables |=
11277 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
11278 			req->auto_link_pam4_speed_mask =
11279 				cpu_to_le16(bp->link_info.advertising_pam4);
11280 		}
11281 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
11282 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
11283 	} else {
11284 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
11285 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11286 			req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
11287 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
11288 			netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
11289 				   (u32)bp->link_info.req_link_speed);
11290 		} else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
11291 			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11292 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
11293 		} else {
11294 			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11295 		}
11296 	}
11297 
11298 	/* tell chimp that the setting takes effect immediately */
11299 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
11300 }
11301 
11302 int bnxt_hwrm_set_pause(struct bnxt *bp)
11303 {
11304 	struct hwrm_port_phy_cfg_input *req;
11305 	int rc;
11306 
11307 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11308 	if (rc)
11309 		return rc;
11310 
11311 	bnxt_hwrm_set_pause_common(bp, req);
11312 
11313 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
11314 	    bp->link_info.force_link_chng)
11315 		bnxt_hwrm_set_link_common(bp, req);
11316 
11317 	rc = hwrm_req_send(bp, req);
11318 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
11319 		/* Since changing the pause setting doesn't trigger any link
11320 		 * change event, the driver needs to update the current pause
11321 		 * result upon successful return of the phy_cfg command.
11322 		 */
11323 		bp->link_info.pause =
11324 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
11325 		bp->link_info.auto_pause_setting = 0;
11326 		if (!bp->link_info.force_link_chng)
11327 			bnxt_report_link(bp);
11328 	}
11329 	bp->link_info.force_link_chng = false;
11330 	return rc;
11331 }
11332 
11333 static void bnxt_hwrm_set_eee(struct bnxt *bp,
11334 			      struct hwrm_port_phy_cfg_input *req)
11335 {
11336 	struct ethtool_keee *eee = &bp->eee;
11337 
11338 	if (eee->eee_enabled) {
11339 		u16 eee_speeds;
11340 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
11341 
11342 		if (eee->tx_lpi_enabled)
11343 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
11344 		else
11345 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
11346 
11347 		req->flags |= cpu_to_le32(flags);
11348 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
11349 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
11350 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
11351 	} else {
11352 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
11353 	}
11354 }
11355 
11356 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
11357 {
11358 	struct hwrm_port_phy_cfg_input *req;
11359 	int rc;
11360 
11361 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11362 	if (rc)
11363 		return rc;
11364 
11365 	if (set_pause)
11366 		bnxt_hwrm_set_pause_common(bp, req);
11367 
11368 	bnxt_hwrm_set_link_common(bp, req);
11369 
11370 	if (set_eee)
11371 		bnxt_hwrm_set_eee(bp, req);
11372 	return hwrm_req_send(bp, req);
11373 }
11374 
11375 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
11376 {
11377 	struct hwrm_port_phy_cfg_input *req;
11378 	int rc;
11379 
11380 	if (!BNXT_SINGLE_PF(bp))
11381 		return 0;
11382 
11383 	if (pci_num_vf(bp->pdev) &&
11384 	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
11385 		return 0;
11386 
11387 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11388 	if (rc)
11389 		return rc;
11390 
11391 	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
11392 	rc = hwrm_req_send(bp, req);
11393 	if (!rc) {
11394 		mutex_lock(&bp->link_lock);
11395 		/* The device is not obliged to bring the link down in certain
11396 		 * scenarios, even when forced. Setting the state to unknown is
11397 		 * consistent with driver startup and will force the link state
11398 		 * to be reported during the subsequent open, based on PORT_PHY_QCFG.
11399 		 */
11400 		bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
11401 		mutex_unlock(&bp->link_lock);
11402 	}
11403 	return rc;
11404 }
11405 
11406 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11407 {
11408 #ifdef CONFIG_TEE_BNXT_FW
11409 	int rc = tee_bnxt_fw_load();
11410 
11411 	if (rc)
11412 		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11413 
11414 	return rc;
11415 #else
11416 	netdev_err(bp->dev, "OP-TEE not supported\n");
11417 	return -ENODEV;
11418 #endif
11419 }
11420 
11421 static int bnxt_try_recover_fw(struct bnxt *bp)
11422 {
11423 	if (bp->fw_health && bp->fw_health->status_reliable) {
11424 		int retry = 0, rc;
11425 		u32 sts;
11426 
11427 		do {
11428 			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11429 			rc = bnxt_hwrm_poll(bp);
11430 			if (!BNXT_FW_IS_BOOTING(sts) &&
11431 			    !BNXT_FW_IS_RECOVERING(sts))
11432 				break;
11433 			retry++;
11434 		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
11435 
11436 		if (!BNXT_FW_IS_HEALTHY(sts)) {
11437 			netdev_err(bp->dev,
11438 				   "Firmware not responding, status: 0x%x\n",
11439 				   sts);
11440 			rc = -ENODEV;
11441 		}
11442 		if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
11443 			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
11444 			return bnxt_fw_reset_via_optee(bp);
11445 		}
11446 		return rc;
11447 	}
11448 
11449 	return -ENODEV;
11450 }
11451 
11452 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
11453 {
11454 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11455 
11456 	if (!BNXT_NEW_RM(bp))
11457 		return; /* no resource reservations required */
11458 
11459 	hw_resc->resv_cp_rings = 0;
11460 	hw_resc->resv_stat_ctxs = 0;
11461 	hw_resc->resv_irqs = 0;
11462 	hw_resc->resv_tx_rings = 0;
11463 	hw_resc->resv_rx_rings = 0;
11464 	hw_resc->resv_hw_ring_grps = 0;
11465 	hw_resc->resv_vnics = 0;
11466 	hw_resc->resv_rsscos_ctxs = 0;
11467 	if (!fw_reset) {
11468 		bp->tx_nr_rings = 0;
11469 		bp->rx_nr_rings = 0;
11470 	}
11471 }
11472 
11473 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
11474 {
11475 	int rc;
11476 
11477 	if (!BNXT_NEW_RM(bp))
11478 		return 0; /* no resource reservations required */
11479 
11480 	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
11481 	if (rc)
11482 		netdev_err(bp->dev, "resc_qcaps failed\n");
11483 
11484 	bnxt_clear_reservations(bp, fw_reset);
11485 
11486 	return rc;
11487 }
11488 
11489 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
11490 {
11491 	struct hwrm_func_drv_if_change_output *resp;
11492 	struct hwrm_func_drv_if_change_input *req;
11493 	bool fw_reset = !bp->irq_tbl;
11494 	bool resc_reinit = false;
11495 	int rc, retry = 0;
11496 	u32 flags = 0;
11497 
11498 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
11499 		return 0;
11500 
11501 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
11502 	if (rc)
11503 		return rc;
11504 
11505 	if (up)
11506 		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
11507 	resp = hwrm_req_hold(bp, req);
11508 
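	/* The firmware may still be booting after a reset and can return
	 * -EAGAIN for a while, so retry up to BNXT_FW_IF_RETRY times before
	 * giving up.
	 */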
11509 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
11510 	while (retry < BNXT_FW_IF_RETRY) {
11511 		rc = hwrm_req_send(bp, req);
11512 		if (rc != -EAGAIN)
11513 			break;
11514 
11515 		msleep(50);
11516 		retry++;
11517 	}
11518 
11519 	if (rc == -EAGAIN) {
11520 		hwrm_req_drop(bp, req);
11521 		return rc;
11522 	} else if (!rc) {
11523 		flags = le32_to_cpu(resp->flags);
11524 	} else if (up) {
11525 		rc = bnxt_try_recover_fw(bp);
11526 		fw_reset = true;
11527 	}
11528 	hwrm_req_drop(bp, req);
11529 	if (rc)
11530 		return rc;
11531 
11532 	if (!up) {
11533 		bnxt_inv_fw_health_reg(bp);
11534 		return 0;
11535 	}
11536 
11537 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
11538 		resc_reinit = true;
11539 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
11540 	    test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
11541 		fw_reset = true;
11542 	else
11543 		bnxt_remap_fw_health_regs(bp);
11544 
11545 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
11546 		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
11547 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11548 		return -ENODEV;
11549 	}
11550 	if (resc_reinit || fw_reset) {
11551 		if (fw_reset) {
11552 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11553 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11554 				bnxt_ulp_irq_stop(bp);
11555 			bnxt_free_ctx_mem(bp);
11556 			bnxt_dcb_free(bp);
11557 			rc = bnxt_fw_init_one(bp);
11558 			if (rc) {
11559 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11560 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11561 				return rc;
11562 			}
11563 			bnxt_clear_int_mode(bp);
11564 			rc = bnxt_init_int_mode(bp);
11565 			if (rc) {
11566 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11567 				netdev_err(bp->dev, "init int mode failed\n");
11568 				return rc;
11569 			}
11570 		}
11571 		rc = bnxt_cancel_reservations(bp, fw_reset);
11572 	}
11573 	return rc;
11574 }
11575 
11576 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
11577 {
11578 	struct hwrm_port_led_qcaps_output *resp;
11579 	struct hwrm_port_led_qcaps_input *req;
11580 	struct bnxt_pf_info *pf = &bp->pf;
11581 	int rc;
11582 
11583 	bp->num_leds = 0;
11584 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
11585 		return 0;
11586 
11587 	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
11588 	if (rc)
11589 		return rc;
11590 
11591 	req->port_id = cpu_to_le16(pf->port_id);
11592 	resp = hwrm_req_hold(bp, req);
11593 	rc = hwrm_req_send(bp, req);
11594 	if (rc) {
11595 		hwrm_req_drop(bp, req);
11596 		return rc;
11597 	}
11598 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
11599 		int i;
11600 
11601 		bp->num_leds = resp->num_leds;
11602 		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
11603 						 bp->num_leds);
11604 		for (i = 0; i < bp->num_leds; i++) {
11605 			struct bnxt_led_info *led = &bp->leds[i];
11606 			__le16 caps = led->led_state_caps;
11607 
11608 			if (!led->led_group_id ||
11609 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
11610 				bp->num_leds = 0;
11611 				break;
11612 			}
11613 		}
11614 	}
11615 	hwrm_req_drop(bp, req);
11616 	return 0;
11617 }
11618 
11619 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
11620 {
11621 	struct hwrm_wol_filter_alloc_output *resp;
11622 	struct hwrm_wol_filter_alloc_input *req;
11623 	int rc;
11624 
11625 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
11626 	if (rc)
11627 		return rc;
11628 
11629 	req->port_id = cpu_to_le16(bp->pf.port_id);
11630 	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
11631 	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
11632 	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
11633 
11634 	resp = hwrm_req_hold(bp, req);
11635 	rc = hwrm_req_send(bp, req);
11636 	if (!rc)
11637 		bp->wol_filter_id = resp->wol_filter_id;
11638 	hwrm_req_drop(bp, req);
11639 	return rc;
11640 }
11641 
11642 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
11643 {
11644 	struct hwrm_wol_filter_free_input *req;
11645 	int rc;
11646 
11647 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
11648 	if (rc)
11649 		return rc;
11650 
11651 	req->port_id = cpu_to_le16(bp->pf.port_id);
11652 	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
11653 	req->wol_filter_id = bp->wol_filter_id;
11654 
11655 	return hwrm_req_send(bp, req);
11656 }
11657 
11658 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
11659 {
11660 	struct hwrm_wol_filter_qcfg_output *resp;
11661 	struct hwrm_wol_filter_qcfg_input *req;
11662 	u16 next_handle = 0;
11663 	int rc;
11664 
11665 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
11666 	if (rc)
11667 		return rc;
11668 
11669 	req->port_id = cpu_to_le16(bp->pf.port_id);
11670 	req->handle = cpu_to_le16(handle);
11671 	resp = hwrm_req_hold(bp, req);
11672 	rc = hwrm_req_send(bp, req);
11673 	if (!rc) {
11674 		next_handle = le16_to_cpu(resp->next_handle);
11675 		if (next_handle != 0) {
11676 			if (resp->wol_type ==
11677 			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
11678 				bp->wol = 1;
11679 				bp->wol_filter_id = resp->wol_filter_id;
11680 			}
11681 		}
11682 	}
11683 	hwrm_req_drop(bp, req);
11684 	return next_handle;
11685 }
11686 
11687 static void bnxt_get_wol_settings(struct bnxt *bp)
11688 {
11689 	u16 handle = 0;
11690 
11691 	bp->wol = 0;
11692 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
11693 		return;
11694 
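	/* Walk the firmware's WOL filter list: each query returns the handle
	 * of the next filter, and a handle of 0 or 0xffff ends the walk.
	 */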
11695 	do {
11696 		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
11697 	} while (handle && handle != 0xffff);
11698 }
11699 
11700 static bool bnxt_eee_config_ok(struct bnxt *bp)
11701 {
11702 	struct ethtool_keee *eee = &bp->eee;
11703 	struct bnxt_link_info *link_info = &bp->link_info;
11704 
11705 	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
11706 		return true;
11707 
11708 	if (eee->eee_enabled) {
11709 		__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
11710 		__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
11711 
11712 		_bnxt_fw_to_linkmode(advertising, link_info->advertising);
11713 
11714 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
11715 			eee->eee_enabled = 0;
11716 			return false;
11717 		}
11718 		if (linkmode_andnot(tmp, eee->advertised, advertising)) {
11719 			linkmode_and(eee->advertised, advertising,
11720 				     eee->supported);
11721 			return false;
11722 		}
11723 	}
11724 	return true;
11725 }
11726 
11727 static int bnxt_update_phy_setting(struct bnxt *bp)
11728 {
11729 	int rc;
11730 	bool update_link = false;
11731 	bool update_pause = false;
11732 	bool update_eee = false;
11733 	struct bnxt_link_info *link_info = &bp->link_info;
11734 
11735 	rc = bnxt_update_link(bp, true);
11736 	if (rc) {
11737 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
11738 			   rc);
11739 		return rc;
11740 	}
11741 	if (!BNXT_SINGLE_PF(bp))
11742 		return 0;
11743 
11744 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
11745 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
11746 	    link_info->req_flow_ctrl)
11747 		update_pause = true;
11748 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
11749 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
11750 		update_pause = true;
11751 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
11752 		if (BNXT_AUTO_MODE(link_info->auto_mode))
11753 			update_link = true;
11754 		if (bnxt_force_speed_updated(link_info))
11755 			update_link = true;
11756 		if (link_info->req_duplex != link_info->duplex_setting)
11757 			update_link = true;
11758 	} else {
11759 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
11760 			update_link = true;
11761 		if (bnxt_auto_speed_updated(link_info))
11762 			update_link = true;
11763 	}
11764 
11765 	/* The last close may have shut down the link, so we need to call
11766 	 * PHY_CFG to bring it back up.
11767 	 */
11768 	if (!BNXT_LINK_IS_UP(bp))
11769 		update_link = true;
11770 
11771 	if (!bnxt_eee_config_ok(bp))
11772 		update_eee = true;
11773 
11774 	if (update_link)
11775 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
11776 	else if (update_pause)
11777 		rc = bnxt_hwrm_set_pause(bp);
11778 	if (rc) {
11779 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
11780 			   rc);
11781 		return rc;
11782 	}
11783 
11784 	return rc;
11785 }
11786 
11787 /* Common routine to pre-map certain register blocks to different GRC windows.
11788  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
11789  * in the PF and 3 windows in the VF can be customized to map in different
11790  * register blocks.
11791  */
11792 static void bnxt_preset_reg_win(struct bnxt *bp)
11793 {
11794 	if (BNXT_PF(bp)) {
11795 		/* CAG registers map to GRC window #4 */
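		/* Each window base register is assumed to be 4 bytes wide, so
		 * window #4 is programmed at byte offset 12 from
		 * BNXT_GRCPF_REG_WINDOW_BASE_OUT.
		 */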
11796 		writel(BNXT_CAG_REG_BASE,
11797 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
11798 	}
11799 }
11800 
11801 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
11802 
11803 static int bnxt_reinit_after_abort(struct bnxt *bp)
11804 {
11805 	int rc;
11806 
11807 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11808 		return -EBUSY;
11809 
11810 	if (bp->dev->reg_state == NETREG_UNREGISTERED)
11811 		return -ENODEV;
11812 
11813 	rc = bnxt_fw_init_one(bp);
11814 	if (!rc) {
11815 		bnxt_clear_int_mode(bp);
11816 		rc = bnxt_init_int_mode(bp);
11817 		if (!rc) {
11818 			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11819 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11820 		}
11821 	}
11822 	return rc;
11823 }
11824 
11825 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
11826 {
11827 	struct bnxt_ntuple_filter *ntp_fltr;
11828 	struct bnxt_l2_filter *l2_fltr;
11829 
11830 	if (list_empty(&fltr->list))
11831 		return;
11832 
11833 	if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
11834 		ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
11835 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
11836 		atomic_inc(&l2_fltr->refcnt);
11837 		ntp_fltr->l2_fltr = l2_fltr;
11838 		if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
11839 			bnxt_del_ntp_filter(bp, ntp_fltr);
11840 			netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
11841 				   fltr->sw_id);
11842 		}
11843 	} else if (fltr->type == BNXT_FLTR_TYPE_L2) {
11844 		l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
11845 		if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
11846 			bnxt_del_l2_filter(bp, l2_fltr);
11847 			netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
11848 				   fltr->sw_id);
11849 		}
11850 	}
11851 }
11852 
11853 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
11854 {
11855 	struct bnxt_filter_base *usr_fltr, *tmp;
11856 
11857 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
11858 		bnxt_cfg_one_usr_fltr(bp, usr_fltr);
11859 }
11860 
11861 static int bnxt_set_xps_mapping(struct bnxt *bp)
11862 {
11863 	int numa_node = dev_to_node(&bp->pdev->dev);
11864 	unsigned int q_idx, map_idx, cpu, i;
11865 	const struct cpumask *cpu_mask_ptr;
11866 	int nr_cpus = num_online_cpus();
11867 	cpumask_t *q_map;
11868 	int rc = 0;
11869 
11870 	q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
11871 	if (!q_map)
11872 		return -ENOMEM;
11873 
11874 	/* Create CPU mask for all TX queues across MQPRIO traffic classes.
11875 	 * Each TC has the same number of TX queues. The nth TX queue for each
11876 	 * TC will have the same CPU mask.
11877 	 */
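	/* Hypothetical example: with 4 rings per TC and 8 online CPUs, CPUs 0
	 * and 4 map to queue 0 of every TC, CPUs 1 and 5 to queue 1, and so on.
	 */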
11878 	for (i = 0; i < nr_cpus; i++) {
11879 		map_idx = i % bp->tx_nr_rings_per_tc;
11880 		cpu = cpumask_local_spread(i, numa_node);
11881 		cpu_mask_ptr = get_cpu_mask(cpu);
11882 		cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
11883 	}
11884 
11885 	/* Register CPU mask for each TX queue except the ones marked for XDP */
11886 	for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
11887 		map_idx = q_idx % bp->tx_nr_rings_per_tc;
11888 		rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
11889 		if (rc) {
11890 			netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
11891 				    q_idx);
11892 			break;
11893 		}
11894 	}
11895 
11896 	kfree(q_map);
11897 
11898 	return rc;
11899 }
11900 
11901 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11902 {
11903 	int rc = 0;
11904 
11905 	bnxt_preset_reg_win(bp);
11906 	netif_carrier_off(bp->dev);
11907 	if (irq_re_init) {
11908 		/* Reserve rings now if none were reserved at driver probe. */
11909 		rc = bnxt_init_dflt_ring_mode(bp);
11910 		if (rc) {
11911 			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
11912 			return rc;
11913 		}
11914 	}
11915 	rc = bnxt_reserve_rings(bp, irq_re_init);
11916 	if (rc)
11917 		return rc;
11918 	if ((bp->flags & BNXT_FLAG_RFS) &&
11919 	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
11920 		/* disable RFS if falling back to INTA */
11921 		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
11922 		bp->flags &= ~BNXT_FLAG_RFS;
11923 	}
11924 
11925 	rc = bnxt_alloc_mem(bp, irq_re_init);
11926 	if (rc) {
11927 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11928 		goto open_err_free_mem;
11929 	}
11930 
11931 	if (irq_re_init) {
11932 		bnxt_init_napi(bp);
11933 		rc = bnxt_request_irq(bp);
11934 		if (rc) {
11935 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
11936 			goto open_err_irq;
11937 		}
11938 	}
11939 
11940 	rc = bnxt_init_nic(bp, irq_re_init);
11941 	if (rc) {
11942 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11943 		goto open_err_irq;
11944 	}
11945 
11946 	bnxt_enable_napi(bp);
11947 	bnxt_debug_dev_init(bp);
11948 
11949 	if (link_re_init) {
11950 		mutex_lock(&bp->link_lock);
11951 		rc = bnxt_update_phy_setting(bp);
11952 		mutex_unlock(&bp->link_lock);
11953 		if (rc) {
11954 			netdev_warn(bp->dev, "failed to update phy settings\n");
11955 			if (BNXT_SINGLE_PF(bp)) {
11956 				bp->link_info.phy_retry = true;
11957 				bp->link_info.phy_retry_expires =
11958 					jiffies + 5 * HZ;
11959 			}
11960 		}
11961 	}
11962 
11963 	if (irq_re_init) {
11964 		udp_tunnel_nic_reset_ntf(bp->dev);
11965 		rc = bnxt_set_xps_mapping(bp);
11966 		if (rc)
11967 			netdev_warn(bp->dev, "failed to set xps mapping\n");
11968 	}
11969 
11970 	if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
11971 		if (!static_key_enabled(&bnxt_xdp_locking_key))
11972 			static_branch_enable(&bnxt_xdp_locking_key);
11973 	} else if (static_key_enabled(&bnxt_xdp_locking_key)) {
11974 		static_branch_disable(&bnxt_xdp_locking_key);
11975 	}
11976 	set_bit(BNXT_STATE_OPEN, &bp->state);
11977 	bnxt_enable_int(bp);
11978 	/* Enable TX queues */
11979 	bnxt_tx_enable(bp);
11980 	mod_timer(&bp->timer, jiffies + bp->current_interval);
11981 	/* Poll link status and check for SFP+ module status */
11982 	mutex_lock(&bp->link_lock);
11983 	bnxt_get_port_module_status(bp);
11984 	mutex_unlock(&bp->link_lock);
11985 
11986 	/* VF-reps may need to be re-opened after the PF is re-opened */
11987 	if (BNXT_PF(bp))
11988 		bnxt_vf_reps_open(bp);
11989 	if (bp->ptp_cfg)
11990 		atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
11991 	bnxt_ptp_init_rtc(bp, true);
11992 	bnxt_ptp_cfg_tstamp_filters(bp);
11993 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
11994 		bnxt_hwrm_realloc_rss_ctx_vnic(bp);
11995 	bnxt_cfg_usr_fltrs(bp);
11996 	return 0;
11997 
11998 open_err_irq:
11999 	bnxt_del_napi(bp);
12000 
12001 open_err_free_mem:
12002 	bnxt_free_skbs(bp);
12003 	bnxt_free_irq(bp);
12004 	bnxt_free_mem(bp, true);
12005 	return rc;
12006 }
12007 
12008 /* rtnl_lock held */
12009 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12010 {
12011 	int rc = 0;
12012 
12013 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
12014 		rc = -EIO;
12015 	if (!rc)
12016 		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
12017 	if (rc) {
12018 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
12019 		dev_close(bp->dev);
12020 	}
12021 	return rc;
12022 }
12023 
12024 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
12025  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
12026  * self tests.
12027  */
12028 int bnxt_half_open_nic(struct bnxt *bp)
12029 {
12030 	int rc = 0;
12031 
12032 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12033 		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
12034 		rc = -ENODEV;
12035 		goto half_open_err;
12036 	}
12037 
12038 	rc = bnxt_alloc_mem(bp, true);
12039 	if (rc) {
12040 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12041 		goto half_open_err;
12042 	}
12043 	bnxt_init_napi(bp);
12044 	set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12045 	rc = bnxt_init_nic(bp, true);
12046 	if (rc) {
12047 		clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12048 		bnxt_del_napi(bp);
12049 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12050 		goto half_open_err;
12051 	}
12052 	return 0;
12053 
12054 half_open_err:
12055 	bnxt_free_skbs(bp);
12056 	bnxt_free_mem(bp, true);
12057 	dev_close(bp->dev);
12058 	return rc;
12059 }
12060 
12061 /* rtnl_lock held, this call can only be made after a previous successful
12062  * call to bnxt_half_open_nic().
12063  */
12064 void bnxt_half_close_nic(struct bnxt *bp)
12065 {
12066 	bnxt_hwrm_resource_free(bp, false, true);
12067 	bnxt_del_napi(bp);
12068 	bnxt_free_skbs(bp);
12069 	bnxt_free_mem(bp, true);
12070 	clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12071 }
12072 
12073 void bnxt_reenable_sriov(struct bnxt *bp)
12074 {
12075 	if (BNXT_PF(bp)) {
12076 		struct bnxt_pf_info *pf = &bp->pf;
12077 		int n = pf->active_vfs;
12078 
12079 		if (n)
12080 			bnxt_cfg_hw_sriov(bp, &n, true);
12081 	}
12082 }
12083 
12084 static int bnxt_open(struct net_device *dev)
12085 {
12086 	struct bnxt *bp = netdev_priv(dev);
12087 	int rc;
12088 
12089 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12090 		rc = bnxt_reinit_after_abort(bp);
12091 		if (rc) {
12092 			if (rc == -EBUSY)
12093 				netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
12094 			else
12095 				netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
12096 			return -ENODEV;
12097 		}
12098 	}
12099 
12100 	rc = bnxt_hwrm_if_change(bp, true);
12101 	if (rc)
12102 		return rc;
12103 
12104 	rc = __bnxt_open_nic(bp, true, true);
12105 	if (rc) {
12106 		bnxt_hwrm_if_change(bp, false);
12107 	} else {
12108 		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12109 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12110 				bnxt_queue_sp_work(bp,
12111 						   BNXT_RESTART_ULP_SP_EVENT);
12112 		}
12113 	}
12114 
12115 	return rc;
12116 }
12117 
12118 static bool bnxt_drv_busy(struct bnxt *bp)
12119 {
12120 	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
12121 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
12122 }
12123 
12124 static void bnxt_get_ring_stats(struct bnxt *bp,
12125 				struct rtnl_link_stats64 *stats);
12126 
12127 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
12128 			     bool link_re_init)
12129 {
12130 	/* Close the VF-reps before closing PF */
12131 	if (BNXT_PF(bp))
12132 		bnxt_vf_reps_close(bp);
12133 
12134 	/* Change device state to avoid TX queue wake-ups */
12135 	bnxt_tx_disable(bp);
12136 
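	/* Clear BNXT_STATE_OPEN before polling for in-flight readers; the
	 * barrier pairs with the one in bnxt_get_stats64() so that a reader
	 * either sees the bit cleared or is seen by bnxt_drv_busy() below.
	 */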
12137 	clear_bit(BNXT_STATE_OPEN, &bp->state);
12138 	smp_mb__after_atomic();
12139 	while (bnxt_drv_busy(bp))
12140 		msleep(20);
12141 
12142 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12143 		bnxt_clear_rss_ctxs(bp, false);
12144 	/* Flush rings and disable interrupts */
12145 	bnxt_shutdown_nic(bp, irq_re_init);
12146 
12147 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
12148 
12149 	bnxt_debug_dev_exit(bp);
12150 	bnxt_disable_napi(bp);
12151 	del_timer_sync(&bp->timer);
12152 	bnxt_free_skbs(bp);
12153 
12154 	/* Save ring stats before shutdown */
12155 	if (bp->bnapi && irq_re_init) {
12156 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
12157 		bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
12158 	}
12159 	if (irq_re_init) {
12160 		bnxt_free_irq(bp);
12161 		bnxt_del_napi(bp);
12162 	}
12163 	bnxt_free_mem(bp, irq_re_init);
12164 }
12165 
12166 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12167 {
12168 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12169 		/* If we get here, it means firmware reset is in progress
12170 		 * while we are trying to close.  We can safely proceed with
12171 		 * the close because we are holding rtnl_lock().  Some firmware
12172 		 * messages may fail as we proceed to close.  We set the
12173 		 * ABORT_ERR flag here so that the FW reset thread will later
12174 		 * abort when it gets the rtnl_lock() and sees the flag.
12175 		 */
12176 		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
12177 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12178 	}
12179 
12180 #ifdef CONFIG_BNXT_SRIOV
12181 	if (bp->sriov_cfg) {
12182 		int rc;
12183 
12184 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
12185 						      !bp->sriov_cfg,
12186 						      BNXT_SRIOV_CFG_WAIT_TMO);
12187 		if (!rc)
12188 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
12189 		else if (rc < 0)
12190 			netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
12191 	}
12192 #endif
12193 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
12194 }
12195 
12196 static int bnxt_close(struct net_device *dev)
12197 {
12198 	struct bnxt *bp = netdev_priv(dev);
12199 
12200 	bnxt_close_nic(bp, true, true);
12201 	bnxt_hwrm_shutdown_link(bp);
12202 	bnxt_hwrm_if_change(bp, false);
12203 	return 0;
12204 }
12205 
12206 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
12207 				   u16 *val)
12208 {
12209 	struct hwrm_port_phy_mdio_read_output *resp;
12210 	struct hwrm_port_phy_mdio_read_input *req;
12211 	int rc;
12212 
12213 	if (bp->hwrm_spec_code < 0x10a00)
12214 		return -EOPNOTSUPP;
12215 
12216 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
12217 	if (rc)
12218 		return rc;
12219 
12220 	req->port_id = cpu_to_le16(bp->pf.port_id);
12221 	req->phy_addr = phy_addr;
12222 	req->reg_addr = cpu_to_le16(reg & 0x1f);
12223 	if (mdio_phy_id_is_c45(phy_addr)) {
12224 		req->cl45_mdio = 1;
12225 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
12226 		req->dev_addr = mdio_phy_id_devad(phy_addr);
12227 		req->reg_addr = cpu_to_le16(reg);
12228 	}
12229 
12230 	resp = hwrm_req_hold(bp, req);
12231 	rc = hwrm_req_send(bp, req);
12232 	if (!rc)
12233 		*val = le16_to_cpu(resp->reg_data);
12234 	hwrm_req_drop(bp, req);
12235 	return rc;
12236 }
12237 
12238 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
12239 				    u16 val)
12240 {
12241 	struct hwrm_port_phy_mdio_write_input *req;
12242 	int rc;
12243 
12244 	if (bp->hwrm_spec_code < 0x10a00)
12245 		return -EOPNOTSUPP;
12246 
12247 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
12248 	if (rc)
12249 		return rc;
12250 
12251 	req->port_id = cpu_to_le16(bp->pf.port_id);
12252 	req->phy_addr = phy_addr;
12253 	req->reg_addr = cpu_to_le16(reg & 0x1f);
12254 	if (mdio_phy_id_is_c45(phy_addr)) {
12255 		req->cl45_mdio = 1;
12256 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
12257 		req->dev_addr = mdio_phy_id_devad(phy_addr);
12258 		req->reg_addr = cpu_to_le16(reg);
12259 	}
12260 	req->reg_data = cpu_to_le16(val);
12261 
12262 	return hwrm_req_send(bp, req);
12263 }
12264 
12265 /* rtnl_lock held */
12266 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12267 {
12268 	struct mii_ioctl_data *mdio = if_mii(ifr);
12269 	struct bnxt *bp = netdev_priv(dev);
12270 	int rc;
12271 
12272 	switch (cmd) {
12273 	case SIOCGMIIPHY:
12274 		mdio->phy_id = bp->link_info.phy_addr;
12275 
12276 		fallthrough;
12277 	case SIOCGMIIREG: {
12278 		u16 mii_regval = 0;
12279 
12280 		if (!netif_running(dev))
12281 			return -EAGAIN;
12282 
12283 		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
12284 					     &mii_regval);
12285 		mdio->val_out = mii_regval;
12286 		return rc;
12287 	}
12288 
12289 	case SIOCSMIIREG:
12290 		if (!netif_running(dev))
12291 			return -EAGAIN;
12292 
12293 		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
12294 						mdio->val_in);
12295 
12296 	case SIOCSHWTSTAMP:
12297 		return bnxt_hwtstamp_set(dev, ifr);
12298 
12299 	case SIOCGHWTSTAMP:
12300 		return bnxt_hwtstamp_get(dev, ifr);
12301 
12302 	default:
12303 		/* do nothing */
12304 		break;
12305 	}
12306 	return -EOPNOTSUPP;
12307 }
12308 
12309 static void bnxt_get_ring_stats(struct bnxt *bp,
12310 				struct rtnl_link_stats64 *stats)
12311 {
12312 	int i;
12313 
12314 	for (i = 0; i < bp->cp_nr_rings; i++) {
12315 		struct bnxt_napi *bnapi = bp->bnapi[i];
12316 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12317 		u64 *sw = cpr->stats.sw_stats;
12318 
12319 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
12320 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12321 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
12322 
12323 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
12324 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
12325 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
12326 
12327 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
12328 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
12329 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
12330 
12331 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
12332 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
12333 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
12334 
12335 		stats->rx_missed_errors +=
12336 			BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
12337 
12338 		stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12339 
12340 		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
12341 
12342 		stats->rx_dropped +=
12343 			cpr->sw_stats->rx.rx_netpoll_discards +
12344 			cpr->sw_stats->rx.rx_oom_discards;
12345 	}
12346 }
12347 
12348 static void bnxt_add_prev_stats(struct bnxt *bp,
12349 				struct rtnl_link_stats64 *stats)
12350 {
12351 	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
12352 
12353 	stats->rx_packets += prev_stats->rx_packets;
12354 	stats->tx_packets += prev_stats->tx_packets;
12355 	stats->rx_bytes += prev_stats->rx_bytes;
12356 	stats->tx_bytes += prev_stats->tx_bytes;
12357 	stats->rx_missed_errors += prev_stats->rx_missed_errors;
12358 	stats->multicast += prev_stats->multicast;
12359 	stats->rx_dropped += prev_stats->rx_dropped;
12360 	stats->tx_dropped += prev_stats->tx_dropped;
12361 }
12362 
12363 static void
12364 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
12365 {
12366 	struct bnxt *bp = netdev_priv(dev);
12367 
12368 	set_bit(BNXT_STATE_READ_STATS, &bp->state);
12369 	/* Make sure bnxt_close_nic() sees that we are reading stats before
12370 	 * we check the BNXT_STATE_OPEN flag.
12371 	 */
12372 	smp_mb__after_atomic();
12373 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12374 		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12375 		*stats = bp->net_stats_prev;
12376 		return;
12377 	}
12378 
12379 	bnxt_get_ring_stats(bp, stats);
12380 	bnxt_add_prev_stats(bp, stats);
12381 
12382 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
12383 		u64 *rx = bp->port_stats.sw_stats;
12384 		u64 *tx = bp->port_stats.sw_stats +
12385 			  BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
12386 
12387 		stats->rx_crc_errors =
12388 			BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
12389 		stats->rx_frame_errors =
12390 			BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
12391 		stats->rx_length_errors =
12392 			BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
12393 			BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
12394 			BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
12395 		stats->rx_errors =
12396 			BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
12397 			BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
12398 		stats->collisions =
12399 			BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
12400 		stats->tx_fifo_errors =
12401 			BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
12402 		stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
12403 	}
12404 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12405 }
12406 
12407 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
12408 					struct bnxt_total_ring_err_stats *stats,
12409 					struct bnxt_cp_ring_info *cpr)
12410 {
12411 	struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
12412 	u64 *hw_stats = cpr->stats.sw_stats;
12413 
12414 	stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
12415 	stats->rx_total_resets += sw_stats->rx.rx_resets;
12416 	stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
12417 	stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
12418 	stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
12419 	stats->rx_total_ring_discards +=
12420 		BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
12421 	stats->tx_total_resets += sw_stats->tx.tx_resets;
12422 	stats->tx_total_ring_discards +=
12423 		BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
12424 	stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
12425 }
12426 
12427 void bnxt_get_ring_err_stats(struct bnxt *bp,
12428 			     struct bnxt_total_ring_err_stats *stats)
12429 {
12430 	int i;
12431 
12432 	for (i = 0; i < bp->cp_nr_rings; i++)
12433 		bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
12434 }
12435 
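/* Copy the netdev multicast list into the default VNIC and return true if it
 * changed.  Falls back to ALL_MCAST when more than BNXT_MAX_MC_ADDRS
 * addresses are present.
 */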
12436 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
12437 {
12438 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12439 	struct net_device *dev = bp->dev;
12440 	struct netdev_hw_addr *ha;
12441 	u8 *haddr;
12442 	int mc_count = 0;
12443 	bool update = false;
12444 	int off = 0;
12445 
12446 	netdev_for_each_mc_addr(ha, dev) {
12447 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
12448 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12449 			vnic->mc_list_count = 0;
12450 			return false;
12451 		}
12452 		haddr = ha->addr;
12453 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
12454 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
12455 			update = true;
12456 		}
12457 		off += ETH_ALEN;
12458 		mc_count++;
12459 	}
12460 	if (mc_count)
12461 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12462 
12463 	if (mc_count != vnic->mc_list_count) {
12464 		vnic->mc_list_count = mc_count;
12465 		update = true;
12466 	}
12467 	return update;
12468 }
12469 
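/* Return true if the netdev unicast list differs from the L2 filters
 * currently programmed on the default VNIC.
 */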
12470 static bool bnxt_uc_list_updated(struct bnxt *bp)
12471 {
12472 	struct net_device *dev = bp->dev;
12473 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12474 	struct netdev_hw_addr *ha;
12475 	int off = 0;
12476 
12477 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
12478 		return true;
12479 
12480 	netdev_for_each_uc_addr(ha, dev) {
12481 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
12482 			return true;
12483 
12484 		off += ETH_ALEN;
12485 	}
12486 	return false;
12487 }
12488 
12489 static void bnxt_set_rx_mode(struct net_device *dev)
12490 {
12491 	struct bnxt *bp = netdev_priv(dev);
12492 	struct bnxt_vnic_info *vnic;
12493 	bool mc_update = false;
12494 	bool uc_update;
12495 	u32 mask;
12496 
12497 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
12498 		return;
12499 
12500 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12501 	mask = vnic->rx_mask;
12502 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
12503 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
12504 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
12505 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
12506 
12507 	if (dev->flags & IFF_PROMISC)
12508 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12509 
12510 	uc_update = bnxt_uc_list_updated(bp);
12511 
12512 	if (dev->flags & IFF_BROADCAST)
12513 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
12514 	if (dev->flags & IFF_ALLMULTI) {
12515 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12516 		vnic->mc_list_count = 0;
12517 	} else if (dev->flags & IFF_MULTICAST) {
12518 		mc_update = bnxt_mc_list_updated(bp, &mask);
12519 	}
12520 
12521 	if (mask != vnic->rx_mask || uc_update || mc_update) {
12522 		vnic->rx_mask = mask;
12523 
12524 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12525 	}
12526 }
12527 
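/* Program the unicast L2 filters and the VNIC RX mask in firmware, falling
 * back to ALL_MCAST if the multicast filters cannot be set.
 */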
12528 static int bnxt_cfg_rx_mode(struct bnxt *bp)
12529 {
12530 	struct net_device *dev = bp->dev;
12531 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12532 	struct netdev_hw_addr *ha;
12533 	int i, off = 0, rc;
12534 	bool uc_update;
12535 
12536 	netif_addr_lock_bh(dev);
12537 	uc_update = bnxt_uc_list_updated(bp);
12538 	netif_addr_unlock_bh(dev);
12539 
12540 	if (!uc_update)
12541 		goto skip_uc;
12542 
12543 	for (i = 1; i < vnic->uc_filter_count; i++) {
12544 		struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
12545 
12546 		bnxt_hwrm_l2_filter_free(bp, fltr);
12547 		bnxt_del_l2_filter(bp, fltr);
12548 	}
12549 
12550 	vnic->uc_filter_count = 1;
12551 
12552 	netif_addr_lock_bh(dev);
12553 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
12554 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12555 	} else {
12556 		netdev_for_each_uc_addr(ha, dev) {
12557 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
12558 			off += ETH_ALEN;
12559 			vnic->uc_filter_count++;
12560 		}
12561 	}
12562 	netif_addr_unlock_bh(dev);
12563 
12564 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
12565 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
12566 		if (rc) {
12567 			if (BNXT_VF(bp) && rc == -ENODEV) {
12568 				if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12569 					netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
12570 				else
12571 					netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
12572 				rc = 0;
12573 			} else {
12574 				netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
12575 			}
12576 			vnic->uc_filter_count = i;
12577 			return rc;
12578 		}
12579 	}
12580 	if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12581 		netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
12582 
12583 skip_uc:
12584 	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
12585 	    !bnxt_promisc_ok(bp))
12586 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12587 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12588 	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
12589 		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
12590 			    rc);
12591 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12592 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12593 		vnic->mc_list_count = 0;
12594 		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12595 	}
12596 	if (rc)
12597 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
12598 			   rc);
12599 
12600 	return rc;
12601 }
12602 
12603 static bool bnxt_can_reserve_rings(struct bnxt *bp)
12604 {
12605 #ifdef CONFIG_BNXT_SRIOV
12606 	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
12607 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12608 
12609 		/* If no minimum rings were provisioned by the PF, don't
12610 		 * reserve rings by default when the device is down.
12611 		 */
12612 		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
12613 			return true;
12614 
12615 		if (!netif_running(bp->dev))
12616 			return false;
12617 	}
12618 #endif
12619 	return true;
12620 }
12621 
12622 /* If the chip and firmware support RFS */
12623 static bool bnxt_rfs_supported(struct bnxt *bp)
12624 {
12625 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
12626 		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
12627 			return true;
12628 		return false;
12629 	}
12630 	/* 212 firmware is broken for aRFS */
12631 	if (BNXT_FW_MAJ(bp) == 212)
12632 		return false;
12633 	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
12634 		return true;
12635 	if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
12636 		return true;
12637 	return false;
12638 }
12639 
12640 /* If runtime conditions support RFS */
12641 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
12642 {
12643 	struct bnxt_hw_rings hwr = {0};
12644 	int max_vnics, max_rss_ctxs;
12645 
12646 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
12647 	    !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
12648 		return bnxt_rfs_supported(bp);
12649 
12650 	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
12651 		return false;
12652 
12653 	hwr.grp = bp->rx_nr_rings;
12654 	hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
12655 	if (new_rss_ctx)
12656 		hwr.vnic++;
12657 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
12658 	max_vnics = bnxt_get_max_func_vnics(bp);
12659 	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
12660 
12661 	if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
12662 		if (bp->rx_nr_rings > 1)
12663 			netdev_warn(bp->dev,
12664 				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
12665 				    min(max_rss_ctxs - 1, max_vnics - 1));
12666 		return false;
12667 	}
12668 
12669 	if (!BNXT_NEW_RM(bp))
12670 		return true;
12671 
12672 	if (hwr.vnic == bp->hw_resc.resv_vnics &&
12673 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12674 		return true;
12675 
12676 	bnxt_hwrm_reserve_rings(bp, &hwr);
12677 	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
12678 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12679 		return true;
12680 
12681 	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
12682 	hwr.vnic = 1;
12683 	hwr.rss_ctx = 0;
12684 	bnxt_hwrm_reserve_rings(bp, &hwr);
12685 	return false;
12686 }
12687 
12688 static netdev_features_t bnxt_fix_features(struct net_device *dev,
12689 					   netdev_features_t features)
12690 {
12691 	struct bnxt *bp = netdev_priv(dev);
12692 	netdev_features_t vlan_features;
12693 
12694 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
12695 		features &= ~NETIF_F_NTUPLE;
12696 
12697 	if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
12698 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12699 
12700 	if (!(features & NETIF_F_GRO))
12701 		features &= ~NETIF_F_GRO_HW;
12702 
12703 	if (features & NETIF_F_GRO_HW)
12704 		features &= ~NETIF_F_LRO;
12705 
12706 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
12707 	 * turned on or off together.
12708 	 */
12709 	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
12710 	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
12711 		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12712 			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12713 		else if (vlan_features)
12714 			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12715 	}
12716 #ifdef CONFIG_BNXT_SRIOV
12717 	if (BNXT_VF(bp) && bp->vf.vlan)
12718 		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12719 #endif
12720 	return features;
12721 }
12722 
12723 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
12724 				bool link_re_init, u32 flags, bool update_tpa)
12725 {
12726 	bnxt_close_nic(bp, irq_re_init, link_re_init);
12727 	bp->flags = flags;
12728 	if (update_tpa)
12729 		bnxt_set_ring_params(bp);
12730 	return bnxt_open_nic(bp, irq_re_init, link_re_init);
12731 }
12732 
12733 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
12734 {
12735 	bool update_tpa = false, update_ntuple = false;
12736 	struct bnxt *bp = netdev_priv(dev);
12737 	u32 flags = bp->flags;
12738 	u32 changes;
12739 	int rc = 0;
12740 	bool re_init = false;
12741 
12742 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
12743 	if (features & NETIF_F_GRO_HW)
12744 		flags |= BNXT_FLAG_GRO;
12745 	else if (features & NETIF_F_LRO)
12746 		flags |= BNXT_FLAG_LRO;
12747 
12748 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
12749 		flags &= ~BNXT_FLAG_TPA;
12750 
12751 	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12752 		flags |= BNXT_FLAG_STRIP_VLAN;
12753 
12754 	if (features & NETIF_F_NTUPLE)
12755 		flags |= BNXT_FLAG_RFS;
12756 	else
12757 		bnxt_clear_usr_fltrs(bp, true);
12758 
12759 	changes = flags ^ bp->flags;
12760 	if (changes & BNXT_FLAG_TPA) {
12761 		update_tpa = true;
12762 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
12763 		    (flags & BNXT_FLAG_TPA) == 0 ||
12764 		    (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
12765 			re_init = true;
12766 	}
12767 
12768 	if (changes & ~BNXT_FLAG_TPA)
12769 		re_init = true;
12770 
12771 	if (changes & BNXT_FLAG_RFS)
12772 		update_ntuple = true;
12773 
12774 	if (flags != bp->flags) {
12775 		u32 old_flags = bp->flags;
12776 
12777 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12778 			bp->flags = flags;
12779 			if (update_tpa)
12780 				bnxt_set_ring_params(bp);
12781 			return rc;
12782 		}
12783 
12784 		if (update_ntuple)
12785 			return bnxt_reinit_features(bp, true, false, flags, update_tpa);
12786 
12787 		if (re_init)
12788 			return bnxt_reinit_features(bp, false, false, flags, update_tpa);
12789 
12790 		if (update_tpa) {
12791 			bp->flags = flags;
12792 			rc = bnxt_set_tpa(bp,
12793 					  (flags & BNXT_FLAG_TPA) ?
12794 					  true : false);
12795 			if (rc)
12796 				bp->flags = old_flags;
12797 		}
12798 	}
12799 	return rc;
12800 }
12801 
12802 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
12803 			      u8 **nextp)
12804 {
12805 	struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
12806 	struct hop_jumbo_hdr *jhdr;
12807 	int hdr_count = 0;
12808 	u8 *nexthdr;
12809 	int start;
12810 
12811 	/* Check that there are at most 2 IPv6 extension headers, no
12812 	 * fragment header, and each is <= 64 bytes.
12813 	 */
12814 	start = nw_off + sizeof(*ip6h);
12815 	nexthdr = &ip6h->nexthdr;
12816 	while (ipv6_ext_hdr(*nexthdr)) {
12817 		struct ipv6_opt_hdr *hp;
12818 		int hdrlen;
12819 
12820 		if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
12821 		    *nexthdr == NEXTHDR_FRAGMENT)
12822 			return false;
12823 		hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
12824 					  skb_headlen(skb), NULL);
12825 		if (!hp)
12826 			return false;
12827 		if (*nexthdr == NEXTHDR_AUTH)
12828 			hdrlen = ipv6_authlen(hp);
12829 		else
12830 			hdrlen = ipv6_optlen(hp);
12831 
12832 		if (hdrlen > 64)
12833 			return false;
12834 
12835 		/* The ext header may be a hop-by-hop header inserted for
12836 		 * big TCP purposes.  It will be removed before the packet
12837 		 * leaves the NIC, so do not count it.
12838 		 */
12839 		if (*nexthdr == NEXTHDR_HOP) {
12840 			if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
12841 				goto increment_hdr;
12842 
12843 			jhdr = (struct hop_jumbo_hdr *)hp;
12844 			if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
12845 			    jhdr->nexthdr != IPPROTO_TCP)
12846 				goto increment_hdr;
12847 
12848 			goto next_hdr;
12849 		}
12850 increment_hdr:
12851 		hdr_count++;
12852 next_hdr:
12853 		nexthdr = &hp->nexthdr;
12854 		start += hdrlen;
12855 	}
12856 	if (nextp) {
12857 		/* Caller will check inner protocol */
12858 		if (skb->encapsulation) {
12859 			*nextp = nexthdr;
12860 			return true;
12861 		}
12862 		*nextp = NULL;
12863 	}
12864 	/* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
12865 	return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
12866 }
12867 
12868 /* For UDP, we can only handle 1 VXLAN, 1 VXLAN-GPE and 1 Geneve port. */
12869 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
12870 {
12871 	struct udphdr *uh = udp_hdr(skb);
12872 	__be16 udp_port = uh->dest;
12873 
12874 	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
12875 	    udp_port != bp->vxlan_gpe_port)
12876 		return false;
12877 	if (skb->inner_protocol == htons(ETH_P_TEB)) {
12878 		struct ethhdr *eh = inner_eth_hdr(skb);
12879 
12880 		switch (eh->h_proto) {
12881 		case htons(ETH_P_IP):
12882 			return true;
12883 		case htons(ETH_P_IPV6):
12884 			return bnxt_exthdr_check(bp, skb,
12885 						 skb_inner_network_offset(skb),
12886 						 NULL);
12887 		}
12888 	} else if (skb->inner_protocol == htons(ETH_P_IP)) {
12889 		return true;
12890 	} else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
12891 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12892 					 NULL);
12893 	}
12894 	return false;
12895 }
12896 
12897 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
12898 {
12899 	switch (l4_proto) {
12900 	case IPPROTO_UDP:
12901 		return bnxt_udp_tunl_check(bp, skb);
12902 	case IPPROTO_IPIP:
12903 		return true;
12904 	case IPPROTO_GRE: {
12905 		switch (skb->inner_protocol) {
12906 		default:
12907 			return false;
12908 		case htons(ETH_P_IP):
12909 			return true;
12910 		case htons(ETH_P_IPV6):
12911 			fallthrough;
12912 		}
12913 	}
12914 	case IPPROTO_IPV6:
12915 		/* Check ext headers of inner ipv6 */
12916 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12917 					 NULL);
12918 	}
12919 	return false;
12920 }
12921 
12922 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
12923 					     struct net_device *dev,
12924 					     netdev_features_t features)
12925 {
12926 	struct bnxt *bp = netdev_priv(dev);
12927 	u8 *l4_proto;
12928 
12929 	features = vlan_features_check(skb, features);
12930 	switch (vlan_get_protocol(skb)) {
12931 	case htons(ETH_P_IP):
12932 		if (!skb->encapsulation)
12933 			return features;
12934 		l4_proto = &ip_hdr(skb)->protocol;
12935 		if (bnxt_tunl_check(bp, skb, *l4_proto))
12936 			return features;
12937 		break;
12938 	case htons(ETH_P_IPV6):
12939 		if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
12940 				       &l4_proto))
12941 			break;
12942 		if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
12943 			return features;
12944 		break;
12945 	}
12946 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12947 }
12948 
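/* Read @num_words 32-bit words starting at @reg_off using the
 * HWRM_DBG_READ_DIRECT firmware command.
 */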
12949 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
12950 			 u32 *reg_buf)
12951 {
12952 	struct hwrm_dbg_read_direct_output *resp;
12953 	struct hwrm_dbg_read_direct_input *req;
12954 	__le32 *dbg_reg_buf;
12955 	dma_addr_t mapping;
12956 	int rc, i;
12957 
12958 	rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
12959 	if (rc)
12960 		return rc;
12961 
12962 	dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
12963 					 &mapping);
12964 	if (!dbg_reg_buf) {
12965 		rc = -ENOMEM;
12966 		goto dbg_rd_reg_exit;
12967 	}
12968 
12969 	req->host_dest_addr = cpu_to_le64(mapping);
12970 
12971 	resp = hwrm_req_hold(bp, req);
12972 	req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
12973 	req->read_len32 = cpu_to_le32(num_words);
12974 
12975 	rc = hwrm_req_send(bp, req);
12976 	if (rc || resp->error_code) {
12977 		rc = -EIO;
12978 		goto dbg_rd_reg_exit;
12979 	}
12980 	for (i = 0; i < num_words; i++)
12981 		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
12982 
12983 dbg_rd_reg_exit:
12984 	hwrm_req_drop(bp, req);
12985 	return rc;
12986 }
12987 
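/* Query firmware for the current producer and consumer indices of a ring. */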
12988 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
12989 				       u32 ring_id, u32 *prod, u32 *cons)
12990 {
12991 	struct hwrm_dbg_ring_info_get_output *resp;
12992 	struct hwrm_dbg_ring_info_get_input *req;
12993 	int rc;
12994 
12995 	rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
12996 	if (rc)
12997 		return rc;
12998 
12999 	req->ring_type = ring_type;
13000 	req->fw_ring_id = cpu_to_le32(ring_id);
13001 	resp = hwrm_req_hold(bp, req);
13002 	rc = hwrm_req_send(bp, req);
13003 	if (!rc) {
13004 		*prod = le32_to_cpu(resp->producer_index);
13005 		*cons = le32_to_cpu(resp->consumer_index);
13006 	}
13007 	hwrm_req_drop(bp, req);
13008 	return rc;
13009 }
13010 
13011 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
13012 {
13013 	struct bnxt_tx_ring_info *txr;
13014 	int i = bnapi->index, j;
13015 
13016 	bnxt_for_each_napi_tx(j, bnapi, txr)
13017 		netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
13018 			    i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
13019 			    txr->tx_cons);
13020 }
13021 
13022 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
13023 {
13024 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
13025 	int i = bnapi->index;
13026 
13027 	if (!rxr)
13028 		return;
13029 
13030 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
13031 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
13032 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
13033 		    rxr->rx_sw_agg_prod);
13034 }
13035 
13036 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
13037 {
13038 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13039 	int i = bnapi->index;
13040 
13041 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
13042 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
13043 }
13044 
13045 static void bnxt_dbg_dump_states(struct bnxt *bp)
13046 {
13047 	int i;
13048 	struct bnxt_napi *bnapi;
13049 
13050 	for (i = 0; i < bp->cp_nr_rings; i++) {
13051 		bnapi = bp->bnapi[i];
13052 		if (netif_msg_drv(bp)) {
13053 			bnxt_dump_tx_sw_state(bnapi);
13054 			bnxt_dump_rx_sw_state(bnapi);
13055 			bnxt_dump_cp_sw_state(bnapi);
13056 		}
13057 	}
13058 }
13059 
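/* Request a firmware reset of the RX ring group associated with @ring_nr. */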
13060 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
13061 {
13062 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
13063 	struct hwrm_ring_reset_input *req;
13064 	struct bnxt_napi *bnapi = rxr->bnapi;
13065 	struct bnxt_cp_ring_info *cpr;
13066 	u16 cp_ring_id;
13067 	int rc;
13068 
13069 	rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
13070 	if (rc)
13071 		return rc;
13072 
13073 	cpr = &bnapi->cp_ring;
13074 	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
13075 	req->cmpl_ring = cpu_to_le16(cp_ring_id);
13076 	req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
13077 	req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
13078 	return hwrm_req_send_silent(bp, req);
13079 }
13080 
13081 static void bnxt_reset_task(struct bnxt *bp, bool silent)
13082 {
13083 	if (!silent)
13084 		bnxt_dbg_dump_states(bp);
13085 	if (netif_running(bp->dev)) {
13086 		bnxt_close_nic(bp, !silent, false);
13087 		bnxt_open_nic(bp, !silent, false);
13088 	}
13089 }
13090 
13091 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
13092 {
13093 	struct bnxt *bp = netdev_priv(dev);
13094 
13095 	netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
13096 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
13097 }
13098 
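/* Periodic firmware health check: if the heartbeat register stops advancing
 * or the reset counter changes unexpectedly, queue a firmware exception event.
 */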
13099 static void bnxt_fw_health_check(struct bnxt *bp)
13100 {
13101 	struct bnxt_fw_health *fw_health = bp->fw_health;
13102 	struct pci_dev *pdev = bp->pdev;
13103 	u32 val;
13104 
13105 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13106 		return;
13107 
13108 	/* Make sure it is enabled before checking the tmr_counter. */
13109 	smp_rmb();
13110 	if (fw_health->tmr_counter) {
13111 		fw_health->tmr_counter--;
13112 		return;
13113 	}
13114 
13115 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13116 	if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
13117 		fw_health->arrests++;
13118 		goto fw_reset;
13119 	}
13120 
13121 	fw_health->last_fw_heartbeat = val;
13122 
13123 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13124 	if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
13125 		fw_health->discoveries++;
13126 		goto fw_reset;
13127 	}
13128 
13129 	fw_health->tmr_counter = fw_health->tmr_multiplier;
13130 	return;
13131 
13132 fw_reset:
13133 	bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
13134 }
13135 
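/* Periodic driver timer: queues deferred work (stats, health checks, PHY
 * retries, etc.) to bnxt_sp_task() and rearms itself while the device is open.
 */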
13136 static void bnxt_timer(struct timer_list *t)
13137 {
13138 	struct bnxt *bp = from_timer(bp, t, timer);
13139 	struct net_device *dev = bp->dev;
13140 
13141 	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
13142 		return;
13143 
13144 	if (atomic_read(&bp->intr_sem) != 0)
13145 		goto bnxt_restart_timer;
13146 
13147 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
13148 		bnxt_fw_health_check(bp);
13149 
13150 	if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
13151 		bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
13152 
13153 	if (bnxt_tc_flower_enabled(bp))
13154 		bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
13155 
13156 #ifdef CONFIG_RFS_ACCEL
13157 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
13158 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
13159 #endif /* CONFIG_RFS_ACCEL */
13160 
13161 	if (bp->link_info.phy_retry) {
13162 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
13163 			bp->link_info.phy_retry = false;
13164 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
13165 		} else {
13166 			bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
13167 		}
13168 	}
13169 
13170 	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13171 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13172 
13173 	if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
13174 		bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
13175 
13176 bnxt_restart_timer:
13177 	mod_timer(&bp->timer, jiffies + bp->current_interval);
13178 }
13179 
13180 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
13181 {
13182 	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
13183 	 * set.  If the device is being closed, bnxt_close() may be holding
13184 	 * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So
13185 	 * we must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
13186 	 */
13187 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13188 	rtnl_lock();
13189 }
13190 
13191 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
13192 {
13193 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13194 	rtnl_unlock();
13195 }
13196 
13197 /* Only called from bnxt_sp_task() */
13198 static void bnxt_reset(struct bnxt *bp, bool silent)
13199 {
13200 	bnxt_rtnl_lock_sp(bp);
13201 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
13202 		bnxt_reset_task(bp, silent);
13203 	bnxt_rtnl_unlock_sp(bp);
13204 }
13205 
13206 /* Only called from bnxt_sp_task() */
13207 static void bnxt_rx_ring_reset(struct bnxt *bp)
13208 {
13209 	int i;
13210 
13211 	bnxt_rtnl_lock_sp(bp);
13212 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13213 		bnxt_rtnl_unlock_sp(bp);
13214 		return;
13215 	}
13216 	/* Disable and flush TPA before resetting the RX ring */
13217 	if (bp->flags & BNXT_FLAG_TPA)
13218 		bnxt_set_tpa(bp, false);
13219 	for (i = 0; i < bp->rx_nr_rings; i++) {
13220 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
13221 		struct bnxt_cp_ring_info *cpr;
13222 		int rc;
13223 
13224 		if (!rxr->bnapi->in_reset)
13225 			continue;
13226 
13227 		rc = bnxt_hwrm_rx_ring_reset(bp, i);
13228 		if (rc) {
13229 			if (rc == -EINVAL || rc == -EOPNOTSUPP)
13230 				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
13231 			else
13232 				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
13233 					    rc);
13234 			bnxt_reset_task(bp, true);
13235 			break;
13236 		}
13237 		bnxt_free_one_rx_ring_skbs(bp, i);
13238 		rxr->rx_prod = 0;
13239 		rxr->rx_agg_prod = 0;
13240 		rxr->rx_sw_agg_prod = 0;
13241 		rxr->rx_next_cons = 0;
13242 		rxr->bnapi->in_reset = false;
13243 		bnxt_alloc_one_rx_ring(bp, i);
13244 		cpr = &rxr->bnapi->cp_ring;
13245 		cpr->sw_stats->rx.rx_resets++;
13246 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
13247 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
13248 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
13249 	}
13250 	if (bp->flags & BNXT_FLAG_TPA)
13251 		bnxt_set_tpa(bp, true);
13252 	bnxt_rtnl_unlock_sp(bp);
13253 }
13254 
13255 static void bnxt_fw_fatal_close(struct bnxt *bp)
13256 {
13257 	bnxt_tx_disable(bp);
13258 	bnxt_disable_napi(bp);
13259 	bnxt_disable_int_sync(bp);
13260 	bnxt_free_irq(bp);
13261 	bnxt_clear_int_mode(bp);
13262 	pci_disable_device(bp->pdev);
13263 }
13264 
13265 static void bnxt_fw_reset_close(struct bnxt *bp)
13266 {
13267 	/* When firmware is in fatal state, quiesce device and disable
13268 	 * bus master to prevent any potential bad DMAs before freeing
13269 	 * kernel memory.
13270 	 */
13271 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
13272 		u16 val = 0;
13273 
13274 		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13275 		if (val == 0xffff)
13276 			bp->fw_reset_min_dsecs = 0;
13277 		bnxt_fw_fatal_close(bp);
13278 	}
13279 	__bnxt_close_nic(bp, true, false);
13280 	bnxt_vf_reps_free(bp);
13281 	bnxt_clear_int_mode(bp);
13282 	bnxt_hwrm_func_drv_unrgtr(bp);
13283 	if (pci_is_enabled(bp->pdev))
13284 		pci_disable_device(bp->pdev);
13285 	bnxt_free_ctx_mem(bp);
13286 }
13287 
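/* Firmware is considered OK if the heartbeat is still advancing and the
 * reset counter shows that firmware has completed a reset.
 */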
13288 static bool is_bnxt_fw_ok(struct bnxt *bp)
13289 {
13290 	struct bnxt_fw_health *fw_health = bp->fw_health;
13291 	bool no_heartbeat = false, has_reset = false;
13292 	u32 val;
13293 
13294 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13295 	if (val == fw_health->last_fw_heartbeat)
13296 		no_heartbeat = true;
13297 
13298 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13299 	if (val != fw_health->last_fw_reset_cnt)
13300 		has_reset = true;
13301 
13302 	if (!no_heartbeat && has_reset)
13303 		return true;
13304 
13305 	return false;
13306 }
13307 
13308 /* rtnl_lock is acquired before calling this function */
13309 static void bnxt_force_fw_reset(struct bnxt *bp)
13310 {
13311 	struct bnxt_fw_health *fw_health = bp->fw_health;
13312 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13313 	u32 wait_dsecs;
13314 
13315 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
13316 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13317 		return;
13318 
13319 	if (ptp) {
13320 		spin_lock_bh(&ptp->ptp_lock);
13321 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13322 		spin_unlock_bh(&ptp->ptp_lock);
13323 	} else {
13324 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13325 	}
13326 	bnxt_fw_reset_close(bp);
13327 	wait_dsecs = fw_health->master_func_wait_dsecs;
13328 	if (fw_health->primary) {
13329 		if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
13330 			wait_dsecs = 0;
13331 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13332 	} else {
13333 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
13334 		wait_dsecs = fw_health->normal_func_wait_dsecs;
13335 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13336 	}
13337 
13338 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
13339 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
13340 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13341 }
13342 
13343 void bnxt_fw_exception(struct bnxt *bp)
13344 {
13345 	netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
13346 	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13347 	bnxt_ulp_stop(bp);
13348 	bnxt_rtnl_lock_sp(bp);
13349 	bnxt_force_fw_reset(bp);
13350 	bnxt_rtnl_unlock_sp(bp);
13351 }
13352 
13353 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
13354  * < 0 on error.
13355  */
13356 static int bnxt_get_registered_vfs(struct bnxt *bp)
13357 {
13358 #ifdef CONFIG_BNXT_SRIOV
13359 	int rc;
13360 
13361 	if (!BNXT_PF(bp))
13362 		return 0;
13363 
13364 	rc = bnxt_hwrm_func_qcfg(bp);
13365 	if (rc) {
13366 		netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
13367 		return rc;
13368 	}
13369 	if (bp->pf.registered_vfs)
13370 		return bp->pf.registered_vfs;
13371 	if (bp->sriov_cfg)
13372 		return 1;
13373 #endif
13374 	return 0;
13375 }
13376 
13377 void bnxt_fw_reset(struct bnxt *bp)
13378 {
13379 	bnxt_ulp_stop(bp);
13380 	bnxt_rtnl_lock_sp(bp);
13381 	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
13382 	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13383 		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13384 		int n = 0, tmo;
13385 
13386 		if (ptp) {
13387 			spin_lock_bh(&ptp->ptp_lock);
13388 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13389 			spin_unlock_bh(&ptp->ptp_lock);
13390 		} else {
13391 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13392 		}
13393 		if (bp->pf.active_vfs &&
13394 		    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
13395 			n = bnxt_get_registered_vfs(bp);
13396 		if (n < 0) {
13397 			netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
13398 				   n);
13399 			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13400 			dev_close(bp->dev);
13401 			goto fw_reset_exit;
13402 		} else if (n > 0) {
13403 			u16 vf_tmo_dsecs = n * 10;
13404 
13405 			if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
13406 				bp->fw_reset_max_dsecs = vf_tmo_dsecs;
13407 			bp->fw_reset_state =
13408 				BNXT_FW_RESET_STATE_POLL_VF;
13409 			bnxt_queue_fw_reset_work(bp, HZ / 10);
13410 			goto fw_reset_exit;
13411 		}
13412 		bnxt_fw_reset_close(bp);
13413 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13414 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13415 			tmo = HZ / 10;
13416 		} else {
13417 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13418 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
13419 		}
13420 		bnxt_queue_fw_reset_work(bp, tmo);
13421 	}
13422 fw_reset_exit:
13423 	bnxt_rtnl_unlock_sp(bp);
13424 }
13425 
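/* Look for completion rings that have pending work but whose consumer index
 * has not advanced, which indicates a missed interrupt.
 */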
13426 static void bnxt_chk_missed_irq(struct bnxt *bp)
13427 {
13428 	int i;
13429 
13430 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13431 		return;
13432 
13433 	for (i = 0; i < bp->cp_nr_rings; i++) {
13434 		struct bnxt_napi *bnapi = bp->bnapi[i];
13435 		struct bnxt_cp_ring_info *cpr;
13436 		u32 fw_ring_id;
13437 		int j;
13438 
13439 		if (!bnapi)
13440 			continue;
13441 
13442 		cpr = &bnapi->cp_ring;
13443 		for (j = 0; j < cpr->cp_ring_count; j++) {
13444 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
13445 			u32 val[2];
13446 
13447 			if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
13448 				continue;
13449 
13450 			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
13451 				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
13452 				continue;
13453 			}
13454 			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
13455 			bnxt_dbg_hwrm_ring_info_get(bp,
13456 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
13457 				fw_ring_id, &val[0], &val[1]);
13458 			cpr->sw_stats->cmn.missed_irqs++;
13459 		}
13460 	}
13461 }
13462 
13463 static void bnxt_cfg_ntp_filters(struct bnxt *);
13464 
13465 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
13466 {
13467 	struct bnxt_link_info *link_info = &bp->link_info;
13468 
13469 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
13470 		link_info->autoneg = BNXT_AUTONEG_SPEED;
13471 		if (bp->hwrm_spec_code >= 0x10201) {
13472 			if (link_info->auto_pause_setting &
13473 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
13474 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
13475 		} else {
13476 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
13477 		}
13478 		bnxt_set_auto_speed(link_info);
13479 	} else {
13480 		bnxt_set_force_speed(link_info);
13481 		link_info->req_duplex = link_info->duplex_setting;
13482 	}
13483 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
13484 		link_info->req_flow_ctrl =
13485 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
13486 	else
13487 		link_info->req_flow_ctrl = link_info->force_pause_setting;
13488 }
13489 
13490 static void bnxt_fw_echo_reply(struct bnxt *bp)
13491 {
13492 	struct bnxt_fw_health *fw_health = bp->fw_health;
13493 	struct hwrm_func_echo_response_input *req;
13494 	int rc;
13495 
13496 	rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
13497 	if (rc)
13498 		return;
13499 	req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
13500 	req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
13501 	hwrm_req_send(bp, req);
13502 }
13503 
13504 static void bnxt_ulp_restart(struct bnxt *bp)
13505 {
13506 	bnxt_ulp_stop(bp);
13507 	bnxt_ulp_start(bp, 0);
13508 }
13509 
13510 static void bnxt_sp_task(struct work_struct *work)
13511 {
13512 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
13513 
13514 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13515 	smp_mb__after_atomic();
13516 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13517 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13518 		return;
13519 	}
13520 
13521 	if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
13522 		bnxt_ulp_restart(bp);
13523 		bnxt_reenable_sriov(bp);
13524 	}
13525 
13526 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
13527 		bnxt_cfg_rx_mode(bp);
13528 
13529 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
13530 		bnxt_cfg_ntp_filters(bp);
13531 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
13532 		bnxt_hwrm_exec_fwd_req(bp);
13533 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
13534 		netdev_info(bp->dev, "Receive PF driver unload event!\n");
13535 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
13536 		bnxt_hwrm_port_qstats(bp, 0);
13537 		bnxt_hwrm_port_qstats_ext(bp, 0);
13538 		bnxt_accumulate_all_stats(bp);
13539 	}
13540 
13541 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
13542 		int rc;
13543 
13544 		mutex_lock(&bp->link_lock);
13545 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
13546 				       &bp->sp_event))
13547 			bnxt_hwrm_phy_qcaps(bp);
13548 
13549 		rc = bnxt_update_link(bp, true);
13550 		if (rc)
13551 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
13552 				   rc);
13553 
13554 		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
13555 				       &bp->sp_event))
13556 			bnxt_init_ethtool_link_settings(bp);
13557 		mutex_unlock(&bp->link_lock);
13558 	}
13559 	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
13560 		int rc;
13561 
13562 		mutex_lock(&bp->link_lock);
13563 		rc = bnxt_update_phy_setting(bp);
13564 		mutex_unlock(&bp->link_lock);
13565 		if (rc) {
13566 			netdev_warn(bp->dev, "update phy settings retry failed\n");
13567 		} else {
13568 			bp->link_info.phy_retry = false;
13569 			netdev_info(bp->dev, "update phy settings retry succeeded\n");
13570 		}
13571 	}
13572 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
13573 		mutex_lock(&bp->link_lock);
13574 		bnxt_get_port_module_status(bp);
13575 		mutex_unlock(&bp->link_lock);
13576 	}
13577 
13578 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
13579 		bnxt_tc_flow_stats_work(bp);
13580 
13581 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
13582 		bnxt_chk_missed_irq(bp);
13583 
13584 	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
13585 		bnxt_fw_echo_reply(bp);
13586 
13587 	if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
13588 		bnxt_hwmon_notify_event(bp);
13589 
13590 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
13591 	 * must be the last functions to be called before exiting.
13592 	 */
13593 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
13594 		bnxt_reset(bp, false);
13595 
13596 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
13597 		bnxt_reset(bp, true);
13598 
13599 	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
13600 		bnxt_rx_ring_reset(bp);
13601 
13602 	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
13603 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
13604 		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
13605 			bnxt_devlink_health_fw_report(bp);
13606 		else
13607 			bnxt_fw_reset(bp);
13608 	}
13609 
13610 	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
13611 		if (!is_bnxt_fw_ok(bp))
13612 			bnxt_devlink_health_fw_report(bp);
13613 	}
13614 
13615 	smp_mb__before_atomic();
13616 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13617 }
13618 
13619 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13620 				int *max_cp);
13621 
13622 /* Under rtnl_lock */
13623 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
13624 		     int tx_xdp)
13625 {
13626 	int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
13627 	struct bnxt_hw_rings hwr = {0};
13628 	int rx_rings = rx;
13629 
13630 	if (tcs)
13631 		tx_sets = tcs;
13632 
13633 	_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
13634 
13635 	if (max_rx < rx_rings)
13636 		return -ENOMEM;
13637 
13638 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
13639 		rx_rings <<= 1;
13640 
13641 	hwr.rx = rx_rings;
13642 	hwr.tx = tx * tx_sets + tx_xdp;
13643 	if (max_tx < hwr.tx)
13644 		return -ENOMEM;
13645 
13646 	hwr.vnic = bnxt_get_total_vnics(bp, rx);
13647 
13648 	tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
13649 	hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
13650 	if (max_cp < hwr.cp)
13651 		return -ENOMEM;
13652 	hwr.stat = hwr.cp;
13653 	if (BNXT_NEW_RM(bp)) {
13654 		hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
13655 		hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
13656 		hwr.grp = rx;
13657 		hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13658 	}
13659 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
13660 		hwr.cp_p5 = hwr.tx + rx;
13661 	return bnxt_hwrm_check_rings(bp, &hwr);
13662 }
13663 
13664 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
13665 {
13666 	if (bp->bar2) {
13667 		pci_iounmap(pdev, bp->bar2);
13668 		bp->bar2 = NULL;
13669 	}
13670 
13671 	if (bp->bar1) {
13672 		pci_iounmap(pdev, bp->bar1);
13673 		bp->bar1 = NULL;
13674 	}
13675 
13676 	if (bp->bar0) {
13677 		pci_iounmap(pdev, bp->bar0);
13678 		bp->bar0 = NULL;
13679 	}
13680 }
13681 
13682 static void bnxt_cleanup_pci(struct bnxt *bp)
13683 {
13684 	bnxt_unmap_bars(bp, bp->pdev);
13685 	pci_release_regions(bp->pdev);
13686 	if (pci_is_enabled(bp->pdev))
13687 		pci_disable_device(bp->pdev);
13688 }
13689 
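/* Set the default RX and TX interrupt coalescing parameters. */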
13690 static void bnxt_init_dflt_coal(struct bnxt *bp)
13691 {
13692 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
13693 	struct bnxt_coal *coal;
13694 	u16 flags = 0;
13695 
13696 	if (coal_cap->cmpl_params &
13697 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
13698 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
13699 
13700 	/* Tick values in microseconds.
13701 	 * 1 coal_buf x bufs_per_record = 1 completion record.
13702 	 */
13703 	coal = &bp->rx_coal;
13704 	coal->coal_ticks = 10;
13705 	coal->coal_bufs = 30;
13706 	coal->coal_ticks_irq = 1;
13707 	coal->coal_bufs_irq = 2;
13708 	coal->idle_thresh = 50;
13709 	coal->bufs_per_record = 2;
13710 	coal->budget = 64;		/* NAPI budget */
13711 	coal->flags = flags;
13712 
13713 	coal = &bp->tx_coal;
13714 	coal->coal_ticks = 28;
13715 	coal->coal_bufs = 30;
13716 	coal->coal_ticks_irq = 2;
13717 	coal->coal_bufs_irq = 2;
13718 	coal->bufs_per_record = 1;
13719 	coal->flags = flags;
13720 
13721 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
13722 }
13723 
13724 /* FW that pre-reserves 1 VNIC per function */
13725 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
13726 {
13727 	u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
13728 
13729 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13730 	    (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
13731 		return true;
13732 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13733 	    (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
13734 		return true;
13735 	return false;
13736 }
13737 
13738 static int bnxt_fw_init_one_p1(struct bnxt *bp)
13739 {
13740 	int rc;
13741 
13742 	bp->fw_cap = 0;
13743 	rc = bnxt_hwrm_ver_get(bp);
13744 	/* FW may be unresponsive after FLR.  FLR must complete within 100 msec,
13745 	 * so wait that long before continuing with recovery.
13746 	 */
13747 	if (rc)
13748 		msleep(100);
13749 	bnxt_try_map_fw_health_reg(bp);
13750 	if (rc) {
13751 		rc = bnxt_try_recover_fw(bp);
13752 		if (rc)
13753 			return rc;
13754 		rc = bnxt_hwrm_ver_get(bp);
13755 		if (rc)
13756 			return rc;
13757 	}
13758 
13759 	bnxt_nvm_cfg_ver_get(bp);
13760 
13761 	rc = bnxt_hwrm_func_reset(bp);
13762 	if (rc)
13763 		return -ENODEV;
13764 
13765 	bnxt_hwrm_fw_set_time(bp);
13766 	return 0;
13767 }
13768 
13769 static int bnxt_fw_init_one_p2(struct bnxt *bp)
13770 {
13771 	int rc;
13772 
13773 	/* Get the MAX capabilities for this function */
13774 	rc = bnxt_hwrm_func_qcaps(bp);
13775 	if (rc) {
13776 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
13777 			   rc);
13778 		return -ENODEV;
13779 	}
13780 
13781 	rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
13782 	if (rc)
13783 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
13784 			    rc);
13785 
13786 	if (bnxt_alloc_fw_health(bp)) {
13787 		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
13788 	} else {
13789 		rc = bnxt_hwrm_error_recovery_qcfg(bp);
13790 		if (rc)
13791 			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
13792 				    rc);
13793 	}
13794 
13795 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
13796 	if (rc)
13797 		return -ENODEV;
13798 
13799 	if (bnxt_fw_pre_resv_vnics(bp))
13800 		bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
13801 
13802 	bnxt_hwrm_func_qcfg(bp);
13803 	bnxt_hwrm_vnic_qcaps(bp);
13804 	bnxt_hwrm_port_led_qcaps(bp);
13805 	bnxt_ethtool_init(bp);
13806 	if (bp->fw_cap & BNXT_FW_CAP_PTP)
13807 		__bnxt_hwrm_ptp_qcfg(bp);
13808 	bnxt_dcb_init(bp);
13809 	bnxt_hwmon_init(bp);
13810 	return 0;
13811 }
13812 
13813 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
13814 {
13815 	bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
13816 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
13817 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
13818 			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
13819 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
13820 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
13821 		bp->rss_hash_delta = bp->rss_hash_cfg;
13822 	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
13823 		bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
13824 		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
13825 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
13826 	}
13827 }
13828 
13829 static void bnxt_set_dflt_rfs(struct bnxt *bp)
13830 {
13831 	struct net_device *dev = bp->dev;
13832 
13833 	dev->hw_features &= ~NETIF_F_NTUPLE;
13834 	dev->features &= ~NETIF_F_NTUPLE;
13835 	bp->flags &= ~BNXT_FLAG_RFS;
13836 	if (bnxt_rfs_supported(bp)) {
13837 		dev->hw_features |= NETIF_F_NTUPLE;
13838 		if (bnxt_rfs_capable(bp, false)) {
13839 			bp->flags |= BNXT_FLAG_RFS;
13840 			dev->features |= NETIF_F_NTUPLE;
13841 		}
13842 	}
13843 }
13844 
13845 static void bnxt_fw_init_one_p3(struct bnxt *bp)
13846 {
13847 	struct pci_dev *pdev = bp->pdev;
13848 
13849 	bnxt_set_dflt_rss_hash_type(bp);
13850 	bnxt_set_dflt_rfs(bp);
13851 
13852 	bnxt_get_wol_settings(bp);
13853 	if (bp->flags & BNXT_FLAG_WOL_CAP)
13854 		device_set_wakeup_enable(&pdev->dev, bp->wol);
13855 	else
13856 		device_set_wakeup_capable(&pdev->dev, false);
13857 
13858 	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
13859 	bnxt_hwrm_coal_params_qcaps(bp);
13860 }
13861 
13862 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
13863 
13864 int bnxt_fw_init_one(struct bnxt *bp)
13865 {
13866 	int rc;
13867 
13868 	rc = bnxt_fw_init_one_p1(bp);
13869 	if (rc) {
13870 		netdev_err(bp->dev, "Firmware init phase 1 failed\n");
13871 		return rc;
13872 	}
13873 	rc = bnxt_fw_init_one_p2(bp);
13874 	if (rc) {
13875 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
13876 		return rc;
13877 	}
13878 	rc = bnxt_probe_phy(bp, false);
13879 	if (rc)
13880 		return rc;
13881 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
13882 	if (rc)
13883 		return rc;
13884 
13885 	bnxt_fw_init_one_p3(bp);
13886 	return 0;
13887 }
13888 
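/* Execute one register write from the firmware-provided reset sequence,
 * honoring the optional per-step delay.
 */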
13889 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
13890 {
13891 	struct bnxt_fw_health *fw_health = bp->fw_health;
13892 	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
13893 	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
13894 	u32 reg_type, reg_off, delay_msecs;
13895 
13896 	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
13897 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
13898 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
13899 	switch (reg_type) {
13900 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
13901 		pci_write_config_dword(bp->pdev, reg_off, val);
13902 		break;
13903 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
13904 		writel(reg_off & BNXT_GRC_BASE_MASK,
13905 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
13906 		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
13907 		fallthrough;
13908 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
13909 		writel(val, bp->bar0 + reg_off);
13910 		break;
13911 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
13912 		writel(val, bp->bar1 + reg_off);
13913 		break;
13914 	}
13915 	if (delay_msecs) {
13916 		pci_read_config_dword(bp->pdev, 0, &val);
13917 		msleep(delay_msecs);
13918 	}
13919 }
13920 
13921 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
13922 {
13923 	struct hwrm_func_qcfg_output *resp;
13924 	struct hwrm_func_qcfg_input *req;
13925 	bool result = true; /* firmware will enforce if unknown */
13926 
13927 	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
13928 		return result;
13929 
13930 	if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
13931 		return result;
13932 
13933 	req->fid = cpu_to_le16(0xffff);
13934 	resp = hwrm_req_hold(bp, req);
13935 	if (!hwrm_req_send(bp, req))
13936 		result = !!(le16_to_cpu(resp->flags) &
13937 			    FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
13938 	hwrm_req_drop(bp, req);
13939 	return result;
13940 }
13941 
13942 static void bnxt_reset_all(struct bnxt *bp)
13943 {
13944 	struct bnxt_fw_health *fw_health = bp->fw_health;
13945 	int i, rc;
13946 
13947 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13948 		bnxt_fw_reset_via_optee(bp);
13949 		bp->fw_reset_timestamp = jiffies;
13950 		return;
13951 	}
13952 
13953 	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
13954 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
13955 			bnxt_fw_reset_writel(bp, i);
13956 	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
13957 		struct hwrm_fw_reset_input *req;
13958 
13959 		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
13960 		if (!rc) {
13961 			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
13962 			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
13963 			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
13964 			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
13965 			rc = hwrm_req_send(bp, req);
13966 		}
13967 		if (rc != -ENODEV)
13968 			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
13969 	}
13970 	bp->fw_reset_timestamp = jiffies;
13971 }
13972 
13973 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
13974 {
13975 	return time_after(jiffies, bp->fw_reset_timestamp +
13976 			  (bp->fw_reset_max_dsecs * HZ / 10));
13977 }
13978 
13979 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
13980 {
13981 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13982 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
13983 		bnxt_dl_health_fw_status_update(bp, false);
13984 	bp->fw_reset_state = 0;
13985 	dev_close(bp->dev);
13986 }
13987 
13988 static void bnxt_fw_reset_task(struct work_struct *work)
13989 {
13990 	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
13991 	int rc = 0;
13992 
13993 	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13994 		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
13995 		return;
13996 	}
13997 
13998 	switch (bp->fw_reset_state) {
13999 	case BNXT_FW_RESET_STATE_POLL_VF: {
14000 		int n = bnxt_get_registered_vfs(bp);
14001 		int tmo;
14002 
14003 		if (n < 0) {
14004 			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
14005 				   n, jiffies_to_msecs(jiffies -
14006 				   bp->fw_reset_timestamp));
14007 			goto fw_reset_abort;
14008 		} else if (n > 0) {
14009 			if (bnxt_fw_reset_timeout(bp)) {
14010 				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14011 				bp->fw_reset_state = 0;
14012 				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
14013 					   n);
14014 				goto ulp_start;
14015 			}
14016 			bnxt_queue_fw_reset_work(bp, HZ / 10);
14017 			return;
14018 		}
14019 		bp->fw_reset_timestamp = jiffies;
14020 		rtnl_lock();
14021 		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
14022 			bnxt_fw_reset_abort(bp, rc);
14023 			rtnl_unlock();
14024 			goto ulp_start;
14025 		}
14026 		bnxt_fw_reset_close(bp);
14027 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14028 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14029 			tmo = HZ / 10;
14030 		} else {
14031 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14032 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
14033 		}
14034 		rtnl_unlock();
14035 		bnxt_queue_fw_reset_work(bp, tmo);
14036 		return;
14037 	}
14038 	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
14039 		u32 val;
14040 
14041 		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
14042 		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
14043 		    !bnxt_fw_reset_timeout(bp)) {
14044 			bnxt_queue_fw_reset_work(bp, HZ / 5);
14045 			return;
14046 		}
14047 
14048 		if (!bp->fw_health->primary) {
14049 			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
14050 
14051 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14052 			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14053 			return;
14054 		}
14055 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14056 	}
14057 		fallthrough;
14058 	case BNXT_FW_RESET_STATE_RESET_FW:
14059 		bnxt_reset_all(bp);
14060 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14061 		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
14062 		return;
14063 	case BNXT_FW_RESET_STATE_ENABLE_DEV:
14064 		bnxt_inv_fw_health_reg(bp);
14065 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
14066 		    !bp->fw_reset_min_dsecs) {
14067 			u16 val;
14068 
14069 			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14070 			if (val == 0xffff) {
14071 				if (bnxt_fw_reset_timeout(bp)) {
14072 					netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
14073 					rc = -ETIMEDOUT;
14074 					goto fw_reset_abort;
14075 				}
14076 				bnxt_queue_fw_reset_work(bp, HZ / 1000);
14077 				return;
14078 			}
14079 		}
14080 		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14081 		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
14082 		if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
14083 		    !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
14084 			bnxt_dl_remote_reload(bp);
14085 		if (pci_enable_device(bp->pdev)) {
14086 			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
14087 			rc = -ENODEV;
14088 			goto fw_reset_abort;
14089 		}
14090 		pci_set_master(bp->pdev);
14091 		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
14092 		fallthrough;
14093 	case BNXT_FW_RESET_STATE_POLL_FW:
14094 		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
14095 		rc = bnxt_hwrm_poll(bp);
14096 		if (rc) {
14097 			if (bnxt_fw_reset_timeout(bp)) {
14098 				netdev_err(bp->dev, "Firmware reset aborted\n");
14099 				goto fw_reset_abort_status;
14100 			}
14101 			bnxt_queue_fw_reset_work(bp, HZ / 5);
14102 			return;
14103 		}
14104 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
14105 		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
14106 		fallthrough;
14107 	case BNXT_FW_RESET_STATE_OPENING:
14108 		while (!rtnl_trylock()) {
14109 			bnxt_queue_fw_reset_work(bp, HZ / 10);
14110 			return;
14111 		}
14112 		rc = bnxt_open(bp->dev);
14113 		if (rc) {
14114 			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
14115 			bnxt_fw_reset_abort(bp, rc);
14116 			rtnl_unlock();
14117 			goto ulp_start;
14118 		}
14119 
14120 		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
14121 		    bp->fw_health->enabled) {
14122 			bp->fw_health->last_fw_reset_cnt =
14123 				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14124 		}
14125 		bp->fw_reset_state = 0;
14126 		/* Make sure fw_reset_state is 0 before clearing the flag */
14127 		smp_mb__before_atomic();
14128 		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14129 		bnxt_ptp_reapply_pps(bp);
14130 		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
14131 		if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
14132 			bnxt_dl_health_fw_recovery_done(bp);
14133 			bnxt_dl_health_fw_status_update(bp, true);
14134 		}
14135 		rtnl_unlock();
14136 		bnxt_ulp_start(bp, 0);
14137 		bnxt_reenable_sriov(bp);
14138 		rtnl_lock();
14139 		bnxt_vf_reps_alloc(bp);
14140 		bnxt_vf_reps_open(bp);
14141 		rtnl_unlock();
14142 		break;
14143 	}
14144 	return;
14145 
14146 fw_reset_abort_status:
14147 	if (bp->fw_health->status_reliable ||
14148 	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
14149 		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
14150 
14151 		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
14152 	}
14153 fw_reset_abort:
14154 	rtnl_lock();
14155 	bnxt_fw_reset_abort(bp, rc);
14156 	rtnl_unlock();
14157 ulp_start:
14158 	bnxt_ulp_start(bp, rc);
14159 }
14160 
14161 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
14162 {
14163 	int rc;
14164 	struct bnxt *bp = netdev_priv(dev);
14165 
14166 	SET_NETDEV_DEV(dev, &pdev->dev);
14167 
14168 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
14169 	rc = pci_enable_device(pdev);
14170 	if (rc) {
14171 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14172 		goto init_err;
14173 	}
14174 
14175 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
14176 		dev_err(&pdev->dev,
14177 			"Cannot find PCI device base address, aborting\n");
14178 		rc = -ENODEV;
14179 		goto init_err_disable;
14180 	}
14181 
14182 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
14183 	if (rc) {
14184 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14185 		goto init_err_disable;
14186 	}
14187 
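	/* Prefer 64-bit DMA addressing and fall back to 32-bit if unsupported */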
14188 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
14189 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
14190 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
14191 		rc = -EIO;
14192 		goto init_err_release;
14193 	}
14194 
14195 	pci_set_master(pdev);
14196 
14197 	bp->dev = dev;
14198 	bp->pdev = pdev;
14199 
14200 	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
14201 	 * determines the BAR size.
14202 	 */
14203 	bp->bar0 = pci_ioremap_bar(pdev, 0);
14204 	if (!bp->bar0) {
14205 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14206 		rc = -ENOMEM;
14207 		goto init_err_release;
14208 	}
14209 
14210 	bp->bar2 = pci_ioremap_bar(pdev, 4);
14211 	if (!bp->bar2) {
14212 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
14213 		rc = -ENOMEM;
14214 		goto init_err_release;
14215 	}
14216 
14217 	INIT_WORK(&bp->sp_task, bnxt_sp_task);
14218 	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
14219 
14220 	spin_lock_init(&bp->ntp_fltr_lock);
14221 #if BITS_PER_LONG == 32
14222 	spin_lock_init(&bp->db_lock);
14223 #endif
14224 
14225 	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
14226 	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
14227 
14228 	timer_setup(&bp->timer, bnxt_timer, 0);
14229 	bp->current_interval = BNXT_TIMER_INTERVAL;
14230 
14231 	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
14232 	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
14233 
14234 	clear_bit(BNXT_STATE_OPEN, &bp->state);
14235 	return 0;
14236 
14237 init_err_release:
14238 	bnxt_unmap_bars(bp, pdev);
14239 	pci_release_regions(pdev);
14240 
14241 init_err_disable:
14242 	pci_disable_device(pdev);
14243 
14244 init_err:
14245 	return rc;
14246 }
14247 
14248 /* rtnl_lock held */
14249 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
14250 {
14251 	struct sockaddr *addr = p;
14252 	struct bnxt *bp = netdev_priv(dev);
14253 	int rc = 0;
14254 
14255 	if (!is_valid_ether_addr(addr->sa_data))
14256 		return -EADDRNOTAVAIL;
14257 
14258 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
14259 		return 0;
14260 
14261 	rc = bnxt_approve_mac(bp, addr->sa_data, true);
14262 	if (rc)
14263 		return rc;
14264 
14265 	eth_hw_addr_set(dev, addr->sa_data);
14266 	bnxt_clear_usr_fltrs(bp, true);
14267 	if (netif_running(dev)) {
14268 		bnxt_close_nic(bp, false, false);
14269 		rc = bnxt_open_nic(bp, false, false);
14270 	}
14271 
14272 	return rc;
14273 }
14274 
14275 /* rtnl_lock held */
14276 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
14277 {
14278 	struct bnxt *bp = netdev_priv(dev);
14279 
14280 	if (netif_running(dev))
14281 		bnxt_close_nic(bp, true, false);
14282 
14283 	WRITE_ONCE(dev->mtu, new_mtu);
14284 	bnxt_set_ring_params(bp);
14285 
14286 	if (netif_running(dev))
14287 		return bnxt_open_nic(bp, true, false);
14288 
14289 	return 0;
14290 }
14291 
14292 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
14293 {
14294 	struct bnxt *bp = netdev_priv(dev);
14295 	bool sh = false;
14296 	int rc, tx_cp;
14297 
14298 	if (tc > bp->max_tc) {
14299 		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
14300 			   tc, bp->max_tc);
14301 		return -EINVAL;
14302 	}
14303 
14304 	if (bp->num_tc == tc)
14305 		return 0;
14306 
14307 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
14308 		sh = true;
14309 
14310 	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
14311 			      sh, tc, bp->tx_nr_rings_xdp);
14312 	if (rc)
14313 		return rc;
14314 
14315 	/* Need to close the device and re-allocate hw resources */
14316 	if (netif_running(bp->dev))
14317 		bnxt_close_nic(bp, true, false);
14318 
14319 	if (tc) {
14320 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
14321 		netdev_set_num_tc(dev, tc);
14322 		bp->num_tc = tc;
14323 	} else {
14324 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14325 		netdev_reset_tc(dev);
14326 		bp->num_tc = 0;
14327 	}
14328 	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
14329 	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
14330 	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
14331 			       tx_cp + bp->rx_nr_rings;
14332 
14333 	if (netif_running(bp->dev))
14334 		return bnxt_open_nic(bp, true, false);
14335 
14336 	return 0;
14337 }
14338 
14339 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
14340 				  void *cb_priv)
14341 {
14342 	struct bnxt *bp = cb_priv;
14343 
14344 	if (!bnxt_tc_flower_enabled(bp) ||
14345 	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
14346 		return -EOPNOTSUPP;
14347 
14348 	switch (type) {
14349 	case TC_SETUP_CLSFLOWER:
14350 		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
14351 	default:
14352 		return -EOPNOTSUPP;
14353 	}
14354 }
14355 
14356 LIST_HEAD(bnxt_block_cb_list);
14357 
14358 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
14359 			 void *type_data)
14360 {
14361 	struct bnxt *bp = netdev_priv(dev);
14362 
14363 	switch (type) {
14364 	case TC_SETUP_BLOCK:
14365 		return flow_block_cb_setup_simple(type_data,
14366 						  &bnxt_block_cb_list,
14367 						  bnxt_setup_tc_block_cb,
14368 						  bp, bp, true);
14369 	case TC_SETUP_QDISC_MQPRIO: {
14370 		struct tc_mqprio_qopt *mqprio = type_data;
14371 
14372 		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
14373 
14374 		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
14375 	}
14376 	default:
14377 		return -EOPNOTSUPP;
14378 	}
14379 }
14380 
14381 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
14382 			    const struct sk_buff *skb)
14383 {
14384 	struct bnxt_vnic_info *vnic;
14385 
14386 	if (skb)
14387 		return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
14388 
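	/* No skb: hash the flow keys with the default VNIC's RSS hash key */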
14389 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
14390 	return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
14391 }
14392 
14393 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
14394 			   u32 idx)
14395 {
14396 	struct hlist_head *head;
14397 	int bit_id;
14398 
14399 	spin_lock_bh(&bp->ntp_fltr_lock);
14400 	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
14401 	if (bit_id < 0) {
14402 		spin_unlock_bh(&bp->ntp_fltr_lock);
14403 		return -ENOMEM;
14404 	}
14405 
14406 	fltr->base.sw_id = (u16)bit_id;
14407 	fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
14408 	fltr->base.flags |= BNXT_ACT_RING_DST;
14409 	head = &bp->ntp_fltr_hash_tbl[idx];
14410 	hlist_add_head_rcu(&fltr->base.hash, head);
14411 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
14412 	bnxt_insert_usr_fltr(bp, &fltr->base);
14413 	bp->ntp_fltr_count++;
14414 	spin_unlock_bh(&bp->ntp_fltr_lock);
14415 	return 0;
14416 }
14417 
14418 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
14419 			    struct bnxt_ntuple_filter *f2)
14420 {
14421 	struct bnxt_flow_masks *masks1 = &f1->fmasks;
14422 	struct bnxt_flow_masks *masks2 = &f2->fmasks;
14423 	struct flow_keys *keys1 = &f1->fkeys;
14424 	struct flow_keys *keys2 = &f2->fkeys;
14425 
14426 	if (keys1->basic.n_proto != keys2->basic.n_proto ||
14427 	    keys1->basic.ip_proto != keys2->basic.ip_proto)
14428 		return false;
14429 
14430 	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
14431 		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
14432 		    masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
14433 		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
14434 		    masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
14435 			return false;
14436 	} else {
14437 		if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
14438 				     &keys2->addrs.v6addrs.src) ||
14439 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
14440 				     &masks2->addrs.v6addrs.src) ||
14441 		    !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
14442 				     &keys2->addrs.v6addrs.dst) ||
14443 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
14444 				     &masks2->addrs.v6addrs.dst))
14445 			return false;
14446 	}
14447 
14448 	return keys1->ports.src == keys2->ports.src &&
14449 	       masks1->ports.src == masks2->ports.src &&
14450 	       keys1->ports.dst == keys2->ports.dst &&
14451 	       masks1->ports.dst == masks2->ports.dst &&
14452 	       keys1->control.flags == keys2->control.flags &&
14453 	       f1->l2_fltr == f2->l2_fltr;
14454 }
14455 
14456 struct bnxt_ntuple_filter *
14457 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
14458 				struct bnxt_ntuple_filter *fltr, u32 idx)
14459 {
14460 	struct bnxt_ntuple_filter *f;
14461 	struct hlist_head *head;
14462 
14463 	head = &bp->ntp_fltr_hash_tbl[idx];
14464 	hlist_for_each_entry_rcu(f, head, base.hash) {
14465 		if (bnxt_fltr_match(f, fltr))
14466 			return f;
14467 	}
14468 	return NULL;
14469 }
14470 
14471 #ifdef CONFIG_RFS_ACCEL
14472 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
14473 			      u16 rxq_index, u32 flow_id)
14474 {
14475 	struct bnxt *bp = netdev_priv(dev);
14476 	struct bnxt_ntuple_filter *fltr, *new_fltr;
14477 	struct flow_keys *fkeys;
14478 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
14479 	struct bnxt_l2_filter *l2_fltr;
14480 	int rc = 0, idx;
14481 	u32 flags;
14482 
14483 	if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
14484 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
14485 		atomic_inc(&l2_fltr->refcnt);
14486 	} else {
14487 		struct bnxt_l2_key key;
14488 
14489 		ether_addr_copy(key.dst_mac_addr, eth->h_dest);
14490 		key.vlan = 0;
14491 		l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
14492 		if (!l2_fltr)
14493 			return -EINVAL;
14494 		if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
14495 			bnxt_del_l2_filter(bp, l2_fltr);
14496 			return -EINVAL;
14497 		}
14498 	}
14499 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
14500 	if (!new_fltr) {
14501 		bnxt_del_l2_filter(bp, l2_fltr);
14502 		return -ENOMEM;
14503 	}
14504 
14505 	fkeys = &new_fltr->fkeys;
14506 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
14507 		rc = -EPROTONOSUPPORT;
14508 		goto err_free;
14509 	}
14510 
14511 	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
14512 	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
14513 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
14514 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
14515 		rc = -EPROTONOSUPPORT;
14516 		goto err_free;
14517 	}
14518 	new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
14519 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
14520 		if (bp->hwrm_spec_code < 0x10601) {
14521 			rc = -EPROTONOSUPPORT;
14522 			goto err_free;
14523 		}
14524 		new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
14525 	}
14526 	flags = fkeys->control.flags;
14527 	if (((flags & FLOW_DIS_ENCAPSULATION) &&
14528 	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
14529 		rc = -EPROTONOSUPPORT;
14530 		goto err_free;
14531 	}
14532 	new_fltr->l2_fltr = l2_fltr;
14533 
14534 	idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
14535 	rcu_read_lock();
14536 	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
14537 	if (fltr) {
14538 		rc = fltr->base.sw_id;
14539 		rcu_read_unlock();
14540 		goto err_free;
14541 	}
14542 	rcu_read_unlock();
14543 
14544 	new_fltr->flow_id = flow_id;
14545 	new_fltr->base.rxq = rxq_index;
14546 	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
14547 	if (!rc) {
14548 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14549 		return new_fltr->base.sw_id;
14550 	}
14551 
14552 err_free:
14553 	bnxt_del_l2_filter(bp, l2_fltr);
14554 	kfree(new_fltr);
14555 	return rc;
14556 }
14557 #endif
14558 
14559 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
14560 {
14561 	spin_lock_bh(&bp->ntp_fltr_lock);
14562 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
14563 		spin_unlock_bh(&bp->ntp_fltr_lock);
14564 		return;
14565 	}
14566 	hlist_del_rcu(&fltr->base.hash);
14567 	bnxt_del_one_usr_fltr(bp, &fltr->base);
14568 	bp->ntp_fltr_count--;
14569 	spin_unlock_bh(&bp->ntp_fltr_lock);
14570 	bnxt_del_l2_filter(bp, fltr->l2_fltr);
14571 	clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
14572 	kfree_rcu(fltr, base.rcu);
14573 }
14574 
14575 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
14576 {
14577 #ifdef CONFIG_RFS_ACCEL
14578 	int i;
14579 
14580 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
14581 		struct hlist_head *head;
14582 		struct hlist_node *tmp;
14583 		struct bnxt_ntuple_filter *fltr;
14584 		int rc;
14585 
14586 		head = &bp->ntp_fltr_hash_tbl[i];
14587 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
14588 			bool del = false;
14589 
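			/* Age out HW-installed filters that RPS no longer
			 * needs and install filters not yet set up in HW.
			 */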
14590 			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
14591 				if (fltr->base.flags & BNXT_ACT_NO_AGING)
14592 					continue;
14593 				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
14594 							fltr->flow_id,
14595 							fltr->base.sw_id)) {
14596 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
14597 									 fltr);
14598 					del = true;
14599 				}
14600 			} else {
14601 				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
14602 								       fltr);
14603 				if (rc)
14604 					del = true;
14605 				else
14606 					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
14607 			}
14608 
14609 			if (del)
14610 				bnxt_del_ntp_filter(bp, fltr);
14611 		}
14612 	}
14613 #endif
14614 }
14615 
14616 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
14617 				    unsigned int entry, struct udp_tunnel_info *ti)
14618 {
14619 	struct bnxt *bp = netdev_priv(netdev);
14620 	unsigned int cmd;
14621 
14622 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
14623 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
14624 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
14625 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
14626 	else
14627 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
14628 
14629 	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
14630 }
14631 
14632 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
14633 				      unsigned int entry, struct udp_tunnel_info *ti)
14634 {
14635 	struct bnxt *bp = netdev_priv(netdev);
14636 	unsigned int cmd;
14637 
14638 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
14639 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
14640 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
14641 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
14642 	else
14643 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
14644 
14645 	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
14646 }
14647 
14648 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
14649 	.set_port	= bnxt_udp_tunnel_set_port,
14650 	.unset_port	= bnxt_udp_tunnel_unset_port,
14651 	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
14652 			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
14653 	.tables		= {
14654 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
14655 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
14656 	},
14657 }, bnxt_udp_tunnels_p7 = {
14658 	.set_port	= bnxt_udp_tunnel_set_port,
14659 	.unset_port	= bnxt_udp_tunnel_unset_port,
14660 	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
14661 			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
14662 	.tables		= {
14663 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
14664 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
14665 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
14666 	},
14667 };
14668 
14669 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
14670 			       struct net_device *dev, u32 filter_mask,
14671 			       int nlflags)
14672 {
14673 	struct bnxt *bp = netdev_priv(dev);
14674 
14675 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
14676 				       nlflags, filter_mask, NULL);
14677 }
14678 
14679 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
14680 			       u16 flags, struct netlink_ext_ack *extack)
14681 {
14682 	struct bnxt *bp = netdev_priv(dev);
14683 	struct nlattr *attr, *br_spec;
14684 	int rem, rc = 0;
14685 
14686 	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
14687 		return -EOPNOTSUPP;
14688 
14689 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
14690 	if (!br_spec)
14691 		return -EINVAL;
14692 
14693 	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
14694 		u16 mode;
14695 
14696 		mode = nla_get_u16(attr);
14697 		if (mode == bp->br_mode)
14698 			break;
14699 
14700 		rc = bnxt_hwrm_set_br_mode(bp, mode);
14701 		if (!rc)
14702 			bp->br_mode = mode;
14703 		break;
14704 	}
14705 	return rc;
14706 }
14707 
14708 int bnxt_get_port_parent_id(struct net_device *dev,
14709 			    struct netdev_phys_item_id *ppid)
14710 {
14711 	struct bnxt *bp = netdev_priv(dev);
14712 
14713 	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
14714 		return -EOPNOTSUPP;
14715 
14716 	/* The PF and its VF-reps only support the switchdev framework */
14717 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
14718 		return -EOPNOTSUPP;
14719 
14720 	ppid->id_len = sizeof(bp->dsn);
14721 	memcpy(ppid->id, bp->dsn, ppid->id_len);
14722 
14723 	return 0;
14724 }
14725 
14726 static const struct net_device_ops bnxt_netdev_ops = {
14727 	.ndo_open		= bnxt_open,
14728 	.ndo_start_xmit		= bnxt_start_xmit,
14729 	.ndo_stop		= bnxt_close,
14730 	.ndo_get_stats64	= bnxt_get_stats64,
14731 	.ndo_set_rx_mode	= bnxt_set_rx_mode,
14732 	.ndo_eth_ioctl		= bnxt_ioctl,
14733 	.ndo_validate_addr	= eth_validate_addr,
14734 	.ndo_set_mac_address	= bnxt_change_mac_addr,
14735 	.ndo_change_mtu		= bnxt_change_mtu,
14736 	.ndo_fix_features	= bnxt_fix_features,
14737 	.ndo_set_features	= bnxt_set_features,
14738 	.ndo_features_check	= bnxt_features_check,
14739 	.ndo_tx_timeout		= bnxt_tx_timeout,
14740 #ifdef CONFIG_BNXT_SRIOV
14741 	.ndo_get_vf_config	= bnxt_get_vf_config,
14742 	.ndo_set_vf_mac		= bnxt_set_vf_mac,
14743 	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
14744 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
14745 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
14746 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
14747 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
14748 #endif
14749 	.ndo_setup_tc           = bnxt_setup_tc,
14750 #ifdef CONFIG_RFS_ACCEL
14751 	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
14752 #endif
14753 	.ndo_bpf		= bnxt_xdp,
14754 	.ndo_xdp_xmit		= bnxt_xdp_xmit,
14755 	.ndo_bridge_getlink	= bnxt_bridge_getlink,
14756 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
14757 };
14758 
14759 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
14760 				    struct netdev_queue_stats_rx *stats)
14761 {
14762 	struct bnxt *bp = netdev_priv(dev);
14763 	struct bnxt_cp_ring_info *cpr;
14764 	u64 *sw;
14765 
14766 	cpr = &bp->bnapi[i]->cp_ring;
14767 	sw = cpr->stats.sw_stats;
14768 
14769 	stats->packets = 0;
14770 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
14771 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
14772 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
14773 
14774 	stats->bytes = 0;
14775 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
14776 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
14777 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
14778 
14779 	stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
14780 }
14781 
14782 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
14783 				    struct netdev_queue_stats_tx *stats)
14784 {
14785 	struct bnxt *bp = netdev_priv(dev);
14786 	struct bnxt_napi *bnapi;
14787 	u64 *sw;
14788 
14789 	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
14790 	sw = bnapi->cp_ring.stats.sw_stats;
14791 
14792 	stats->packets = 0;
14793 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
14794 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
14795 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
14796 
14797 	stats->bytes = 0;
14798 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
14799 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
14800 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
14801 }
14802 
14803 static void bnxt_get_base_stats(struct net_device *dev,
14804 				struct netdev_queue_stats_rx *rx,
14805 				struct netdev_queue_stats_tx *tx)
14806 {
14807 	struct bnxt *bp = netdev_priv(dev);
14808 
14809 	rx->packets = bp->net_stats_prev.rx_packets;
14810 	rx->bytes = bp->net_stats_prev.rx_bytes;
14811 	rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
14812 
14813 	tx->packets = bp->net_stats_prev.tx_packets;
14814 	tx->bytes = bp->net_stats_prev.tx_bytes;
14815 }
14816 
14817 static const struct netdev_stat_ops bnxt_stat_ops = {
14818 	.get_queue_stats_rx	= bnxt_get_queue_stats_rx,
14819 	.get_queue_stats_tx	= bnxt_get_queue_stats_tx,
14820 	.get_base_stats		= bnxt_get_base_stats,
14821 };
14822 
14823 static void bnxt_remove_one(struct pci_dev *pdev)
14824 {
14825 	struct net_device *dev = pci_get_drvdata(pdev);
14826 	struct bnxt *bp = netdev_priv(dev);
14827 
14828 	if (BNXT_PF(bp))
14829 		bnxt_sriov_disable(bp);
14830 
14831 	bnxt_rdma_aux_device_del(bp);
14832 
14833 	bnxt_ptp_clear(bp);
14834 	unregister_netdev(dev);
14835 
14836 	bnxt_rdma_aux_device_uninit(bp);
14837 
14838 	bnxt_free_l2_filters(bp, true);
14839 	bnxt_free_ntp_fltrs(bp, true);
14840 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
14841 		bnxt_clear_rss_ctxs(bp, true);
14842 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14843 	/* Flush any pending tasks */
14844 	cancel_work_sync(&bp->sp_task);
14845 	cancel_delayed_work_sync(&bp->fw_reset_task);
14846 	bp->sp_event = 0;
14847 
14848 	bnxt_dl_fw_reporters_destroy(bp);
14849 	bnxt_dl_unregister(bp);
14850 	bnxt_shutdown_tc(bp);
14851 
14852 	bnxt_clear_int_mode(bp);
14853 	bnxt_hwrm_func_drv_unrgtr(bp);
14854 	bnxt_free_hwrm_resources(bp);
14855 	bnxt_hwmon_uninit(bp);
14856 	bnxt_ethtool_free(bp);
14857 	bnxt_dcb_free(bp);
14858 	kfree(bp->ptp_cfg);
14859 	bp->ptp_cfg = NULL;
14860 	kfree(bp->fw_health);
14861 	bp->fw_health = NULL;
14862 	bnxt_cleanup_pci(bp);
14863 	bnxt_free_ctx_mem(bp);
14864 	kfree(bp->rss_indir_tbl);
14865 	bp->rss_indir_tbl = NULL;
14866 	bnxt_free_port_stats(bp);
14867 	free_netdev(dev);
14868 }
14869 
14870 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
14871 {
14872 	int rc = 0;
14873 	struct bnxt_link_info *link_info = &bp->link_info;
14874 
14875 	bp->phy_flags = 0;
14876 	rc = bnxt_hwrm_phy_qcaps(bp);
14877 	if (rc) {
14878 		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
14879 			   rc);
14880 		return rc;
14881 	}
14882 	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
14883 		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
14884 	else
14885 		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
14886 	if (!fw_dflt)
14887 		return 0;
14888 
14889 	mutex_lock(&bp->link_lock);
14890 	rc = bnxt_update_link(bp, false);
14891 	if (rc) {
14892 		mutex_unlock(&bp->link_lock);
14893 		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
14894 			   rc);
14895 		return rc;
14896 	}
14897 
14898 	/* Older firmware does not have supported_auto_speeds, so assume
14899 	 * that all supported speeds can be autonegotiated.
14900 	 */
14901 	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
14902 		link_info->support_auto_speeds = link_info->support_speeds;
14903 
14904 	bnxt_init_ethtool_link_settings(bp);
14905 	mutex_unlock(&bp->link_lock);
14906 	return 0;
14907 }
14908 
14909 static int bnxt_get_max_irq(struct pci_dev *pdev)
14910 {
14911 	u16 ctrl;
14912 
14913 	if (!pdev->msix_cap)
14914 		return 1;
14915 
14916 	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
14917 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
14918 }
14919 
14920 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14921 				int *max_cp)
14922 {
14923 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
14924 	int max_ring_grps = 0, max_irq;
14925 
14926 	*max_tx = hw_resc->max_tx_rings;
14927 	*max_rx = hw_resc->max_rx_rings;
14928 	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
14929 	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
14930 			bnxt_get_ulp_msix_num_in_use(bp),
14931 			hw_resc->max_stat_ctxs -
14932 			bnxt_get_ulp_stat_ctxs_in_use(bp));
14933 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14934 		*max_cp = min_t(int, *max_cp, max_irq);
14935 	max_ring_grps = hw_resc->max_hw_ring_grps;
14936 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
14937 		*max_cp -= 1;
14938 		*max_rx -= 2;
14939 	}
14940 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
14941 		*max_rx >>= 1;
14942 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
14943 		int rc;
14944 
14945 		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
14946 		if (rc) {
14947 			*max_rx = 0;
14948 			*max_tx = 0;
14949 		}
14950 		/* On P5 chips, max_cp output param should be available NQs */
14951 		*max_cp = max_irq;
14952 	}
14953 	*max_rx = min_t(int, *max_rx, max_ring_grps);
14954 }
14955 
14956 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
14957 {
14958 	int rx, tx, cp;
14959 
14960 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
14961 	*max_rx = rx;
14962 	*max_tx = tx;
14963 	if (!rx || !tx || !cp)
14964 		return -ENOMEM;
14965 
14966 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
14967 }
14968 
14969 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14970 			       bool shared)
14971 {
14972 	int rc;
14973 
14974 	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14975 	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
14976 		/* Not enough rings, try disabling agg rings. */
14977 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
14978 		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14979 		if (rc) {
14980 			/* set BNXT_FLAG_AGG_RINGS back for consistency */
14981 			bp->flags |= BNXT_FLAG_AGG_RINGS;
14982 			return rc;
14983 		}
14984 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
14985 		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14986 		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14987 		bnxt_set_ring_params(bp);
14988 	}
14989 
14990 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
14991 		int max_cp, max_stat, max_irq;
14992 
14993 		/* Reserve minimum resources for RoCE */
14994 		max_cp = bnxt_get_max_func_cp_rings(bp);
14995 		max_stat = bnxt_get_max_func_stat_ctxs(bp);
14996 		max_irq = bnxt_get_max_func_irqs(bp);
14997 		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
14998 		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
14999 		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
15000 			return 0;
15001 
15002 		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
15003 		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
15004 		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
15005 		max_cp = min_t(int, max_cp, max_irq);
15006 		max_cp = min_t(int, max_cp, max_stat);
15007 		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
15008 		if (rc)
15009 			rc = 0;
15010 	}
15011 	return rc;
15012 }
15013 
15014 /* In the initial default shared ring setting, each shared ring must have an
15015  * RX/TX ring pair.
15016  */
15017 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
15018 {
15019 	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
15020 	bp->rx_nr_rings = bp->cp_nr_rings;
15021 	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
15022 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15023 }
15024 
15025 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
15026 {
15027 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
15028 	int avail_msix;
15029 
15030 	if (!bnxt_can_reserve_rings(bp))
15031 		return 0;
15032 
15033 	if (sh)
15034 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
15035 	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
15036 	/* Reduce default rings on multi-port cards so that total default
15037 	 * rings do not exceed CPU count.
15038 	 */
15039 	if (bp->port_count > 1) {
15040 		int max_rings =
15041 			max_t(int, num_online_cpus() / bp->port_count, 1);
15042 
15043 		dflt_rings = min_t(int, dflt_rings, max_rings);
15044 	}
15045 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
15046 	if (rc)
15047 		return rc;
15048 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
15049 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
15050 	if (sh)
15051 		bnxt_trim_dflt_sh_rings(bp);
15052 	else
15053 		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
15054 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15055 
15056 	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
15057 	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
15058 		int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
15059 
15060 		bnxt_set_ulp_msix_num(bp, ulp_num_msix);
15061 		bnxt_set_dflt_ulp_stat_ctxs(bp);
15062 	}
15063 
15064 	rc = __bnxt_reserve_rings(bp);
15065 	if (rc && rc != -ENODEV)
15066 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
15067 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15068 	if (sh)
15069 		bnxt_trim_dflt_sh_rings(bp);
15070 
15071 	/* Rings may have been trimmed, re-reserve the trimmed rings. */
15072 	if (bnxt_need_reserve_rings(bp)) {
15073 		rc = __bnxt_reserve_rings(bp);
15074 		if (rc && rc != -ENODEV)
15075 			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
15076 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15077 	}
15078 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
15079 		bp->rx_nr_rings++;
15080 		bp->cp_nr_rings++;
15081 	}
15082 	if (rc) {
15083 		bp->tx_nr_rings = 0;
15084 		bp->rx_nr_rings = 0;
15085 	}
15086 	return rc;
15087 }
15088 
15089 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
15090 {
15091 	int rc;
15092 
15093 	if (bp->tx_nr_rings)
15094 		return 0;
15095 
15096 	bnxt_ulp_irq_stop(bp);
15097 	bnxt_clear_int_mode(bp);
15098 	rc = bnxt_set_dflt_rings(bp, true);
15099 	if (rc) {
15100 		if (BNXT_VF(bp) && rc == -ENODEV)
15101 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
15102 		else
15103 			netdev_err(bp->dev, "Not enough rings available.\n");
15104 		goto init_dflt_ring_err;
15105 	}
15106 	rc = bnxt_init_int_mode(bp);
15107 	if (rc)
15108 		goto init_dflt_ring_err;
15109 
15110 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15111 
15112 	bnxt_set_dflt_rfs(bp);
15113 
15114 init_dflt_ring_err:
15115 	bnxt_ulp_irq_restart(bp, rc);
15116 	return rc;
15117 }
15118 
15119 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
15120 {
15121 	int rc;
15122 
15123 	ASSERT_RTNL();
15124 	bnxt_hwrm_func_qcaps(bp);
15125 
15126 	if (netif_running(bp->dev))
15127 		__bnxt_close_nic(bp, true, false);
15128 
15129 	bnxt_ulp_irq_stop(bp);
15130 	bnxt_clear_int_mode(bp);
15131 	rc = bnxt_init_int_mode(bp);
15132 	bnxt_ulp_irq_restart(bp, rc);
15133 
15134 	if (netif_running(bp->dev)) {
15135 		if (rc)
15136 			dev_close(bp->dev);
15137 		else
15138 			rc = bnxt_open_nic(bp, true, false);
15139 	}
15140 
15141 	return rc;
15142 }
15143 
15144 static int bnxt_init_mac_addr(struct bnxt *bp)
15145 {
15146 	int rc = 0;
15147 
15148 	if (BNXT_PF(bp)) {
15149 		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
15150 	} else {
15151 #ifdef CONFIG_BNXT_SRIOV
15152 		struct bnxt_vf_info *vf = &bp->vf;
15153 		bool strict_approval = true;
15154 
15155 		if (is_valid_ether_addr(vf->mac_addr)) {
15156 			/* overwrite netdev dev_addr with admin VF MAC */
15157 			eth_hw_addr_set(bp->dev, vf->mac_addr);
15158 			/* Older PF driver or firmware may not approve this
15159 			 * correctly.
15160 			 */
15161 			strict_approval = false;
15162 		} else {
15163 			eth_hw_addr_random(bp->dev);
15164 		}
15165 		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
15166 #endif
15167 	}
15168 	return rc;
15169 }
15170 
15171 static void bnxt_vpd_read_info(struct bnxt *bp)
15172 {
15173 	struct pci_dev *pdev = bp->pdev;
15174 	unsigned int vpd_size, kw_len;
15175 	int pos, size;
15176 	u8 *vpd_data;
15177 
15178 	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
15179 	if (IS_ERR(vpd_data)) {
15180 		pci_warn(pdev, "Unable to read VPD\n");
15181 		return;
15182 	}
15183 
15184 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
15185 					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
15186 	if (pos < 0)
15187 		goto read_sn;
15188 
15189 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
15190 	memcpy(bp->board_partno, &vpd_data[pos], size);
15191 
15192 read_sn:
15193 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
15194 					   PCI_VPD_RO_KEYWORD_SERIALNO,
15195 					   &kw_len);
15196 	if (pos < 0)
15197 		goto exit;
15198 
15199 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
15200 	memcpy(bp->board_serialno, &vpd_data[pos], size);
15201 exit:
15202 	kfree(vpd_data);
15203 }
15204 
15205 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
15206 {
15207 	struct pci_dev *pdev = bp->pdev;
15208 	u64 qword;
15209 
15210 	qword = pci_get_dsn(pdev);
15211 	if (!qword) {
15212 		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
15213 		return -EOPNOTSUPP;
15214 	}
15215 
15216 	put_unaligned_le64(qword, dsn);
15217 
15218 	bp->flags |= BNXT_FLAG_DSN_VALID;
15219 	return 0;
15220 }
15221 
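/* Map the doorbell BAR (BAR 2) using the size determined by the firmware */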
15222 static int bnxt_map_db_bar(struct bnxt *bp)
15223 {
15224 	if (!bp->db_size)
15225 		return -ENODEV;
15226 	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
15227 	if (!bp->bar1)
15228 		return -ENOMEM;
15229 	return 0;
15230 }
15231 
15232 void bnxt_print_device_info(struct bnxt *bp)
15233 {
15234 	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
15235 		    board_info[bp->board_idx].name,
15236 		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
15237 
15238 	pcie_print_link_status(bp->pdev);
15239 }
15240 
15241 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
15242 {
15243 	struct bnxt_hw_resc *hw_resc;
15244 	struct net_device *dev;
15245 	struct bnxt *bp;
15246 	int rc, max_irqs;
15247 
15248 	if (pci_is_bridge(pdev))
15249 		return -ENODEV;
15250 
15251 	/* Clear any DMA transactions left pending by the crashed kernel
15252 	 * while loading the driver in the capture (kdump) kernel.
15253 	 */
15254 	if (is_kdump_kernel()) {
15255 		pci_clear_master(pdev);
15256 		pcie_flr(pdev);
15257 	}
15258 
15259 	max_irqs = bnxt_get_max_irq(pdev);
15260 	dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
15261 				 max_irqs);
15262 	if (!dev)
15263 		return -ENOMEM;
15264 
15265 	bp = netdev_priv(dev);
15266 	bp->board_idx = ent->driver_data;
15267 	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
15268 	bnxt_set_max_func_irqs(bp, max_irqs);
15269 
15270 	if (bnxt_vf_pciid(bp->board_idx))
15271 		bp->flags |= BNXT_FLAG_VF;
15272 
15273 	/* No devlink port registration in case of a VF */
15274 	if (BNXT_PF(bp))
15275 		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
15276 
15277 	if (pdev->msix_cap)
15278 		bp->flags |= BNXT_FLAG_MSIX_CAP;
15279 
15280 	rc = bnxt_init_board(pdev, dev);
15281 	if (rc < 0)
15282 		goto init_err_free;
15283 
15284 	dev->netdev_ops = &bnxt_netdev_ops;
15285 	dev->stat_ops = &bnxt_stat_ops;
15286 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
15287 	dev->ethtool_ops = &bnxt_ethtool_ops;
15288 	pci_set_drvdata(pdev, dev);
15289 
15290 	rc = bnxt_alloc_hwrm_resources(bp);
15291 	if (rc)
15292 		goto init_err_pci_clean;
15293 
15294 	mutex_init(&bp->hwrm_cmd_lock);
15295 	mutex_init(&bp->link_lock);
15296 
15297 	rc = bnxt_fw_init_one_p1(bp);
15298 	if (rc)
15299 		goto init_err_pci_clean;
15300 
15301 	if (BNXT_PF(bp))
15302 		bnxt_vpd_read_info(bp);
15303 
15304 	if (BNXT_CHIP_P5_PLUS(bp)) {
15305 		bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
15306 		if (BNXT_CHIP_P7(bp))
15307 			bp->flags |= BNXT_FLAG_CHIP_P7;
15308 	}
15309 
15310 	rc = bnxt_alloc_rss_indir_tbl(bp, NULL);
15311 	if (rc)
15312 		goto init_err_pci_clean;
15313 
15314 	rc = bnxt_fw_init_one_p2(bp);
15315 	if (rc)
15316 		goto init_err_pci_clean;
15317 
15318 	rc = bnxt_map_db_bar(bp);
15319 	if (rc) {
15320 		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
15321 			rc);
15322 		goto init_err_pci_clean;
15323 	}
15324 
15325 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
15326 			   NETIF_F_TSO | NETIF_F_TSO6 |
15327 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
15328 			   NETIF_F_GSO_IPXIP4 |
15329 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
15330 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
15331 			   NETIF_F_RXCSUM | NETIF_F_GRO;
15332 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15333 		dev->hw_features |= NETIF_F_GSO_UDP_L4;
15334 
15335 	if (BNXT_SUPPORTS_TPA(bp))
15336 		dev->hw_features |= NETIF_F_LRO;
15337 
15338 	dev->hw_enc_features =
15339 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
15340 			NETIF_F_TSO | NETIF_F_TSO6 |
15341 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
15342 			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
15343 			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
15344 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15345 		dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
15346 	if (bp->flags & BNXT_FLAG_CHIP_P7)
15347 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
15348 	else
15349 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
15350 
15351 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
15352 				    NETIF_F_GSO_GRE_CSUM;
15353 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
15354 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
15355 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
15356 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
15357 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
15358 	if (BNXT_SUPPORTS_TPA(bp))
15359 		dev->hw_features |= NETIF_F_GRO_HW;
15360 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
15361 	if (dev->features & NETIF_F_GRO_HW)
15362 		dev->features &= ~NETIF_F_LRO;
15363 	dev->priv_flags |= IFF_UNICAST_FLT;
15364 
15365 	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
15366 	if (bp->tso_max_segs)
15367 		netif_set_tso_max_segs(dev, bp->tso_max_segs);
15368 
15369 	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
15370 			    NETDEV_XDP_ACT_RX_SG;
15371 
15372 #ifdef CONFIG_BNXT_SRIOV
15373 	init_waitqueue_head(&bp->sriov_cfg_wait);
15374 #endif
15375 	if (BNXT_SUPPORTS_TPA(bp)) {
15376 		bp->gro_func = bnxt_gro_func_5730x;
15377 		if (BNXT_CHIP_P4(bp))
15378 			bp->gro_func = bnxt_gro_func_5731x;
15379 		else if (BNXT_CHIP_P5_PLUS(bp))
15380 			bp->gro_func = bnxt_gro_func_5750x;
15381 	}
15382 	if (!BNXT_CHIP_P4_PLUS(bp))
15383 		bp->flags |= BNXT_FLAG_DOUBLE_DB;
15384 
15385 	rc = bnxt_init_mac_addr(bp);
15386 	if (rc) {
15387 		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
15388 		rc = -EADDRNOTAVAIL;
15389 		goto init_err_pci_clean;
15390 	}
15391 
15392 	if (BNXT_PF(bp)) {
15393 		/* Read the adapter's DSN to use as the eswitch switch_id */
15394 		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
15395 	}
15396 
15397 	/* MTU range: 60 - FW defined max */
15398 	dev->min_mtu = ETH_ZLEN;
15399 	dev->max_mtu = bp->max_mtu;
15400 
15401 	rc = bnxt_probe_phy(bp, true);
15402 	if (rc)
15403 		goto init_err_pci_clean;
15404 
15405 	hw_resc = &bp->hw_resc;
15406 	bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
15407 		       BNXT_L2_FLTR_MAX_FLTR;
15408 	/* Older firmware may not report these filters properly */
15409 	if (bp->max_fltr < BNXT_MAX_FLTR)
15410 		bp->max_fltr = BNXT_MAX_FLTR;
15411 	bnxt_init_l2_fltr_tbl(bp);
15412 	bnxt_set_rx_skb_mode(bp, false);
15413 	bnxt_set_tpa_flags(bp);
15414 	bnxt_set_ring_params(bp);
15415 	bnxt_rdma_aux_device_init(bp);
15416 	rc = bnxt_set_dflt_rings(bp, true);
15417 	if (rc) {
15418 		if (BNXT_VF(bp) && rc == -ENODEV) {
15419 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
15420 		} else {
15421 			netdev_err(bp->dev, "Not enough rings available.\n");
15422 			rc = -ENOMEM;
15423 		}
15424 		goto init_err_pci_clean;
15425 	}
15426 
15427 	bnxt_fw_init_one_p3(bp);
15428 
15429 	bnxt_init_dflt_coal(bp);
15430 
15431 	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
15432 		bp->flags |= BNXT_FLAG_STRIP_VLAN;
15433 
15434 	rc = bnxt_init_int_mode(bp);
15435 	if (rc)
15436 		goto init_err_pci_clean;
15437 
15438 	/* No TC has been set yet and rings may have been trimmed due to
15439 	 * limited MSIX, so we re-initialize the TX rings per TC.
15440 	 */
15441 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15442 
15443 	if (BNXT_PF(bp)) {
15444 		if (!bnxt_pf_wq) {
15445 			bnxt_pf_wq =
15446 				create_singlethread_workqueue("bnxt_pf_wq");
15447 			if (!bnxt_pf_wq) {
15448 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
15449 				rc = -ENOMEM;
15450 				goto init_err_pci_clean;
15451 			}
15452 		}
15453 		rc = bnxt_init_tc(bp);
15454 		if (rc)
15455 			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
15456 				   rc);
15457 	}
15458 
15459 	bnxt_inv_fw_health_reg(bp);
15460 	rc = bnxt_dl_register(bp);
15461 	if (rc)
15462 		goto init_err_dl;
15463 
15464 	INIT_LIST_HEAD(&bp->usr_fltr_list);
15465 
15466 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
15467 		bnxt_init_multi_rss_ctx(bp);
15468 
15469 
15470 	rc = register_netdev(dev);
15471 	if (rc)
15472 		goto init_err_cleanup;
15473 
15474 	bnxt_dl_fw_reporters_create(bp);
15475 
15476 	bnxt_rdma_aux_device_add(bp);
15477 
15478 	bnxt_print_device_info(bp);
15479 
15480 	pci_save_state(pdev);
15481 
15482 	return 0;
15483 init_err_cleanup:
15484 	bnxt_rdma_aux_device_uninit(bp);
15485 	bnxt_dl_unregister(bp);
15486 init_err_dl:
15487 	bnxt_shutdown_tc(bp);
15488 	bnxt_clear_int_mode(bp);
15489 
15490 init_err_pci_clean:
15491 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
15492 		bnxt_clear_rss_ctxs(bp, true);
15493 	bnxt_hwrm_func_drv_unrgtr(bp);
15494 	bnxt_free_hwrm_resources(bp);
15495 	bnxt_hwmon_uninit(bp);
15496 	bnxt_ethtool_free(bp);
15497 	bnxt_ptp_clear(bp);
15498 	kfree(bp->ptp_cfg);
15499 	bp->ptp_cfg = NULL;
15500 	kfree(bp->fw_health);
15501 	bp->fw_health = NULL;
15502 	bnxt_cleanup_pci(bp);
15503 	bnxt_free_ctx_mem(bp);
15504 	kfree(bp->rss_indir_tbl);
15505 	bp->rss_indir_tbl = NULL;
15506 
15507 init_err_free:
15508 	free_netdev(dev);
15509 	return rc;
15510 }
15511 
15512 static void bnxt_shutdown(struct pci_dev *pdev)
15513 {
15514 	struct net_device *dev = pci_get_drvdata(pdev);
15515 	struct bnxt *bp;
15516 
15517 	if (!dev)
15518 		return;
15519 
15520 	rtnl_lock();
15521 	bp = netdev_priv(dev);
15522 	if (!bp)
15523 		goto shutdown_exit;
15524 
15525 	if (netif_running(dev))
15526 		dev_close(dev);
15527 
15528 	bnxt_clear_int_mode(bp);
15529 	pci_disable_device(pdev);
15530 
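	/* When powering off, arm wake-on-LAN (if configured) and enter D3hot */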
15531 	if (system_state == SYSTEM_POWER_OFF) {
15532 		pci_wake_from_d3(pdev, bp->wol);
15533 		pci_set_power_state(pdev, PCI_D3hot);
15534 	}
15535 
15536 shutdown_exit:
15537 	rtnl_unlock();
15538 }
15539 
15540 #ifdef CONFIG_PM_SLEEP
15541 static int bnxt_suspend(struct device *device)
15542 {
15543 	struct net_device *dev = dev_get_drvdata(device);
15544 	struct bnxt *bp = netdev_priv(dev);
15545 	int rc = 0;
15546 
15547 	bnxt_ulp_stop(bp);
15548 
15549 	rtnl_lock();
15550 	if (netif_running(dev)) {
15551 		netif_device_detach(dev);
15552 		rc = bnxt_close(dev);
15553 	}
15554 	bnxt_hwrm_func_drv_unrgtr(bp);
15555 	pci_disable_device(bp->pdev);
15556 	bnxt_free_ctx_mem(bp);
15557 	rtnl_unlock();
15558 	return rc;
15559 }
15560 
15561 static int bnxt_resume(struct device *device)
15562 {
15563 	struct net_device *dev = dev_get_drvdata(device);
15564 	struct bnxt *bp = netdev_priv(dev);
15565 	int rc = 0;
15566 
15567 	rtnl_lock();
15568 	rc = pci_enable_device(bp->pdev);
15569 	if (rc) {
15570 		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
15571 			   rc);
15572 		goto resume_exit;
15573 	}
15574 	pci_set_master(bp->pdev);
15575 	if (bnxt_hwrm_ver_get(bp)) {
15576 		rc = -ENODEV;
15577 		goto resume_exit;
15578 	}
15579 	rc = bnxt_hwrm_func_reset(bp);
15580 	if (rc) {
15581 		rc = -EBUSY;
15582 		goto resume_exit;
15583 	}
15584 
15585 	rc = bnxt_hwrm_func_qcaps(bp);
15586 	if (rc)
15587 		goto resume_exit;
15588 
15589 	bnxt_clear_reservations(bp, true);
15590 
15591 	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
15592 		rc = -ENODEV;
15593 		goto resume_exit;
15594 	}
15595 
15596 	bnxt_get_wol_settings(bp);
15597 	if (netif_running(dev)) {
15598 		rc = bnxt_open(dev);
15599 		if (!rc)
15600 			netif_device_attach(dev);
15601 	}
15602 
15603 resume_exit:
15604 	rtnl_unlock();
15605 	bnxt_ulp_start(bp, rc);
15606 	if (!rc)
15607 		bnxt_reenable_sriov(bp);
15608 	return rc;
15609 }
15610 
15611 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
15612 #define BNXT_PM_OPS (&bnxt_pm_ops)
15613 
15614 #else
15615 
15616 #define BNXT_PM_OPS NULL
15617 
15618 #endif /* CONFIG_PM_SLEEP */
15619 
15620 /**
15621  * bnxt_io_error_detected - called when PCI error is detected
15622  * @pdev: Pointer to PCI device
15623  * @state: The current pci connection state
15624  *
15625  * This function is called after a PCI bus error affecting
15626  * this device has been detected.
15627  */
15628 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
15629 					       pci_channel_state_t state)
15630 {
15631 	struct net_device *netdev = pci_get_drvdata(pdev);
15632 	struct bnxt *bp = netdev_priv(netdev);
15633 	bool abort = false;
15634 
15635 	netdev_info(netdev, "PCI I/O error detected\n");
15636 
15637 	bnxt_ulp_stop(bp);
15638 
15639 	rtnl_lock();
15640 	netif_device_detach(netdev);
15641 
15642 	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15643 		netdev_err(bp->dev, "Firmware reset already in progress\n");
15644 		abort = true;
15645 	}
15646 
15647 	if (abort || state == pci_channel_io_perm_failure) {
15648 		rtnl_unlock();
15649 		return PCI_ERS_RESULT_DISCONNECT;
15650 	}
15651 
15652 	/* Link is not reliable anymore if state is pci_channel_io_frozen
15653 	 * so we disable bus master to prevent any potential bad DMAs before
15654 	 * freeing kernel memory.
15655 	 */
15656 	if (state == pci_channel_io_frozen) {
15657 		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
15658 		bnxt_fw_fatal_close(bp);
15659 	}
15660 
15661 	if (netif_running(netdev))
15662 		__bnxt_close_nic(bp, true, true);
15663 
15664 	if (pci_is_enabled(pdev))
15665 		pci_disable_device(pdev);
15666 	bnxt_free_ctx_mem(bp);
15667 	rtnl_unlock();
15668 
15669 	/* Request a slot reset. */
15670 	return PCI_ERS_RESULT_NEED_RESET;
15671 }
15672 
15673 /**
15674  * bnxt_io_slot_reset - called after the pci bus has been reset.
15675  * @pdev: Pointer to PCI device
15676  *
15677  * Restart the card from scratch, as if from a cold-boot.
15678  * At this point, the card has experienced a hard reset,
15679  * followed by fixups by BIOS, and has its config space
15680  * set up identically to what it was at cold boot.
15681  */
15682 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
15683 {
15684 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
15685 	struct net_device *netdev = pci_get_drvdata(pdev);
15686 	struct bnxt *bp = netdev_priv(netdev);
15687 	int retry = 0;
15688 	int err = 0;
15689 	int off;
15690 
15691 	netdev_info(bp->dev, "PCI Slot Reset\n");
15692 
15693 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
15694 	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
15695 		msleep(900);
15696 
15697 	rtnl_lock();
15698 
15699 	if (pci_enable_device(pdev)) {
15700 		dev_err(&pdev->dev,
15701 			"Cannot re-enable PCI device after reset.\n");
15702 	} else {
15703 		pci_set_master(pdev);
15704 		/* Upon a fatal error, the device's internal logic that latches
15705 		 * the BAR values is reset and is restored only when the BARs
15706 		 * are rewritten.
15707 		 *
15708 		 * Since pci_restore_state() does not rewrite a BAR whose value
15709 		 * matches the previously saved value, the driver writes the
15710 		 * BARs to 0 to force a restore after a fatal error.
15711 		 */
15712 		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
15713 				       &bp->state)) {
15714 			for (off = PCI_BASE_ADDRESS_0;
15715 			     off <= PCI_BASE_ADDRESS_5; off += 4)
15716 				pci_write_config_dword(bp->pdev, off, 0);
15717 		}
15718 		pci_restore_state(pdev);
15719 		pci_save_state(pdev);
15720 
15721 		bnxt_inv_fw_health_reg(bp);
15722 		bnxt_try_map_fw_health_reg(bp);
15723 
15724 		/* In some PCIe AER scenarios, the firmware may take up to
15725 		 * 10 seconds to become ready.
15726 		 */
15727 		do {
15728 			err = bnxt_try_recover_fw(bp);
15729 			if (!err)
15730 				break;
15731 			retry++;
15732 		} while (retry < BNXT_FW_SLOT_RESET_RETRY);
15733 
15734 		if (err) {
15735 			dev_err(&pdev->dev, "Firmware not ready\n");
15736 			goto reset_exit;
15737 		}
15738 
15739 		err = bnxt_hwrm_func_reset(bp);
15740 		if (!err)
15741 			result = PCI_ERS_RESULT_RECOVERED;
15742 
15743 		bnxt_ulp_irq_stop(bp);
15744 		bnxt_clear_int_mode(bp);
15745 		err = bnxt_init_int_mode(bp);
15746 		bnxt_ulp_irq_restart(bp, err);
15747 	}
15748 
15749 reset_exit:
15750 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15751 	bnxt_clear_reservations(bp, true);
15752 	rtnl_unlock();
15753 
15754 	return result;
15755 }
15756 
15757 /**
15758  * bnxt_io_resume - called when traffic can start flowing again.
15759  * @pdev: Pointer to PCI device
15760  *
15761  * This callback is called when the error recovery driver tells
15762  * us that it's OK to resume normal operation.
15763  */
15764 static void bnxt_io_resume(struct pci_dev *pdev)
15765 {
15766 	struct net_device *netdev = pci_get_drvdata(pdev);
15767 	struct bnxt *bp = netdev_priv(netdev);
15768 	int err;
15769 
15770 	netdev_info(bp->dev, "PCI Slot Resume\n");
15771 	rtnl_lock();
15772 
15773 	err = bnxt_hwrm_func_qcaps(bp);
15774 	if (!err && netif_running(netdev))
15775 		err = bnxt_open(netdev);
15776 
15777 	if (!err)
15778 		netif_device_attach(netdev);
15779 
15780 	rtnl_unlock();
15781 	bnxt_ulp_start(bp, err);
15782 	if (!err)
15783 		bnxt_reenable_sriov(bp);
15784 }
15785 
15786 static const struct pci_error_handlers bnxt_err_handler = {
15787 	.error_detected	= bnxt_io_error_detected,
15788 	.slot_reset	= bnxt_io_slot_reset,
15789 	.resume		= bnxt_io_resume
15790 };
15791 
15792 static struct pci_driver bnxt_pci_driver = {
15793 	.name		= DRV_MODULE_NAME,
15794 	.id_table	= bnxt_pci_tbl,
15795 	.probe		= bnxt_init_one,
15796 	.remove		= bnxt_remove_one,
15797 	.shutdown	= bnxt_shutdown,
15798 	.driver.pm	= BNXT_PM_OPS,
15799 	.err_handler	= &bnxt_err_handler,
15800 #if defined(CONFIG_BNXT_SRIOV)
15801 	.sriov_configure = bnxt_sriov_configure,
15802 #endif
15803 };
15804 
15805 static int __init bnxt_init(void)
15806 {
15807 	int err;
15808 
15809 	bnxt_debug_init();
15810 	err = pci_register_driver(&bnxt_pci_driver);
15811 	if (err) {
15812 		bnxt_debug_exit();
15813 		return err;
15814 	}
15815 
15816 	return 0;
15817 }
15818 
15819 static void __exit bnxt_exit(void)
15820 {
15821 	pci_unregister_driver(&bnxt_pci_driver);
15822 	if (bnxt_pf_wq)
15823 		destroy_workqueue(bnxt_pf_wq);
15824 	bnxt_debug_exit();
15825 }
15826 
15827 module_init(bnxt_init);
15828 module_exit(bnxt_exit);
15829