xref: /linux/drivers/net/ethernet/broadcom/bnxt/bnxt.c (revision e958da0ddbe831197a0023251880a4a09d5ba268)
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_queues.h>
58 
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_hwrm.h"
62 #include "bnxt_ulp.h"
63 #include "bnxt_sriov.h"
64 #include "bnxt_ethtool.h"
65 #include "bnxt_dcb.h"
66 #include "bnxt_xdp.h"
67 #include "bnxt_ptp.h"
68 #include "bnxt_vfr.h"
69 #include "bnxt_tc.h"
70 #include "bnxt_devlink.h"
71 #include "bnxt_debugfs.h"
72 #include "bnxt_hwmon.h"
73 
74 #define BNXT_TX_TIMEOUT		(5 * HZ)
75 #define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
76 				 NETIF_MSG_TX_ERR)
77 
78 MODULE_LICENSE("GPL");
79 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
80 
81 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
82 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
83 #define BNXT_RX_COPY_THRESH 256
84 
85 #define BNXT_TX_PUSH_THRESH 164
86 
87 /* indexed by enum board_idx */
88 static const struct {
89 	char *name;
90 } board_info[] = {
91 	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
92 	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
93 	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
94 	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
95 	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
96 	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
97 	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
98 	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
99 	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
100 	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
101 	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
102 	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
103 	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
104 	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
105 	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
106 	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
107 	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
108 	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
109 	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
110 	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
111 	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
112 	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
113 	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
114 	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
115 	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
116 	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
117 	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
118 	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
119 	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
120 	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
121 	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122 	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
123 	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
124 	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
126 	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
127 	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
128 	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
129 	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
130 	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
131 	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
132 	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
133 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
134 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
135 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
136 	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
137 	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
138 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
139 	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
140 	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
141 };
142 
143 static const struct pci_device_id bnxt_pci_tbl[] = {
144 	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
145 	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
146 	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
147 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
148 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
149 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
150 	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
151 	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
152 	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
153 	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
154 	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
155 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
156 	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
157 	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
158 	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
159 	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
160 	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
161 	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
162 	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
163 	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
164 	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
165 	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
166 	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
167 	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
168 	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
169 	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
170 	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
171 	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
172 	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
173 	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
174 	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
175 	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
176 	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
177 	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
178 	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
179 	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
180 	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
181 	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
182 	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
183 	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
184 	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
185 	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
186 	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
187 	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
188 	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
189 	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
190 	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
191 	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
192 	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
193 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
194 #ifdef CONFIG_BNXT_SRIOV
195 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
196 	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
197 	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
198 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
199 	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
200 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
201 	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
202 	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
203 	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
204 	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
205 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
206 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
207 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
208 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
209 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
210 	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
211 	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
212 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
213 	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
214 	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
215 	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
216 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
217 #endif
218 	{ 0 }
219 };
220 
221 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
222 
223 static const u16 bnxt_vf_req_snif[] = {
224 	HWRM_FUNC_CFG,
225 	HWRM_FUNC_VF_CFG,
226 	HWRM_PORT_PHY_QCFG,
227 	HWRM_CFA_L2_FILTER_ALLOC,
228 };
229 
230 static const u16 bnxt_async_events_arr[] = {
231 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
232 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
233 	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
234 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
235 	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
236 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
237 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
238 	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
239 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
240 	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
241 	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
242 	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
243 	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
244 	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
245 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
246 	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
247 };
248 
249 static struct workqueue_struct *bnxt_pf_wq;
250 
251 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
252 			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
253 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
254 
255 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
256 	.ports = {
257 		.src = 0,
258 		.dst = 0,
259 	},
260 	.addrs = {
261 		.v6addrs = {
262 			.src = BNXT_IPV6_MASK_NONE,
263 			.dst = BNXT_IPV6_MASK_NONE,
264 		},
265 	},
266 };
267 
268 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
269 	.ports = {
270 		.src = cpu_to_be16(0xffff),
271 		.dst = cpu_to_be16(0xffff),
272 	},
273 	.addrs = {
274 		.v6addrs = {
275 			.src = BNXT_IPV6_MASK_ALL,
276 			.dst = BNXT_IPV6_MASK_ALL,
277 		},
278 	},
279 };
280 
281 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
282 	.ports = {
283 		.src = cpu_to_be16(0xffff),
284 		.dst = cpu_to_be16(0xffff),
285 	},
286 	.addrs = {
287 		.v4addrs = {
288 			.src = cpu_to_be32(0xffffffff),
289 			.dst = cpu_to_be32(0xffffffff),
290 		},
291 	},
292 };
293 
294 static bool bnxt_vf_pciid(enum board_idx idx)
295 {
296 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
297 		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
298 		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
299 		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
300 }
301 
302 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
303 #define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
304 #define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
305 
306 #define BNXT_CP_DB_IRQ_DIS(db)						\
307 		writel(DB_CP_IRQ_DIS_FLAGS, db)
308 
309 #define BNXT_DB_CQ(db, idx)						\
310 	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
311 
312 #define BNXT_DB_NQ_P5(db, idx)						\
313 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
314 		    (db)->doorbell)
315 
316 #define BNXT_DB_NQ_P7(db, idx)						\
317 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
318 		    DB_RING_IDX(db, idx), (db)->doorbell)
319 
320 #define BNXT_DB_CQ_ARM(db, idx)						\
321 	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
322 
323 #define BNXT_DB_NQ_ARM_P5(db, idx)					\
324 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
325 		    DB_RING_IDX(db, idx), (db)->doorbell)
326 
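/* Ring the NQ doorbell for @idx, using the doorbell format appropriate
 * to the chip generation (P7, P5+, or the legacy CP doorbell).
 */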
327 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
328 {
329 	if (bp->flags & BNXT_FLAG_CHIP_P7)
330 		BNXT_DB_NQ_P7(db, idx);
331 	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
332 		BNXT_DB_NQ_P5(db, idx);
333 	else
334 		BNXT_DB_CQ(db, idx);
335 }
336 
337 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
338 {
339 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
340 		BNXT_DB_NQ_ARM_P5(db, idx);
341 	else
342 		BNXT_DB_CQ_ARM(db, idx);
343 }
344 
345 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
346 {
347 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
348 		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
349 			    DB_RING_IDX(db, idx), db->doorbell);
350 	else
351 		BNXT_DB_CQ(db, idx);
352 }
353 
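/* Schedule the FW reset task after @delay.  PFs use the dedicated
 * bnxt_pf_wq workqueue; VFs use the system workqueue.  Does nothing
 * unless BNXT_STATE_IN_FW_RESET is set.
 */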
354 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
355 {
356 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
357 		return;
358 
359 	if (BNXT_PF(bp))
360 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
361 	else
362 		schedule_delayed_work(&bp->fw_reset_task, delay);
363 }
364 
365 static void __bnxt_queue_sp_work(struct bnxt *bp)
366 {
367 	if (BNXT_PF(bp))
368 		queue_work(bnxt_pf_wq, &bp->sp_task);
369 	else
370 		schedule_work(&bp->sp_task);
371 }
372 
373 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
374 {
375 	set_bit(event, &bp->sp_event);
376 	__bnxt_queue_sp_work(bp);
377 }
378 
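/* Schedule a reset for an RX ring: a full reset task on P5+ chips, a
 * single-ring reset otherwise.  rx_next_cons is set to an invalid value
 * (0xffff) until the ring is reset.
 */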
379 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
380 {
381 	if (!rxr->bnapi->in_reset) {
382 		rxr->bnapi->in_reset = true;
383 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
384 			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
385 		else
386 			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
387 		__bnxt_queue_sp_work(bp);
388 	}
389 	rxr->rx_next_cons = 0xffff;
390 }
391 
392 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
393 			  u16 curr)
394 {
395 	struct bnxt_napi *bnapi = txr->bnapi;
396 
397 	if (bnapi->tx_fault)
398 		return;
399 
400 	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
401 		   txr->txq_index, txr->tx_hw_cons,
402 		   txr->tx_cons, txr->tx_prod, curr);
403 	WARN_ON_ONCE(1);
404 	bnapi->tx_fault = 1;
405 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
406 }
407 
408 const u16 bnxt_lhint_arr[] = {
409 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
410 	TX_BD_FLAGS_LHINT_512_TO_1023,
411 	TX_BD_FLAGS_LHINT_1024_TO_2047,
412 	TX_BD_FLAGS_LHINT_1024_TO_2047,
413 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
414 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
415 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
416 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
417 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
418 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
419 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
420 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
421 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
422 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
423 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
424 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
425 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
426 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
427 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
428 };
429 
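/* Return the CFA action (hardware port mux ID) attached to the skb's
 * metadata dst, or 0 if no HW_PORT_MUX metadata is present.
 */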
430 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
431 {
432 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
433 
434 	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
435 		return 0;
436 
437 	return md_dst->u.port_info.port_id;
438 }
439 
440 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
441 			     u16 prod)
442 {
443 	/* Sync BD data before updating doorbell */
444 	wmb();
445 	bnxt_db_write(bp, &txr->tx_db, prod);
446 	txr->kick_pending = 0;
447 }
448 
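/* Main transmit routine.  Small packets (up to bp->tx_push_thresh bytes
 * with no special lflags) may be written inline through the push
 * doorbell when the ring is empty; everything else is DMA-mapped and
 * described with one long TX BD plus one BD per fragment.
 */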
449 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
450 {
451 	struct bnxt *bp = netdev_priv(dev);
452 	struct tx_bd *txbd, *txbd0;
453 	struct tx_bd_ext *txbd1;
454 	struct netdev_queue *txq;
455 	int i;
456 	dma_addr_t mapping;
457 	unsigned int length, pad = 0;
458 	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
459 	u16 prod, last_frag;
460 	struct pci_dev *pdev = bp->pdev;
461 	struct bnxt_tx_ring_info *txr;
462 	struct bnxt_sw_tx_bd *tx_buf;
463 	__le32 lflags = 0;
464 
465 	i = skb_get_queue_mapping(skb);
466 	if (unlikely(i >= bp->tx_nr_rings)) {
467 		dev_kfree_skb_any(skb);
468 		dev_core_stats_tx_dropped_inc(dev);
469 		return NETDEV_TX_OK;
470 	}
471 
472 	txq = netdev_get_tx_queue(dev, i);
473 	txr = &bp->tx_ring[bp->tx_ring_map[i]];
474 	prod = txr->tx_prod;
475 
476 	free_size = bnxt_tx_avail(bp, txr);
477 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
478 		/* We must have raced with NAPI cleanup */
479 		if (net_ratelimit() && txr->kick_pending)
480 			netif_warn(bp, tx_err, dev,
481 				   "bnxt: ring busy w/ flush pending!\n");
482 		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
483 					bp->tx_wake_thresh))
484 			return NETDEV_TX_BUSY;
485 	}
486 
487 	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
488 		goto tx_free;
489 
490 	length = skb->len;
491 	len = skb_headlen(skb);
492 	last_frag = skb_shinfo(skb)->nr_frags;
493 
494 	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
495 
496 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
497 	tx_buf->skb = skb;
498 	tx_buf->nr_frags = last_frag;
499 
500 	vlan_tag_flags = 0;
501 	cfa_action = bnxt_xmit_get_cfa_action(skb);
502 	if (skb_vlan_tag_present(skb)) {
503 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
504 				 skb_vlan_tag_get(skb);
505 		/* Currently supports 802.1Q and 802.1ad VLAN offloads;
506 		 * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
507 		 */
508 		if (skb->vlan_proto == htons(ETH_P_8021Q))
509 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
510 	}
511 
512 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
513 		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
514 
515 		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
516 		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
517 			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
518 					    &ptp->tx_hdr_off)) {
519 				if (vlan_tag_flags)
520 					ptp->tx_hdr_off += VLAN_HLEN;
521 				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
522 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
523 			} else {
524 				atomic_inc(&bp->ptp_cfg->tx_avail);
525 			}
526 		}
527 	}
528 
529 	if (unlikely(skb->no_fcs))
530 		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
531 
532 	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
533 	    !lflags) {
534 		struct tx_push_buffer *tx_push_buf = txr->tx_push;
535 		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
536 		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
537 		void __iomem *db = txr->tx_db.doorbell;
538 		void *pdata = tx_push_buf->data;
539 		u64 *end;
540 		int j, push_len;
541 
542 		/* Set COAL_NOW to be ready quickly for the next push */
543 		tx_push->tx_bd_len_flags_type =
544 			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
545 					TX_BD_TYPE_LONG_TX_BD |
546 					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
547 					TX_BD_FLAGS_COAL_NOW |
548 					TX_BD_FLAGS_PACKET_END |
549 					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
550 
551 		if (skb->ip_summed == CHECKSUM_PARTIAL)
552 			tx_push1->tx_bd_hsize_lflags =
553 					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
554 		else
555 			tx_push1->tx_bd_hsize_lflags = 0;
556 
557 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
558 		tx_push1->tx_bd_cfa_action =
559 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
560 
561 		end = pdata + length;
562 		end = PTR_ALIGN(end, 8) - 1;
563 		*end = 0;
564 
565 		skb_copy_from_linear_data(skb, pdata, len);
566 		pdata += len;
567 		for (j = 0; j < last_frag; j++) {
568 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
569 			void *fptr;
570 
571 			fptr = skb_frag_address_safe(frag);
572 			if (!fptr)
573 				goto normal_tx;
574 
575 			memcpy(pdata, fptr, skb_frag_size(frag));
576 			pdata += skb_frag_size(frag);
577 		}
578 
579 		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
580 		txbd->tx_bd_haddr = txr->data_mapping;
581 		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
582 		prod = NEXT_TX(prod);
583 		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
584 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
585 		memcpy(txbd, tx_push1, sizeof(*txbd));
586 		prod = NEXT_TX(prod);
587 		tx_push->doorbell =
588 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
589 				    DB_RING_IDX(&txr->tx_db, prod));
590 		WRITE_ONCE(txr->tx_prod, prod);
591 
592 		tx_buf->is_push = 1;
593 		netdev_tx_sent_queue(txq, skb->len);
594 		wmb();	/* Sync is_push and byte queue before pushing data */
595 
596 		push_len = (length + sizeof(*tx_push) + 7) / 8;
597 		if (push_len > 16) {
598 			__iowrite64_copy(db, tx_push_buf, 16);
599 			__iowrite32_copy(db + 4, tx_push_buf + 1,
600 					 (push_len - 16) << 1);
601 		} else {
602 			__iowrite64_copy(db, tx_push_buf, push_len);
603 		}
604 
605 		goto tx_done;
606 	}
607 
608 normal_tx:
609 	if (length < BNXT_MIN_PKT_SIZE) {
610 		pad = BNXT_MIN_PKT_SIZE - length;
611 		if (skb_pad(skb, pad))
612 			/* SKB already freed. */
613 			goto tx_kick_pending;
614 		length = BNXT_MIN_PKT_SIZE;
615 	}
616 
617 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
618 
619 	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
620 		goto tx_free;
621 
622 	dma_unmap_addr_set(tx_buf, mapping, mapping);
623 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
624 		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
625 
626 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
627 	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
628 
629 	prod = NEXT_TX(prod);
630 	txbd1 = (struct tx_bd_ext *)
631 		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
632 
633 	txbd1->tx_bd_hsize_lflags = lflags;
634 	if (skb_is_gso(skb)) {
635 		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
636 		u32 hdr_len;
637 
638 		if (skb->encapsulation) {
639 			if (udp_gso)
640 				hdr_len = skb_inner_transport_offset(skb) +
641 					  sizeof(struct udphdr);
642 			else
643 				hdr_len = skb_inner_tcp_all_headers(skb);
644 		} else if (udp_gso) {
645 			hdr_len = skb_transport_offset(skb) +
646 				  sizeof(struct udphdr);
647 		} else {
648 			hdr_len = skb_tcp_all_headers(skb);
649 		}
650 
651 		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
652 					TX_BD_FLAGS_T_IPID |
653 					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
654 		length = skb_shinfo(skb)->gso_size;
655 		txbd1->tx_bd_mss = cpu_to_le32(length);
656 		length += hdr_len;
657 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
658 		txbd1->tx_bd_hsize_lflags |=
659 			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
660 		txbd1->tx_bd_mss = 0;
661 	}
662 
663 	length >>= 9;
664 	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
665 		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
666 				     skb->len);
667 		i = 0;
668 		goto tx_dma_error;
669 	}
670 	flags |= bnxt_lhint_arr[length];
671 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
672 
673 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
674 	txbd1->tx_bd_cfa_action =
675 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
676 	txbd0 = txbd;
677 	for (i = 0; i < last_frag; i++) {
678 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
679 
680 		prod = NEXT_TX(prod);
681 		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
682 
683 		len = skb_frag_size(frag);
684 		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
685 					   DMA_TO_DEVICE);
686 
687 		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
688 			goto tx_dma_error;
689 
690 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
691 		dma_unmap_addr_set(tx_buf, mapping, mapping);
692 
693 		txbd->tx_bd_haddr = cpu_to_le64(mapping);
694 
695 		flags = len << TX_BD_LEN_SHIFT;
696 		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
697 	}
698 
699 	flags &= ~TX_BD_LEN;
700 	txbd->tx_bd_len_flags_type =
701 		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
702 			    TX_BD_FLAGS_PACKET_END);
703 
704 	netdev_tx_sent_queue(txq, skb->len);
705 
706 	skb_tx_timestamp(skb);
707 
708 	prod = NEXT_TX(prod);
709 	WRITE_ONCE(txr->tx_prod, prod);
710 
711 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
712 		bnxt_txr_db_kick(bp, txr, prod);
713 	} else {
714 		if (free_size >= bp->tx_wake_thresh)
715 			txbd0->tx_bd_len_flags_type |=
716 				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
717 		txr->kick_pending = 1;
718 	}
719 
720 tx_done:
721 
722 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
723 		if (netdev_xmit_more() && !tx_buf->is_push) {
724 			txbd0->tx_bd_len_flags_type &=
725 				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
726 			bnxt_txr_db_kick(bp, txr, prod);
727 		}
728 
729 		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
730 				   bp->tx_wake_thresh);
731 	}
732 	return NETDEV_TX_OK;
733 
734 tx_dma_error:
735 	if (BNXT_TX_PTP_IS_SET(lflags))
736 		atomic_inc(&bp->ptp_cfg->tx_avail);
737 
738 	last_frag = i;
739 
740 	/* start back at beginning and unmap skb */
741 	prod = txr->tx_prod;
742 	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
743 	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
744 			 skb_headlen(skb), DMA_TO_DEVICE);
745 	prod = NEXT_TX(prod);
746 
747 	/* unmap remaining mapped pages */
748 	for (i = 0; i < last_frag; i++) {
749 		prod = NEXT_TX(prod);
750 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
751 		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
752 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
753 			       DMA_TO_DEVICE);
754 	}
755 
756 tx_free:
757 	dev_kfree_skb_any(skb);
758 tx_kick_pending:
759 	if (txr->kick_pending)
760 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
761 	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
762 	dev_core_stats_tx_dropped_inc(dev);
763 	return NETDEV_TX_OK;
764 }
765 
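/* Reclaim TX buffers up to the hardware consumer index: unmap DMA
 * buffers, hand timestamped skbs to the PTP handler on P5 chips, free
 * the rest, and wake the TX queue if enough room has been freed.
 */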
766 static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
767 			  int budget)
768 {
769 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
770 	struct pci_dev *pdev = bp->pdev;
771 	u16 hw_cons = txr->tx_hw_cons;
772 	unsigned int tx_bytes = 0;
773 	u16 cons = txr->tx_cons;
774 	int tx_pkts = 0;
775 
776 	while (RING_TX(bp, cons) != hw_cons) {
777 		struct bnxt_sw_tx_bd *tx_buf;
778 		struct sk_buff *skb;
779 		int j, last;
780 
781 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
782 		cons = NEXT_TX(cons);
783 		skb = tx_buf->skb;
784 		tx_buf->skb = NULL;
785 
786 		if (unlikely(!skb)) {
787 			bnxt_sched_reset_txr(bp, txr, cons);
788 			return;
789 		}
790 
791 		tx_pkts++;
792 		tx_bytes += skb->len;
793 
794 		if (tx_buf->is_push) {
795 			tx_buf->is_push = 0;
796 			goto next_tx_int;
797 		}
798 
799 		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
800 				 skb_headlen(skb), DMA_TO_DEVICE);
801 		last = tx_buf->nr_frags;
802 
803 		for (j = 0; j < last; j++) {
804 			cons = NEXT_TX(cons);
805 			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
806 			dma_unmap_page(
807 				&pdev->dev,
808 				dma_unmap_addr(tx_buf, mapping),
809 				skb_frag_size(&skb_shinfo(skb)->frags[j]),
810 				DMA_TO_DEVICE);
811 		}
812 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
813 			if (BNXT_CHIP_P5(bp)) {
814 				/* PTP worker takes ownership of the skb */
815 				if (!bnxt_get_tx_ts_p5(bp, skb))
816 					skb = NULL;
817 				else
818 					atomic_inc(&bp->ptp_cfg->tx_avail);
819 			}
820 		}
821 
822 next_tx_int:
823 		cons = NEXT_TX(cons);
824 
825 		dev_consume_skb_any(skb);
826 	}
827 
828 	WRITE_ONCE(txr->tx_cons, cons);
829 
830 	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
831 				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
832 				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
833 }
834 
835 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
836 {
837 	struct bnxt_tx_ring_info *txr;
838 	int i;
839 
840 	bnxt_for_each_napi_tx(i, bnapi, txr) {
841 		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
842 			__bnxt_tx_int(bp, txr, budget);
843 	}
844 	bnapi->events &= ~BNXT_TX_CMP_EVENT;
845 }
846 
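/* Allocate an RX page from the ring's page pool, or a page fragment of
 * BNXT_RX_PAGE_SIZE when PAGE_SIZE is larger, and return its DMA
 * address through @mapping.
 */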
847 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
848 					 struct bnxt_rx_ring_info *rxr,
849 					 unsigned int *offset,
850 					 gfp_t gfp)
851 {
852 	struct page *page;
853 
854 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
855 		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
856 						BNXT_RX_PAGE_SIZE);
857 	} else {
858 		page = page_pool_dev_alloc_pages(rxr->page_pool);
859 		*offset = 0;
860 	}
861 	if (!page)
862 		return NULL;
863 
864 	*mapping = page_pool_get_dma_addr(page) + *offset;
865 	return page;
866 }
867 
868 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
869 				       gfp_t gfp)
870 {
871 	u8 *data;
872 	struct pci_dev *pdev = bp->pdev;
873 
874 	if (gfp == GFP_ATOMIC)
875 		data = napi_alloc_frag(bp->rx_buf_size);
876 	else
877 		data = netdev_alloc_frag(bp->rx_buf_size);
878 	if (!data)
879 		return NULL;
880 
881 	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
882 					bp->rx_buf_use_size, bp->rx_dir,
883 					DMA_ATTR_WEAK_ORDERING);
884 
885 	if (dma_mapping_error(&pdev->dev, *mapping)) {
886 		skb_free_frag(data);
887 		data = NULL;
888 	}
889 	return data;
890 }
891 
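/* Fill the RX descriptor and software buffer at @prod with a newly
 * allocated buffer: a page-pool page in page mode, otherwise a
 * DMA-mapped data fragment.  Returns 0 on success or -ENOMEM.
 */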
892 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
893 		       u16 prod, gfp_t gfp)
894 {
895 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
896 	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
897 	dma_addr_t mapping;
898 
899 	if (BNXT_RX_PAGE_MODE(bp)) {
900 		unsigned int offset;
901 		struct page *page =
902 			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
903 
904 		if (!page)
905 			return -ENOMEM;
906 
907 		mapping += bp->rx_dma_offset;
908 		rx_buf->data = page;
909 		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
910 	} else {
911 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
912 
913 		if (!data)
914 			return -ENOMEM;
915 
916 		rx_buf->data = data;
917 		rx_buf->data_ptr = data + bp->rx_offset;
918 	}
919 	rx_buf->mapping = mapping;
920 
921 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
922 	return 0;
923 }
924 
925 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
926 {
927 	u16 prod = rxr->rx_prod;
928 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
929 	struct bnxt *bp = rxr->bnapi->bp;
930 	struct rx_bd *cons_bd, *prod_bd;
931 
932 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
933 	cons_rx_buf = &rxr->rx_buf_ring[cons];
934 
935 	prod_rx_buf->data = data;
936 	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
937 
938 	prod_rx_buf->mapping = cons_rx_buf->mapping;
939 
940 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
941 	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
942 
943 	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
944 }
945 
946 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
947 {
948 	u16 next, max = rxr->rx_agg_bmap_size;
949 
950 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
951 	if (next >= max)
952 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
953 	return next;
954 }
955 
956 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
957 				     struct bnxt_rx_ring_info *rxr,
958 				     u16 prod, gfp_t gfp)
959 {
960 	struct rx_bd *rxbd =
961 		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
962 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
963 	struct page *page;
964 	dma_addr_t mapping;
965 	u16 sw_prod = rxr->rx_sw_agg_prod;
966 	unsigned int offset = 0;
967 
968 	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
969 
970 	if (!page)
971 		return -ENOMEM;
972 
973 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
974 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
975 
976 	__set_bit(sw_prod, rxr->rx_agg_bmap);
977 	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
978 	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
979 
980 	rx_agg_buf->page = page;
981 	rx_agg_buf->offset = offset;
982 	rx_agg_buf->mapping = mapping;
983 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
984 	rxbd->rx_bd_opaque = sw_prod;
985 	return 0;
986 }
987 
988 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
989 				       struct bnxt_cp_ring_info *cpr,
990 				       u16 cp_cons, u16 curr)
991 {
992 	struct rx_agg_cmp *agg;
993 
994 	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
995 	agg = (struct rx_agg_cmp *)
996 		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
997 	return agg;
998 }
999 
1000 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1001 					      struct bnxt_rx_ring_info *rxr,
1002 					      u16 agg_id, u16 curr)
1003 {
1004 	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1005 
1006 	return &tpa_info->agg_arr[curr];
1007 }
1008 
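/* Return @agg_bufs aggregation buffers, starting at completion offset
 * @start, to the RX aggregation ring at the current producer positions
 * so their pages are reused rather than freed (e.g. when a packet must
 * be dropped).
 */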
1009 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1010 				   u16 start, u32 agg_bufs, bool tpa)
1011 {
1012 	struct bnxt_napi *bnapi = cpr->bnapi;
1013 	struct bnxt *bp = bnapi->bp;
1014 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1015 	u16 prod = rxr->rx_agg_prod;
1016 	u16 sw_prod = rxr->rx_sw_agg_prod;
1017 	bool p5_tpa = false;
1018 	u32 i;
1019 
1020 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1021 		p5_tpa = true;
1022 
1023 	for (i = 0; i < agg_bufs; i++) {
1024 		u16 cons;
1025 		struct rx_agg_cmp *agg;
1026 		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1027 		struct rx_bd *prod_bd;
1028 		struct page *page;
1029 
1030 		if (p5_tpa)
1031 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1032 		else
1033 			agg = bnxt_get_agg(bp, cpr, idx, start + i);
1034 		cons = agg->rx_agg_cmp_opaque;
1035 		__clear_bit(cons, rxr->rx_agg_bmap);
1036 
1037 		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1038 			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1039 
1040 		__set_bit(sw_prod, rxr->rx_agg_bmap);
1041 		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1042 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1043 
1044 		/* It is possible for sw_prod to be equal to cons, so
1045 		 * set cons_rx_buf->page to NULL first.
1046 		 */
1047 		page = cons_rx_buf->page;
1048 		cons_rx_buf->page = NULL;
1049 		prod_rx_buf->page = page;
1050 		prod_rx_buf->offset = cons_rx_buf->offset;
1051 
1052 		prod_rx_buf->mapping = cons_rx_buf->mapping;
1053 
1054 		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1055 
1056 		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1057 		prod_bd->rx_bd_opaque = sw_prod;
1058 
1059 		prod = NEXT_RX_AGG(prod);
1060 		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1061 	}
1062 	rxr->rx_agg_prod = prod;
1063 	rxr->rx_sw_agg_prod = sw_prod;
1064 }
1065 
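/* Build an skb on top of a full RX page in page mode: allocate a
 * replacement buffer for the ring slot first (reusing the current one
 * and dropping the packet on failure), then wrap the page with
 * napi_build_skb() and mark it for page-pool recycling.
 */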
1066 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1067 					      struct bnxt_rx_ring_info *rxr,
1068 					      u16 cons, void *data, u8 *data_ptr,
1069 					      dma_addr_t dma_addr,
1070 					      unsigned int offset_and_len)
1071 {
1072 	unsigned int len = offset_and_len & 0xffff;
1073 	struct page *page = data;
1074 	u16 prod = rxr->rx_prod;
1075 	struct sk_buff *skb;
1076 	int err;
1077 
1078 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1079 	if (unlikely(err)) {
1080 		bnxt_reuse_rx_data(rxr, cons, data);
1081 		return NULL;
1082 	}
1083 	dma_addr -= bp->rx_dma_offset;
1084 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1085 				bp->rx_dir);
1086 	skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1087 	if (!skb) {
1088 		page_pool_recycle_direct(rxr->page_pool, page);
1089 		return NULL;
1090 	}
1091 	skb_mark_for_recycle(skb);
1092 	skb_reserve(skb, bp->rx_offset);
1093 	__skb_put(skb, len);
1094 
1095 	return skb;
1096 }
1097 
1098 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1099 					struct bnxt_rx_ring_info *rxr,
1100 					u16 cons, void *data, u8 *data_ptr,
1101 					dma_addr_t dma_addr,
1102 					unsigned int offset_and_len)
1103 {
1104 	unsigned int payload = offset_and_len >> 16;
1105 	unsigned int len = offset_and_len & 0xffff;
1106 	skb_frag_t *frag;
1107 	struct page *page = data;
1108 	u16 prod = rxr->rx_prod;
1109 	struct sk_buff *skb;
1110 	int off, err;
1111 
1112 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1113 	if (unlikely(err)) {
1114 		bnxt_reuse_rx_data(rxr, cons, data);
1115 		return NULL;
1116 	}
1117 	dma_addr -= bp->rx_dma_offset;
1118 	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1119 				bp->rx_dir);
1120 
1121 	if (unlikely(!payload))
1122 		payload = eth_get_headlen(bp->dev, data_ptr, len);
1123 
1124 	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1125 	if (!skb) {
1126 		page_pool_recycle_direct(rxr->page_pool, page);
1127 		return NULL;
1128 	}
1129 
1130 	skb_mark_for_recycle(skb);
1131 	off = (void *)data_ptr - page_address(page);
1132 	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1133 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1134 	       payload + NET_IP_ALIGN);
1135 
1136 	frag = &skb_shinfo(skb)->frags[0];
1137 	skb_frag_size_sub(frag, payload);
1138 	skb_frag_off_add(frag, payload);
1139 	skb->data_len -= payload;
1140 	skb->tail += payload;
1141 
1142 	return skb;
1143 }
1144 
1145 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1146 				   struct bnxt_rx_ring_info *rxr, u16 cons,
1147 				   void *data, u8 *data_ptr,
1148 				   dma_addr_t dma_addr,
1149 				   unsigned int offset_and_len)
1150 {
1151 	u16 prod = rxr->rx_prod;
1152 	struct sk_buff *skb;
1153 	int err;
1154 
1155 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1156 	if (unlikely(err)) {
1157 		bnxt_reuse_rx_data(rxr, cons, data);
1158 		return NULL;
1159 	}
1160 
1161 	skb = napi_build_skb(data, bp->rx_buf_size);
1162 	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1163 			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1164 	if (!skb) {
1165 		skb_free_frag(data);
1166 		return NULL;
1167 	}
1168 
1169 	skb_reserve(skb, bp->rx_offset);
1170 	skb_put(skb, offset_and_len & 0xffff);
1171 	return skb;
1172 }
1173 
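/* Attach the aggregation pages of one packet as fragments of @shinfo,
 * replenishing the aggregation ring as each page is consumed.  Returns
 * the total fragment length, or 0 if a replacement page could not be
 * allocated (the caller must then drop the packet).
 */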
1174 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1175 			       struct bnxt_cp_ring_info *cpr,
1176 			       struct skb_shared_info *shinfo,
1177 			       u16 idx, u32 agg_bufs, bool tpa,
1178 			       struct xdp_buff *xdp)
1179 {
1180 	struct bnxt_napi *bnapi = cpr->bnapi;
1181 	struct pci_dev *pdev = bp->pdev;
1182 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1183 	u16 prod = rxr->rx_agg_prod;
1184 	u32 i, total_frag_len = 0;
1185 	bool p5_tpa = false;
1186 
1187 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1188 		p5_tpa = true;
1189 
1190 	for (i = 0; i < agg_bufs; i++) {
1191 		skb_frag_t *frag = &shinfo->frags[i];
1192 		u16 cons, frag_len;
1193 		struct rx_agg_cmp *agg;
1194 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1195 		struct page *page;
1196 		dma_addr_t mapping;
1197 
1198 		if (p5_tpa)
1199 			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1200 		else
1201 			agg = bnxt_get_agg(bp, cpr, idx, i);
1202 		cons = agg->rx_agg_cmp_opaque;
1203 		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1204 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1205 
1206 		cons_rx_buf = &rxr->rx_agg_ring[cons];
1207 		skb_frag_fill_page_desc(frag, cons_rx_buf->page,
1208 					cons_rx_buf->offset, frag_len);
1209 		shinfo->nr_frags = i + 1;
1210 		__clear_bit(cons, rxr->rx_agg_bmap);
1211 
1212 		/* It is possible for bnxt_alloc_rx_page() to allocate
1213 		 * a sw_prod index that equals the cons index, so we
1214 		 * need to clear the cons entry now.
1215 		 */
1216 		mapping = cons_rx_buf->mapping;
1217 		page = cons_rx_buf->page;
1218 		cons_rx_buf->page = NULL;
1219 
1220 		if (xdp && page_is_pfmemalloc(page))
1221 			xdp_buff_set_frag_pfmemalloc(xdp);
1222 
1223 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1224 			--shinfo->nr_frags;
1225 			cons_rx_buf->page = page;
1226 
1227 			/* Update prod since possibly some pages have been
1228 			 * allocated already.
1229 			 */
1230 			rxr->rx_agg_prod = prod;
1231 			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1232 			return 0;
1233 		}
1234 
1235 		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1236 					bp->rx_dir);
1237 
1238 		total_frag_len += frag_len;
1239 		prod = NEXT_RX_AGG(prod);
1240 	}
1241 	rxr->rx_agg_prod = prod;
1242 	return total_frag_len;
1243 }
1244 
1245 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1246 					     struct bnxt_cp_ring_info *cpr,
1247 					     struct sk_buff *skb, u16 idx,
1248 					     u32 agg_bufs, bool tpa)
1249 {
1250 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1251 	u32 total_frag_len = 0;
1252 
1253 	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1254 					     agg_bufs, tpa, NULL);
1255 	if (!total_frag_len) {
1256 		skb_mark_for_recycle(skb);
1257 		dev_kfree_skb(skb);
1258 		return NULL;
1259 	}
1260 
1261 	skb->data_len += total_frag_len;
1262 	skb->len += total_frag_len;
1263 	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
1264 	return skb;
1265 }
1266 
1267 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1268 				 struct bnxt_cp_ring_info *cpr,
1269 				 struct xdp_buff *xdp, u16 idx,
1270 				 u32 agg_bufs, bool tpa)
1271 {
1272 	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1273 	u32 total_frag_len = 0;
1274 
1275 	if (!xdp_buff_has_frags(xdp))
1276 		shinfo->nr_frags = 0;
1277 
1278 	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1279 					     idx, agg_bufs, tpa, xdp);
1280 	if (total_frag_len) {
1281 		xdp_buff_set_frags_flag(xdp);
1282 		shinfo->nr_frags = agg_bufs;
1283 		shinfo->xdp_frags_size = total_frag_len;
1284 	}
1285 	return total_frag_len;
1286 }
1287 
1288 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1289 			       u8 agg_bufs, u32 *raw_cons)
1290 {
1291 	u16 last;
1292 	struct rx_agg_cmp *agg;
1293 
1294 	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1295 	last = RING_CMP(*raw_cons);
1296 	agg = (struct rx_agg_cmp *)
1297 		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1298 	return RX_AGG_CMP_VALID(agg, *raw_cons);
1299 }
1300 
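/* Copy-break helper: copy a small received packet into a new skb so the
 * original RX buffer can stay on the ring.  The buffer is synced for
 * CPU access, copied, then handed back to the device.
 */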
1301 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1302 				      unsigned int len,
1303 				      dma_addr_t mapping)
1304 {
1305 	struct bnxt *bp = bnapi->bp;
1306 	struct pci_dev *pdev = bp->pdev;
1307 	struct sk_buff *skb;
1308 
1309 	skb = napi_alloc_skb(&bnapi->napi, len);
1310 	if (!skb)
1311 		return NULL;
1312 
1313 	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1314 				bp->rx_dir);
1315 
1316 	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1317 	       len + NET_IP_ALIGN);
1318 
1319 	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1320 				   bp->rx_dir);
1321 
1322 	skb_put(skb, len);
1323 
1324 	return skb;
1325 }
1326 
1327 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1328 				     unsigned int len,
1329 				     dma_addr_t mapping)
1330 {
1331 	return bnxt_copy_data(bnapi, data, len, mapping);
1332 }
1333 
1334 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1335 				     struct xdp_buff *xdp,
1336 				     unsigned int len,
1337 				     dma_addr_t mapping)
1338 {
1339 	unsigned int metasize = 0;
1340 	u8 *data = xdp->data;
1341 	struct sk_buff *skb;
1342 
1343 	len = xdp->data_end - xdp->data_meta;
1344 	metasize = xdp->data - xdp->data_meta;
1345 	data = xdp->data_meta;
1346 
1347 	skb = bnxt_copy_data(bnapi, data, len, mapping);
1348 	if (!skb)
1349 		return skb;
1350 
1351 	if (metasize) {
1352 		skb_metadata_set(skb, metasize);
1353 		__skb_pull(skb, metasize);
1354 	}
1355 
1356 	return skb;
1357 }
1358 
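/* Advance the completion ring consumer past an RX packet that is being
 * dropped.  Returns -EBUSY if not all of its aggregation completions
 * have arrived yet.
 */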
1359 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1360 			   u32 *raw_cons, void *cmp)
1361 {
1362 	struct rx_cmp *rxcmp = cmp;
1363 	u32 tmp_raw_cons = *raw_cons;
1364 	u8 cmp_type, agg_bufs = 0;
1365 
1366 	cmp_type = RX_CMP_TYPE(rxcmp);
1367 
1368 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1369 		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1370 			    RX_CMP_AGG_BUFS) >>
1371 			   RX_CMP_AGG_BUFS_SHIFT;
1372 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1373 		struct rx_tpa_end_cmp *tpa_end = cmp;
1374 
1375 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1376 			return 0;
1377 
1378 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1379 	}
1380 
1381 	if (agg_bufs) {
1382 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1383 			return -EBUSY;
1384 	}
1385 	*raw_cons = tmp_raw_cons;
1386 	return 0;
1387 }
1388 
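/* Map a hardware TPA aggregation ID to a free software index (P5+
 * chips) and record the mapping so TPA_END processing can look it up.
 */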
1389 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1390 {
1391 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1392 	u16 idx = agg_id & MAX_TPA_P5_MASK;
1393 
1394 	if (test_bit(idx, map->agg_idx_bmap))
1395 		idx = find_first_zero_bit(map->agg_idx_bmap,
1396 					  BNXT_AGG_IDX_BMAP_SIZE);
1397 	__set_bit(idx, map->agg_idx_bmap);
1398 	map->agg_id_tbl[agg_id] = idx;
1399 	return idx;
1400 }
1401 
1402 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1403 {
1404 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1405 
1406 	__clear_bit(idx, map->agg_idx_bmap);
1407 }
1408 
1409 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1410 {
1411 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1412 
1413 	return map->agg_id_tbl[agg_id];
1414 }
1415 
1416 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1417 			      struct rx_tpa_start_cmp *tpa_start,
1418 			      struct rx_tpa_start_cmp_ext *tpa_start1)
1419 {
1420 	tpa_info->cfa_code_valid = 1;
1421 	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1422 	tpa_info->vlan_valid = 0;
1423 	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1424 		tpa_info->vlan_valid = 1;
1425 		tpa_info->metadata =
1426 			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1427 	}
1428 }
1429 
1430 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1431 				 struct rx_tpa_start_cmp *tpa_start,
1432 				 struct rx_tpa_start_cmp_ext *tpa_start1)
1433 {
1434 	tpa_info->vlan_valid = 0;
1435 	if (TPA_START_VLAN_VALID(tpa_start)) {
1436 		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1437 		u32 vlan_proto = ETH_P_8021Q;
1438 
1439 		tpa_info->vlan_valid = 1;
1440 		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1441 			vlan_proto = ETH_P_8021AD;
1442 		tpa_info->metadata = vlan_proto << 16 |
1443 				     TPA_START_METADATA0_TCI(tpa_start1);
1444 	}
1445 }
1446 
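/* Handle a TPA_START completion: swap the current RX buffer into the
 * per-aggregation tpa_info, recycle its ring slot, and record the
 * length, hash, GSO type and VLAN metadata needed when the aggregation
 * ends.
 */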
1447 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1448 			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1449 			   struct rx_tpa_start_cmp_ext *tpa_start1)
1450 {
1451 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1452 	struct bnxt_tpa_info *tpa_info;
1453 	u16 cons, prod, agg_id;
1454 	struct rx_bd *prod_bd;
1455 	dma_addr_t mapping;
1456 
1457 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1458 		agg_id = TPA_START_AGG_ID_P5(tpa_start);
1459 		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1460 	} else {
1461 		agg_id = TPA_START_AGG_ID(tpa_start);
1462 	}
1463 	cons = tpa_start->rx_tpa_start_cmp_opaque;
1464 	prod = rxr->rx_prod;
1465 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1466 	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1467 	tpa_info = &rxr->rx_tpa[agg_id];
1468 
1469 	if (unlikely(cons != rxr->rx_next_cons ||
1470 		     TPA_START_ERROR(tpa_start))) {
1471 		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1472 			    cons, rxr->rx_next_cons,
1473 			    TPA_START_ERROR_CODE(tpa_start1));
1474 		bnxt_sched_reset_rxr(bp, rxr);
1475 		return;
1476 	}
1477 	prod_rx_buf->data = tpa_info->data;
1478 	prod_rx_buf->data_ptr = tpa_info->data_ptr;
1479 
1480 	mapping = tpa_info->mapping;
1481 	prod_rx_buf->mapping = mapping;
1482 
1483 	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1484 
1485 	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1486 
1487 	tpa_info->data = cons_rx_buf->data;
1488 	tpa_info->data_ptr = cons_rx_buf->data_ptr;
1489 	cons_rx_buf->data = NULL;
1490 	tpa_info->mapping = cons_rx_buf->mapping;
1491 
1492 	tpa_info->len =
1493 		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1494 				RX_TPA_START_CMP_LEN_SHIFT;
1495 	if (likely(TPA_START_HASH_VALID(tpa_start))) {
1496 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
1497 		tpa_info->gso_type = SKB_GSO_TCPV4;
1498 		if (TPA_START_IS_IPV6(tpa_start1))
1499 			tpa_info->gso_type = SKB_GSO_TCPV6;
1500 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1501 		else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
1502 			 TPA_START_HASH_TYPE(tpa_start) == 3)
1503 			tpa_info->gso_type = SKB_GSO_TCPV6;
1504 		tpa_info->rss_hash =
1505 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1506 	} else {
1507 		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1508 		tpa_info->gso_type = 0;
1509 		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1510 	}
1511 	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1512 	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1513 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1514 		bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1515 	else
1516 		bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1517 	tpa_info->agg_count = 0;
1518 
1519 	rxr->rx_prod = NEXT_RX(prod);
1520 	cons = RING_RX(bp, NEXT_RX(cons));
1521 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1522 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1523 
1524 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1525 	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1526 	cons_rx_buf->data = NULL;
1527 }
1528 
1529 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1530 {
1531 	if (agg_bufs)
1532 		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1533 }
1534 
1535 #ifdef CONFIG_INET
1536 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1537 {
1538 	struct udphdr *uh = NULL;
1539 
1540 	if (ip_proto == htons(ETH_P_IP)) {
1541 		struct iphdr *iph = (struct iphdr *)skb->data;
1542 
1543 		if (iph->protocol == IPPROTO_UDP)
1544 			uh = (struct udphdr *)(iph + 1);
1545 	} else {
1546 		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1547 
1548 		if (iph->nexthdr == IPPROTO_UDP)
1549 			uh = (struct udphdr *)(iph + 1);
1550 	}
1551 	if (uh) {
1552 		if (uh->check)
1553 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1554 		else
1555 			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1556 	}
1557 }
1558 #endif
1559 
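/* Chip-specific GRO fixup for 5731X: locate the inner network and
 * transport headers from the TPA_START header info (adjusting for
 * internal loopback packets), set up the TCP pseudo checksum, and mark
 * UDP-tunneled packets with the proper GSO type.
 */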
1560 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1561 					   int payload_off, int tcp_ts,
1562 					   struct sk_buff *skb)
1563 {
1564 #ifdef CONFIG_INET
1565 	struct tcphdr *th;
1566 	int len, nw_off;
1567 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1568 	u32 hdr_info = tpa_info->hdr_info;
1569 	bool loopback = false;
1570 
1571 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1572 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1573 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1574 
1575 	/* If the packet is an internal loopback packet, the offsets will
1576 	 * have an extra 4 bytes.
1577 	 */
1578 	if (inner_mac_off == 4) {
1579 		loopback = true;
1580 	} else if (inner_mac_off > 4) {
1581 		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1582 					    ETH_HLEN - 2));
1583 
1584 		 * We only support inner IPv4/IPv6.  If we don't see the
1585 		 * correct protocol ID, it must be a loopback packet where
1586 		 * the offsets are off by 4.
1587 		 */
1588 		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1589 			loopback = true;
1590 	}
1591 	if (loopback) {
1592 		/* internal loopback packet, subtract 4 from all offsets */
1593 		inner_ip_off -= 4;
1594 		inner_mac_off -= 4;
1595 		outer_ip_off -= 4;
1596 	}
1597 
1598 	nw_off = inner_ip_off - ETH_HLEN;
1599 	skb_set_network_header(skb, nw_off);
1600 	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1601 		struct ipv6hdr *iph = ipv6_hdr(skb);
1602 
1603 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1604 		len = skb->len - skb_transport_offset(skb);
1605 		th = tcp_hdr(skb);
1606 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1607 	} else {
1608 		struct iphdr *iph = ip_hdr(skb);
1609 
1610 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1611 		len = skb->len - skb_transport_offset(skb);
1612 		th = tcp_hdr(skb);
1613 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1614 	}
1615 
1616 	if (inner_mac_off) { /* tunnel */
1617 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1618 					    ETH_HLEN - 2));
1619 
1620 		bnxt_gro_tunnel(skb, proto);
1621 	}
1622 #endif
1623 	return skb;
1624 }
1625 
1626 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1627 					   int payload_off, int tcp_ts,
1628 					   struct sk_buff *skb)
1629 {
1630 #ifdef CONFIG_INET
1631 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1632 	u32 hdr_info = tpa_info->hdr_info;
1633 	int iphdr_len, nw_off;
1634 
1635 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1636 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1637 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1638 
1639 	nw_off = inner_ip_off - ETH_HLEN;
1640 	skb_set_network_header(skb, nw_off);
1641 	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1642 		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1643 	skb_set_transport_header(skb, nw_off + iphdr_len);
1644 
1645 	if (inner_mac_off) { /* tunnel */
1646 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1647 					    ETH_HLEN - 2));
1648 
1649 		bnxt_gro_tunnel(skb, proto);
1650 	}
1651 #endif
1652 	return skb;
1653 }
1654 
1655 #define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
1656 #define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1657 
1658 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1659 					   int payload_off, int tcp_ts,
1660 					   struct sk_buff *skb)
1661 {
1662 #ifdef CONFIG_INET
1663 	struct tcphdr *th;
1664 	int len, nw_off, tcp_opt_len = 0;
1665 
1666 	if (tcp_ts)
1667 		tcp_opt_len = 12;
1668 
1669 	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1670 		struct iphdr *iph;
1671 
1672 		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1673 			 ETH_HLEN;
1674 		skb_set_network_header(skb, nw_off);
1675 		iph = ip_hdr(skb);
1676 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1677 		len = skb->len - skb_transport_offset(skb);
1678 		th = tcp_hdr(skb);
1679 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1680 	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1681 		struct ipv6hdr *iph;
1682 
1683 		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1684 			 ETH_HLEN;
1685 		skb_set_network_header(skb, nw_off);
1686 		iph = ipv6_hdr(skb);
1687 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1688 		len = skb->len - skb_transport_offset(skb);
1689 		th = tcp_hdr(skb);
1690 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1691 	} else {
1692 		dev_kfree_skb_any(skb);
1693 		return NULL;
1694 	}
1695 
1696 	if (nw_off) /* tunnel */
1697 		bnxt_gro_tunnel(skb, skb->protocol);
1698 #endif
1699 	return skb;
1700 }
1701 
1702 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1703 					   struct bnxt_tpa_info *tpa_info,
1704 					   struct rx_tpa_end_cmp *tpa_end,
1705 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1706 					   struct sk_buff *skb)
1707 {
1708 #ifdef CONFIG_INET
1709 	int payload_off;
1710 	u16 segs;
1711 
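	/* A single-segment aggregation needs no GRO fixups.  Otherwise fill
	 * in the GRO metadata and let the chip-specific gro_func repair the
	 * headers before tcp_gro_complete() is called.
	 */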
1712 	segs = TPA_END_TPA_SEGS(tpa_end);
1713 	if (segs == 1)
1714 		return skb;
1715 
1716 	NAPI_GRO_CB(skb)->count = segs;
1717 	skb_shinfo(skb)->gso_size =
1718 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1719 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1720 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1721 		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1722 	else
1723 		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1724 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1725 	if (likely(skb))
1726 		tcp_gro_complete(skb);
1727 #endif
1728 	return skb;
1729 }
1730 
1731 /* Given the cfa_code of a received packet, determine which
1732  * netdev (vf-rep or PF) the packet is destined to.
1733  */
1734 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1735 {
1736 	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1737 
1738 	/* if vf-rep dev is NULL, the packet must belong to the PF */
1739 	return dev ? dev : bp->dev;
1740 }
1741 
1742 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1743 					   struct bnxt_cp_ring_info *cpr,
1744 					   u32 *raw_cons,
1745 					   struct rx_tpa_end_cmp *tpa_end,
1746 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1747 					   u8 *event)
1748 {
1749 	struct bnxt_napi *bnapi = cpr->bnapi;
1750 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1751 	struct net_device *dev = bp->dev;
1752 	u8 *data_ptr, agg_bufs;
1753 	unsigned int len;
1754 	struct bnxt_tpa_info *tpa_info;
1755 	dma_addr_t mapping;
1756 	struct sk_buff *skb;
1757 	u16 idx = 0, agg_id;
1758 	void *data;
1759 	bool gro;
1760 
1761 	if (unlikely(bnapi->in_reset)) {
1762 		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1763 
1764 		if (rc < 0)
1765 			return ERR_PTR(-EBUSY);
1766 		return NULL;
1767 	}
1768 
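	/* P5+ chips report a hardware aggregation ID that must be mapped to
	 * a software index, with the aggregation buffer count carried in the
	 * TPA end completion.  Older chips index rx_tpa directly and the
	 * aggregation buffers follow on the completion ring.
	 */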
1769 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1770 		agg_id = TPA_END_AGG_ID_P5(tpa_end);
1771 		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1772 		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1773 		tpa_info = &rxr->rx_tpa[agg_id];
1774 		if (unlikely(agg_bufs != tpa_info->agg_count)) {
1775 			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1776 				    agg_bufs, tpa_info->agg_count);
1777 			agg_bufs = tpa_info->agg_count;
1778 		}
1779 		tpa_info->agg_count = 0;
1780 		*event |= BNXT_AGG_EVENT;
1781 		bnxt_free_agg_idx(rxr, agg_id);
1782 		idx = agg_id;
1783 		gro = !!(bp->flags & BNXT_FLAG_GRO);
1784 	} else {
1785 		agg_id = TPA_END_AGG_ID(tpa_end);
1786 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1787 		tpa_info = &rxr->rx_tpa[agg_id];
1788 		idx = RING_CMP(*raw_cons);
1789 		if (agg_bufs) {
1790 			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1791 				return ERR_PTR(-EBUSY);
1792 
1793 			*event |= BNXT_AGG_EVENT;
1794 			idx = NEXT_CMP(idx);
1795 		}
1796 		gro = !!TPA_END_GRO(tpa_end);
1797 	}
1798 	data = tpa_info->data;
1799 	data_ptr = tpa_info->data_ptr;
1800 	prefetch(data_ptr);
1801 	len = tpa_info->len;
1802 	mapping = tpa_info->mapping;
1803 
1804 	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1805 		bnxt_abort_tpa(cpr, idx, agg_bufs);
1806 		if (agg_bufs > MAX_SKB_FRAGS)
1807 			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1808 				    agg_bufs, (int)MAX_SKB_FRAGS);
1809 		return NULL;
1810 	}
1811 
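	/* Packets up to rx_copy_thresh are copied into a new skb so the TPA
	 * buffer can be reused in place.  Larger packets get a replacement
	 * buffer allocated first, and the completed buffer is turned into an
	 * skb with napi_build_skb().
	 */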
1812 	if (len <= bp->rx_copy_thresh) {
1813 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1814 		if (!skb) {
1815 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1816 			cpr->sw_stats->rx.rx_oom_discards += 1;
1817 			return NULL;
1818 		}
1819 	} else {
1820 		u8 *new_data;
1821 		dma_addr_t new_mapping;
1822 
1823 		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1824 		if (!new_data) {
1825 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1826 			cpr->sw_stats->rx.rx_oom_discards += 1;
1827 			return NULL;
1828 		}
1829 
1830 		tpa_info->data = new_data;
1831 		tpa_info->data_ptr = new_data + bp->rx_offset;
1832 		tpa_info->mapping = new_mapping;
1833 
1834 		skb = napi_build_skb(data, bp->rx_buf_size);
1835 		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1836 				       bp->rx_buf_use_size, bp->rx_dir,
1837 				       DMA_ATTR_WEAK_ORDERING);
1838 
1839 		if (!skb) {
1840 			skb_free_frag(data);
1841 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1842 			cpr->sw_stats->rx.rx_oom_discards += 1;
1843 			return NULL;
1844 		}
1845 		skb_reserve(skb, bp->rx_offset);
1846 		skb_put(skb, len);
1847 	}
1848 
1849 	if (agg_bufs) {
1850 		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1851 		if (!skb) {
1852 			/* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
1853 			cpr->sw_stats->rx.rx_oom_discards += 1;
1854 			return NULL;
1855 		}
1856 	}
1857 
1858 	if (tpa_info->cfa_code_valid)
1859 		dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1860 	skb->protocol = eth_type_trans(skb, dev);
1861 
1862 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1863 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1864 
1865 	if (tpa_info->vlan_valid &&
1866 	    (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1867 		__be16 vlan_proto = htons(tpa_info->metadata >>
1868 					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
1869 		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1870 
1871 		if (eth_type_vlan(vlan_proto)) {
1872 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1873 		} else {
1874 			dev_kfree_skb(skb);
1875 			return NULL;
1876 		}
1877 	}
1878 
1879 	skb_checksum_none_assert(skb);
1880 	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1881 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1882 		skb->csum_level =
1883 			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1884 	}
1885 
1886 	if (gro)
1887 		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1888 
1889 	return skb;
1890 }
1891 
1892 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1893 			 struct rx_agg_cmp *rx_agg)
1894 {
1895 	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1896 	struct bnxt_tpa_info *tpa_info;
1897 
1898 	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1899 	tpa_info = &rxr->rx_tpa[agg_id];
1900 	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1901 	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1902 }
1903 
1904 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1905 			     struct sk_buff *skb)
1906 {
1907 	skb_mark_for_recycle(skb);
1908 
1909 	if (skb->dev != bp->dev) {
1910 		/* this packet belongs to a vf-rep */
1911 		bnxt_vf_rep_rx(bp, skb);
1912 		return;
1913 	}
1914 	skb_record_rx_queue(skb, bnapi->index);
1915 	napi_gro_receive(&bnapi->napi, skb);
1916 }
1917 
1918 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1919 			     struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
1920 {
1921 	u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1922 
1923 	if (BNXT_PTP_RX_TS_VALID(flags))
1924 		goto ts_valid;
1925 	if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1926 		return false;
1927 
1928 ts_valid:
1929 	*cmpl_ts = ts;
1930 	return true;
1931 }
1932 
1933 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
1934 				    struct rx_cmp *rxcmp,
1935 				    struct rx_cmp_ext *rxcmp1)
1936 {
1937 	__be16 vlan_proto;
1938 	u16 vtag;
1939 
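	/* Legacy L2 completions carry the TCI and TPID in the flags2 and
	 * meta_data words; V3 completions use the metadata0/metadata1 fields.
	 * A packet with an unrecognized TPID is dropped.
	 */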
1940 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1941 		__le32 flags2 = rxcmp1->rx_cmp_flags2;
1942 		u32 meta_data;
1943 
1944 		if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
1945 			return skb;
1946 
1947 		meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1948 		vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1949 		vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
1950 		if (eth_type_vlan(vlan_proto))
1951 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1952 		else
1953 			goto vlan_err;
1954 	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
1955 		if (RX_CMP_VLAN_VALID(rxcmp)) {
1956 			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
1957 
1958 			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
1959 				vlan_proto = htons(ETH_P_8021Q);
1960 			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
1961 				vlan_proto = htons(ETH_P_8021AD);
1962 			else
1963 				goto vlan_err;
1964 			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
1965 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1966 		}
1967 	}
1968 	return skb;
1969 vlan_err:
1970 	dev_kfree_skb(skb);
1971 	return NULL;
1972 }
1973 
1974 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
1975 					   struct rx_cmp *rxcmp)
1976 {
1977 	u8 ext_op;
1978 
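	/* Map the V3 completion's RSS extraction opcode to a packet hash
	 * type: the opcodes listed below report an L4 hash, anything else
	 * falls back to an L3 hash.
	 */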
1979 	ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
1980 	switch (ext_op) {
1981 	case EXT_OP_INNER_4:
1982 	case EXT_OP_OUTER_4:
1983 	case EXT_OP_INNFL_3:
1984 	case EXT_OP_OUTFL_3:
1985 		return PKT_HASH_TYPE_L4;
1986 	default:
1987 		return PKT_HASH_TYPE_L3;
1988 	}
1989 }
1990 
1991 /* returns the following:
1992  * 1       - 1 packet successfully received
1993  * 0       - successful TPA_START, packet not completed yet
1994  * -EBUSY  - completion ring does not have all the agg buffers yet
1995  * -ENOMEM - packet aborted due to out of memory
1996  * -EIO    - packet aborted due to hw error indicated in BD
1997  */
1998 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1999 		       u32 *raw_cons, u8 *event)
2000 {
2001 	struct bnxt_napi *bnapi = cpr->bnapi;
2002 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2003 	struct net_device *dev = bp->dev;
2004 	struct rx_cmp *rxcmp;
2005 	struct rx_cmp_ext *rxcmp1;
2006 	u32 tmp_raw_cons = *raw_cons;
2007 	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2008 	struct bnxt_sw_rx_bd *rx_buf;
2009 	unsigned int len;
2010 	u8 *data_ptr, agg_bufs, cmp_type;
2011 	bool xdp_active = false;
2012 	dma_addr_t dma_addr;
2013 	struct sk_buff *skb;
2014 	struct xdp_buff xdp;
2015 	u32 flags, misc;
2016 	u32 cmpl_ts;
2017 	void *data;
2018 	int rc = 0;
2019 
2020 	rxcmp = (struct rx_cmp *)
2021 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2022 
2023 	cmp_type = RX_CMP_TYPE(rxcmp);
2024 
2025 	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2026 		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2027 		goto next_rx_no_prod_no_len;
2028 	}
2029 
2030 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2031 	cp_cons = RING_CMP(tmp_raw_cons);
2032 	rxcmp1 = (struct rx_cmp_ext *)
2033 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2034 
2035 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2036 		return -EBUSY;
2037 
2038 	/* The valid test of the entry must be done first before
2039 	 * reading any further.
2040 	 */
2041 	dma_rmb();
2042 	prod = rxr->rx_prod;
2043 
2044 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2045 	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2046 		bnxt_tpa_start(bp, rxr, cmp_type,
2047 			       (struct rx_tpa_start_cmp *)rxcmp,
2048 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
2049 
2050 		*event |= BNXT_RX_EVENT;
2051 		goto next_rx_no_prod_no_len;
2052 
2053 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2054 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2055 				   (struct rx_tpa_end_cmp *)rxcmp,
2056 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2057 
2058 		if (IS_ERR(skb))
2059 			return -EBUSY;
2060 
2061 		rc = -ENOMEM;
2062 		if (likely(skb)) {
2063 			bnxt_deliver_skb(bp, bnapi, skb);
2064 			rc = 1;
2065 		}
2066 		*event |= BNXT_RX_EVENT;
2067 		goto next_rx_no_prod_no_len;
2068 	}
2069 
2070 	cons = rxcmp->rx_cmp_opaque;
2071 	if (unlikely(cons != rxr->rx_next_cons)) {
2072 		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2073 
2074 		/* 0xffff is a forced error, don't print it */
2075 		if (rxr->rx_next_cons != 0xffff)
2076 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2077 				    cons, rxr->rx_next_cons);
2078 		bnxt_sched_reset_rxr(bp, rxr);
2079 		if (rc1)
2080 			return rc1;
2081 		goto next_rx_no_prod_no_len;
2082 	}
2083 	rx_buf = &rxr->rx_buf_ring[cons];
2084 	data = rx_buf->data;
2085 	data_ptr = rx_buf->data_ptr;
2086 	prefetch(data_ptr);
2087 
2088 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2089 	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2090 
2091 	if (agg_bufs) {
2092 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2093 			return -EBUSY;
2094 
2095 		cp_cons = NEXT_CMP(cp_cons);
2096 		*event |= BNXT_AGG_EVENT;
2097 	}
2098 	*event |= BNXT_RX_EVENT;
2099 
2100 	rx_buf->data = NULL;
2101 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2102 		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2103 
2104 		bnxt_reuse_rx_data(rxr, cons, data);
2105 		if (agg_bufs)
2106 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2107 					       false);
2108 
2109 		rc = -EIO;
2110 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2111 			bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2112 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2113 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2114 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
2115 						 rx_err);
2116 				bnxt_sched_reset_rxr(bp, rxr);
2117 			}
2118 		}
2119 		goto next_rx_no_len;
2120 	}
2121 
2122 	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2123 	len = flags >> RX_CMP_LEN_SHIFT;
2124 	dma_addr = rx_buf->mapping;
2125 
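	/* If an XDP program is attached, build the xdp_buff (including any
	 * aggregation pages) and run the program.  A non-zero return from
	 * bnxt_rx_xdp() means the packet was consumed by XDP and no skb is
	 * built for it.
	 */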
2126 	if (bnxt_xdp_attached(bp, rxr)) {
2127 		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2128 		if (agg_bufs) {
2129 			u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2130 							     cp_cons, agg_bufs,
2131 							     false);
2132 			if (!frag_len)
2133 				goto oom_next_rx;
2134 		}
2135 		xdp_active = true;
2136 	}
2137 
2138 	if (xdp_active) {
2139 		if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2140 			rc = 1;
2141 			goto next_rx;
2142 		}
2143 	}
2144 
2145 	if (len <= bp->rx_copy_thresh) {
2146 		if (!xdp_active)
2147 			skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2148 		else
2149 			skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2150 		bnxt_reuse_rx_data(rxr, cons, data);
2151 		if (!skb) {
2152 			if (agg_bufs) {
2153 				if (!xdp_active)
2154 					bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2155 							       agg_bufs, false);
2156 				else
2157 					bnxt_xdp_buff_frags_free(rxr, &xdp);
2158 			}
2159 			goto oom_next_rx;
2160 		}
2161 	} else {
2162 		u32 payload;
2163 
2164 		if (rx_buf->data_ptr == data_ptr)
2165 			payload = misc & RX_CMP_PAYLOAD_OFFSET;
2166 		else
2167 			payload = 0;
2168 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2169 				      payload | len);
2170 		if (!skb)
2171 			goto oom_next_rx;
2172 	}
2173 
2174 	if (agg_bufs) {
2175 		if (!xdp_active) {
2176 			skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2177 			if (!skb)
2178 				goto oom_next_rx;
2179 		} else {
2180 			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
2181 			if (!skb) {
2182 				/* we should be able to free the old skb here */
2183 				bnxt_xdp_buff_frags_free(rxr, &xdp);
2184 				goto oom_next_rx;
2185 			}
2186 		}
2187 	}
2188 
2189 	if (RX_CMP_HASH_VALID(rxcmp)) {
2190 		enum pkt_hash_types type;
2191 
2192 		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2193 			type = bnxt_rss_ext_op(bp, rxcmp);
2194 		} else {
2195 			u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
2196 
2197 			/* RSS profiles 1 and 3 with extract code 0 for inner
2198 			 * 4-tuple
2199 			 */
2200 			if (hash_type != 1 && hash_type != 3)
2201 				type = PKT_HASH_TYPE_L3;
2202 			else
2203 				type = PKT_HASH_TYPE_L4;
2204 		}
2205 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2206 	}
2207 
2208 	if (cmp_type == CMP_TYPE_RX_L2_CMP)
2209 		dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2210 	skb->protocol = eth_type_trans(skb, dev);
2211 
2212 	if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2213 		skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2214 		if (!skb)
2215 			goto next_rx;
2216 	}
2217 
2218 	skb_checksum_none_assert(skb);
2219 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
2220 		if (dev->features & NETIF_F_RXCSUM) {
2221 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2222 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2223 		}
2224 	} else {
2225 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2226 			if (dev->features & NETIF_F_RXCSUM)
2227 				bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2228 		}
2229 	}
2230 
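	/* If the completion carries a valid RX timestamp, convert the raw
	 * cycle count to nanoseconds under ptp_lock and attach it to the skb
	 * as a hardware timestamp.
	 */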
2231 	if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2232 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2233 			u64 ns, ts;
2234 
2235 			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2236 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2237 
2238 				spin_lock_bh(&ptp->ptp_lock);
2239 				ns = timecounter_cyc2time(&ptp->tc, ts);
2240 				spin_unlock_bh(&ptp->ptp_lock);
2241 				memset(skb_hwtstamps(skb), 0,
2242 				       sizeof(*skb_hwtstamps(skb)));
2243 				skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2244 			}
2245 		}
2246 	}
2247 	bnxt_deliver_skb(bp, bnapi, skb);
2248 	rc = 1;
2249 
2250 next_rx:
2251 	cpr->rx_packets += 1;
2252 	cpr->rx_bytes += len;
2253 
2254 next_rx_no_len:
2255 	rxr->rx_prod = NEXT_RX(prod);
2256 	rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2257 
2258 next_rx_no_prod_no_len:
2259 	*raw_cons = tmp_raw_cons;
2260 
2261 	return rc;
2262 
2263 oom_next_rx:
2264 	cpr->sw_stats->rx.rx_oom_discards += 1;
2265 	rc = -ENOMEM;
2266 	goto next_rx;
2267 }
2268 
2269 /* In netpoll mode, if we are using a combined completion ring, we need to
2270  * discard the rx packets and recycle the buffers.
2271  */
2272 static int bnxt_force_rx_discard(struct bnxt *bp,
2273 				 struct bnxt_cp_ring_info *cpr,
2274 				 u32 *raw_cons, u8 *event)
2275 {
2276 	u32 tmp_raw_cons = *raw_cons;
2277 	struct rx_cmp_ext *rxcmp1;
2278 	struct rx_cmp *rxcmp;
2279 	u16 cp_cons;
2280 	u8 cmp_type;
2281 	int rc;
2282 
2283 	cp_cons = RING_CMP(tmp_raw_cons);
2284 	rxcmp = (struct rx_cmp *)
2285 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2286 
2287 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2288 	cp_cons = RING_CMP(tmp_raw_cons);
2289 	rxcmp1 = (struct rx_cmp_ext *)
2290 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2291 
2292 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2293 		return -EBUSY;
2294 
2295 	/* The valid test of the entry must be done first before
2296 	 * reading any further.
2297 	 */
2298 	dma_rmb();
2299 	cmp_type = RX_CMP_TYPE(rxcmp);
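	/* Force an error bit into the completion so that bnxt_rx_pkt() below
	 * recycles the buffers instead of building an skb.
	 */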
2300 	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2301 	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2302 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2303 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2304 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2305 		struct rx_tpa_end_cmp_ext *tpa_end1;
2306 
2307 		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2308 		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2309 			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2310 	}
2311 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2312 	if (rc && rc != -EBUSY)
2313 		cpr->sw_stats->rx.rx_netpoll_discards += 1;
2314 	return rc;
2315 }
2316 
2317 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2318 {
2319 	struct bnxt_fw_health *fw_health = bp->fw_health;
2320 	u32 reg = fw_health->regs[reg_idx];
2321 	u32 reg_type, reg_off, val = 0;
2322 
2323 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2324 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2325 	switch (reg_type) {
2326 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
2327 		pci_read_config_dword(bp->pdev, reg_off, &val);
2328 		break;
2329 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
2330 		reg_off = fw_health->mapped_regs[reg_idx];
2331 		fallthrough;
2332 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2333 		val = readl(bp->bar0 + reg_off);
2334 		break;
2335 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2336 		val = readl(bp->bar1 + reg_off);
2337 		break;
2338 	}
2339 	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2340 		val &= fw_health->fw_reset_inprog_reg_mask;
2341 	return val;
2342 }
2343 
2344 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2345 {
2346 	int i;
2347 
2348 	for (i = 0; i < bp->rx_nr_rings; i++) {
2349 		u16 grp_idx = bp->rx_ring[i].bnapi->index;
2350 		struct bnxt_ring_grp_info *grp_info;
2351 
2352 		grp_info = &bp->grp_info[grp_idx];
2353 		if (grp_info->agg_fw_ring_id == ring_id)
2354 			return grp_idx;
2355 	}
2356 	return INVALID_HW_RING_ID;
2357 }
2358 
2359 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2360 {
2361 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2362 
2363 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2364 		return link_info->force_link_speed2;
2365 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2366 		return link_info->force_pam4_link_speed;
2367 	return link_info->force_link_speed;
2368 }
2369 
2370 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2371 {
2372 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2373 
2374 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2375 		link_info->req_link_speed = link_info->force_link_speed2;
2376 		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2377 		switch (link_info->req_link_speed) {
2378 		case BNXT_LINK_SPEED_50GB_PAM4:
2379 		case BNXT_LINK_SPEED_100GB_PAM4:
2380 		case BNXT_LINK_SPEED_200GB_PAM4:
2381 		case BNXT_LINK_SPEED_400GB_PAM4:
2382 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2383 			break;
2384 		case BNXT_LINK_SPEED_100GB_PAM4_112:
2385 		case BNXT_LINK_SPEED_200GB_PAM4_112:
2386 		case BNXT_LINK_SPEED_400GB_PAM4_112:
2387 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2388 			break;
2389 		default:
2390 			link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2391 		}
2392 		return;
2393 	}
2394 	link_info->req_link_speed = link_info->force_link_speed;
2395 	link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2396 	if (link_info->force_pam4_link_speed) {
2397 		link_info->req_link_speed = link_info->force_pam4_link_speed;
2398 		link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2399 	}
2400 }
2401 
2402 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2403 {
2404 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2405 
2406 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2407 		link_info->advertising = link_info->auto_link_speeds2;
2408 		return;
2409 	}
2410 	link_info->advertising = link_info->auto_link_speeds;
2411 	link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2412 }
2413 
2414 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2415 {
2416 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2417 
2418 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2419 		if (link_info->req_link_speed != link_info->force_link_speed2)
2420 			return true;
2421 		return false;
2422 	}
2423 	if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2424 	    link_info->req_link_speed != link_info->force_link_speed)
2425 		return true;
2426 	if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2427 	    link_info->req_link_speed != link_info->force_pam4_link_speed)
2428 		return true;
2429 	return false;
2430 }
2431 
2432 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2433 {
2434 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2435 
2436 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2437 		if (link_info->advertising != link_info->auto_link_speeds2)
2438 			return true;
2439 		return false;
2440 	}
2441 	if (link_info->advertising != link_info->auto_link_speeds ||
2442 	    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2443 		return true;
2444 	return false;
2445 }
2446 
2447 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2)				\
2448 	((data2) &							\
2449 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2450 
2451 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)			\
2452 	(((data2) &							\
2453 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2454 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2455 
2456 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)			\
2457 	((data1) &							\
2458 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2459 
2460 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)		\
2461 	(((data1) &							\
2462 	  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2463 	 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2464 
2465 /* Return true if the workqueue has to be scheduled */
2466 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2467 {
2468 	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2469 
2470 	switch (err_type) {
2471 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2472 		netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2473 			   BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2474 		break;
2475 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2476 		netdev_warn(bp->dev, "Pause Storm detected!\n");
2477 		break;
2478 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2479 		netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2480 		break;
2481 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2482 		u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2483 		char *threshold_type;
2484 		bool notify = false;
2485 		char *dir_str;
2486 
2487 		switch (type) {
2488 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2489 			threshold_type = "warning";
2490 			break;
2491 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2492 			threshold_type = "critical";
2493 			break;
2494 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2495 			threshold_type = "fatal";
2496 			break;
2497 		case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2498 			threshold_type = "shutdown";
2499 			break;
2500 		default:
2501 			netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2502 			return false;
2503 		}
2504 		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2505 			dir_str = "above";
2506 			notify = true;
2507 		} else {
2508 			dir_str = "below";
2509 		}
2510 		netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2511 			    dir_str, threshold_type);
2512 		netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2513 			    BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2514 			    BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2515 		if (notify) {
2516 			bp->thermal_threshold_type = type;
2517 			set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2518 			return true;
2519 		}
2520 		return false;
2521 	}
2522 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2523 		netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2524 		break;
2525 	default:
2526 		netdev_err(bp->dev, "FW reported unknown error type %u\n",
2527 			   err_type);
2528 		break;
2529 	}
2530 	return false;
2531 }
2532 
2533 #define BNXT_GET_EVENT_PORT(data)	\
2534 	((data) &			\
2535 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2536 
2537 #define BNXT_EVENT_RING_TYPE(data2)	\
2538 	((data2) &			\
2539 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2540 
2541 #define BNXT_EVENT_RING_TYPE_RX(data2)	\
2542 	(BNXT_EVENT_RING_TYPE(data2) ==	\
2543 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2544 
2545 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)	\
2546 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2547 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2548 
2549 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)	\
2550 	(((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2551 	 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2552 
2553 #define BNXT_PHC_BITS	48
2554 
2555 static int bnxt_async_event_process(struct bnxt *bp,
2556 				    struct hwrm_async_event_cmpl *cmpl)
2557 {
2558 	u16 event_id = le16_to_cpu(cmpl->event_id);
2559 	u32 data1 = le32_to_cpu(cmpl->event_data1);
2560 	u32 data2 = le32_to_cpu(cmpl->event_data2);
2561 
2562 	netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2563 		   event_id, data1, data2);
2564 
2565 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
2566 	switch (event_id) {
2567 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2568 		struct bnxt_link_info *link_info = &bp->link_info;
2569 
2570 		if (BNXT_VF(bp))
2571 			goto async_event_process_exit;
2572 
2573 		/* print unsupported speed warning in forced speed mode only */
2574 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2575 		    (data1 & 0x20000)) {
2576 			u16 fw_speed = bnxt_get_force_speed(link_info);
2577 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2578 
2579 			if (speed != SPEED_UNKNOWN)
2580 				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2581 					    speed);
2582 		}
2583 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2584 	}
2585 		fallthrough;
2586 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2587 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2588 		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2589 		fallthrough;
2590 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2591 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2592 		break;
2593 	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2594 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2595 		break;
2596 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2597 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
2598 
2599 		if (BNXT_VF(bp))
2600 			break;
2601 
2602 		if (bp->pf.port_id != port_id)
2603 			break;
2604 
2605 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2606 		break;
2607 	}
2608 	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2609 		if (BNXT_PF(bp))
2610 			goto async_event_process_exit;
2611 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2612 		break;
2613 	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2614 		char *type_str = "Solicited";
2615 
2616 		if (!bp->fw_health)
2617 			goto async_event_process_exit;
2618 
2619 		bp->fw_reset_timestamp = jiffies;
2620 		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2621 		if (!bp->fw_reset_min_dsecs)
2622 			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2623 		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2624 		if (!bp->fw_reset_max_dsecs)
2625 			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2626 		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2627 			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2628 		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2629 			type_str = "Fatal";
2630 			bp->fw_health->fatalities++;
2631 			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2632 		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2633 			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2634 			type_str = "Non-fatal";
2635 			bp->fw_health->survivals++;
2636 			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2637 		}
2638 		netif_warn(bp, hw, bp->dev,
2639 			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2640 			   type_str, data1, data2,
2641 			   bp->fw_reset_min_dsecs * 100,
2642 			   bp->fw_reset_max_dsecs * 100);
2643 		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2644 		break;
2645 	}
2646 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2647 		struct bnxt_fw_health *fw_health = bp->fw_health;
2648 		char *status_desc = "healthy";
2649 		u32 status;
2650 
2651 		if (!fw_health)
2652 			goto async_event_process_exit;
2653 
2654 		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2655 			fw_health->enabled = false;
2656 			netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2657 			break;
2658 		}
2659 		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2660 		fw_health->tmr_multiplier =
2661 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2662 				     bp->current_interval * 10);
2663 		fw_health->tmr_counter = fw_health->tmr_multiplier;
2664 		if (!fw_health->enabled)
2665 			fw_health->last_fw_heartbeat =
2666 				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2667 		fw_health->last_fw_reset_cnt =
2668 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2669 		status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2670 		if (status != BNXT_FW_STATUS_HEALTHY)
2671 			status_desc = "unhealthy";
2672 		netif_info(bp, drv, bp->dev,
2673 			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2674 			   fw_health->primary ? "primary" : "backup", status,
2675 			   status_desc, fw_health->last_fw_reset_cnt);
2676 		if (!fw_health->enabled) {
2677 			/* Make sure tmr_counter is set and visible to
2678 			 * bnxt_health_check() before setting enabled to true.
2679 			 */
2680 			smp_wmb();
2681 			fw_health->enabled = true;
2682 		}
2683 		goto async_event_process_exit;
2684 	}
2685 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2686 		netif_notice(bp, hw, bp->dev,
2687 			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2688 			     data1, data2);
2689 		goto async_event_process_exit;
2690 	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2691 		struct bnxt_rx_ring_info *rxr;
2692 		u16 grp_idx;
2693 
2694 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2695 			goto async_event_process_exit;
2696 
2697 		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2698 			    BNXT_EVENT_RING_TYPE(data2), data1);
2699 		if (!BNXT_EVENT_RING_TYPE_RX(data2))
2700 			goto async_event_process_exit;
2701 
2702 		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2703 		if (grp_idx == INVALID_HW_RING_ID) {
2704 			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2705 				    data1);
2706 			goto async_event_process_exit;
2707 		}
2708 		rxr = bp->bnapi[grp_idx]->rx_ring;
2709 		bnxt_sched_reset_rxr(bp, rxr);
2710 		goto async_event_process_exit;
2711 	}
2712 	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2713 		struct bnxt_fw_health *fw_health = bp->fw_health;
2714 
2715 		netif_notice(bp, hw, bp->dev,
2716 			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2717 			     data1, data2);
2718 		if (fw_health) {
2719 			fw_health->echo_req_data1 = data1;
2720 			fw_health->echo_req_data2 = data2;
2721 			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2722 			break;
2723 		}
2724 		goto async_event_process_exit;
2725 	}
2726 	case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2727 		bnxt_ptp_pps_event(bp, data1, data2);
2728 		goto async_event_process_exit;
2729 	}
2730 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2731 		if (bnxt_event_error_report(bp, data1, data2))
2732 			break;
2733 		goto async_event_process_exit;
2734 	}
2735 	case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2736 		switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2737 		case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2738 			if (BNXT_PTP_USE_RTC(bp)) {
2739 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2740 				u64 ns;
2741 
2742 				if (!ptp)
2743 					goto async_event_process_exit;
2744 
2745 				spin_lock_bh(&ptp->ptp_lock);
2746 				bnxt_ptp_update_current_time(bp);
2747 				ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2748 				       BNXT_PHC_BITS) | ptp->current_time);
2749 				bnxt_ptp_rtc_timecounter_init(ptp, ns);
2750 				spin_unlock_bh(&ptp->ptp_lock);
2751 			}
2752 			break;
2753 		}
2754 		goto async_event_process_exit;
2755 	}
2756 	case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2757 		u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2758 
2759 		hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2760 		goto async_event_process_exit;
2761 	}
2762 	default:
2763 		goto async_event_process_exit;
2764 	}
2765 	__bnxt_queue_sp_work(bp);
2766 async_event_process_exit:
2767 	return 0;
2768 }
2769 
2770 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2771 {
2772 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2773 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2774 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2775 				(struct hwrm_fwd_req_cmpl *)txcmp;
2776 
2777 	switch (cmpl_type) {
2778 	case CMPL_BASE_TYPE_HWRM_DONE:
2779 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
2780 		hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2781 		break;
2782 
2783 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2784 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2785 
2786 		if ((vf_id < bp->pf.first_vf_id) ||
2787 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2788 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2789 				   vf_id);
2790 			return -EINVAL;
2791 		}
2792 
2793 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2794 		bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2795 		break;
2796 
2797 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2798 		bnxt_async_event_process(bp,
2799 					 (struct hwrm_async_event_cmpl *)txcmp);
2800 		break;
2801 
2802 	default:
2803 		break;
2804 	}
2805 
2806 	return 0;
2807 }
2808 
2809 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2810 {
2811 	struct bnxt_napi *bnapi = dev_instance;
2812 	struct bnxt *bp = bnapi->bp;
2813 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2814 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2815 
2816 	cpr->event_ctr++;
2817 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2818 	napi_schedule(&bnapi->napi);
2819 	return IRQ_HANDLED;
2820 }
2821 
2822 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2823 {
2824 	u32 raw_cons = cpr->cp_raw_cons;
2825 	u16 cons = RING_CMP(raw_cons);
2826 	struct tx_cmp *txcmp;
2827 
2828 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2829 
2830 	return TX_CMP_VALID(txcmp, raw_cons);
2831 }
2832 
2833 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2834 {
2835 	struct bnxt_napi *bnapi = dev_instance;
2836 	struct bnxt *bp = bnapi->bp;
2837 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2838 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2839 	u32 int_status;
2840 
2841 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2842 
2843 	if (!bnxt_has_work(bp, cpr)) {
2844 		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2845 		/* return if erroneous interrupt */
2846 		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2847 			return IRQ_NONE;
2848 	}
2849 
2850 	/* disable ring IRQ */
2851 	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2852 
2853 	/* Return here if interrupt is shared and is disabled. */
2854 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
2855 		return IRQ_HANDLED;
2856 
2857 	napi_schedule(&bnapi->napi);
2858 	return IRQ_HANDLED;
2859 }
2860 
2861 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2862 			    int budget)
2863 {
2864 	struct bnxt_napi *bnapi = cpr->bnapi;
2865 	u32 raw_cons = cpr->cp_raw_cons;
2866 	u32 cons;
2867 	int rx_pkts = 0;
2868 	u8 event = 0;
2869 	struct tx_cmp *txcmp;
2870 
2871 	cpr->has_more_work = 0;
2872 	cpr->had_work_done = 1;
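	/* Walk the completion ring until it is empty or the budget is used
	 * up.  TX completions advance tx_hw_cons, RX completions are handed
	 * to bnxt_rx_pkt(), and HWRM/async completions go to
	 * bnxt_hwrm_handler().
	 */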
2873 	while (1) {
2874 		u8 cmp_type;
2875 		int rc;
2876 
2877 		cons = RING_CMP(raw_cons);
2878 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2879 
2880 		if (!TX_CMP_VALID(txcmp, raw_cons))
2881 			break;
2882 
2883 		/* The valid test of the entry must be done first before
2884 		 * reading any further.
2885 		 */
2886 		dma_rmb();
2887 		cmp_type = TX_CMP_TYPE(txcmp);
2888 		if (cmp_type == CMP_TYPE_TX_L2_CMP ||
2889 		    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
2890 			u32 opaque = txcmp->tx_cmp_opaque;
2891 			struct bnxt_tx_ring_info *txr;
2892 			u16 tx_freed;
2893 
2894 			txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
2895 			event |= BNXT_TX_CMP_EVENT;
2896 			if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
2897 				txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
2898 			else
2899 				txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2900 			tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
2901 				   bp->tx_ring_mask;
2902 			/* return full budget so NAPI will complete. */
2903 			if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
2904 				rx_pkts = budget;
2905 				raw_cons = NEXT_RAW_CMP(raw_cons);
2906 				if (budget)
2907 					cpr->has_more_work = 1;
2908 				break;
2909 			}
2910 		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
2911 			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2912 			if (likely(budget))
2913 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2914 			else
2915 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2916 							   &event);
2917 			if (likely(rc >= 0))
2918 				rx_pkts += rc;
2919 			/* Increment rx_pkts when rc is -ENOMEM to count towards
2920 			 * the NAPI budget.  Otherwise, we may potentially loop
2921 			 * here forever if we consistently cannot allocate
2922 			 * buffers.
2923 			 */
2924 			else if (rc == -ENOMEM && budget)
2925 				rx_pkts++;
2926 			else if (rc == -EBUSY)	/* partial completion */
2927 				break;
2928 		} else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
2929 				    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
2930 				    cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
2931 			bnxt_hwrm_handler(bp, txcmp);
2932 		}
2933 		raw_cons = NEXT_RAW_CMP(raw_cons);
2934 
2935 		if (rx_pkts && rx_pkts == budget) {
2936 			cpr->has_more_work = 1;
2937 			break;
2938 		}
2939 	}
2940 
2941 	if (event & BNXT_REDIRECT_EVENT)
2942 		xdp_do_flush();
2943 
2944 	if (event & BNXT_TX_EVENT) {
2945 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
2946 		u16 prod = txr->tx_prod;
2947 
2948 		/* Sync BD data before updating doorbell */
2949 		wmb();
2950 
2951 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2952 	}
2953 
2954 	cpr->cp_raw_cons = raw_cons;
2955 	bnapi->events |= event;
2956 	return rx_pkts;
2957 }
2958 
2959 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2960 				  int budget)
2961 {
2962 	if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
2963 		bnapi->tx_int(bp, bnapi, budget);
2964 
2965 	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2966 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2967 
2968 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2969 	}
2970 	if (bnapi->events & BNXT_AGG_EVENT) {
2971 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2972 
2973 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2974 	}
2975 	bnapi->events &= BNXT_TX_CMP_EVENT;
2976 }
2977 
2978 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2979 			  int budget)
2980 {
2981 	struct bnxt_napi *bnapi = cpr->bnapi;
2982 	int rx_pkts;
2983 
2984 	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2985 
2986 	/* ACK completion ring before freeing tx ring and producing new
2987 	 * buffers in rx/agg rings to prevent overflowing the completion
2988 	 * ring.
2989 	 */
2990 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2991 
2992 	__bnxt_poll_work_done(bp, bnapi, budget);
2993 	return rx_pkts;
2994 }
2995 
2996 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2997 {
2998 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2999 	struct bnxt *bp = bnapi->bp;
3000 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3001 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3002 	struct tx_cmp *txcmp;
3003 	struct rx_cmp_ext *rxcmp1;
3004 	u32 cp_cons, tmp_raw_cons;
3005 	u32 raw_cons = cpr->cp_raw_cons;
3006 	bool flush_xdp = false;
3007 	u32 rx_pkts = 0;
3008 	u8 event = 0;
3009 
3010 	while (1) {
3011 		int rc;
3012 
3013 		cp_cons = RING_CMP(raw_cons);
3014 		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3015 
3016 		if (!TX_CMP_VALID(txcmp, raw_cons))
3017 			break;
3018 
3019 		/* The valid test of the entry must be done first before
3020 		 * reading any further.
3021 		 */
3022 		dma_rmb();
3023 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3024 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3025 			cp_cons = RING_CMP(tmp_raw_cons);
3026 			rxcmp1 = (struct rx_cmp_ext *)
3027 			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3028 
3029 			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3030 				break;
3031 
3032 			/* force an error to recycle the buffer */
3033 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3034 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3035 
3036 			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3037 			if (likely(rc == -EIO) && budget)
3038 				rx_pkts++;
3039 			else if (rc == -EBUSY)	/* partial completion */
3040 				break;
3041 			if (event & BNXT_REDIRECT_EVENT)
3042 				flush_xdp = true;
3043 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
3044 				    CMPL_BASE_TYPE_HWRM_DONE)) {
3045 			bnxt_hwrm_handler(bp, txcmp);
3046 		} else {
3047 			netdev_err(bp->dev,
3048 				   "Invalid completion received on special ring\n");
3049 		}
3050 		raw_cons = NEXT_RAW_CMP(raw_cons);
3051 
3052 		if (rx_pkts == budget)
3053 			break;
3054 	}
3055 
3056 	cpr->cp_raw_cons = raw_cons;
3057 	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3058 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3059 
3060 	if (event & BNXT_AGG_EVENT)
3061 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3062 	if (flush_xdp)
3063 		xdp_do_flush();
3064 
3065 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3066 		napi_complete_done(napi, rx_pkts);
3067 		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3068 	}
3069 	return rx_pkts;
3070 }
3071 
3072 static int bnxt_poll(struct napi_struct *napi, int budget)
3073 {
3074 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3075 	struct bnxt *bp = bnapi->bp;
3076 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3077 	int work_done = 0;
3078 
3079 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3080 		napi_complete(napi);
3081 		return 0;
3082 	}
3083 	while (1) {
3084 		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3085 
3086 		if (work_done >= budget) {
3087 			if (!budget)
3088 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3089 			break;
3090 		}
3091 
3092 		if (!bnxt_has_work(bp, cpr)) {
3093 			if (napi_complete_done(napi, work_done))
3094 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3095 			break;
3096 		}
3097 	}
3098 	if (bp->flags & BNXT_FLAG_DIM) {
3099 		struct dim_sample dim_sample = {};
3100 
3101 		dim_update_sample(cpr->event_ctr,
3102 				  cpr->rx_packets,
3103 				  cpr->rx_bytes,
3104 				  &dim_sample);
3105 		net_dim(&cpr->dim, dim_sample);
3106 	}
3107 	return work_done;
3108 }
3109 
3110 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3111 {
3112 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3113 	int i, work_done = 0;
3114 
3115 	for (i = 0; i < cpr->cp_ring_count; i++) {
3116 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3117 
3118 		if (cpr2->had_nqe_notify) {
3119 			work_done += __bnxt_poll_work(bp, cpr2,
3120 						      budget - work_done);
3121 			cpr->has_more_work |= cpr2->has_more_work;
3122 		}
3123 	}
3124 	return work_done;
3125 }
3126 
3127 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3128 				 u64 dbr_type, int budget)
3129 {
3130 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3131 	int i;
3132 
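	/* Ring the doorbell for every completion ring that did work.
	 * DBR_TYPE_CQ_ARMALL acknowledges the consumed entries and re-arms
	 * the ring for notifications; DBR_TYPE_CQ only acknowledges them.
	 */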
3133 	for (i = 0; i < cpr->cp_ring_count; i++) {
3134 		struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3135 		struct bnxt_db_info *db;
3136 
3137 		if (cpr2->had_work_done) {
3138 			u32 tgl = 0;
3139 
3140 			if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3141 				cpr2->had_nqe_notify = 0;
3142 				tgl = cpr2->toggle;
3143 			}
3144 			db = &cpr2->cp_db;
3145 			bnxt_writeq(bp,
3146 				    db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3147 				    DB_RING_IDX(db, cpr2->cp_raw_cons),
3148 				    db->doorbell);
3149 			cpr2->had_work_done = 0;
3150 		}
3151 	}
3152 	__bnxt_poll_work_done(bp, bnapi, budget);
3153 }
3154 
3155 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3156 {
3157 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3158 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3159 	struct bnxt_cp_ring_info *cpr_rx;
3160 	u32 raw_cons = cpr->cp_raw_cons;
3161 	struct bnxt *bp = bnapi->bp;
3162 	struct nqe_cn *nqcmp;
3163 	int work_done = 0;
3164 	u32 cons;
3165 
3166 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3167 		napi_complete(napi);
3168 		return 0;
3169 	}
3170 	if (cpr->has_more_work) {
3171 		cpr->has_more_work = 0;
3172 		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3173 	}
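	/* Drain the notification queue (NQ).  Each CQ notification names a
	 * completion ring in cp_ring_arr[] that is then polled for TX/RX
	 * work; any other entry is treated as an HWRM completion.
	 */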
3174 	while (1) {
3175 		u16 type;
3176 
3177 		cons = RING_CMP(raw_cons);
3178 		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3179 
3180 		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3181 			if (cpr->has_more_work)
3182 				break;
3183 
3184 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3185 					     budget);
3186 			cpr->cp_raw_cons = raw_cons;
3187 			if (napi_complete_done(napi, work_done))
3188 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3189 						  cpr->cp_raw_cons);
3190 			goto poll_done;
3191 		}
3192 
3193 		/* The valid test of the entry must be done first before
3194 		 * reading any further.
3195 		 */
3196 		dma_rmb();
3197 
3198 		type = le16_to_cpu(nqcmp->type);
3199 		if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3200 			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3201 			u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3202 			struct bnxt_cp_ring_info *cpr2;
3203 
3204 			/* No more budget for RX work */
3205 			if (budget && work_done >= budget &&
3206 			    cq_type == BNXT_NQ_HDL_TYPE_RX)
3207 				break;
3208 
3209 			idx = BNXT_NQ_HDL_IDX(idx);
3210 			cpr2 = &cpr->cp_ring_arr[idx];
3211 			cpr2->had_nqe_notify = 1;
3212 			cpr2->toggle = NQE_CN_TOGGLE(type);
3213 			work_done += __bnxt_poll_work(bp, cpr2,
3214 						      budget - work_done);
3215 			cpr->has_more_work |= cpr2->has_more_work;
3216 		} else {
3217 			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3218 		}
3219 		raw_cons = NEXT_RAW_CMP(raw_cons);
3220 	}
3221 	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3222 	if (raw_cons != cpr->cp_raw_cons) {
3223 		cpr->cp_raw_cons = raw_cons;
3224 		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3225 	}
3226 poll_done:
3227 	cpr_rx = &cpr->cp_ring_arr[0];
3228 	if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3229 	    (bp->flags & BNXT_FLAG_DIM)) {
3230 		struct dim_sample dim_sample = {};
3231 
3232 		dim_update_sample(cpr->event_ctr,
3233 				  cpr_rx->rx_packets,
3234 				  cpr_rx->rx_bytes,
3235 				  &dim_sample);
3236 		net_dim(&cpr->dim, dim_sample);
3237 	}
3238 	return work_done;
3239 }
3240 
3241 static void bnxt_free_tx_skbs(struct bnxt *bp)
3242 {
3243 	int i, max_idx;
3244 	struct pci_dev *pdev = bp->pdev;
3245 
3246 	if (!bp->tx_ring)
3247 		return;
3248 
3249 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3250 	for (i = 0; i < bp->tx_nr_rings; i++) {
3251 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3252 		int j;
3253 
3254 		if (!txr->tx_buf_ring)
3255 			continue;
3256 
3257 		for (j = 0; j < max_idx;) {
3258 			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
3259 			struct sk_buff *skb;
3260 			int k, last;
3261 
3262 			if (i < bp->tx_nr_rings_xdp &&
3263 			    tx_buf->action == XDP_REDIRECT) {
3264 				dma_unmap_single(&pdev->dev,
3265 					dma_unmap_addr(tx_buf, mapping),
3266 					dma_unmap_len(tx_buf, len),
3267 					DMA_TO_DEVICE);
3268 				xdp_return_frame(tx_buf->xdpf);
3269 				tx_buf->action = 0;
3270 				tx_buf->xdpf = NULL;
3271 				j++;
3272 				continue;
3273 			}
3274 
3275 			skb = tx_buf->skb;
3276 			if (!skb) {
3277 				j++;
3278 				continue;
3279 			}
3280 
3281 			tx_buf->skb = NULL;
3282 
3283 			if (tx_buf->is_push) {
3284 				dev_kfree_skb(skb);
3285 				j += 2;
3286 				continue;
3287 			}
3288 
3289 			dma_unmap_single(&pdev->dev,
3290 					 dma_unmap_addr(tx_buf, mapping),
3291 					 skb_headlen(skb),
3292 					 DMA_TO_DEVICE);
3293 
3294 			last = tx_buf->nr_frags;
3295 			j += 2;
3296 			for (k = 0; k < last; k++, j++) {
3297 				int ring_idx = j & bp->tx_ring_mask;
3298 				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
3299 
3300 				tx_buf = &txr->tx_buf_ring[ring_idx];
3301 				dma_unmap_page(
3302 					&pdev->dev,
3303 					dma_unmap_addr(tx_buf, mapping),
3304 					skb_frag_size(frag), DMA_TO_DEVICE);
3305 			}
3306 			dev_kfree_skb(skb);
3307 		}
3308 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3309 	}
3310 }
3311 
3312 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
3313 {
3314 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3315 	struct pci_dev *pdev = bp->pdev;
3316 	struct bnxt_tpa_idx_map *map;
3317 	int i, max_idx, max_agg_idx;
3318 
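	/* Free the TPA buffers first, then the regular receive buffers and
	 * finally the aggregation ring pages, returning pages to the page
	 * pool where applicable.
	 */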
3319 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3320 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3321 	if (!rxr->rx_tpa)
3322 		goto skip_rx_tpa_free;
3323 
3324 	for (i = 0; i < bp->max_tpa; i++) {
3325 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3326 		u8 *data = tpa_info->data;
3327 
3328 		if (!data)
3329 			continue;
3330 
3331 		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
3332 				       bp->rx_buf_use_size, bp->rx_dir,
3333 				       DMA_ATTR_WEAK_ORDERING);
3334 
3335 		tpa_info->data = NULL;
3336 
3337 		skb_free_frag(data);
3338 	}
3339 
3340 skip_rx_tpa_free:
3341 	if (!rxr->rx_buf_ring)
3342 		goto skip_rx_buf_free;
3343 
3344 	for (i = 0; i < max_idx; i++) {
3345 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3346 		dma_addr_t mapping = rx_buf->mapping;
3347 		void *data = rx_buf->data;
3348 
3349 		if (!data)
3350 			continue;
3351 
3352 		rx_buf->data = NULL;
3353 		if (BNXT_RX_PAGE_MODE(bp)) {
3354 			page_pool_recycle_direct(rxr->page_pool, data);
3355 		} else {
3356 			dma_unmap_single_attrs(&pdev->dev, mapping,
3357 					       bp->rx_buf_use_size, bp->rx_dir,
3358 					       DMA_ATTR_WEAK_ORDERING);
3359 			skb_free_frag(data);
3360 		}
3361 	}
3362 
3363 skip_rx_buf_free:
3364 	if (!rxr->rx_agg_ring)
3365 		goto skip_rx_agg_free;
3366 
3367 	for (i = 0; i < max_agg_idx; i++) {
3368 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3369 		struct page *page = rx_agg_buf->page;
3370 
3371 		if (!page)
3372 			continue;
3373 
3374 		rx_agg_buf->page = NULL;
3375 		__clear_bit(i, rxr->rx_agg_bmap);
3376 
3377 		page_pool_recycle_direct(rxr->page_pool, page);
3378 	}
3379 
3380 skip_rx_agg_free:
3381 	map = rxr->rx_tpa_idx_map;
3382 	if (map)
3383 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3384 }
3385 
3386 static void bnxt_free_rx_skbs(struct bnxt *bp)
3387 {
3388 	int i;
3389 
3390 	if (!bp->rx_ring)
3391 		return;
3392 
3393 	for (i = 0; i < bp->rx_nr_rings; i++)
3394 		bnxt_free_one_rx_ring_skbs(bp, i);
3395 }
3396 
3397 static void bnxt_free_skbs(struct bnxt *bp)
3398 {
3399 	bnxt_free_tx_skbs(bp);
3400 	bnxt_free_rx_skbs(bp);
3401 }
3402 
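/* Pre-initialize a block of firmware context memory.  When the context type
 * has no specific init offset, the whole block is filled with init_value;
 * otherwise only the byte at init_offset inside each entry is set.
 */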
3403 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3404 {
3405 	u8 init_val = ctxm->init_value;
3406 	u16 offset = ctxm->init_offset;
3407 	u8 *p2 = p;
3408 	int i;
3409 
3410 	if (!init_val)
3411 		return;
3412 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3413 		memset(p, init_val, len);
3414 		return;
3415 	}
3416 	for (i = 0; i < len; i += ctxm->entry_size)
3417 		*(p2 + i + offset) = init_val;
3418 }
3419 
3420 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3421 {
3422 	struct pci_dev *pdev = bp->pdev;
3423 	int i;
3424 
3425 	if (!rmem->pg_arr)
3426 		goto skip_pages;
3427 
3428 	for (i = 0; i < rmem->nr_pages; i++) {
3429 		if (!rmem->pg_arr[i])
3430 			continue;
3431 
3432 		dma_free_coherent(&pdev->dev, rmem->page_size,
3433 				  rmem->pg_arr[i], rmem->dma_arr[i]);
3434 
3435 		rmem->pg_arr[i] = NULL;
3436 	}
3437 skip_pages:
3438 	if (rmem->pg_tbl) {
3439 		size_t pg_tbl_size = rmem->nr_pages * 8;
3440 
3441 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3442 			pg_tbl_size = rmem->page_size;
3443 		dma_free_coherent(&pdev->dev, pg_tbl_size,
3444 				  rmem->pg_tbl, rmem->pg_tbl_map);
3445 		rmem->pg_tbl = NULL;
3446 	}
3447 	if (rmem->vmem_size && *rmem->vmem) {
3448 		vfree(*rmem->vmem);
3449 		*rmem->vmem = NULL;
3450 	}
3451 }
3452 
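/* Allocate the DMA-coherent pages backing one ring.  When the ring spans more
 * than one page (or uses an indirect level), a page table is also allocated
 * and each entry is tagged with the PTU_PTE_VALID/NEXT_TO_LAST/LAST bits as
 * required.  Optional host-side bookkeeping memory (vmem) is vzalloc'ed last.
 */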
3453 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3454 {
3455 	struct pci_dev *pdev = bp->pdev;
3456 	u64 valid_bit = 0;
3457 	int i;
3458 
3459 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3460 		valid_bit = PTU_PTE_VALID;
3461 	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3462 		size_t pg_tbl_size = rmem->nr_pages * 8;
3463 
3464 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3465 			pg_tbl_size = rmem->page_size;
3466 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3467 						  &rmem->pg_tbl_map,
3468 						  GFP_KERNEL);
3469 		if (!rmem->pg_tbl)
3470 			return -ENOMEM;
3471 	}
3472 
3473 	for (i = 0; i < rmem->nr_pages; i++) {
3474 		u64 extra_bits = valid_bit;
3475 
3476 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3477 						     rmem->page_size,
3478 						     &rmem->dma_arr[i],
3479 						     GFP_KERNEL);
3480 		if (!rmem->pg_arr[i])
3481 			return -ENOMEM;
3482 
3483 		if (rmem->ctx_mem)
3484 			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3485 					  rmem->page_size);
3486 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
3487 			if (i == rmem->nr_pages - 2 &&
3488 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3489 				extra_bits |= PTU_PTE_NEXT_TO_LAST;
3490 			else if (i == rmem->nr_pages - 1 &&
3491 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3492 				extra_bits |= PTU_PTE_LAST;
3493 			rmem->pg_tbl[i] =
3494 				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3495 		}
3496 	}
3497 
3498 	if (rmem->vmem_size) {
3499 		*rmem->vmem = vzalloc(rmem->vmem_size);
3500 		if (!(*rmem->vmem))
3501 			return -ENOMEM;
3502 	}
3503 	return 0;
3504 }
3505 
3506 static void bnxt_free_tpa_info(struct bnxt *bp)
3507 {
3508 	int i, j;
3509 
3510 	for (i = 0; i < bp->rx_nr_rings; i++) {
3511 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3512 
3513 		kfree(rxr->rx_tpa_idx_map);
3514 		rxr->rx_tpa_idx_map = NULL;
3515 		if (rxr->rx_tpa) {
3516 			for (j = 0; j < bp->max_tpa; j++) {
3517 				kfree(rxr->rx_tpa[j].agg_arr);
3518 				rxr->rx_tpa[j].agg_arr = NULL;
3519 			}
3520 		}
3521 		kfree(rxr->rx_tpa);
3522 		rxr->rx_tpa = NULL;
3523 	}
3524 }
3525 
3526 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3527 {
3528 	int i, j;
3529 
3530 	bp->max_tpa = MAX_TPA;
3531 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3532 		if (!bp->max_tpa_v2)
3533 			return 0;
3534 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3535 	}
3536 
3537 	for (i = 0; i < bp->rx_nr_rings; i++) {
3538 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3539 		struct rx_agg_cmp *agg;
3540 
3541 		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3542 				      GFP_KERNEL);
3543 		if (!rxr->rx_tpa)
3544 			return -ENOMEM;
3545 
3546 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3547 			continue;
3548 		for (j = 0; j < bp->max_tpa; j++) {
3549 			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3550 			if (!agg)
3551 				return -ENOMEM;
3552 			rxr->rx_tpa[j].agg_arr = agg;
3553 		}
3554 		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3555 					      GFP_KERNEL);
3556 		if (!rxr->rx_tpa_idx_map)
3557 			return -ENOMEM;
3558 	}
3559 	return 0;
3560 }
3561 
3562 static void bnxt_free_rx_rings(struct bnxt *bp)
3563 {
3564 	int i;
3565 
3566 	if (!bp->rx_ring)
3567 		return;
3568 
3569 	bnxt_free_tpa_info(bp);
3570 	for (i = 0; i < bp->rx_nr_rings; i++) {
3571 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3572 		struct bnxt_ring_struct *ring;
3573 
3574 		if (rxr->xdp_prog)
3575 			bpf_prog_put(rxr->xdp_prog);
3576 
3577 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3578 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3579 
3580 		page_pool_destroy(rxr->page_pool);
3581 		rxr->page_pool = NULL;
3582 
3583 		kfree(rxr->rx_agg_bmap);
3584 		rxr->rx_agg_bmap = NULL;
3585 
3586 		ring = &rxr->rx_ring_struct;
3587 		bnxt_free_ring(bp, &ring->ring_mem);
3588 
3589 		ring = &rxr->rx_agg_ring_struct;
3590 		bnxt_free_ring(bp, &ring->ring_mem);
3591 	}
3592 }
3593 
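/* Create the page pool for one RX ring, sized for the aggregation ring (plus
 * the RX ring itself in page mode) and bound to the ring's NAPI context and
 * preferred NUMA node.
 */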
3594 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3595 				   struct bnxt_rx_ring_info *rxr,
3596 				   int numa_node)
3597 {
3598 	struct page_pool_params pp = { 0 };
3599 
3600 	pp.pool_size = bp->rx_agg_ring_size;
3601 	if (BNXT_RX_PAGE_MODE(bp))
3602 		pp.pool_size += bp->rx_ring_size;
3603 	pp.nid = numa_node;
3604 	pp.napi = &rxr->bnapi->napi;
3605 	pp.netdev = bp->dev;
3606 	pp.dev = &bp->pdev->dev;
3607 	pp.dma_dir = bp->rx_dir;
3608 	pp.max_len = PAGE_SIZE;
3609 	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3610 
3611 	rxr->page_pool = page_pool_create(&pp);
3612 	if (IS_ERR(rxr->page_pool)) {
3613 		int err = PTR_ERR(rxr->page_pool);
3614 
3615 		rxr->page_pool = NULL;
3616 		return err;
3617 	}
3618 	return 0;
3619 }
3620 
3621 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3622 {
3623 	int numa_node = dev_to_node(&bp->pdev->dev);
3624 	int i, rc = 0, agg_rings = 0, cpu;
3625 
3626 	if (!bp->rx_ring)
3627 		return -ENOMEM;
3628 
3629 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
3630 		agg_rings = 1;
3631 
3632 	for (i = 0; i < bp->rx_nr_rings; i++) {
3633 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3634 		struct bnxt_ring_struct *ring;
3635 		int cpu_node;
3636 
3637 		ring = &rxr->rx_ring_struct;
3638 
3639 		cpu = cpumask_local_spread(i, numa_node);
3640 		cpu_node = cpu_to_node(cpu);
3641 		netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3642 			   i, cpu_node);
3643 		rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3644 		if (rc)
3645 			return rc;
3646 
3647 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3648 		if (rc < 0)
3649 			return rc;
3650 
3651 		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3652 						MEM_TYPE_PAGE_POOL,
3653 						rxr->page_pool);
3654 		if (rc) {
3655 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3656 			return rc;
3657 		}
3658 
3659 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3660 		if (rc)
3661 			return rc;
3662 
3663 		ring->grp_idx = i;
3664 		if (agg_rings) {
3665 			u16 mem_size;
3666 
3667 			ring = &rxr->rx_agg_ring_struct;
3668 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3669 			if (rc)
3670 				return rc;
3671 
3672 			ring->grp_idx = i;
3673 			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3674 			mem_size = rxr->rx_agg_bmap_size / 8;
3675 			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3676 			if (!rxr->rx_agg_bmap)
3677 				return -ENOMEM;
3678 		}
3679 	}
3680 	if (bp->flags & BNXT_FLAG_TPA)
3681 		rc = bnxt_alloc_tpa_info(bp);
3682 	return rc;
3683 }
3684 
3685 static void bnxt_free_tx_rings(struct bnxt *bp)
3686 {
3687 	int i;
3688 	struct pci_dev *pdev = bp->pdev;
3689 
3690 	if (!bp->tx_ring)
3691 		return;
3692 
3693 	for (i = 0; i < bp->tx_nr_rings; i++) {
3694 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3695 		struct bnxt_ring_struct *ring;
3696 
3697 		if (txr->tx_push) {
3698 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
3699 					  txr->tx_push, txr->tx_push_mapping);
3700 			txr->tx_push = NULL;
3701 		}
3702 
3703 		ring = &txr->tx_ring_struct;
3704 
3705 		bnxt_free_ring(bp, &ring->ring_mem);
3706 	}
3707 }
3708 
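/* TX rings are laid out in groups of tx_nr_rings_per_tc, one group per
 * traffic class: ring index = BNXT_TC_TO_RING_BASE(bp, tc) +
 * BNXT_RING_TO_TC_OFF(bp, ring).
 */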
3709 #define BNXT_TC_TO_RING_BASE(bp, tc)	\
3710 	((tc) * (bp)->tx_nr_rings_per_tc)
3711 
3712 #define BNXT_RING_TO_TC_OFF(bp, tx)	\
3713 	((tx) % (bp)->tx_nr_rings_per_tc)
3714 
3715 #define BNXT_RING_TO_TC(bp, tx)		\
3716 	((tx) / (bp)->tx_nr_rings_per_tc)
3717 
3718 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3719 {
3720 	int i, j, rc;
3721 	struct pci_dev *pdev = bp->pdev;
3722 
3723 	bp->tx_push_size = 0;
3724 	if (bp->tx_push_thresh) {
3725 		int push_size;
3726 
3727 		push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3728 					   bp->tx_push_thresh);
3729 
3730 		if (push_size > 256) {
3731 			push_size = 0;
3732 			bp->tx_push_thresh = 0;
3733 		}
3734 
3735 		bp->tx_push_size = push_size;
3736 	}
3737 
3738 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3739 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3740 		struct bnxt_ring_struct *ring;
3741 		u8 qidx;
3742 
3743 		ring = &txr->tx_ring_struct;
3744 
3745 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3746 		if (rc)
3747 			return rc;
3748 
3749 		ring->grp_idx = txr->bnapi->index;
3750 		if (bp->tx_push_size) {
3751 			dma_addr_t mapping;
3752 
3753 			/* One pre-allocated DMA buffer to back up
3754 			 * the TX push operation
3755 			 */
3756 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
3757 						bp->tx_push_size,
3758 						&txr->tx_push_mapping,
3759 						GFP_KERNEL);
3760 
3761 			if (!txr->tx_push)
3762 				return -ENOMEM;
3763 
3764 			mapping = txr->tx_push_mapping +
3765 				sizeof(struct tx_push_bd);
3766 			txr->data_mapping = cpu_to_le64(mapping);
3767 		}
3768 		qidx = bp->tc_to_qidx[j];
3769 		ring->queue_id = bp->q_info[qidx].queue_id;
3770 		spin_lock_init(&txr->xdp_tx_lock);
3771 		if (i < bp->tx_nr_rings_xdp)
3772 			continue;
3773 		if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
3774 			j++;
3775 	}
3776 	return 0;
3777 }
3778 
3779 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3780 {
3781 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3782 
3783 	kfree(cpr->cp_desc_ring);
3784 	cpr->cp_desc_ring = NULL;
3785 	ring->ring_mem.pg_arr = NULL;
3786 	kfree(cpr->cp_desc_mapping);
3787 	cpr->cp_desc_mapping = NULL;
3788 	ring->ring_mem.dma_arr = NULL;
3789 }
3790 
3791 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3792 {
3793 	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3794 	if (!cpr->cp_desc_ring)
3795 		return -ENOMEM;
3796 	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3797 				       GFP_KERNEL);
3798 	if (!cpr->cp_desc_mapping)
3799 		return -ENOMEM;
3800 	return 0;
3801 }
3802 
3803 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3804 {
3805 	int i;
3806 
3807 	if (!bp->bnapi)
3808 		return;
3809 	for (i = 0; i < bp->cp_nr_rings; i++) {
3810 		struct bnxt_napi *bnapi = bp->bnapi[i];
3811 
3812 		if (!bnapi)
3813 			continue;
3814 		bnxt_free_cp_arrays(&bnapi->cp_ring);
3815 	}
3816 }
3817 
3818 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3819 {
3820 	int i, n = bp->cp_nr_pages;
3821 
3822 	for (i = 0; i < bp->cp_nr_rings; i++) {
3823 		struct bnxt_napi *bnapi = bp->bnapi[i];
3824 		int rc;
3825 
3826 		if (!bnapi)
3827 			continue;
3828 		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3829 		if (rc)
3830 			return rc;
3831 	}
3832 	return 0;
3833 }
3834 
3835 static void bnxt_free_cp_rings(struct bnxt *bp)
3836 {
3837 	int i;
3838 
3839 	if (!bp->bnapi)
3840 		return;
3841 
3842 	for (i = 0; i < bp->cp_nr_rings; i++) {
3843 		struct bnxt_napi *bnapi = bp->bnapi[i];
3844 		struct bnxt_cp_ring_info *cpr;
3845 		struct bnxt_ring_struct *ring;
3846 		int j;
3847 
3848 		if (!bnapi)
3849 			continue;
3850 
3851 		cpr = &bnapi->cp_ring;
3852 		ring = &cpr->cp_ring_struct;
3853 
3854 		bnxt_free_ring(bp, &ring->ring_mem);
3855 
3856 		if (!cpr->cp_ring_arr)
3857 			continue;
3858 
3859 		for (j = 0; j < cpr->cp_ring_count; j++) {
3860 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
3861 
3862 			ring = &cpr2->cp_ring_struct;
3863 			bnxt_free_ring(bp, &ring->ring_mem);
3864 			bnxt_free_cp_arrays(cpr2);
3865 		}
3866 		kfree(cpr->cp_ring_arr);
3867 		cpr->cp_ring_arr = NULL;
3868 		cpr->cp_ring_count = 0;
3869 	}
3870 }
3871 
3872 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
3873 				  struct bnxt_cp_ring_info *cpr)
3874 {
3875 	struct bnxt_ring_mem_info *rmem;
3876 	struct bnxt_ring_struct *ring;
3877 	int rc;
3878 
3879 	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3880 	if (rc) {
3881 		bnxt_free_cp_arrays(cpr);
3882 		return -ENOMEM;
3883 	}
3884 	ring = &cpr->cp_ring_struct;
3885 	rmem = &ring->ring_mem;
3886 	rmem->nr_pages = bp->cp_nr_pages;
3887 	rmem->page_size = HW_CMPD_RING_SIZE;
3888 	rmem->pg_arr = (void **)cpr->cp_desc_ring;
3889 	rmem->dma_arr = cpr->cp_desc_mapping;
3890 	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3891 	rc = bnxt_alloc_ring(bp, rmem);
3892 	if (rc) {
3893 		bnxt_free_ring(bp, rmem);
3894 		bnxt_free_cp_arrays(cpr);
3895 	}
3896 	return rc;
3897 }
3898 
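/* Allocate the completion rings.  On P5+ chips each NQ (bnapi) additionally
 * gets an array of sub completion rings: one for RX if the NQ services an RX
 * ring, plus one per traffic class if it services TX rings.
 */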
3899 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3900 {
3901 	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3902 	int i, j, rc, ulp_msix;
3903 	int tcs = bp->num_tc;
3904 
3905 	if (!tcs)
3906 		tcs = 1;
3907 	ulp_msix = bnxt_get_ulp_msix_num(bp);
3908 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
3909 		struct bnxt_napi *bnapi = bp->bnapi[i];
3910 		struct bnxt_cp_ring_info *cpr, *cpr2;
3911 		struct bnxt_ring_struct *ring;
3912 		int cp_count = 0, k;
3913 		int rx = 0, tx = 0;
3914 
3915 		if (!bnapi)
3916 			continue;
3917 
3918 		cpr = &bnapi->cp_ring;
3919 		cpr->bnapi = bnapi;
3920 		ring = &cpr->cp_ring_struct;
3921 
3922 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3923 		if (rc)
3924 			return rc;
3925 
3926 		ring->map_idx = ulp_msix + i;
3927 
3928 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3929 			continue;
3930 
3931 		if (i < bp->rx_nr_rings) {
3932 			cp_count++;
3933 			rx = 1;
3934 		}
3935 		if (i < bp->tx_nr_rings_xdp) {
3936 			cp_count++;
3937 			tx = 1;
3938 		} else if ((sh && i < bp->tx_nr_rings) ||
3939 			   (!sh && i >= bp->rx_nr_rings)) {
3940 			cp_count += tcs;
3941 			tx = 1;
3942 		}
3943 
3944 		cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
3945 					   GFP_KERNEL);
3946 		if (!cpr->cp_ring_arr)
3947 			return -ENOMEM;
3948 		cpr->cp_ring_count = cp_count;
3949 
3950 		for (k = 0; k < cp_count; k++) {
3951 			cpr2 = &cpr->cp_ring_arr[k];
3952 			rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
3953 			if (rc)
3954 				return rc;
3955 			cpr2->bnapi = bnapi;
3956 			cpr2->sw_stats = cpr->sw_stats;
3957 			cpr2->cp_idx = k;
3958 			if (!k && rx) {
3959 				bp->rx_ring[i].rx_cpr = cpr2;
3960 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
3961 			} else {
3962 				int n, tc = k - rx;
3963 
3964 				n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
3965 				bp->tx_ring[n].tx_cpr = cpr2;
3966 				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
3967 			}
3968 		}
3969 		if (tx)
3970 			j++;
3971 	}
3972 	return 0;
3973 }
3974 
3975 static void bnxt_init_ring_struct(struct bnxt *bp)
3976 {
3977 	int i, j;
3978 
3979 	for (i = 0; i < bp->cp_nr_rings; i++) {
3980 		struct bnxt_napi *bnapi = bp->bnapi[i];
3981 		struct bnxt_ring_mem_info *rmem;
3982 		struct bnxt_cp_ring_info *cpr;
3983 		struct bnxt_rx_ring_info *rxr;
3984 		struct bnxt_tx_ring_info *txr;
3985 		struct bnxt_ring_struct *ring;
3986 
3987 		if (!bnapi)
3988 			continue;
3989 
3990 		cpr = &bnapi->cp_ring;
3991 		ring = &cpr->cp_ring_struct;
3992 		rmem = &ring->ring_mem;
3993 		rmem->nr_pages = bp->cp_nr_pages;
3994 		rmem->page_size = HW_CMPD_RING_SIZE;
3995 		rmem->pg_arr = (void **)cpr->cp_desc_ring;
3996 		rmem->dma_arr = cpr->cp_desc_mapping;
3997 		rmem->vmem_size = 0;
3998 
3999 		rxr = bnapi->rx_ring;
4000 		if (!rxr)
4001 			goto skip_rx;
4002 
4003 		ring = &rxr->rx_ring_struct;
4004 		rmem = &ring->ring_mem;
4005 		rmem->nr_pages = bp->rx_nr_pages;
4006 		rmem->page_size = HW_RXBD_RING_SIZE;
4007 		rmem->pg_arr = (void **)rxr->rx_desc_ring;
4008 		rmem->dma_arr = rxr->rx_desc_mapping;
4009 		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4010 		rmem->vmem = (void **)&rxr->rx_buf_ring;
4011 
4012 		ring = &rxr->rx_agg_ring_struct;
4013 		rmem = &ring->ring_mem;
4014 		rmem->nr_pages = bp->rx_agg_nr_pages;
4015 		rmem->page_size = HW_RXBD_RING_SIZE;
4016 		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4017 		rmem->dma_arr = rxr->rx_agg_desc_mapping;
4018 		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4019 		rmem->vmem = (void **)&rxr->rx_agg_ring;
4020 
4021 skip_rx:
4022 		bnxt_for_each_napi_tx(j, bnapi, txr) {
4023 			ring = &txr->tx_ring_struct;
4024 			rmem = &ring->ring_mem;
4025 			rmem->nr_pages = bp->tx_nr_pages;
4026 			rmem->page_size = HW_TXBD_RING_SIZE;
4027 			rmem->pg_arr = (void **)txr->tx_desc_ring;
4028 			rmem->dma_arr = txr->tx_desc_mapping;
4029 			rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4030 			rmem->vmem = (void **)&txr->tx_buf_ring;
4031 		}
4032 	}
4033 }
4034 
4035 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4036 {
4037 	int i;
4038 	u32 prod;
4039 	struct rx_bd **rx_buf_ring;
4040 
4041 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4042 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4043 		int j;
4044 		struct rx_bd *rxbd;
4045 
4046 		rxbd = rx_buf_ring[i];
4047 		if (!rxbd)
4048 			continue;
4049 
4050 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4051 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4052 			rxbd->rx_bd_opaque = prod;
4053 		}
4054 	}
4055 }
4056 
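/* Fill one RX ring with receive buffers and, when aggregation rings are
 * enabled, fill the aggregation ring with pages and pre-allocate a buffer for
 * each TPA (LRO/GRO_HW) slot.
 */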
4057 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4058 {
4059 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4060 	struct net_device *dev = bp->dev;
4061 	u32 prod;
4062 	int i;
4063 
4064 	prod = rxr->rx_prod;
4065 	for (i = 0; i < bp->rx_ring_size; i++) {
4066 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4067 			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
4068 				    ring_nr, i, bp->rx_ring_size);
4069 			break;
4070 		}
4071 		prod = NEXT_RX(prod);
4072 	}
4073 	rxr->rx_prod = prod;
4074 
4075 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4076 		return 0;
4077 
4078 	prod = rxr->rx_agg_prod;
4079 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
4080 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
4081 			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
4082 				    ring_nr, i, bp->rx_agg_ring_size);
4083 			break;
4084 		}
4085 		prod = NEXT_RX_AGG(prod);
4086 	}
4087 	rxr->rx_agg_prod = prod;
4088 
4089 	if (rxr->rx_tpa) {
4090 		dma_addr_t mapping;
4091 		u8 *data;
4092 
4093 		for (i = 0; i < bp->max_tpa; i++) {
4094 			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
4095 			if (!data)
4096 				return -ENOMEM;
4097 
4098 			rxr->rx_tpa[i].data = data;
4099 			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4100 			rxr->rx_tpa[i].mapping = mapping;
4101 		}
4102 	}
4103 	return 0;
4104 }
4105 
4106 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4107 {
4108 	struct bnxt_rx_ring_info *rxr;
4109 	struct bnxt_ring_struct *ring;
4110 	u32 type;
4111 
4112 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4113 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4114 
4115 	if (NET_IP_ALIGN == 2)
4116 		type |= RX_BD_FLAGS_SOP;
4117 
4118 	rxr = &bp->rx_ring[ring_nr];
4119 	ring = &rxr->rx_ring_struct;
4120 	bnxt_init_rxbd_pages(ring, type);
4121 
4122 	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4123 			     &rxr->bnapi->napi);
4124 
4125 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4126 		bpf_prog_add(bp->xdp_prog, 1);
4127 		rxr->xdp_prog = bp->xdp_prog;
4128 	}
4129 	ring->fw_ring_id = INVALID_HW_RING_ID;
4130 
4131 	ring = &rxr->rx_agg_ring_struct;
4132 	ring->fw_ring_id = INVALID_HW_RING_ID;
4133 
4134 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4135 		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4136 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4137 
4138 		bnxt_init_rxbd_pages(ring, type);
4139 	}
4140 
4141 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
4142 }
4143 
4144 static void bnxt_init_cp_rings(struct bnxt *bp)
4145 {
4146 	int i, j;
4147 
4148 	for (i = 0; i < bp->cp_nr_rings; i++) {
4149 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4150 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4151 
4152 		ring->fw_ring_id = INVALID_HW_RING_ID;
4153 		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4154 		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4155 		if (!cpr->cp_ring_arr)
4156 			continue;
4157 		for (j = 0; j < cpr->cp_ring_count; j++) {
4158 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4159 
4160 			ring = &cpr2->cp_ring_struct;
4161 			ring->fw_ring_id = INVALID_HW_RING_ID;
4162 			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4163 			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4164 		}
4165 	}
4166 }
4167 
4168 static int bnxt_init_rx_rings(struct bnxt *bp)
4169 {
4170 	int i, rc = 0;
4171 
4172 	if (BNXT_RX_PAGE_MODE(bp)) {
4173 		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4174 		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4175 	} else {
4176 		bp->rx_offset = BNXT_RX_OFFSET;
4177 		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4178 	}
4179 
4180 	for (i = 0; i < bp->rx_nr_rings; i++) {
4181 		rc = bnxt_init_one_rx_ring(bp, i);
4182 		if (rc)
4183 			break;
4184 	}
4185 
4186 	return rc;
4187 }
4188 
4189 static int bnxt_init_tx_rings(struct bnxt *bp)
4190 {
4191 	u16 i;
4192 
4193 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4194 				   BNXT_MIN_TX_DESC_CNT);
4195 
4196 	for (i = 0; i < bp->tx_nr_rings; i++) {
4197 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4198 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4199 
4200 		ring->fw_ring_id = INVALID_HW_RING_ID;
4201 
4202 		if (i >= bp->tx_nr_rings_xdp)
4203 			netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4204 					     NETDEV_QUEUE_TYPE_TX,
4205 					     &txr->bnapi->napi);
4206 	}
4207 
4208 	return 0;
4209 }
4210 
4211 static void bnxt_free_ring_grps(struct bnxt *bp)
4212 {
4213 	kfree(bp->grp_info);
4214 	bp->grp_info = NULL;
4215 }
4216 
4217 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4218 {
4219 	int i;
4220 
4221 	if (irq_re_init) {
4222 		bp->grp_info = kcalloc(bp->cp_nr_rings,
4223 				       sizeof(struct bnxt_ring_grp_info),
4224 				       GFP_KERNEL);
4225 		if (!bp->grp_info)
4226 			return -ENOMEM;
4227 	}
4228 	for (i = 0; i < bp->cp_nr_rings; i++) {
4229 		if (irq_re_init)
4230 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4231 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4232 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4233 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4234 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4235 	}
4236 	return 0;
4237 }
4238 
4239 static void bnxt_free_vnics(struct bnxt *bp)
4240 {
4241 	kfree(bp->vnic_info);
4242 	bp->vnic_info = NULL;
4243 	bp->nr_vnics = 0;
4244 }
4245 
4246 static int bnxt_alloc_vnics(struct bnxt *bp)
4247 {
4248 	int num_vnics = 1;
4249 
4250 #ifdef CONFIG_RFS_ACCEL
4251 	if (bp->flags & BNXT_FLAG_RFS) {
4252 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4253 			num_vnics++;
4254 		else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4255 			num_vnics += bp->rx_nr_rings;
4256 	}
4257 #endif
4258 
4259 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4260 		num_vnics++;
4261 
4262 	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4263 				GFP_KERNEL);
4264 	if (!bp->vnic_info)
4265 		return -ENOMEM;
4266 
4267 	bp->nr_vnics = num_vnics;
4268 	return 0;
4269 }
4270 
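/* Reset every VNIC to an unconfigured state.  The default VNIC keeps a random
 * Toeplitz RSS hash key that is generated once and reused across resets; all
 * other VNICs copy the default VNIC's key.
 */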
4271 static void bnxt_init_vnics(struct bnxt *bp)
4272 {
4273 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4274 	int i;
4275 
4276 	for (i = 0; i < bp->nr_vnics; i++) {
4277 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4278 		int j;
4279 
4280 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
4281 		vnic->vnic_id = i;
4282 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4283 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4284 
4285 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4286 
4287 		if (bp->vnic_info[i].rss_hash_key) {
4288 			if (i == BNXT_VNIC_DEFAULT) {
4289 				u8 *key = (void *)vnic->rss_hash_key;
4290 				int k;
4291 
4292 				if (!bp->rss_hash_key_valid &&
4293 				    !bp->rss_hash_key_updated) {
4294 					get_random_bytes(bp->rss_hash_key,
4295 							 HW_HASH_KEY_SIZE);
4296 					bp->rss_hash_key_updated = true;
4297 				}
4298 
4299 				memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4300 				       HW_HASH_KEY_SIZE);
4301 
4302 				if (!bp->rss_hash_key_updated)
4303 					continue;
4304 
4305 				bp->rss_hash_key_updated = false;
4306 				bp->rss_hash_key_valid = true;
4307 
4308 				bp->toeplitz_prefix = 0;
4309 				for (k = 0; k < 8; k++) {
4310 					bp->toeplitz_prefix <<= 8;
4311 					bp->toeplitz_prefix |= key[k];
4312 				}
4313 			} else {
4314 				memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4315 				       HW_HASH_KEY_SIZE);
4316 			}
4317 		}
4318 	}
4319 }
4320 
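/* Return the number of ring pages, rounded up to a power of two, needed to
 * hold ring_size descriptors.  One extra page is always added before rounding
 * up; e.g. a 511-entry ring with 256 descriptors per page gives 511 / 256 = 1,
 * bumped to 2, which is already a power of two.
 */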
4321 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4322 {
4323 	int pages;
4324 
4325 	pages = ring_size / desc_per_pg;
4326 
4327 	if (!pages)
4328 		return 1;
4329 
4330 	pages++;
4331 
4332 	while (pages & (pages - 1))
4333 		pages++;
4334 
4335 	return pages;
4336 }
4337 
4338 void bnxt_set_tpa_flags(struct bnxt *bp)
4339 {
4340 	bp->flags &= ~BNXT_FLAG_TPA;
4341 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4342 		return;
4343 	if (bp->dev->features & NETIF_F_LRO)
4344 		bp->flags |= BNXT_FLAG_LRO;
4345 	else if (bp->dev->features & NETIF_F_GRO_HW)
4346 		bp->flags |= BNXT_FLAG_GRO;
4347 }
4348 
4349 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4350  * be set on entry.
4351  */
4352 void bnxt_set_ring_params(struct bnxt *bp)
4353 {
4354 	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4355 	u32 agg_factor = 0, agg_ring_size = 0;
4356 
4357 	/* 8 for CRC and VLAN */
4358 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4359 
4360 	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4361 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4362 
4363 	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
4364 	ring_size = bp->rx_ring_size;
4365 	bp->rx_agg_ring_size = 0;
4366 	bp->rx_agg_nr_pages = 0;
4367 
4368 	if (bp->flags & BNXT_FLAG_TPA)
4369 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4370 
4371 	bp->flags &= ~BNXT_FLAG_JUMBO;
4372 	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4373 		u32 jumbo_factor;
4374 
4375 		bp->flags |= BNXT_FLAG_JUMBO;
4376 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4377 		if (jumbo_factor > agg_factor)
4378 			agg_factor = jumbo_factor;
4379 	}
4380 	if (agg_factor) {
4381 		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4382 			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4383 			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4384 				    bp->rx_ring_size, ring_size);
4385 			bp->rx_ring_size = ring_size;
4386 		}
4387 		agg_ring_size = ring_size * agg_factor;
4388 
4389 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4390 							RX_DESC_CNT);
4391 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4392 			u32 tmp = agg_ring_size;
4393 
4394 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4395 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4396 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4397 				    tmp, agg_ring_size);
4398 		}
4399 		bp->rx_agg_ring_size = agg_ring_size;
4400 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4401 
4402 		if (BNXT_RX_PAGE_MODE(bp)) {
4403 			rx_space = PAGE_SIZE;
4404 			rx_size = PAGE_SIZE -
4405 				  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4406 				  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4407 		} else {
4408 			rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
4409 			rx_space = rx_size + NET_SKB_PAD +
4410 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4411 		}
4412 	}
4413 
4414 	bp->rx_buf_use_size = rx_size;
4415 	bp->rx_buf_size = rx_space;
4416 
4417 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4418 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4419 
4420 	ring_size = bp->tx_ring_size;
4421 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4422 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4423 
4424 	max_rx_cmpl = bp->rx_ring_size;
4425 	/* MAX TPA needs to be added because TPA_START completions are
4426 	 * immediately recycled, so the TPA completions are not bound by
4427 	 * the RX ring size.
4428 	 */
4429 	if (bp->flags & BNXT_FLAG_TPA)
4430 		max_rx_cmpl += bp->max_tpa;
4431 	/* RX and TPA completions are 32-byte, all others are 16-byte */
4432 	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4433 	bp->cp_ring_size = ring_size;
4434 
4435 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4436 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
4437 		bp->cp_nr_pages = MAX_CP_PAGES;
4438 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4439 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4440 			    ring_size, bp->cp_ring_size);
4441 	}
4442 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4443 	bp->cp_ring_mask = bp->cp_bit - 1;
4444 }
4445 
4446 /* Change the allocation mode of the RX rings.
4447  * TODO: Update when extending xdp_rxq_info to support allocation modes.
4448  */
4449 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4450 {
4451 	struct net_device *dev = bp->dev;
4452 
4453 	if (page_mode) {
4454 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
4455 		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4456 
4457 		if (bp->xdp_prog->aux->xdp_has_frags)
4458 			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4459 		else
4460 			dev->max_mtu =
4461 				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4462 		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4463 			bp->flags |= BNXT_FLAG_JUMBO;
4464 			bp->rx_skb_func = bnxt_rx_multi_page_skb;
4465 		} else {
4466 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4467 			bp->rx_skb_func = bnxt_rx_page_skb;
4468 		}
4469 		bp->rx_dir = DMA_BIDIRECTIONAL;
4470 		/* Disable LRO or GRO_HW */
4471 		netdev_update_features(dev);
4472 	} else {
4473 		dev->max_mtu = bp->max_mtu;
4474 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4475 		bp->rx_dir = DMA_FROM_DEVICE;
4476 		bp->rx_skb_func = bnxt_rx_skb;
4477 	}
4478 	return 0;
4479 }
4480 
4481 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4482 {
4483 	int i;
4484 	struct bnxt_vnic_info *vnic;
4485 	struct pci_dev *pdev = bp->pdev;
4486 
4487 	if (!bp->vnic_info)
4488 		return;
4489 
4490 	for (i = 0; i < bp->nr_vnics; i++) {
4491 		vnic = &bp->vnic_info[i];
4492 
4493 		kfree(vnic->fw_grp_ids);
4494 		vnic->fw_grp_ids = NULL;
4495 
4496 		kfree(vnic->uc_list);
4497 		vnic->uc_list = NULL;
4498 
4499 		if (vnic->mc_list) {
4500 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4501 					  vnic->mc_list, vnic->mc_list_mapping);
4502 			vnic->mc_list = NULL;
4503 		}
4504 
4505 		if (vnic->rss_table) {
4506 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4507 					  vnic->rss_table,
4508 					  vnic->rss_table_dma_addr);
4509 			vnic->rss_table = NULL;
4510 		}
4511 
4512 		vnic->rss_hash_key = NULL;
4513 		vnic->flags = 0;
4514 	}
4515 }
4516 
4517 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4518 {
4519 	int i, rc = 0, size;
4520 	struct bnxt_vnic_info *vnic;
4521 	struct pci_dev *pdev = bp->pdev;
4522 	int max_rings;
4523 
4524 	for (i = 0; i < bp->nr_vnics; i++) {
4525 		vnic = &bp->vnic_info[i];
4526 
4527 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4528 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4529 
4530 			if (mem_size > 0) {
4531 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4532 				if (!vnic->uc_list) {
4533 					rc = -ENOMEM;
4534 					goto out;
4535 				}
4536 			}
4537 		}
4538 
4539 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4540 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4541 			vnic->mc_list =
4542 				dma_alloc_coherent(&pdev->dev,
4543 						   vnic->mc_list_size,
4544 						   &vnic->mc_list_mapping,
4545 						   GFP_KERNEL);
4546 			if (!vnic->mc_list) {
4547 				rc = -ENOMEM;
4548 				goto out;
4549 			}
4550 		}
4551 
4552 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4553 			goto vnic_skip_grps;
4554 
4555 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4556 			max_rings = bp->rx_nr_rings;
4557 		else
4558 			max_rings = 1;
4559 
4560 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4561 		if (!vnic->fw_grp_ids) {
4562 			rc = -ENOMEM;
4563 			goto out;
4564 		}
4565 vnic_skip_grps:
4566 		if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4567 		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4568 			continue;
4569 
4570 		/* Allocate rss table and hash key */
4571 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4572 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4573 			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4574 
4575 		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4576 		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4577 						     vnic->rss_table_size,
4578 						     &vnic->rss_table_dma_addr,
4579 						     GFP_KERNEL);
4580 		if (!vnic->rss_table) {
4581 			rc = -ENOMEM;
4582 			goto out;
4583 		}
4584 
4585 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4586 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4587 	}
4588 	return 0;
4589 
4590 out:
4591 	return rc;
4592 }
4593 
4594 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4595 {
4596 	struct bnxt_hwrm_wait_token *token;
4597 
4598 	dma_pool_destroy(bp->hwrm_dma_pool);
4599 	bp->hwrm_dma_pool = NULL;
4600 
4601 	rcu_read_lock();
4602 	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4603 		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4604 	rcu_read_unlock();
4605 }
4606 
4607 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4608 {
4609 	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4610 					    BNXT_HWRM_DMA_SIZE,
4611 					    BNXT_HWRM_DMA_ALIGN, 0);
4612 	if (!bp->hwrm_dma_pool)
4613 		return -ENOMEM;
4614 
4615 	INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4616 
4617 	return 0;
4618 }
4619 
4620 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4621 {
4622 	kfree(stats->hw_masks);
4623 	stats->hw_masks = NULL;
4624 	kfree(stats->sw_stats);
4625 	stats->sw_stats = NULL;
4626 	if (stats->hw_stats) {
4627 		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4628 				  stats->hw_stats_map);
4629 		stats->hw_stats = NULL;
4630 	}
4631 }
4632 
4633 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4634 				bool alloc_masks)
4635 {
4636 	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4637 					     &stats->hw_stats_map, GFP_KERNEL);
4638 	if (!stats->hw_stats)
4639 		return -ENOMEM;
4640 
4641 	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4642 	if (!stats->sw_stats)
4643 		goto stats_mem_err;
4644 
4645 	if (alloc_masks) {
4646 		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4647 		if (!stats->hw_masks)
4648 			goto stats_mem_err;
4649 	}
4650 	return 0;
4651 
4652 stats_mem_err:
4653 	bnxt_free_stats_mem(bp, stats);
4654 	return -ENOMEM;
4655 }
4656 
4657 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4658 {
4659 	int i;
4660 
4661 	for (i = 0; i < count; i++)
4662 		mask_arr[i] = mask;
4663 }
4664 
4665 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4666 {
4667 	int i;
4668 
4669 	for (i = 0; i < count; i++)
4670 		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4671 }
4672 
4673 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4674 				    struct bnxt_stats_mem *stats)
4675 {
4676 	struct hwrm_func_qstats_ext_output *resp;
4677 	struct hwrm_func_qstats_ext_input *req;
4678 	__le64 *hw_masks;
4679 	int rc;
4680 
4681 	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4682 	    !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4683 		return -EOPNOTSUPP;
4684 
4685 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4686 	if (rc)
4687 		return rc;
4688 
4689 	req->fid = cpu_to_le16(0xffff);
4690 	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4691 
4692 	resp = hwrm_req_hold(bp, req);
4693 	rc = hwrm_req_send(bp, req);
4694 	if (!rc) {
4695 		hw_masks = &resp->rx_ucast_pkts;
4696 		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4697 	}
4698 	hwrm_req_drop(bp, req);
4699 	return rc;
4700 }
4701 
4702 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4703 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4704 
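/* Query the firmware for the valid-bit width of each hardware counter and
 * record it as a mask.  If the query is not supported, fall back to fixed
 * widths: 48-bit ring counters on P5+ chips, 40-bit port counters, and
 * 64-bit otherwise.
 */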
4705 static void bnxt_init_stats(struct bnxt *bp)
4706 {
4707 	struct bnxt_napi *bnapi = bp->bnapi[0];
4708 	struct bnxt_cp_ring_info *cpr;
4709 	struct bnxt_stats_mem *stats;
4710 	__le64 *rx_stats, *tx_stats;
4711 	int rc, rx_count, tx_count;
4712 	u64 *rx_masks, *tx_masks;
4713 	u64 mask;
4714 	u8 flags;
4715 
4716 	cpr = &bnapi->cp_ring;
4717 	stats = &cpr->stats;
4718 	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4719 	if (rc) {
4720 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4721 			mask = (1ULL << 48) - 1;
4722 		else
4723 			mask = -1ULL;
4724 		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4725 	}
4726 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
4727 		stats = &bp->port_stats;
4728 		rx_stats = stats->hw_stats;
4729 		rx_masks = stats->hw_masks;
4730 		rx_count = sizeof(struct rx_port_stats) / 8;
4731 		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4732 		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4733 		tx_count = sizeof(struct tx_port_stats) / 8;
4734 
4735 		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4736 		rc = bnxt_hwrm_port_qstats(bp, flags);
4737 		if (rc) {
4738 			mask = (1ULL << 40) - 1;
4739 
4740 			bnxt_fill_masks(rx_masks, mask, rx_count);
4741 			bnxt_fill_masks(tx_masks, mask, tx_count);
4742 		} else {
4743 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4744 			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4745 			bnxt_hwrm_port_qstats(bp, 0);
4746 		}
4747 	}
4748 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4749 		stats = &bp->rx_port_stats_ext;
4750 		rx_stats = stats->hw_stats;
4751 		rx_masks = stats->hw_masks;
4752 		rx_count = sizeof(struct rx_port_stats_ext) / 8;
4753 		stats = &bp->tx_port_stats_ext;
4754 		tx_stats = stats->hw_stats;
4755 		tx_masks = stats->hw_masks;
4756 		tx_count = sizeof(struct tx_port_stats_ext) / 8;
4757 
4758 		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4759 		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4760 		if (rc) {
4761 			mask = (1ULL << 40) - 1;
4762 
4763 			bnxt_fill_masks(rx_masks, mask, rx_count);
4764 			if (tx_stats)
4765 				bnxt_fill_masks(tx_masks, mask, tx_count);
4766 		} else {
4767 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4768 			if (tx_stats)
4769 				bnxt_copy_hw_masks(tx_masks, tx_stats,
4770 						   tx_count);
4771 			bnxt_hwrm_port_qstats_ext(bp, 0);
4772 		}
4773 	}
4774 }
4775 
4776 static void bnxt_free_port_stats(struct bnxt *bp)
4777 {
4778 	bp->flags &= ~BNXT_FLAG_PORT_STATS;
4779 	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4780 
4781 	bnxt_free_stats_mem(bp, &bp->port_stats);
4782 	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4783 	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4784 }
4785 
4786 static void bnxt_free_ring_stats(struct bnxt *bp)
4787 {
4788 	int i;
4789 
4790 	if (!bp->bnapi)
4791 		return;
4792 
4793 	for (i = 0; i < bp->cp_nr_rings; i++) {
4794 		struct bnxt_napi *bnapi = bp->bnapi[i];
4795 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4796 
4797 		bnxt_free_stats_mem(bp, &cpr->stats);
4798 
4799 		kfree(cpr->sw_stats);
4800 		cpr->sw_stats = NULL;
4801 	}
4802 }
4803 
4804 static int bnxt_alloc_stats(struct bnxt *bp)
4805 {
4806 	u32 size, i;
4807 	int rc;
4808 
4809 	size = bp->hw_ring_stats_size;
4810 
4811 	for (i = 0; i < bp->cp_nr_rings; i++) {
4812 		struct bnxt_napi *bnapi = bp->bnapi[i];
4813 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4814 
4815 		cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
4816 		if (!cpr->sw_stats)
4817 			return -ENOMEM;
4818 
4819 		cpr->stats.len = size;
4820 		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4821 		if (rc)
4822 			return rc;
4823 
4824 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4825 	}
4826 
4827 	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4828 		return 0;
4829 
4830 	if (bp->port_stats.hw_stats)
4831 		goto alloc_ext_stats;
4832 
4833 	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4834 	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4835 	if (rc)
4836 		return rc;
4837 
4838 	bp->flags |= BNXT_FLAG_PORT_STATS;
4839 
4840 alloc_ext_stats:
4841 	/* Display extended statistics only if the FW supports them */
4842 	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4843 		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4844 			return 0;
4845 
4846 	if (bp->rx_port_stats_ext.hw_stats)
4847 		goto alloc_tx_ext_stats;
4848 
4849 	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4850 	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4851 	/* Extended stats are optional */
4852 	if (rc)
4853 		return 0;
4854 
4855 alloc_tx_ext_stats:
4856 	if (bp->tx_port_stats_ext.hw_stats)
4857 		return 0;
4858 
4859 	if (bp->hwrm_spec_code >= 0x10902 ||
4860 	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4861 		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4862 		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4863 		/* Extended stats are optional */
4864 		if (rc)
4865 			return 0;
4866 	}
4867 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4868 	return 0;
4869 }
4870 
4871 static void bnxt_clear_ring_indices(struct bnxt *bp)
4872 {
4873 	int i, j;
4874 
4875 	if (!bp->bnapi)
4876 		return;
4877 
4878 	for (i = 0; i < bp->cp_nr_rings; i++) {
4879 		struct bnxt_napi *bnapi = bp->bnapi[i];
4880 		struct bnxt_cp_ring_info *cpr;
4881 		struct bnxt_rx_ring_info *rxr;
4882 		struct bnxt_tx_ring_info *txr;
4883 
4884 		if (!bnapi)
4885 			continue;
4886 
4887 		cpr = &bnapi->cp_ring;
4888 		cpr->cp_raw_cons = 0;
4889 
4890 		bnxt_for_each_napi_tx(j, bnapi, txr) {
4891 			txr->tx_prod = 0;
4892 			txr->tx_cons = 0;
4893 			txr->tx_hw_cons = 0;
4894 		}
4895 
4896 		rxr = bnapi->rx_ring;
4897 		if (rxr) {
4898 			rxr->rx_prod = 0;
4899 			rxr->rx_agg_prod = 0;
4900 			rxr->rx_sw_agg_prod = 0;
4901 			rxr->rx_next_cons = 0;
4902 		}
4903 		bnapi->events = 0;
4904 	}
4905 }
4906 
4907 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4908 {
4909 	u8 type = fltr->type, flags = fltr->flags;
4910 
4911 	INIT_LIST_HEAD(&fltr->list);
4912 	if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
4913 	    (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
4914 		list_add_tail(&fltr->list, &bp->usr_fltr_list);
4915 }
4916 
4917 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4918 {
4919 	if (!list_empty(&fltr->list))
4920 		list_del_init(&fltr->list);
4921 }
4922 
4923 void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
4924 {
4925 	struct bnxt_filter_base *usr_fltr, *tmp;
4926 
4927 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
4928 		if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
4929 			continue;
4930 		bnxt_del_one_usr_fltr(bp, usr_fltr);
4931 	}
4932 }
4933 
4934 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4935 {
4936 	hlist_del(&fltr->hash);
4937 	bnxt_del_one_usr_fltr(bp, fltr);
4938 	if (fltr->flags) {
4939 		clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
4940 		bp->ntp_fltr_count--;
4941 	}
4942 	kfree(fltr);
4943 }
4944 
4945 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
4946 {
4947 	int i;
4948 
4949 	/* We are under rtnl_lock and all our NAPIs have been disabled,
4950 	 * so it's safe to delete the hash table.
4951 	 */
4952 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4953 		struct hlist_head *head;
4954 		struct hlist_node *tmp;
4955 		struct bnxt_ntuple_filter *fltr;
4956 
4957 		head = &bp->ntp_fltr_hash_tbl[i];
4958 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
4959 			bnxt_del_l2_filter(bp, fltr->l2_fltr);
4960 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
4961 				     !list_empty(&fltr->base.list)))
4962 				continue;
4963 			bnxt_del_fltr(bp, &fltr->base);
4964 		}
4965 	}
4966 	if (!all)
4967 		return;
4968 
4969 	bitmap_free(bp->ntp_fltr_bmap);
4970 	bp->ntp_fltr_bmap = NULL;
4971 	bp->ntp_fltr_count = 0;
4972 }
4973 
4974 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4975 {
4976 	int i, rc = 0;
4977 
4978 	if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
4979 		return 0;
4980 
4981 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4982 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4983 
4984 	bp->ntp_fltr_count = 0;
4985 	bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
4986 
4987 	if (!bp->ntp_fltr_bmap)
4988 		rc = -ENOMEM;
4989 
4990 	return rc;
4991 }
4992 
4993 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
4994 {
4995 	int i;
4996 
4997 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
4998 		struct hlist_head *head;
4999 		struct hlist_node *tmp;
5000 		struct bnxt_l2_filter *fltr;
5001 
5002 		head = &bp->l2_fltr_hash_tbl[i];
5003 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5004 			if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5005 				     !list_empty(&fltr->base.list)))
5006 				continue;
5007 			bnxt_del_fltr(bp, &fltr->base);
5008 		}
5009 	}
5010 }
5011 
5012 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5013 {
5014 	int i;
5015 
5016 	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5017 		INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5018 	get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5019 }
5020 
5021 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5022 {
5023 	bnxt_free_vnic_attributes(bp);
5024 	bnxt_free_tx_rings(bp);
5025 	bnxt_free_rx_rings(bp);
5026 	bnxt_free_cp_rings(bp);
5027 	bnxt_free_all_cp_arrays(bp);
5028 	bnxt_free_ntp_fltrs(bp, false);
5029 	bnxt_free_l2_filters(bp, false);
5030 	if (irq_re_init) {
5031 		bnxt_free_ring_stats(bp);
5032 		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5033 		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5034 			bnxt_free_port_stats(bp);
5035 		bnxt_free_ring_grps(bp);
5036 		bnxt_free_vnics(bp);
5037 		kfree(bp->tx_ring_map);
5038 		bp->tx_ring_map = NULL;
5039 		kfree(bp->tx_ring);
5040 		bp->tx_ring = NULL;
5041 		kfree(bp->rx_ring);
5042 		bp->rx_ring = NULL;
5043 		kfree(bp->bnapi);
5044 		bp->bnapi = NULL;
5045 	} else {
5046 		bnxt_clear_ring_indices(bp);
5047 	}
5048 }
5049 
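/* Allocate all per-queue software state.  With irq_re_init the bnapi, RX/TX
 * ring, statistics, filter and VNIC structures are allocated from scratch;
 * otherwise only the completion arrays, ring descriptor memory and VNIC
 * attributes are rebuilt.
 */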
5050 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5051 {
5052 	int i, j, rc, size, arr_size;
5053 	void *bnapi;
5054 
5055 	if (irq_re_init) {
5056 		/* Allocate bnapi mem pointer array and mem block for
5057 		 * all queues
5058 		 */
5059 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5060 				bp->cp_nr_rings);
5061 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5062 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5063 		if (!bnapi)
5064 			return -ENOMEM;
5065 
5066 		bp->bnapi = bnapi;
5067 		bnapi += arr_size;
5068 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5069 			bp->bnapi[i] = bnapi;
5070 			bp->bnapi[i]->index = i;
5071 			bp->bnapi[i]->bp = bp;
5072 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5073 				struct bnxt_cp_ring_info *cpr =
5074 					&bp->bnapi[i]->cp_ring;
5075 
5076 				cpr->cp_ring_struct.ring_mem.flags =
5077 					BNXT_RMEM_RING_PTE_FLAG;
5078 			}
5079 		}
5080 
5081 		bp->rx_ring = kcalloc(bp->rx_nr_rings,
5082 				      sizeof(struct bnxt_rx_ring_info),
5083 				      GFP_KERNEL);
5084 		if (!bp->rx_ring)
5085 			return -ENOMEM;
5086 
5087 		for (i = 0; i < bp->rx_nr_rings; i++) {
5088 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5089 
5090 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5091 				rxr->rx_ring_struct.ring_mem.flags =
5092 					BNXT_RMEM_RING_PTE_FLAG;
5093 				rxr->rx_agg_ring_struct.ring_mem.flags =
5094 					BNXT_RMEM_RING_PTE_FLAG;
5095 			} else {
5096 				rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5097 			}
5098 			rxr->bnapi = bp->bnapi[i];
5099 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5100 		}
5101 
5102 		bp->tx_ring = kcalloc(bp->tx_nr_rings,
5103 				      sizeof(struct bnxt_tx_ring_info),
5104 				      GFP_KERNEL);
5105 		if (!bp->tx_ring)
5106 			return -ENOMEM;
5107 
5108 		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5109 					  GFP_KERNEL);
5110 
5111 		if (!bp->tx_ring_map)
5112 			return -ENOMEM;
5113 
5114 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5115 			j = 0;
5116 		else
5117 			j = bp->rx_nr_rings;
5118 
5119 		for (i = 0; i < bp->tx_nr_rings; i++) {
5120 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5121 			struct bnxt_napi *bnapi2;
5122 
5123 			if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5124 				txr->tx_ring_struct.ring_mem.flags =
5125 					BNXT_RMEM_RING_PTE_FLAG;
5126 			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5127 			if (i >= bp->tx_nr_rings_xdp) {
5128 				int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5129 
5130 				bnapi2 = bp->bnapi[k];
5131 				txr->txq_index = i - bp->tx_nr_rings_xdp;
5132 				txr->tx_napi_idx =
5133 					BNXT_RING_TO_TC(bp, txr->txq_index);
5134 				bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5135 				bnapi2->tx_int = bnxt_tx_int;
5136 			} else {
5137 				bnapi2 = bp->bnapi[j];
5138 				bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5139 				bnapi2->tx_ring[0] = txr;
5140 				bnapi2->tx_int = bnxt_tx_int_xdp;
5141 				j++;
5142 			}
5143 			txr->bnapi = bnapi2;
5144 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5145 				txr->tx_cpr = &bnapi2->cp_ring;
5146 		}
5147 
5148 		rc = bnxt_alloc_stats(bp);
5149 		if (rc)
5150 			goto alloc_mem_err;
5151 		bnxt_init_stats(bp);
5152 
5153 		rc = bnxt_alloc_ntp_fltrs(bp);
5154 		if (rc)
5155 			goto alloc_mem_err;
5156 
5157 		rc = bnxt_alloc_vnics(bp);
5158 		if (rc)
5159 			goto alloc_mem_err;
5160 	}
5161 
5162 	rc = bnxt_alloc_all_cp_arrays(bp);
5163 	if (rc)
5164 		goto alloc_mem_err;
5165 
5166 	bnxt_init_ring_struct(bp);
5167 
5168 	rc = bnxt_alloc_rx_rings(bp);
5169 	if (rc)
5170 		goto alloc_mem_err;
5171 
5172 	rc = bnxt_alloc_tx_rings(bp);
5173 	if (rc)
5174 		goto alloc_mem_err;
5175 
5176 	rc = bnxt_alloc_cp_rings(bp);
5177 	if (rc)
5178 		goto alloc_mem_err;
5179 
5180 	bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5181 						  BNXT_VNIC_MCAST_FLAG |
5182 						  BNXT_VNIC_UCAST_FLAG;
5183 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5184 		bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5185 			BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5186 
5187 	rc = bnxt_alloc_vnic_attributes(bp);
5188 	if (rc)
5189 		goto alloc_mem_err;
5190 	return 0;
5191 
5192 alloc_mem_err:
5193 	bnxt_free_mem(bp, true);
5194 	return rc;
5195 }
5196 
5197 static void bnxt_disable_int(struct bnxt *bp)
5198 {
5199 	int i;
5200 
5201 	if (!bp->bnapi)
5202 		return;
5203 
5204 	for (i = 0; i < bp->cp_nr_rings; i++) {
5205 		struct bnxt_napi *bnapi = bp->bnapi[i];
5206 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5207 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5208 
5209 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
5210 			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5211 	}
5212 }
5213 
5214 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5215 {
5216 	struct bnxt_napi *bnapi = bp->bnapi[n];
5217 	struct bnxt_cp_ring_info *cpr;
5218 
5219 	cpr = &bnapi->cp_ring;
5220 	return cpr->cp_ring_struct.map_idx;
5221 }
5222 
5223 static void bnxt_disable_int_sync(struct bnxt *bp)
5224 {
5225 	int i;
5226 
5227 	if (!bp->irq_tbl)
5228 		return;
5229 
5230 	atomic_inc(&bp->intr_sem);
5231 
5232 	bnxt_disable_int(bp);
5233 	for (i = 0; i < bp->cp_nr_rings; i++) {
5234 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5235 
5236 		synchronize_irq(bp->irq_tbl[map_idx].vector);
5237 	}
5238 }
5239 
5240 static void bnxt_enable_int(struct bnxt *bp)
5241 {
5242 	int i;
5243 
5244 	atomic_set(&bp->intr_sem, 0);
5245 	for (i = 0; i < bp->cp_nr_rings; i++) {
5246 		struct bnxt_napi *bnapi = bp->bnapi[i];
5247 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5248 
5249 		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5250 	}
5251 }
5252 
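/* Register the driver with the firmware: report the driver version and
 * capabilities (hot reset, error recovery), the VF commands the PF will
 * forward, and the async events the firmware should deliver to the driver.
 */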
5253 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5254 			    bool async_only)
5255 {
5256 	DECLARE_BITMAP(async_events_bmap, 256);
5257 	u32 *events = (u32 *)async_events_bmap;
5258 	struct hwrm_func_drv_rgtr_output *resp;
5259 	struct hwrm_func_drv_rgtr_input *req;
5260 	u32 flags;
5261 	int rc, i;
5262 
5263 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5264 	if (rc)
5265 		return rc;
5266 
5267 	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5268 				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
5269 				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5270 
5271 	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5272 	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5273 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5274 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5275 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5276 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5277 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5278 	req->flags = cpu_to_le32(flags);
5279 	req->ver_maj_8b = DRV_VER_MAJ;
5280 	req->ver_min_8b = DRV_VER_MIN;
5281 	req->ver_upd_8b = DRV_VER_UPD;
5282 	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5283 	req->ver_min = cpu_to_le16(DRV_VER_MIN);
5284 	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5285 
5286 	if (BNXT_PF(bp)) {
5287 		u32 data[8];
5288 		int i;
5289 
5290 		memset(data, 0, sizeof(data));
5291 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5292 			u16 cmd = bnxt_vf_req_snif[i];
5293 			unsigned int bit, idx;
5294 
5295 			idx = cmd / 32;
5296 			bit = cmd % 32;
5297 			data[idx] |= 1 << bit;
5298 		}
5299 
5300 		for (i = 0; i < 8; i++)
5301 			req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5302 
5303 		req->enables |=
5304 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5305 	}
5306 
5307 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5308 		req->flags |= cpu_to_le32(
5309 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5310 
5311 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
5312 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5313 		u16 event_id = bnxt_async_events_arr[i];
5314 
5315 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5316 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5317 			continue;
5318 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5319 		    !bp->ptp_cfg)
5320 			continue;
5321 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
5322 	}
5323 	if (bmap && bmap_size) {
5324 		for (i = 0; i < bmap_size; i++) {
5325 			if (test_bit(i, bmap))
5326 				__set_bit(i, async_events_bmap);
5327 		}
5328 	}
5329 	for (i = 0; i < 8; i++)
5330 		req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5331 
5332 	if (async_only)
5333 		req->enables =
5334 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5335 
5336 	resp = hwrm_req_hold(bp, req);
5337 	rc = hwrm_req_send(bp, req);
5338 	if (!rc) {
5339 		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5340 		if (resp->flags &
5341 		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5342 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5343 	}
5344 	hwrm_req_drop(bp, req);
5345 	return rc;
5346 }
5347 
5348 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5349 {
5350 	struct hwrm_func_drv_unrgtr_input *req;
5351 	int rc;
5352 
5353 	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5354 		return 0;
5355 
5356 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5357 	if (rc)
5358 		return rc;
5359 	return hwrm_req_send(bp, req);
5360 }
5361 
5362 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5363 
5364 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5365 {
5366 	struct hwrm_tunnel_dst_port_free_input *req;
5367 	int rc;
5368 
5369 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5370 	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5371 		return 0;
5372 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5373 	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5374 		return 0;
5375 
5376 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5377 	if (rc)
5378 		return rc;
5379 
5380 	req->tunnel_type = tunnel_type;
5381 
5382 	switch (tunnel_type) {
5383 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5384 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5385 		bp->vxlan_port = 0;
5386 		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5387 		break;
5388 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5389 		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5390 		bp->nge_port = 0;
5391 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5392 		break;
5393 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5394 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5395 		bp->vxlan_gpe_port = 0;
5396 		bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5397 		break;
5398 	default:
5399 		break;
5400 	}
5401 
5402 	rc = hwrm_req_send(bp, req);
5403 	if (rc)
5404 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5405 			   rc);
5406 	if (bp->flags & BNXT_FLAG_TPA)
5407 		bnxt_set_tpa(bp, true);
5408 	return rc;
5409 }
5410 
5411 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5412 					   u8 tunnel_type)
5413 {
5414 	struct hwrm_tunnel_dst_port_alloc_output *resp;
5415 	struct hwrm_tunnel_dst_port_alloc_input *req;
5416 	int rc;
5417 
5418 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5419 	if (rc)
5420 		return rc;
5421 
5422 	req->tunnel_type = tunnel_type;
5423 	req->tunnel_dst_port_val = port;
5424 
5425 	resp = hwrm_req_hold(bp, req);
5426 	rc = hwrm_req_send(bp, req);
5427 	if (rc) {
5428 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5429 			   rc);
5430 		goto err_out;
5431 	}
5432 
5433 	switch (tunnel_type) {
5434 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5435 		bp->vxlan_port = port;
5436 		bp->vxlan_fw_dst_port_id =
5437 			le16_to_cpu(resp->tunnel_dst_port_id);
5438 		break;
5439 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5440 		bp->nge_port = port;
5441 		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5442 		break;
5443 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5444 		bp->vxlan_gpe_port = port;
5445 		bp->vxlan_gpe_fw_dst_port_id =
5446 			le16_to_cpu(resp->tunnel_dst_port_id);
5447 		break;
5448 	default:
5449 		break;
5450 	}
5451 	if (bp->flags & BNXT_FLAG_TPA)
5452 		bnxt_set_tpa(bp, true);
5453 
5454 err_out:
5455 	hwrm_req_drop(bp, req);
5456 	return rc;
5457 }
5458 
5459 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5460 {
5461 	struct hwrm_cfa_l2_set_rx_mask_input *req;
5462 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5463 	int rc;
5464 
5465 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5466 	if (rc)
5467 		return rc;
5468 
5469 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5470 	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5471 		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5472 		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5473 	}
5474 	req->mask = cpu_to_le32(vnic->rx_mask);
5475 	return hwrm_req_send_silent(bp, req);
5476 }
5477 
5478 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5479 {
5480 	if (!atomic_dec_and_test(&fltr->refcnt))
5481 		return;
5482 	spin_lock_bh(&bp->ntp_fltr_lock);
5483 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5484 		spin_unlock_bh(&bp->ntp_fltr_lock);
5485 		return;
5486 	}
5487 	hlist_del_rcu(&fltr->base.hash);
5488 	bnxt_del_one_usr_fltr(bp, &fltr->base);
5489 	if (fltr->base.flags) {
5490 		clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5491 		bp->ntp_fltr_count--;
5492 	}
5493 	spin_unlock_bh(&bp->ntp_fltr_lock);
5494 	kfree_rcu(fltr, base.rcu);
5495 }
5496 
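/* Look up an L2 filter by destination MAC and VLAN in the hash bucket at
 * @idx.  The caller must hold rcu_read_lock() or bp->ntp_fltr_lock and is
 * responsible for taking a reference on the returned filter if needed.
 */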
5497 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5498 						      struct bnxt_l2_key *key,
5499 						      u32 idx)
5500 {
5501 	struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5502 	struct bnxt_l2_filter *fltr;
5503 
5504 	hlist_for_each_entry_rcu(fltr, head, base.hash) {
5505 		struct bnxt_l2_key *l2_key = &fltr->l2_key;
5506 
5507 		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5508 		    l2_key->vlan == key->vlan)
5509 			return fltr;
5510 	}
5511 	return NULL;
5512 }
5513 
5514 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5515 						    struct bnxt_l2_key *key,
5516 						    u32 idx)
5517 {
5518 	struct bnxt_l2_filter *fltr = NULL;
5519 
5520 	rcu_read_lock();
5521 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5522 	if (fltr)
5523 		atomic_inc(&fltr->refcnt);
5524 	rcu_read_unlock();
5525 	return fltr;
5526 }
5527 
5528 #define BNXT_IPV4_4TUPLE(bp, fkeys)					\
5529 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5530 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||	\
5531 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5532 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5533 
5534 #define BNXT_IPV6_4TUPLE(bp, fkeys)					\
5535 	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
5536 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||	\
5537 	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
5538 	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5539 
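/* Return the number of bytes of the flow key that participate in the
 * configured RSS hash: addresses plus ports for 4-tuple hash types,
 * addresses only for 2-tuple types, or 0 if the flow is not hashed.
 */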
5540 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5541 {
5542 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5543 		if (BNXT_IPV4_4TUPLE(bp, fkeys))
5544 			return sizeof(fkeys->addrs.v4addrs) +
5545 			       sizeof(fkeys->ports);
5546 
5547 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5548 			return sizeof(fkeys->addrs.v4addrs);
5549 	}
5550 
5551 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5552 		if (BNXT_IPV6_4TUPLE(bp, fkeys))
5553 			return sizeof(fkeys->addrs.v6addrs) +
5554 			       sizeof(fkeys->ports);
5555 
5556 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5557 			return sizeof(fkeys->addrs.v6addrs);
5558 	}
5559 
5560 	return 0;
5561 }
5562 
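/* Compute the Toeplitz hash of the flow tuple selected by
 * bnxt_get_rss_flow_tuple_len() in software, using a sliding 64-bit
 * window of the RSS hash key.  Only the upper 32 bits of the
 * accumulator are valid (see below).
 */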
5563 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5564 			 const unsigned char *key)
5565 {
5566 	u64 prefix = bp->toeplitz_prefix, hash = 0;
5567 	struct bnxt_ipv4_tuple tuple4;
5568 	struct bnxt_ipv6_tuple tuple6;
5569 	int i, j, len = 0;
5570 	u8 *four_tuple;
5571 
5572 	len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5573 	if (!len)
5574 		return 0;
5575 
5576 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5577 		tuple4.v4addrs = fkeys->addrs.v4addrs;
5578 		tuple4.ports = fkeys->ports;
5579 		four_tuple = (unsigned char *)&tuple4;
5580 	} else {
5581 		tuple6.v6addrs = fkeys->addrs.v6addrs;
5582 		tuple6.ports = fkeys->ports;
5583 		four_tuple = (unsigned char *)&tuple6;
5584 	}
5585 
5586 	for (i = 0, j = 8; i < len; i++, j++) {
5587 		u8 byte = four_tuple[i];
5588 		int bit;
5589 
5590 		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5591 			if (byte & 0x80)
5592 				hash ^= prefix;
5593 		}
5594 		prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5595 	}
5596 
5597 	/* The valid part of the hash is in the upper 32 bits. */
5598 	return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5599 }
5600 
5601 #ifdef CONFIG_RFS_ACCEL
5602 static struct bnxt_l2_filter *
5603 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5604 {
5605 	struct bnxt_l2_filter *fltr;
5606 	u32 idx;
5607 
5608 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5609 	      BNXT_L2_FLTR_HASH_MASK;
5610 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
5611 	return fltr;
5612 }
5613 #endif
5614 
5615 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5616 			       struct bnxt_l2_key *key, u32 idx)
5617 {
5618 	struct hlist_head *head;
5619 
5620 	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5621 	fltr->l2_key.vlan = key->vlan;
5622 	fltr->base.type = BNXT_FLTR_TYPE_L2;
5623 	if (fltr->base.flags) {
5624 		int bit_id;
5625 
5626 		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5627 						 bp->max_fltr, 0);
5628 		if (bit_id < 0)
5629 			return -ENOMEM;
5630 		fltr->base.sw_id = (u16)bit_id;
5631 		bp->ntp_fltr_count++;
5632 	}
5633 	head = &bp->l2_fltr_hash_tbl[idx];
5634 	hlist_add_head_rcu(&fltr->base.hash, head);
5635 	bnxt_insert_usr_fltr(bp, &fltr->base);
5636 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
5637 	atomic_set(&fltr->refcnt, 1);
5638 	return 0;
5639 }
5640 
5641 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5642 						   struct bnxt_l2_key *key,
5643 						   gfp_t gfp)
5644 {
5645 	struct bnxt_l2_filter *fltr;
5646 	u32 idx;
5647 	int rc;
5648 
5649 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5650 	      BNXT_L2_FLTR_HASH_MASK;
5651 	fltr = bnxt_lookup_l2_filter(bp, key, idx);
5652 	if (fltr)
5653 		return fltr;
5654 
5655 	fltr = kzalloc(sizeof(*fltr), gfp);
5656 	if (!fltr)
5657 		return ERR_PTR(-ENOMEM);
5658 	spin_lock_bh(&bp->ntp_fltr_lock);
5659 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5660 	spin_unlock_bh(&bp->ntp_fltr_lock);
5661 	if (rc) {
5662 		bnxt_del_l2_filter(bp, fltr);
5663 		fltr = ERR_PTR(rc);
5664 	}
5665 	return fltr;
5666 }
5667 
5668 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
5669 						struct bnxt_l2_key *key,
5670 						u16 flags)
5671 {
5672 	struct bnxt_l2_filter *fltr;
5673 	u32 idx;
5674 	int rc;
5675 
5676 	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5677 	      BNXT_L2_FLTR_HASH_MASK;
5678 	spin_lock_bh(&bp->ntp_fltr_lock);
5679 	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5680 	if (fltr) {
5681 		fltr = ERR_PTR(-EEXIST);
5682 		goto l2_filter_exit;
5683 	}
5684 	fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
5685 	if (!fltr) {
5686 		fltr = ERR_PTR(-ENOMEM);
5687 		goto l2_filter_exit;
5688 	}
5689 	fltr->base.flags = flags;
5690 	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5691 	if (rc) {
5692 		spin_unlock_bh(&bp->ntp_fltr_lock);
5693 		bnxt_del_l2_filter(bp, fltr);
5694 		return ERR_PTR(rc);
5695 	}
5696 
5697 l2_filter_exit:
5698 	spin_unlock_bh(&bp->ntp_fltr_lock);
5699 	return fltr;
5700 }
5701 
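/* Return the firmware function ID of the given VF, or INVALID_HW_RING_ID
 * if SR-IOV support is not compiled in.
 */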
5702 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
5703 {
5704 #ifdef CONFIG_BNXT_SRIOV
5705 	struct bnxt_vf_info *vf = &pf->vf[vf_idx];
5706 
5707 	return vf->fw_fid;
5708 #else
5709 	return INVALID_HW_RING_ID;
5710 #endif
5711 }
5712 
5713 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5714 {
5715 	struct hwrm_cfa_l2_filter_free_input *req;
5716 	u16 target_id = 0xffff;
5717 	int rc;
5718 
5719 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5720 		struct bnxt_pf_info *pf = &bp->pf;
5721 
5722 		if (fltr->base.vf_idx >= pf->active_vfs)
5723 			return -EINVAL;
5724 
5725 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5726 		if (target_id == INVALID_HW_RING_ID)
5727 			return -EINVAL;
5728 	}
5729 
5730 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5731 	if (rc)
5732 		return rc;
5733 
5734 	req->target_id = cpu_to_le16(target_id);
5735 	req->l2_filter_id = fltr->base.filter_id;
5736 	return hwrm_req_send(bp, req);
5737 }
5738 
5739 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5740 {
5741 	struct hwrm_cfa_l2_filter_alloc_output *resp;
5742 	struct hwrm_cfa_l2_filter_alloc_input *req;
5743 	u16 target_id = 0xffff;
5744 	int rc;
5745 
5746 	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5747 		struct bnxt_pf_info *pf = &bp->pf;
5748 
5749 		if (fltr->base.vf_idx >= pf->active_vfs)
5750 			return -EINVAL;
5751 
5752 		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5753 	}
5754 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5755 	if (rc)
5756 		return rc;
5757 
5758 	req->target_id = cpu_to_le16(target_id);
5759 	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5760 
5761 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5762 		req->flags |=
5763 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5764 	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
5765 	req->enables =
5766 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5767 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5768 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5769 	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
5770 	eth_broadcast_addr(req->l2_addr_mask);
5771 
5772 	if (fltr->l2_key.vlan) {
5773 		req->enables |=
5774 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
5775 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
5776 				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
5777 		req->num_vlans = 1;
5778 		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
5779 		req->l2_ivlan_mask = cpu_to_le16(0xfff);
5780 	}
5781 
5782 	resp = hwrm_req_hold(bp, req);
5783 	rc = hwrm_req_send(bp, req);
5784 	if (!rc) {
5785 		fltr->base.filter_id = resp->l2_filter_id;
5786 		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
5787 	}
5788 	hwrm_req_drop(bp, req);
5789 	return rc;
5790 }
5791 
5792 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
5793 				     struct bnxt_ntuple_filter *fltr)
5794 {
5795 	struct hwrm_cfa_ntuple_filter_free_input *req;
5796 	int rc;
5797 
5798 	set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
5799 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
5800 	if (rc)
5801 		return rc;
5802 
5803 	req->ntuple_filter_id = fltr->base.filter_id;
5804 	return hwrm_req_send(bp, req);
5805 }
5806 
5807 #define BNXT_NTP_FLTR_FLAGS					\
5808 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
5809 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
5810 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
5811 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
5812 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
5813 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
5814 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
5815 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
5816 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
5817 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
5818 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
5819 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
5820 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
5821 
5822 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
5823 		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
5824 
5825 void bnxt_fill_ipv6_mask(__be32 mask[4])
5826 {
5827 	int i;
5828 
5829 	for (i = 0; i < 4; i++)
5830 		mask[i] = cpu_to_be32(~0);
5831 }
5832 
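/* Select the destination for an ntuple filter: the VNIC of the matching
 * RSS context if BNXT_ACT_RSS_CTX is set, the dedicated ntuple VNIC plus
 * an RSS ring table index if the chip supports it, or the RX ring
 * directly otherwise.
 */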
5833 static void
5834 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
5835 			  struct hwrm_cfa_ntuple_filter_alloc_input *req,
5836 			  struct bnxt_ntuple_filter *fltr)
5837 {
5838 	struct bnxt_rss_ctx *rss_ctx, *tmp;
5839 	u16 rxq = fltr->base.rxq;
5840 
5841 	if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
5842 		list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
5843 			if (rss_ctx->index == fltr->base.fw_vnic_id) {
5844 				struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
5845 
5846 				req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5847 				break;
5848 			}
5849 		}
5850 		return;
5851 	}
5852 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
5853 		struct bnxt_vnic_info *vnic;
5854 		u32 enables;
5855 
5856 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
5857 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5858 		enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
5859 		req->enables |= cpu_to_le32(enables);
5860 		req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
5861 	} else {
5862 		u32 flags;
5863 
5864 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
5865 		req->flags |= cpu_to_le32(flags);
5866 		req->dst_id = cpu_to_le16(rxq);
5867 	}
5868 }
5869 
5870 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
5871 				      struct bnxt_ntuple_filter *fltr)
5872 {
5873 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
5874 	struct hwrm_cfa_ntuple_filter_alloc_input *req;
5875 	struct bnxt_flow_masks *masks = &fltr->fmasks;
5876 	struct flow_keys *keys = &fltr->fkeys;
5877 	struct bnxt_l2_filter *l2_fltr;
5878 	struct bnxt_vnic_info *vnic;
5879 	int rc;
5880 
5881 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
5882 	if (rc)
5883 		return rc;
5884 
5885 	l2_fltr = fltr->l2_fltr;
5886 	req->l2_filter_id = l2_fltr->base.filter_id;
5887 
5888 	if (fltr->base.flags & BNXT_ACT_DROP) {
5889 		req->flags =
5890 			cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
5891 	} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
5892 		bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
5893 	} else {
5894 		vnic = &bp->vnic_info[fltr->base.rxq + 1];
5895 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5896 	}
5897 	req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
5898 
5899 	req->ethertype = htons(ETH_P_IP);
5900 	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
5901 	req->ip_protocol = keys->basic.ip_proto;
5902 
5903 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
5904 		req->ethertype = htons(ETH_P_IPV6);
5905 		req->ip_addr_type =
5906 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
5907 		*(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
5908 		*(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
5909 		*(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
5910 		*(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
5911 	} else {
5912 		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
5913 		req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
5914 		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5915 		req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
5916 	}
5917 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
5918 		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5919 		req->tunnel_type =
5920 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5921 	}
5922 
5923 	req->src_port = keys->ports.src;
5924 	req->src_port_mask = masks->ports.src;
5925 	req->dst_port = keys->ports.dst;
5926 	req->dst_port_mask = masks->ports.dst;
5927 
5928 	resp = hwrm_req_hold(bp, req);
5929 	rc = hwrm_req_send(bp, req);
5930 	if (!rc)
5931 		fltr->base.filter_id = resp->ntuple_filter_id;
5932 	hwrm_req_drop(bp, req);
5933 	return rc;
5934 }
5935 
5936 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5937 				     const u8 *mac_addr)
5938 {
5939 	struct bnxt_l2_filter *fltr;
5940 	struct bnxt_l2_key key;
5941 	int rc;
5942 
5943 	ether_addr_copy(key.dst_mac_addr, mac_addr);
5944 	key.vlan = 0;
5945 	fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
5946 	if (IS_ERR(fltr))
5947 		return PTR_ERR(fltr);
5948 
5949 	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
5950 	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
5951 	if (rc)
5952 		bnxt_del_l2_filter(bp, fltr);
5953 	else
5954 		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
5955 	return rc;
5956 }
5957 
5958 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5959 {
5960 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5961 
5962 	/* Any associated ntuple filters will also be cleared by firmware. */
5963 	for (i = 0; i < num_of_vnics; i++) {
5964 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5965 
5966 		for (j = 0; j < vnic->uc_filter_count; j++) {
5967 			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
5968 
5969 			bnxt_hwrm_l2_filter_free(bp, fltr);
5970 			bnxt_del_l2_filter(bp, fltr);
5971 		}
5972 		vnic->uc_filter_count = 0;
5973 	}
5974 }
5975 
5976 #define BNXT_DFLT_TUNL_TPA_BMAP				\
5977 	(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |	\
5978 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |	\
5979 	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
5980 
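/* If the firmware supports per-tunnel TPA control, enable TPA for the
 * default tunnel types plus any tunnel whose UDP port is currently
 * configured.
 */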
5981 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
5982 					   struct hwrm_vnic_tpa_cfg_input *req)
5983 {
5984 	u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
5985 
5986 	if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
5987 		return;
5988 
5989 	if (bp->vxlan_port)
5990 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
5991 	if (bp->vxlan_gpe_port)
5992 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
5993 	if (bp->nge_port)
5994 		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
5995 
5996 	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
5997 	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
5998 }
5999 
6000 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6001 			   u32 tpa_flags)
6002 {
6003 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6004 	struct hwrm_vnic_tpa_cfg_input *req;
6005 	int rc;
6006 
6007 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6008 		return 0;
6009 
6010 	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6011 	if (rc)
6012 		return rc;
6013 
6014 	if (tpa_flags) {
6015 		u16 mss = bp->dev->mtu - 40;
6016 		u32 nsegs, n, segs = 0, flags;
6017 
6018 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6019 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6020 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6021 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6022 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6023 		if (tpa_flags & BNXT_FLAG_GRO)
6024 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6025 
6026 		req->flags = cpu_to_le32(flags);
6027 
6028 		req->enables =
6029 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6030 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6031 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6032 
6033 		/* The number of segs is in log2 units, and the first packet is
6034 		 * not included as part of these units.
6035 		 */
6036 		if (mss <= BNXT_RX_PAGE_SIZE) {
6037 			n = BNXT_RX_PAGE_SIZE / mss;
6038 			nsegs = (MAX_SKB_FRAGS - 1) * n;
6039 		} else {
6040 			n = mss / BNXT_RX_PAGE_SIZE;
6041 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
6042 				n++;
6043 			nsegs = (MAX_SKB_FRAGS - n) / n;
6044 		}
6045 
6046 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6047 			segs = MAX_TPA_SEGS_P5;
6048 			max_aggs = bp->max_tpa;
6049 		} else {
6050 			segs = ilog2(nsegs);
6051 		}
6052 		req->max_agg_segs = cpu_to_le16(segs);
6053 		req->max_aggs = cpu_to_le16(max_aggs);
6054 
6055 		req->min_agg_len = cpu_to_le32(512);
6056 		bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6057 	}
6058 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6059 
6060 	return hwrm_req_send(bp, req);
6061 }
6062 
6063 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6064 {
6065 	struct bnxt_ring_grp_info *grp_info;
6066 
6067 	grp_info = &bp->grp_info[ring->grp_idx];
6068 	return grp_info->cp_fw_ring_id;
6069 }
6070 
6071 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6072 {
6073 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6074 		return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6075 	else
6076 		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6077 }
6078 
6079 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6080 {
6081 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6082 		return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6083 	else
6084 		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6085 }
6086 
6087 int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
6088 {
6089 	int entries;
6090 	u16 *tbl;
6091 
6092 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6093 		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6094 	else
6095 		entries = HW_HASH_INDEX_SIZE;
6096 
6097 	bp->rss_indir_tbl_entries = entries;
6098 	tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6099 	if (!tbl)
6100 		return -ENOMEM;
6101 
6102 	if (rss_ctx)
6103 		rss_ctx->rss_indir_tbl = tbl;
6104 	else
6105 		bp->rss_indir_tbl = tbl;
6106 
6107 	return 0;
6108 }
6109 
6110 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
6111 {
6112 	u16 max_rings, max_entries, pad, i;
6113 	u16 *rss_indir_tbl;
6114 
6115 	if (!bp->rx_nr_rings)
6116 		return;
6117 
6118 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6119 		max_rings = bp->rx_nr_rings - 1;
6120 	else
6121 		max_rings = bp->rx_nr_rings;
6122 
6123 	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6124 	if (rss_ctx)
6125 		rss_indir_tbl = &rss_ctx->rss_indir_tbl[0];
6126 	else
6127 		rss_indir_tbl = &bp->rss_indir_tbl[0];
6128 
6129 	for (i = 0; i < max_entries; i++)
6130 		rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6131 
6132 	pad = bp->rss_indir_tbl_entries - max_entries;
6133 	if (pad)
6134 		memset(&rss_indir_tbl[i], 0, pad * sizeof(u16));
6135 }
6136 
6137 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6138 {
6139 	u16 i, tbl_size, max_ring = 0;
6140 
6141 	if (!bp->rss_indir_tbl)
6142 		return 0;
6143 
6144 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6145 	for (i = 0; i < tbl_size; i++)
6146 		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6147 	return max_ring;
6148 }
6149 
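/* Return the number of RSS contexts required: P5+ chips need one context
 * for every BNXT_RSS_TABLE_ENTRIES_P5 RX rings, older chips use a single
 * context (two on Nitro A0).
 */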
6150 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6151 {
6152 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6153 		if (!rx_rings)
6154 			return 0;
6155 		return bnxt_calc_nr_ring_pages(rx_rings - 1,
6156 					       BNXT_RSS_TABLE_ENTRIES_P5);
6157 	}
6158 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6159 		return 2;
6160 	return 1;
6161 }
6162 
6163 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6164 {
6165 	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6166 	u16 i, j;
6167 
6168 	/* Fill the RSS indirection table with ring group ids */
6169 	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6170 		if (!no_rss)
6171 			j = bp->rss_indir_tbl[i];
6172 		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6173 	}
6174 }
6175 
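/* Fill the P5+ RSS ring table: each indirection table entry expands to an
 * (RX ring ID, completion ring ID) pair.
 */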
6176 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6177 				    struct bnxt_vnic_info *vnic)
6178 {
6179 	__le16 *ring_tbl = vnic->rss_table;
6180 	struct bnxt_rx_ring_info *rxr;
6181 	u16 tbl_size, i;
6182 
6183 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6184 
6185 	for (i = 0; i < tbl_size; i++) {
6186 		u16 ring_id, j;
6187 
6188 		if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6189 			j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6190 		else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6191 			j = vnic->rss_ctx->rss_indir_tbl[i];
6192 		else
6193 			j = bp->rss_indir_tbl[i];
6194 		rxr = &bp->rx_ring[j];
6195 
6196 		ring_id = rxr->rx_ring_struct.fw_ring_id;
6197 		*ring_tbl++ = cpu_to_le16(ring_id);
6198 		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6199 		*ring_tbl++ = cpu_to_le16(ring_id);
6200 	}
6201 }
6202 
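/* Common setup for an RSS config request: fill the hardware ring table,
 * set the hash type (either absolute or as a delta against the current
 * configuration), and point the firmware at the ring table and hash key
 * DMA buffers.
 */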
6203 static void
6204 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6205 			 struct bnxt_vnic_info *vnic)
6206 {
6207 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6208 		bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6209 		if (bp->flags & BNXT_FLAG_CHIP_P7)
6210 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6211 	} else {
6212 		bnxt_fill_hw_rss_tbl(bp, vnic);
6213 	}
6214 
6215 	if (bp->rss_hash_delta) {
6216 		req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6217 		if (bp->rss_hash_cfg & bp->rss_hash_delta)
6218 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6219 		else
6220 			req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6221 	} else {
6222 		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6223 	}
6224 	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6225 	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6226 	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6227 }
6228 
6229 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6230 				  bool set_rss)
6231 {
6232 	struct hwrm_vnic_rss_cfg_input *req;
6233 	int rc;
6234 
6235 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6236 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6237 		return 0;
6238 
6239 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6240 	if (rc)
6241 		return rc;
6242 
6243 	if (set_rss)
6244 		__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6245 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6246 	return hwrm_req_send(bp, req);
6247 }
6248 
6249 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6250 				     struct bnxt_vnic_info *vnic, bool set_rss)
6251 {
6252 	struct hwrm_vnic_rss_cfg_input *req;
6253 	dma_addr_t ring_tbl_map;
6254 	u32 i, nr_ctxs;
6255 	int rc;
6256 
6257 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6258 	if (rc)
6259 		return rc;
6260 
6261 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6262 	if (!set_rss)
6263 		return hwrm_req_send(bp, req);
6264 
6265 	__bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6266 	ring_tbl_map = vnic->rss_table_dma_addr;
6267 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6268 
6269 	hwrm_req_hold(bp, req);
6270 	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6271 		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6272 		req->ring_table_pair_index = i;
6273 		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6274 		rc = hwrm_req_send(bp, req);
6275 		if (rc)
6276 			goto exit;
6277 	}
6278 
6279 exit:
6280 	hwrm_req_drop(bp, req);
6281 	return rc;
6282 }
6283 
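/* Read back the RSS hash type actually programmed by the firmware for the
 * default VNIC and adopt it as the current configuration.
 */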
6284 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6285 {
6286 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6287 	struct hwrm_vnic_rss_qcfg_output *resp;
6288 	struct hwrm_vnic_rss_qcfg_input *req;
6289 
6290 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6291 		return;
6292 
6293 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6294 	/* all contexts use the same hash_type; context zero always exists */
6295 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6296 	resp = hwrm_req_hold(bp, req);
6297 	if (!hwrm_req_send(bp, req)) {
6298 		bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6299 		bp->rss_hash_delta = 0;
6300 	}
6301 	hwrm_req_drop(bp, req);
6302 }
6303 
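/* Configure buffer placement for the VNIC: jumbo placement only when in
 * page mode, otherwise also enable header-data split for IPv4/IPv6 with
 * the RX copy threshold as the jumbo and HDS thresholds.
 */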
6304 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6305 {
6306 	struct hwrm_vnic_plcmodes_cfg_input *req;
6307 	int rc;
6308 
6309 	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6310 	if (rc)
6311 		return rc;
6312 
6313 	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6314 	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6315 
6316 	if (BNXT_RX_PAGE_MODE(bp)) {
6317 		req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6318 	} else {
6319 		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6320 					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6321 		req->enables |=
6322 			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6323 		req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
6324 		req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
6325 	}
6326 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6327 	return hwrm_req_send(bp, req);
6328 }
6329 
6330 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6331 					struct bnxt_vnic_info *vnic,
6332 					u16 ctx_idx)
6333 {
6334 	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6335 
6336 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6337 		return;
6338 
6339 	req->rss_cos_lb_ctx_id =
6340 		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6341 
6342 	hwrm_req_send(bp, req);
6343 	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6344 }
6345 
6346 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6347 {
6348 	int i, j;
6349 
6350 	for (i = 0; i < bp->nr_vnics; i++) {
6351 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6352 
6353 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6354 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6355 				bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6356 		}
6357 	}
6358 	bp->rsscos_nr_ctxs = 0;
6359 }
6360 
6361 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6362 				    struct bnxt_vnic_info *vnic, u16 ctx_idx)
6363 {
6364 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6365 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6366 	int rc;
6367 
6368 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6369 	if (rc)
6370 		return rc;
6371 
6372 	resp = hwrm_req_hold(bp, req);
6373 	rc = hwrm_req_send(bp, req);
6374 	if (!rc)
6375 		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6376 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
6377 	hwrm_req_drop(bp, req);
6378 
6379 	return rc;
6380 }
6381 
6382 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6383 {
6384 	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6385 		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6386 	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6387 }
6388 
6389 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6390 {
6391 	struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6392 	struct hwrm_vnic_cfg_input *req;
6393 	unsigned int ring = 0, grp_idx;
6394 	u16 def_vlan = 0;
6395 	int rc;
6396 
6397 	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6398 	if (rc)
6399 		return rc;
6400 
6401 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6402 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6403 
6404 		req->default_rx_ring_id =
6405 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6406 		req->default_cmpl_ring_id =
6407 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6408 		req->enables =
6409 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6410 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6411 		goto vnic_mru;
6412 	}
6413 	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6414 	/* Only RSS is supported for now; TBD: COS & LB */
6415 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6416 		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6417 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6418 					   VNIC_CFG_REQ_ENABLES_MRU);
6419 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6420 		req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6421 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6422 					   VNIC_CFG_REQ_ENABLES_MRU);
6423 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6424 	} else {
6425 		req->rss_rule = cpu_to_le16(0xffff);
6426 	}
6427 
6428 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6429 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6430 		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6431 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6432 	} else {
6433 		req->cos_rule = cpu_to_le16(0xffff);
6434 	}
6435 
6436 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6437 		ring = 0;
6438 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6439 		ring = vnic->vnic_id - 1;
6440 	else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6441 		ring = bp->rx_nr_rings - 1;
6442 
6443 	grp_idx = bp->rx_ring[ring].bnapi->index;
6444 	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6445 	req->lb_rule = cpu_to_le16(0xffff);
6446 vnic_mru:
6447 	req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
6448 
6449 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6450 #ifdef CONFIG_BNXT_SRIOV
6451 	if (BNXT_VF(bp))
6452 		def_vlan = bp->vf.vlan;
6453 #endif
6454 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6455 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6456 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6457 		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6458 
6459 	return hwrm_req_send(bp, req);
6460 }
6461 
6462 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6463 				    struct bnxt_vnic_info *vnic)
6464 {
6465 	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6466 		struct hwrm_vnic_free_input *req;
6467 
6468 		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6469 			return;
6470 
6471 		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6472 
6473 		hwrm_req_send(bp, req);
6474 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
6475 	}
6476 }
6477 
6478 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6479 {
6480 	u16 i;
6481 
6482 	for (i = 0; i < bp->nr_vnics; i++)
6483 		bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6484 }
6485 
6486 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6487 			 unsigned int start_rx_ring_idx,
6488 			 unsigned int nr_rings)
6489 {
6490 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6491 	struct hwrm_vnic_alloc_output *resp;
6492 	struct hwrm_vnic_alloc_input *req;
6493 	int rc;
6494 
6495 	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6496 	if (rc)
6497 		return rc;
6498 
6499 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6500 		goto vnic_no_ring_grps;
6501 
6502 	/* map ring groups to this vnic */
6503 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6504 		grp_idx = bp->rx_ring[i].bnapi->index;
6505 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6506 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6507 				   j, nr_rings);
6508 			break;
6509 		}
6510 		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6511 	}
6512 
6513 vnic_no_ring_grps:
6514 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6515 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6516 	if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6517 		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6518 
6519 	resp = hwrm_req_hold(bp, req);
6520 	rc = hwrm_req_send(bp, req);
6521 	if (!rc)
6522 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6523 	hwrm_req_drop(bp, req);
6524 	return rc;
6525 }
6526 
6527 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6528 {
6529 	struct hwrm_vnic_qcaps_output *resp;
6530 	struct hwrm_vnic_qcaps_input *req;
6531 	int rc;
6532 
6533 	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6534 	bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6535 	bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6536 	if (bp->hwrm_spec_code < 0x10600)
6537 		return 0;
6538 
6539 	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6540 	if (rc)
6541 		return rc;
6542 
6543 	resp = hwrm_req_hold(bp, req);
6544 	rc = hwrm_req_send(bp, req);
6545 	if (!rc) {
6546 		u32 flags = le32_to_cpu(resp->flags);
6547 
6548 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6549 		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6550 			bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6551 		if (flags &
6552 		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6553 			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6554 
6555 		/* Older P5 fw before EXT_HW_STATS support did not set
6556 		 * VLAN_STRIP_CAP properly.
6557 		 */
6558 		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6559 		    (BNXT_CHIP_P5(bp) &&
6560 		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6561 			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6562 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6563 			bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6564 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6565 			bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6566 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6567 		if (bp->max_tpa_v2) {
6568 			if (BNXT_CHIP_P5(bp))
6569 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6570 			else
6571 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6572 		}
6573 		if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6574 			bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6575 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6576 			bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6577 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6578 			bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6579 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6580 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6581 		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6582 			bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6583 	}
6584 	hwrm_req_drop(bp, req);
6585 	return rc;
6586 }
6587 
6588 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6589 {
6590 	struct hwrm_ring_grp_alloc_output *resp;
6591 	struct hwrm_ring_grp_alloc_input *req;
6592 	int rc;
6593 	u16 i;
6594 
6595 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6596 		return 0;
6597 
6598 	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6599 	if (rc)
6600 		return rc;
6601 
6602 	resp = hwrm_req_hold(bp, req);
6603 	for (i = 0; i < bp->rx_nr_rings; i++) {
6604 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6605 
6606 		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6607 		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6608 		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6609 		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6610 
6611 		rc = hwrm_req_send(bp, req);
6612 
6613 		if (rc)
6614 			break;
6615 
6616 		bp->grp_info[grp_idx].fw_grp_id =
6617 			le32_to_cpu(resp->ring_group_id);
6618 	}
6619 	hwrm_req_drop(bp, req);
6620 	return rc;
6621 }
6622 
6623 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
6624 {
6625 	struct hwrm_ring_grp_free_input *req;
6626 	u16 i;
6627 
6628 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
6629 		return;
6630 
6631 	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
6632 		return;
6633 
6634 	hwrm_req_hold(bp, req);
6635 	for (i = 0; i < bp->cp_nr_rings; i++) {
6636 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
6637 			continue;
6638 		req->ring_group_id =
6639 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
6640 
6641 		hwrm_req_send(bp, req);
6642 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
6643 	}
6644 	hwrm_req_drop(bp, req);
6645 }
6646 
6647 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
6648 				    struct bnxt_ring_struct *ring,
6649 				    u32 ring_type, u32 map_index)
6650 {
6651 	struct hwrm_ring_alloc_output *resp;
6652 	struct hwrm_ring_alloc_input *req;
6653 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
6654 	struct bnxt_ring_grp_info *grp_info;
6655 	int rc, err = 0;
6656 	u16 ring_id;
6657 
6658 	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6659 	if (rc)
6660 		goto exit;
6661 
6662 	req->enables = 0;
6663 	if (rmem->nr_pages > 1) {
6664 		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
6665 		/* Page size is in log2 units */
6666 		req->page_size = BNXT_PAGE_SHIFT;
6667 		req->page_tbl_depth = 1;
6668 	} else {
6669 		req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
6670 	}
6671 	req->fbo = 0;
6672 	/* Association of ring index with doorbell index and MSIX number */
6673 	req->logical_id = cpu_to_le16(map_index);
6674 
6675 	switch (ring_type) {
6676 	case HWRM_RING_ALLOC_TX: {
6677 		struct bnxt_tx_ring_info *txr;
6678 
6679 		txr = container_of(ring, struct bnxt_tx_ring_info,
6680 				   tx_ring_struct);
6681 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
6682 		/* Association of transmit ring with completion ring */
6683 		grp_info = &bp->grp_info[ring->grp_idx];
6684 		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
6685 		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
6686 		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6687 		req->queue_id = cpu_to_le16(ring->queue_id);
6688 		if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
6689 			req->cmpl_coal_cnt =
6690 				RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
6691 		break;
6692 	}
6693 	case HWRM_RING_ALLOC_RX:
6694 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6695 		req->length = cpu_to_le32(bp->rx_ring_mask + 1);
6696 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6697 			u16 flags = 0;
6698 
6699 			/* Association of rx ring with stats context */
6700 			grp_info = &bp->grp_info[ring->grp_idx];
6701 			req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
6702 			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6703 			req->enables |= cpu_to_le32(
6704 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6705 			if (NET_IP_ALIGN == 2)
6706 				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
6707 			req->flags = cpu_to_le16(flags);
6708 		}
6709 		break;
6710 	case HWRM_RING_ALLOC_AGG:
6711 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6712 			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
6713 			/* Association of agg ring with rx ring */
6714 			grp_info = &bp->grp_info[ring->grp_idx];
6715 			req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
6716 			req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
6717 			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6718 			req->enables |= cpu_to_le32(
6719 				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
6720 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6721 		} else {
6722 			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6723 		}
6724 		req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
6725 		break;
6726 	case HWRM_RING_ALLOC_CMPL:
6727 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
6728 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6729 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6730 			/* Association of cp ring with nq */
6731 			grp_info = &bp->grp_info[map_index];
6732 			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
6733 			req->cq_handle = cpu_to_le64(ring->handle);
6734 			req->enables |= cpu_to_le32(
6735 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
6736 		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
6737 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6738 		}
6739 		break;
6740 	case HWRM_RING_ALLOC_NQ:
6741 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
6742 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6743 		if (bp->flags & BNXT_FLAG_USING_MSIX)
6744 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6745 		break;
6746 	default:
6747 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
6748 			   ring_type);
6749 		return -1;
6750 	}
6751 
6752 	resp = hwrm_req_hold(bp, req);
6753 	rc = hwrm_req_send(bp, req);
6754 	err = le16_to_cpu(resp->error_code);
6755 	ring_id = le16_to_cpu(resp->ring_id);
6756 	hwrm_req_drop(bp, req);
6757 
6758 exit:
6759 	if (rc || err) {
6760 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
6761 			   ring_type, rc, err);
6762 		return -EIO;
6763 	}
6764 	ring->fw_ring_id = ring_id;
6765 	return rc;
6766 }
6767 
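/* Tell the firmware which completion ring should receive async event
 * notifications; the PF uses HWRM_FUNC_CFG, a VF uses HWRM_FUNC_VF_CFG.
 */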
6768 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
6769 {
6770 	int rc;
6771 
6772 	if (BNXT_PF(bp)) {
6773 		struct hwrm_func_cfg_input *req;
6774 
6775 		rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
6776 		if (rc)
6777 			return rc;
6778 
6779 		req->fid = cpu_to_le16(0xffff);
6780 		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6781 		req->async_event_cr = cpu_to_le16(idx);
6782 		return hwrm_req_send(bp, req);
6783 	} else {
6784 		struct hwrm_func_vf_cfg_input *req;
6785 
6786 		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
6787 		if (rc)
6788 			return rc;
6789 
6790 		req->enables =
6791 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6792 		req->async_event_cr = cpu_to_le16(idx);
6793 		return hwrm_req_send(bp, req);
6794 	}
6795 }
6796 
6797 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
6798 			     u32 ring_type)
6799 {
6800 	switch (ring_type) {
6801 	case HWRM_RING_ALLOC_TX:
6802 		db->db_ring_mask = bp->tx_ring_mask;
6803 		break;
6804 	case HWRM_RING_ALLOC_RX:
6805 		db->db_ring_mask = bp->rx_ring_mask;
6806 		break;
6807 	case HWRM_RING_ALLOC_AGG:
6808 		db->db_ring_mask = bp->rx_agg_ring_mask;
6809 		break;
6810 	case HWRM_RING_ALLOC_CMPL:
6811 	case HWRM_RING_ALLOC_NQ:
6812 		db->db_ring_mask = bp->cp_ring_mask;
6813 		break;
6814 	}
6815 	if (bp->flags & BNXT_FLAG_CHIP_P7) {
6816 		db->db_epoch_mask = db->db_ring_mask + 1;
6817 		db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
6818 	}
6819 }
6820 
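/* Initialize the doorbell for a newly allocated ring: P5+ chips use 64-bit
 * doorbell keys carrying the ring XID (plus a valid bit on P7), older
 * chips use legacy 32-bit keys at a per-ring offset in BAR 1.
 */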
6821 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
6822 			u32 map_idx, u32 xid)
6823 {
6824 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6825 		switch (ring_type) {
6826 		case HWRM_RING_ALLOC_TX:
6827 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
6828 			break;
6829 		case HWRM_RING_ALLOC_RX:
6830 		case HWRM_RING_ALLOC_AGG:
6831 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
6832 			break;
6833 		case HWRM_RING_ALLOC_CMPL:
6834 			db->db_key64 = DBR_PATH_L2;
6835 			break;
6836 		case HWRM_RING_ALLOC_NQ:
6837 			db->db_key64 = DBR_PATH_L2;
6838 			break;
6839 		}
6840 		db->db_key64 |= (u64)xid << DBR_XID_SFT;
6841 
6842 		if (bp->flags & BNXT_FLAG_CHIP_P7)
6843 			db->db_key64 |= DBR_VALID;
6844 
6845 		db->doorbell = bp->bar1 + bp->db_offset;
6846 	} else {
6847 		db->doorbell = bp->bar1 + map_idx * 0x80;
6848 		switch (ring_type) {
6849 		case HWRM_RING_ALLOC_TX:
6850 			db->db_key32 = DB_KEY_TX;
6851 			break;
6852 		case HWRM_RING_ALLOC_RX:
6853 		case HWRM_RING_ALLOC_AGG:
6854 			db->db_key32 = DB_KEY_RX;
6855 			break;
6856 		case HWRM_RING_ALLOC_CMPL:
6857 			db->db_key32 = DB_KEY_CP;
6858 			break;
6859 		}
6860 	}
6861 	bnxt_set_db_mask(bp, db, ring_type);
6862 }
6863 
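/* Allocate all rings in firmware: NQs (or completion rings on older chips)
 * first so that the async event completion ring can be set, then TX rings
 * with their completion rings, RX rings, and finally aggregation rings.
 */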
6864 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
6865 {
6866 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
6867 	int i, rc = 0;
6868 	u32 type;
6869 
6870 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6871 		type = HWRM_RING_ALLOC_NQ;
6872 	else
6873 		type = HWRM_RING_ALLOC_CMPL;
6874 	for (i = 0; i < bp->cp_nr_rings; i++) {
6875 		struct bnxt_napi *bnapi = bp->bnapi[i];
6876 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6877 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
6878 		u32 map_idx = ring->map_idx;
6879 		unsigned int vector;
6880 
6881 		vector = bp->irq_tbl[map_idx].vector;
6882 		disable_irq_nosync(vector);
6883 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6884 		if (rc) {
6885 			enable_irq(vector);
6886 			goto err_out;
6887 		}
6888 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
6889 		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
6890 		enable_irq(vector);
6891 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
6892 
6893 		if (!i) {
6894 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
6895 			if (rc)
6896 				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
6897 		}
6898 	}
6899 
6900 	type = HWRM_RING_ALLOC_TX;
6901 	for (i = 0; i < bp->tx_nr_rings; i++) {
6902 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6903 		struct bnxt_ring_struct *ring;
6904 		u32 map_idx;
6905 
6906 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6907 			struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
6908 			struct bnxt_napi *bnapi = txr->bnapi;
6909 			u32 type2 = HWRM_RING_ALLOC_CMPL;
6910 
6911 			ring = &cpr2->cp_ring_struct;
6912 			ring->handle = BNXT_SET_NQ_HDL(cpr2);
6913 			map_idx = bnapi->index;
6914 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6915 			if (rc)
6916 				goto err_out;
6917 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6918 				    ring->fw_ring_id);
6919 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6920 		}
6921 		ring = &txr->tx_ring_struct;
6922 		map_idx = i;
6923 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6924 		if (rc)
6925 			goto err_out;
6926 		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
6927 	}
6928 
6929 	type = HWRM_RING_ALLOC_RX;
6930 	for (i = 0; i < bp->rx_nr_rings; i++) {
6931 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6932 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6933 		struct bnxt_napi *bnapi = rxr->bnapi;
6934 		u32 map_idx = bnapi->index;
6935 
6936 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6937 		if (rc)
6938 			goto err_out;
6939 		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
6940 		/* If we have agg rings, post agg buffers first. */
6941 		if (!agg_rings)
6942 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6943 		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
6944 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6945 			struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
6946 			u32 type2 = HWRM_RING_ALLOC_CMPL;
6947 
6948 			ring = &cpr2->cp_ring_struct;
6949 			ring->handle = BNXT_SET_NQ_HDL(cpr2);
6950 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6951 			if (rc)
6952 				goto err_out;
6953 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6954 				    ring->fw_ring_id);
6955 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6956 		}
6957 	}
6958 
6959 	if (agg_rings) {
6960 		type = HWRM_RING_ALLOC_AGG;
6961 		for (i = 0; i < bp->rx_nr_rings; i++) {
6962 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6963 			struct bnxt_ring_struct *ring =
6964 						&rxr->rx_agg_ring_struct;
6965 			u32 grp_idx = ring->grp_idx;
6966 			u32 map_idx = grp_idx + bp->rx_nr_rings;
6967 
6968 			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6969 			if (rc)
6970 				goto err_out;
6971 
6972 			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
6973 				    ring->fw_ring_id);
6974 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
6975 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6976 			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
6977 		}
6978 	}
6979 err_out:
6980 	return rc;
6981 }
6982 
6983 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6984 				   struct bnxt_ring_struct *ring,
6985 				   u32 ring_type, int cmpl_ring_id)
6986 {
6987 	struct hwrm_ring_free_output *resp;
6988 	struct hwrm_ring_free_input *req;
6989 	u16 error_code = 0;
6990 	int rc;
6991 
6992 	if (BNXT_NO_FW_ACCESS(bp))
6993 		return 0;
6994 
6995 	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6996 	if (rc)
6997 		goto exit;
6998 
6999 	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7000 	req->ring_type = ring_type;
7001 	req->ring_id = cpu_to_le16(ring->fw_ring_id);
7002 
7003 	resp = hwrm_req_hold(bp, req);
7004 	rc = hwrm_req_send(bp, req);
7005 	error_code = le16_to_cpu(resp->error_code);
7006 	hwrm_req_drop(bp, req);
7007 exit:
7008 	if (rc || error_code) {
7009 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7010 			   ring_type, rc, error_code);
7011 		return -EIO;
7012 	}
7013 	return 0;
7014 }
7015 
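/* Free the TX, RX and aggregation rings first, then disable interrupts and
 * free the completion/NQ rings last, since the IRQ doorbells stop working
 * once those are freed.
 */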
7016 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7017 {
7018 	u32 type;
7019 	int i;
7020 
7021 	if (!bp->bnapi)
7022 		return;
7023 
7024 	for (i = 0; i < bp->tx_nr_rings; i++) {
7025 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7026 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7027 
7028 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7029 			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
7030 
7031 			hwrm_ring_free_send_msg(bp, ring,
7032 						RING_FREE_REQ_RING_TYPE_TX,
7033 						close_path ? cmpl_ring_id :
7034 						INVALID_HW_RING_ID);
7035 			ring->fw_ring_id = INVALID_HW_RING_ID;
7036 		}
7037 	}
7038 
7039 	for (i = 0; i < bp->rx_nr_rings; i++) {
7040 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7041 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7042 		u32 grp_idx = rxr->bnapi->index;
7043 
7044 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7045 			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7046 
7047 			hwrm_ring_free_send_msg(bp, ring,
7048 						RING_FREE_REQ_RING_TYPE_RX,
7049 						close_path ? cmpl_ring_id :
7050 						INVALID_HW_RING_ID);
7051 			ring->fw_ring_id = INVALID_HW_RING_ID;
7052 			bp->grp_info[grp_idx].rx_fw_ring_id =
7053 				INVALID_HW_RING_ID;
7054 		}
7055 	}
7056 
7057 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7058 		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7059 	else
7060 		type = RING_FREE_REQ_RING_TYPE_RX;
7061 	for (i = 0; i < bp->rx_nr_rings; i++) {
7062 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7063 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7064 		u32 grp_idx = rxr->bnapi->index;
7065 
7066 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7067 			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7068 
7069 			hwrm_ring_free_send_msg(bp, ring, type,
7070 						close_path ? cmpl_ring_id :
7071 						INVALID_HW_RING_ID);
7072 			ring->fw_ring_id = INVALID_HW_RING_ID;
7073 			bp->grp_info[grp_idx].agg_fw_ring_id =
7074 				INVALID_HW_RING_ID;
7075 		}
7076 	}
7077 
7078 	/* The completion rings are about to be freed.  After that, the
7079 	 * IRQ doorbells will no longer work.  So we need to disable the
7080 	 * IRQs here.
7081 	 */
7082 	bnxt_disable_int_sync(bp);
7083 
7084 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7085 		type = RING_FREE_REQ_RING_TYPE_NQ;
7086 	else
7087 		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7088 	for (i = 0; i < bp->cp_nr_rings; i++) {
7089 		struct bnxt_napi *bnapi = bp->bnapi[i];
7090 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7091 		struct bnxt_ring_struct *ring;
7092 		int j;
7093 
7094 		for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
7095 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
7096 
7097 			ring = &cpr2->cp_ring_struct;
7098 			if (ring->fw_ring_id == INVALID_HW_RING_ID)
7099 				continue;
7100 			hwrm_ring_free_send_msg(bp, ring,
7101 						RING_FREE_REQ_RING_TYPE_L2_CMPL,
7102 						INVALID_HW_RING_ID);
7103 			ring->fw_ring_id = INVALID_HW_RING_ID;
7104 		}
7105 		ring = &cpr->cp_ring_struct;
7106 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7107 			hwrm_ring_free_send_msg(bp, ring, type,
7108 						INVALID_HW_RING_ID);
7109 			ring->fw_ring_id = INVALID_HW_RING_ID;
7110 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7111 		}
7112 	}
7113 }
7114 
7115 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7116 			     bool shared);
7117 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7118 			   bool shared);
7119 
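/* Descriptive summary (added): refresh the cached resource reservations
 * from HWRM_FUNC_QCFG.  On P5+ chips, if fewer completion rings were
 * allocated than the RX + TX rings need, trim the RX/TX counts to fit.
 */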
7120 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7121 {
7122 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7123 	struct hwrm_func_qcfg_output *resp;
7124 	struct hwrm_func_qcfg_input *req;
7125 	int rc;
7126 
7127 	if (bp->hwrm_spec_code < 0x10601)
7128 		return 0;
7129 
7130 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7131 	if (rc)
7132 		return rc;
7133 
7134 	req->fid = cpu_to_le16(0xffff);
7135 	resp = hwrm_req_hold(bp, req);
7136 	rc = hwrm_req_send(bp, req);
7137 	if (rc) {
7138 		hwrm_req_drop(bp, req);
7139 		return rc;
7140 	}
7141 
7142 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7143 	if (BNXT_NEW_RM(bp)) {
7144 		u16 cp, stats;
7145 
7146 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7147 		hw_resc->resv_hw_ring_grps =
7148 			le32_to_cpu(resp->alloc_hw_ring_grps);
7149 		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7150 		hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7151 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
7152 		stats = le16_to_cpu(resp->alloc_stat_ctx);
7153 		hw_resc->resv_irqs = cp;
7154 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7155 			int rx = hw_resc->resv_rx_rings;
7156 			int tx = hw_resc->resv_tx_rings;
7157 
7158 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
7159 				rx >>= 1;
7160 			if (cp < (rx + tx)) {
7161 				rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7162 				if (rc)
7163 					goto get_rings_exit;
7164 				if (bp->flags & BNXT_FLAG_AGG_RINGS)
7165 					rx <<= 1;
7166 				hw_resc->resv_rx_rings = rx;
7167 				hw_resc->resv_tx_rings = tx;
7168 			}
7169 			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7170 			hw_resc->resv_hw_ring_grps = rx;
7171 		}
7172 		hw_resc->resv_cp_rings = cp;
7173 		hw_resc->resv_stat_ctxs = stats;
7174 	}
7175 get_rings_exit:
7176 	hwrm_req_drop(bp, req);
7177 	return rc;
7178 }
7179 
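/* Read the number of TX rings currently allocated to the function @fid. */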
7180 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7181 {
7182 	struct hwrm_func_qcfg_output *resp;
7183 	struct hwrm_func_qcfg_input *req;
7184 	int rc;
7185 
7186 	if (bp->hwrm_spec_code < 0x10601)
7187 		return 0;
7188 
7189 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7190 	if (rc)
7191 		return rc;
7192 
7193 	req->fid = cpu_to_le16(fid);
7194 	resp = hwrm_req_hold(bp, req);
7195 	rc = hwrm_req_send(bp, req);
7196 	if (!rc)
7197 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7198 
7199 	hwrm_req_drop(bp, req);
7200 	return rc;
7201 }
7202 
7203 static bool bnxt_rfs_supported(struct bnxt *bp);
7204 
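/* Build, but do not send, a HWRM_FUNC_CFG request reserving the PF
 * resources in @hwr.  Each non-zero count sets the corresponding enables
 * bit.  Returns NULL if the request cannot be initialized.
 */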
7205 static struct hwrm_func_cfg_input *
7206 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7207 {
7208 	struct hwrm_func_cfg_input *req;
7209 	u32 enables = 0;
7210 
7211 	if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7212 		return NULL;
7213 
7214 	req->fid = cpu_to_le16(0xffff);
7215 	enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7216 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7217 	if (BNXT_NEW_RM(bp)) {
7218 		enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7219 		enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7220 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7221 			enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7222 			enables |= hwr->cp_p5 ?
7223 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7224 		} else {
7225 			enables |= hwr->cp ?
7226 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7227 			enables |= hwr->grp ?
7228 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7229 		}
7230 		enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7231 		enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7232 					  0;
7233 		req->num_rx_rings = cpu_to_le16(hwr->rx);
7234 		req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7235 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7236 			req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7237 			req->num_msix = cpu_to_le16(hwr->cp);
7238 		} else {
7239 			req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7240 			req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7241 		}
7242 		req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7243 		req->num_vnics = cpu_to_le16(hwr->vnic);
7244 	}
7245 	req->enables = cpu_to_le32(enables);
7246 	return req;
7247 }
7248 
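/* VF counterpart of __bnxt_hwrm_reserve_pf_rings() using
 * HWRM_FUNC_VF_CFG.  The caller either sends the request as is to
 * reserve the resources, or adds the *_ASSETS_TEST flags first to only
 * check availability.
 */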
7249 static struct hwrm_func_vf_cfg_input *
7250 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7251 {
7252 	struct hwrm_func_vf_cfg_input *req;
7253 	u32 enables = 0;
7254 
7255 	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7256 		return NULL;
7257 
7258 	enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7259 	enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7260 			     FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7261 	enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7262 	enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7263 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7264 		enables |= hwr->cp_p5 ?
7265 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7266 	} else {
7267 		enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7268 		enables |= hwr->grp ?
7269 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7270 	}
7271 	enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7272 	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7273 
7274 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7275 	req->num_tx_rings = cpu_to_le16(hwr->tx);
7276 	req->num_rx_rings = cpu_to_le16(hwr->rx);
7277 	req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7278 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7279 		req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7280 	} else {
7281 		req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7282 		req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7283 	}
7284 	req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7285 	req->num_vnics = cpu_to_le16(hwr->vnic);
7286 
7287 	req->enables = cpu_to_le32(enables);
7288 	return req;
7289 }
7290 
7291 static int
7292 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7293 {
7294 	struct hwrm_func_cfg_input *req;
7295 	int rc;
7296 
7297 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7298 	if (!req)
7299 		return -ENOMEM;
7300 
7301 	if (!req->enables) {
7302 		hwrm_req_drop(bp, req);
7303 		return 0;
7304 	}
7305 
7306 	rc = hwrm_req_send(bp, req);
7307 	if (rc)
7308 		return rc;
7309 
7310 	if (bp->hwrm_spec_code < 0x10601)
7311 		bp->hw_resc.resv_tx_rings = hwr->tx;
7312 
7313 	return bnxt_hwrm_get_rings(bp);
7314 }
7315 
7316 static int
7317 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7318 {
7319 	struct hwrm_func_vf_cfg_input *req;
7320 	int rc;
7321 
7322 	if (!BNXT_NEW_RM(bp)) {
7323 		bp->hw_resc.resv_tx_rings = hwr->tx;
7324 		return 0;
7325 	}
7326 
7327 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7328 	if (!req)
7329 		return -ENOMEM;
7330 
7331 	rc = hwrm_req_send(bp, req);
7332 	if (rc)
7333 		return rc;
7334 
7335 	return bnxt_hwrm_get_rings(bp);
7336 }
7337 
7338 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7339 {
7340 	if (BNXT_PF(bp))
7341 		return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7342 	else
7343 		return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7344 }
7345 
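/* NQs (P5+) or completion rings (older chips) in use, including the
 * MSI-X vectors set aside for the ULP (RoCE) driver.
 */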
7346 int bnxt_nq_rings_in_use(struct bnxt *bp)
7347 {
7348 	return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7349 }
7350 
7351 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7352 {
7353 	int cp;
7354 
7355 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7356 		return bnxt_nq_rings_in_use(bp);
7357 
7358 	cp = bp->tx_nr_rings + bp->rx_nr_rings;
7359 	return cp;
7360 }
7361 
7362 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7363 {
7364 	return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7365 }
7366 
7367 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7368 {
7369 	if (!hwr->grp)
7370 		return 0;
7371 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7372 		int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7373 
7374 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7375 			rss_ctx *= hwr->vnic;
7376 		return rss_ctx;
7377 	}
7378 	if (BNXT_VF(bp))
7379 		return BNXT_VF_MAX_RSS_CTX;
7380 	if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7381 		return hwr->grp + 1;
7382 	return 1;
7383 }
7384 
7385 /* Check if a default RSS map needs to be set up.  This function is only
7386  * used on older firmware that does not require reserving RX rings.
7387  */
7388 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7389 {
7390 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7391 
7392 	/* The RSS map is only valid when the RX ring count equals resv_rx_rings */
7393 	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7394 		hw_resc->resv_rx_rings = bp->rx_nr_rings;
7395 		if (!netif_is_rxfh_configured(bp->dev))
7396 			bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7397 	}
7398 }
7399 
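/* VNICs needed: 1 by default; with RFS enabled, either 2 plus the user
 * RSS contexts (ntuple-VNIC capable chips) or one per RX ring plus 1
 * (pre-P5 chips).
 */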
7400 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7401 {
7402 	if (bp->flags & BNXT_FLAG_RFS) {
7403 		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7404 			return 2 + bp->num_rss_ctx;
7405 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7406 			return rx_rings + 1;
7407 	}
7408 	return 1;
7409 }
7410 
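/* Return true if the resources currently reserved with the firmware no
 * longer match what the configured ring counts require.
 */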
7411 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7412 {
7413 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7414 	int cp = bnxt_cp_rings_in_use(bp);
7415 	int nq = bnxt_nq_rings_in_use(bp);
7416 	int rx = bp->rx_nr_rings, stat;
7417 	int vnic, grp = rx;
7418 
7419 	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7420 	    bp->hwrm_spec_code >= 0x10601)
7421 		return true;
7422 
7423 	/* Old firmware does not need RX ring reservations, but we still
7424 	 * need to set up a default RSS map when needed.  With new firmware
7425 	 * we go through RX ring reservations first and then set up the
7426 	 * RSS map for the successfully reserved RX rings when needed.
7427 	 */
7428 	if (!BNXT_NEW_RM(bp)) {
7429 		bnxt_check_rss_tbl_no_rmgr(bp);
7430 		return false;
7431 	}
7432 
7433 	vnic = bnxt_get_total_vnics(bp, rx);
7434 
7435 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7436 		rx <<= 1;
7437 	stat = bnxt_get_func_stat_ctxs(bp);
7438 	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7439 	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7440 	    (hw_resc->resv_hw_ring_grps != grp &&
7441 	     !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7442 		return true;
7443 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7444 	    hw_resc->resv_irqs != nq)
7445 		return true;
7446 	return false;
7447 }
7448 
7449 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7450 {
7451 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7452 
7453 	hwr->tx = hw_resc->resv_tx_rings;
7454 	if (BNXT_NEW_RM(bp)) {
7455 		hwr->rx = hw_resc->resv_rx_rings;
7456 		hwr->cp = hw_resc->resv_irqs;
7457 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7458 			hwr->cp_p5 = hw_resc->resv_cp_rings;
7459 		hwr->grp = hw_resc->resv_hw_ring_grps;
7460 		hwr->vnic = hw_resc->resv_vnics;
7461 		hwr->stat = hw_resc->resv_stat_ctxs;
7462 		hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7463 	}
7464 }
7465 
7466 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7467 {
7468 	return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7469 	       hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7470 }
7471 
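/* Descriptive summary (added): reserve rings and related resources with
 * the firmware and adjust the driver's ring counts to what was actually
 * granted.  If not all RX rings could be reserved, this may disable the
 * aggregation rings (and LRO) or fall back to the default RSS map.
 */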
7472 static int __bnxt_reserve_rings(struct bnxt *bp)
7473 {
7474 	struct bnxt_hw_rings hwr = {0};
7475 	int cp = bp->cp_nr_rings;
7476 	int rx_rings, rc;
7477 	int ulp_msix = 0;
7478 	bool sh = false;
7479 	int tx_cp;
7480 
7481 	if (!bnxt_need_reserve_rings(bp))
7482 		return 0;
7483 
7484 	if (!bnxt_ulp_registered(bp->edev)) {
7485 		ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7486 		if (!ulp_msix)
7487 			bnxt_set_ulp_stat_ctxs(bp, 0);
7488 
7489 		if (ulp_msix > bp->ulp_num_msix_want)
7490 			ulp_msix = bp->ulp_num_msix_want;
7491 		hwr.cp = cp + ulp_msix;
7492 	} else {
7493 		hwr.cp = bnxt_nq_rings_in_use(bp);
7494 	}
7495 
7496 	hwr.tx = bp->tx_nr_rings;
7497 	hwr.rx = bp->rx_nr_rings;
7498 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7499 		sh = true;
7500 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7501 		hwr.cp_p5 = hwr.rx + hwr.tx;
7502 
7503 	hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7504 
7505 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7506 		hwr.rx <<= 1;
7507 	hwr.grp = bp->rx_nr_rings;
7508 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7509 	hwr.stat = bnxt_get_func_stat_ctxs(bp);
7510 
7511 	rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7512 	if (rc)
7513 		return rc;
7514 
7515 	bnxt_copy_reserved_rings(bp, &hwr);
7516 
7517 	rx_rings = hwr.rx;
7518 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7519 		if (hwr.rx >= 2) {
7520 			rx_rings = hwr.rx >> 1;
7521 		} else {
7522 			if (netif_running(bp->dev))
7523 				return -ENOMEM;
7524 
7525 			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7526 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7527 			bp->dev->hw_features &= ~NETIF_F_LRO;
7528 			bp->dev->features &= ~NETIF_F_LRO;
7529 			bnxt_set_ring_params(bp);
7530 		}
7531 	}
7532 	rx_rings = min_t(int, rx_rings, hwr.grp);
7533 	hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
7534 	if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
7535 		hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
7536 	hwr.cp = min_t(int, hwr.cp, hwr.stat);
7537 	rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
7538 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
7539 		hwr.rx = rx_rings << 1;
7540 	tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
7541 	hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
7542 	bp->tx_nr_rings = hwr.tx;
7543 
7544 	/* If we cannot reserve all the RX rings, reset the RSS map only
7545 	 * if absolutely necessary.
7546 	 */
7547 	if (rx_rings != bp->rx_nr_rings) {
7548 		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
7549 			    rx_rings, bp->rx_nr_rings);
7550 		if (netif_is_rxfh_configured(bp->dev) &&
7551 		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
7552 		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
7553 		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
7554 			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
7555 			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
7556 		}
7557 	}
7558 	bp->rx_nr_rings = rx_rings;
7559 	bp->cp_nr_rings = hwr.cp;
7560 
7561 	if (!bnxt_rings_ok(bp, &hwr))
7562 		return -ENOMEM;
7563 
7564 	if (!netif_is_rxfh_configured(bp->dev))
7565 		bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7566 
7567 	if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
7568 		int resv_msix, resv_ctx, ulp_ctxs;
7569 		struct bnxt_hw_resc *hw_resc;
7570 
7571 		hw_resc = &bp->hw_resc;
7572 		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
7573 		ulp_msix = min_t(int, resv_msix, ulp_msix);
7574 		bnxt_set_ulp_msix_num(bp, ulp_msix);
7575 		resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
7576 		ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
7577 		bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
7578 	}
7579 
7580 	return rc;
7581 }
7582 
7583 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7584 {
7585 	struct hwrm_func_vf_cfg_input *req;
7586 	u32 flags;
7587 
7588 	if (!BNXT_NEW_RM(bp))
7589 		return 0;
7590 
7591 	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
	if (!req)
		return -ENOMEM;

7592 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
7593 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7594 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7595 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7596 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
7597 		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
7598 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7599 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7600 
7601 	req->flags = cpu_to_le32(flags);
7602 	return hwrm_req_send_silent(bp, req);
7603 }
7604 
7605 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7606 {
7607 	struct hwrm_func_cfg_input *req;
7608 	u32 flags;
7609 
7610 	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
	if (!req)
		return -ENOMEM;

7611 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
7612 	if (BNXT_NEW_RM(bp)) {
7613 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7614 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7615 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7616 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
7617 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7618 			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
7619 				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
7620 		else
7621 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7622 	}
7623 
7624 	req->flags = cpu_to_le32(flags);
7625 	return hwrm_req_send_silent(bp, req);
7626 }
7627 
7628 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7629 {
7630 	if (bp->hwrm_spec_code < 0x10801)
7631 		return 0;
7632 
7633 	if (BNXT_PF(bp))
7634 		return bnxt_hwrm_check_pf_rings(bp, hwr);
7635 
7636 	return bnxt_hwrm_check_vf_rings(bp, hwr);
7637 }
7638 
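/* Read the interrupt coalescing limits.  Legacy defaults are assumed
 * first and then overridden with HWRM_RING_AGGINT_QCAPS values if the
 * firmware (HWRM spec 1.9.2 or newer) supports the command.
 */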
7639 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
7640 {
7641 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7642 	struct hwrm_ring_aggint_qcaps_output *resp;
7643 	struct hwrm_ring_aggint_qcaps_input *req;
7644 	int rc;
7645 
7646 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
7647 	coal_cap->num_cmpl_dma_aggr_max = 63;
7648 	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
7649 	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
7650 	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
7651 	coal_cap->int_lat_tmr_min_max = 65535;
7652 	coal_cap->int_lat_tmr_max_max = 65535;
7653 	coal_cap->num_cmpl_aggr_int_max = 65535;
7654 	coal_cap->timer_units = 80;
7655 
7656 	if (bp->hwrm_spec_code < 0x10902)
7657 		return;
7658 
7659 	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
7660 		return;
7661 
7662 	resp = hwrm_req_hold(bp, req);
7663 	rc = hwrm_req_send_silent(bp, req);
7664 	if (!rc) {
7665 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
7666 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
7667 		coal_cap->num_cmpl_dma_aggr_max =
7668 			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
7669 		coal_cap->num_cmpl_dma_aggr_during_int_max =
7670 			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
7671 		coal_cap->cmpl_aggr_dma_tmr_max =
7672 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
7673 		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
7674 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
7675 		coal_cap->int_lat_tmr_min_max =
7676 			le16_to_cpu(resp->int_lat_tmr_min_max);
7677 		coal_cap->int_lat_tmr_max_max =
7678 			le16_to_cpu(resp->int_lat_tmr_max_max);
7679 		coal_cap->num_cmpl_aggr_int_max =
7680 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
7681 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
7682 	}
7683 	hwrm_req_drop(bp, req);
7684 }
7685 
7686 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
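/* Convert microseconds to hardware coalescing timer units.  The unit
 * size comes from the firmware (timer_units, apparently in nanoseconds;
 * 80 by default).
 */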
7687 {
7688 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7689 
7690 	return usec * 1000 / coal_cap->timer_units;
7691 }
7692 
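/* Fill a RING_CMPL_RING_CFG_AGGINT_PARAMS request from @hw_coal,
 * clamping each value to the limits read by
 * bnxt_hwrm_coal_params_qcaps().  The min latency timer is set to half,
 * and the DMA buffer timer to a quarter, of the interrupt latency timer.
 */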
7693 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
7694 	struct bnxt_coal *hw_coal,
7695 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7696 {
7697 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7698 	u16 val, tmr, max, flags = hw_coal->flags;
7699 	u32 cmpl_params = coal_cap->cmpl_params;
7700 
7701 	max = hw_coal->bufs_per_record * 128;
7702 	if (hw_coal->budget)
7703 		max = hw_coal->bufs_per_record * hw_coal->budget;
7704 	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
7705 
7706 	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
7707 	req->num_cmpl_aggr_int = cpu_to_le16(val);
7708 
7709 	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
7710 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
7711 
7712 	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
7713 		      coal_cap->num_cmpl_dma_aggr_during_int_max);
7714 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
7715 
7716 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
7717 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
7718 	req->int_lat_tmr_max = cpu_to_le16(tmr);
7719 
7720 	/* min timer set to 1/2 of interrupt timer */
7721 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
7722 		val = tmr / 2;
7723 		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
7724 		req->int_lat_tmr_min = cpu_to_le16(val);
7725 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7726 	}
7727 
7728 	/* buf timer set to 1/4 of interrupt timer */
7729 	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
7730 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
7731 
7732 	if (cmpl_params &
7733 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
7734 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
7735 		val = clamp_t(u16, tmr, 1,
7736 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
7737 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
7738 		req->enables |=
7739 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
7740 	}
7741 
7742 	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
7743 	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
7744 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
7745 	req->flags = cpu_to_le16(flags);
7746 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
7747 }
7748 
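/* Set the NQ's minimum interrupt latency timer to half of the ring's
 * coalescing ticks.  This is the only coalescing parameter programmed
 * on a notification queue.
 */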
7749 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
7750 				   struct bnxt_coal *hw_coal)
7751 {
7752 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
7753 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7754 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7755 	u32 nq_params = coal_cap->nq_params;
7756 	u16 tmr;
7757 	int rc;
7758 
7759 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
7760 		return 0;
7761 
7762 	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7763 	if (rc)
7764 		return rc;
7765 
7766 	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
7767 	req->flags =
7768 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
7769 
7770 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
7771 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
7772 	req->int_lat_tmr_min = cpu_to_le16(tmr);
7773 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7774 	return hwrm_req_send(bp, req);
7775 }
7776 
7777 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
7778 {
7779 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
7780 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7781 	struct bnxt_coal coal;
7782 	int rc;
7783 
7784 	/* Tick values in microseconds.
7785 	 * 1 coal_buf x bufs_per_record = 1 completion record.
7786 	 */
7787 	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
7788 
7789 	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
7790 	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
7791 
7792 	if (!bnapi->rx_ring)
7793 		return -ENODEV;
7794 
7795 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7796 	if (rc)
7797 		return rc;
7798 
7799 	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
7800 
7801 	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
7802 
7803 	return hwrm_req_send(bp, req_rx);
7804 }
7805 
7806 static int
7807 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7808 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7809 {
7810 	u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
7811 
7812 	req->ring_id = cpu_to_le16(ring_id);
7813 	return hwrm_req_send(bp, req);
7814 }
7815 
7816 static int
7817 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7818 		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7819 {
7820 	struct bnxt_tx_ring_info *txr;
7821 	int i, rc;
7822 
7823 	bnxt_for_each_napi_tx(i, bnapi, txr) {
7824 		u16 ring_id;
7825 
7826 		ring_id = bnxt_cp_ring_for_tx(bp, txr);
7827 		req->ring_id = cpu_to_le16(ring_id);
7828 		rc = hwrm_req_send(bp, req);
7829 		if (rc)
7830 			return rc;
7831 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7832 			return 0;
7833 	}
7834 	return 0;
7835 }
7836 
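/* Program the RX and TX coalescing settings on every ring.  On P5+
 * chips a NAPI may have both an RX and a TX completion ring, in which
 * case both are programmed, followed by the NQ timer itself.
 */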
7837 int bnxt_hwrm_set_coal(struct bnxt *bp)
7838 {
7839 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
7840 	int i, rc;
7841 
7842 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7843 	if (rc)
7844 		return rc;
7845 
7846 	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7847 	if (rc) {
7848 		hwrm_req_drop(bp, req_rx);
7849 		return rc;
7850 	}
7851 
7852 	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
7853 	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
7854 
7855 	hwrm_req_hold(bp, req_rx);
7856 	hwrm_req_hold(bp, req_tx);
7857 	for (i = 0; i < bp->cp_nr_rings; i++) {
7858 		struct bnxt_napi *bnapi = bp->bnapi[i];
7859 		struct bnxt_coal *hw_coal;
7860 
7861 		if (!bnapi->rx_ring)
7862 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7863 		else
7864 			rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
7865 		if (rc)
7866 			break;
7867 
7868 		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7869 			continue;
7870 
7871 		if (bnapi->rx_ring && bnapi->tx_ring[0]) {
7872 			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7873 			if (rc)
7874 				break;
7875 		}
7876 		if (bnapi->rx_ring)
7877 			hw_coal = &bp->rx_coal;
7878 		else
7879 			hw_coal = &bp->tx_coal;
7880 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
7881 	}
7882 	hwrm_req_drop(bp, req_rx);
7883 	hwrm_req_drop(bp, req_tx);
7884 	return rc;
7885 }
7886 
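/* Free all statistics contexts.  On older firmware (major version <= 20)
 * a HWRM_STAT_CTX_CLR_STATS request is sent first so that the counters
 * are cleared before the context is freed.
 */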
7887 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
7888 {
7889 	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
7890 	struct hwrm_stat_ctx_free_input *req;
7891 	int i;
7892 
7893 	if (!bp->bnapi)
7894 		return;
7895 
7896 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7897 		return;
7898 
7899 	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
7900 		return;
7901 	if (BNXT_FW_MAJ(bp) <= 20) {
7902 		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
7903 			hwrm_req_drop(bp, req);
7904 			return;
7905 		}
7906 		hwrm_req_hold(bp, req0);
7907 	}
7908 	hwrm_req_hold(bp, req);
7909 	for (i = 0; i < bp->cp_nr_rings; i++) {
7910 		struct bnxt_napi *bnapi = bp->bnapi[i];
7911 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7912 
7913 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
7914 			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
7915 			if (req0) {
7916 				req0->stat_ctx_id = req->stat_ctx_id;
7917 				hwrm_req_send(bp, req0);
7918 			}
7919 			hwrm_req_send(bp, req);
7920 
7921 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
7922 		}
7923 	}
7924 	hwrm_req_drop(bp, req);
7925 	if (req0)
7926 		hwrm_req_drop(bp, req0);
7927 }
7928 
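/* Allocate a statistics context for each completion ring and give the
 * firmware the DMA address and update period for the counters.
 */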
7929 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
7930 {
7931 	struct hwrm_stat_ctx_alloc_output *resp;
7932 	struct hwrm_stat_ctx_alloc_input *req;
7933 	int rc, i;
7934 
7935 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7936 		return 0;
7937 
7938 	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
7939 	if (rc)
7940 		return rc;
7941 
7942 	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
7943 	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
7944 
7945 	resp = hwrm_req_hold(bp, req);
7946 	for (i = 0; i < bp->cp_nr_rings; i++) {
7947 		struct bnxt_napi *bnapi = bp->bnapi[i];
7948 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7949 
7950 		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
7951 
7952 		rc = hwrm_req_send(bp, req);
7953 		if (rc)
7954 			break;
7955 
7956 		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
7957 
7958 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
7959 	}
7960 	hwrm_req_drop(bp, req);
7961 	return rc;
7962 }
7963 
7964 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
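/* Query the current function configuration and cache items such as the
 * VF VLAN, DCBX/LLDP agent capabilities, bridge (EVB) mode, maximum MTU
 * and the doorbell BAR offset/size.
 */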
7965 {
7966 	struct hwrm_func_qcfg_output *resp;
7967 	struct hwrm_func_qcfg_input *req;
7968 	u16 flags;
7969 	int rc;
7970 
7971 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7972 	if (rc)
7973 		return rc;
7974 
7975 	req->fid = cpu_to_le16(0xffff);
7976 	resp = hwrm_req_hold(bp, req);
7977 	rc = hwrm_req_send(bp, req);
7978 	if (rc)
7979 		goto func_qcfg_exit;
7980 
7981 #ifdef CONFIG_BNXT_SRIOV
7982 	if (BNXT_VF(bp)) {
7983 		struct bnxt_vf_info *vf = &bp->vf;
7984 
7985 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
7986 	} else {
7987 		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
7988 	}
7989 #endif
7990 	flags = le16_to_cpu(resp->flags);
7991 	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
7992 		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
7993 		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
7994 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
7995 			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
7996 	}
7997 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
7998 		bp->flags |= BNXT_FLAG_MULTI_HOST;
7999 
8000 	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8001 		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8002 
8003 	switch (resp->port_partition_type) {
8004 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8005 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8006 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8007 		bp->port_partition_type = resp->port_partition_type;
8008 		break;
8009 	}
8010 	if (bp->hwrm_spec_code < 0x10707 ||
8011 	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8012 		bp->br_mode = BRIDGE_MODE_VEB;
8013 	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8014 		bp->br_mode = BRIDGE_MODE_VEPA;
8015 	else
8016 		bp->br_mode = BRIDGE_MODE_UNDEF;
8017 
8018 	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8019 	if (!bp->max_mtu)
8020 		bp->max_mtu = BNXT_MAX_MTU;
8021 
8022 	if (bp->db_size)
8023 		goto func_qcfg_exit;
8024 
8025 	bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8026 	if (BNXT_CHIP_P5(bp)) {
8027 		if (BNXT_PF(bp))
8028 			bp->db_offset = DB_PF_OFFSET_P5;
8029 		else
8030 			bp->db_offset = DB_VF_OFFSET_P5;
8031 	}
8032 	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8033 				 1024);
8034 	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8035 	    bp->db_size <= bp->db_offset)
8036 		bp->db_size = pci_resource_len(bp->pdev, 2);
8037 
8038 func_qcfg_exit:
8039 	hwrm_req_drop(bp, req);
8040 	return rc;
8041 }
8042 
8043 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
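/* Record the firmware's context initializer for this type: an init byte
 * value and an offset given in 4-byte units (converted to bytes here).
 * A type without a valid init bit needs no initialization.
 */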
8044 				      u8 init_val, u8 init_offset,
8045 				      bool init_mask_set)
8046 {
8047 	ctxm->init_value = init_val;
8048 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8049 	if (init_mask_set)
8050 		ctxm->init_offset = init_offset * 4;
8051 	else
8052 		ctxm->init_value = 0;
8053 }
8054 
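/* Allocate the per-instance page table info for every valid context
 * type; a type with an instance bitmap gets one entry per set bit.
 */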
8055 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8056 {
8057 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8058 	u16 type;
8059 
8060 	for (type = 0; type < ctx_max; type++) {
8061 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8062 		int n = 1;
8063 
8064 		if (!ctxm->max_entries)
8065 			continue;
8066 
8067 		if (ctxm->instance_bmap)
8068 			n = hweight32(ctxm->instance_bmap);
8069 		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8070 		if (!ctxm->pg_info)
8071 			return -ENOMEM;
8072 	}
8073 	return 0;
8074 }
8075 
8076 #define BNXT_CTX_INIT_VALID(flags)	\
8077 	(!!((flags) &			\
8078 	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8079 
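/* Discover backing store requirements using the V2 API, walking the
 * firmware's list of context types via next_valid_type until
 * BNXT_CTX_V2_MAX is reached.
 */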
8080 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8081 {
8082 	struct hwrm_func_backing_store_qcaps_v2_output *resp;
8083 	struct hwrm_func_backing_store_qcaps_v2_input *req;
8084 	struct bnxt_ctx_mem_info *ctx;
8085 	u16 type;
8086 	int rc;
8087 
8088 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8089 	if (rc)
8090 		return rc;
8091 
8092 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8093 	if (!ctx)
8094 		return -ENOMEM;
8095 	bp->ctx = ctx;
8096 
8097 	resp = hwrm_req_hold(bp, req);
8098 
8099 	for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8100 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8101 		u8 init_val, init_off, i;
8102 		__le32 *p;
8103 		u32 flags;
8104 
8105 		req->type = cpu_to_le16(type);
8106 		rc = hwrm_req_send(bp, req);
8107 		if (rc)
8108 			goto ctx_done;
8109 		flags = le32_to_cpu(resp->flags);
8110 		type = le16_to_cpu(resp->next_valid_type);
8111 		if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
8112 			continue;
8113 
8114 		ctxm->type = le16_to_cpu(resp->type);
8115 		ctxm->entry_size = le16_to_cpu(resp->entry_size);
8116 		ctxm->flags = flags;
8117 		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8118 		ctxm->entry_multiple = resp->entry_multiple;
8119 		ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
8120 		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8121 		init_val = resp->ctx_init_value;
8122 		init_off = resp->ctx_init_offset;
8123 		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8124 					  BNXT_CTX_INIT_VALID(flags));
8125 		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8126 					      BNXT_MAX_SPLIT_ENTRY);
8127 		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8128 		     i++, p++)
8129 			ctxm->split[i] = le32_to_cpu(*p);
8130 	}
8131 	rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8132 
8133 ctx_done:
8134 	hwrm_req_drop(bp, req);
8135 	return rc;
8136 }
8137 
8138 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8139 {
8140 	struct hwrm_func_backing_store_qcaps_output *resp;
8141 	struct hwrm_func_backing_store_qcaps_input *req;
8142 	int rc;
8143 
8144 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
8145 		return 0;
8146 
8147 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8148 		return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8149 
8150 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8151 	if (rc)
8152 		return rc;
8153 
8154 	resp = hwrm_req_hold(bp, req);
8155 	rc = hwrm_req_send_silent(bp, req);
8156 	if (!rc) {
8157 		struct bnxt_ctx_mem_type *ctxm;
8158 		struct bnxt_ctx_mem_info *ctx;
8159 		u8 init_val, init_idx = 0;
8160 		u16 init_mask;
8161 
8162 		ctx = bp->ctx;
8163 		if (!ctx) {
8164 			ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8165 			if (!ctx) {
8166 				rc = -ENOMEM;
8167 				goto ctx_err;
8168 			}
8169 			bp->ctx = ctx;
8170 		}
8171 		init_val = resp->ctx_kind_initializer;
8172 		init_mask = le16_to_cpu(resp->ctx_init_mask);
8173 
8174 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8175 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8176 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8177 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8178 		ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8179 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8180 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8181 					  (init_mask & (1 << init_idx++)) != 0);
8182 
8183 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8184 		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8185 		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8186 		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8187 		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8188 					  (init_mask & (1 << init_idx++)) != 0);
8189 
8190 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8191 		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8192 		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8193 		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8194 		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8195 					  (init_mask & (1 << init_idx++)) != 0);
8196 
8197 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8198 		ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8199 		ctxm->max_entries = ctxm->vnic_entries +
8200 			le16_to_cpu(resp->vnic_max_ring_table_entries);
8201 		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8202 		bnxt_init_ctx_initializer(ctxm, init_val,
8203 					  resp->vnic_init_offset,
8204 					  (init_mask & (1 << init_idx++)) != 0);
8205 
8206 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8207 		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8208 		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8209 		bnxt_init_ctx_initializer(ctxm, init_val,
8210 					  resp->stat_init_offset,
8211 					  (init_mask & (1 << init_idx++)) != 0);
8212 
8213 		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8214 		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8215 		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8216 		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8217 		ctxm->entry_multiple = resp->tqm_entries_multiple;
8218 		if (!ctxm->entry_multiple)
8219 			ctxm->entry_multiple = 1;
8220 
8221 		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8222 
8223 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8224 		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8225 		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8226 		ctxm->mrav_num_entries_units =
8227 			le16_to_cpu(resp->mrav_num_entries_units);
8228 		bnxt_init_ctx_initializer(ctxm, init_val,
8229 					  resp->mrav_init_offset,
8230 					  (init_mask & (1 << init_idx++)) != 0);
8231 
8232 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8233 		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8234 		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8235 
8236 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8237 		if (!ctx->tqm_fp_rings_count)
8238 			ctx->tqm_fp_rings_count = bp->max_q;
8239 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8240 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8241 
8242 		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8243 		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8244 		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8245 
8246 		rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8247 	} else {
8248 		rc = 0;
8249 	}
8250 ctx_err:
8251 	hwrm_req_drop(bp, req);
8252 	return rc;
8253 }
8254 
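/* Encode the page size and page-table depth of the backing memory and
 * point *pg_dir at the page directory (depth 1 or 2) or directly at the
 * lone data page (depth 0).
 */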
8255 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8256 				  __le64 *pg_dir)
8257 {
8258 	if (!rmem->nr_pages)
8259 		return;
8260 
8261 	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8262 	if (rmem->depth >= 1) {
8263 		if (rmem->depth == 2)
8264 			*pg_attr |= 2;
8265 		else
8266 			*pg_attr |= 1;
8267 		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8268 	} else {
8269 		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8270 	}
8271 }
8272 
8273 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
8274 	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
8275 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
8276 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
8277 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
8278 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8279 
8280 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8281 {
8282 	struct hwrm_func_backing_store_cfg_input *req;
8283 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8284 	struct bnxt_ctx_pg_info *ctx_pg;
8285 	struct bnxt_ctx_mem_type *ctxm;
8286 	void **__req = (void **)&req;
8287 	u32 req_len = sizeof(*req);
8288 	__le32 *num_entries;
8289 	__le64 *pg_dir;
8290 	u32 flags = 0;
8291 	u8 *pg_attr;
8292 	u32 ena;
8293 	int rc;
8294 	int i;
8295 
8296 	if (!ctx)
8297 		return 0;
8298 
8299 	if (req_len > bp->hwrm_max_ext_req_len)
8300 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8301 	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8302 	if (rc)
8303 		return rc;
8304 
8305 	req->enables = cpu_to_le32(enables);
8306 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8307 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8308 		ctx_pg = ctxm->pg_info;
8309 		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8310 		req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8311 		req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8312 		req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8313 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8314 				      &req->qpc_pg_size_qpc_lvl,
8315 				      &req->qpc_page_dir);
8316 
8317 		if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8318 			req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8319 	}
8320 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8321 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8322 		ctx_pg = ctxm->pg_info;
8323 		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8324 		req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8325 		req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8326 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8327 				      &req->srq_pg_size_srq_lvl,
8328 				      &req->srq_page_dir);
8329 	}
8330 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8331 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8332 		ctx_pg = ctxm->pg_info;
8333 		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8334 		req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8335 		req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8336 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8337 				      &req->cq_pg_size_cq_lvl,
8338 				      &req->cq_page_dir);
8339 	}
8340 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8341 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8342 		ctx_pg = ctxm->pg_info;
8343 		req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8344 		req->vnic_num_ring_table_entries =
8345 			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8346 		req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8347 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8348 				      &req->vnic_pg_size_vnic_lvl,
8349 				      &req->vnic_page_dir);
8350 	}
8351 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8352 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8353 		ctx_pg = ctxm->pg_info;
8354 		req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8355 		req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8356 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8357 				      &req->stat_pg_size_stat_lvl,
8358 				      &req->stat_page_dir);
8359 	}
8360 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8361 		u32 units;
8362 
8363 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8364 		ctx_pg = ctxm->pg_info;
8365 		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8366 		units = ctxm->mrav_num_entries_units;
8367 		if (units) {
8368 			u32 num_mr, num_ah = ctxm->mrav_av_entries;
8369 			u32 entries;
8370 
8371 			num_mr = ctx_pg->entries - num_ah;
8372 			entries = ((num_mr / units) << 16) | (num_ah / units);
8373 			req->mrav_num_entries = cpu_to_le32(entries);
8374 			flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8375 		}
8376 		req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8377 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8378 				      &req->mrav_pg_size_mrav_lvl,
8379 				      &req->mrav_page_dir);
8380 	}
8381 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8382 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8383 		ctx_pg = ctxm->pg_info;
8384 		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8385 		req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8386 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8387 				      &req->tim_pg_size_tim_lvl,
8388 				      &req->tim_page_dir);
8389 	}
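	/* Walk the slow-path TQM ring and then the fast-path TQM rings,
	 * advancing the num_entries/pg_attr/pg_dir request fields and the
	 * enables bit in lockstep.
	 */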
8390 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8391 	for (i = 0, num_entries = &req->tqm_sp_num_entries,
8392 	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8393 	     pg_dir = &req->tqm_sp_page_dir,
8394 	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8395 	     ctx_pg = ctxm->pg_info;
8396 	     i < BNXT_MAX_TQM_RINGS;
8397 	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8398 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8399 		if (!(enables & ena))
8400 			continue;
8401 
8402 		req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8403 		*num_entries = cpu_to_le32(ctx_pg->entries);
8404 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8405 	}
8406 	req->flags = cpu_to_le32(flags);
8407 	return hwrm_req_send(bp, req);
8408 }
8409 
8410 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8411 				  struct bnxt_ctx_pg_info *ctx_pg)
8412 {
8413 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8414 
8415 	rmem->page_size = BNXT_PAGE_SIZE;
8416 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
8417 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
8418 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8419 	if (rmem->depth >= 1)
8420 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8421 	return bnxt_alloc_ring(bp, rmem);
8422 }
8423 
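/* Allocate the backing pages for one context memory instance.  Sizes
 * over MAX_CTX_PAGES (or an explicit depth > 1) use a two-level page
 * table: a directory of page-table pages, each mapping up to
 * MAX_CTX_PAGES data pages.
 */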
8424 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8425 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8426 				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
8427 {
8428 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8429 	int rc;
8430 
8431 	if (!mem_size)
8432 		return -EINVAL;
8433 
8434 	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8435 	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8436 		ctx_pg->nr_pages = 0;
8437 		return -EINVAL;
8438 	}
8439 	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8440 		int nr_tbls, i;
8441 
8442 		rmem->depth = 2;
8443 		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8444 					     GFP_KERNEL);
8445 		if (!ctx_pg->ctx_pg_tbl)
8446 			return -ENOMEM;
8447 		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8448 		rmem->nr_pages = nr_tbls;
8449 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8450 		if (rc)
8451 			return rc;
8452 		for (i = 0; i < nr_tbls; i++) {
8453 			struct bnxt_ctx_pg_info *pg_tbl;
8454 
8455 			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8456 			if (!pg_tbl)
8457 				return -ENOMEM;
8458 			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8459 			rmem = &pg_tbl->ring_mem;
8460 			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8461 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8462 			rmem->depth = 1;
8463 			rmem->nr_pages = MAX_CTX_PAGES;
8464 			rmem->ctx_mem = ctxm;
8465 			if (i == (nr_tbls - 1)) {
8466 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8467 
8468 				if (rem)
8469 					rmem->nr_pages = rem;
8470 			}
8471 			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8472 			if (rc)
8473 				break;
8474 		}
8475 	} else {
8476 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8477 		if (rmem->nr_pages > 1 || depth)
8478 			rmem->depth = 1;
8479 		rmem->ctx_mem = ctxm;
8480 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8481 	}
8482 	return rc;
8483 }
8484 
8485 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
8486 				  struct bnxt_ctx_pg_info *ctx_pg)
8487 {
8488 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8489 
8490 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
8491 	    ctx_pg->ctx_pg_tbl) {
8492 		int i, nr_tbls = rmem->nr_pages;
8493 
8494 		for (i = 0; i < nr_tbls; i++) {
8495 			struct bnxt_ctx_pg_info *pg_tbl;
8496 			struct bnxt_ring_mem_info *rmem2;
8497 
8498 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
8499 			if (!pg_tbl)
8500 				continue;
8501 			rmem2 = &pg_tbl->ring_mem;
8502 			bnxt_free_ring(bp, rmem2);
8503 			ctx_pg->ctx_pg_arr[i] = NULL;
8504 			kfree(pg_tbl);
8505 			ctx_pg->ctx_pg_tbl[i] = NULL;
8506 		}
8507 		kfree(ctx_pg->ctx_pg_tbl);
8508 		ctx_pg->ctx_pg_tbl = NULL;
8509 	}
8510 	bnxt_free_ring(bp, rmem);
8511 	ctx_pg->nr_pages = 0;
8512 }
8513 
8514 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
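/* Size and allocate the page tables for every instance of one context
 * type: round the entry count up to the required multiple, clamp it to
 * the firmware's min/max, and pass an initializer only when the type
 * requires one.
 */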
8515 				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
8516 				   u8 pg_lvl)
8517 {
8518 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8519 	int i, rc = 0, n = 1;
8520 	u32 mem_size;
8521 
8522 	if (!ctxm->entry_size || !ctx_pg)
8523 		return -EINVAL;
8524 	if (ctxm->instance_bmap)
8525 		n = hweight32(ctxm->instance_bmap);
8526 	if (ctxm->entry_multiple)
8527 		entries = roundup(entries, ctxm->entry_multiple);
8528 	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
8529 	mem_size = entries * ctxm->entry_size;
8530 	for (i = 0; i < n && !rc; i++) {
8531 		ctx_pg[i].entries = entries;
8532 		rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8533 					    ctxm->init_value ? ctxm : NULL);
8534 	}
8535 	return rc;
8536 }
8537 
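/* Send one HWRM_FUNC_BACKING_STORE_CFG_V2 request per instance of this
 * context type; the BS_CFG_ALL_DONE flag is added to the final request
 * when @last is set.
 */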
8538 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
8539 					       struct bnxt_ctx_mem_type *ctxm,
8540 					       bool last)
8541 {
8542 	struct hwrm_func_backing_store_cfg_v2_input *req;
8543 	u32 instance_bmap = ctxm->instance_bmap;
8544 	int i, j, rc = 0, n = 1;
8545 	__le32 *p;
8546 
8547 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
8548 		return 0;
8549 
8550 	if (instance_bmap)
8551 		n = hweight32(ctxm->instance_bmap);
8552 	else
8553 		instance_bmap = 1;
8554 
8555 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8556 	if (rc)
8557 		return rc;
8558 	hwrm_req_hold(bp, req);
8559 	req->type = cpu_to_le16(ctxm->type);
8560 	req->entry_size = cpu_to_le16(ctxm->entry_size);
8561 	req->subtype_valid_cnt = ctxm->split_entry_cnt;
8562 	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
8563 		p[i] = cpu_to_le32(ctxm->split[i]);
8564 	for (i = 0, j = 0; j < n && !rc; i++) {
8565 		struct bnxt_ctx_pg_info *ctx_pg;
8566 
8567 		if (!(instance_bmap & (1 << i)))
8568 			continue;
8569 		req->instance = cpu_to_le16(i);
8570 		ctx_pg = &ctxm->pg_info[j++];
8571 		if (!ctx_pg->entries)
8572 			continue;
8573 		req->num_entries = cpu_to_le32(ctx_pg->entries);
8574 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8575 				      &req->page_size_pbl_level,
8576 				      &req->page_dir);
8577 		if (last && j == n)
8578 			req->flags =
8579 				cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
8580 		rc = hwrm_req_send(bp, req);
8581 	}
8582 	hwrm_req_drop(bp, req);
8583 	return rc;
8584 }
8585 
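/* Configure every context type with the V2 API.  The last valid type is
 * flagged so that the firmware knows the backing store configuration is
 * complete.
 */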
8586 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
8587 {
8588 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8589 	struct bnxt_ctx_mem_type *ctxm;
8590 	u16 last_type;
8591 	int rc = 0;
8592 	u16 type;
8593 
8594 	if (!ena)
8595 		return 0;
8596 	else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
8597 		last_type = BNXT_CTX_MAX - 1;
8598 	else
8599 		last_type = BNXT_CTX_L2_MAX - 1;
8600 	ctx->ctx_arr[last_type].last = 1;
8601 
8602 	for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
8603 		ctxm = &ctx->ctx_arr[type];
8604 
8605 		rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
8606 		if (rc)
8607 			return rc;
8608 	}
8609 	return 0;
8610 }
8611 
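/* Free all context memory pages and the bnxt_ctx_mem_info itself. */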
8612 void bnxt_free_ctx_mem(struct bnxt *bp)
8613 {
8614 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
8615 	u16 type;
8616 
8617 	if (!ctx)
8618 		return;
8619 
8620 	for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
8621 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8622 		struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8623 		int i, n = 1;
8624 
8625 		if (!ctx_pg)
8626 			continue;
8627 		if (ctxm->instance_bmap)
8628 			n = hweight32(ctxm->instance_bmap);
8629 		for (i = 0; i < n; i++)
8630 			bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
8631 
8632 		kfree(ctx_pg);
8633 		ctxm->pg_info = NULL;
8634 	}
8635 
8636 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
8637 	kfree(ctx);
8638 	bp->ctx = NULL;
8639 }
8640 
8641 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
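/* Descriptive summary (added): query the backing store requirements and
 * allocate context memory for the QP, SRQ, CQ, VNIC, STAT and TQM types,
 * plus MRAV and TIM (with extra QP/SRQ entries) when RoCE is supported,
 * then hand the memory to the firmware.
 */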
8642 {
8643 	struct bnxt_ctx_mem_type *ctxm;
8644 	struct bnxt_ctx_mem_info *ctx;
8645 	u32 l2_qps, qp1_qps, max_qps;
8646 	u32 ena, entries_sp, entries;
8647 	u32 srqs, max_srqs, min;
8648 	u32 num_mr, num_ah;
8649 	u32 extra_srqs = 0;
8650 	u32 extra_qps = 0;
8651 	u32 fast_qpmd_qps;
8652 	u8 pg_lvl = 1;
8653 	int i, rc;
8654 
8655 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
8656 	if (rc) {
8657 		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
8658 			   rc);
8659 		return rc;
8660 	}
8661 	ctx = bp->ctx;
8662 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
8663 		return 0;
8664 
8665 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8666 	l2_qps = ctxm->qp_l2_entries;
8667 	qp1_qps = ctxm->qp_qp1_entries;
8668 	fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
8669 	max_qps = ctxm->max_entries;
8670 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8671 	srqs = ctxm->srq_l2_entries;
8672 	max_srqs = ctxm->max_entries;
8673 	ena = 0;
8674 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
8675 		pg_lvl = 2;
8676 		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
8677 		/* allocate extra qps if fw supports RoCE fast qp destroy feature */
8678 		extra_qps += fast_qpmd_qps;
8679 		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
8680 		if (fast_qpmd_qps)
8681 			ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
8682 	}
8683 
8684 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8685 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
8686 				     pg_lvl);
8687 	if (rc)
8688 		return rc;
8689 
8690 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8691 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
8692 	if (rc)
8693 		return rc;
8694 
8695 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8696 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
8697 				     extra_qps * 2, pg_lvl);
8698 	if (rc)
8699 		return rc;
8700 
8701 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8702 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8703 	if (rc)
8704 		return rc;
8705 
8706 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8707 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8708 	if (rc)
8709 		return rc;
8710 
8711 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
8712 		goto skip_rdma;
8713 
8714 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8715 	/* 128K extra is needed to accommodate static AH context
8716 	 * allocation by f/w.
8717 	 */
8718 	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
8719 	num_ah = min_t(u32, num_mr, 1024 * 128);
8720 	ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
8721 	if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
8722 		ctxm->mrav_av_entries = num_ah;
8723 
8724 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
8725 	if (rc)
8726 		return rc;
8727 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
8728 
8729 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8730 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
8731 	if (rc)
8732 		return rc;
8733 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
8734 
8735 skip_rdma:
8736 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8737 	min = ctxm->min_entries;
8738 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
8739 		     2 * (extra_qps + qp1_qps) + min;
8740 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
8741 	if (rc)
8742 		return rc;
8743 
8744 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8745 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
8746 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
8747 	if (rc)
8748 		return rc;
8749 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
8750 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
8751 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
8752 
8753 	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8754 		rc = bnxt_backing_store_cfg_v2(bp, ena);
8755 	else
8756 		rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
8757 	if (rc) {
8758 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
8759 			   rc);
8760 		return rc;
8761 	}
8762 	ctx->flags |= BNXT_CTX_FLAG_INITED;
8763 	return 0;
8764 }
8765 
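/* Query the min/max resource counts (rings, ring groups, stat contexts,
 * VNICs, etc.) available to this function, and the PF's VF reservation
 * strategy.
 */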
8766 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
8767 {
8768 	struct hwrm_func_resource_qcaps_output *resp;
8769 	struct hwrm_func_resource_qcaps_input *req;
8770 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8771 	int rc;
8772 
8773 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
8774 	if (rc)
8775 		return rc;
8776 
8777 	req->fid = cpu_to_le16(0xffff);
8778 	resp = hwrm_req_hold(bp, req);
8779 	rc = hwrm_req_send_silent(bp, req);
8780 	if (rc)
8781 		goto hwrm_func_resc_qcaps_exit;
8782 
8783 	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
8784 	if (!all)
8785 		goto hwrm_func_resc_qcaps_exit;
8786 
8787 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
8788 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
8789 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
8790 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
8791 	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
8792 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
8793 	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
8794 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
8795 	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
8796 	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
8797 	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
8798 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
8799 	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
8800 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
8801 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
8802 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
8803 
8804 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8805 		u16 max_msix = le16_to_cpu(resp->max_msix);
8806 
8807 		hw_resc->max_nqs = max_msix;
8808 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
8809 	}
8810 
8811 	if (BNXT_PF(bp)) {
8812 		struct bnxt_pf_info *pf = &bp->pf;
8813 
8814 		pf->vf_resv_strategy =
8815 			le16_to_cpu(resp->vf_reservation_strategy);
8816 		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
8817 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
8818 	}
8819 hwrm_func_resc_qcaps_exit:
8820 	hwrm_req_drop(bp, req);
8821 	return rc;
8822 }
8823 
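/* Query PORT_MAC_PTP_QCFG to discover how the PHC can be accessed.  This
 * allocates bp->ptp_cfg on first use, records the real-time clock register
 * locations and calls bnxt_ptp_init().  Any failure tears down and frees
 * the PTP state again.
 */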
8824 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
8825 {
8826 	struct hwrm_port_mac_ptp_qcfg_output *resp;
8827 	struct hwrm_port_mac_ptp_qcfg_input *req;
8828 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
8829 	bool phc_cfg;
8830 	u8 flags;
8831 	int rc;
8832 
8833 	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
8834 		rc = -ENODEV;
8835 		goto no_ptp;
8836 	}
8837 
8838 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
8839 	if (rc)
8840 		goto no_ptp;
8841 
8842 	req->port_id = cpu_to_le16(bp->pf.port_id);
8843 	resp = hwrm_req_hold(bp, req);
8844 	rc = hwrm_req_send(bp, req);
8845 	if (rc)
8846 		goto exit;
8847 
8848 	flags = resp->flags;
8849 	if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
8850 		rc = -ENODEV;
8851 		goto exit;
8852 	}
8853 	if (!ptp) {
8854 		ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
8855 		if (!ptp) {
8856 			rc = -ENOMEM;
8857 			goto exit;
8858 		}
8859 		ptp->bp = bp;
8860 		bp->ptp_cfg = ptp;
8861 	}
8862 	if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
8863 		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
8864 		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
8865 	} else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8866 		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
8867 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
8868 	} else {
8869 		rc = -ENODEV;
8870 		goto exit;
8871 	}
8872 	phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
8873 	rc = bnxt_ptp_init(bp, phc_cfg);
8874 	if (rc)
8875 		netdev_warn(bp->dev, "PTP initialization failed.\n");
8876 exit:
8877 	hwrm_req_drop(bp, req);
8878 	if (!rc)
8879 		return 0;
8880 
8881 no_ptp:
8882 	bnxt_ptp_clear(bp);
8883 	kfree(ptp);
8884 	bp->ptp_cfg = NULL;
8885 	return rc;
8886 }
8887 
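/* Issue HWRM_FUNC_QCAPS and translate the returned flags, flags_ext and
 * flags_ext2 bits into bp->flags / bp->fw_cap capability bits.  Also caches
 * the per-function resource maxima and the PF/VF identity (fid, port id,
 * MAC address, VF range).
 */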
8888 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
8889 {
8890 	struct hwrm_func_qcaps_output *resp;
8891 	struct hwrm_func_qcaps_input *req;
8892 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8893 	u32 flags, flags_ext, flags_ext2;
8894 	int rc;
8895 
8896 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
8897 	if (rc)
8898 		return rc;
8899 
8900 	req->fid = cpu_to_le16(0xffff);
8901 	resp = hwrm_req_hold(bp, req);
8902 	rc = hwrm_req_send(bp, req);
8903 	if (rc)
8904 		goto hwrm_func_qcaps_exit;
8905 
8906 	flags = le32_to_cpu(resp->flags);
8907 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
8908 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
8909 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
8910 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
8911 	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
8912 		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
8913 	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
8914 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
8915 	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
8916 		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
8917 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
8918 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
8919 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
8920 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
8921 	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
8922 		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
8923 	if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
8924 		bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
8925 
8926 	flags_ext = le32_to_cpu(resp->flags_ext);
8927 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
8928 		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
8929 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
8930 		bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
8931 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
8932 		bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
8933 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
8934 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
8935 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
8936 		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
8937 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
8938 		bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
8939 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
8940 		bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
8941 
8942 	flags_ext2 = le32_to_cpu(resp->flags_ext2);
8943 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
8944 		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
8945 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
8946 		bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
8947 
8948 	bp->tx_push_thresh = 0;
8949 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
8950 	    BNXT_FW_MAJ(bp) > 217)
8951 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
8952 
8953 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
8954 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
8955 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
8956 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
8957 	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
8958 	if (!hw_resc->max_hw_ring_grps)
8959 		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
8960 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
8961 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
8962 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
8963 
8964 	hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
8965 	hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
8966 	hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
8967 	hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
8968 	hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
8969 	hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
8970 
8971 	if (BNXT_PF(bp)) {
8972 		struct bnxt_pf_info *pf = &bp->pf;
8973 
8974 		pf->fw_fid = le16_to_cpu(resp->fid);
8975 		pf->port_id = le16_to_cpu(resp->port_id);
8976 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
8977 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
8978 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
8979 		bp->flags &= ~BNXT_FLAG_WOL_CAP;
8980 		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
8981 			bp->flags |= BNXT_FLAG_WOL_CAP;
8982 		if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
8983 			bp->fw_cap |= BNXT_FW_CAP_PTP;
8984 		} else {
8985 			bnxt_ptp_clear(bp);
8986 			kfree(bp->ptp_cfg);
8987 			bp->ptp_cfg = NULL;
8988 		}
8989 	} else {
8990 #ifdef CONFIG_BNXT_SRIOV
8991 		struct bnxt_vf_info *vf = &bp->vf;
8992 
8993 		vf->fw_fid = le16_to_cpu(resp->fid);
8994 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
8995 #endif
8996 	}
8997 
8998 hwrm_func_qcaps_exit:
8999 	hwrm_req_drop(bp, req);
9000 	return rc;
9001 }
9002 
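/* Cache the firmware debug capability flags in bp->fw_dbg_cap if the
 * firmware advertises HWRM_DBG_QCAPS.  Failures are ignored and leave the
 * field at 0.
 */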
9003 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9004 {
9005 	struct hwrm_dbg_qcaps_output *resp;
9006 	struct hwrm_dbg_qcaps_input *req;
9007 	int rc;
9008 
9009 	bp->fw_dbg_cap = 0;
9010 	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9011 		return;
9012 
9013 	rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9014 	if (rc)
9015 		return;
9016 
9017 	req->fid = cpu_to_le16(0xffff);
9018 	resp = hwrm_req_hold(bp, req);
9019 	rc = hwrm_req_send(bp, req);
9020 	if (rc)
9021 		goto hwrm_dbg_qcaps_exit;
9022 
9023 	bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9024 
9025 hwrm_dbg_qcaps_exit:
9026 	hwrm_req_drop(bp, req);
9027 }
9028 
9029 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9030 
9031 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9032 {
9033 	int rc;
9034 
9035 	rc = __bnxt_hwrm_func_qcaps(bp);
9036 	if (rc)
9037 		return rc;
9038 
9039 	bnxt_hwrm_dbg_qcaps(bp);
9040 
9041 	rc = bnxt_hwrm_queue_qportcfg(bp);
9042 	if (rc) {
9043 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9044 		return rc;
9045 	}
9046 	if (bp->hwrm_spec_code >= 0x10803) {
9047 		rc = bnxt_alloc_ctx_mem(bp);
9048 		if (rc)
9049 			return rc;
9050 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9051 		if (!rc)
9052 			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9053 	}
9054 	return 0;
9055 }
9056 
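/* Query the advanced CFA flow management capabilities and record which RFS
 * ring table index formats and ntuple extensions the firmware supports.
 */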
9057 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9058 {
9059 	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9060 	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9061 	u32 flags;
9062 	int rc;
9063 
9064 	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9065 		return 0;
9066 
9067 	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9068 	if (rc)
9069 		return rc;
9070 
9071 	resp = hwrm_req_hold(bp, req);
9072 	rc = hwrm_req_send(bp, req);
9073 	if (rc)
9074 		goto hwrm_cfa_adv_qcaps_exit;
9075 
9076 	flags = le32_to_cpu(resp->flags);
9077 	if (flags &
9078 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9079 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9080 
9081 	if (flags &
9082 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9083 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9084 
9085 	if (flags &
9086 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9087 		bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9088 
9089 hwrm_cfa_adv_qcaps_exit:
9090 	hwrm_req_drop(bp, req);
9091 	return rc;
9092 }
9093 
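/* Allocate bp->fw_health on first use.  bnxt_alloc_fw_health() below only
 * does so when the hot reset or error recovery capability is present, and
 * drops both capabilities if the allocation fails.
 */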
9094 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9095 {
9096 	if (bp->fw_health)
9097 		return 0;
9098 
9099 	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9100 	if (!bp->fw_health)
9101 		return -ENOMEM;
9102 
9103 	mutex_init(&bp->fw_health->lock);
9104 	return 0;
9105 }
9106 
9107 static int bnxt_alloc_fw_health(struct bnxt *bp)
9108 {
9109 	int rc;
9110 
9111 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9112 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9113 		return 0;
9114 
9115 	rc = __bnxt_alloc_fw_health(bp);
9116 	if (rc) {
9117 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9118 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9119 		return rc;
9120 	}
9121 
9122 	return 0;
9123 }
9124 
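/* Point the dedicated firmware-health window in BAR0 at the GRC page
 * containing @reg so that the health registers can be read directly.
 */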
9125 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9126 {
9127 	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9128 					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9129 					 BNXT_FW_HEALTH_WIN_MAP_OFF);
9130 }
9131 
9132 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9133 {
9134 	struct bnxt_fw_health *fw_health = bp->fw_health;
9135 	u32 reg_type;
9136 
9137 	if (!fw_health)
9138 		return;
9139 
9140 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9141 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9142 		fw_health->status_reliable = false;
9143 
9144 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9145 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9146 		fw_health->resets_reliable = false;
9147 }
9148 
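/* Locate the firmware status register, either from the hcomm_status
 * structure advertised by the firmware or from the fixed P5+ GRC offset,
 * allocate bp->fw_health if needed and map the register so its value can
 * be polled.
 */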
9149 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9150 {
9151 	void __iomem *hs;
9152 	u32 status_loc;
9153 	u32 reg_type;
9154 	u32 sig;
9155 
9156 	if (bp->fw_health)
9157 		bp->fw_health->status_reliable = false;
9158 
9159 	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9160 	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9161 
9162 	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9163 	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9164 		if (!bp->chip_num) {
9165 			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9166 			bp->chip_num = readl(bp->bar0 +
9167 					     BNXT_FW_HEALTH_WIN_BASE +
9168 					     BNXT_GRC_REG_CHIP_NUM);
9169 		}
9170 		if (!BNXT_CHIP_P5_PLUS(bp))
9171 			return;
9172 
9173 		status_loc = BNXT_GRC_REG_STATUS_P5 |
9174 			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
9175 	} else {
9176 		status_loc = readl(hs + offsetof(struct hcomm_status,
9177 						 fw_status_loc));
9178 	}
9179 
9180 	if (__bnxt_alloc_fw_health(bp)) {
9181 		netdev_warn(bp->dev, "no memory for firmware status checks\n");
9182 		return;
9183 	}
9184 
9185 	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9186 	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9187 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9188 		__bnxt_map_fw_health_reg(bp, status_loc);
9189 		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9190 			BNXT_FW_HEALTH_WIN_OFF(status_loc);
9191 	}
9192 
9193 	bp->fw_health->status_reliable = true;
9194 }
9195 
9196 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9197 {
9198 	struct bnxt_fw_health *fw_health = bp->fw_health;
9199 	u32 reg_base = 0xffffffff;
9200 	int i;
9201 
9202 	bp->fw_health->status_reliable = false;
9203 	bp->fw_health->resets_reliable = false;
9204 	/* Only pre-map the monitoring GRC registers using window 3 */
9205 	for (i = 0; i < 4; i++) {
9206 		u32 reg = fw_health->regs[i];
9207 
9208 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9209 			continue;
9210 		if (reg_base == 0xffffffff)
9211 			reg_base = reg & BNXT_GRC_BASE_MASK;
9212 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9213 			return -ERANGE;
9214 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9215 	}
9216 	bp->fw_health->status_reliable = true;
9217 	bp->fw_health->resets_reliable = true;
9218 	if (reg_base == 0xffffffff)
9219 		return 0;
9220 
9221 	__bnxt_map_fw_health_reg(bp, reg_base);
9222 	return 0;
9223 }
9224 
9225 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9226 {
9227 	if (!bp->fw_health)
9228 		return;
9229 
9230 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9231 		bp->fw_health->status_reliable = true;
9232 		bp->fw_health->resets_reliable = true;
9233 	} else {
9234 		bnxt_try_map_fw_health_reg(bp);
9235 	}
9236 }
9237 
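/* Fetch the error recovery configuration: polling intervals, wait times,
 * the health/heartbeat/reset-count register locations and the reset
 * sequence, then map the GRC registers.  On any failure the ERROR_RECOVERY
 * capability is cleared.
 */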
9238 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9239 {
9240 	struct bnxt_fw_health *fw_health = bp->fw_health;
9241 	struct hwrm_error_recovery_qcfg_output *resp;
9242 	struct hwrm_error_recovery_qcfg_input *req;
9243 	int rc, i;
9244 
9245 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9246 		return 0;
9247 
9248 	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9249 	if (rc)
9250 		return rc;
9251 
9252 	resp = hwrm_req_hold(bp, req);
9253 	rc = hwrm_req_send(bp, req);
9254 	if (rc)
9255 		goto err_recovery_out;
9256 	fw_health->flags = le32_to_cpu(resp->flags);
9257 	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
9258 	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
9259 		rc = -EINVAL;
9260 		goto err_recovery_out;
9261 	}
9262 	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
9263 	fw_health->master_func_wait_dsecs =
9264 		le32_to_cpu(resp->master_func_wait_period);
9265 	fw_health->normal_func_wait_dsecs =
9266 		le32_to_cpu(resp->normal_func_wait_period);
9267 	fw_health->post_reset_wait_dsecs =
9268 		le32_to_cpu(resp->master_func_wait_period_after_reset);
9269 	fw_health->post_reset_max_wait_dsecs =
9270 		le32_to_cpu(resp->max_bailout_time_after_reset);
9271 	fw_health->regs[BNXT_FW_HEALTH_REG] =
9272 		le32_to_cpu(resp->fw_health_status_reg);
9273 	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
9274 		le32_to_cpu(resp->fw_heartbeat_reg);
9275 	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
9276 		le32_to_cpu(resp->fw_reset_cnt_reg);
9277 	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
9278 		le32_to_cpu(resp->reset_inprogress_reg);
9279 	fw_health->fw_reset_inprog_reg_mask =
9280 		le32_to_cpu(resp->reset_inprogress_reg_mask);
9281 	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
9282 	if (fw_health->fw_reset_seq_cnt >= 16) {
9283 		rc = -EINVAL;
9284 		goto err_recovery_out;
9285 	}
9286 	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
9287 		fw_health->fw_reset_seq_regs[i] =
9288 			le32_to_cpu(resp->reset_reg[i]);
9289 		fw_health->fw_reset_seq_vals[i] =
9290 			le32_to_cpu(resp->reset_reg_val[i]);
9291 		fw_health->fw_reset_seq_delay_msec[i] =
9292 			resp->delay_after_reset[i];
9293 	}
9294 err_recovery_out:
9295 	hwrm_req_drop(bp, req);
9296 	if (!rc)
9297 		rc = bnxt_map_fw_health_regs(bp);
9298 	if (rc)
9299 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9300 	return rc;
9301 }
9302 
9303 static int bnxt_hwrm_func_reset(struct bnxt *bp)
9304 {
9305 	struct hwrm_func_reset_input *req;
9306 	int rc;
9307 
9308 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9309 	if (rc)
9310 		return rc;
9311 
9312 	req->enables = 0;
9313 	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
9314 	return hwrm_req_send(bp, req);
9315 }
9316 
9317 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
9318 {
9319 	struct hwrm_nvm_get_dev_info_output nvm_info;
9320 
9321 	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
9322 		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9323 			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
9324 			 nvm_info.nvm_cfg_ver_upd);
9325 }
9326 
9327 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
9328 {
9329 	struct hwrm_queue_qportcfg_output *resp;
9330 	struct hwrm_queue_qportcfg_input *req;
9331 	u8 i, j, *qptr;
9332 	bool no_rdma;
9333 	int rc = 0;
9334 
9335 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9336 	if (rc)
9337 		return rc;
9338 
9339 	resp = hwrm_req_hold(bp, req);
9340 	rc = hwrm_req_send(bp, req);
9341 	if (rc)
9342 		goto qportcfg_exit;
9343 
9344 	if (!resp->max_configurable_queues) {
9345 		rc = -EINVAL;
9346 		goto qportcfg_exit;
9347 	}
9348 	bp->max_tc = resp->max_configurable_queues;
9349 	bp->max_lltc = resp->max_configurable_lossless_queues;
9350 	if (bp->max_tc > BNXT_MAX_QUEUE)
9351 		bp->max_tc = BNXT_MAX_QUEUE;
9352 
9353 	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
9354 	qptr = &resp->queue_id0;
9355 	for (i = 0, j = 0; i < bp->max_tc; i++) {
9356 		bp->q_info[j].queue_id = *qptr;
9357 		bp->q_ids[i] = *qptr++;
9358 		bp->q_info[j].queue_profile = *qptr++;
9359 		bp->tc_to_qidx[j] = j;
9360 		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
9361 		    (no_rdma && BNXT_PF(bp)))
9362 			j++;
9363 	}
9364 	bp->max_q = bp->max_tc;
9365 	bp->max_tc = max_t(u8, j, 1);
9366 
9367 	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
9368 		bp->max_tc = 1;
9369 
9370 	if (bp->max_lltc > bp->max_tc)
9371 		bp->max_lltc = bp->max_tc;
9372 
9373 qportcfg_exit:
9374 	hwrm_req_drop(bp, req);
9375 	return rc;
9376 }
9377 
9378 static int bnxt_hwrm_poll(struct bnxt *bp)
9379 {
9380 	struct hwrm_ver_get_input *req;
9381 	int rc;
9382 
9383 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9384 	if (rc)
9385 		return rc;
9386 
9387 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9388 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
9389 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9390 
9391 	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
9392 	rc = hwrm_req_send(bp, req);
9393 	return rc;
9394 }
9395 
9396 static int bnxt_hwrm_ver_get(struct bnxt *bp)
9397 {
9398 	struct hwrm_ver_get_output *resp;
9399 	struct hwrm_ver_get_input *req;
9400 	u16 fw_maj, fw_min, fw_bld, fw_rsv;
9401 	u32 dev_caps_cfg, hwrm_ver;
9402 	int rc, len;
9403 
9404 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9405 	if (rc)
9406 		return rc;
9407 
9408 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9409 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
9410 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9411 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
9412 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9413 
9414 	resp = hwrm_req_hold(bp, req);
9415 	rc = hwrm_req_send(bp, req);
9416 	if (rc)
9417 		goto hwrm_ver_get_exit;
9418 
9419 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
9420 
9421 	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
9422 			     resp->hwrm_intf_min_8b << 8 |
9423 			     resp->hwrm_intf_upd_8b;
9424 	if (resp->hwrm_intf_maj_8b < 1) {
9425 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
9426 			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9427 			    resp->hwrm_intf_upd_8b);
9428 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
9429 	}
9430 
9431 	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
9432 			HWRM_VERSION_UPDATE;
9433 
9434 	if (bp->hwrm_spec_code > hwrm_ver)
9435 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9436 			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
9437 			 HWRM_VERSION_UPDATE);
9438 	else
9439 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9440 			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9441 			 resp->hwrm_intf_upd_8b);
9442 
9443 	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
9444 	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
9445 		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
9446 		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
9447 		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
9448 		len = FW_VER_STR_LEN;
9449 	} else {
9450 		fw_maj = resp->hwrm_fw_maj_8b;
9451 		fw_min = resp->hwrm_fw_min_8b;
9452 		fw_bld = resp->hwrm_fw_bld_8b;
9453 		fw_rsv = resp->hwrm_fw_rsvd_8b;
9454 		len = BC_HWRM_STR_LEN;
9455 	}
9456 	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
9457 	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
9458 		 fw_rsv);
9459 
9460 	if (strlen(resp->active_pkg_name)) {
9461 		int fw_ver_len = strlen(bp->fw_ver_str);
9462 
9463 		snprintf(bp->fw_ver_str + fw_ver_len,
9464 			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
9465 			 resp->active_pkg_name);
9466 		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
9467 	}
9468 
9469 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
9470 	if (!bp->hwrm_cmd_timeout)
9471 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
9472 	bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
9473 	if (!bp->hwrm_cmd_max_timeout)
9474 		bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
9475 	else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
9476 		netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
9477 			    bp->hwrm_cmd_max_timeout / 1000);
9478 
9479 	if (resp->hwrm_intf_maj_8b >= 1) {
9480 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
9481 		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
9482 	}
9483 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
9484 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
9485 
9486 	bp->chip_num = le16_to_cpu(resp->chip_num);
9487 	bp->chip_rev = resp->chip_rev;
9488 	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
9489 	    !resp->chip_metal)
9490 		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
9491 
9492 	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
9493 	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
9494 	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
9495 		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
9496 
9497 	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
9498 		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
9499 
9500 	if (dev_caps_cfg &
9501 	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
9502 		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
9503 
9504 	if (dev_caps_cfg &
9505 	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
9506 		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
9507 
9508 	if (dev_caps_cfg &
9509 	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
9510 		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
9511 
9512 hwrm_ver_get_exit:
9513 	hwrm_req_drop(bp, req);
9514 	return rc;
9515 }
9516 
9517 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
9518 {
9519 	struct hwrm_fw_set_time_input *req;
9520 	struct tm tm;
9521 	time64_t now = ktime_get_real_seconds();
9522 	int rc;
9523 
9524 	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
9525 	    bp->hwrm_spec_code < 0x10400)
9526 		return -EOPNOTSUPP;
9527 
9528 	time64_to_tm(now, 0, &tm);
9529 	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
9530 	if (rc)
9531 		return rc;
9532 
9533 	req->year = cpu_to_le16(1900 + tm.tm_year);
9534 	req->month = 1 + tm.tm_mon;
9535 	req->day = tm.tm_mday;
9536 	req->hour = tm.tm_hour;
9537 	req->minute = tm.tm_min;
9538 	req->second = tm.tm_sec;
9539 	return hwrm_req_send(bp, req);
9540 }
9541 
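/* Fold a hardware counter that is only @mask bits wide into the 64-bit
 * software accumulator.  For example, with a 48-bit mask a hardware value
 * that has wrapped below the previous sample adds one full 2^48 period to
 * the software count.
 */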
9542 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
9543 {
9544 	u64 sw_tmp;
9545 
9546 	hw &= mask;
9547 	sw_tmp = (*sw & ~mask) | hw;
9548 	if (hw < (*sw & mask))
9549 		sw_tmp += mask + 1;
9550 	WRITE_ONCE(*sw, sw_tmp);
9551 }
9552 
9553 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
9554 				    int count, bool ignore_zero)
9555 {
9556 	int i;
9557 
9558 	for (i = 0; i < count; i++) {
9559 		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
9560 
9561 		if (ignore_zero && !hw)
9562 			continue;
9563 
9564 		if (masks[i] == -1ULL)
9565 			sw_stats[i] = hw;
9566 		else
9567 			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
9568 	}
9569 }
9570 
9571 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
9572 {
9573 	if (!stats->hw_stats)
9574 		return;
9575 
9576 	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9577 				stats->hw_masks, stats->len / 8, false);
9578 }
9579 
9580 static void bnxt_accumulate_all_stats(struct bnxt *bp)
9581 {
9582 	struct bnxt_stats_mem *ring0_stats;
9583 	bool ignore_zero = false;
9584 	int i;
9585 
9586 	/* Chip bug.  Counter intermittently becomes 0. */
9587 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9588 		ignore_zero = true;
9589 
9590 	for (i = 0; i < bp->cp_nr_rings; i++) {
9591 		struct bnxt_napi *bnapi = bp->bnapi[i];
9592 		struct bnxt_cp_ring_info *cpr;
9593 		struct bnxt_stats_mem *stats;
9594 
9595 		cpr = &bnapi->cp_ring;
9596 		stats = &cpr->stats;
9597 		if (!i)
9598 			ring0_stats = stats;
9599 		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9600 					ring0_stats->hw_masks,
9601 					ring0_stats->len / 8, ignore_zero);
9602 	}
9603 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
9604 		struct bnxt_stats_mem *stats = &bp->port_stats;
9605 		__le64 *hw_stats = stats->hw_stats;
9606 		u64 *sw_stats = stats->sw_stats;
9607 		u64 *masks = stats->hw_masks;
9608 		int cnt;
9609 
9610 		cnt = sizeof(struct rx_port_stats) / 8;
9611 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9612 
9613 		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9614 		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9615 		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9616 		cnt = sizeof(struct tx_port_stats) / 8;
9617 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9618 	}
9619 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
9620 		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
9621 		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
9622 	}
9623 }
9624 
9625 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
9626 {
9627 	struct hwrm_port_qstats_input *req;
9628 	struct bnxt_pf_info *pf = &bp->pf;
9629 	int rc;
9630 
9631 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
9632 		return 0;
9633 
9634 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9635 		return -EOPNOTSUPP;
9636 
9637 	rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
9638 	if (rc)
9639 		return rc;
9640 
9641 	req->flags = flags;
9642 	req->port_id = cpu_to_le16(pf->port_id);
9643 	req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
9644 					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
9645 	req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
9646 	return hwrm_req_send(bp, req);
9647 }
9648 
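/* Fetch the extended RX/TX port statistics.  When @flags is 0 and the
 * firmware reports the newer TX extended stats layout, also query the
 * priority-to-CoS queue mapping used for per-priority counters.
 */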
9649 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
9650 {
9651 	struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
9652 	struct hwrm_queue_pri2cos_qcfg_input *req_qc;
9653 	struct hwrm_port_qstats_ext_output *resp_qs;
9654 	struct hwrm_port_qstats_ext_input *req_qs;
9655 	struct bnxt_pf_info *pf = &bp->pf;
9656 	u32 tx_stat_size;
9657 	int rc;
9658 
9659 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
9660 		return 0;
9661 
9662 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9663 		return -EOPNOTSUPP;
9664 
9665 	rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
9666 	if (rc)
9667 		return rc;
9668 
9669 	req_qs->flags = flags;
9670 	req_qs->port_id = cpu_to_le16(pf->port_id);
9671 	req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
9672 	req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
9673 	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
9674 		       sizeof(struct tx_port_stats_ext) : 0;
9675 	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
9676 	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
9677 	resp_qs = hwrm_req_hold(bp, req_qs);
9678 	rc = hwrm_req_send(bp, req_qs);
9679 	if (!rc) {
9680 		bp->fw_rx_stats_ext_size =
9681 			le16_to_cpu(resp_qs->rx_stat_size) / 8;
9682 		if (BNXT_FW_MAJ(bp) < 220 &&
9683 		    bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
9684 			bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
9685 
9686 		bp->fw_tx_stats_ext_size = tx_stat_size ?
9687 			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
9688 	} else {
9689 		bp->fw_rx_stats_ext_size = 0;
9690 		bp->fw_tx_stats_ext_size = 0;
9691 	}
9692 	hwrm_req_drop(bp, req_qs);
9693 
9694 	if (flags)
9695 		return rc;
9696 
9697 	if (bp->fw_tx_stats_ext_size <=
9698 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
9699 		bp->pri2cos_valid = 0;
9700 		return rc;
9701 	}
9702 
9703 	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
9704 	if (rc)
9705 		return rc;
9706 
9707 	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
9708 
9709 	resp_qc = hwrm_req_hold(bp, req_qc);
9710 	rc = hwrm_req_send(bp, req_qc);
9711 	if (!rc) {
9712 		u8 *pri2cos;
9713 		int i, j;
9714 
9715 		pri2cos = &resp_qc->pri0_cos_queue_id;
9716 		for (i = 0; i < 8; i++) {
9717 			u8 queue_id = pri2cos[i];
9718 			u8 queue_idx;
9719 
9720 			/* Per-port queue IDs start from 0, 10, 20, etc. */
9721 			queue_idx = queue_id % 10;
9722 			if (queue_idx > BNXT_MAX_QUEUE) {
9723 				bp->pri2cos_valid = false;
9724 				hwrm_req_drop(bp, req_qc);
9725 				return rc;
9726 			}
9727 			for (j = 0; j < bp->max_q; j++) {
9728 				if (bp->q_ids[j] == queue_id)
9729 					bp->pri2cos_idx[i] = queue_idx;
9730 			}
9731 		}
9732 		bp->pri2cos_valid = true;
9733 	}
9734 	hwrm_req_drop(bp, req_qc);
9735 
9736 	return rc;
9737 }
9738 
9739 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
9740 {
9741 	bnxt_hwrm_tunnel_dst_port_free(bp,
9742 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9743 	bnxt_hwrm_tunnel_dst_port_free(bp,
9744 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9745 }
9746 
9747 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
9748 {
9749 	int rc, i;
9750 	u32 tpa_flags = 0;
9751 
9752 	if (set_tpa)
9753 		tpa_flags = bp->flags & BNXT_FLAG_TPA;
9754 	else if (BNXT_NO_FW_ACCESS(bp))
9755 		return 0;
9756 	for (i = 0; i < bp->nr_vnics; i++) {
9757 		rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
9758 		if (rc) {
9759 			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
9760 				   i, rc);
9761 			return rc;
9762 		}
9763 	}
9764 	return 0;
9765 }
9766 
9767 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
9768 {
9769 	int i;
9770 
9771 	for (i = 0; i < bp->nr_vnics; i++)
9772 		bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
9773 }
9774 
9775 static void bnxt_clear_vnic(struct bnxt *bp)
9776 {
9777 	if (!bp->vnic_info)
9778 		return;
9779 
9780 	bnxt_hwrm_clear_vnic_filter(bp);
9781 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
9782 		/* clear all RSS settings before freeing the vnic ctx */
9783 		bnxt_hwrm_clear_vnic_rss(bp);
9784 		bnxt_hwrm_vnic_ctx_free(bp);
9785 	}
9786 	/* before freeing the vnic, undo the vnic TPA settings */
9787 	if (bp->flags & BNXT_FLAG_TPA)
9788 		bnxt_set_tpa(bp, false);
9789 	bnxt_hwrm_vnic_free(bp);
9790 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9791 		bnxt_hwrm_vnic_ctx_free(bp);
9792 }
9793 
9794 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
9795 				    bool irq_re_init)
9796 {
9797 	bnxt_clear_vnic(bp);
9798 	bnxt_hwrm_ring_free(bp, close_path);
9799 	bnxt_hwrm_ring_grp_free(bp);
9800 	if (irq_re_init) {
9801 		bnxt_hwrm_stat_ctx_free(bp);
9802 		bnxt_hwrm_free_tunnel_ports(bp);
9803 	}
9804 }
9805 
9806 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
9807 {
9808 	struct hwrm_func_cfg_input *req;
9809 	u8 evb_mode;
9810 	int rc;
9811 
9812 	if (br_mode == BRIDGE_MODE_VEB)
9813 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
9814 	else if (br_mode == BRIDGE_MODE_VEPA)
9815 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
9816 	else
9817 		return -EINVAL;
9818 
9819 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9820 	if (rc)
9821 		return rc;
9822 
9823 	req->fid = cpu_to_le16(0xffff);
9824 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
9825 	req->evb_mode = evb_mode;
9826 	return hwrm_req_send(bp, req);
9827 }
9828 
9829 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
9830 {
9831 	struct hwrm_func_cfg_input *req;
9832 	int rc;
9833 
9834 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
9835 		return 0;
9836 
9837 	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9838 	if (rc)
9839 		return rc;
9840 
9841 	req->fid = cpu_to_le16(0xffff);
9842 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
9843 	req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
9844 	if (size == 128)
9845 		req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
9846 
9847 	return hwrm_req_send(bp, req);
9848 }
9849 
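/* Set up a VNIC on pre-P5 chips: allocate its RSS (and Nitro A0 CoS)
 * contexts, configure the VNIC and its ring group, enable RSS hashing and,
 * when aggregation rings are used, header-data split.
 */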
9850 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
9851 {
9852 	int rc;
9853 
9854 	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
9855 		goto skip_rss_ctx;
9856 
9857 	/* allocate context for vnic */
9858 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
9859 	if (rc) {
9860 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9861 			   vnic->vnic_id, rc);
9862 		goto vnic_setup_err;
9863 	}
9864 	bp->rsscos_nr_ctxs++;
9865 
9866 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9867 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
9868 		if (rc) {
9869 			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
9870 				   vnic->vnic_id, rc);
9871 			goto vnic_setup_err;
9872 		}
9873 		bp->rsscos_nr_ctxs++;
9874 	}
9875 
9876 skip_rss_ctx:
9877 	/* configure default vnic, ring grp */
9878 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
9879 	if (rc) {
9880 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9881 			   vnic->vnic_id, rc);
9882 		goto vnic_setup_err;
9883 	}
9884 
9885 	/* Enable RSS hashing on vnic */
9886 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
9887 	if (rc) {
9888 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
9889 			   vnic->vnic_id, rc);
9890 		goto vnic_setup_err;
9891 	}
9892 
9893 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9894 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
9895 		if (rc) {
9896 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9897 				   vnic->vnic_id, rc);
9898 		}
9899 	}
9900 
9901 vnic_setup_err:
9902 	return rc;
9903 }
9904 
9905 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
9906 {
9907 	int rc;
9908 
9909 	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
9910 	if (rc) {
9911 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
9912 			   vnic->vnic_id, rc);
9913 		return rc;
9914 	}
9915 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
9916 	if (rc)
9917 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9918 			   vnic->vnic_id, rc);
9919 	return rc;
9920 }
9921 
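/* Set up a VNIC on P5+ chips: allocate the RSS contexts needed for the RX
 * ring count, then program the RSS table, the VNIC itself and, when
 * aggregation rings are used, header-data split.
 */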
9922 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
9923 {
9924 	int rc, i, nr_ctxs;
9925 
9926 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
9927 	for (i = 0; i < nr_ctxs; i++) {
9928 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
9929 		if (rc) {
9930 			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
9931 				   vnic->vnic_id, i, rc);
9932 			break;
9933 		}
9934 		bp->rsscos_nr_ctxs++;
9935 	}
9936 	if (i < nr_ctxs)
9937 		return -ENOMEM;
9938 
9939 	rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
9940 	if (rc)
9941 		return rc;
9942 
9943 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9944 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
9945 		if (rc) {
9946 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9947 				   vnic->vnic_id, rc);
9948 		}
9949 	}
9950 	return rc;
9951 }
9952 
9953 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
9954 {
9955 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9956 		return __bnxt_setup_vnic_p5(bp, vnic);
9957 	else
9958 		return __bnxt_setup_vnic(bp, vnic);
9959 }
9960 
9961 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
9962 				     struct bnxt_vnic_info *vnic,
9963 				     u16 start_rx_ring_idx, int rx_rings)
9964 {
9965 	int rc;
9966 
9967 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
9968 	if (rc) {
9969 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9970 			   vnic->vnic_id, rc);
9971 		return rc;
9972 	}
9973 	return bnxt_setup_vnic(bp, vnic);
9974 }
9975 
9976 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
9977 {
9978 	struct bnxt_vnic_info *vnic;
9979 	int i, rc = 0;
9980 
9981 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
9982 		vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
9983 		return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
9984 	}
9985 
9986 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9987 		return 0;
9988 
9989 	for (i = 0; i < bp->rx_nr_rings; i++) {
9990 		u16 vnic_id = i + 1;
9991 		u16 ring_id = i;
9992 
9993 		if (vnic_id >= bp->nr_vnics)
9994 			break;
9995 
9996 		vnic = &bp->vnic_info[vnic_id];
9997 		vnic->flags |= BNXT_VNIC_RFS_FLAG;
9998 		if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
9999 			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10000 		if (bnxt_alloc_and_setup_vnic(bp, vnic, ring_id, 1))
10001 			break;
10002 	}
10003 	return rc;
10004 }
10005 
10006 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10007 			  bool all)
10008 {
10009 	struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10010 	struct bnxt_filter_base *usr_fltr, *tmp;
10011 	struct bnxt_ntuple_filter *ntp_fltr;
10012 	int i;
10013 
10014 	bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10015 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10016 		if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10017 			bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10018 	}
10019 	if (!all)
10020 		return;
10021 
10022 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10023 		if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10024 		    usr_fltr->fw_vnic_id == rss_ctx->index) {
10025 			ntp_fltr = container_of(usr_fltr,
10026 						struct bnxt_ntuple_filter,
10027 						base);
10028 			bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10029 			bnxt_del_ntp_filter(bp, ntp_fltr);
10030 			bnxt_del_one_usr_fltr(bp, usr_fltr);
10031 		}
10032 	}
10033 
10034 	if (vnic->rss_table)
10035 		dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10036 				  vnic->rss_table,
10037 				  vnic->rss_table_dma_addr);
10038 	kfree(rss_ctx->rss_indir_tbl);
10039 	list_del(&rss_ctx->list);
10040 	bp->num_rss_ctx--;
10041 	clear_bit(rss_ctx->index, bp->rss_ctx_bmap);
10042 	kfree(rss_ctx);
10043 }
10044 
10045 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10046 {
10047 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10048 	struct bnxt_rss_ctx *rss_ctx, *tmp;
10049 
10050 	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
10051 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10052 
10053 		if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10054 		    bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10055 		    __bnxt_setup_vnic_p5(bp, vnic)) {
10056 			netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10057 				   rss_ctx->index);
10058 			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10059 		}
10060 	}
10061 }
10062 
10063 struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp)
10064 {
10065 	struct bnxt_rss_ctx *rss_ctx = NULL;
10066 
10067 	rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL);
10068 	if (rss_ctx) {
10069 		rss_ctx->vnic.rss_ctx = rss_ctx;
10070 		list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
10071 		bp->num_rss_ctx++;
10072 	}
10073 	return rss_ctx;
10074 }
10075 
10076 void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all)
10077 {
10078 	struct bnxt_rss_ctx *rss_ctx, *tmp;
10079 
10080 	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
10081 		bnxt_del_one_rss_ctx(bp, rss_ctx, all);
10082 
10083 	if (all)
10084 		bitmap_free(bp->rss_ctx_bmap);
10085 }
10086 
10087 static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
10088 {
10089 	bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL);
10090 	if (bp->rss_ctx_bmap) {
10091 		/* burn index 0 since we cannot have context 0 */
10092 		__set_bit(0, bp->rss_ctx_bmap);
10093 		INIT_LIST_HEAD(&bp->rss_ctx_list);
10094 		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
10095 	}
10096 }
10097 
10098 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10099 static bool bnxt_promisc_ok(struct bnxt *bp)
10100 {
10101 #ifdef CONFIG_BNXT_SRIOV
10102 	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10103 		return false;
10104 #endif
10105 	return true;
10106 }
10107 
10108 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10109 {
10110 	struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10111 	int rc = 0;
10112 
10113 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10114 	if (rc) {
10115 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10116 			   rc);
10117 		return rc;
10118 	}
10119 
10120 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10121 	if (rc) {
10122 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10123 			   rc);
10124 		return rc;
10125 	}
10126 	return rc;
10127 }
10128 
10129 static int bnxt_cfg_rx_mode(struct bnxt *);
10130 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10131 
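/* Bring up the device-side state: allocate stat contexts, rings, ring
 * groups and the default VNIC, program RSS and TPA, install the default L2
 * filter and RX mask, and configure interrupt coalescing.  Any failure
 * releases everything allocated so far via bnxt_hwrm_resource_free().
 */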
10132 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10133 {
10134 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10135 	int rc = 0;
10136 	unsigned int rx_nr_rings = bp->rx_nr_rings;
10137 
10138 	if (irq_re_init) {
10139 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
10140 		if (rc) {
10141 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10142 				   rc);
10143 			goto err_out;
10144 		}
10145 	}
10146 
10147 	rc = bnxt_hwrm_ring_alloc(bp);
10148 	if (rc) {
10149 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
10150 		goto err_out;
10151 	}
10152 
10153 	rc = bnxt_hwrm_ring_grp_alloc(bp);
10154 	if (rc) {
10155 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
10156 		goto err_out;
10157 	}
10158 
10159 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10160 		rx_nr_rings--;
10161 
10162 	/* default vnic 0 */
10163 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
10164 	if (rc) {
10165 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
10166 		goto err_out;
10167 	}
10168 
10169 	if (BNXT_VF(bp))
10170 		bnxt_hwrm_func_qcfg(bp);
10171 
10172 	rc = bnxt_setup_vnic(bp, vnic);
10173 	if (rc)
10174 		goto err_out;
10175 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
10176 		bnxt_hwrm_update_rss_hash_cfg(bp);
10177 
10178 	if (bp->flags & BNXT_FLAG_RFS) {
10179 		rc = bnxt_alloc_rfs_vnics(bp);
10180 		if (rc)
10181 			goto err_out;
10182 	}
10183 
10184 	if (bp->flags & BNXT_FLAG_TPA) {
10185 		rc = bnxt_set_tpa(bp, true);
10186 		if (rc)
10187 			goto err_out;
10188 	}
10189 
10190 	if (BNXT_VF(bp))
10191 		bnxt_update_vf_mac(bp);
10192 
10193 	/* Filter for default vnic 0 */
10194 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10195 	if (rc) {
10196 		if (BNXT_VF(bp) && rc == -ENODEV)
10197 			netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
10198 		else
10199 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10200 		goto err_out;
10201 	}
10202 	vnic->uc_filter_count = 1;
10203 
10204 	vnic->rx_mask = 0;
10205 	if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
10206 		goto skip_rx_mask;
10207 
10208 	if (bp->dev->flags & IFF_BROADCAST)
10209 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10210 
10211 	if (bp->dev->flags & IFF_PROMISC)
10212 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10213 
10214 	if (bp->dev->flags & IFF_ALLMULTI) {
10215 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10216 		vnic->mc_list_count = 0;
10217 	} else if (bp->dev->flags & IFF_MULTICAST) {
10218 		u32 mask = 0;
10219 
10220 		bnxt_mc_list_updated(bp, &mask);
10221 		vnic->rx_mask |= mask;
10222 	}
10223 
10224 	rc = bnxt_cfg_rx_mode(bp);
10225 	if (rc)
10226 		goto err_out;
10227 
10228 skip_rx_mask:
10229 	rc = bnxt_hwrm_set_coal(bp);
10230 	if (rc)
10231 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
10232 				rc);
10233 
10234 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10235 		rc = bnxt_setup_nitroa0_vnic(bp);
10236 		if (rc)
10237 			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10238 				   rc);
10239 	}
10240 
10241 	if (BNXT_VF(bp)) {
10242 		bnxt_hwrm_func_qcfg(bp);
10243 		netdev_update_features(bp->dev);
10244 	}
10245 
10246 	return 0;
10247 
10248 err_out:
10249 	bnxt_hwrm_resource_free(bp, 0, true);
10250 
10251 	return rc;
10252 }
10253 
10254 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
10255 {
10256 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
10257 	return 0;
10258 }
10259 
10260 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
10261 {
10262 	bnxt_init_cp_rings(bp);
10263 	bnxt_init_rx_rings(bp);
10264 	bnxt_init_tx_rings(bp);
10265 	bnxt_init_ring_grps(bp, irq_re_init);
10266 	bnxt_init_vnics(bp);
10267 
10268 	return bnxt_init_chip(bp, irq_re_init);
10269 }
10270 
10271 static int bnxt_set_real_num_queues(struct bnxt *bp)
10272 {
10273 	int rc;
10274 	struct net_device *dev = bp->dev;
10275 
10276 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
10277 					  bp->tx_nr_rings_xdp);
10278 	if (rc)
10279 		return rc;
10280 
10281 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10282 	if (rc)
10283 		return rc;
10284 
10285 #ifdef CONFIG_RFS_ACCEL
10286 	if (bp->flags & BNXT_FLAG_RFS)
10287 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
10288 #endif
10289 
10290 	return rc;
10291 }
10292 
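/* Reduce the requested RX and TX ring counts so that they fit within @max
 * completion rings: with shared completion rings each count is simply
 * capped at @max, otherwise the larger of the two is decremented until
 * rx + tx <= max (e.g. rx = 8, tx = 8, max = 12 yields rx = 6, tx = 6).
 */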
10293 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10294 			     bool shared)
10295 {
10296 	int _rx = *rx, _tx = *tx;
10297 
10298 	if (shared) {
10299 		*rx = min_t(int, _rx, max);
10300 		*tx = min_t(int, _tx, max);
10301 	} else {
10302 		if (max < 2)
10303 			return -ENOMEM;
10304 
10305 		while (_rx + _tx > max) {
10306 			if (_rx > _tx && _rx > 1)
10307 				_rx--;
10308 			else if (_tx > 1)
10309 				_tx--;
10310 		}
10311 		*rx = _rx;
10312 		*tx = _tx;
10313 	}
10314 	return 0;
10315 }
10316 
10317 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
10318 {
10319 	return (tx - tx_xdp) / tx_sets + tx_xdp;
10320 }
10321 
10322 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
10323 {
10324 	int tcs = bp->num_tc;
10325 
10326 	if (!tcs)
10327 		tcs = 1;
10328 	return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
10329 }
10330 
10331 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
10332 {
10333 	int tcs = bp->num_tc;
10334 
10335 	return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
10336 	       bp->tx_nr_rings_xdp;
10337 }
10338 
10339 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10340 			   bool sh)
10341 {
10342 	int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
10343 
10344 	if (tx_cp != *tx) {
10345 		int tx_saved = tx_cp, rc;
10346 
10347 		rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10348 		if (rc)
10349 			return rc;
10350 		if (tx_cp != tx_saved)
10351 			*tx = bnxt_num_cp_to_tx(bp, tx_cp);
10352 		return 0;
10353 	}
10354 	return __bnxt_trim_rings(bp, rx, tx, max, sh);
10355 }
10356 
10357 static void bnxt_setup_msix(struct bnxt *bp)
10358 {
10359 	const int len = sizeof(bp->irq_tbl[0].name);
10360 	struct net_device *dev = bp->dev;
10361 	int tcs, i;
10362 
10363 	tcs = bp->num_tc;
10364 	if (tcs) {
10365 		int i, off, count;
10366 
10367 		for (i = 0; i < tcs; i++) {
10368 			count = bp->tx_nr_rings_per_tc;
10369 			off = BNXT_TC_TO_RING_BASE(bp, i);
10370 			netdev_set_tc_queue(dev, i, count, off);
10371 		}
10372 	}
10373 
10374 	for (i = 0; i < bp->cp_nr_rings; i++) {
10375 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10376 		char *attr;
10377 
10378 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10379 			attr = "TxRx";
10380 		else if (i < bp->rx_nr_rings)
10381 			attr = "rx";
10382 		else
10383 			attr = "tx";
10384 
10385 		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
10386 			 attr, i);
10387 		bp->irq_tbl[map_idx].handler = bnxt_msix;
10388 	}
10389 }
10390 
10391 static void bnxt_setup_inta(struct bnxt *bp)
10392 {
10393 	const int len = sizeof(bp->irq_tbl[0].name);
10394 
10395 	if (bp->num_tc) {
10396 		netdev_reset_tc(bp->dev);
10397 		bp->num_tc = 0;
10398 	}
10399 
10400 	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
10401 		 0);
10402 	bp->irq_tbl[0].handler = bnxt_inta;
10403 }
10404 
10405 static int bnxt_init_int_mode(struct bnxt *bp);
10406 
10407 static int bnxt_setup_int_mode(struct bnxt *bp)
10408 {
10409 	int rc;
10410 
10411 	if (!bp->irq_tbl) {
10412 		rc = bnxt_init_int_mode(bp);
10413 		if (rc || !bp->irq_tbl)
10414 			return rc ?: -ENODEV;
10415 	}
10416 
10417 	if (bp->flags & BNXT_FLAG_USING_MSIX)
10418 		bnxt_setup_msix(bp);
10419 	else
10420 		bnxt_setup_inta(bp);
10421 
10422 	rc = bnxt_set_real_num_queues(bp);
10423 	return rc;
10424 }
10425 
10426 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
10427 {
10428 	return bp->hw_resc.max_rsscos_ctxs;
10429 }
10430 
10431 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
10432 {
10433 	return bp->hw_resc.max_vnics;
10434 }
10435 
10436 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
10437 {
10438 	return bp->hw_resc.max_stat_ctxs;
10439 }
10440 
10441 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
10442 {
10443 	return bp->hw_resc.max_cp_rings;
10444 }
10445 
10446 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
10447 {
10448 	unsigned int cp = bp->hw_resc.max_cp_rings;
10449 
10450 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10451 		cp -= bnxt_get_ulp_msix_num(bp);
10452 
10453 	return cp;
10454 }
10455 
10456 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
10457 {
10458 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10459 
10460 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10461 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
10462 
10463 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
10464 }
10465 
10466 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
10467 {
10468 	bp->hw_resc.max_irqs = max_irqs;
10469 }
10470 
10471 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
10472 {
10473 	unsigned int cp;
10474 
10475 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
10476 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10477 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
10478 	else
10479 		return cp - bp->cp_nr_rings;
10480 }
10481 
10482 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
10483 {
10484 	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
10485 }
10486 
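/* Return how many additional MSI-X vectors can be made available beyond
 * those already used by the L2 rings (typically for the RoCE ULP), bounded
 * by the function's IRQ and completion ring limits.
 */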
10487 int bnxt_get_avail_msix(struct bnxt *bp, int num)
10488 {
10489 	int max_cp = bnxt_get_max_func_cp_rings(bp);
10490 	int max_irq = bnxt_get_max_func_irqs(bp);
10491 	int total_req = bp->cp_nr_rings + num;
10492 	int max_idx, avail_msix;
10493 
10494 	max_idx = bp->total_irqs;
10495 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10496 		max_idx = min_t(int, bp->total_irqs, max_cp);
10497 	avail_msix = max_idx - bp->cp_nr_rings;
10498 	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
10499 		return avail_msix;
10500 
10501 	if (max_irq < total_req) {
10502 		num = max_irq - bp->cp_nr_rings;
10503 		if (num <= 0)
10504 			return 0;
10505 	}
10506 	return num;
10507 }
10508 
10509 static int bnxt_get_num_msix(struct bnxt *bp)
10510 {
10511 	if (!BNXT_NEW_RM(bp))
10512 		return bnxt_get_max_func_irqs(bp);
10513 
10514 	return bnxt_nq_rings_in_use(bp);
10515 }
10516 
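/* Enable MSI-X: request up to the supported number of vectors, accept
 * fewer if the PCI core grants less, build bp->irq_tbl and trim the RX/TX
 * ring counts to fit the vectors actually obtained.
 */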
10517 static int bnxt_init_msix(struct bnxt *bp)
10518 {
10519 	int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
10520 	struct msix_entry *msix_ent;
10521 
10522 	total_vecs = bnxt_get_num_msix(bp);
10523 	max = bnxt_get_max_func_irqs(bp);
10524 	if (total_vecs > max)
10525 		total_vecs = max;
10526 
10527 	if (!total_vecs)
10528 		return 0;
10529 
10530 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
10531 	if (!msix_ent)
10532 		return -ENOMEM;
10533 
10534 	for (i = 0; i < total_vecs; i++) {
10535 		msix_ent[i].entry = i;
10536 		msix_ent[i].vector = 0;
10537 	}
10538 
10539 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
10540 		min = 2;
10541 
10542 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
10543 	ulp_msix = bnxt_get_ulp_msix_num(bp);
10544 	if (total_vecs < 0 || total_vecs < ulp_msix) {
10545 		rc = -ENODEV;
10546 		goto msix_setup_exit;
10547 	}
10548 
10549 	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
10550 	if (bp->irq_tbl) {
10551 		for (i = 0; i < total_vecs; i++)
10552 			bp->irq_tbl[i].vector = msix_ent[i].vector;
10553 
10554 		bp->total_irqs = total_vecs;
10555 		/* Trim rings based upon num of vectors allocated */
10556 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
10557 				     total_vecs - ulp_msix, min == 1);
10558 		if (rc)
10559 			goto msix_setup_exit;
10560 
10561 		tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
10562 		bp->cp_nr_rings = (min == 1) ?
10563 				  max_t(int, tx_cp, bp->rx_nr_rings) :
10564 				  tx_cp + bp->rx_nr_rings;
10565 
10566 	} else {
10567 		rc = -ENOMEM;
10568 		goto msix_setup_exit;
10569 	}
10570 	bp->flags |= BNXT_FLAG_USING_MSIX;
10571 	kfree(msix_ent);
10572 	return 0;
10573 
10574 msix_setup_exit:
10575 	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
10576 	kfree(bp->irq_tbl);
10577 	bp->irq_tbl = NULL;
10578 	pci_disable_msix(bp->pdev);
10579 	kfree(msix_ent);
10580 	return rc;
10581 }
10582 
10583 static int bnxt_init_inta(struct bnxt *bp)
10584 {
10585 	bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
10586 	if (!bp->irq_tbl)
10587 		return -ENOMEM;
10588 
10589 	bp->total_irqs = 1;
10590 	bp->rx_nr_rings = 1;
10591 	bp->tx_nr_rings = 1;
10592 	bp->cp_nr_rings = 1;
10593 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
10594 	bp->irq_tbl[0].vector = bp->pdev->irq;
10595 	return 0;
10596 }
10597 
10598 static int bnxt_init_int_mode(struct bnxt *bp)
10599 {
10600 	int rc = -ENODEV;
10601 
10602 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
10603 		rc = bnxt_init_msix(bp);
10604 
10605 	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
10606 		/* fallback to INTA */
10607 		rc = bnxt_init_inta(bp);
10608 	}
10609 	return rc;
10610 }
10611 
10612 static void bnxt_clear_int_mode(struct bnxt *bp)
10613 {
10614 	if (bp->flags & BNXT_FLAG_USING_MSIX)
10615 		pci_disable_msix(bp->pdev);
10616 
10617 	kfree(bp->irq_tbl);
10618 	bp->irq_tbl = NULL;
10619 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
10620 }
10621 
10622 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
10623 {
10624 	bool irq_cleared = false;
10625 	int tcs = bp->num_tc;
10626 	int irqs_required;
10627 	int rc;
10628 
10629 	if (!bnxt_need_reserve_rings(bp))
10630 		return 0;
10631 
10632 	if (!bnxt_ulp_registered(bp->edev)) {
10633 		int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
10634 
10635 		if (ulp_msix > bp->ulp_num_msix_want)
10636 			ulp_msix = bp->ulp_num_msix_want;
10637 		irqs_required = ulp_msix + bp->cp_nr_rings;
10638 	} else {
10639 		irqs_required = bnxt_get_num_msix(bp);
10640 	}
10641 
10642 	if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
10643 		bnxt_ulp_irq_stop(bp);
10644 		bnxt_clear_int_mode(bp);
10645 		irq_cleared = true;
10646 	}
10647 	rc = __bnxt_reserve_rings(bp);
10648 	if (irq_cleared) {
10649 		if (!rc)
10650 			rc = bnxt_init_int_mode(bp);
10651 		bnxt_ulp_irq_restart(bp, rc);
10652 	}
10653 	if (rc) {
10654 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
10655 		return rc;
10656 	}
10657 	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
10658 		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
10659 		netdev_err(bp->dev, "tx ring reservation failure\n");
10660 		netdev_reset_tc(bp->dev);
10661 		bp->num_tc = 0;
10662 		if (bp->tx_nr_rings_xdp)
10663 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
10664 		else
10665 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10666 		return -ENOMEM;
10667 	}
10668 	return 0;
10669 }
10670 
10671 static void bnxt_free_irq(struct bnxt *bp)
10672 {
10673 	struct bnxt_irq *irq;
10674 	int i;
10675 
10676 #ifdef CONFIG_RFS_ACCEL
10677 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
10678 	bp->dev->rx_cpu_rmap = NULL;
10679 #endif
10680 	if (!bp->irq_tbl || !bp->bnapi)
10681 		return;
10682 
10683 	for (i = 0; i < bp->cp_nr_rings; i++) {
10684 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10685 
10686 		irq = &bp->irq_tbl[map_idx];
10687 		if (irq->requested) {
10688 			if (irq->have_cpumask) {
10689 				irq_set_affinity_hint(irq->vector, NULL);
10690 				free_cpumask_var(irq->cpu_mask);
10691 				irq->have_cpumask = 0;
10692 			}
10693 			free_irq(irq->vector, bp->bnapi[i]);
10694 		}
10695 
10696 		irq->requested = 0;
10697 	}
10698 }
10699 
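/* Request one IRQ per completion ring.  For RX-capable rings the vector
 * is also added to the aRFS CPU reverse map (CONFIG_RFS_ACCEL), and an
 * affinity hint spreads the vectors across CPUs local to the device's
 * NUMA node via cpumask_local_spread().
 */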
10700 static int bnxt_request_irq(struct bnxt *bp)
10701 {
10702 	int i, j, rc = 0;
10703 	unsigned long flags = 0;
10704 #ifdef CONFIG_RFS_ACCEL
10705 	struct cpu_rmap *rmap;
10706 #endif
10707 
10708 	rc = bnxt_setup_int_mode(bp);
10709 	if (rc) {
10710 		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
10711 			   rc);
10712 		return rc;
10713 	}
10714 #ifdef CONFIG_RFS_ACCEL
10715 	rmap = bp->dev->rx_cpu_rmap;
10716 #endif
10717 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
10718 		flags = IRQF_SHARED;
10719 
10720 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
10721 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10722 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
10723 
10724 #ifdef CONFIG_RFS_ACCEL
10725 		if (rmap && bp->bnapi[i]->rx_ring) {
10726 			rc = irq_cpu_rmap_add(rmap, irq->vector);
10727 			if (rc)
10728 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
10729 					    j);
10730 			j++;
10731 		}
10732 #endif
10733 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
10734 				 bp->bnapi[i]);
10735 		if (rc)
10736 			break;
10737 
10738 		netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
10739 		irq->requested = 1;
10740 
10741 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
10742 			int numa_node = dev_to_node(&bp->pdev->dev);
10743 
10744 			irq->have_cpumask = 1;
10745 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
10746 					irq->cpu_mask);
10747 			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
10748 			if (rc) {
10749 				netdev_warn(bp->dev,
10750 					    "Set affinity failed, IRQ = %d\n",
10751 					    irq->vector);
10752 				break;
10753 			}
10754 		}
10755 	}
10756 	return rc;
10757 }
10758 
10759 static void bnxt_del_napi(struct bnxt *bp)
10760 {
10761 	int i;
10762 
10763 	if (!bp->bnapi)
10764 		return;
10765 
10766 	for (i = 0; i < bp->rx_nr_rings; i++)
10767 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
10768 	for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
10769 		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
10770 
10771 	for (i = 0; i < bp->cp_nr_rings; i++) {
10772 		struct bnxt_napi *bnapi = bp->bnapi[i];
10773 
10774 		__netif_napi_del(&bnapi->napi);
10775 	}
10776 	/* We called __netif_napi_del(), so we must respect an RCU grace
10777 	 * period before freeing the napi structures.
10778 	 */
10779 	synchronize_net();
10780 }
10781 
10782 static void bnxt_init_napi(struct bnxt *bp)
10783 {
10784 	int i;
10785 	unsigned int cp_nr_rings = bp->cp_nr_rings;
10786 	struct bnxt_napi *bnapi;
10787 
10788 	if (bp->flags & BNXT_FLAG_USING_MSIX) {
10789 		int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
10790 
10791 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10792 			poll_fn = bnxt_poll_p5;
10793 		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10794 			cp_nr_rings--;
10795 		for (i = 0; i < cp_nr_rings; i++) {
10796 			bnapi = bp->bnapi[i];
10797 			netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
10798 		}
10799 		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10800 			bnapi = bp->bnapi[cp_nr_rings];
10801 			netif_napi_add(bp->dev, &bnapi->napi,
10802 				       bnxt_poll_nitroa0);
10803 		}
10804 	} else {
10805 		bnapi = bp->bnapi[0];
10806 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
10807 	}
10808 }
10809 
10810 static void bnxt_disable_napi(struct bnxt *bp)
10811 {
10812 	int i;
10813 
10814 	if (!bp->bnapi ||
10815 	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
10816 		return;
10817 
10818 	for (i = 0; i < bp->cp_nr_rings; i++) {
10819 		struct bnxt_napi *bnapi = bp->bnapi[i];
10820 		struct bnxt_cp_ring_info *cpr;
10821 
10822 		cpr = &bnapi->cp_ring;
10823 		if (bnapi->tx_fault)
10824 			cpr->sw_stats->tx.tx_resets++;
10825 		if (bnapi->in_reset)
10826 			cpr->sw_stats->rx.rx_resets++;
10827 		napi_disable(&bnapi->napi);
10828 		if (bnapi->rx_ring)
10829 			cancel_work_sync(&cpr->dim.work);
10830 	}
10831 }
10832 
10833 static void bnxt_enable_napi(struct bnxt *bp)
10834 {
10835 	int i;
10836 
10837 	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
10838 	for (i = 0; i < bp->cp_nr_rings; i++) {
10839 		struct bnxt_napi *bnapi = bp->bnapi[i];
10840 		struct bnxt_cp_ring_info *cpr;
10841 
10842 		bnapi->tx_fault = 0;
10843 
10844 		cpr = &bnapi->cp_ring;
10845 		bnapi->in_reset = false;
10846 
10847 		if (bnapi->rx_ring) {
10848 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
10849 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
10850 		}
10851 		napi_enable(&bnapi->napi);
10852 	}
10853 }
10854 
10855 void bnxt_tx_disable(struct bnxt *bp)
10856 {
10857 	int i;
10858 	struct bnxt_tx_ring_info *txr;
10859 
10860 	if (bp->tx_ring) {
10861 		for (i = 0; i < bp->tx_nr_rings; i++) {
10862 			txr = &bp->tx_ring[i];
10863 			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
10864 		}
10865 	}
10866 	/* Make sure napi polls see @dev_state change */
10867 	synchronize_net();
10868 	/* Drop carrier first to prevent TX timeout */
10869 	netif_carrier_off(bp->dev);
10870 	/* Stop all TX queues */
10871 	netif_tx_disable(bp->dev);
10872 }
10873 
10874 void bnxt_tx_enable(struct bnxt *bp)
10875 {
10876 	int i;
10877 	struct bnxt_tx_ring_info *txr;
10878 
10879 	for (i = 0; i < bp->tx_nr_rings; i++) {
10880 		txr = &bp->tx_ring[i];
10881 		WRITE_ONCE(txr->dev_state, 0);
10882 	}
10883 	/* Make sure napi polls see @dev_state change */
10884 	synchronize_net();
10885 	netif_tx_wake_all_queues(bp->dev);
10886 	if (BNXT_LINK_IS_UP(bp))
10887 		netif_carrier_on(bp->dev);
10888 }
10889 
10890 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
10891 {
10892 	u8 active_fec = link_info->active_fec_sig_mode &
10893 			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
10894 
10895 	switch (active_fec) {
10896 	default:
10897 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
10898 		return "None";
10899 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
10900 		return "Clause 74 BaseR";
10901 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
10902 		return "Clause 91 RS(528,514)";
10903 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
10904 		return "Clause 91 RS544_1XN";
10905 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
10906 		return "Clause 91 RS(544,514)";
10907 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
10908 		return "Clause 91 RS272_1XN";
10909 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
10910 		return "Clause 91 RS(272,257)";
10911 	}
10912 }
10913 
10914 void bnxt_report_link(struct bnxt *bp)
10915 {
10916 	if (BNXT_LINK_IS_UP(bp)) {
10917 		const char *signal = "";
10918 		const char *flow_ctrl;
10919 		const char *duplex;
10920 		u32 speed;
10921 		u16 fec;
10922 
10923 		netif_carrier_on(bp->dev);
10924 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
10925 		if (speed == SPEED_UNKNOWN) {
10926 			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
10927 			return;
10928 		}
10929 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
10930 			duplex = "full";
10931 		else
10932 			duplex = "half";
10933 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
10934 			flow_ctrl = "ON - receive & transmit";
10935 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
10936 			flow_ctrl = "ON - transmit";
10937 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
10938 			flow_ctrl = "ON - receive";
10939 		else
10940 			flow_ctrl = "none";
10941 		if (bp->link_info.phy_qcfg_resp.option_flags &
10942 		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
10943 			u8 sig_mode = bp->link_info.active_fec_sig_mode &
10944 				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
10945 			switch (sig_mode) {
10946 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
10947 				signal = "(NRZ) ";
10948 				break;
10949 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
10950 				signal = "(PAM4 56Gbps) ";
10951 				break;
10952 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
10953 				signal = "(PAM4 112Gbps) ";
10954 				break;
10955 			default:
10956 				break;
10957 			}
10958 		}
10959 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
10960 			    speed, signal, duplex, flow_ctrl);
10961 		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
10962 			netdev_info(bp->dev, "EEE is %s\n",
10963 				    bp->eee.eee_active ? "active" :
10964 							 "not active");
10965 		fec = bp->link_info.fec_cfg;
10966 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
10967 			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
10968 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
10969 				    bnxt_report_fec(&bp->link_info));
10970 	} else {
10971 		netif_carrier_off(bp->dev);
10972 		netdev_err(bp->dev, "NIC Link is Down\n");
10973 	}
10974 }
10975 
10976 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
10977 {
10978 	if (!resp->supported_speeds_auto_mode &&
10979 	    !resp->supported_speeds_force_mode &&
10980 	    !resp->supported_pam4_speeds_auto_mode &&
10981 	    !resp->supported_pam4_speeds_force_mode &&
10982 	    !resp->supported_speeds2_auto_mode &&
10983 	    !resp->supported_speeds2_force_mode)
10984 		return true;
10985 	return false;
10986 }
10987 
10988 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
10989 {
10990 	struct bnxt_link_info *link_info = &bp->link_info;
10991 	struct hwrm_port_phy_qcaps_output *resp;
10992 	struct hwrm_port_phy_qcaps_input *req;
10993 	int rc = 0;
10994 
10995 	if (bp->hwrm_spec_code < 0x10201)
10996 		return 0;
10997 
10998 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
10999 	if (rc)
11000 		return rc;
11001 
11002 	resp = hwrm_req_hold(bp, req);
11003 	rc = hwrm_req_send(bp, req);
11004 	if (rc)
11005 		goto hwrm_phy_qcaps_exit;
11006 
11007 	bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
11008 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
11009 		struct ethtool_keee *eee = &bp->eee;
11010 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
11011 
11012 		_bnxt_fw_to_linkmode(eee->supported, fw_speeds);
11013 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
11014 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
11015 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
11016 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
11017 	}
11018 
11019 	if (bp->hwrm_spec_code >= 0x10a01) {
11020 		if (bnxt_phy_qcaps_no_speed(resp)) {
11021 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
11022 			netdev_warn(bp->dev, "Ethernet link disabled\n");
11023 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
11024 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
11025 			netdev_info(bp->dev, "Ethernet link enabled\n");
11026 			/* PHY re-enabled, reprobe the speeds */
11027 			link_info->support_auto_speeds = 0;
11028 			link_info->support_pam4_auto_speeds = 0;
11029 			link_info->support_auto_speeds2 = 0;
11030 		}
11031 	}
11032 	if (resp->supported_speeds_auto_mode)
11033 		link_info->support_auto_speeds =
11034 			le16_to_cpu(resp->supported_speeds_auto_mode);
11035 	if (resp->supported_pam4_speeds_auto_mode)
11036 		link_info->support_pam4_auto_speeds =
11037 			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
11038 	if (resp->supported_speeds2_auto_mode)
11039 		link_info->support_auto_speeds2 =
11040 			le16_to_cpu(resp->supported_speeds2_auto_mode);
11041 
11042 	bp->port_count = resp->port_cnt;
11043 
11044 hwrm_phy_qcaps_exit:
11045 	hwrm_req_drop(bp, req);
11046 	return rc;
11047 }
11048 
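/* Return true if @advertising contains any speed bit that is absent from
 * @supported.  Worked example: advertising = 0x7, supported = 0x5 gives
 * diff = 0x2, and (supported | diff) = 0x7 != supported, so a previously
 * advertised speed is no longer supported.
 */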
11049 static bool bnxt_support_dropped(u16 advertising, u16 supported)
11050 {
11051 	u16 diff = advertising ^ supported;
11052 
11053 	return ((supported | diff) != supported);
11054 }
11055 
11056 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
11057 {
11058 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
11059 
11060 	/* Check if any advertised speeds are no longer supported. The caller
11061 	 * holds the link_lock mutex, so we can modify link_info settings.
11062 	 */
11063 	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11064 		if (bnxt_support_dropped(link_info->advertising,
11065 					 link_info->support_auto_speeds2)) {
11066 			link_info->advertising = link_info->support_auto_speeds2;
11067 			return true;
11068 		}
11069 		return false;
11070 	}
11071 	if (bnxt_support_dropped(link_info->advertising,
11072 				 link_info->support_auto_speeds)) {
11073 		link_info->advertising = link_info->support_auto_speeds;
11074 		return true;
11075 	}
11076 	if (bnxt_support_dropped(link_info->advertising_pam4,
11077 				 link_info->support_pam4_auto_speeds)) {
11078 		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
11079 		return true;
11080 	}
11081 	return false;
11082 }
11083 
11084 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
11085 {
11086 	struct bnxt_link_info *link_info = &bp->link_info;
11087 	struct hwrm_port_phy_qcfg_output *resp;
11088 	struct hwrm_port_phy_qcfg_input *req;
11089 	u8 link_state = link_info->link_state;
11090 	bool support_changed;
11091 	int rc;
11092 
11093 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
11094 	if (rc)
11095 		return rc;
11096 
11097 	resp = hwrm_req_hold(bp, req);
11098 	rc = hwrm_req_send(bp, req);
11099 	if (rc) {
11100 		hwrm_req_drop(bp, req);
11101 		if (BNXT_VF(bp) && rc == -ENODEV) {
11102 			netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
11103 			rc = 0;
11104 		}
11105 		return rc;
11106 	}
11107 
11108 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
11109 	link_info->phy_link_status = resp->link;
11110 	link_info->duplex = resp->duplex_cfg;
11111 	if (bp->hwrm_spec_code >= 0x10800)
11112 		link_info->duplex = resp->duplex_state;
11113 	link_info->pause = resp->pause;
11114 	link_info->auto_mode = resp->auto_mode;
11115 	link_info->auto_pause_setting = resp->auto_pause;
11116 	link_info->lp_pause = resp->link_partner_adv_pause;
11117 	link_info->force_pause_setting = resp->force_pause;
11118 	link_info->duplex_setting = resp->duplex_cfg;
11119 	if (link_info->phy_link_status == BNXT_LINK_LINK) {
11120 		link_info->link_speed = le16_to_cpu(resp->link_speed);
11121 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
11122 			link_info->active_lanes = resp->active_lanes;
11123 	} else {
11124 		link_info->link_speed = 0;
11125 		link_info->active_lanes = 0;
11126 	}
11127 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
11128 	link_info->force_pam4_link_speed =
11129 		le16_to_cpu(resp->force_pam4_link_speed);
11130 	link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
11131 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
11132 	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
11133 	link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
11134 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
11135 	link_info->auto_pam4_link_speeds =
11136 		le16_to_cpu(resp->auto_pam4_link_speed_mask);
11137 	link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
11138 	link_info->lp_auto_link_speeds =
11139 		le16_to_cpu(resp->link_partner_adv_speeds);
11140 	link_info->lp_auto_pam4_link_speeds =
11141 		resp->link_partner_pam4_adv_speeds;
11142 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
11143 	link_info->phy_ver[0] = resp->phy_maj;
11144 	link_info->phy_ver[1] = resp->phy_min;
11145 	link_info->phy_ver[2] = resp->phy_bld;
11146 	link_info->media_type = resp->media_type;
11147 	link_info->phy_type = resp->phy_type;
11148 	link_info->transceiver = resp->xcvr_pkg_type;
11149 	link_info->phy_addr = resp->eee_config_phy_addr &
11150 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
11151 	link_info->module_status = resp->module_status;
11152 
11153 	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
11154 		struct ethtool_keee *eee = &bp->eee;
11155 		u16 fw_speeds;
11156 
11157 		eee->eee_active = 0;
11158 		if (resp->eee_config_phy_addr &
11159 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
11160 			eee->eee_active = 1;
11161 			fw_speeds = le16_to_cpu(
11162 				resp->link_partner_adv_eee_link_speed_mask);
11163 			_bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
11164 		}
11165 
11166 		/* Pull initial EEE config */
11167 		if (!chng_link_state) {
11168 			if (resp->eee_config_phy_addr &
11169 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
11170 				eee->eee_enabled = 1;
11171 
11172 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
11173 			_bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
11174 
11175 			if (resp->eee_config_phy_addr &
11176 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
11177 				__le32 tmr;
11178 
11179 				eee->tx_lpi_enabled = 1;
11180 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
11181 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
11182 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
11183 			}
11184 		}
11185 	}
11186 
11187 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
11188 	if (bp->hwrm_spec_code >= 0x10504) {
11189 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
11190 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
11191 	}
11192 	/* TODO: need to add more logic to report VF link */
11193 	if (chng_link_state) {
11194 		if (link_info->phy_link_status == BNXT_LINK_LINK)
11195 			link_info->link_state = BNXT_LINK_STATE_UP;
11196 		else
11197 			link_info->link_state = BNXT_LINK_STATE_DOWN;
11198 		if (link_state != link_info->link_state)
11199 			bnxt_report_link(bp);
11200 	} else {
11201 		/* Always report link down if not required to update the link state */
11202 		link_info->link_state = BNXT_LINK_STATE_DOWN;
11203 	}
11204 	hwrm_req_drop(bp, req);
11205 
11206 	if (!BNXT_PHY_CFG_ABLE(bp))
11207 		return 0;
11208 
11209 	support_changed = bnxt_support_speed_dropped(link_info);
11210 	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
11211 		bnxt_hwrm_set_link_setting(bp, true, false);
11212 	return 0;
11213 }
11214 
11215 static void bnxt_get_port_module_status(struct bnxt *bp)
11216 {
11217 	struct bnxt_link_info *link_info = &bp->link_info;
11218 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
11219 	u8 module_status;
11220 
11221 	if (bnxt_update_link(bp, true))
11222 		return;
11223 
11224 	module_status = link_info->module_status;
11225 	switch (module_status) {
11226 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
11227 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
11228 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
11229 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
11230 			    bp->pf.port_id);
11231 		if (bp->hwrm_spec_code >= 0x10201) {
11232 			netdev_warn(bp->dev, "Module part number %s\n",
11233 				    resp->phy_vendor_partnumber);
11234 		}
11235 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
11236 			netdev_warn(bp->dev, "TX is disabled\n");
11237 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
11238 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
11239 	}
11240 }
11241 
11242 static void
11243 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11244 {
11245 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
11246 		if (bp->hwrm_spec_code >= 0x10201)
11247 			req->auto_pause =
11248 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
11249 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11250 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
11251 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11252 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
11253 		req->enables |=
11254 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11255 	} else {
11256 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11257 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
11258 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11259 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
11260 		req->enables |=
11261 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
11262 		if (bp->hwrm_spec_code >= 0x10201) {
11263 			req->auto_pause = req->force_pause;
11264 			req->enables |= cpu_to_le32(
11265 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11266 		}
11267 	}
11268 }
11269 
11270 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11271 {
11272 	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
11273 		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
11274 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11275 			req->enables |=
11276 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
11277 			req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
11278 		} else if (bp->link_info.advertising) {
11279 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
11280 			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
11281 		}
11282 		if (bp->link_info.advertising_pam4) {
11283 			req->enables |=
11284 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
11285 			req->auto_link_pam4_speed_mask =
11286 				cpu_to_le16(bp->link_info.advertising_pam4);
11287 		}
11288 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
11289 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
11290 	} else {
11291 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
11292 		if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11293 			req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
11294 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
11295 			netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
11296 				   (u32)bp->link_info.req_link_speed);
11297 		} else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
11298 			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11299 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
11300 		} else {
11301 			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11302 		}
11303 	}
11304 
11305 	/* Tell the ChiMP firmware that the setting takes effect immediately */
11306 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
11307 }
11308 
11309 int bnxt_hwrm_set_pause(struct bnxt *bp)
11310 {
11311 	struct hwrm_port_phy_cfg_input *req;
11312 	int rc;
11313 
11314 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11315 	if (rc)
11316 		return rc;
11317 
11318 	bnxt_hwrm_set_pause_common(bp, req);
11319 
11320 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
11321 	    bp->link_info.force_link_chng)
11322 		bnxt_hwrm_set_link_common(bp, req);
11323 
11324 	rc = hwrm_req_send(bp, req);
11325 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
11326 		/* Since changing the pause setting doesn't trigger any link
11327 		 * change event, the driver needs to update the current pause
11328 		 * result upon successful return of the phy_cfg command.
11329 		 */
11330 		bp->link_info.pause =
11331 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
11332 		bp->link_info.auto_pause_setting = 0;
11333 		if (!bp->link_info.force_link_chng)
11334 			bnxt_report_link(bp);
11335 	}
11336 	bp->link_info.force_link_chng = false;
11337 	return rc;
11338 }
11339 
11340 static void bnxt_hwrm_set_eee(struct bnxt *bp,
11341 			      struct hwrm_port_phy_cfg_input *req)
11342 {
11343 	struct ethtool_keee *eee = &bp->eee;
11344 
11345 	if (eee->eee_enabled) {
11346 		u16 eee_speeds;
11347 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
11348 
11349 		if (eee->tx_lpi_enabled)
11350 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
11351 		else
11352 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
11353 
11354 		req->flags |= cpu_to_le32(flags);
11355 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
11356 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
11357 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
11358 	} else {
11359 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
11360 	}
11361 }
11362 
11363 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
11364 {
11365 	struct hwrm_port_phy_cfg_input *req;
11366 	int rc;
11367 
11368 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11369 	if (rc)
11370 		return rc;
11371 
11372 	if (set_pause)
11373 		bnxt_hwrm_set_pause_common(bp, req);
11374 
11375 	bnxt_hwrm_set_link_common(bp, req);
11376 
11377 	if (set_eee)
11378 		bnxt_hwrm_set_eee(bp, req);
11379 	return hwrm_req_send(bp, req);
11380 }
11381 
11382 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
11383 {
11384 	struct hwrm_port_phy_cfg_input *req;
11385 	int rc;
11386 
11387 	if (!BNXT_SINGLE_PF(bp))
11388 		return 0;
11389 
11390 	if (pci_num_vf(bp->pdev) &&
11391 	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
11392 		return 0;
11393 
11394 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11395 	if (rc)
11396 		return rc;
11397 
11398 	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
11399 	rc = hwrm_req_send(bp, req);
11400 	if (!rc) {
11401 		mutex_lock(&bp->link_lock);
11402 		/* The device is not obliged to link down in certain scenarios,
11403 		 * even when forced.  Setting the state to unknown is consistent
11404 		 * with driver startup and will force the link state to be
11405 		 * reported during a subsequent open based on PORT_PHY_QCFG.
11406 		 */
11407 		bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
11408 		mutex_unlock(&bp->link_lock);
11409 	}
11410 	return rc;
11411 }
11412 
11413 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11414 {
11415 #ifdef CONFIG_TEE_BNXT_FW
11416 	int rc = tee_bnxt_fw_load();
11417 
11418 	if (rc)
11419 		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11420 
11421 	return rc;
11422 #else
11423 	netdev_err(bp->dev, "OP-TEE not supported\n");
11424 	return -ENODEV;
11425 #endif
11426 }
11427 
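/* Poll the firmware health register until the firmware leaves the
 * booting/recovering states, retrying the HWRM poll up to BNXT_FW_RETRY
 * times while it returns -EBUSY.  If the firmware crashed with no master
 * function to reset it, a reset via OP-TEE is attempted instead.
 */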
11428 static int bnxt_try_recover_fw(struct bnxt *bp)
11429 {
11430 	if (bp->fw_health && bp->fw_health->status_reliable) {
11431 		int retry = 0, rc;
11432 		u32 sts;
11433 
11434 		do {
11435 			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11436 			rc = bnxt_hwrm_poll(bp);
11437 			if (!BNXT_FW_IS_BOOTING(sts) &&
11438 			    !BNXT_FW_IS_RECOVERING(sts))
11439 				break;
11440 			retry++;
11441 		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
11442 
11443 		if (!BNXT_FW_IS_HEALTHY(sts)) {
11444 			netdev_err(bp->dev,
11445 				   "Firmware not responding, status: 0x%x\n",
11446 				   sts);
11447 			rc = -ENODEV;
11448 		}
11449 		if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
11450 			netdev_warn(bp->dev, "Firmware recovery via OP-TEE requested\n");
11451 			return bnxt_fw_reset_via_optee(bp);
11452 		}
11453 		return rc;
11454 	}
11455 
11456 	return -ENODEV;
11457 }
11458 
11459 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
11460 {
11461 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11462 
11463 	if (!BNXT_NEW_RM(bp))
11464 		return; /* no resource reservations required */
11465 
11466 	hw_resc->resv_cp_rings = 0;
11467 	hw_resc->resv_stat_ctxs = 0;
11468 	hw_resc->resv_irqs = 0;
11469 	hw_resc->resv_tx_rings = 0;
11470 	hw_resc->resv_rx_rings = 0;
11471 	hw_resc->resv_hw_ring_grps = 0;
11472 	hw_resc->resv_vnics = 0;
11473 	hw_resc->resv_rsscos_ctxs = 0;
11474 	if (!fw_reset) {
11475 		bp->tx_nr_rings = 0;
11476 		bp->rx_nr_rings = 0;
11477 	}
11478 }
11479 
11480 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
11481 {
11482 	int rc;
11483 
11484 	if (!BNXT_NEW_RM(bp))
11485 		return 0; /* no resource reservations required */
11486 
11487 	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
11488 	if (rc)
11489 		netdev_err(bp->dev, "resc_qcaps failed\n");
11490 
11491 	bnxt_clear_reservations(bp, fw_reset);
11492 
11493 	return rc;
11494 }
11495 
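/* Notify the firmware that the interface is going up or down.  On the
 * "up" path the response flags tell the driver whether resources changed
 * or a hot firmware reset completed while the interface was down; in the
 * reset case the driver frees context memory, re-runs bnxt_fw_init_one()
 * and reinitializes the interrupt mode before canceling stale resource
 * reservations.
 */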
11496 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
11497 {
11498 	struct hwrm_func_drv_if_change_output *resp;
11499 	struct hwrm_func_drv_if_change_input *req;
11500 	bool fw_reset = !bp->irq_tbl;
11501 	bool resc_reinit = false;
11502 	int rc, retry = 0;
11503 	u32 flags = 0;
11504 
11505 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
11506 		return 0;
11507 
11508 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
11509 	if (rc)
11510 		return rc;
11511 
11512 	if (up)
11513 		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
11514 	resp = hwrm_req_hold(bp, req);
11515 
11516 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
11517 	while (retry < BNXT_FW_IF_RETRY) {
11518 		rc = hwrm_req_send(bp, req);
11519 		if (rc != -EAGAIN)
11520 			break;
11521 
11522 		msleep(50);
11523 		retry++;
11524 	}
11525 
11526 	if (rc == -EAGAIN) {
11527 		hwrm_req_drop(bp, req);
11528 		return rc;
11529 	} else if (!rc) {
11530 		flags = le32_to_cpu(resp->flags);
11531 	} else if (up) {
11532 		rc = bnxt_try_recover_fw(bp);
11533 		fw_reset = true;
11534 	}
11535 	hwrm_req_drop(bp, req);
11536 	if (rc)
11537 		return rc;
11538 
11539 	if (!up) {
11540 		bnxt_inv_fw_health_reg(bp);
11541 		return 0;
11542 	}
11543 
11544 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
11545 		resc_reinit = true;
11546 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
11547 	    test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
11548 		fw_reset = true;
11549 	else
11550 		bnxt_remap_fw_health_regs(bp);
11551 
11552 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
11553 		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
11554 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11555 		return -ENODEV;
11556 	}
11557 	if (resc_reinit || fw_reset) {
11558 		if (fw_reset) {
11559 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11560 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11561 				bnxt_ulp_irq_stop(bp);
11562 			bnxt_free_ctx_mem(bp);
11563 			bnxt_dcb_free(bp);
11564 			rc = bnxt_fw_init_one(bp);
11565 			if (rc) {
11566 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11567 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11568 				return rc;
11569 			}
11570 			bnxt_clear_int_mode(bp);
11571 			rc = bnxt_init_int_mode(bp);
11572 			if (rc) {
11573 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11574 				netdev_err(bp->dev, "init int mode failed\n");
11575 				return rc;
11576 			}
11577 		}
11578 		rc = bnxt_cancel_reservations(bp, fw_reset);
11579 	}
11580 	return rc;
11581 }
11582 
11583 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
11584 {
11585 	struct hwrm_port_led_qcaps_output *resp;
11586 	struct hwrm_port_led_qcaps_input *req;
11587 	struct bnxt_pf_info *pf = &bp->pf;
11588 	int rc;
11589 
11590 	bp->num_leds = 0;
11591 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
11592 		return 0;
11593 
11594 	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
11595 	if (rc)
11596 		return rc;
11597 
11598 	req->port_id = cpu_to_le16(pf->port_id);
11599 	resp = hwrm_req_hold(bp, req);
11600 	rc = hwrm_req_send(bp, req);
11601 	if (rc) {
11602 		hwrm_req_drop(bp, req);
11603 		return rc;
11604 	}
11605 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
11606 		int i;
11607 
11608 		bp->num_leds = resp->num_leds;
11609 		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
11610 						 bp->num_leds);
11611 		for (i = 0; i < bp->num_leds; i++) {
11612 			struct bnxt_led_info *led = &bp->leds[i];
11613 			__le16 caps = led->led_state_caps;
11614 
11615 			if (!led->led_group_id ||
11616 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
11617 				bp->num_leds = 0;
11618 				break;
11619 			}
11620 		}
11621 	}
11622 	hwrm_req_drop(bp, req);
11623 	return 0;
11624 }
11625 
11626 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
11627 {
11628 	struct hwrm_wol_filter_alloc_output *resp;
11629 	struct hwrm_wol_filter_alloc_input *req;
11630 	int rc;
11631 
11632 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
11633 	if (rc)
11634 		return rc;
11635 
11636 	req->port_id = cpu_to_le16(bp->pf.port_id);
11637 	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
11638 	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
11639 	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
11640 
11641 	resp = hwrm_req_hold(bp, req);
11642 	rc = hwrm_req_send(bp, req);
11643 	if (!rc)
11644 		bp->wol_filter_id = resp->wol_filter_id;
11645 	hwrm_req_drop(bp, req);
11646 	return rc;
11647 }
11648 
11649 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
11650 {
11651 	struct hwrm_wol_filter_free_input *req;
11652 	int rc;
11653 
11654 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
11655 	if (rc)
11656 		return rc;
11657 
11658 	req->port_id = cpu_to_le16(bp->pf.port_id);
11659 	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
11660 	req->wol_filter_id = bp->wol_filter_id;
11661 
11662 	return hwrm_req_send(bp, req);
11663 }
11664 
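/* Query the WOL filter at @handle and return the handle of the next
 * filter, recording the filter id if it is a magic-packet filter.
 * bnxt_get_wol_settings() walks the list until the firmware returns a
 * next handle of 0 or 0xffff.
 */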
11665 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
11666 {
11667 	struct hwrm_wol_filter_qcfg_output *resp;
11668 	struct hwrm_wol_filter_qcfg_input *req;
11669 	u16 next_handle = 0;
11670 	int rc;
11671 
11672 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
11673 	if (rc)
11674 		return rc;
11675 
11676 	req->port_id = cpu_to_le16(bp->pf.port_id);
11677 	req->handle = cpu_to_le16(handle);
11678 	resp = hwrm_req_hold(bp, req);
11679 	rc = hwrm_req_send(bp, req);
11680 	if (!rc) {
11681 		next_handle = le16_to_cpu(resp->next_handle);
11682 		if (next_handle != 0) {
11683 			if (resp->wol_type ==
11684 			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
11685 				bp->wol = 1;
11686 				bp->wol_filter_id = resp->wol_filter_id;
11687 			}
11688 		}
11689 	}
11690 	hwrm_req_drop(bp, req);
11691 	return next_handle;
11692 }
11693 
11694 static void bnxt_get_wol_settings(struct bnxt *bp)
11695 {
11696 	u16 handle = 0;
11697 
11698 	bp->wol = 0;
11699 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
11700 		return;
11701 
11702 	do {
11703 		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
11704 	} while (handle && handle != 0xffff);
11705 }
11706 
11707 static bool bnxt_eee_config_ok(struct bnxt *bp)
11708 {
11709 	struct ethtool_keee *eee = &bp->eee;
11710 	struct bnxt_link_info *link_info = &bp->link_info;
11711 
11712 	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
11713 		return true;
11714 
11715 	if (eee->eee_enabled) {
11716 		__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
11717 		__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
11718 
11719 		_bnxt_fw_to_linkmode(advertising, link_info->advertising);
11720 
11721 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
11722 			eee->eee_enabled = 0;
11723 			return false;
11724 		}
11725 		if (linkmode_andnot(tmp, eee->advertised, advertising)) {
11726 			linkmode_and(eee->advertised, advertising,
11727 				     eee->supported);
11728 			return false;
11729 		}
11730 	}
11731 	return true;
11732 }
11733 
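/* Compare the requested pause, speed and duplex settings against what
 * the firmware last reported and issue PORT_PHY_CFG only when something
 * actually needs to change.  The link is also reconfigured whenever it
 * is down, since the last close may have forced it down.
 */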
11734 static int bnxt_update_phy_setting(struct bnxt *bp)
11735 {
11736 	int rc;
11737 	bool update_link = false;
11738 	bool update_pause = false;
11739 	bool update_eee = false;
11740 	struct bnxt_link_info *link_info = &bp->link_info;
11741 
11742 	rc = bnxt_update_link(bp, true);
11743 	if (rc) {
11744 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
11745 			   rc);
11746 		return rc;
11747 	}
11748 	if (!BNXT_SINGLE_PF(bp))
11749 		return 0;
11750 
11751 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
11752 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
11753 	    link_info->req_flow_ctrl)
11754 		update_pause = true;
11755 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
11756 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
11757 		update_pause = true;
11758 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
11759 		if (BNXT_AUTO_MODE(link_info->auto_mode))
11760 			update_link = true;
11761 		if (bnxt_force_speed_updated(link_info))
11762 			update_link = true;
11763 		if (link_info->req_duplex != link_info->duplex_setting)
11764 			update_link = true;
11765 	} else {
11766 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
11767 			update_link = true;
11768 		if (bnxt_auto_speed_updated(link_info))
11769 			update_link = true;
11770 	}
11771 
11772 	/* The last close may have shut down the link, so we need to call
11773 	 * PHY_CFG to bring it back up.
11774 	 */
11775 	if (!BNXT_LINK_IS_UP(bp))
11776 		update_link = true;
11777 
11778 	if (!bnxt_eee_config_ok(bp))
11779 		update_eee = true;
11780 
11781 	if (update_link)
11782 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
11783 	else if (update_pause)
11784 		rc = bnxt_hwrm_set_pause(bp);
11785 	if (rc) {
11786 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
11787 			   rc);
11788 		return rc;
11789 	}
11790 
11791 	return rc;
11792 }
11793 
11794 /* Common routine to pre-map certain register blocks to different GRC windows.
11795  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
11796  * on the PF and 3 windows on the VF can be customized to map in different
11797  * register blocks.
11798  */
11799 static void bnxt_preset_reg_win(struct bnxt *bp)
11800 {
11801 	if (BNXT_PF(bp)) {
11802 		/* CAG registers map to GRC window #4 */
11803 		writel(BNXT_CAG_REG_BASE,
11804 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
11805 	}
11806 }
11807 
11808 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
11809 
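/* After an aborted firmware reset, try to bring the firmware and the
 * interrupt mode back to a sane state.  On success the ABORT_ERR flag is
 * cleared and FW_RESET_DET is set so that the subsequent open path
 * restarts the ULPs.
 */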
11810 static int bnxt_reinit_after_abort(struct bnxt *bp)
11811 {
11812 	int rc;
11813 
11814 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11815 		return -EBUSY;
11816 
11817 	if (bp->dev->reg_state == NETREG_UNREGISTERED)
11818 		return -ENODEV;
11819 
11820 	rc = bnxt_fw_init_one(bp);
11821 	if (!rc) {
11822 		bnxt_clear_int_mode(bp);
11823 		rc = bnxt_init_int_mode(bp);
11824 		if (!rc) {
11825 			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11826 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11827 		}
11828 	}
11829 	return rc;
11830 }
11831 
11832 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
11833 {
11834 	struct bnxt_ntuple_filter *ntp_fltr;
11835 	struct bnxt_l2_filter *l2_fltr;
11836 
11837 	if (list_empty(&fltr->list))
11838 		return;
11839 
11840 	if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
11841 		ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
11842 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
11843 		atomic_inc(&l2_fltr->refcnt);
11844 		ntp_fltr->l2_fltr = l2_fltr;
11845 		if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
11846 			bnxt_del_ntp_filter(bp, ntp_fltr);
11847 			netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
11848 				   fltr->sw_id);
11849 		}
11850 	} else if (fltr->type == BNXT_FLTR_TYPE_L2) {
11851 		l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
11852 		if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
11853 			bnxt_del_l2_filter(bp, l2_fltr);
11854 			netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
11855 				   fltr->sw_id);
11856 		}
11857 	}
11858 }
11859 
11860 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
11861 {
11862 	struct bnxt_filter_base *usr_fltr, *tmp;
11863 
11864 	list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
11865 		bnxt_cfg_one_usr_fltr(bp, usr_fltr);
11866 }
11867 
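/* Worked example of the mapping below: with tx_nr_rings_per_tc = 4 and
 * 8 online CPUs, CPUs 0 and 4 land in the mask for queue 0 of every TC,
 * CPUs 1 and 5 in the mask for queue 1, and so on (CPU numbering here
 * follows cpumask_local_spread() ordering, not raw CPU ids).
 */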
11868 static int bnxt_set_xps_mapping(struct bnxt *bp)
11869 {
11870 	int numa_node = dev_to_node(&bp->pdev->dev);
11871 	unsigned int q_idx, map_idx, cpu, i;
11872 	const struct cpumask *cpu_mask_ptr;
11873 	int nr_cpus = num_online_cpus();
11874 	cpumask_t *q_map;
11875 	int rc = 0;
11876 
11877 	q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
11878 	if (!q_map)
11879 		return -ENOMEM;
11880 
11881 	/* Create CPU mask for all TX queues across MQPRIO traffic classes.
11882 	 * Each TC has the same number of TX queues. The nth TX queue for each
11883 	 * TC will have the same CPU mask.
11884 	 */
11885 	for (i = 0; i < nr_cpus; i++) {
11886 		map_idx = i % bp->tx_nr_rings_per_tc;
11887 		cpu = cpumask_local_spread(i, numa_node);
11888 		cpu_mask_ptr = get_cpu_mask(cpu);
11889 		cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
11890 	}
11891 
11892 	/* Register CPU mask for each TX queue except the ones marked for XDP */
11893 	for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
11894 		map_idx = q_idx % bp->tx_nr_rings_per_tc;
11895 		rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
11896 		if (rc) {
11897 			netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
11898 				    q_idx);
11899 			break;
11900 		}
11901 	}
11902 
11903 	kfree(q_map);
11904 
11905 	return rc;
11906 }
11907 
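/* Core open sequence: reserve rings, allocate memory, set up NAPI and
 * IRQs (when irq_re_init), initialize the NIC, then enable NAPI,
 * interrupts and the TX queues before kicking the timer and polling the
 * initial link and SFP+ module status.
 */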
11908 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11909 {
11910 	int rc = 0;
11911 
11912 	bnxt_preset_reg_win(bp);
11913 	netif_carrier_off(bp->dev);
11914 	if (irq_re_init) {
11915 		/* Reserve rings now if none were reserved at driver probe. */
11916 		rc = bnxt_init_dflt_ring_mode(bp);
11917 		if (rc) {
11918 			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
11919 			return rc;
11920 		}
11921 	}
11922 	rc = bnxt_reserve_rings(bp, irq_re_init);
11923 	if (rc)
11924 		return rc;
11925 	if ((bp->flags & BNXT_FLAG_RFS) &&
11926 	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
11927 		/* disable RFS if falling back to INTA */
11928 		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
11929 		bp->flags &= ~BNXT_FLAG_RFS;
11930 	}
11931 
11932 	rc = bnxt_alloc_mem(bp, irq_re_init);
11933 	if (rc) {
11934 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11935 		goto open_err_free_mem;
11936 	}
11937 
11938 	if (irq_re_init) {
11939 		bnxt_init_napi(bp);
11940 		rc = bnxt_request_irq(bp);
11941 		if (rc) {
11942 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
11943 			goto open_err_irq;
11944 		}
11945 	}
11946 
11947 	rc = bnxt_init_nic(bp, irq_re_init);
11948 	if (rc) {
11949 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11950 		goto open_err_irq;
11951 	}
11952 
11953 	bnxt_enable_napi(bp);
11954 	bnxt_debug_dev_init(bp);
11955 
11956 	if (link_re_init) {
11957 		mutex_lock(&bp->link_lock);
11958 		rc = bnxt_update_phy_setting(bp);
11959 		mutex_unlock(&bp->link_lock);
11960 		if (rc) {
11961 			netdev_warn(bp->dev, "failed to update phy settings\n");
11962 			if (BNXT_SINGLE_PF(bp)) {
11963 				bp->link_info.phy_retry = true;
11964 				bp->link_info.phy_retry_expires =
11965 					jiffies + 5 * HZ;
11966 			}
11967 		}
11968 	}
11969 
11970 	if (irq_re_init) {
11971 		udp_tunnel_nic_reset_ntf(bp->dev);
11972 		rc = bnxt_set_xps_mapping(bp);
11973 		if (rc)
11974 			netdev_warn(bp->dev, "failed to set xps mapping\n");
11975 	}
11976 
11977 	if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
11978 		if (!static_key_enabled(&bnxt_xdp_locking_key))
11979 			static_branch_enable(&bnxt_xdp_locking_key);
11980 	} else if (static_key_enabled(&bnxt_xdp_locking_key)) {
11981 		static_branch_disable(&bnxt_xdp_locking_key);
11982 	}
11983 	set_bit(BNXT_STATE_OPEN, &bp->state);
11984 	bnxt_enable_int(bp);
11985 	/* Enable TX queues */
11986 	bnxt_tx_enable(bp);
11987 	mod_timer(&bp->timer, jiffies + bp->current_interval);
11988 	/* Poll link status and check for SFP+ module status */
11989 	mutex_lock(&bp->link_lock);
11990 	bnxt_get_port_module_status(bp);
11991 	mutex_unlock(&bp->link_lock);
11992 
11993 	/* VF-reps may need to be re-opened after the PF is re-opened */
11994 	if (BNXT_PF(bp))
11995 		bnxt_vf_reps_open(bp);
11996 	if (bp->ptp_cfg)
11997 		atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
11998 	bnxt_ptp_init_rtc(bp, true);
11999 	bnxt_ptp_cfg_tstamp_filters(bp);
12000 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12001 		bnxt_hwrm_realloc_rss_ctx_vnic(bp);
12002 	bnxt_cfg_usr_fltrs(bp);
12003 	return 0;
12004 
12005 open_err_irq:
12006 	bnxt_del_napi(bp);
12007 
12008 open_err_free_mem:
12009 	bnxt_free_skbs(bp);
12010 	bnxt_free_irq(bp);
12011 	bnxt_free_mem(bp, true);
12012 	return rc;
12013 }
12014 
12015 /* rtnl_lock held */
12016 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12017 {
12018 	int rc = 0;
12019 
12020 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
12021 		rc = -EIO;
12022 	if (!rc)
12023 		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
12024 	if (rc) {
12025 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
12026 		dev_close(bp->dev);
12027 	}
12028 	return rc;
12029 }
12030 
12031 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
12032  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
12033  * self-tests.
12034  */
12035 int bnxt_half_open_nic(struct bnxt *bp)
12036 {
12037 	int rc = 0;
12038 
12039 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12040 		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
12041 		rc = -ENODEV;
12042 		goto half_open_err;
12043 	}
12044 
12045 	rc = bnxt_alloc_mem(bp, true);
12046 	if (rc) {
12047 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12048 		goto half_open_err;
12049 	}
12050 	bnxt_init_napi(bp);
12051 	set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12052 	rc = bnxt_init_nic(bp, true);
12053 	if (rc) {
12054 		clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12055 		bnxt_del_napi(bp);
12056 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12057 		goto half_open_err;
12058 	}
12059 	return 0;
12060 
12061 half_open_err:
12062 	bnxt_free_skbs(bp);
12063 	bnxt_free_mem(bp, true);
12064 	dev_close(bp->dev);
12065 	return rc;
12066 }
12067 
12068 /* rtnl_lock held, this call can only be made after a previous successful
12069  * call to bnxt_half_open_nic().
12070  */
12071 void bnxt_half_close_nic(struct bnxt *bp)
12072 {
12073 	bnxt_hwrm_resource_free(bp, false, true);
12074 	bnxt_del_napi(bp);
12075 	bnxt_free_skbs(bp);
12076 	bnxt_free_mem(bp, true);
12077 	clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12078 }
12079 
12080 void bnxt_reenable_sriov(struct bnxt *bp)
12081 {
12082 	if (BNXT_PF(bp)) {
12083 		struct bnxt_pf_info *pf = &bp->pf;
12084 		int n = pf->active_vfs;
12085 
12086 		if (n)
12087 			bnxt_cfg_hw_sriov(bp, &n, true);
12088 	}
12089 }
12090 
12091 static int bnxt_open(struct net_device *dev)
12092 {
12093 	struct bnxt *bp = netdev_priv(dev);
12094 	int rc;
12095 
12096 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12097 		rc = bnxt_reinit_after_abort(bp);
12098 		if (rc) {
12099 			if (rc == -EBUSY)
12100 				netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
12101 			else
12102 				netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
12103 			return -ENODEV;
12104 		}
12105 	}
12106 
12107 	rc = bnxt_hwrm_if_change(bp, true);
12108 	if (rc)
12109 		return rc;
12110 
12111 	rc = __bnxt_open_nic(bp, true, true);
12112 	if (rc) {
12113 		bnxt_hwrm_if_change(bp, false);
12114 	} else {
12115 		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12116 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12117 				bnxt_queue_sp_work(bp,
12118 						   BNXT_RESTART_ULP_SP_EVENT);
12119 		}
12120 	}
12121 
12122 	return rc;
12123 }
12124 
12125 static bool bnxt_drv_busy(struct bnxt *bp)
12126 {
12127 	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
12128 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
12129 }
12130 
12131 static void bnxt_get_ring_stats(struct bnxt *bp,
12132 				struct rtnl_link_stats64 *stats);
12133 
12134 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
12135 			     bool link_re_init)
12136 {
12137 	/* Close the VF-reps before closing PF */
12138 	if (BNXT_PF(bp))
12139 		bnxt_vf_reps_close(bp);
12140 
12141 	/* Change device state to avoid TX queue wake-ups */
12142 	bnxt_tx_disable(bp);
12143 
12144 	clear_bit(BNXT_STATE_OPEN, &bp->state);
12145 	smp_mb__after_atomic();
12146 	while (bnxt_drv_busy(bp))
12147 		msleep(20);
12148 
12149 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12150 		bnxt_clear_rss_ctxs(bp, false);
12151 	/* Flush rings and disable interrupts */
12152 	bnxt_shutdown_nic(bp, irq_re_init);
12153 
12154 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
12155 
12156 	bnxt_debug_dev_exit(bp);
12157 	bnxt_disable_napi(bp);
12158 	del_timer_sync(&bp->timer);
12159 	bnxt_free_skbs(bp);
12160 
12161 	/* Save ring stats before shutdown */
12162 	if (bp->bnapi && irq_re_init) {
12163 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
12164 		bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
12165 	}
12166 	if (irq_re_init) {
12167 		bnxt_free_irq(bp);
12168 		bnxt_del_napi(bp);
12169 	}
12170 	bnxt_free_mem(bp, irq_re_init);
12171 }
12172 
12173 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12174 {
12175 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12176 		/* If we get here, it means a firmware reset is in progress
12177 		 * while we are trying to close.  We can safely proceed with
12178 		 * the close because we are holding rtnl_lock().  Some firmware
12179 		 * messages may fail as we proceed to close.  We set the
12180 		 * ABORT_ERR flag here so that the FW reset thread will later
12181 		 * abort when it gets the rtnl_lock() and sees the flag.
12182 		 */
12183 		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
12184 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12185 	}
12186 
12187 #ifdef CONFIG_BNXT_SRIOV
12188 	if (bp->sriov_cfg) {
12189 		int rc;
12190 
12191 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
12192 						      !bp->sriov_cfg,
12193 						      BNXT_SRIOV_CFG_WAIT_TMO);
12194 		if (!rc)
12195 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
12196 		else if (rc < 0)
12197 			netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
12198 	}
12199 #endif
12200 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
12201 }
12202 
12203 static int bnxt_close(struct net_device *dev)
12204 {
12205 	struct bnxt *bp = netdev_priv(dev);
12206 
12207 	bnxt_close_nic(bp, true, true);
12208 	bnxt_hwrm_shutdown_link(bp);
12209 	bnxt_hwrm_if_change(bp, false);
12210 	return 0;
12211 }
12212 
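/* MDIO read over HWRM.  For clause 22 the register address is masked to
 * 5 bits; for clause 45 PHY ids the port and device addresses are
 * unpacked with mdio_phy_id_prtad()/mdio_phy_id_devad() and the full
 * 16-bit register address is used.
 */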
12213 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
12214 				   u16 *val)
12215 {
12216 	struct hwrm_port_phy_mdio_read_output *resp;
12217 	struct hwrm_port_phy_mdio_read_input *req;
12218 	int rc;
12219 
12220 	if (bp->hwrm_spec_code < 0x10a00)
12221 		return -EOPNOTSUPP;
12222 
12223 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
12224 	if (rc)
12225 		return rc;
12226 
12227 	req->port_id = cpu_to_le16(bp->pf.port_id);
12228 	req->phy_addr = phy_addr;
12229 	req->reg_addr = cpu_to_le16(reg & 0x1f);
12230 	if (mdio_phy_id_is_c45(phy_addr)) {
12231 		req->cl45_mdio = 1;
12232 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
12233 		req->dev_addr = mdio_phy_id_devad(phy_addr);
12234 		req->reg_addr = cpu_to_le16(reg);
12235 	}
12236 
12237 	resp = hwrm_req_hold(bp, req);
12238 	rc = hwrm_req_send(bp, req);
12239 	if (!rc)
12240 		*val = le16_to_cpu(resp->reg_data);
12241 	hwrm_req_drop(bp, req);
12242 	return rc;
12243 }
12244 
12245 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
12246 				    u16 val)
12247 {
12248 	struct hwrm_port_phy_mdio_write_input *req;
12249 	int rc;
12250 
12251 	if (bp->hwrm_spec_code < 0x10a00)
12252 		return -EOPNOTSUPP;
12253 
12254 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
12255 	if (rc)
12256 		return rc;
12257 
12258 	req->port_id = cpu_to_le16(bp->pf.port_id);
12259 	req->phy_addr = phy_addr;
12260 	req->reg_addr = cpu_to_le16(reg & 0x1f);
12261 	if (mdio_phy_id_is_c45(phy_addr)) {
12262 		req->cl45_mdio = 1;
12263 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
12264 		req->dev_addr = mdio_phy_id_devad(phy_addr);
12265 		req->reg_addr = cpu_to_le16(reg);
12266 	}
12267 	req->reg_data = cpu_to_le16(val);
12268 
12269 	return hwrm_req_send(bp, req);
12270 }
12271 
12272 /* rtnl_lock held */
12273 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12274 {
12275 	struct mii_ioctl_data *mdio = if_mii(ifr);
12276 	struct bnxt *bp = netdev_priv(dev);
12277 	int rc;
12278 
12279 	switch (cmd) {
12280 	case SIOCGMIIPHY:
12281 		mdio->phy_id = bp->link_info.phy_addr;
12282 
12283 		fallthrough;
12284 	case SIOCGMIIREG: {
12285 		u16 mii_regval = 0;
12286 
12287 		if (!netif_running(dev))
12288 			return -EAGAIN;
12289 
12290 		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
12291 					     &mii_regval);
12292 		mdio->val_out = mii_regval;
12293 		return rc;
12294 	}
12295 
12296 	case SIOCSMIIREG:
12297 		if (!netif_running(dev))
12298 			return -EAGAIN;
12299 
12300 		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
12301 						mdio->val_in);
12302 
12303 	case SIOCSHWTSTAMP:
12304 		return bnxt_hwtstamp_set(dev, ifr);
12305 
12306 	case SIOCGHWTSTAMP:
12307 		return bnxt_hwtstamp_get(dev, ifr);
12308 
12309 	default:
12310 		/* do nothing */
12311 		break;
12312 	}
12313 	return -EOPNOTSUPP;
12314 }
12315 
12316 static void bnxt_get_ring_stats(struct bnxt *bp,
12317 				struct rtnl_link_stats64 *stats)
12318 {
12319 	int i;
12320 
12321 	for (i = 0; i < bp->cp_nr_rings; i++) {
12322 		struct bnxt_napi *bnapi = bp->bnapi[i];
12323 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12324 		u64 *sw = cpr->stats.sw_stats;
12325 
12326 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
12327 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12328 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
12329 
12330 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
12331 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
12332 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
12333 
12334 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
12335 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
12336 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
12337 
12338 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
12339 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
12340 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
12341 
12342 		stats->rx_missed_errors +=
12343 			BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
12344 
12345 		stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12346 
12347 		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
12348 
12349 		stats->rx_dropped +=
12350 			cpr->sw_stats->rx.rx_netpoll_discards +
12351 			cpr->sw_stats->rx.rx_oom_discards;
12352 	}
12353 }
12354 
12355 static void bnxt_add_prev_stats(struct bnxt *bp,
12356 				struct rtnl_link_stats64 *stats)
12357 {
12358 	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
12359 
12360 	stats->rx_packets += prev_stats->rx_packets;
12361 	stats->tx_packets += prev_stats->tx_packets;
12362 	stats->rx_bytes += prev_stats->rx_bytes;
12363 	stats->tx_bytes += prev_stats->tx_bytes;
12364 	stats->rx_missed_errors += prev_stats->rx_missed_errors;
12365 	stats->multicast += prev_stats->multicast;
12366 	stats->rx_dropped += prev_stats->rx_dropped;
12367 	stats->tx_dropped += prev_stats->tx_dropped;
12368 }
12369 
12370 static void
12371 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
12372 {
12373 	struct bnxt *bp = netdev_priv(dev);
12374 
12375 	set_bit(BNXT_STATE_READ_STATS, &bp->state);
12376 	/* Make sure bnxt_close_nic() sees that we are reading stats before
12377 	 * we check the BNXT_STATE_OPEN flag.
12378 	 */
12379 	smp_mb__after_atomic();
12380 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12381 		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12382 		*stats = bp->net_stats_prev;
12383 		return;
12384 	}
12385 
12386 	bnxt_get_ring_stats(bp, stats);
12387 	bnxt_add_prev_stats(bp, stats);
12388 
12389 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
12390 		u64 *rx = bp->port_stats.sw_stats;
12391 		u64 *tx = bp->port_stats.sw_stats +
12392 			  BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
12393 
12394 		stats->rx_crc_errors =
12395 			BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
12396 		stats->rx_frame_errors =
12397 			BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
12398 		stats->rx_length_errors =
12399 			BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
12400 			BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
12401 			BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
12402 		stats->rx_errors =
12403 			BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
12404 			BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
12405 		stats->collisions =
12406 			BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
12407 		stats->tx_fifo_errors =
12408 			BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
12409 		stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
12410 	}
12411 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12412 }
12413 
12414 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
12415 					struct bnxt_total_ring_err_stats *stats,
12416 					struct bnxt_cp_ring_info *cpr)
12417 {
12418 	struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
12419 	u64 *hw_stats = cpr->stats.sw_stats;
12420 
12421 	stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
12422 	stats->rx_total_resets += sw_stats->rx.rx_resets;
12423 	stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
12424 	stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
12425 	stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
12426 	stats->rx_total_ring_discards +=
12427 		BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
12428 	stats->tx_total_resets += sw_stats->tx.tx_resets;
12429 	stats->tx_total_ring_discards +=
12430 		BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
12431 	stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
12432 }
12433 
12434 void bnxt_get_ring_err_stats(struct bnxt *bp,
12435 			     struct bnxt_total_ring_err_stats *stats)
12436 {
12437 	int i;
12438 
12439 	for (i = 0; i < bp->cp_nr_rings; i++)
12440 		bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
12441 }
12442 
12443 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
12444 {
12445 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12446 	struct net_device *dev = bp->dev;
12447 	struct netdev_hw_addr *ha;
12448 	u8 *haddr;
12449 	int mc_count = 0;
12450 	bool update = false;
12451 	int off = 0;
12452 
12453 	netdev_for_each_mc_addr(ha, dev) {
12454 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
12455 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12456 			vnic->mc_list_count = 0;
12457 			return false;
12458 		}
12459 		haddr = ha->addr;
12460 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
12461 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
12462 			update = true;
12463 		}
12464 		off += ETH_ALEN;
12465 		mc_count++;
12466 	}
12467 	if (mc_count)
12468 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12469 
12470 	if (mc_count != vnic->mc_list_count) {
12471 		vnic->mc_list_count = mc_count;
12472 		update = true;
12473 	}
12474 	return update;
12475 }
12476 
12477 static bool bnxt_uc_list_updated(struct bnxt *bp)
12478 {
12479 	struct net_device *dev = bp->dev;
12480 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12481 	struct netdev_hw_addr *ha;
12482 	int off = 0;
12483 
12484 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
12485 		return true;
12486 
12487 	netdev_for_each_uc_addr(ha, dev) {
12488 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
12489 			return true;
12490 
12491 		off += ETH_ALEN;
12492 	}
12493 	return false;
12494 }
12495 
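/* .ndo_set_rx_mode handler.  It is called with the netdev address lists
 * locked and cannot sleep, so it only recomputes the RX mask and address
 * lists for the default VNIC and defers the actual HWRM configuration to
 * bnxt_cfg_rx_mode() by queuing BNXT_RX_MASK_SP_EVENT.
 */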
12496 static void bnxt_set_rx_mode(struct net_device *dev)
12497 {
12498 	struct bnxt *bp = netdev_priv(dev);
12499 	struct bnxt_vnic_info *vnic;
12500 	bool mc_update = false;
12501 	bool uc_update;
12502 	u32 mask;
12503 
12504 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
12505 		return;
12506 
12507 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12508 	mask = vnic->rx_mask;
12509 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
12510 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
12511 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
12512 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
12513 
12514 	if (dev->flags & IFF_PROMISC)
12515 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12516 
12517 	uc_update = bnxt_uc_list_updated(bp);
12518 
12519 	if (dev->flags & IFF_BROADCAST)
12520 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
12521 	if (dev->flags & IFF_ALLMULTI) {
12522 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12523 		vnic->mc_list_count = 0;
12524 	} else if (dev->flags & IFF_MULTICAST) {
12525 		mc_update = bnxt_mc_list_updated(bp, &mask);
12526 	}
12527 
12528 	if (mask != vnic->rx_mask || uc_update || mc_update) {
12529 		vnic->rx_mask = mask;
12530 
12531 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12532 	}
12533 }
12534 
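/* Apply the RX mode computed by bnxt_set_rx_mode(): reprogram the unicast
 * L2 filters for the default VNIC (falling back to promiscuous mode if the
 * UC list is too long) and update the CFA L2 RX mask.  If setting multicast
 * filters fails, retry with ALL_MCAST.  Runs from bnxt_sp_task().
 */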
12535 static int bnxt_cfg_rx_mode(struct bnxt *bp)
12536 {
12537 	struct net_device *dev = bp->dev;
12538 	struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12539 	struct netdev_hw_addr *ha;
12540 	int i, off = 0, rc;
12541 	bool uc_update;
12542 
12543 	netif_addr_lock_bh(dev);
12544 	uc_update = bnxt_uc_list_updated(bp);
12545 	netif_addr_unlock_bh(dev);
12546 
12547 	if (!uc_update)
12548 		goto skip_uc;
12549 
12550 	for (i = 1; i < vnic->uc_filter_count; i++) {
12551 		struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
12552 
12553 		bnxt_hwrm_l2_filter_free(bp, fltr);
12554 		bnxt_del_l2_filter(bp, fltr);
12555 	}
12556 
12557 	vnic->uc_filter_count = 1;
12558 
12559 	netif_addr_lock_bh(dev);
12560 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
12561 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12562 	} else {
12563 		netdev_for_each_uc_addr(ha, dev) {
12564 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
12565 			off += ETH_ALEN;
12566 			vnic->uc_filter_count++;
12567 		}
12568 	}
12569 	netif_addr_unlock_bh(dev);
12570 
12571 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
12572 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
12573 		if (rc) {
12574 			if (BNXT_VF(bp) && rc == -ENODEV) {
12575 				if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12576 					netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
12577 				else
12578 					netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
12579 				rc = 0;
12580 			} else {
12581 				netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
12582 			}
12583 			vnic->uc_filter_count = i;
12584 			return rc;
12585 		}
12586 	}
12587 	if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12588 		netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
12589 
12590 skip_uc:
12591 	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
12592 	    !bnxt_promisc_ok(bp))
12593 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12594 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12595 	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
12596 		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
12597 			    rc);
12598 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12599 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12600 		vnic->mc_list_count = 0;
12601 		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12602 	}
12603 	if (rc)
12604 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
12605 			   rc);
12606 
12607 	return rc;
12608 }
12609 
12610 static bool bnxt_can_reserve_rings(struct bnxt *bp)
12611 {
12612 #ifdef CONFIG_BNXT_SRIOV
12613 	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
12614 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12615 
12616 		/* If no minimum rings were provisioned by the PF, don't
12617 		 * reserve rings by default while the device is down.
12618 		 */
12619 		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
12620 			return true;
12621 
12622 		if (!netif_running(bp->dev))
12623 			return false;
12624 	}
12625 #endif
12626 	return true;
12627 }
12628 
12629 /* Returns true if the chip and firmware support RFS */
12630 static bool bnxt_rfs_supported(struct bnxt *bp)
12631 {
12632 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
12633 		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
12634 			return true;
12635 		return false;
12636 	}
12637 	/* 212 firmware is broken for aRFS */
12638 	if (BNXT_FW_MAJ(bp) == 212)
12639 		return false;
12640 	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
12641 		return true;
12642 	if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
12643 		return true;
12644 	return false;
12645 }
12646 
12647 /* Returns true if the runtime conditions support RFS */
12648 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
12649 {
12650 	struct bnxt_hw_rings hwr = {0};
12651 	int max_vnics, max_rss_ctxs;
12652 
12653 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
12654 	    !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
12655 		return bnxt_rfs_supported(bp);
12656 
12657 	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
12658 		return false;
12659 
12660 	hwr.grp = bp->rx_nr_rings;
12661 	hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
12662 	if (new_rss_ctx)
12663 		hwr.vnic++;
12664 	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
12665 	max_vnics = bnxt_get_max_func_vnics(bp);
12666 	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
12667 
12668 	if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
12669 		if (bp->rx_nr_rings > 1)
12670 			netdev_warn(bp->dev,
12671 				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
12672 				    min(max_rss_ctxs - 1, max_vnics - 1));
12673 		return false;
12674 	}
12675 
12676 	if (!BNXT_NEW_RM(bp))
12677 		return true;
12678 
12679 	if (hwr.vnic == bp->hw_resc.resv_vnics &&
12680 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12681 		return true;
12682 
12683 	bnxt_hwrm_reserve_rings(bp, &hwr);
12684 	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
12685 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12686 		return true;
12687 
12688 	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
12689 	hwr.vnic = 1;
12690 	hwr.rss_ctx = 0;
12691 	bnxt_hwrm_reserve_rings(bp, &hwr);
12692 	return false;
12693 }
12694 
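/* .ndo_fix_features handler: enforce feature dependencies.  NTUPLE is
 * cleared when aRFS is not currently usable, LRO/GRO_HW are unavailable
 * with XDP or when aggregation rings are disabled, GRO_HW depends on GRO
 * and excludes LRO, and CTAG/STAG RX VLAN stripping are toggled together.
 */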
12695 static netdev_features_t bnxt_fix_features(struct net_device *dev,
12696 					   netdev_features_t features)
12697 {
12698 	struct bnxt *bp = netdev_priv(dev);
12699 	netdev_features_t vlan_features;
12700 
12701 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
12702 		features &= ~NETIF_F_NTUPLE;
12703 
12704 	if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
12705 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12706 
12707 	if (!(features & NETIF_F_GRO))
12708 		features &= ~NETIF_F_GRO_HW;
12709 
12710 	if (features & NETIF_F_GRO_HW)
12711 		features &= ~NETIF_F_LRO;
12712 
12713 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
12714 	 * turned on or off together.
12715 	 */
12716 	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
12717 	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
12718 		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12719 			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12720 		else if (vlan_features)
12721 			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12722 	}
12723 #ifdef CONFIG_BNXT_SRIOV
12724 	if (BNXT_VF(bp) && bp->vf.vlan)
12725 		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12726 #endif
12727 	return features;
12728 }
12729 
12730 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
12731 				bool link_re_init, u32 flags, bool update_tpa)
12732 {
12733 	bnxt_close_nic(bp, irq_re_init, link_re_init);
12734 	bp->flags = flags;
12735 	if (update_tpa)
12736 		bnxt_set_ring_params(bp);
12737 	return bnxt_open_nic(bp, irq_re_init, link_re_init);
12738 }
12739 
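/* .ndo_set_features handler: translate the requested features into
 * BNXT_FLAG_* bits and pick the cheapest way to apply them: a TPA-only
 * HWRM update, a close/open without IRQ re-init, or a full reinit with
 * IRQ re-init when the NTUPLE (RFS) setting changes.
 */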
12740 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
12741 {
12742 	bool update_tpa = false, update_ntuple = false;
12743 	struct bnxt *bp = netdev_priv(dev);
12744 	u32 flags = bp->flags;
12745 	u32 changes;
12746 	int rc = 0;
12747 	bool re_init = false;
12748 
12749 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
12750 	if (features & NETIF_F_GRO_HW)
12751 		flags |= BNXT_FLAG_GRO;
12752 	else if (features & NETIF_F_LRO)
12753 		flags |= BNXT_FLAG_LRO;
12754 
12755 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
12756 		flags &= ~BNXT_FLAG_TPA;
12757 
12758 	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12759 		flags |= BNXT_FLAG_STRIP_VLAN;
12760 
12761 	if (features & NETIF_F_NTUPLE)
12762 		flags |= BNXT_FLAG_RFS;
12763 	else
12764 		bnxt_clear_usr_fltrs(bp, true);
12765 
12766 	changes = flags ^ bp->flags;
12767 	if (changes & BNXT_FLAG_TPA) {
12768 		update_tpa = true;
12769 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
12770 		    (flags & BNXT_FLAG_TPA) == 0 ||
12771 		    (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
12772 			re_init = true;
12773 	}
12774 
12775 	if (changes & ~BNXT_FLAG_TPA)
12776 		re_init = true;
12777 
12778 	if (changes & BNXT_FLAG_RFS)
12779 		update_ntuple = true;
12780 
12781 	if (flags != bp->flags) {
12782 		u32 old_flags = bp->flags;
12783 
12784 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12785 			bp->flags = flags;
12786 			if (update_tpa)
12787 				bnxt_set_ring_params(bp);
12788 			return rc;
12789 		}
12790 
12791 		if (update_ntuple)
12792 			return bnxt_reinit_features(bp, true, false, flags, update_tpa);
12793 
12794 		if (re_init)
12795 			return bnxt_reinit_features(bp, false, false, flags, update_tpa);
12796 
12797 		if (update_tpa) {
12798 			bp->flags = flags;
12799 			rc = bnxt_set_tpa(bp,
12800 					  (flags & BNXT_FLAG_TPA) ?
12801 					  true : false);
12802 			if (rc)
12803 				bp->flags = old_flags;
12804 		}
12805 	}
12806 	return rc;
12807 }
12808 
12809 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
12810 			      u8 **nextp)
12811 {
12812 	struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
12813 	struct hop_jumbo_hdr *jhdr;
12814 	int hdr_count = 0;
12815 	u8 *nexthdr;
12816 	int start;
12817 
12818 	/* Check that there are at most 2 IPv6 extension headers, no
12819 	 * fragment header, and each is <= 64 bytes.
12820 	 */
12821 	start = nw_off + sizeof(*ip6h);
12822 	nexthdr = &ip6h->nexthdr;
12823 	while (ipv6_ext_hdr(*nexthdr)) {
12824 		struct ipv6_opt_hdr *hp;
12825 		int hdrlen;
12826 
12827 		if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
12828 		    *nexthdr == NEXTHDR_FRAGMENT)
12829 			return false;
12830 		hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
12831 					  skb_headlen(skb), NULL);
12832 		if (!hp)
12833 			return false;
12834 		if (*nexthdr == NEXTHDR_AUTH)
12835 			hdrlen = ipv6_authlen(hp);
12836 		else
12837 			hdrlen = ipv6_optlen(hp);
12838 
12839 		if (hdrlen > 64)
12840 			return false;
12841 
12842 		/* The ext header may be a hop-by-hop header inserted for
12843 		 * big TCP purposes. This will be removed before sending
12844 		 * big TCP purposes.  It will be removed before the packet is
12845 		 * sent from the NIC, so do not count it.
12846 		if (*nexthdr == NEXTHDR_HOP) {
12847 			if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
12848 				goto increment_hdr;
12849 
12850 			jhdr = (struct hop_jumbo_hdr *)hp;
12851 			if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
12852 			    jhdr->nexthdr != IPPROTO_TCP)
12853 				goto increment_hdr;
12854 
12855 			goto next_hdr;
12856 		}
12857 increment_hdr:
12858 		hdr_count++;
12859 next_hdr:
12860 		nexthdr = &hp->nexthdr;
12861 		start += hdrlen;
12862 	}
12863 	if (nextp) {
12864 		/* Caller will check inner protocol */
12865 		if (skb->encapsulation) {
12866 			*nextp = nexthdr;
12867 			return true;
12868 		}
12869 		*nextp = NULL;
12870 	}
12871 	/* Only support TCP/UDP for non-tunneled IPv6 and inner IPv6 */
12872 	return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
12873 }
12874 
12875 /* For UDP, we can only handle one VXLAN port, one VXLAN-GPE port and one Geneve port. */
12876 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
12877 {
12878 	struct udphdr *uh = udp_hdr(skb);
12879 	__be16 udp_port = uh->dest;
12880 
12881 	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
12882 	    udp_port != bp->vxlan_gpe_port)
12883 		return false;
12884 	if (skb->inner_protocol == htons(ETH_P_TEB)) {
12885 		struct ethhdr *eh = inner_eth_hdr(skb);
12886 
12887 		switch (eh->h_proto) {
12888 		case htons(ETH_P_IP):
12889 			return true;
12890 		case htons(ETH_P_IPV6):
12891 			return bnxt_exthdr_check(bp, skb,
12892 						 skb_inner_network_offset(skb),
12893 						 NULL);
12894 		}
12895 	} else if (skb->inner_protocol == htons(ETH_P_IP)) {
12896 		return true;
12897 	} else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
12898 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12899 					 NULL);
12900 	}
12901 	return false;
12902 }
12903 
12904 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
12905 {
12906 	switch (l4_proto) {
12907 	case IPPROTO_UDP:
12908 		return bnxt_udp_tunl_check(bp, skb);
12909 	case IPPROTO_IPIP:
12910 		return true;
12911 	case IPPROTO_GRE: {
12912 		switch (skb->inner_protocol) {
12913 		default:
12914 			return false;
12915 		case htons(ETH_P_IP):
12916 			return true;
12917 		case htons(ETH_P_IPV6):
12918 			fallthrough;
12919 		}
12920 	}
12921 	case IPPROTO_IPV6:
12922 		/* Check ext headers of inner ipv6 */
12923 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12924 					 NULL);
12925 	}
12926 	return false;
12927 }
12928 
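/* .ndo_features_check handler: keep checksum and GSO offload only for
 * packets whose headers the hardware can parse: plain IPv4, IPv6 with a
 * limited extension header chain, and supported tunnels (known UDP tunnel
 * ports, GRE, IPIP, IPv6-in-IP).  Otherwise clear NETIF_F_CSUM_MASK and
 * NETIF_F_GSO_MASK for this packet.
 */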
12929 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
12930 					     struct net_device *dev,
12931 					     netdev_features_t features)
12932 {
12933 	struct bnxt *bp = netdev_priv(dev);
12934 	u8 *l4_proto;
12935 
12936 	features = vlan_features_check(skb, features);
12937 	switch (vlan_get_protocol(skb)) {
12938 	case htons(ETH_P_IP):
12939 		if (!skb->encapsulation)
12940 			return features;
12941 		l4_proto = &ip_hdr(skb)->protocol;
12942 		if (bnxt_tunl_check(bp, skb, *l4_proto))
12943 			return features;
12944 		break;
12945 	case htons(ETH_P_IPV6):
12946 		if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
12947 				       &l4_proto))
12948 			break;
12949 		if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
12950 			return features;
12951 		break;
12952 	}
12953 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12954 }
12955 
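/* Read @num_words 32-bit registers starting at @reg_off (relative to the
 * CHIMP register view) using the HWRM_DBG_READ_DIRECT command.  Firmware
 * DMAs the raw values into a host buffer, which is then converted from
 * little endian into @reg_buf.
 */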
12956 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
12957 			 u32 *reg_buf)
12958 {
12959 	struct hwrm_dbg_read_direct_output *resp;
12960 	struct hwrm_dbg_read_direct_input *req;
12961 	__le32 *dbg_reg_buf;
12962 	dma_addr_t mapping;
12963 	int rc, i;
12964 
12965 	rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
12966 	if (rc)
12967 		return rc;
12968 
12969 	dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
12970 					 &mapping);
12971 	if (!dbg_reg_buf) {
12972 		rc = -ENOMEM;
12973 		goto dbg_rd_reg_exit;
12974 	}
12975 
12976 	req->host_dest_addr = cpu_to_le64(mapping);
12977 
12978 	resp = hwrm_req_hold(bp, req);
12979 	req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
12980 	req->read_len32 = cpu_to_le32(num_words);
12981 
12982 	rc = hwrm_req_send(bp, req);
12983 	if (rc || resp->error_code) {
12984 		rc = -EIO;
12985 		goto dbg_rd_reg_exit;
12986 	}
12987 	for (i = 0; i < num_words; i++)
12988 		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
12989 
12990 dbg_rd_reg_exit:
12991 	hwrm_req_drop(bp, req);
12992 	return rc;
12993 }
12994 
12995 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
12996 				       u32 ring_id, u32 *prod, u32 *cons)
12997 {
12998 	struct hwrm_dbg_ring_info_get_output *resp;
12999 	struct hwrm_dbg_ring_info_get_input *req;
13000 	int rc;
13001 
13002 	rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
13003 	if (rc)
13004 		return rc;
13005 
13006 	req->ring_type = ring_type;
13007 	req->fw_ring_id = cpu_to_le32(ring_id);
13008 	resp = hwrm_req_hold(bp, req);
13009 	rc = hwrm_req_send(bp, req);
13010 	if (!rc) {
13011 		*prod = le32_to_cpu(resp->producer_index);
13012 		*cons = le32_to_cpu(resp->consumer_index);
13013 	}
13014 	hwrm_req_drop(bp, req);
13015 	return rc;
13016 }
13017 
13018 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
13019 {
13020 	struct bnxt_tx_ring_info *txr;
13021 	int i = bnapi->index, j;
13022 
13023 	bnxt_for_each_napi_tx(j, bnapi, txr)
13024 		netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
13025 			    i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
13026 			    txr->tx_cons);
13027 }
13028 
13029 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
13030 {
13031 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
13032 	int i = bnapi->index;
13033 
13034 	if (!rxr)
13035 		return;
13036 
13037 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
13038 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
13039 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
13040 		    rxr->rx_sw_agg_prod);
13041 }
13042 
13043 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
13044 {
13045 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13046 	int i = bnapi->index;
13047 
13048 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
13049 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
13050 }
13051 
13052 static void bnxt_dbg_dump_states(struct bnxt *bp)
13053 {
13054 	int i;
13055 	struct bnxt_napi *bnapi;
13056 
13057 	for (i = 0; i < bp->cp_nr_rings; i++) {
13058 		bnapi = bp->bnapi[i];
13059 		if (netif_msg_drv(bp)) {
13060 			bnxt_dump_tx_sw_state(bnapi);
13061 			bnxt_dump_rx_sw_state(bnapi);
13062 			bnxt_dump_cp_sw_state(bnapi);
13063 		}
13064 	}
13065 }
13066 
13067 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
13068 {
13069 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
13070 	struct hwrm_ring_reset_input *req;
13071 	struct bnxt_napi *bnapi = rxr->bnapi;
13072 	struct bnxt_cp_ring_info *cpr;
13073 	u16 cp_ring_id;
13074 	int rc;
13075 
13076 	rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
13077 	if (rc)
13078 		return rc;
13079 
13080 	cpr = &bnapi->cp_ring;
13081 	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
13082 	req->cmpl_ring = cpu_to_le16(cp_ring_id);
13083 	req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
13084 	req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
13085 	return hwrm_req_send_silent(bp, req);
13086 }
13087 
13088 static void bnxt_reset_task(struct bnxt *bp, bool silent)
13089 {
13090 	if (!silent)
13091 		bnxt_dbg_dump_states(bp);
13092 	if (netif_running(bp->dev)) {
13093 		bnxt_close_nic(bp, !silent, false);
13094 		bnxt_open_nic(bp, !silent, false);
13095 	}
13096 }
13097 
13098 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
13099 {
13100 	struct bnxt *bp = netdev_priv(dev);
13101 
13102 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
13103 	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
13104 }
13105 
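/* Called from bnxt_timer() when error recovery is enabled.  A countdown
 * (tmr_counter) paces the checks; when it expires, sample the firmware
 * heartbeat and reset-count registers.  A stalled heartbeat or a changed
 * reset count schedules BNXT_FW_EXCEPTION_SP_EVENT to start recovery.
 */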
13106 static void bnxt_fw_health_check(struct bnxt *bp)
13107 {
13108 	struct bnxt_fw_health *fw_health = bp->fw_health;
13109 	struct pci_dev *pdev = bp->pdev;
13110 	u32 val;
13111 
13112 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13113 		return;
13114 
13115 	/* Make sure it is enabled before checking the tmr_counter. */
13116 	smp_rmb();
13117 	if (fw_health->tmr_counter) {
13118 		fw_health->tmr_counter--;
13119 		return;
13120 	}
13121 
13122 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13123 	if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
13124 		fw_health->arrests++;
13125 		goto fw_reset;
13126 	}
13127 
13128 	fw_health->last_fw_heartbeat = val;
13129 
13130 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13131 	if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
13132 		fw_health->discoveries++;
13133 		goto fw_reset;
13134 	}
13135 
13136 	fw_health->tmr_counter = fw_health->tmr_multiplier;
13137 	return;
13138 
13139 fw_reset:
13140 	bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
13141 }
13142 
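/* Per-device timer, re-armed every bp->current_interval.  It checks
 * firmware health inline and otherwise only queues slow-path events
 * (stats refresh, PHY retry, flow stats, L2 filter retry, missed-IRQ
 * check) for bnxt_sp_task() to handle.
 */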
13143 static void bnxt_timer(struct timer_list *t)
13144 {
13145 	struct bnxt *bp = from_timer(bp, t, timer);
13146 	struct net_device *dev = bp->dev;
13147 
13148 	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
13149 		return;
13150 
13151 	if (atomic_read(&bp->intr_sem) != 0)
13152 		goto bnxt_restart_timer;
13153 
13154 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
13155 		bnxt_fw_health_check(bp);
13156 
13157 	if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
13158 		bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
13159 
13160 	if (bnxt_tc_flower_enabled(bp))
13161 		bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
13162 
13163 #ifdef CONFIG_RFS_ACCEL
13164 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
13165 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
13166 #endif /*CONFIG_RFS_ACCEL*/
13167 
13168 	if (bp->link_info.phy_retry) {
13169 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
13170 			bp->link_info.phy_retry = false;
13171 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
13172 		} else {
13173 			bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
13174 		}
13175 	}
13176 
13177 	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13178 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13179 
13180 	if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
13181 		bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
13182 
13183 bnxt_restart_timer:
13184 	mod_timer(&bp->timer, jiffies + bp->current_interval);
13185 }
13186 
13187 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
13188 {
13189 	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
13190 	 * set.  If the device is being closed, bnxt_close() may be holding
13191 	 * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
13192 	 * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
13193 	 */
13194 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13195 	rtnl_lock();
13196 }
13197 
13198 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
13199 {
13200 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13201 	rtnl_unlock();
13202 }
13203 
13204 /* Only called from bnxt_sp_task() */
13205 static void bnxt_reset(struct bnxt *bp, bool silent)
13206 {
13207 	bnxt_rtnl_lock_sp(bp);
13208 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
13209 		bnxt_reset_task(bp, silent);
13210 	bnxt_rtnl_unlock_sp(bp);
13211 }
13212 
13213 /* Only called from bnxt_sp_task() */
13214 static void bnxt_rx_ring_reset(struct bnxt *bp)
13215 {
13216 	int i;
13217 
13218 	bnxt_rtnl_lock_sp(bp);
13219 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13220 		bnxt_rtnl_unlock_sp(bp);
13221 		return;
13222 	}
13223 	/* Disable and flush TPA before resetting the RX ring */
13224 	if (bp->flags & BNXT_FLAG_TPA)
13225 		bnxt_set_tpa(bp, false);
13226 	for (i = 0; i < bp->rx_nr_rings; i++) {
13227 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
13228 		struct bnxt_cp_ring_info *cpr;
13229 		int rc;
13230 
13231 		if (!rxr->bnapi->in_reset)
13232 			continue;
13233 
13234 		rc = bnxt_hwrm_rx_ring_reset(bp, i);
13235 		if (rc) {
13236 			if (rc == -EINVAL || rc == -EOPNOTSUPP)
13237 				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
13238 			else
13239 				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
13240 					    rc);
13241 			bnxt_reset_task(bp, true);
13242 			break;
13243 		}
13244 		bnxt_free_one_rx_ring_skbs(bp, i);
13245 		rxr->rx_prod = 0;
13246 		rxr->rx_agg_prod = 0;
13247 		rxr->rx_sw_agg_prod = 0;
13248 		rxr->rx_next_cons = 0;
13249 		rxr->bnapi->in_reset = false;
13250 		bnxt_alloc_one_rx_ring(bp, i);
13251 		cpr = &rxr->bnapi->cp_ring;
13252 		cpr->sw_stats->rx.rx_resets++;
13253 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
13254 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
13255 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
13256 	}
13257 	if (bp->flags & BNXT_FLAG_TPA)
13258 		bnxt_set_tpa(bp, true);
13259 	bnxt_rtnl_unlock_sp(bp);
13260 }
13261 
13262 static void bnxt_fw_fatal_close(struct bnxt *bp)
13263 {
13264 	bnxt_tx_disable(bp);
13265 	bnxt_disable_napi(bp);
13266 	bnxt_disable_int_sync(bp);
13267 	bnxt_free_irq(bp);
13268 	bnxt_clear_int_mode(bp);
13269 	pci_disable_device(bp->pdev);
13270 }
13271 
13272 static void bnxt_fw_reset_close(struct bnxt *bp)
13273 {
13274 	/* When firmware is in fatal state, quiesce device and disable
13275 	 * bus master to prevent any potential bad DMAs before freeing
13276 	 * kernel memory.
13277 	 */
13278 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
13279 		u16 val = 0;
13280 
13281 		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13282 		if (val == 0xffff)
13283 			bp->fw_reset_min_dsecs = 0;
13284 		bnxt_fw_fatal_close(bp);
13285 	}
13286 	__bnxt_close_nic(bp, true, false);
13287 	bnxt_vf_reps_free(bp);
13288 	bnxt_clear_int_mode(bp);
13289 	bnxt_hwrm_func_drv_unrgtr(bp);
13290 	if (pci_is_enabled(bp->pdev))
13291 		pci_disable_device(bp->pdev);
13292 	bnxt_free_ctx_mem(bp);
13293 }
13294 
13295 static bool is_bnxt_fw_ok(struct bnxt *bp)
13296 {
13297 	struct bnxt_fw_health *fw_health = bp->fw_health;
13298 	bool no_heartbeat = false, has_reset = false;
13299 	u32 val;
13300 
13301 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13302 	if (val == fw_health->last_fw_heartbeat)
13303 		no_heartbeat = true;
13304 
13305 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13306 	if (val != fw_health->last_fw_reset_cnt)
13307 		has_reset = true;
13308 
13309 	if (!no_heartbeat && has_reset)
13310 		return true;
13311 
13312 	return false;
13313 }
13314 
13315 /* rtnl_lock is acquired before calling this function */
13316 static void bnxt_force_fw_reset(struct bnxt *bp)
13317 {
13318 	struct bnxt_fw_health *fw_health = bp->fw_health;
13319 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13320 	u32 wait_dsecs;
13321 
13322 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
13323 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13324 		return;
13325 
13326 	if (ptp) {
13327 		spin_lock_bh(&ptp->ptp_lock);
13328 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13329 		spin_unlock_bh(&ptp->ptp_lock);
13330 	} else {
13331 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13332 	}
13333 	bnxt_fw_reset_close(bp);
13334 	wait_dsecs = fw_health->master_func_wait_dsecs;
13335 	if (fw_health->primary) {
13336 		if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
13337 			wait_dsecs = 0;
13338 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13339 	} else {
13340 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
13341 		wait_dsecs = fw_health->normal_func_wait_dsecs;
13342 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13343 	}
13344 
13345 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
13346 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
13347 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13348 }
13349 
13350 void bnxt_fw_exception(struct bnxt *bp)
13351 {
13352 	netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
13353 	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13354 	bnxt_ulp_stop(bp);
13355 	bnxt_rtnl_lock_sp(bp);
13356 	bnxt_force_fw_reset(bp);
13357 	bnxt_rtnl_unlock_sp(bp);
13358 }
13359 
13360 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
13361  * < 0 on error.
13362  */
13363 static int bnxt_get_registered_vfs(struct bnxt *bp)
13364 {
13365 #ifdef CONFIG_BNXT_SRIOV
13366 	int rc;
13367 
13368 	if (!BNXT_PF(bp))
13369 		return 0;
13370 
13371 	rc = bnxt_hwrm_func_qcfg(bp);
13372 	if (rc) {
13373 		netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
13374 		return rc;
13375 	}
13376 	if (bp->pf.registered_vfs)
13377 		return bp->pf.registered_vfs;
13378 	if (bp->sriov_cfg)
13379 		return 1;
13380 #endif
13381 	return 0;
13382 }
13383 
13384 void bnxt_fw_reset(struct bnxt *bp)
13385 {
13386 	bnxt_ulp_stop(bp);
13387 	bnxt_rtnl_lock_sp(bp);
13388 	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
13389 	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13390 		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13391 		int n = 0, tmo;
13392 
13393 		if (ptp) {
13394 			spin_lock_bh(&ptp->ptp_lock);
13395 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13396 			spin_unlock_bh(&ptp->ptp_lock);
13397 		} else {
13398 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13399 		}
13400 		if (bp->pf.active_vfs &&
13401 		    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
13402 			n = bnxt_get_registered_vfs(bp);
13403 		if (n < 0) {
13404 			netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
13405 				   n);
13406 			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13407 			dev_close(bp->dev);
13408 			goto fw_reset_exit;
13409 		} else if (n > 0) {
13410 			u16 vf_tmo_dsecs = n * 10;
13411 
13412 			if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
13413 				bp->fw_reset_max_dsecs = vf_tmo_dsecs;
13414 			bp->fw_reset_state =
13415 				BNXT_FW_RESET_STATE_POLL_VF;
13416 			bnxt_queue_fw_reset_work(bp, HZ / 10);
13417 			goto fw_reset_exit;
13418 		}
13419 		bnxt_fw_reset_close(bp);
13420 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13421 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13422 			tmo = HZ / 10;
13423 		} else {
13424 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13425 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
13426 		}
13427 		bnxt_queue_fw_reset_work(bp, tmo);
13428 	}
13429 fw_reset_exit:
13430 	bnxt_rtnl_unlock_sp(bp);
13431 }
13432 
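/* P5+ chips only: scan every completion ring and, if a ring has work
 * pending but its raw consumer index has not moved since the last scan,
 * treat it as a missed IRQ: query the ring state from firmware for
 * debugging and bump the missed_irqs counter.
 */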
13433 static void bnxt_chk_missed_irq(struct bnxt *bp)
13434 {
13435 	int i;
13436 
13437 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13438 		return;
13439 
13440 	for (i = 0; i < bp->cp_nr_rings; i++) {
13441 		struct bnxt_napi *bnapi = bp->bnapi[i];
13442 		struct bnxt_cp_ring_info *cpr;
13443 		u32 fw_ring_id;
13444 		int j;
13445 
13446 		if (!bnapi)
13447 			continue;
13448 
13449 		cpr = &bnapi->cp_ring;
13450 		for (j = 0; j < cpr->cp_ring_count; j++) {
13451 			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
13452 			u32 val[2];
13453 
13454 			if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
13455 				continue;
13456 
13457 			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
13458 				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
13459 				continue;
13460 			}
13461 			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
13462 			bnxt_dbg_hwrm_ring_info_get(bp,
13463 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
13464 				fw_ring_id, &val[0], &val[1]);
13465 			cpr->sw_stats->cmn.missed_irqs++;
13466 		}
13467 	}
13468 }
13469 
13470 static void bnxt_cfg_ntp_filters(struct bnxt *);
13471 
13472 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
13473 {
13474 	struct bnxt_link_info *link_info = &bp->link_info;
13475 
13476 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
13477 		link_info->autoneg = BNXT_AUTONEG_SPEED;
13478 		if (bp->hwrm_spec_code >= 0x10201) {
13479 			if (link_info->auto_pause_setting &
13480 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
13481 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
13482 		} else {
13483 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
13484 		}
13485 		bnxt_set_auto_speed(link_info);
13486 	} else {
13487 		bnxt_set_force_speed(link_info);
13488 		link_info->req_duplex = link_info->duplex_setting;
13489 	}
13490 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
13491 		link_info->req_flow_ctrl =
13492 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
13493 	else
13494 		link_info->req_flow_ctrl = link_info->force_pause_setting;
13495 }
13496 
13497 static void bnxt_fw_echo_reply(struct bnxt *bp)
13498 {
13499 	struct bnxt_fw_health *fw_health = bp->fw_health;
13500 	struct hwrm_func_echo_response_input *req;
13501 	int rc;
13502 
13503 	rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
13504 	if (rc)
13505 		return;
13506 	req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
13507 	req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
13508 	hwrm_req_send(bp, req);
13509 }
13510 
13511 static void bnxt_ulp_restart(struct bnxt *bp)
13512 {
13513 	bnxt_ulp_stop(bp);
13514 	bnxt_ulp_start(bp, 0);
13515 }
13516 
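/* Slow-path workqueue handler.  Sets BNXT_STATE_IN_SP_TASK, bails out if
 * the device is not open, then services the sp_event bits set by
 * bnxt_queue_sp_work() callers such as the timer, async firmware events,
 * and netdev ops.  Events that may close and reopen the NIC are handled
 * last because they temporarily drop BNXT_STATE_IN_SP_TASK around
 * rtnl_lock().
 */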
13517 static void bnxt_sp_task(struct work_struct *work)
13518 {
13519 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
13520 
13521 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13522 	smp_mb__after_atomic();
13523 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13524 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13525 		return;
13526 	}
13527 
13528 	if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
13529 		bnxt_ulp_restart(bp);
13530 		bnxt_reenable_sriov(bp);
13531 	}
13532 
13533 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
13534 		bnxt_cfg_rx_mode(bp);
13535 
13536 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
13537 		bnxt_cfg_ntp_filters(bp);
13538 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
13539 		bnxt_hwrm_exec_fwd_req(bp);
13540 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
13541 		netdev_info(bp->dev, "Received PF driver unload event!\n");
13542 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
13543 		bnxt_hwrm_port_qstats(bp, 0);
13544 		bnxt_hwrm_port_qstats_ext(bp, 0);
13545 		bnxt_accumulate_all_stats(bp);
13546 	}
13547 
13548 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
13549 		int rc;
13550 
13551 		mutex_lock(&bp->link_lock);
13552 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
13553 				       &bp->sp_event))
13554 			bnxt_hwrm_phy_qcaps(bp);
13555 
13556 		rc = bnxt_update_link(bp, true);
13557 		if (rc)
13558 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
13559 				   rc);
13560 
13561 		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
13562 				       &bp->sp_event))
13563 			bnxt_init_ethtool_link_settings(bp);
13564 		mutex_unlock(&bp->link_lock);
13565 	}
13566 	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
13567 		int rc;
13568 
13569 		mutex_lock(&bp->link_lock);
13570 		rc = bnxt_update_phy_setting(bp);
13571 		mutex_unlock(&bp->link_lock);
13572 		if (rc) {
13573 			netdev_warn(bp->dev, "update phy settings retry failed\n");
13574 		} else {
13575 			bp->link_info.phy_retry = false;
13576 			netdev_info(bp->dev, "update phy settings retry succeeded\n");
13577 		}
13578 	}
13579 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
13580 		mutex_lock(&bp->link_lock);
13581 		bnxt_get_port_module_status(bp);
13582 		mutex_unlock(&bp->link_lock);
13583 	}
13584 
13585 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
13586 		bnxt_tc_flow_stats_work(bp);
13587 
13588 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
13589 		bnxt_chk_missed_irq(bp);
13590 
13591 	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
13592 		bnxt_fw_echo_reply(bp);
13593 
13594 	if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
13595 		bnxt_hwmon_notify_event(bp);
13596 
13597 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
13598 	 * must be the last functions to be called before exiting.
13599 	 */
13600 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
13601 		bnxt_reset(bp, false);
13602 
13603 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
13604 		bnxt_reset(bp, true);
13605 
13606 	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
13607 		bnxt_rx_ring_reset(bp);
13608 
13609 	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
13610 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
13611 		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
13612 			bnxt_devlink_health_fw_report(bp);
13613 		else
13614 			bnxt_fw_reset(bp);
13615 	}
13616 
13617 	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
13618 		if (!is_bnxt_fw_ok(bp))
13619 			bnxt_devlink_health_fw_report(bp);
13620 	}
13621 
13622 	smp_mb__before_atomic();
13623 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13624 }
13625 
13626 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13627 				int *max_cp);
13628 
13629 /* Under rtnl_lock */
13630 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
13631 		     int tx_xdp)
13632 {
13633 	int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
13634 	struct bnxt_hw_rings hwr = {0};
13635 	int rx_rings = rx;
13636 
13637 	if (tcs)
13638 		tx_sets = tcs;
13639 
13640 	_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
13641 
13642 	if (max_rx < rx_rings)
13643 		return -ENOMEM;
13644 
13645 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
13646 		rx_rings <<= 1;
13647 
13648 	hwr.rx = rx_rings;
13649 	hwr.tx = tx * tx_sets + tx_xdp;
13650 	if (max_tx < hwr.tx)
13651 		return -ENOMEM;
13652 
13653 	hwr.vnic = bnxt_get_total_vnics(bp, rx);
13654 
13655 	tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
13656 	hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
13657 	if (max_cp < hwr.cp)
13658 		return -ENOMEM;
13659 	hwr.stat = hwr.cp;
13660 	if (BNXT_NEW_RM(bp)) {
13661 		hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
13662 		hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
13663 		hwr.grp = rx;
13664 		hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13665 	}
13666 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
13667 		hwr.cp_p5 = hwr.tx + rx;
13668 	return bnxt_hwrm_check_rings(bp, &hwr);
13669 }
13670 
13671 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
13672 {
13673 	if (bp->bar2) {
13674 		pci_iounmap(pdev, bp->bar2);
13675 		bp->bar2 = NULL;
13676 	}
13677 
13678 	if (bp->bar1) {
13679 		pci_iounmap(pdev, bp->bar1);
13680 		bp->bar1 = NULL;
13681 	}
13682 
13683 	if (bp->bar0) {
13684 		pci_iounmap(pdev, bp->bar0);
13685 		bp->bar0 = NULL;
13686 	}
13687 }
13688 
13689 static void bnxt_cleanup_pci(struct bnxt *bp)
13690 {
13691 	bnxt_unmap_bars(bp, bp->pdev);
13692 	pci_release_regions(bp->pdev);
13693 	if (pci_is_enabled(bp->pdev))
13694 		pci_disable_device(bp->pdev);
13695 }
13696 
13697 static void bnxt_init_dflt_coal(struct bnxt *bp)
13698 {
13699 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
13700 	struct bnxt_coal *coal;
13701 	u16 flags = 0;
13702 
13703 	if (coal_cap->cmpl_params &
13704 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
13705 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
13706 
13707 	/* Tick values in microseconds.
13708 	 * 1 coal_buf x bufs_per_record = 1 completion record.
13709 	 */
13710 	coal = &bp->rx_coal;
13711 	coal->coal_ticks = 10;
13712 	coal->coal_bufs = 30;
13713 	coal->coal_ticks_irq = 1;
13714 	coal->coal_bufs_irq = 2;
13715 	coal->idle_thresh = 50;
13716 	coal->bufs_per_record = 2;
13717 	coal->budget = 64;		/* NAPI budget */
13718 	coal->flags = flags;
13719 
13720 	coal = &bp->tx_coal;
13721 	coal->coal_ticks = 28;
13722 	coal->coal_bufs = 30;
13723 	coal->coal_ticks_irq = 2;
13724 	coal->coal_bufs_irq = 2;
13725 	coal->bufs_per_record = 1;
13726 	coal->flags = flags;
13727 
13728 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
13729 }
13730 
13731 /* Returns true if the FW pre-reserves 1 VNIC per function */
13732 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
13733 {
13734 	u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
13735 
13736 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13737 	    (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
13738 		return true;
13739 	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13740 	    (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
13741 		return true;
13742 	return false;
13743 }
13744 
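/* Phase 1 of firmware init: query the firmware version (attempting
 * recovery if the firmware is unresponsive, e.g. right after an FLR),
 * map the firmware health registers, read the NVM config version, reset
 * the function, and set the firmware time.
 */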
13745 static int bnxt_fw_init_one_p1(struct bnxt *bp)
13746 {
13747 	int rc;
13748 
13749 	bp->fw_cap = 0;
13750 	rc = bnxt_hwrm_ver_get(bp);
13751 	/* FW may be unresponsive after FLR. FLR must complete within 100 msec
13752 	 * so wait before continuing with recovery.
13753 	 */
13754 	if (rc)
13755 		msleep(100);
13756 	bnxt_try_map_fw_health_reg(bp);
13757 	if (rc) {
13758 		rc = bnxt_try_recover_fw(bp);
13759 		if (rc)
13760 			return rc;
13761 		rc = bnxt_hwrm_ver_get(bp);
13762 		if (rc)
13763 			return rc;
13764 	}
13765 
13766 	bnxt_nvm_cfg_ver_get(bp);
13767 
13768 	rc = bnxt_hwrm_func_reset(bp);
13769 	if (rc)
13770 		return -ENODEV;
13771 
13772 	bnxt_hwrm_fw_set_time(bp);
13773 	return 0;
13774 }
13775 
13776 static int bnxt_fw_init_one_p2(struct bnxt *bp)
13777 {
13778 	int rc;
13779 
13780 	/* Get the MAX capabilities for this function */
13781 	rc = bnxt_hwrm_func_qcaps(bp);
13782 	if (rc) {
13783 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
13784 			   rc);
13785 		return -ENODEV;
13786 	}
13787 
13788 	rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
13789 	if (rc)
13790 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
13791 			    rc);
13792 
13793 	if (bnxt_alloc_fw_health(bp)) {
13794 		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
13795 	} else {
13796 		rc = bnxt_hwrm_error_recovery_qcfg(bp);
13797 		if (rc)
13798 			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
13799 				    rc);
13800 	}
13801 
13802 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
13803 	if (rc)
13804 		return -ENODEV;
13805 
13806 	if (bnxt_fw_pre_resv_vnics(bp))
13807 		bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
13808 
13809 	bnxt_hwrm_func_qcfg(bp);
13810 	bnxt_hwrm_vnic_qcaps(bp);
13811 	bnxt_hwrm_port_led_qcaps(bp);
13812 	bnxt_ethtool_init(bp);
13813 	if (bp->fw_cap & BNXT_FW_CAP_PTP)
13814 		__bnxt_hwrm_ptp_qcfg(bp);
13815 	bnxt_dcb_init(bp);
13816 	bnxt_hwmon_init(bp);
13817 	return 0;
13818 }
13819 
13820 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
13821 {
13822 	bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
13823 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
13824 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
13825 			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
13826 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
13827 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
13828 		bp->rss_hash_delta = bp->rss_hash_cfg;
13829 	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
13830 		bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
13831 		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
13832 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
13833 	}
13834 }
13835 
13836 static void bnxt_set_dflt_rfs(struct bnxt *bp)
13837 {
13838 	struct net_device *dev = bp->dev;
13839 
13840 	dev->hw_features &= ~NETIF_F_NTUPLE;
13841 	dev->features &= ~NETIF_F_NTUPLE;
13842 	bp->flags &= ~BNXT_FLAG_RFS;
13843 	if (bnxt_rfs_supported(bp)) {
13844 		dev->hw_features |= NETIF_F_NTUPLE;
13845 		if (bnxt_rfs_capable(bp, false)) {
13846 			bp->flags |= BNXT_FLAG_RFS;
13847 			dev->features |= NETIF_F_NTUPLE;
13848 		}
13849 	}
13850 }
13851 
13852 static void bnxt_fw_init_one_p3(struct bnxt *bp)
13853 {
13854 	struct pci_dev *pdev = bp->pdev;
13855 
13856 	bnxt_set_dflt_rss_hash_type(bp);
13857 	bnxt_set_dflt_rfs(bp);
13858 
13859 	bnxt_get_wol_settings(bp);
13860 	if (bp->flags & BNXT_FLAG_WOL_CAP)
13861 		device_set_wakeup_enable(&pdev->dev, bp->wol);
13862 	else
13863 		device_set_wakeup_capable(&pdev->dev, false);
13864 
13865 	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
13866 	bnxt_hwrm_coal_params_qcaps(bp);
13867 }
13868 
13869 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
13870 
13871 int bnxt_fw_init_one(struct bnxt *bp)
13872 {
13873 	int rc;
13874 
13875 	rc = bnxt_fw_init_one_p1(bp);
13876 	if (rc) {
13877 		netdev_err(bp->dev, "Firmware init phase 1 failed\n");
13878 		return rc;
13879 	}
13880 	rc = bnxt_fw_init_one_p2(bp);
13881 	if (rc) {
13882 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
13883 		return rc;
13884 	}
13885 	rc = bnxt_probe_phy(bp, false);
13886 	if (rc)
13887 		return rc;
13888 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
13889 	if (rc)
13890 		return rc;
13891 
13892 	bnxt_fw_init_one_p3(bp);
13893 	return 0;
13894 }
13895 
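/* Execute one step of the firmware-provided host reset sequence: write
 * fw_reset_seq_vals[reg_idx] to the register described by
 * fw_reset_seq_regs[reg_idx] (PCI config space, a GRC window, BAR0 or
 * BAR1), then apply the per-step delay.
 */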
13896 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
13897 {
13898 	struct bnxt_fw_health *fw_health = bp->fw_health;
13899 	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
13900 	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
13901 	u32 reg_type, reg_off, delay_msecs;
13902 
13903 	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
13904 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
13905 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
13906 	switch (reg_type) {
13907 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
13908 		pci_write_config_dword(bp->pdev, reg_off, val);
13909 		break;
13910 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
13911 		writel(reg_off & BNXT_GRC_BASE_MASK,
13912 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
13913 		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
13914 		fallthrough;
13915 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
13916 		writel(val, bp->bar0 + reg_off);
13917 		break;
13918 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
13919 		writel(val, bp->bar1 + reg_off);
13920 		break;
13921 	}
13922 	if (delay_msecs) {
13923 		pci_read_config_dword(bp->pdev, 0, &val);
13924 		msleep(delay_msecs);
13925 	}
13926 }
13927 
13928 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
13929 {
13930 	struct hwrm_func_qcfg_output *resp;
13931 	struct hwrm_func_qcfg_input *req;
13932 	bool result = true; /* firmware will enforce if unknown */
13933 
13934 	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
13935 		return result;
13936 
13937 	if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
13938 		return result;
13939 
13940 	req->fid = cpu_to_le16(0xffff);
13941 	resp = hwrm_req_hold(bp, req);
13942 	if (!hwrm_req_send(bp, req))
13943 		result = !!(le16_to_cpu(resp->flags) &
13944 			    FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
13945 	hwrm_req_drop(bp, req);
13946 	return result;
13947 }
13948 
13949 static void bnxt_reset_all(struct bnxt *bp)
13950 {
13951 	struct bnxt_fw_health *fw_health = bp->fw_health;
13952 	int i, rc;
13953 
13954 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13955 		bnxt_fw_reset_via_optee(bp);
13956 		bp->fw_reset_timestamp = jiffies;
13957 		return;
13958 	}
13959 
13960 	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
13961 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
13962 			bnxt_fw_reset_writel(bp, i);
13963 	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
13964 		struct hwrm_fw_reset_input *req;
13965 
13966 		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
13967 		if (!rc) {
13968 			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
13969 			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
13970 			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
13971 			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
13972 			rc = hwrm_req_send(bp, req);
13973 		}
13974 		if (rc != -ENODEV)
13975 			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
13976 	}
13977 	bp->fw_reset_timestamp = jiffies;
13978 }
13979 
13980 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
13981 {
13982 	return time_after(jiffies, bp->fw_reset_timestamp +
13983 			  (bp->fw_reset_max_dsecs * HZ / 10));
13984 }
13985 
13986 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
13987 {
13988 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13989 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
13990 		bnxt_dl_health_fw_status_update(bp, false);
13991 	bp->fw_reset_state = 0;
13992 	dev_close(bp->dev);
13993 }
13994 
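/* Firmware reset state machine, run from a delayed work item.  Typical
 * progression: POLL_VF (wait for VFs to unregister, then close the NIC),
 * POLL_FW_DOWN and/or RESET_FW, ENABLE_DEV (re-enable the PCI device),
 * POLL_FW (wait for firmware to respond to HWRM), and finally OPENING
 * (reopen the NIC and restart the ULPs).  Failures abort the reset and
 * close the device.
 */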
13995 static void bnxt_fw_reset_task(struct work_struct *work)
13996 {
13997 	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
13998 	int rc = 0;
13999 
14000 	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14001 		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
14002 		return;
14003 	}
14004 
14005 	switch (bp->fw_reset_state) {
14006 	case BNXT_FW_RESET_STATE_POLL_VF: {
14007 		int n = bnxt_get_registered_vfs(bp);
14008 		int tmo;
14009 
14010 		if (n < 0) {
14011 			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
14012 				   n, jiffies_to_msecs(jiffies -
14013 				   bp->fw_reset_timestamp));
14014 			goto fw_reset_abort;
14015 		} else if (n > 0) {
14016 			if (bnxt_fw_reset_timeout(bp)) {
14017 				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14018 				bp->fw_reset_state = 0;
14019 				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
14020 					   n);
14021 				goto ulp_start;
14022 			}
14023 			bnxt_queue_fw_reset_work(bp, HZ / 10);
14024 			return;
14025 		}
14026 		bp->fw_reset_timestamp = jiffies;
14027 		rtnl_lock();
14028 		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
14029 			bnxt_fw_reset_abort(bp, rc);
14030 			rtnl_unlock();
14031 			goto ulp_start;
14032 		}
14033 		bnxt_fw_reset_close(bp);
14034 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14035 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14036 			tmo = HZ / 10;
14037 		} else {
14038 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14039 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
14040 		}
14041 		rtnl_unlock();
14042 		bnxt_queue_fw_reset_work(bp, tmo);
14043 		return;
14044 	}
14045 	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
14046 		u32 val;
14047 
14048 		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
14049 		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
14050 		    !bnxt_fw_reset_timeout(bp)) {
14051 			bnxt_queue_fw_reset_work(bp, HZ / 5);
14052 			return;
14053 		}
14054 
14055 		if (!bp->fw_health->primary) {
14056 			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
14057 
14058 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14059 			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14060 			return;
14061 		}
14062 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14063 	}
14064 		fallthrough;
14065 	case BNXT_FW_RESET_STATE_RESET_FW:
14066 		bnxt_reset_all(bp);
14067 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14068 		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
14069 		return;
14070 	case BNXT_FW_RESET_STATE_ENABLE_DEV:
14071 		bnxt_inv_fw_health_reg(bp);
14072 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
14073 		    !bp->fw_reset_min_dsecs) {
14074 			u16 val;
14075 
14076 			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14077 			if (val == 0xffff) {
14078 				if (bnxt_fw_reset_timeout(bp)) {
14079 					netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
14080 					rc = -ETIMEDOUT;
14081 					goto fw_reset_abort;
14082 				}
14083 				bnxt_queue_fw_reset_work(bp, HZ / 1000);
14084 				return;
14085 			}
14086 		}
14087 		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14088 		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
14089 		if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
14090 		    !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
14091 			bnxt_dl_remote_reload(bp);
14092 		if (pci_enable_device(bp->pdev)) {
14093 			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
14094 			rc = -ENODEV;
14095 			goto fw_reset_abort;
14096 		}
14097 		pci_set_master(bp->pdev);
14098 		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
14099 		fallthrough;
14100 	case BNXT_FW_RESET_STATE_POLL_FW:
14101 		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
14102 		rc = bnxt_hwrm_poll(bp);
14103 		if (rc) {
14104 			if (bnxt_fw_reset_timeout(bp)) {
14105 				netdev_err(bp->dev, "Firmware reset aborted\n");
14106 				goto fw_reset_abort_status;
14107 			}
14108 			bnxt_queue_fw_reset_work(bp, HZ / 5);
14109 			return;
14110 		}
14111 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
14112 		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
14113 		fallthrough;
14114 	case BNXT_FW_RESET_STATE_OPENING:
14115 		while (!rtnl_trylock()) {
14116 			bnxt_queue_fw_reset_work(bp, HZ / 10);
14117 			return;
14118 		}
14119 		rc = bnxt_open(bp->dev);
14120 		if (rc) {
14121 			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
14122 			bnxt_fw_reset_abort(bp, rc);
14123 			rtnl_unlock();
14124 			goto ulp_start;
14125 		}
14126 
14127 		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
14128 		    bp->fw_health->enabled) {
14129 			bp->fw_health->last_fw_reset_cnt =
14130 				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14131 		}
14132 		bp->fw_reset_state = 0;
14133 		/* Make sure fw_reset_state is 0 before clearing the flag */
14134 		smp_mb__before_atomic();
14135 		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14136 		bnxt_ptp_reapply_pps(bp);
14137 		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
14138 		if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
14139 			bnxt_dl_health_fw_recovery_done(bp);
14140 			bnxt_dl_health_fw_status_update(bp, true);
14141 		}
14142 		rtnl_unlock();
14143 		bnxt_ulp_start(bp, 0);
14144 		bnxt_reenable_sriov(bp);
14145 		rtnl_lock();
14146 		bnxt_vf_reps_alloc(bp);
14147 		bnxt_vf_reps_open(bp);
14148 		rtnl_unlock();
14149 		break;
14150 	}
14151 	return;
14152 
14153 fw_reset_abort_status:
14154 	if (bp->fw_health->status_reliable ||
14155 	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
14156 		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
14157 
14158 		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
14159 	}
14160 fw_reset_abort:
14161 	rtnl_lock();
14162 	bnxt_fw_reset_abort(bp, rc);
14163 	rtnl_unlock();
14164 ulp_start:
14165 	bnxt_ulp_start(bp, rc);
14166 }
14167 
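/* One-time board setup: enable the PCI device, request its regions,
 * set the DMA mask, map BAR 0 and BAR 4, and initialize work items,
 * locks, timers, and the default ring sizes.  The doorbell BAR is
 * mapped later by bnxt_map_db_bar() once firmware reports its size.
 */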
14168 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
14169 {
14170 	int rc;
14171 	struct bnxt *bp = netdev_priv(dev);
14172 
14173 	SET_NETDEV_DEV(dev, &pdev->dev);
14174 
14175 	/* enable device (incl. PCI PM wakeup) and bus-mastering */
14176 	rc = pci_enable_device(pdev);
14177 	if (rc) {
14178 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14179 		goto init_err;
14180 	}
14181 
14182 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
14183 		dev_err(&pdev->dev,
14184 			"Cannot find PCI device base address, aborting\n");
14185 		rc = -ENODEV;
14186 		goto init_err_disable;
14187 	}
14188 
14189 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
14190 	if (rc) {
14191 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14192 		goto init_err_disable;
14193 	}
14194 
14195 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
14196 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
14197 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
14198 		rc = -EIO;
14199 		goto init_err_release;
14200 	}
14201 
14202 	pci_set_master(pdev);
14203 
14204 	bp->dev = dev;
14205 	bp->pdev = pdev;
14206 
14207 	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
14208 	 * determines the BAR size.
14209 	 */
14210 	bp->bar0 = pci_ioremap_bar(pdev, 0);
14211 	if (!bp->bar0) {
14212 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14213 		rc = -ENOMEM;
14214 		goto init_err_release;
14215 	}
14216 
14217 	bp->bar2 = pci_ioremap_bar(pdev, 4);
14218 	if (!bp->bar2) {
14219 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
14220 		rc = -ENOMEM;
14221 		goto init_err_release;
14222 	}
14223 
14224 	INIT_WORK(&bp->sp_task, bnxt_sp_task);
14225 	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
14226 
14227 	spin_lock_init(&bp->ntp_fltr_lock);
14228 #if BITS_PER_LONG == 32
14229 	spin_lock_init(&bp->db_lock);
14230 #endif
14231 
14232 	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
14233 	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
14234 
14235 	timer_setup(&bp->timer, bnxt_timer, 0);
14236 	bp->current_interval = BNXT_TIMER_INTERVAL;
14237 
14238 	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
14239 	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
14240 
14241 	clear_bit(BNXT_STATE_OPEN, &bp->state);
14242 	return 0;
14243 
14244 init_err_release:
14245 	bnxt_unmap_bars(bp, pdev);
14246 	pci_release_regions(pdev);
14247 
14248 init_err_disable:
14249 	pci_disable_device(pdev);
14250 
14251 init_err:
14252 	return rc;
14253 }
14254 
14255 /* rtnl_lock held */
14256 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
14257 {
14258 	struct sockaddr *addr = p;
14259 	struct bnxt *bp = netdev_priv(dev);
14260 	int rc = 0;
14261 
14262 	if (!is_valid_ether_addr(addr->sa_data))
14263 		return -EADDRNOTAVAIL;
14264 
14265 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
14266 		return 0;
14267 
14268 	rc = bnxt_approve_mac(bp, addr->sa_data, true);
14269 	if (rc)
14270 		return rc;
14271 
14272 	eth_hw_addr_set(dev, addr->sa_data);
14273 	bnxt_clear_usr_fltrs(bp, true);
14274 	if (netif_running(dev)) {
14275 		bnxt_close_nic(bp, false, false);
14276 		rc = bnxt_open_nic(bp, false, false);
14277 	}
14278 
14279 	return rc;
14280 }
14281 
14282 /* rtnl_lock held */
14283 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
14284 {
14285 	struct bnxt *bp = netdev_priv(dev);
14286 
14287 	if (netif_running(dev))
14288 		bnxt_close_nic(bp, true, false);
14289 
14290 	dev->mtu = new_mtu;
14291 	bnxt_set_ring_params(bp);
14292 
14293 	if (netif_running(dev))
14294 		return bnxt_open_nic(bp, true, false);
14295 
14296 	return 0;
14297 }
14298 
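/* Configure the number of mqprio traffic classes.  If the device is
 * running, it is closed and reopened because the TX and completion
 * ring counts must be recalculated to match the new TC count.
 */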
14299 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
14300 {
14301 	struct bnxt *bp = netdev_priv(dev);
14302 	bool sh = false;
14303 	int rc, tx_cp;
14304 
14305 	if (tc > bp->max_tc) {
14306 		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
14307 			   tc, bp->max_tc);
14308 		return -EINVAL;
14309 	}
14310 
14311 	if (bp->num_tc == tc)
14312 		return 0;
14313 
14314 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
14315 		sh = true;
14316 
14317 	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
14318 			      sh, tc, bp->tx_nr_rings_xdp);
14319 	if (rc)
14320 		return rc;
14321 
14322 	/* Need to close the device and redo hw resource allocations */
14323 	if (netif_running(bp->dev))
14324 		bnxt_close_nic(bp, true, false);
14325 
14326 	if (tc) {
14327 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
14328 		netdev_set_num_tc(dev, tc);
14329 		bp->num_tc = tc;
14330 	} else {
14331 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14332 		netdev_reset_tc(dev);
14333 		bp->num_tc = 0;
14334 	}
14335 	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
14336 	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
14337 	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
14338 			       tx_cp + bp->rx_nr_rings;
14339 
14340 	if (netif_running(bp->dev))
14341 		return bnxt_open_nic(bp, true, false);
14342 
14343 	return 0;
14344 }
14345 
14346 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
14347 				  void *cb_priv)
14348 {
14349 	struct bnxt *bp = cb_priv;
14350 
14351 	if (!bnxt_tc_flower_enabled(bp) ||
14352 	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
14353 		return -EOPNOTSUPP;
14354 
14355 	switch (type) {
14356 	case TC_SETUP_CLSFLOWER:
14357 		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
14358 	default:
14359 		return -EOPNOTSUPP;
14360 	}
14361 }
14362 
14363 LIST_HEAD(bnxt_block_cb_list);
14364 
14365 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
14366 			 void *type_data)
14367 {
14368 	struct bnxt *bp = netdev_priv(dev);
14369 
14370 	switch (type) {
14371 	case TC_SETUP_BLOCK:
14372 		return flow_block_cb_setup_simple(type_data,
14373 						  &bnxt_block_cb_list,
14374 						  bnxt_setup_tc_block_cb,
14375 						  bp, bp, true);
14376 	case TC_SETUP_QDISC_MQPRIO: {
14377 		struct tc_mqprio_qopt *mqprio = type_data;
14378 
14379 		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
14380 
14381 		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
14382 	}
14383 	default:
14384 		return -EOPNOTSUPP;
14385 	}
14386 }
14387 
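/* Compute the ntuple filter hash table index: use the skb's RX hash
 * when an skb is given, otherwise compute a Toeplitz hash over the
 * flow keys with the default VNIC's RSS hash key.
 */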
14388 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
14389 			    const struct sk_buff *skb)
14390 {
14391 	struct bnxt_vnic_info *vnic;
14392 
14393 	if (skb)
14394 		return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
14395 
14396 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
14397 	return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
14398 }
14399 
14400 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
14401 			   u32 idx)
14402 {
14403 	struct hlist_head *head;
14404 	int bit_id;
14405 
14406 	spin_lock_bh(&bp->ntp_fltr_lock);
14407 	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
14408 	if (bit_id < 0) {
14409 		spin_unlock_bh(&bp->ntp_fltr_lock);
14410 		return -ENOMEM;
14411 	}
14412 
14413 	fltr->base.sw_id = (u16)bit_id;
14414 	fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
14415 	fltr->base.flags |= BNXT_ACT_RING_DST;
14416 	head = &bp->ntp_fltr_hash_tbl[idx];
14417 	hlist_add_head_rcu(&fltr->base.hash, head);
14418 	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
14419 	bnxt_insert_usr_fltr(bp, &fltr->base);
14420 	bp->ntp_fltr_count++;
14421 	spin_unlock_bh(&bp->ntp_fltr_lock);
14422 	return 0;
14423 }
14424 
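/* Two ntuple filters are considered identical only if the protocols,
 * the address and port keys and masks, the control flags, and the
 * underlying L2 filter all match.
 */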
14425 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
14426 			    struct bnxt_ntuple_filter *f2)
14427 {
14428 	struct bnxt_flow_masks *masks1 = &f1->fmasks;
14429 	struct bnxt_flow_masks *masks2 = &f2->fmasks;
14430 	struct flow_keys *keys1 = &f1->fkeys;
14431 	struct flow_keys *keys2 = &f2->fkeys;
14432 
14433 	if (keys1->basic.n_proto != keys2->basic.n_proto ||
14434 	    keys1->basic.ip_proto != keys2->basic.ip_proto)
14435 		return false;
14436 
14437 	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
14438 		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
14439 		    masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
14440 		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
14441 		    masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
14442 			return false;
14443 	} else {
14444 		if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
14445 				     &keys2->addrs.v6addrs.src) ||
14446 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
14447 				     &masks2->addrs.v6addrs.src) ||
14448 		    !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
14449 				     &keys2->addrs.v6addrs.dst) ||
14450 		    !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
14451 				     &masks2->addrs.v6addrs.dst))
14452 			return false;
14453 	}
14454 
14455 	return keys1->ports.src == keys2->ports.src &&
14456 	       masks1->ports.src == masks2->ports.src &&
14457 	       keys1->ports.dst == keys2->ports.dst &&
14458 	       masks1->ports.dst == masks2->ports.dst &&
14459 	       keys1->control.flags == keys2->control.flags &&
14460 	       f1->l2_fltr == f2->l2_fltr;
14461 }
14462 
14463 struct bnxt_ntuple_filter *
14464 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
14465 				struct bnxt_ntuple_filter *fltr, u32 idx)
14466 {
14467 	struct bnxt_ntuple_filter *f;
14468 	struct hlist_head *head;
14469 
14470 	head = &bp->ntp_fltr_hash_tbl[idx];
14471 	hlist_for_each_entry_rcu(f, head, base.hash) {
14472 		if (bnxt_fltr_match(f, fltr))
14473 			return f;
14474 	}
14475 	return NULL;
14476 }
14477 
14478 #ifdef CONFIG_RFS_ACCEL
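/* aRFS callback: build an ntuple filter from the skb's dissected flow
 * keys to steer the flow to the requested RX queue.  Only IPv4/IPv6
 * TCP and UDP flows are supported, and fragmented flows are rejected.
 */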
14479 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
14480 			      u16 rxq_index, u32 flow_id)
14481 {
14482 	struct bnxt *bp = netdev_priv(dev);
14483 	struct bnxt_ntuple_filter *fltr, *new_fltr;
14484 	struct flow_keys *fkeys;
14485 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
14486 	struct bnxt_l2_filter *l2_fltr;
14487 	int rc = 0, idx;
14488 	u32 flags;
14489 
14490 	if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
14491 		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
14492 		atomic_inc(&l2_fltr->refcnt);
14493 	} else {
14494 		struct bnxt_l2_key key;
14495 
14496 		ether_addr_copy(key.dst_mac_addr, eth->h_dest);
14497 		key.vlan = 0;
14498 		l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
14499 		if (!l2_fltr)
14500 			return -EINVAL;
14501 		if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
14502 			bnxt_del_l2_filter(bp, l2_fltr);
14503 			return -EINVAL;
14504 		}
14505 	}
14506 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
14507 	if (!new_fltr) {
14508 		bnxt_del_l2_filter(bp, l2_fltr);
14509 		return -ENOMEM;
14510 	}
14511 
14512 	fkeys = &new_fltr->fkeys;
14513 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
14514 		rc = -EPROTONOSUPPORT;
14515 		goto err_free;
14516 	}
14517 
14518 	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
14519 	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
14520 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
14521 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
14522 		rc = -EPROTONOSUPPORT;
14523 		goto err_free;
14524 	}
14525 	new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
14526 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
14527 		if (bp->hwrm_spec_code < 0x10601) {
14528 			rc = -EPROTONOSUPPORT;
14529 			goto err_free;
14530 		}
14531 		new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
14532 	}
14533 	flags = fkeys->control.flags;
14534 	if (((flags & FLOW_DIS_ENCAPSULATION) &&
14535 	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
14536 		rc = -EPROTONOSUPPORT;
14537 		goto err_free;
14538 	}
14539 	new_fltr->l2_fltr = l2_fltr;
14540 
14541 	idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
14542 	rcu_read_lock();
14543 	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
14544 	if (fltr) {
14545 		rc = fltr->base.sw_id;
14546 		rcu_read_unlock();
14547 		goto err_free;
14548 	}
14549 	rcu_read_unlock();
14550 
14551 	new_fltr->flow_id = flow_id;
14552 	new_fltr->base.rxq = rxq_index;
14553 	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
14554 	if (!rc) {
14555 		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14556 		return new_fltr->base.sw_id;
14557 	}
14558 
14559 err_free:
14560 	bnxt_del_l2_filter(bp, l2_fltr);
14561 	kfree(new_fltr);
14562 	return rc;
14563 }
14564 #endif
14565 
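/* Remove an ntuple filter: unlink it under the spinlock, drop the L2
 * filter reference, release its ID in the bitmap, and free the filter
 * via RCU once concurrent readers are done.
 */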
14566 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
14567 {
14568 	spin_lock_bh(&bp->ntp_fltr_lock);
14569 	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
14570 		spin_unlock_bh(&bp->ntp_fltr_lock);
14571 		return;
14572 	}
14573 	hlist_del_rcu(&fltr->base.hash);
14574 	bnxt_del_one_usr_fltr(bp, &fltr->base);
14575 	bp->ntp_fltr_count--;
14576 	spin_unlock_bh(&bp->ntp_fltr_lock);
14577 	bnxt_del_l2_filter(bp, fltr->l2_fltr);
14578 	clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
14579 	kfree_rcu(fltr, base.rcu);
14580 }
14581 
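/* aRFS housekeeping run from the sp_task: free filters whose flows
 * have expired and program newly inserted filters into the hardware.
 */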
14582 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
14583 {
14584 #ifdef CONFIG_RFS_ACCEL
14585 	int i;
14586 
14587 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
14588 		struct hlist_head *head;
14589 		struct hlist_node *tmp;
14590 		struct bnxt_ntuple_filter *fltr;
14591 		int rc;
14592 
14593 		head = &bp->ntp_fltr_hash_tbl[i];
14594 		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
14595 			bool del = false;
14596 
14597 			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
14598 				if (fltr->base.flags & BNXT_ACT_NO_AGING)
14599 					continue;
14600 				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
14601 							fltr->flow_id,
14602 							fltr->base.sw_id)) {
14603 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
14604 									 fltr);
14605 					del = true;
14606 				}
14607 			} else {
14608 				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
14609 								       fltr);
14610 				if (rc)
14611 					del = true;
14612 				else
14613 					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
14614 			}
14615 
14616 			if (del)
14617 				bnxt_del_ntp_filter(bp, fltr);
14618 		}
14619 	}
14620 #endif
14621 }
14622 
14623 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
14624 				    unsigned int entry, struct udp_tunnel_info *ti)
14625 {
14626 	struct bnxt *bp = netdev_priv(netdev);
14627 	unsigned int cmd;
14628 
14629 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
14630 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
14631 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
14632 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
14633 	else
14634 		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
14635 
14636 	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
14637 }
14638 
14639 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
14640 				      unsigned int entry, struct udp_tunnel_info *ti)
14641 {
14642 	struct bnxt *bp = netdev_priv(netdev);
14643 	unsigned int cmd;
14644 
14645 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
14646 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
14647 	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
14648 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
14649 	else
14650 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
14651 
14652 	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
14653 }
14654 
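/* udp_tunnel_nic tables: one entry per tunnel type, so only a single
 * destination port can be offloaded per type at a time.  P7 chips
 * additionally support VXLAN-GPE.
 */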
14655 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
14656 	.set_port	= bnxt_udp_tunnel_set_port,
14657 	.unset_port	= bnxt_udp_tunnel_unset_port,
14658 	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
14659 			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
14660 	.tables		= {
14661 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
14662 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
14663 	},
14664 }, bnxt_udp_tunnels_p7 = {
14665 	.set_port	= bnxt_udp_tunnel_set_port,
14666 	.unset_port	= bnxt_udp_tunnel_unset_port,
14667 	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
14668 			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
14669 	.tables		= {
14670 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
14671 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
14672 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
14673 	},
14674 };
14675 
14676 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
14677 			       struct net_device *dev, u32 filter_mask,
14678 			       int nlflags)
14679 {
14680 	struct bnxt *bp = netdev_priv(dev);
14681 
14682 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
14683 				       nlflags, filter_mask, NULL);
14684 }
14685 
14686 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
14687 			       u16 flags, struct netlink_ext_ack *extack)
14688 {
14689 	struct bnxt *bp = netdev_priv(dev);
14690 	struct nlattr *attr, *br_spec;
14691 	int rem, rc = 0;
14692 
14693 	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
14694 		return -EOPNOTSUPP;
14695 
14696 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
14697 	if (!br_spec)
14698 		return -EINVAL;
14699 
14700 	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
14701 		u16 mode;
14702 
14703 		mode = nla_get_u16(attr);
14704 		if (mode == bp->br_mode)
14705 			break;
14706 
14707 		rc = bnxt_hwrm_set_br_mode(bp, mode);
14708 		if (!rc)
14709 			bp->br_mode = mode;
14710 		break;
14711 	}
14712 	return rc;
14713 }
14714 
14715 int bnxt_get_port_parent_id(struct net_device *dev,
14716 			    struct netdev_phys_item_id *ppid)
14717 {
14718 	struct bnxt *bp = netdev_priv(dev);
14719 
14720 	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
14721 		return -EOPNOTSUPP;
14722 
14723 	/* The PF and its VF-reps only support the switchdev framework */
14724 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
14725 		return -EOPNOTSUPP;
14726 
14727 	ppid->id_len = sizeof(bp->dsn);
14728 	memcpy(ppid->id, bp->dsn, ppid->id_len);
14729 
14730 	return 0;
14731 }
14732 
14733 static const struct net_device_ops bnxt_netdev_ops = {
14734 	.ndo_open		= bnxt_open,
14735 	.ndo_start_xmit		= bnxt_start_xmit,
14736 	.ndo_stop		= bnxt_close,
14737 	.ndo_get_stats64	= bnxt_get_stats64,
14738 	.ndo_set_rx_mode	= bnxt_set_rx_mode,
14739 	.ndo_eth_ioctl		= bnxt_ioctl,
14740 	.ndo_validate_addr	= eth_validate_addr,
14741 	.ndo_set_mac_address	= bnxt_change_mac_addr,
14742 	.ndo_change_mtu		= bnxt_change_mtu,
14743 	.ndo_fix_features	= bnxt_fix_features,
14744 	.ndo_set_features	= bnxt_set_features,
14745 	.ndo_features_check	= bnxt_features_check,
14746 	.ndo_tx_timeout		= bnxt_tx_timeout,
14747 #ifdef CONFIG_BNXT_SRIOV
14748 	.ndo_get_vf_config	= bnxt_get_vf_config,
14749 	.ndo_set_vf_mac		= bnxt_set_vf_mac,
14750 	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
14751 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
14752 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
14753 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
14754 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
14755 #endif
14756 	.ndo_setup_tc           = bnxt_setup_tc,
14757 #ifdef CONFIG_RFS_ACCEL
14758 	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
14759 #endif
14760 	.ndo_bpf		= bnxt_xdp,
14761 	.ndo_xdp_xmit		= bnxt_xdp_xmit,
14762 	.ndo_bridge_getlink	= bnxt_bridge_getlink,
14763 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
14764 };
14765 
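/* Per-queue statistics callbacks for the netdev qstats interface.
 * Packet and byte counts are summed from the per-ring unicast,
 * multicast, and broadcast counters.
 */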
14766 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
14767 				    struct netdev_queue_stats_rx *stats)
14768 {
14769 	struct bnxt *bp = netdev_priv(dev);
14770 	struct bnxt_cp_ring_info *cpr;
14771 	u64 *sw;
14772 
14773 	cpr = &bp->bnapi[i]->cp_ring;
14774 	sw = cpr->stats.sw_stats;
14775 
14776 	stats->packets = 0;
14777 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
14778 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
14779 	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
14780 
14781 	stats->bytes = 0;
14782 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
14783 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
14784 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
14785 
14786 	stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
14787 }
14788 
14789 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
14790 				    struct netdev_queue_stats_tx *stats)
14791 {
14792 	struct bnxt *bp = netdev_priv(dev);
14793 	struct bnxt_napi *bnapi;
14794 	u64 *sw;
14795 
14796 	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
14797 	sw = bnapi->cp_ring.stats.sw_stats;
14798 
14799 	stats->packets = 0;
14800 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
14801 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
14802 	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
14803 
14804 	stats->bytes = 0;
14805 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
14806 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
14807 	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
14808 }
14809 
14810 static void bnxt_get_base_stats(struct net_device *dev,
14811 				struct netdev_queue_stats_rx *rx,
14812 				struct netdev_queue_stats_tx *tx)
14813 {
14814 	struct bnxt *bp = netdev_priv(dev);
14815 
14816 	rx->packets = bp->net_stats_prev.rx_packets;
14817 	rx->bytes = bp->net_stats_prev.rx_bytes;
14818 	rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
14819 
14820 	tx->packets = bp->net_stats_prev.tx_packets;
14821 	tx->bytes = bp->net_stats_prev.tx_bytes;
14822 }
14823 
14824 static const struct netdev_stat_ops bnxt_stat_ops = {
14825 	.get_queue_stats_rx	= bnxt_get_queue_stats_rx,
14826 	.get_queue_stats_tx	= bnxt_get_queue_stats_tx,
14827 	.get_base_stats		= bnxt_get_base_stats,
14828 };
14829 
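/* Device removal: disable SR-IOV and remove the RDMA auxiliary device
 * before unregistering the netdev, then flush all pending work before
 * freeing the remaining driver state.
 */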
14830 static void bnxt_remove_one(struct pci_dev *pdev)
14831 {
14832 	struct net_device *dev = pci_get_drvdata(pdev);
14833 	struct bnxt *bp = netdev_priv(dev);
14834 
14835 	if (BNXT_PF(bp))
14836 		bnxt_sriov_disable(bp);
14837 
14838 	bnxt_rdma_aux_device_del(bp);
14839 
14840 	bnxt_ptp_clear(bp);
14841 	unregister_netdev(dev);
14842 
14843 	bnxt_rdma_aux_device_uninit(bp);
14844 
14845 	bnxt_free_l2_filters(bp, true);
14846 	bnxt_free_ntp_fltrs(bp, true);
14847 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
14848 		bnxt_clear_rss_ctxs(bp, true);
14849 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14850 	/* Flush any pending tasks */
14851 	cancel_work_sync(&bp->sp_task);
14852 	cancel_delayed_work_sync(&bp->fw_reset_task);
14853 	bp->sp_event = 0;
14854 
14855 	bnxt_dl_fw_reporters_destroy(bp);
14856 	bnxt_dl_unregister(bp);
14857 	bnxt_shutdown_tc(bp);
14858 
14859 	bnxt_clear_int_mode(bp);
14860 	bnxt_hwrm_func_drv_unrgtr(bp);
14861 	bnxt_free_hwrm_resources(bp);
14862 	bnxt_hwmon_uninit(bp);
14863 	bnxt_ethtool_free(bp);
14864 	bnxt_dcb_free(bp);
14865 	kfree(bp->ptp_cfg);
14866 	bp->ptp_cfg = NULL;
14867 	kfree(bp->fw_health);
14868 	bp->fw_health = NULL;
14869 	bnxt_cleanup_pci(bp);
14870 	bnxt_free_ctx_mem(bp);
14871 	kfree(bp->rss_indir_tbl);
14872 	bp->rss_indir_tbl = NULL;
14873 	bnxt_free_port_stats(bp);
14874 	free_netdev(dev);
14875 }
14876 
14877 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
14878 {
14879 	int rc = 0;
14880 	struct bnxt_link_info *link_info = &bp->link_info;
14881 
14882 	bp->phy_flags = 0;
14883 	rc = bnxt_hwrm_phy_qcaps(bp);
14884 	if (rc) {
14885 		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
14886 			   rc);
14887 		return rc;
14888 	}
14889 	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
14890 		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
14891 	else
14892 		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
14893 	if (!fw_dflt)
14894 		return 0;
14895 
14896 	mutex_lock(&bp->link_lock);
14897 	rc = bnxt_update_link(bp, false);
14898 	if (rc) {
14899 		mutex_unlock(&bp->link_lock);
14900 		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
14901 			   rc);
14902 		return rc;
14903 	}
14904 
14905 	/* Older firmware does not have supported_auto_speeds, so assume
14906 	 * that all supported speeds can be autonegotiated.
14907 	 */
14908 	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
14909 		link_info->support_auto_speeds = link_info->support_speeds;
14910 
14911 	bnxt_init_ethtool_link_settings(bp);
14912 	mutex_unlock(&bp->link_lock);
14913 	return 0;
14914 }
14915 
14916 static int bnxt_get_max_irq(struct pci_dev *pdev)
14917 {
14918 	u16 ctrl;
14919 
14920 	if (!pdev->msix_cap)
14921 		return 1;
14922 
14923 	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
14924 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
14925 }
14926 
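/* Compute the maximum usable RX, TX, and completion rings from the
 * firmware-reported resources, after subtracting the IRQs and stat
 * contexts already claimed by the RoCE (ULP) driver.
 */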
14927 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14928 				int *max_cp)
14929 {
14930 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
14931 	int max_ring_grps = 0, max_irq;
14932 
14933 	*max_tx = hw_resc->max_tx_rings;
14934 	*max_rx = hw_resc->max_rx_rings;
14935 	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
14936 	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
14937 			bnxt_get_ulp_msix_num_in_use(bp),
14938 			hw_resc->max_stat_ctxs -
14939 			bnxt_get_ulp_stat_ctxs_in_use(bp));
14940 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14941 		*max_cp = min_t(int, *max_cp, max_irq);
14942 	max_ring_grps = hw_resc->max_hw_ring_grps;
14943 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
14944 		*max_cp -= 1;
14945 		*max_rx -= 2;
14946 	}
14947 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
14948 		*max_rx >>= 1;
14949 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
14950 		int rc;
14951 
14952 		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
14953 		if (rc) {
14954 			*max_rx = 0;
14955 			*max_tx = 0;
14956 		}
14957 		/* On P5 and newer chips, the max_cp output param is the available NQ count */
14958 		*max_cp = max_irq;
14959 	}
14960 	*max_rx = min_t(int, *max_rx, max_ring_grps);
14961 }
14962 
14963 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
14964 {
14965 	int rx, tx, cp;
14966 
14967 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
14968 	*max_rx = rx;
14969 	*max_tx = tx;
14970 	if (!rx || !tx || !cp)
14971 		return -ENOMEM;
14972 
14973 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
14974 }
14975 
14976 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14977 			       bool shared)
14978 {
14979 	int rc;
14980 
14981 	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14982 	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
14983 		/* Not enough rings, try disabling agg rings. */
14984 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
14985 		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14986 		if (rc) {
14987 			/* set BNXT_FLAG_AGG_RINGS back for consistency */
14988 			bp->flags |= BNXT_FLAG_AGG_RINGS;
14989 			return rc;
14990 		}
14991 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
14992 		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14993 		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14994 		bnxt_set_ring_params(bp);
14995 	}
14996 
14997 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
14998 		int max_cp, max_stat, max_irq;
14999 
15000 		/* Reserve minimum resources for RoCE */
15001 		max_cp = bnxt_get_max_func_cp_rings(bp);
15002 		max_stat = bnxt_get_max_func_stat_ctxs(bp);
15003 		max_irq = bnxt_get_max_func_irqs(bp);
15004 		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
15005 		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
15006 		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
15007 			return 0;
15008 
15009 		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
15010 		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
15011 		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
15012 		max_cp = min_t(int, max_cp, max_irq);
15013 		max_cp = min_t(int, max_cp, max_stat);
15014 		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
15015 		if (rc)
15016 			rc = 0;
15017 	}
15018 	return rc;
15019 }
15020 
15021 /* In the initial default shared ring setting, each shared ring must have
15022  * an RX/TX ring pair.
15023  */
15024 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
15025 {
15026 	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
15027 	bp->rx_nr_rings = bp->cp_nr_rings;
15028 	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
15029 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15030 }
15031 
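/* Choose the default ring counts: start from the default RSS queue
 * count (1 in a kdump kernel), scale down on multi-port cards so the
 * total does not exceed the CPU count, then reserve the rings along
 * with any MSI-X vectors requested by the RoCE driver.
 */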
15032 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
15033 {
15034 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
15035 	int avail_msix;
15036 
15037 	if (!bnxt_can_reserve_rings(bp))
15038 		return 0;
15039 
15040 	if (sh)
15041 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
15042 	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
15043 	/* Reduce default rings on multi-port cards so that total default
15044 	 * rings do not exceed CPU count.
15045 	 */
15046 	if (bp->port_count > 1) {
15047 		int max_rings =
15048 			max_t(int, num_online_cpus() / bp->port_count, 1);
15049 
15050 		dflt_rings = min_t(int, dflt_rings, max_rings);
15051 	}
15052 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
15053 	if (rc)
15054 		return rc;
15055 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
15056 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
15057 	if (sh)
15058 		bnxt_trim_dflt_sh_rings(bp);
15059 	else
15060 		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
15061 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15062 
15063 	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
15064 	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
15065 		int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
15066 
15067 		bnxt_set_ulp_msix_num(bp, ulp_num_msix);
15068 		bnxt_set_dflt_ulp_stat_ctxs(bp);
15069 	}
15070 
15071 	rc = __bnxt_reserve_rings(bp);
15072 	if (rc && rc != -ENODEV)
15073 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
15074 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15075 	if (sh)
15076 		bnxt_trim_dflt_sh_rings(bp);
15077 
15078 	/* Rings may have been trimmed; re-reserve the trimmed rings. */
15079 	if (bnxt_need_reserve_rings(bp)) {
15080 		rc = __bnxt_reserve_rings(bp);
15081 		if (rc && rc != -ENODEV)
15082 			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
15083 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15084 	}
15085 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
15086 		bp->rx_nr_rings++;
15087 		bp->cp_nr_rings++;
15088 	}
15089 	if (rc) {
15090 		bp->tx_nr_rings = 0;
15091 		bp->rx_nr_rings = 0;
15092 	}
15093 	return rc;
15094 }
15095 
15096 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
15097 {
15098 	int rc;
15099 
15100 	if (bp->tx_nr_rings)
15101 		return 0;
15102 
15103 	bnxt_ulp_irq_stop(bp);
15104 	bnxt_clear_int_mode(bp);
15105 	rc = bnxt_set_dflt_rings(bp, true);
15106 	if (rc) {
15107 		if (BNXT_VF(bp) && rc == -ENODEV)
15108 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
15109 		else
15110 			netdev_err(bp->dev, "Not enough rings available.\n");
15111 		goto init_dflt_ring_err;
15112 	}
15113 	rc = bnxt_init_int_mode(bp);
15114 	if (rc)
15115 		goto init_dflt_ring_err;
15116 
15117 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15118 
15119 	bnxt_set_dflt_rfs(bp);
15120 
15121 init_dflt_ring_err:
15122 	bnxt_ulp_irq_restart(bp, rc);
15123 	return rc;
15124 }
15125 
15126 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
15127 {
15128 	int rc;
15129 
15130 	ASSERT_RTNL();
15131 	bnxt_hwrm_func_qcaps(bp);
15132 
15133 	if (netif_running(bp->dev))
15134 		__bnxt_close_nic(bp, true, false);
15135 
15136 	bnxt_ulp_irq_stop(bp);
15137 	bnxt_clear_int_mode(bp);
15138 	rc = bnxt_init_int_mode(bp);
15139 	bnxt_ulp_irq_restart(bp, rc);
15140 
15141 	if (netif_running(bp->dev)) {
15142 		if (rc)
15143 			dev_close(bp->dev);
15144 		else
15145 			rc = bnxt_open_nic(bp, true, false);
15146 	}
15147 
15148 	return rc;
15149 }
15150 
15151 static int bnxt_init_mac_addr(struct bnxt *bp)
15152 {
15153 	int rc = 0;
15154 
15155 	if (BNXT_PF(bp)) {
15156 		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
15157 	} else {
15158 #ifdef CONFIG_BNXT_SRIOV
15159 		struct bnxt_vf_info *vf = &bp->vf;
15160 		bool strict_approval = true;
15161 
15162 		if (is_valid_ether_addr(vf->mac_addr)) {
15163 			/* overwrite netdev dev_addr with admin VF MAC */
15164 			eth_hw_addr_set(bp->dev, vf->mac_addr);
15165 			/* Older PF driver or firmware may not approve this
15166 			 * correctly.
15167 			 */
15168 			strict_approval = false;
15169 		} else {
15170 			eth_hw_addr_random(bp->dev);
15171 		}
15172 		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
15173 #endif
15174 	}
15175 	return rc;
15176 }
15177 
15178 static void bnxt_vpd_read_info(struct bnxt *bp)
15179 {
15180 	struct pci_dev *pdev = bp->pdev;
15181 	unsigned int vpd_size, kw_len;
15182 	int pos, size;
15183 	u8 *vpd_data;
15184 
15185 	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
15186 	if (IS_ERR(vpd_data)) {
15187 		pci_warn(pdev, "Unable to read VPD\n");
15188 		return;
15189 	}
15190 
15191 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
15192 					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
15193 	if (pos < 0)
15194 		goto read_sn;
15195 
15196 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
15197 	memcpy(bp->board_partno, &vpd_data[pos], size);
15198 
15199 read_sn:
15200 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
15201 					   PCI_VPD_RO_KEYWORD_SERIALNO,
15202 					   &kw_len);
15203 	if (pos < 0)
15204 		goto exit;
15205 
15206 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
15207 	memcpy(bp->board_serialno, &vpd_data[pos], size);
15208 exit:
15209 	kfree(vpd_data);
15210 }
15211 
15212 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
15213 {
15214 	struct pci_dev *pdev = bp->pdev;
15215 	u64 qword;
15216 
15217 	qword = pci_get_dsn(pdev);
15218 	if (!qword) {
15219 		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
15220 		return -EOPNOTSUPP;
15221 	}
15222 
15223 	put_unaligned_le64(qword, dsn);
15224 
15225 	bp->flags |= BNXT_FLAG_DSN_VALID;
15226 	return 0;
15227 }
15228 
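/* Map the doorbell BAR (BAR 2); bp->db_size must have been set from
 * the firmware-reported doorbell size first.
 */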
15229 static int bnxt_map_db_bar(struct bnxt *bp)
15230 {
15231 	if (!bp->db_size)
15232 		return -ENODEV;
15233 	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
15234 	if (!bp->bar1)
15235 		return -ENOMEM;
15236 	return 0;
15237 }
15238 
15239 void bnxt_print_device_info(struct bnxt *bp)
15240 {
15241 	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
15242 		    board_info[bp->board_idx].name,
15243 		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
15244 
15245 	pcie_print_link_status(bp->pdev);
15246 }
15247 
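/* Main PCI probe routine: initialize the board, bring up firmware,
 * set up netdev features and default rings, initialize interrupts,
 * and register the netdev, devlink, and RDMA auxiliary device.
 */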
15248 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
15249 {
15250 	struct bnxt_hw_resc *hw_resc;
15251 	struct net_device *dev;
15252 	struct bnxt *bp;
15253 	int rc, max_irqs;
15254 
15255 	if (pci_is_bridge(pdev))
15256 		return -ENODEV;
15257 
15258 	/* Clear any DMA transactions left pending by the crashed kernel
15259 	 * while loading the driver in the kdump capture kernel.
15260 	 */
15261 	if (is_kdump_kernel()) {
15262 		pci_clear_master(pdev);
15263 		pcie_flr(pdev);
15264 	}
15265 
15266 	max_irqs = bnxt_get_max_irq(pdev);
15267 	dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
15268 				 max_irqs);
15269 	if (!dev)
15270 		return -ENOMEM;
15271 
15272 	bp = netdev_priv(dev);
15273 	bp->board_idx = ent->driver_data;
15274 	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
15275 	bnxt_set_max_func_irqs(bp, max_irqs);
15276 
15277 	if (bnxt_vf_pciid(bp->board_idx))
15278 		bp->flags |= BNXT_FLAG_VF;
15279 
15280 	/* No devlink port registration in case of a VF */
15281 	if (BNXT_PF(bp))
15282 		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
15283 
15284 	if (pdev->msix_cap)
15285 		bp->flags |= BNXT_FLAG_MSIX_CAP;
15286 
15287 	rc = bnxt_init_board(pdev, dev);
15288 	if (rc < 0)
15289 		goto init_err_free;
15290 
15291 	dev->netdev_ops = &bnxt_netdev_ops;
15292 	dev->stat_ops = &bnxt_stat_ops;
15293 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
15294 	dev->ethtool_ops = &bnxt_ethtool_ops;
15295 	pci_set_drvdata(pdev, dev);
15296 
15297 	rc = bnxt_alloc_hwrm_resources(bp);
15298 	if (rc)
15299 		goto init_err_pci_clean;
15300 
15301 	mutex_init(&bp->hwrm_cmd_lock);
15302 	mutex_init(&bp->link_lock);
15303 
15304 	rc = bnxt_fw_init_one_p1(bp);
15305 	if (rc)
15306 		goto init_err_pci_clean;
15307 
15308 	if (BNXT_PF(bp))
15309 		bnxt_vpd_read_info(bp);
15310 
15311 	if (BNXT_CHIP_P5_PLUS(bp)) {
15312 		bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
15313 		if (BNXT_CHIP_P7(bp))
15314 			bp->flags |= BNXT_FLAG_CHIP_P7;
15315 	}
15316 
15317 	rc = bnxt_alloc_rss_indir_tbl(bp, NULL);
15318 	if (rc)
15319 		goto init_err_pci_clean;
15320 
15321 	rc = bnxt_fw_init_one_p2(bp);
15322 	if (rc)
15323 		goto init_err_pci_clean;
15324 
15325 	rc = bnxt_map_db_bar(bp);
15326 	if (rc) {
15327 		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
15328 			rc);
15329 		goto init_err_pci_clean;
15330 	}
15331 
15332 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
15333 			   NETIF_F_TSO | NETIF_F_TSO6 |
15334 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
15335 			   NETIF_F_GSO_IPXIP4 |
15336 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
15337 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
15338 			   NETIF_F_RXCSUM | NETIF_F_GRO;
15339 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15340 		dev->hw_features |= NETIF_F_GSO_UDP_L4;
15341 
15342 	if (BNXT_SUPPORTS_TPA(bp))
15343 		dev->hw_features |= NETIF_F_LRO;
15344 
15345 	dev->hw_enc_features =
15346 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
15347 			NETIF_F_TSO | NETIF_F_TSO6 |
15348 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
15349 			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
15350 			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
15351 	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15352 		dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
15353 	if (bp->flags & BNXT_FLAG_CHIP_P7)
15354 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
15355 	else
15356 		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
15357 
15358 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
15359 				    NETIF_F_GSO_GRE_CSUM;
15360 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
15361 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
15362 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
15363 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
15364 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
15365 	if (BNXT_SUPPORTS_TPA(bp))
15366 		dev->hw_features |= NETIF_F_GRO_HW;
15367 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
15368 	if (dev->features & NETIF_F_GRO_HW)
15369 		dev->features &= ~NETIF_F_LRO;
15370 	dev->priv_flags |= IFF_UNICAST_FLT;
15371 
15372 	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
15373 
15374 	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
15375 			    NETDEV_XDP_ACT_RX_SG;
15376 
15377 #ifdef CONFIG_BNXT_SRIOV
15378 	init_waitqueue_head(&bp->sriov_cfg_wait);
15379 #endif
15380 	if (BNXT_SUPPORTS_TPA(bp)) {
15381 		bp->gro_func = bnxt_gro_func_5730x;
15382 		if (BNXT_CHIP_P4(bp))
15383 			bp->gro_func = bnxt_gro_func_5731x;
15384 		else if (BNXT_CHIP_P5_PLUS(bp))
15385 			bp->gro_func = bnxt_gro_func_5750x;
15386 	}
15387 	if (!BNXT_CHIP_P4_PLUS(bp))
15388 		bp->flags |= BNXT_FLAG_DOUBLE_DB;
15389 
15390 	rc = bnxt_init_mac_addr(bp);
15391 	if (rc) {
15392 		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
15393 		rc = -EADDRNOTAVAIL;
15394 		goto init_err_pci_clean;
15395 	}
15396 
15397 	if (BNXT_PF(bp)) {
15398 		/* Read the adapter's DSN to use as the eswitch switch_id */
15399 		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
15400 	}
15401 
15402 	/* MTU range: 60 - FW defined max */
15403 	dev->min_mtu = ETH_ZLEN;
15404 	dev->max_mtu = bp->max_mtu;
15405 
15406 	rc = bnxt_probe_phy(bp, true);
15407 	if (rc)
15408 		goto init_err_pci_clean;
15409 
15410 	hw_resc = &bp->hw_resc;
15411 	bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
15412 		       BNXT_L2_FLTR_MAX_FLTR;
15413 	/* Older firmware may not report these filters properly */
15414 	if (bp->max_fltr < BNXT_MAX_FLTR)
15415 		bp->max_fltr = BNXT_MAX_FLTR;
15416 	bnxt_init_l2_fltr_tbl(bp);
15417 	bnxt_set_rx_skb_mode(bp, false);
15418 	bnxt_set_tpa_flags(bp);
15419 	bnxt_set_ring_params(bp);
15420 	bnxt_rdma_aux_device_init(bp);
15421 	rc = bnxt_set_dflt_rings(bp, true);
15422 	if (rc) {
15423 		if (BNXT_VF(bp) && rc == -ENODEV) {
15424 			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
15425 		} else {
15426 			netdev_err(bp->dev, "Not enough rings available.\n");
15427 			rc = -ENOMEM;
15428 		}
15429 		goto init_err_pci_clean;
15430 	}
15431 
15432 	bnxt_fw_init_one_p3(bp);
15433 
15434 	bnxt_init_dflt_coal(bp);
15435 
15436 	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
15437 		bp->flags |= BNXT_FLAG_STRIP_VLAN;
15438 
15439 	rc = bnxt_init_int_mode(bp);
15440 	if (rc)
15441 		goto init_err_pci_clean;
15442 
15443 	/* No TC has been set yet and rings may have been trimmed due to
15444 	 * limited MSIX, so we re-initialize the TX rings per TC.
15445 	 */
15446 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15447 
15448 	if (BNXT_PF(bp)) {
15449 		if (!bnxt_pf_wq) {
15450 			bnxt_pf_wq =
15451 				create_singlethread_workqueue("bnxt_pf_wq");
15452 			if (!bnxt_pf_wq) {
15453 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
15454 				rc = -ENOMEM;
15455 				goto init_err_pci_clean;
15456 			}
15457 		}
15458 		rc = bnxt_init_tc(bp);
15459 		if (rc)
15460 			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
15461 				   rc);
15462 	}
15463 
15464 	bnxt_inv_fw_health_reg(bp);
15465 	rc = bnxt_dl_register(bp);
15466 	if (rc)
15467 		goto init_err_dl;
15468 
15469 	INIT_LIST_HEAD(&bp->usr_fltr_list);
15470 
15471 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
15472 		bnxt_init_multi_rss_ctx(bp);
15473 
15475 	rc = register_netdev(dev);
15476 	if (rc)
15477 		goto init_err_cleanup;
15478 
15479 	bnxt_dl_fw_reporters_create(bp);
15480 
15481 	bnxt_rdma_aux_device_add(bp);
15482 
15483 	bnxt_print_device_info(bp);
15484 
15485 	pci_save_state(pdev);
15486 
15487 	return 0;
15488 init_err_cleanup:
15489 	bnxt_rdma_aux_device_uninit(bp);
15490 	bnxt_dl_unregister(bp);
15491 init_err_dl:
15492 	bnxt_shutdown_tc(bp);
15493 	bnxt_clear_int_mode(bp);
15494 
15495 init_err_pci_clean:
15496 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
15497 		bnxt_clear_rss_ctxs(bp, true);
15498 	bnxt_hwrm_func_drv_unrgtr(bp);
15499 	bnxt_free_hwrm_resources(bp);
15500 	bnxt_hwmon_uninit(bp);
15501 	bnxt_ethtool_free(bp);
15502 	bnxt_ptp_clear(bp);
15503 	kfree(bp->ptp_cfg);
15504 	bp->ptp_cfg = NULL;
15505 	kfree(bp->fw_health);
15506 	bp->fw_health = NULL;
15507 	bnxt_cleanup_pci(bp);
15508 	bnxt_free_ctx_mem(bp);
15509 	kfree(bp->rss_indir_tbl);
15510 	bp->rss_indir_tbl = NULL;
15511 
15512 init_err_free:
15513 	free_netdev(dev);
15514 	return rc;
15515 }
15516 
15517 static void bnxt_shutdown(struct pci_dev *pdev)
15518 {
15519 	struct net_device *dev = pci_get_drvdata(pdev);
15520 	struct bnxt *bp;
15521 
15522 	if (!dev)
15523 		return;
15524 
15525 	rtnl_lock();
15526 	bp = netdev_priv(dev);
15527 	if (!bp)
15528 		goto shutdown_exit;
15529 
15530 	if (netif_running(dev))
15531 		dev_close(dev);
15532 
15533 	bnxt_clear_int_mode(bp);
15534 	pci_disable_device(pdev);
15535 
15536 	if (system_state == SYSTEM_POWER_OFF) {
15537 		pci_wake_from_d3(pdev, bp->wol);
15538 		pci_set_power_state(pdev, PCI_D3hot);
15539 	}
15540 
15541 shutdown_exit:
15542 	rtnl_unlock();
15543 }
15544 
15545 #ifdef CONFIG_PM_SLEEP
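/* System suspend: stop the ULP driver, close the NIC, unregister from
 * firmware, disable the PCI device, and free context memory so that
 * firmware state can be rebuilt from scratch on resume.
 */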
15546 static int bnxt_suspend(struct device *device)
15547 {
15548 	struct net_device *dev = dev_get_drvdata(device);
15549 	struct bnxt *bp = netdev_priv(dev);
15550 	int rc = 0;
15551 
15552 	bnxt_ulp_stop(bp);
15553 
15554 	rtnl_lock();
15555 	if (netif_running(dev)) {
15556 		netif_device_detach(dev);
15557 		rc = bnxt_close(dev);
15558 	}
15559 	bnxt_hwrm_func_drv_unrgtr(bp);
15560 	pci_disable_device(bp->pdev);
15561 	bnxt_free_ctx_mem(bp);
15562 	rtnl_unlock();
15563 	return rc;
15564 }
15565 
15566 static int bnxt_resume(struct device *device)
15567 {
15568 	struct net_device *dev = dev_get_drvdata(device);
15569 	struct bnxt *bp = netdev_priv(dev);
15570 	int rc = 0;
15571 
15572 	rtnl_lock();
15573 	rc = pci_enable_device(bp->pdev);
15574 	if (rc) {
15575 		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
15576 			   rc);
15577 		goto resume_exit;
15578 	}
15579 	pci_set_master(bp->pdev);
15580 	if (bnxt_hwrm_ver_get(bp)) {
15581 		rc = -ENODEV;
15582 		goto resume_exit;
15583 	}
15584 	rc = bnxt_hwrm_func_reset(bp);
15585 	if (rc) {
15586 		rc = -EBUSY;
15587 		goto resume_exit;
15588 	}
15589 
15590 	rc = bnxt_hwrm_func_qcaps(bp);
15591 	if (rc)
15592 		goto resume_exit;
15593 
15594 	bnxt_clear_reservations(bp, true);
15595 
15596 	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
15597 		rc = -ENODEV;
15598 		goto resume_exit;
15599 	}
15600 
15601 	bnxt_get_wol_settings(bp);
15602 	if (netif_running(dev)) {
15603 		rc = bnxt_open(dev);
15604 		if (!rc)
15605 			netif_device_attach(dev);
15606 	}
15607 
15608 resume_exit:
15609 	rtnl_unlock();
15610 	bnxt_ulp_start(bp, rc);
15611 	if (!rc)
15612 		bnxt_reenable_sriov(bp);
15613 	return rc;
15614 }
15615 
15616 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
15617 #define BNXT_PM_OPS (&bnxt_pm_ops)
15618 
15619 #else
15620 
15621 #define BNXT_PM_OPS NULL
15622 
15623 #endif /* CONFIG_PM_SLEEP */
15624 
15625 /**
15626  * bnxt_io_error_detected - called when PCI error is detected
15627  * @pdev: Pointer to PCI device
15628  * @state: The current pci connection state
15629  *
15630  * This function is called after a PCI bus error affecting
15631  * this device has been detected.
15632  */
15633 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
15634 					       pci_channel_state_t state)
15635 {
15636 	struct net_device *netdev = pci_get_drvdata(pdev);
15637 	struct bnxt *bp = netdev_priv(netdev);
15638 	bool abort = false;
15639 
15640 	netdev_info(netdev, "PCI I/O error detected\n");
15641 
15642 	bnxt_ulp_stop(bp);
15643 
15644 	rtnl_lock();
15645 	netif_device_detach(netdev);
15646 
15647 	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15648 		netdev_err(bp->dev, "Firmware reset already in progress\n");
15649 		abort = true;
15650 	}
15651 
15652 	if (abort || state == pci_channel_io_perm_failure) {
15653 		rtnl_unlock();
15654 		return PCI_ERS_RESULT_DISCONNECT;
15655 	}
15656 
15657 	/* The link is no longer reliable if the state is pci_channel_io_frozen,
15658 	 * so disable bus mastering to prevent any potentially bad DMAs before
15659 	 * freeing kernel memory.
15660 	 */
15661 	if (state == pci_channel_io_frozen) {
15662 		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
15663 		bnxt_fw_fatal_close(bp);
15664 	}
15665 
15666 	if (netif_running(netdev))
15667 		__bnxt_close_nic(bp, true, true);
15668 
15669 	if (pci_is_enabled(pdev))
15670 		pci_disable_device(pdev);
15671 	bnxt_free_ctx_mem(bp);
15672 	rtnl_unlock();
15673 
15674 	/* Request a slot reset. */
15675 	return PCI_ERS_RESULT_NEED_RESET;
15676 }
15677 
15678 /**
15679  * bnxt_io_slot_reset - called after the pci bus has been reset.
15680  * @pdev: Pointer to PCI device
15681  *
15682  * Restart the card from scratch, as if from a cold-boot.
15683  * At this point, the card has experienced a hard reset,
15684  * followed by fixups by BIOS, and has its config space
15685  * set up identically to what it was at cold boot.
15686  */
15687 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
15688 {
15689 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
15690 	struct net_device *netdev = pci_get_drvdata(pdev);
15691 	struct bnxt *bp = netdev_priv(netdev);
15692 	int retry = 0;
15693 	int err = 0;
15694 	int off;
15695 
15696 	netdev_info(bp->dev, "PCI Slot Reset\n");
15697 
15698 	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
15699 	    test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
15700 		msleep(900);
15701 
15702 	rtnl_lock();
15703 
15704 	if (pci_enable_device(pdev)) {
15705 		dev_err(&pdev->dev,
15706 			"Cannot re-enable PCI device after reset.\n");
15707 	} else {
15708 		pci_set_master(pdev);
15709 		/* Upon fatal error, the device's internal logic that latches
15710 		 * the BAR values is reset and is only restored by rewriting
15711 		 * the BARs.
15712 		 *
15713 		 * Since pci_restore_state() does not rewrite a BAR whose value
15714 		 * matches the previously saved value, the driver must first
15715 		 * write the BARs to 0 to force a restore after a fatal error.
15716 		 */
15717 		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
15718 				       &bp->state)) {
15719 			for (off = PCI_BASE_ADDRESS_0;
15720 			     off <= PCI_BASE_ADDRESS_5; off += 4)
15721 				pci_write_config_dword(bp->pdev, off, 0);
15722 		}
15723 		pci_restore_state(pdev);
15724 		pci_save_state(pdev);
15725 
15726 		bnxt_inv_fw_health_reg(bp);
15727 		bnxt_try_map_fw_health_reg(bp);
15728 
15729 		/* In some PCIe AER scenarios, firmware may take up to
15730 		 * 10 seconds to become ready.
15731 		 */
15732 		do {
15733 			err = bnxt_try_recover_fw(bp);
15734 			if (!err)
15735 				break;
15736 			retry++;
15737 		} while (retry < BNXT_FW_SLOT_RESET_RETRY);
15738 
15739 		if (err) {
15740 			dev_err(&pdev->dev, "Firmware not ready\n");
15741 			goto reset_exit;
15742 		}
15743 
15744 		err = bnxt_hwrm_func_reset(bp);
15745 		if (!err)
15746 			result = PCI_ERS_RESULT_RECOVERED;
15747 
15748 		bnxt_ulp_irq_stop(bp);
15749 		bnxt_clear_int_mode(bp);
15750 		err = bnxt_init_int_mode(bp);
15751 		bnxt_ulp_irq_restart(bp, err);
15752 	}
15753 
15754 reset_exit:
15755 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15756 	bnxt_clear_reservations(bp, true);
15757 	rtnl_unlock();
15758 
15759 	return result;
15760 }
15761 
15762 /**
15763  * bnxt_io_resume - called when traffic can start flowing again.
15764  * @pdev: Pointer to PCI device
15765  *
15766  * This callback is called when the error recovery driver tells
15767  * us that it's OK to resume normal operation.
15768  */
15769 static void bnxt_io_resume(struct pci_dev *pdev)
15770 {
15771 	struct net_device *netdev = pci_get_drvdata(pdev);
15772 	struct bnxt *bp = netdev_priv(netdev);
15773 	int err;
15774 
15775 	netdev_info(bp->dev, "PCI Slot Resume\n");
15776 	rtnl_lock();
15777 
15778 	err = bnxt_hwrm_func_qcaps(bp);
15779 	if (!err && netif_running(netdev))
15780 		err = bnxt_open(netdev);
15781 
15782 	if (!err)
15783 		netif_device_attach(netdev);
15784 
15785 	rtnl_unlock();
15786 	bnxt_ulp_start(bp, err);
15787 	if (!err)
15788 		bnxt_reenable_sriov(bp);
15789 }
15790 
15791 static const struct pci_error_handlers bnxt_err_handler = {
15792 	.error_detected	= bnxt_io_error_detected,
15793 	.slot_reset	= bnxt_io_slot_reset,
15794 	.resume		= bnxt_io_resume
15795 };
15796 
15797 static struct pci_driver bnxt_pci_driver = {
15798 	.name		= DRV_MODULE_NAME,
15799 	.id_table	= bnxt_pci_tbl,
15800 	.probe		= bnxt_init_one,
15801 	.remove		= bnxt_remove_one,
15802 	.shutdown	= bnxt_shutdown,
15803 	.driver.pm	= BNXT_PM_OPS,
15804 	.err_handler	= &bnxt_err_handler,
15805 #if defined(CONFIG_BNXT_SRIOV)
15806 	.sriov_configure = bnxt_sriov_configure,
15807 #endif
15808 };
15809 
15810 static int __init bnxt_init(void)
15811 {
15812 	int err;
15813 
15814 	bnxt_debug_init();
15815 	err = pci_register_driver(&bnxt_pci_driver);
15816 	if (err) {
15817 		bnxt_debug_exit();
15818 		return err;
15819 	}
15820 
15821 	return 0;
15822 }
15823 
15824 static void __exit bnxt_exit(void)
15825 {
15826 	pci_unregister_driver(&bnxt_pci_driver);
15827 	if (bnxt_pf_wq)
15828 		destroy_workqueue(bnxt_pf_wq);
15829 	bnxt_debug_exit();
15830 }
15831 
15832 module_init(bnxt_init);
15833 module_exit(bnxt_exit);
15834