1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_queues.h>
58
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_hwrm.h"
62 #include "bnxt_ulp.h"
63 #include "bnxt_sriov.h"
64 #include "bnxt_ethtool.h"
65 #include "bnxt_dcb.h"
66 #include "bnxt_xdp.h"
67 #include "bnxt_ptp.h"
68 #include "bnxt_vfr.h"
69 #include "bnxt_tc.h"
70 #include "bnxt_devlink.h"
71 #include "bnxt_debugfs.h"
72 #include "bnxt_coredump.h"
73 #include "bnxt_hwmon.h"
74
75 #define BNXT_TX_TIMEOUT (5 * HZ)
76 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
77 NETIF_MSG_TX_ERR)
78
79 MODULE_LICENSE("GPL");
80 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
81
82 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
83 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
84
85 #define BNXT_TX_PUSH_THRESH 164
86
87 /* indexed by enum board_idx */
88 static const struct {
89 char *name;
90 } board_info[] = {
91 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
92 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
93 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
94 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
95 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
96 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
97 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
98 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
99 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
100 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
101 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
102 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
103 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
104 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
105 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
106 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
108 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
109 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
110 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
111 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
112 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
113 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
114 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
115 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
116 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
117 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
118 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
119 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
120 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
121 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
123 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
124 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
126 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
127 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
128 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
129 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
130 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
131 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
132 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
133 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
134 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
135 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
136 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
137 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
138 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
139 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
140 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
141 };
142
143 static const struct pci_device_id bnxt_pci_tbl[] = {
144 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
145 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
146 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
147 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
148 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
149 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
150 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
151 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
152 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
153 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
154 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
155 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
156 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
157 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
158 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
159 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
160 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
161 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
162 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
163 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
164 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
165 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
166 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
167 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
168 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
169 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
171 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
173 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
175 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
178 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
179 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
180 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
181 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
182 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
183 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
184 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
185 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
186 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
187 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
190 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
191 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
193 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
194 #ifdef CONFIG_BNXT_SRIOV
195 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
196 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
197 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
198 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
199 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
200 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
201 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
204 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
207 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
208 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
209 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
210 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
211 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
212 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
213 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
214 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
215 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
216 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
217 #endif
218 { 0 }
219 };
220
221 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
222
223 static const u16 bnxt_vf_req_snif[] = {
224 HWRM_FUNC_CFG,
225 HWRM_FUNC_VF_CFG,
226 HWRM_PORT_PHY_QCFG,
227 HWRM_CFA_L2_FILTER_ALLOC,
228 };
229
230 static const u16 bnxt_async_events_arr[] = {
231 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
232 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
233 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
234 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
235 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
236 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
237 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
238 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
239 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
240 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
241 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
242 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
243 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
244 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
245 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
246 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
247 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
248 };
249
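/* Map backing-store context types (BNXT_CTX_*) to the firmware trace
 * types used when flushing the corresponding debug log buffers.
 */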
250 const u16 bnxt_bstore_to_trace[] = {
251 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
252 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
253 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
254 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
255 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
256 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
257 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
258 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
259 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
260 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
261 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
262 };
263
264 static struct workqueue_struct *bnxt_pf_wq;
265
266 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
267 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
268 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
269
270 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
271 .ports = {
272 .src = 0,
273 .dst = 0,
274 },
275 .addrs = {
276 .v6addrs = {
277 .src = BNXT_IPV6_MASK_NONE,
278 .dst = BNXT_IPV6_MASK_NONE,
279 },
280 },
281 };
282
283 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
284 .ports = {
285 .src = cpu_to_be16(0xffff),
286 .dst = cpu_to_be16(0xffff),
287 },
288 .addrs = {
289 .v6addrs = {
290 .src = BNXT_IPV6_MASK_ALL,
291 .dst = BNXT_IPV6_MASK_ALL,
292 },
293 },
294 };
295
296 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
297 .ports = {
298 .src = cpu_to_be16(0xffff),
299 .dst = cpu_to_be16(0xffff),
300 },
301 .addrs = {
302 .v4addrs = {
303 .src = cpu_to_be32(0xffffffff),
304 .dst = cpu_to_be32(0xffffffff),
305 },
306 },
307 };
308
309 static bool bnxt_vf_pciid(enum board_idx idx)
310 {
311 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
312 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
313 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
314 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
315 }
316
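/* Completion ring doorbell values: DB_CP_FLAGS includes DB_IRQ_DIS to
 * keep the interrupt disabled while polling; DB_CP_REARM_FLAGS omits it
 * so the interrupt is re-armed.
 */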
317 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
318 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
319
320 #define BNXT_DB_CQ(db, idx) \
321 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
322
323 #define BNXT_DB_NQ_P5(db, idx) \
324 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
325 (db)->doorbell)
326
327 #define BNXT_DB_NQ_P7(db, idx) \
328 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
329 DB_RING_IDX(db, idx), (db)->doorbell)
330
331 #define BNXT_DB_CQ_ARM(db, idx) \
332 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
333
334 #define BNXT_DB_NQ_ARM_P5(db, idx) \
335 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
336 DB_RING_IDX(db, idx), (db)->doorbell)
337
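/* Write the NQ doorbell using the format appropriate for the chip:
 * 64-bit DBR_TYPE_NQ_MASK on P7, 64-bit DBR_TYPE_NQ on P5+, and the
 * legacy 32-bit CQ doorbell on older chips.
 */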
338 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
339 {
340 if (bp->flags & BNXT_FLAG_CHIP_P7)
341 BNXT_DB_NQ_P7(db, idx);
342 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
343 BNXT_DB_NQ_P5(db, idx);
344 else
345 BNXT_DB_CQ(db, idx);
346 }
347
348 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
349 {
350 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
351 BNXT_DB_NQ_ARM_P5(db, idx);
352 else
353 BNXT_DB_CQ_ARM(db, idx);
354 }
355
356 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
357 {
358 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
359 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
360 DB_RING_IDX(db, idx), db->doorbell);
361 else
362 BNXT_DB_CQ(db, idx);
363 }
364
365 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
366 {
367 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
368 return;
369
370 if (BNXT_PF(bp))
371 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
372 else
373 schedule_delayed_work(&bp->fw_reset_task, delay);
374 }
375
376 static void __bnxt_queue_sp_work(struct bnxt *bp)
377 {
378 if (BNXT_PF(bp))
379 queue_work(bnxt_pf_wq, &bp->sp_task);
380 else
381 schedule_work(&bp->sp_task);
382 }
383
384 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
385 {
386 set_bit(event, &bp->sp_event);
387 __bnxt_queue_sp_work(bp);
388 }
389
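/* Schedule a reset for an RX ring that is out of sync.  P5+ chips need
 * the full reset task; older chips can reset just the ring.  The ring's
 * rx_next_cons is set to an invalid value (0xffff).
 */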
390 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
391 {
392 if (!rxr->bnapi->in_reset) {
393 rxr->bnapi->in_reset = true;
394 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
395 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
396 else
397 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
398 __bnxt_queue_sp_work(bp);
399 }
400 rxr->rx_next_cons = 0xffff;
401 }
402
403 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
404 u16 curr)
405 {
406 struct bnxt_napi *bnapi = txr->bnapi;
407
408 if (bnapi->tx_fault)
409 return;
410
411 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
412 txr->txq_index, txr->tx_hw_cons,
413 txr->tx_cons, txr->tx_prod, curr);
414 WARN_ON_ONCE(1);
415 bnapi->tx_fault = 1;
416 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
417 }
418
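/* TX length hint values indexed by packet length in 512-byte units
 * (bnxt_start_xmit() uses length >> 9 as the index).
 */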
419 const u16 bnxt_lhint_arr[] = {
420 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
421 TX_BD_FLAGS_LHINT_512_TO_1023,
422 TX_BD_FLAGS_LHINT_1024_TO_2047,
423 TX_BD_FLAGS_LHINT_1024_TO_2047,
424 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
425 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
426 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
427 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
428 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
429 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
430 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
431 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
432 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
433 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 };
440
441 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
442 {
443 struct metadata_dst *md_dst = skb_metadata_dst(skb);
444
445 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
446 return 0;
447
448 return md_dst->u.port_info.port_id;
449 }
450
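/* Write the TX doorbell with the new producer index and clear any
 * pending kick.
 */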
451 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
452 u16 prod)
453 {
454 /* Sync BD data before updating doorbell */
455 wmb();
456 bnxt_db_write(bp, &txr->tx_db, prod);
457 txr->kick_pending = 0;
458 }
459
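/* Main transmit routine.  Small packets on an empty ring can use the
 * "push" path that copies the BDs and payload directly through the
 * doorbell BAR; all other packets are DMA-mapped and described with
 * long TX BDs, one per fragment.
 */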
460 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
461 {
462 struct bnxt *bp = netdev_priv(dev);
463 struct tx_bd *txbd, *txbd0;
464 struct tx_bd_ext *txbd1;
465 struct netdev_queue *txq;
466 int i;
467 dma_addr_t mapping;
468 unsigned int length, pad = 0;
469 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
470 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
471 struct pci_dev *pdev = bp->pdev;
472 u16 prod, last_frag, txts_prod;
473 struct bnxt_tx_ring_info *txr;
474 struct bnxt_sw_tx_bd *tx_buf;
475 __le32 lflags = 0;
476
477 i = skb_get_queue_mapping(skb);
478 if (unlikely(i >= bp->tx_nr_rings)) {
479 dev_kfree_skb_any(skb);
480 dev_core_stats_tx_dropped_inc(dev);
481 return NETDEV_TX_OK;
482 }
483
484 txq = netdev_get_tx_queue(dev, i);
485 txr = &bp->tx_ring[bp->tx_ring_map[i]];
486 prod = txr->tx_prod;
487
488 free_size = bnxt_tx_avail(bp, txr);
489 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
490 /* We must have raced with NAPI cleanup */
491 if (net_ratelimit() && txr->kick_pending)
492 netif_warn(bp, tx_err, dev,
493 "bnxt: ring busy w/ flush pending!\n");
494 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
495 bp->tx_wake_thresh))
496 return NETDEV_TX_BUSY;
497 }
498
499 if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
500 goto tx_free;
501
502 length = skb->len;
503 len = skb_headlen(skb);
504 last_frag = skb_shinfo(skb)->nr_frags;
505
506 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
507
508 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
509 tx_buf->skb = skb;
510 tx_buf->nr_frags = last_frag;
511
512 vlan_tag_flags = 0;
513 cfa_action = bnxt_xmit_get_cfa_action(skb);
514 if (skb_vlan_tag_present(skb)) {
515 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
516 skb_vlan_tag_get(skb);
517 /* Currently supports 802.1Q and 802.1AD VLAN offloads;
518 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated
519 */
520 if (skb->vlan_proto == htons(ETH_P_8021Q))
521 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
522 }
523
524 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
525 ptp->tx_tstamp_en) {
526 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
527 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
528 tx_buf->is_ts_pkt = 1;
529 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
530 } else if (!skb_is_gso(skb)) {
531 u16 seq_id, hdr_off;
532
533 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
534 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
535 if (vlan_tag_flags)
536 hdr_off += VLAN_HLEN;
537 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
538 tx_buf->is_ts_pkt = 1;
539 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
540
541 ptp->txts_req[txts_prod].tx_seqid = seq_id;
542 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
543 tx_buf->txts_prod = txts_prod;
544 }
545 }
546 }
547 if (unlikely(skb->no_fcs))
548 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
549
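/* Push path: the packet fits below the push threshold and the ring is
 * empty, so copy the BDs and payload into the doorbell BAR instead of
 * DMA-mapping the skb.
 */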
550 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
551 !lflags) {
552 struct tx_push_buffer *tx_push_buf = txr->tx_push;
553 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
554 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
555 void __iomem *db = txr->tx_db.doorbell;
556 void *pdata = tx_push_buf->data;
557 u64 *end;
558 int j, push_len;
559
560 /* Set COAL_NOW to be ready quickly for the next push */
561 tx_push->tx_bd_len_flags_type =
562 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
563 TX_BD_TYPE_LONG_TX_BD |
564 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
565 TX_BD_FLAGS_COAL_NOW |
566 TX_BD_FLAGS_PACKET_END |
567 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
568
569 if (skb->ip_summed == CHECKSUM_PARTIAL)
570 tx_push1->tx_bd_hsize_lflags =
571 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
572 else
573 tx_push1->tx_bd_hsize_lflags = 0;
574
575 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
576 tx_push1->tx_bd_cfa_action =
577 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
578
579 end = pdata + length;
580 end = PTR_ALIGN(end, 8) - 1;
581 *end = 0;
582
583 skb_copy_from_linear_data(skb, pdata, len);
584 pdata += len;
585 for (j = 0; j < last_frag; j++) {
586 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
587 void *fptr;
588
589 fptr = skb_frag_address_safe(frag);
590 if (!fptr)
591 goto normal_tx;
592
593 memcpy(pdata, fptr, skb_frag_size(frag));
594 pdata += skb_frag_size(frag);
595 }
596
597 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
598 txbd->tx_bd_haddr = txr->data_mapping;
599 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
600 prod = NEXT_TX(prod);
601 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
602 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
603 memcpy(txbd, tx_push1, sizeof(*txbd));
604 prod = NEXT_TX(prod);
605 tx_push->doorbell =
606 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
607 DB_RING_IDX(&txr->tx_db, prod));
608 WRITE_ONCE(txr->tx_prod, prod);
609
610 tx_buf->is_push = 1;
611 netdev_tx_sent_queue(txq, skb->len);
612 wmb(); /* Sync is_push and byte queue before pushing data */
613
614 push_len = (length + sizeof(*tx_push) + 7) / 8;
615 if (push_len > 16) {
616 __iowrite64_copy(db, tx_push_buf, 16);
617 __iowrite32_copy(db + 4, tx_push_buf + 1,
618 (push_len - 16) << 1);
619 } else {
620 __iowrite64_copy(db, tx_push_buf, push_len);
621 }
622
623 goto tx_done;
624 }
625
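/* Normal path: DMA-map the linear data and each page fragment and
 * chain long TX BDs for them.
 */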
626 normal_tx:
627 if (length < BNXT_MIN_PKT_SIZE) {
628 pad = BNXT_MIN_PKT_SIZE - length;
629 if (skb_pad(skb, pad))
630 /* SKB already freed. */
631 goto tx_kick_pending;
632 length = BNXT_MIN_PKT_SIZE;
633 }
634
635 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
636
637 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
638 goto tx_free;
639
640 dma_unmap_addr_set(tx_buf, mapping, mapping);
641 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
642 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
643
644 txbd->tx_bd_haddr = cpu_to_le64(mapping);
645 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
646
647 prod = NEXT_TX(prod);
648 txbd1 = (struct tx_bd_ext *)
649 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
650
651 txbd1->tx_bd_hsize_lflags = lflags;
652 if (skb_is_gso(skb)) {
653 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
654 u32 hdr_len;
655
656 if (skb->encapsulation) {
657 if (udp_gso)
658 hdr_len = skb_inner_transport_offset(skb) +
659 sizeof(struct udphdr);
660 else
661 hdr_len = skb_inner_tcp_all_headers(skb);
662 } else if (udp_gso) {
663 hdr_len = skb_transport_offset(skb) +
664 sizeof(struct udphdr);
665 } else {
666 hdr_len = skb_tcp_all_headers(skb);
667 }
668
669 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
670 TX_BD_FLAGS_T_IPID |
671 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
672 length = skb_shinfo(skb)->gso_size;
673 txbd1->tx_bd_mss = cpu_to_le32(length);
674 length += hdr_len;
675 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
676 txbd1->tx_bd_hsize_lflags |=
677 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
678 txbd1->tx_bd_mss = 0;
679 }
680
681 length >>= 9;
682 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
683 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
684 skb->len);
685 i = 0;
686 goto tx_dma_error;
687 }
688 flags |= bnxt_lhint_arr[length];
689 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
690
691 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
692 txbd1->tx_bd_cfa_action =
693 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
694 txbd0 = txbd;
695 for (i = 0; i < last_frag; i++) {
696 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
697
698 prod = NEXT_TX(prod);
699 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
700
701 len = skb_frag_size(frag);
702 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
703 DMA_TO_DEVICE);
704
705 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
706 goto tx_dma_error;
707
708 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
709 dma_unmap_addr_set(tx_buf, mapping, mapping);
710
711 txbd->tx_bd_haddr = cpu_to_le64(mapping);
712
713 flags = len << TX_BD_LEN_SHIFT;
714 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
715 }
716
717 flags &= ~TX_BD_LEN;
718 txbd->tx_bd_len_flags_type =
719 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
720 TX_BD_FLAGS_PACKET_END);
721
722 netdev_tx_sent_queue(txq, skb->len);
723
724 skb_tx_timestamp(skb);
725
726 prod = NEXT_TX(prod);
727 WRITE_ONCE(txr->tx_prod, prod);
728
729 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
730 bnxt_txr_db_kick(bp, txr, prod);
731 } else {
732 if (free_size >= bp->tx_wake_thresh)
733 txbd0->tx_bd_len_flags_type |=
734 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
735 txr->kick_pending = 1;
736 }
737
738 tx_done:
739
740 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
741 if (netdev_xmit_more() && !tx_buf->is_push) {
742 txbd0->tx_bd_len_flags_type &=
743 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
744 bnxt_txr_db_kick(bp, txr, prod);
745 }
746
747 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
748 bp->tx_wake_thresh);
749 }
750 return NETDEV_TX_OK;
751
752 tx_dma_error:
753 last_frag = i;
754
755 /* start back at beginning and unmap skb */
756 prod = txr->tx_prod;
757 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
758 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
759 skb_headlen(skb), DMA_TO_DEVICE);
760 prod = NEXT_TX(prod);
761
762 /* unmap remaining mapped pages */
763 for (i = 0; i < last_frag; i++) {
764 prod = NEXT_TX(prod);
765 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
766 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
767 skb_frag_size(&skb_shinfo(skb)->frags[i]),
768 DMA_TO_DEVICE);
769 }
770
771 tx_free:
772 dev_kfree_skb_any(skb);
773 tx_kick_pending:
774 if (BNXT_TX_PTP_IS_SET(lflags)) {
775 txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
776 atomic64_inc(&bp->ptp_cfg->stats.ts_err);
777 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
778 /* set SKB to err so PTP worker will clean up */
779 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
780 }
781 if (txr->kick_pending)
782 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
783 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
784 dev_core_stats_tx_dropped_inc(dev);
785 return NETDEV_TX_OK;
786 }
787
788 /* Returns true if some TX packets remain unprocessed. */
789 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
790 int budget)
791 {
792 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
793 struct pci_dev *pdev = bp->pdev;
794 u16 hw_cons = txr->tx_hw_cons;
795 unsigned int tx_bytes = 0;
796 u16 cons = txr->tx_cons;
797 int tx_pkts = 0;
798 bool rc = false;
799
800 while (RING_TX(bp, cons) != hw_cons) {
801 struct bnxt_sw_tx_bd *tx_buf;
802 struct sk_buff *skb;
803 bool is_ts_pkt;
804 int j, last;
805
806 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
807 skb = tx_buf->skb;
808
809 if (unlikely(!skb)) {
810 bnxt_sched_reset_txr(bp, txr, cons);
811 return rc;
812 }
813
814 is_ts_pkt = tx_buf->is_ts_pkt;
815 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
816 rc = true;
817 break;
818 }
819
820 cons = NEXT_TX(cons);
821 tx_pkts++;
822 tx_bytes += skb->len;
823 tx_buf->skb = NULL;
824 tx_buf->is_ts_pkt = 0;
825
826 if (tx_buf->is_push) {
827 tx_buf->is_push = 0;
828 goto next_tx_int;
829 }
830
831 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
832 skb_headlen(skb), DMA_TO_DEVICE);
833 last = tx_buf->nr_frags;
834
835 for (j = 0; j < last; j++) {
836 cons = NEXT_TX(cons);
837 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
838 dma_unmap_page(
839 &pdev->dev,
840 dma_unmap_addr(tx_buf, mapping),
841 skb_frag_size(&skb_shinfo(skb)->frags[j]),
842 DMA_TO_DEVICE);
843 }
844 if (unlikely(is_ts_pkt)) {
845 if (BNXT_CHIP_P5(bp)) {
846 /* PTP worker takes ownership of the skb */
847 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
848 skb = NULL;
849 }
850 }
851
852 next_tx_int:
853 cons = NEXT_TX(cons);
854
855 dev_consume_skb_any(skb);
856 }
857
858 WRITE_ONCE(txr->tx_cons, cons);
859
860 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
861 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
862 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
863
864 return rc;
865 }
866
867 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
868 {
869 struct bnxt_tx_ring_info *txr;
870 bool more = false;
871 int i;
872
873 bnxt_for_each_napi_tx(i, bnapi, txr) {
874 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
875 more |= __bnxt_tx_int(bp, txr, budget);
876 }
877 if (!more)
878 bnapi->events &= ~BNXT_TX_CMP_EVENT;
879 }
880
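/* A separate head page pool is only needed when the system page size
 * is larger than the RX page size.
 */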
881 static bool bnxt_separate_head_pool(void)
882 {
883 return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
884 }
885
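/* Allocate an RX buffer page from the page pool.  When PAGE_SIZE is
 * larger than BNXT_RX_PAGE_SIZE, a page fragment is used instead of a
 * full page.
 */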
886 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
887 struct bnxt_rx_ring_info *rxr,
888 unsigned int *offset,
889 gfp_t gfp)
890 {
891 struct page *page;
892
893 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
894 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
895 BNXT_RX_PAGE_SIZE);
896 } else {
897 page = page_pool_dev_alloc_pages(rxr->page_pool);
898 *offset = 0;
899 }
900 if (!page)
901 return NULL;
902
903 *mapping = page_pool_get_dma_addr(page) + *offset;
904 return page;
905 }
906
907 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
908 struct bnxt_rx_ring_info *rxr,
909 gfp_t gfp)
910 {
911 unsigned int offset;
912 struct page *page;
913
914 page = page_pool_alloc_frag(rxr->head_pool, &offset,
915 bp->rx_buf_size, gfp);
916 if (!page)
917 return NULL;
918
919 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
920 return page_address(page) + offset;
921 }
922
923 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
924 u16 prod, gfp_t gfp)
925 {
926 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
927 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
928 dma_addr_t mapping;
929
930 if (BNXT_RX_PAGE_MODE(bp)) {
931 unsigned int offset;
932 struct page *page =
933 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
934
935 if (!page)
936 return -ENOMEM;
937
938 mapping += bp->rx_dma_offset;
939 rx_buf->data = page;
940 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
941 } else {
942 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
943
944 if (!data)
945 return -ENOMEM;
946
947 rx_buf->data = data;
948 rx_buf->data_ptr = data + bp->rx_offset;
949 }
950 rx_buf->mapping = mapping;
951
952 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
953 return 0;
954 }
955
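/* Recycle an RX buffer by moving it from the consumer slot back to the
 * current producer slot, reusing its existing DMA mapping.
 */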
956 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
957 {
958 u16 prod = rxr->rx_prod;
959 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
960 struct bnxt *bp = rxr->bnapi->bp;
961 struct rx_bd *cons_bd, *prod_bd;
962
963 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
964 cons_rx_buf = &rxr->rx_buf_ring[cons];
965
966 prod_rx_buf->data = data;
967 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
968
969 prod_rx_buf->mapping = cons_rx_buf->mapping;
970
971 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
972 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
973
974 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
975 }
976
977 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
978 {
979 u16 next, max = rxr->rx_agg_bmap_size;
980
981 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
982 if (next >= max)
983 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
984 return next;
985 }
986
987 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
988 struct bnxt_rx_ring_info *rxr,
989 u16 prod, gfp_t gfp)
990 {
991 struct rx_bd *rxbd =
992 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
993 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
994 struct page *page;
995 dma_addr_t mapping;
996 u16 sw_prod = rxr->rx_sw_agg_prod;
997 unsigned int offset = 0;
998
999 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
1000
1001 if (!page)
1002 return -ENOMEM;
1003
1004 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1005 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1006
1007 __set_bit(sw_prod, rxr->rx_agg_bmap);
1008 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1009 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1010
1011 rx_agg_buf->page = page;
1012 rx_agg_buf->offset = offset;
1013 rx_agg_buf->mapping = mapping;
1014 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1015 rxbd->rx_bd_opaque = sw_prod;
1016 return 0;
1017 }
1018
1019 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1020 struct bnxt_cp_ring_info *cpr,
1021 u16 cp_cons, u16 curr)
1022 {
1023 struct rx_agg_cmp *agg;
1024
1025 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1026 agg = (struct rx_agg_cmp *)
1027 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1028 return agg;
1029 }
1030
1031 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1032 struct bnxt_rx_ring_info *rxr,
1033 u16 agg_id, u16 curr)
1034 {
1035 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1036
1037 return &tpa_info->agg_arr[curr];
1038 }
1039
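/* Return a run of aggregation buffers to the aggregation ring so they
 * can be reused, e.g. when an skb could not be built for the packet.
 */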
1040 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1041 u16 start, u32 agg_bufs, bool tpa)
1042 {
1043 struct bnxt_napi *bnapi = cpr->bnapi;
1044 struct bnxt *bp = bnapi->bp;
1045 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1046 u16 prod = rxr->rx_agg_prod;
1047 u16 sw_prod = rxr->rx_sw_agg_prod;
1048 bool p5_tpa = false;
1049 u32 i;
1050
1051 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1052 p5_tpa = true;
1053
1054 for (i = 0; i < agg_bufs; i++) {
1055 u16 cons;
1056 struct rx_agg_cmp *agg;
1057 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1058 struct rx_bd *prod_bd;
1059 struct page *page;
1060
1061 if (p5_tpa)
1062 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1063 else
1064 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1065 cons = agg->rx_agg_cmp_opaque;
1066 __clear_bit(cons, rxr->rx_agg_bmap);
1067
1068 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1069 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1070
1071 __set_bit(sw_prod, rxr->rx_agg_bmap);
1072 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1073 cons_rx_buf = &rxr->rx_agg_ring[cons];
1074
1075 /* It is possible for sw_prod to be equal to cons, so
1076 * set cons_rx_buf->page to NULL first.
1077 */
1078 page = cons_rx_buf->page;
1079 cons_rx_buf->page = NULL;
1080 prod_rx_buf->page = page;
1081 prod_rx_buf->offset = cons_rx_buf->offset;
1082
1083 prod_rx_buf->mapping = cons_rx_buf->mapping;
1084
1085 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1086
1087 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1088 prod_bd->rx_bd_opaque = sw_prod;
1089
1090 prod = NEXT_RX_AGG(prod);
1091 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1092 }
1093 rxr->rx_agg_prod = prod;
1094 rxr->rx_sw_agg_prod = sw_prod;
1095 }
1096
1097 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1098 struct bnxt_rx_ring_info *rxr,
1099 u16 cons, void *data, u8 *data_ptr,
1100 dma_addr_t dma_addr,
1101 unsigned int offset_and_len)
1102 {
1103 unsigned int len = offset_and_len & 0xffff;
1104 struct page *page = data;
1105 u16 prod = rxr->rx_prod;
1106 struct sk_buff *skb;
1107 int err;
1108
1109 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1110 if (unlikely(err)) {
1111 bnxt_reuse_rx_data(rxr, cons, data);
1112 return NULL;
1113 }
1114 dma_addr -= bp->rx_dma_offset;
1115 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1116 bp->rx_dir);
1117 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1118 if (!skb) {
1119 page_pool_recycle_direct(rxr->page_pool, page);
1120 return NULL;
1121 }
1122 skb_mark_for_recycle(skb);
1123 skb_reserve(skb, bp->rx_offset);
1124 __skb_put(skb, len);
1125
1126 return skb;
1127 }
1128
1129 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1130 struct bnxt_rx_ring_info *rxr,
1131 u16 cons, void *data, u8 *data_ptr,
1132 dma_addr_t dma_addr,
1133 unsigned int offset_and_len)
1134 {
1135 unsigned int payload = offset_and_len >> 16;
1136 unsigned int len = offset_and_len & 0xffff;
1137 skb_frag_t *frag;
1138 struct page *page = data;
1139 u16 prod = rxr->rx_prod;
1140 struct sk_buff *skb;
1141 int off, err;
1142
1143 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1144 if (unlikely(err)) {
1145 bnxt_reuse_rx_data(rxr, cons, data);
1146 return NULL;
1147 }
1148 dma_addr -= bp->rx_dma_offset;
1149 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1150 bp->rx_dir);
1151
1152 if (unlikely(!payload))
1153 payload = eth_get_headlen(bp->dev, data_ptr, len);
1154
1155 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1156 if (!skb) {
1157 page_pool_recycle_direct(rxr->page_pool, page);
1158 return NULL;
1159 }
1160
1161 skb_mark_for_recycle(skb);
1162 off = (void *)data_ptr - page_address(page);
1163 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1164 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1165 payload + NET_IP_ALIGN);
1166
1167 frag = &skb_shinfo(skb)->frags[0];
1168 skb_frag_size_sub(frag, payload);
1169 skb_frag_off_add(frag, payload);
1170 skb->data_len -= payload;
1171 skb->tail += payload;
1172
1173 return skb;
1174 }
1175
1176 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1177 struct bnxt_rx_ring_info *rxr, u16 cons,
1178 void *data, u8 *data_ptr,
1179 dma_addr_t dma_addr,
1180 unsigned int offset_and_len)
1181 {
1182 u16 prod = rxr->rx_prod;
1183 struct sk_buff *skb;
1184 int err;
1185
1186 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1187 if (unlikely(err)) {
1188 bnxt_reuse_rx_data(rxr, cons, data);
1189 return NULL;
1190 }
1191
1192 skb = napi_build_skb(data, bp->rx_buf_size);
1193 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1194 bp->rx_dir);
1195 if (!skb) {
1196 page_pool_free_va(rxr->head_pool, data, true);
1197 return NULL;
1198 }
1199
1200 skb_mark_for_recycle(skb);
1201 skb_reserve(skb, bp->rx_offset);
1202 skb_put(skb, offset_and_len & 0xffff);
1203 return skb;
1204 }
1205
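/* Gather the aggregation buffers of a packet into the shared info frag
 * list (for an skb or an xdp_buff).  Returns the total fragment length,
 * or 0 if a replacement aggregation page could not be allocated and the
 * buffers were recycled back to the ring.
 */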
1206 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1207 struct bnxt_cp_ring_info *cpr,
1208 struct skb_shared_info *shinfo,
1209 u16 idx, u32 agg_bufs, bool tpa,
1210 struct xdp_buff *xdp)
1211 {
1212 struct bnxt_napi *bnapi = cpr->bnapi;
1213 struct pci_dev *pdev = bp->pdev;
1214 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1215 u16 prod = rxr->rx_agg_prod;
1216 u32 i, total_frag_len = 0;
1217 bool p5_tpa = false;
1218
1219 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1220 p5_tpa = true;
1221
1222 for (i = 0; i < agg_bufs; i++) {
1223 skb_frag_t *frag = &shinfo->frags[i];
1224 u16 cons, frag_len;
1225 struct rx_agg_cmp *agg;
1226 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1227 struct page *page;
1228 dma_addr_t mapping;
1229
1230 if (p5_tpa)
1231 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1232 else
1233 agg = bnxt_get_agg(bp, cpr, idx, i);
1234 cons = agg->rx_agg_cmp_opaque;
1235 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1236 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1237
1238 cons_rx_buf = &rxr->rx_agg_ring[cons];
1239 skb_frag_fill_page_desc(frag, cons_rx_buf->page,
1240 cons_rx_buf->offset, frag_len);
1241 shinfo->nr_frags = i + 1;
1242 __clear_bit(cons, rxr->rx_agg_bmap);
1243
1244 /* It is possible for bnxt_alloc_rx_page() to allocate
1245 * a sw_prod index that equals the cons index, so we
1246 * need to clear the cons entry now.
1247 */
1248 mapping = cons_rx_buf->mapping;
1249 page = cons_rx_buf->page;
1250 cons_rx_buf->page = NULL;
1251
1252 if (xdp && page_is_pfmemalloc(page))
1253 xdp_buff_set_frag_pfmemalloc(xdp);
1254
1255 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1256 --shinfo->nr_frags;
1257 cons_rx_buf->page = page;
1258
1259 /* Update prod since possibly some pages have been
1260 * allocated already.
1261 */
1262 rxr->rx_agg_prod = prod;
1263 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1264 return 0;
1265 }
1266
1267 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1268 bp->rx_dir);
1269
1270 total_frag_len += frag_len;
1271 prod = NEXT_RX_AGG(prod);
1272 }
1273 rxr->rx_agg_prod = prod;
1274 return total_frag_len;
1275 }
1276
1277 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1278 struct bnxt_cp_ring_info *cpr,
1279 struct sk_buff *skb, u16 idx,
1280 u32 agg_bufs, bool tpa)
1281 {
1282 struct skb_shared_info *shinfo = skb_shinfo(skb);
1283 u32 total_frag_len = 0;
1284
1285 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1286 agg_bufs, tpa, NULL);
1287 if (!total_frag_len) {
1288 skb_mark_for_recycle(skb);
1289 dev_kfree_skb(skb);
1290 return NULL;
1291 }
1292
1293 skb->data_len += total_frag_len;
1294 skb->len += total_frag_len;
1295 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
1296 return skb;
1297 }
1298
1299 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1300 struct bnxt_cp_ring_info *cpr,
1301 struct xdp_buff *xdp, u16 idx,
1302 u32 agg_bufs, bool tpa)
1303 {
1304 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1305 u32 total_frag_len = 0;
1306
1307 if (!xdp_buff_has_frags(xdp))
1308 shinfo->nr_frags = 0;
1309
1310 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1311 idx, agg_bufs, tpa, xdp);
1312 if (total_frag_len) {
1313 xdp_buff_set_frags_flag(xdp);
1314 shinfo->nr_frags = agg_bufs;
1315 shinfo->xdp_frags_size = total_frag_len;
1316 }
1317 return total_frag_len;
1318 }
1319
1320 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1321 u8 agg_bufs, u32 *raw_cons)
1322 {
1323 u16 last;
1324 struct rx_agg_cmp *agg;
1325
1326 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1327 last = RING_CMP(*raw_cons);
1328 agg = (struct rx_agg_cmp *)
1329 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1330 return RX_AGG_CMP_VALID(agg, *raw_cons);
1331 }
1332
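/* Copy a small received packet into a newly allocated skb (copybreak
 * path), syncing the source buffer for the CPU and back to the device
 * so the original RX buffer can be left in place.
 */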
1333 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1334 unsigned int len,
1335 dma_addr_t mapping)
1336 {
1337 struct bnxt *bp = bnapi->bp;
1338 struct pci_dev *pdev = bp->pdev;
1339 struct sk_buff *skb;
1340
1341 skb = napi_alloc_skb(&bnapi->napi, len);
1342 if (!skb)
1343 return NULL;
1344
1345 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1346 bp->rx_dir);
1347
1348 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1349 len + NET_IP_ALIGN);
1350
1351 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1352 bp->rx_dir);
1353
1354 skb_put(skb, len);
1355
1356 return skb;
1357 }
1358
1359 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1360 unsigned int len,
1361 dma_addr_t mapping)
1362 {
1363 return bnxt_copy_data(bnapi, data, len, mapping);
1364 }
1365
1366 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1367 struct xdp_buff *xdp,
1368 unsigned int len,
1369 dma_addr_t mapping)
1370 {
1371 unsigned int metasize = 0;
1372 u8 *data = xdp->data;
1373 struct sk_buff *skb;
1374
1375 len = xdp->data_end - xdp->data_meta;
1376 metasize = xdp->data - xdp->data_meta;
1377 data = xdp->data_meta;
1378
1379 skb = bnxt_copy_data(bnapi, data, len, mapping);
1380 if (!skb)
1381 return skb;
1382
1383 if (metasize) {
1384 skb_metadata_set(skb, metasize);
1385 __skb_pull(skb, metasize);
1386 }
1387
1388 return skb;
1389 }
1390
1391 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1392 u32 *raw_cons, void *cmp)
1393 {
1394 struct rx_cmp *rxcmp = cmp;
1395 u32 tmp_raw_cons = *raw_cons;
1396 u8 cmp_type, agg_bufs = 0;
1397
1398 cmp_type = RX_CMP_TYPE(rxcmp);
1399
1400 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1401 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1402 RX_CMP_AGG_BUFS) >>
1403 RX_CMP_AGG_BUFS_SHIFT;
1404 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1405 struct rx_tpa_end_cmp *tpa_end = cmp;
1406
1407 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1408 return 0;
1409
1410 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1411 }
1412
1413 if (agg_bufs) {
1414 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1415 return -EBUSY;
1416 }
1417 *raw_cons = tmp_raw_cons;
1418 return 0;
1419 }
1420
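/* On P5+ chips the hardware aggregation ID is remapped to a free
 * software index via a bitmap and lookup table.
 */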
1421 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1422 {
1423 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1424 u16 idx = agg_id & MAX_TPA_P5_MASK;
1425
1426 if (test_bit(idx, map->agg_idx_bmap))
1427 idx = find_first_zero_bit(map->agg_idx_bmap,
1428 BNXT_AGG_IDX_BMAP_SIZE);
1429 __set_bit(idx, map->agg_idx_bmap);
1430 map->agg_id_tbl[agg_id] = idx;
1431 return idx;
1432 }
1433
1434 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1435 {
1436 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1437
1438 __clear_bit(idx, map->agg_idx_bmap);
1439 }
1440
1441 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1442 {
1443 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1444
1445 return map->agg_id_tbl[agg_id];
1446 }
1447
1448 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1449 struct rx_tpa_start_cmp *tpa_start,
1450 struct rx_tpa_start_cmp_ext *tpa_start1)
1451 {
1452 tpa_info->cfa_code_valid = 1;
1453 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1454 tpa_info->vlan_valid = 0;
1455 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1456 tpa_info->vlan_valid = 1;
1457 tpa_info->metadata =
1458 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1459 }
1460 }
1461
1462 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1463 struct rx_tpa_start_cmp *tpa_start,
1464 struct rx_tpa_start_cmp_ext *tpa_start1)
1465 {
1466 tpa_info->vlan_valid = 0;
1467 if (TPA_START_VLAN_VALID(tpa_start)) {
1468 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1469 u32 vlan_proto = ETH_P_8021Q;
1470
1471 tpa_info->vlan_valid = 1;
1472 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1473 vlan_proto = ETH_P_8021AD;
1474 tpa_info->metadata = vlan_proto << 16 |
1475 TPA_START_METADATA0_TCI(tpa_start1);
1476 }
1477 }
1478
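/* Handle a TPA_START completion: stash the current RX buffer in the
 * per-aggregation tpa_info, give the ring a replacement buffer, and
 * record the metadata (VLAN, RSS hash, GSO type) for the aggregated
 * packet.
 */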
1479 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1480 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1481 struct rx_tpa_start_cmp_ext *tpa_start1)
1482 {
1483 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1484 struct bnxt_tpa_info *tpa_info;
1485 u16 cons, prod, agg_id;
1486 struct rx_bd *prod_bd;
1487 dma_addr_t mapping;
1488
1489 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1490 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1491 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1492 } else {
1493 agg_id = TPA_START_AGG_ID(tpa_start);
1494 }
1495 cons = tpa_start->rx_tpa_start_cmp_opaque;
1496 prod = rxr->rx_prod;
1497 cons_rx_buf = &rxr->rx_buf_ring[cons];
1498 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1499 tpa_info = &rxr->rx_tpa[agg_id];
1500
1501 if (unlikely(cons != rxr->rx_next_cons ||
1502 TPA_START_ERROR(tpa_start))) {
1503 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1504 cons, rxr->rx_next_cons,
1505 TPA_START_ERROR_CODE(tpa_start1));
1506 bnxt_sched_reset_rxr(bp, rxr);
1507 return;
1508 }
1509 prod_rx_buf->data = tpa_info->data;
1510 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1511
1512 mapping = tpa_info->mapping;
1513 prod_rx_buf->mapping = mapping;
1514
1515 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1516
1517 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1518
1519 tpa_info->data = cons_rx_buf->data;
1520 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1521 cons_rx_buf->data = NULL;
1522 tpa_info->mapping = cons_rx_buf->mapping;
1523
1524 tpa_info->len =
1525 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1526 RX_TPA_START_CMP_LEN_SHIFT;
1527 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1528 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1529 tpa_info->gso_type = SKB_GSO_TCPV4;
1530 if (TPA_START_IS_IPV6(tpa_start1))
1531 tpa_info->gso_type = SKB_GSO_TCPV6;
1532 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1533 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1534 TPA_START_HASH_TYPE(tpa_start) == 3)
1535 tpa_info->gso_type = SKB_GSO_TCPV6;
1536 tpa_info->rss_hash =
1537 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1538 } else {
1539 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1540 tpa_info->gso_type = 0;
1541 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1542 }
1543 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1544 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1545 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1546 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1547 else
1548 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1549 tpa_info->agg_count = 0;
1550
1551 rxr->rx_prod = NEXT_RX(prod);
1552 cons = RING_RX(bp, NEXT_RX(cons));
1553 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1554 cons_rx_buf = &rxr->rx_buf_ring[cons];
1555
1556 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1557 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1558 cons_rx_buf->data = NULL;
1559 }
1560
1561 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1562 {
1563 if (agg_bufs)
1564 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1565 }
1566
1567 #ifdef CONFIG_INET
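/* For tunneled packets, set the UDP tunnel GSO type based on whether
 * the outer UDP header carries a checksum.
 */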
1568 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1569 {
1570 struct udphdr *uh = NULL;
1571
1572 if (ip_proto == htons(ETH_P_IP)) {
1573 struct iphdr *iph = (struct iphdr *)skb->data;
1574
1575 if (iph->protocol == IPPROTO_UDP)
1576 uh = (struct udphdr *)(iph + 1);
1577 } else {
1578 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1579
1580 if (iph->nexthdr == IPPROTO_UDP)
1581 uh = (struct udphdr *)(iph + 1);
1582 }
1583 if (uh) {
1584 if (uh->check)
1585 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1586 else
1587 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1588 }
1589 }
1590 #endif
1591
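/* Chip-specific GRO fixups: rebuild the network/transport header
 * offsets and the TCP pseudo-header checksum so the stack can coalesce
 * the aggregated TPA packet.
 */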
1592 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1593 int payload_off, int tcp_ts,
1594 struct sk_buff *skb)
1595 {
1596 #ifdef CONFIG_INET
1597 struct tcphdr *th;
1598 int len, nw_off;
1599 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1600 u32 hdr_info = tpa_info->hdr_info;
1601 bool loopback = false;
1602
1603 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1604 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1605 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1606
1607 /* If the packet is an internal loopback packet, the offsets will
1608 * have an extra 4 bytes.
1609 */
1610 if (inner_mac_off == 4) {
1611 loopback = true;
1612 } else if (inner_mac_off > 4) {
1613 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1614 ETH_HLEN - 2));
1615
1616 /* We only support inner IPv4/IPv6. If we don't see the
1617 * correct protocol ID, it must be a loopback packet where
1618 * the offsets are off by 4.
1619 */
1620 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1621 loopback = true;
1622 }
1623 if (loopback) {
1624 /* internal loopback packet, subtract 4 from all offsets */
1625 inner_ip_off -= 4;
1626 inner_mac_off -= 4;
1627 outer_ip_off -= 4;
1628 }
1629
1630 nw_off = inner_ip_off - ETH_HLEN;
1631 skb_set_network_header(skb, nw_off);
1632 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1633 struct ipv6hdr *iph = ipv6_hdr(skb);
1634
1635 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1636 len = skb->len - skb_transport_offset(skb);
1637 th = tcp_hdr(skb);
1638 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1639 } else {
1640 struct iphdr *iph = ip_hdr(skb);
1641
1642 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1643 len = skb->len - skb_transport_offset(skb);
1644 th = tcp_hdr(skb);
1645 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1646 }
1647
1648 if (inner_mac_off) { /* tunnel */
1649 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1650 ETH_HLEN - 2));
1651
1652 bnxt_gro_tunnel(skb, proto);
1653 }
1654 #endif
1655 return skb;
1656 }
1657
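/* Chip-specific GRO completion fixup for the 5750X family.  The inner
 * header offsets come straight from the TPA hdr_info, so only the
 * network/transport header pointers need to be set; no TCP checksum
 * fixup is done here.
 */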
1658 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1659 int payload_off, int tcp_ts,
1660 struct sk_buff *skb)
1661 {
1662 #ifdef CONFIG_INET
1663 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1664 u32 hdr_info = tpa_info->hdr_info;
1665 int iphdr_len, nw_off;
1666
1667 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1668 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1669 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1670
1671 nw_off = inner_ip_off - ETH_HLEN;
1672 skb_set_network_header(skb, nw_off);
1673 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1674 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1675 skb_set_transport_header(skb, nw_off + iphdr_len);
1676
1677 if (inner_mac_off) { /* tunnel */
1678 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1679 ETH_HLEN - 2));
1680
1681 bnxt_gro_tunnel(skb, proto);
1682 }
1683 #endif
1684 return skb;
1685 }
1686
1687 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1688 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1689
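/* Chip-specific GRO completion fixup for the 5730X family.  Derive the
 * header offsets from the payload offset in the TPA end completion
 * (accounting for TCP timestamp options), recompute the TCP pseudo-header
 * checksum, and drop the skb if the GSO type is neither TCPv4 nor TCPv6.
 */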
1690 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1691 int payload_off, int tcp_ts,
1692 struct sk_buff *skb)
1693 {
1694 #ifdef CONFIG_INET
1695 struct tcphdr *th;
1696 int len, nw_off, tcp_opt_len = 0;
1697
1698 if (tcp_ts)
1699 tcp_opt_len = 12;
1700
1701 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1702 struct iphdr *iph;
1703
1704 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1705 ETH_HLEN;
1706 skb_set_network_header(skb, nw_off);
1707 iph = ip_hdr(skb);
1708 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1709 len = skb->len - skb_transport_offset(skb);
1710 th = tcp_hdr(skb);
1711 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1712 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1713 struct ipv6hdr *iph;
1714
1715 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1716 ETH_HLEN;
1717 skb_set_network_header(skb, nw_off);
1718 iph = ipv6_hdr(skb);
1719 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1720 len = skb->len - skb_transport_offset(skb);
1721 th = tcp_hdr(skb);
1722 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1723 } else {
1724 dev_kfree_skb_any(skb);
1725 return NULL;
1726 }
1727
1728 if (nw_off) /* tunnel */
1729 bnxt_gro_tunnel(skb, skb->protocol);
1730 #endif
1731 return skb;
1732 }
1733
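/* Finish GRO processing of an aggregated TPA packet: fill in the GRO
 * count, gso_size and gso_type, run the chip-specific gro_func() fixup,
 * and complete the TCP header.  Single-segment aggregations are returned
 * unchanged.
 */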
1734 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1735 struct bnxt_tpa_info *tpa_info,
1736 struct rx_tpa_end_cmp *tpa_end,
1737 struct rx_tpa_end_cmp_ext *tpa_end1,
1738 struct sk_buff *skb)
1739 {
1740 #ifdef CONFIG_INET
1741 int payload_off;
1742 u16 segs;
1743
1744 segs = TPA_END_TPA_SEGS(tpa_end);
1745 if (segs == 1)
1746 return skb;
1747
1748 NAPI_GRO_CB(skb)->count = segs;
1749 skb_shinfo(skb)->gso_size =
1750 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1751 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1752 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1753 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1754 else
1755 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1756 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1757 if (likely(skb))
1758 tcp_gro_complete(skb);
1759 #endif
1760 return skb;
1761 }
1762
1763 /* Given the cfa_code of a received packet determine which
1764 * netdev (vf-rep or PF) the packet is destined to.
1765 */
1766 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1767 {
1768 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1769
1770 /* if vf-rep dev is NULL, the packet must belong to the PF */
1771 return dev ? dev : bp->dev;
1772 }
1773
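/* Handle a TPA end completion.  Look up the TPA state, validate the
 * aggregation buffer count, build an skb (copying small packets, or
 * taking over the data buffer and replacing it for larger ones), attach
 * the aggregation pages, then apply RSS hash, VLAN and checksum offloads
 * and optionally hand the skb to GRO.
 */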
1774 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1775 struct bnxt_cp_ring_info *cpr,
1776 u32 *raw_cons,
1777 struct rx_tpa_end_cmp *tpa_end,
1778 struct rx_tpa_end_cmp_ext *tpa_end1,
1779 u8 *event)
1780 {
1781 struct bnxt_napi *bnapi = cpr->bnapi;
1782 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1783 struct net_device *dev = bp->dev;
1784 u8 *data_ptr, agg_bufs;
1785 unsigned int len;
1786 struct bnxt_tpa_info *tpa_info;
1787 dma_addr_t mapping;
1788 struct sk_buff *skb;
1789 u16 idx = 0, agg_id;
1790 void *data;
1791 bool gro;
1792
1793 if (unlikely(bnapi->in_reset)) {
1794 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1795
1796 if (rc < 0)
1797 return ERR_PTR(-EBUSY);
1798 return NULL;
1799 }
1800
1801 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1802 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1803 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1804 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1805 tpa_info = &rxr->rx_tpa[agg_id];
1806 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1807 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1808 agg_bufs, tpa_info->agg_count);
1809 agg_bufs = tpa_info->agg_count;
1810 }
1811 tpa_info->agg_count = 0;
1812 *event |= BNXT_AGG_EVENT;
1813 bnxt_free_agg_idx(rxr, agg_id);
1814 idx = agg_id;
1815 gro = !!(bp->flags & BNXT_FLAG_GRO);
1816 } else {
1817 agg_id = TPA_END_AGG_ID(tpa_end);
1818 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1819 tpa_info = &rxr->rx_tpa[agg_id];
1820 idx = RING_CMP(*raw_cons);
1821 if (agg_bufs) {
1822 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1823 return ERR_PTR(-EBUSY);
1824
1825 *event |= BNXT_AGG_EVENT;
1826 idx = NEXT_CMP(idx);
1827 }
1828 gro = !!TPA_END_GRO(tpa_end);
1829 }
1830 data = tpa_info->data;
1831 data_ptr = tpa_info->data_ptr;
1832 prefetch(data_ptr);
1833 len = tpa_info->len;
1834 mapping = tpa_info->mapping;
1835
1836 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1837 bnxt_abort_tpa(cpr, idx, agg_bufs);
1838 if (agg_bufs > MAX_SKB_FRAGS)
1839 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1840 agg_bufs, (int)MAX_SKB_FRAGS);
1841 return NULL;
1842 }
1843
1844 if (len <= bp->rx_copybreak) {
1845 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1846 if (!skb) {
1847 bnxt_abort_tpa(cpr, idx, agg_bufs);
1848 cpr->sw_stats->rx.rx_oom_discards += 1;
1849 return NULL;
1850 }
1851 } else {
1852 u8 *new_data;
1853 dma_addr_t new_mapping;
1854
1855 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1856 GFP_ATOMIC);
1857 if (!new_data) {
1858 bnxt_abort_tpa(cpr, idx, agg_bufs);
1859 cpr->sw_stats->rx.rx_oom_discards += 1;
1860 return NULL;
1861 }
1862
1863 tpa_info->data = new_data;
1864 tpa_info->data_ptr = new_data + bp->rx_offset;
1865 tpa_info->mapping = new_mapping;
1866
1867 skb = napi_build_skb(data, bp->rx_buf_size);
1868 dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1869 bp->rx_buf_use_size, bp->rx_dir);
1870
1871 if (!skb) {
1872 page_pool_free_va(rxr->head_pool, data, true);
1873 bnxt_abort_tpa(cpr, idx, agg_bufs);
1874 cpr->sw_stats->rx.rx_oom_discards += 1;
1875 return NULL;
1876 }
1877 skb_mark_for_recycle(skb);
1878 skb_reserve(skb, bp->rx_offset);
1879 skb_put(skb, len);
1880 }
1881
1882 if (agg_bufs) {
1883 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1884 if (!skb) {
1885 /* Page reuse already handled by bnxt_rx_pages(). */
1886 cpr->sw_stats->rx.rx_oom_discards += 1;
1887 return NULL;
1888 }
1889 }
1890
1891 if (tpa_info->cfa_code_valid)
1892 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1893 skb->protocol = eth_type_trans(skb, dev);
1894
1895 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1896 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1897
1898 if (tpa_info->vlan_valid &&
1899 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1900 __be16 vlan_proto = htons(tpa_info->metadata >>
1901 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1902 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1903
1904 if (eth_type_vlan(vlan_proto)) {
1905 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1906 } else {
1907 dev_kfree_skb(skb);
1908 return NULL;
1909 }
1910 }
1911
1912 skb_checksum_none_assert(skb);
1913 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1914 skb->ip_summed = CHECKSUM_UNNECESSARY;
1915 skb->csum_level =
1916 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1917 }
1918
1919 if (gro)
1920 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1921
1922 return skb;
1923 }
1924
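/* Stash a TPA aggregation completion in the per-aggregation array until
 * the corresponding TPA end completion arrives.
 */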
1925 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1926 struct rx_agg_cmp *rx_agg)
1927 {
1928 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1929 struct bnxt_tpa_info *tpa_info;
1930
1931 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1932 tpa_info = &rxr->rx_tpa[agg_id];
1933 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1934 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1935 }
1936
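/* Deliver a completed skb: packets destined for a VF representor go
 * through the vf-rep path, everything else is passed to napi_gro_receive()
 * on this ring's queue.
 */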
1937 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1938 struct sk_buff *skb)
1939 {
1940 skb_mark_for_recycle(skb);
1941
1942 if (skb->dev != bp->dev) {
1943 /* this packet belongs to a vf-rep */
1944 bnxt_vf_rep_rx(bp, skb);
1945 return;
1946 }
1947 skb_record_rx_queue(skb, bnapi->index);
1948 napi_gro_receive(&bnapi->napi, skb);
1949 }
1950
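/* Return true and extract the completion timestamp if it is valid, either
 * because the PTP RX timestamp flag is set or because timestamping of all
 * RX packets is enabled and the completion carries a valid timestamp.
 */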
1951 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1952 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
1953 {
1954 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1955
1956 if (BNXT_PTP_RX_TS_VALID(flags))
1957 goto ts_valid;
1958 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1959 return false;
1960
1961 ts_valid:
1962 *cmpl_ts = ts;
1963 return true;
1964 }
1965
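/* Extract the VLAN TPID/TCI from the RX completion (legacy or V3 format)
 * and attach the tag to the skb.  Packets with an unrecognized TPID are
 * dropped.
 */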
1966 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
1967 struct rx_cmp *rxcmp,
1968 struct rx_cmp_ext *rxcmp1)
1969 {
1970 __be16 vlan_proto;
1971 u16 vtag;
1972
1973 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1974 __le32 flags2 = rxcmp1->rx_cmp_flags2;
1975 u32 meta_data;
1976
1977 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
1978 return skb;
1979
1980 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1981 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1982 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
1983 if (eth_type_vlan(vlan_proto))
1984 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1985 else
1986 goto vlan_err;
1987 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
1988 if (RX_CMP_VLAN_VALID(rxcmp)) {
1989 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
1990
1991 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
1992 vlan_proto = htons(ETH_P_8021Q);
1993 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
1994 vlan_proto = htons(ETH_P_8021AD);
1995 else
1996 goto vlan_err;
1997 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
1998 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1999 }
2000 }
2001 return skb;
2002 vlan_err:
2003 dev_kfree_skb(skb);
2004 return NULL;
2005 }
2006
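/* Map the V3 RSS hash extraction op to a packet hash type: the 4-tuple
 * ops indicate an L4 hash, everything else is treated as L3.
 */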
2007 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2008 struct rx_cmp *rxcmp)
2009 {
2010 u8 ext_op;
2011
2012 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2013 switch (ext_op) {
2014 case EXT_OP_INNER_4:
2015 case EXT_OP_OUTER_4:
2016 case EXT_OP_INNFL_3:
2017 case EXT_OP_OUTFL_3:
2018 return PKT_HASH_TYPE_L4;
2019 default:
2020 return PKT_HASH_TYPE_L3;
2021 }
2022 }
2023
2024 /* returns the following:
2025 * 1 - 1 packet successfully received
2026 * 0 - successful TPA_START, packet not completed yet
2027 * -EBUSY - completion ring does not have all the agg buffers yet
2028 * -ENOMEM - packet aborted due to out of memory
2029 * -EIO - packet aborted due to hw error indicated in BD
2030 */
2031 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2032 u32 *raw_cons, u8 *event)
2033 {
2034 struct bnxt_napi *bnapi = cpr->bnapi;
2035 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2036 struct net_device *dev = bp->dev;
2037 struct rx_cmp *rxcmp;
2038 struct rx_cmp_ext *rxcmp1;
2039 u32 tmp_raw_cons = *raw_cons;
2040 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2041 struct skb_shared_info *sinfo;
2042 struct bnxt_sw_rx_bd *rx_buf;
2043 unsigned int len;
2044 u8 *data_ptr, agg_bufs, cmp_type;
2045 bool xdp_active = false;
2046 dma_addr_t dma_addr;
2047 struct sk_buff *skb;
2048 struct xdp_buff xdp;
2049 u32 flags, misc;
2050 u32 cmpl_ts;
2051 void *data;
2052 int rc = 0;
2053
2054 rxcmp = (struct rx_cmp *)
2055 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2056
2057 cmp_type = RX_CMP_TYPE(rxcmp);
2058
2059 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2060 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2061 goto next_rx_no_prod_no_len;
2062 }
2063
2064 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2065 cp_cons = RING_CMP(tmp_raw_cons);
2066 rxcmp1 = (struct rx_cmp_ext *)
2067 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2068
2069 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2070 return -EBUSY;
2071
2072 /* The valid test of the entry must be done first before
2073 * reading any further.
2074 */
2075 dma_rmb();
2076 prod = rxr->rx_prod;
2077
2078 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2079 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2080 bnxt_tpa_start(bp, rxr, cmp_type,
2081 (struct rx_tpa_start_cmp *)rxcmp,
2082 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2083
2084 *event |= BNXT_RX_EVENT;
2085 goto next_rx_no_prod_no_len;
2086
2087 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2088 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2089 (struct rx_tpa_end_cmp *)rxcmp,
2090 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2091
2092 if (IS_ERR(skb))
2093 return -EBUSY;
2094
2095 rc = -ENOMEM;
2096 if (likely(skb)) {
2097 bnxt_deliver_skb(bp, bnapi, skb);
2098 rc = 1;
2099 }
2100 *event |= BNXT_RX_EVENT;
2101 goto next_rx_no_prod_no_len;
2102 }
2103
2104 cons = rxcmp->rx_cmp_opaque;
2105 if (unlikely(cons != rxr->rx_next_cons)) {
2106 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2107
2108 /* 0xffff is a forced error, don't print it */
2109 if (rxr->rx_next_cons != 0xffff)
2110 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2111 cons, rxr->rx_next_cons);
2112 bnxt_sched_reset_rxr(bp, rxr);
2113 if (rc1)
2114 return rc1;
2115 goto next_rx_no_prod_no_len;
2116 }
2117 rx_buf = &rxr->rx_buf_ring[cons];
2118 data = rx_buf->data;
2119 data_ptr = rx_buf->data_ptr;
2120 prefetch(data_ptr);
2121
2122 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2123 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2124
2125 if (agg_bufs) {
2126 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2127 return -EBUSY;
2128
2129 cp_cons = NEXT_CMP(cp_cons);
2130 *event |= BNXT_AGG_EVENT;
2131 }
2132 *event |= BNXT_RX_EVENT;
2133
2134 rx_buf->data = NULL;
2135 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2136 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2137
2138 bnxt_reuse_rx_data(rxr, cons, data);
2139 if (agg_bufs)
2140 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2141 false);
2142
2143 rc = -EIO;
2144 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2145 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2146 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2147 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2148 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2149 rx_err);
2150 bnxt_sched_reset_rxr(bp, rxr);
2151 }
2152 }
2153 goto next_rx_no_len;
2154 }
2155
2156 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2157 len = flags >> RX_CMP_LEN_SHIFT;
2158 dma_addr = rx_buf->mapping;
2159
2160 if (bnxt_xdp_attached(bp, rxr)) {
2161 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2162 if (agg_bufs) {
2163 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2164 cp_cons, agg_bufs,
2165 false);
2166 if (!frag_len)
2167 goto oom_next_rx;
2168
2169 }
2170 xdp_active = true;
2171 }
2172
2173 if (xdp_active) {
2174 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2175 rc = 1;
2176 goto next_rx;
2177 }
2178 if (xdp_buff_has_frags(&xdp)) {
2179 sinfo = xdp_get_shared_info_from_buff(&xdp);
2180 agg_bufs = sinfo->nr_frags;
2181 } else {
2182 agg_bufs = 0;
2183 }
2184 }
2185
2186 if (len <= bp->rx_copybreak) {
2187 if (!xdp_active)
2188 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2189 else
2190 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2191 bnxt_reuse_rx_data(rxr, cons, data);
2192 if (!skb) {
2193 if (agg_bufs) {
2194 if (!xdp_active)
2195 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2196 agg_bufs, false);
2197 else
2198 bnxt_xdp_buff_frags_free(rxr, &xdp);
2199 }
2200 goto oom_next_rx;
2201 }
2202 } else {
2203 u32 payload;
2204
2205 if (rx_buf->data_ptr == data_ptr)
2206 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2207 else
2208 payload = 0;
2209 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2210 payload | len);
2211 if (!skb)
2212 goto oom_next_rx;
2213 }
2214
2215 if (agg_bufs) {
2216 if (!xdp_active) {
2217 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2218 if (!skb)
2219 goto oom_next_rx;
2220 } else {
2221 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2222 rxr->page_pool, &xdp);
2223 if (!skb) {
2224 /* we should be able to free the old skb here */
2225 bnxt_xdp_buff_frags_free(rxr, &xdp);
2226 goto oom_next_rx;
2227 }
2228 }
2229 }
2230
2231 if (RX_CMP_HASH_VALID(rxcmp)) {
2232 enum pkt_hash_types type;
2233
2234 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2235 type = bnxt_rss_ext_op(bp, rxcmp);
2236 } else {
2237 u32 itypes = RX_CMP_ITYPES(rxcmp);
2238
2239 if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2240 itypes == RX_CMP_FLAGS_ITYPE_UDP)
2241 type = PKT_HASH_TYPE_L4;
2242 else
2243 type = PKT_HASH_TYPE_L3;
2244 }
2245 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2246 }
2247
2248 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2249 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2250 skb->protocol = eth_type_trans(skb, dev);
2251
2252 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2253 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2254 if (!skb)
2255 goto next_rx;
2256 }
2257
2258 skb_checksum_none_assert(skb);
2259 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2260 if (dev->features & NETIF_F_RXCSUM) {
2261 skb->ip_summed = CHECKSUM_UNNECESSARY;
2262 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2263 }
2264 } else {
2265 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2266 if (dev->features & NETIF_F_RXCSUM)
2267 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2268 }
2269 }
2270
2271 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2272 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2273 u64 ns, ts;
2274
2275 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2276 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2277
2278 ns = bnxt_timecounter_cyc2time(ptp, ts);
2279 memset(skb_hwtstamps(skb), 0,
2280 sizeof(*skb_hwtstamps(skb)));
2281 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2282 }
2283 }
2284 }
2285 bnxt_deliver_skb(bp, bnapi, skb);
2286 rc = 1;
2287
2288 next_rx:
2289 cpr->rx_packets += 1;
2290 cpr->rx_bytes += len;
2291
2292 next_rx_no_len:
2293 rxr->rx_prod = NEXT_RX(prod);
2294 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2295
2296 next_rx_no_prod_no_len:
2297 *raw_cons = tmp_raw_cons;
2298
2299 return rc;
2300
2301 oom_next_rx:
2302 cpr->sw_stats->rx.rx_oom_discards += 1;
2303 rc = -ENOMEM;
2304 goto next_rx;
2305 }
2306
2307 /* In netpoll mode, if we are using a combined completion ring, we need to
2308 * discard the rx packets and recycle the buffers.
2309 */
2310 static int bnxt_force_rx_discard(struct bnxt *bp,
2311 struct bnxt_cp_ring_info *cpr,
2312 u32 *raw_cons, u8 *event)
2313 {
2314 u32 tmp_raw_cons = *raw_cons;
2315 struct rx_cmp_ext *rxcmp1;
2316 struct rx_cmp *rxcmp;
2317 u16 cp_cons;
2318 u8 cmp_type;
2319 int rc;
2320
2321 cp_cons = RING_CMP(tmp_raw_cons);
2322 rxcmp = (struct rx_cmp *)
2323 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2324
2325 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2326 cp_cons = RING_CMP(tmp_raw_cons);
2327 rxcmp1 = (struct rx_cmp_ext *)
2328 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2329
2330 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2331 return -EBUSY;
2332
2333 /* The valid test of the entry must be done first before
2334 * reading any further.
2335 */
2336 dma_rmb();
2337 cmp_type = RX_CMP_TYPE(rxcmp);
2338 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2339 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2340 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2341 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2342 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2343 struct rx_tpa_end_cmp_ext *tpa_end1;
2344
2345 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2346 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2347 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2348 }
2349 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2350 if (rc && rc != -EBUSY)
2351 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2352 return rc;
2353 }
2354
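/* Read a firmware health register.  The register descriptor encodes both
 * the access method (PCI config space, GRC window, BAR0 or BAR1) and the
 * offset to read.
 */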
2355 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2356 {
2357 struct bnxt_fw_health *fw_health = bp->fw_health;
2358 u32 reg = fw_health->regs[reg_idx];
2359 u32 reg_type, reg_off, val = 0;
2360
2361 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2362 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2363 switch (reg_type) {
2364 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2365 pci_read_config_dword(bp->pdev, reg_off, &val);
2366 break;
2367 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2368 reg_off = fw_health->mapped_regs[reg_idx];
2369 fallthrough;
2370 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2371 val = readl(bp->bar0 + reg_off);
2372 break;
2373 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2374 val = readl(bp->bar1 + reg_off);
2375 break;
2376 }
2377 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2378 val &= fw_health->fw_reset_inprog_reg_mask;
2379 return val;
2380 }
2381
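/* Translate a firmware aggregation ring ID to the driver's ring group
 * index, or INVALID_HW_RING_ID if no group owns that ring.
 */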
2382 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2383 {
2384 int i;
2385
2386 for (i = 0; i < bp->rx_nr_rings; i++) {
2387 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2388 struct bnxt_ring_grp_info *grp_info;
2389
2390 grp_info = &bp->grp_info[grp_idx];
2391 if (grp_info->agg_fw_ring_id == ring_id)
2392 return grp_idx;
2393 }
2394 return INVALID_HW_RING_ID;
2395 }
2396
2397 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2398 {
2399 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2400
2401 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2402 return link_info->force_link_speed2;
2403 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2404 return link_info->force_pam4_link_speed;
2405 return link_info->force_link_speed;
2406 }
2407
2408 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2409 {
2410 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2411
2412 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2413 link_info->req_link_speed = link_info->force_link_speed2;
2414 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2415 switch (link_info->req_link_speed) {
2416 case BNXT_LINK_SPEED_50GB_PAM4:
2417 case BNXT_LINK_SPEED_100GB_PAM4:
2418 case BNXT_LINK_SPEED_200GB_PAM4:
2419 case BNXT_LINK_SPEED_400GB_PAM4:
2420 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2421 break;
2422 case BNXT_LINK_SPEED_100GB_PAM4_112:
2423 case BNXT_LINK_SPEED_200GB_PAM4_112:
2424 case BNXT_LINK_SPEED_400GB_PAM4_112:
2425 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2426 break;
2427 default:
2428 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2429 }
2430 return;
2431 }
2432 link_info->req_link_speed = link_info->force_link_speed;
2433 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2434 if (link_info->force_pam4_link_speed) {
2435 link_info->req_link_speed = link_info->force_pam4_link_speed;
2436 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2437 }
2438 }
2439
2440 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2441 {
2442 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2443
2444 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2445 link_info->advertising = link_info->auto_link_speeds2;
2446 return;
2447 }
2448 link_info->advertising = link_info->auto_link_speeds;
2449 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2450 }
2451
2452 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2453 {
2454 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2455
2456 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2457 if (link_info->req_link_speed != link_info->force_link_speed2)
2458 return true;
2459 return false;
2460 }
2461 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2462 link_info->req_link_speed != link_info->force_link_speed)
2463 return true;
2464 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2465 link_info->req_link_speed != link_info->force_pam4_link_speed)
2466 return true;
2467 return false;
2468 }
2469
2470 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2471 {
2472 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2473
2474 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2475 if (link_info->advertising != link_info->auto_link_speeds2)
2476 return true;
2477 return false;
2478 }
2479 if (link_info->advertising != link_info->auto_link_speeds ||
2480 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2481 return true;
2482 return false;
2483 }
2484
2485 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2486 {
2487 u32 flags = bp->ctx->ctx_arr[type].flags;
2488
2489 return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2490 ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2491 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2492 }
2493
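/* Locate the last byte of the firmware trace backing store and plant a
 * magic byte there, used later to detect buffer wrap-around.
 */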
2494 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2495 {
2496 u32 mem_size, pages, rem_bytes, magic_byte_offset;
2497 u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2498 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2499 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2500 struct bnxt_bs_trace_info *bs_trace;
2501 int last_pg;
2502
2503 if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2504 return;
2505
2506 mem_size = ctxm->max_entries * ctxm->entry_size;
2507 rem_bytes = mem_size % BNXT_PAGE_SIZE;
2508 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2509
2510 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2511 magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2512
2513 rmem = &ctx_pg[0].ring_mem;
2514 bs_trace = &bp->bs_trace[trace_type];
2515 bs_trace->ctx_type = ctxm->type;
2516 bs_trace->trace_type = trace_type;
2517 if (pages > MAX_CTX_PAGES) {
2518 int last_pg_dir = rmem->nr_pages - 1;
2519
2520 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2521 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2522 } else {
2523 bs_trace->magic_byte = rmem->pg_arr[last_pg];
2524 }
2525 bs_trace->magic_byte += magic_byte_offset;
2526 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2527 }
2528
2529 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
2530 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2531 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2532
2533 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
2534 (((data2) & \
2535 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2536 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2537
2538 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2539 ((data2) & \
2540 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2541
2542 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2543 (((data2) & \
2544 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2545 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2546
2547 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2548 ((data1) & \
2549 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2550
2551 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2552 (((data1) & \
2553 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2554 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2555
2556 /* Return true if the workqueue has to be scheduled */
2557 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2558 {
2559 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2560
2561 switch (err_type) {
2562 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2563 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2564 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2565 break;
2566 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2567 netdev_warn(bp->dev, "Pause Storm detected!\n");
2568 break;
2569 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2570 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2571 break;
2572 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2573 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2574 char *threshold_type;
2575 bool notify = false;
2576 char *dir_str;
2577
2578 switch (type) {
2579 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2580 threshold_type = "warning";
2581 break;
2582 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2583 threshold_type = "critical";
2584 break;
2585 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2586 threshold_type = "fatal";
2587 break;
2588 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2589 threshold_type = "shutdown";
2590 break;
2591 default:
2592 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2593 return false;
2594 }
2595 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2596 dir_str = "above";
2597 notify = true;
2598 } else {
2599 dir_str = "below";
2600 }
2601 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2602 dir_str, threshold_type);
2603 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2604 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2605 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2606 if (notify) {
2607 bp->thermal_threshold_type = type;
2608 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2609 return true;
2610 }
2611 return false;
2612 }
2613 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2614 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2615 break;
2616 default:
2617 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2618 err_type);
2619 break;
2620 }
2621 return false;
2622 }
2623
2624 #define BNXT_GET_EVENT_PORT(data) \
2625 ((data) & \
2626 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2627
2628 #define BNXT_EVENT_RING_TYPE(data2) \
2629 ((data2) & \
2630 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2631
2632 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2633 (BNXT_EVENT_RING_TYPE(data2) == \
2634 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2635
2636 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2637 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2638 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2639
2640 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2641 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2642 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2643
2644 #define BNXT_PHC_BITS 48
2645
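/* Decode a firmware async event completion, set the matching sp_event
 * bits and queue the slow-path workqueue when needed.  All events are
 * also forwarded to the ULP driver.
 */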
2646 static int bnxt_async_event_process(struct bnxt *bp,
2647 struct hwrm_async_event_cmpl *cmpl)
2648 {
2649 u16 event_id = le16_to_cpu(cmpl->event_id);
2650 u32 data1 = le32_to_cpu(cmpl->event_data1);
2651 u32 data2 = le32_to_cpu(cmpl->event_data2);
2652
2653 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2654 event_id, data1, data2);
2655
2656 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2657 switch (event_id) {
2658 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2659 struct bnxt_link_info *link_info = &bp->link_info;
2660
2661 if (BNXT_VF(bp))
2662 goto async_event_process_exit;
2663
2664 /* print unsupported speed warning in forced speed mode only */
2665 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2666 (data1 & 0x20000)) {
2667 u16 fw_speed = bnxt_get_force_speed(link_info);
2668 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2669
2670 if (speed != SPEED_UNKNOWN)
2671 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2672 speed);
2673 }
2674 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2675 }
2676 fallthrough;
2677 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2678 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2679 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2680 fallthrough;
2681 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2682 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2683 break;
2684 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2685 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2686 break;
2687 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2688 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2689
2690 if (BNXT_VF(bp))
2691 break;
2692
2693 if (bp->pf.port_id != port_id)
2694 break;
2695
2696 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2697 break;
2698 }
2699 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2700 if (BNXT_PF(bp))
2701 goto async_event_process_exit;
2702 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2703 break;
2704 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2705 char *type_str = "Solicited";
2706
2707 if (!bp->fw_health)
2708 goto async_event_process_exit;
2709
2710 bp->fw_reset_timestamp = jiffies;
2711 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2712 if (!bp->fw_reset_min_dsecs)
2713 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2714 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2715 if (!bp->fw_reset_max_dsecs)
2716 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2717 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2718 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2719 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2720 type_str = "Fatal";
2721 bp->fw_health->fatalities++;
2722 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2723 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2724 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2725 type_str = "Non-fatal";
2726 bp->fw_health->survivals++;
2727 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2728 }
2729 netif_warn(bp, hw, bp->dev,
2730 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2731 type_str, data1, data2,
2732 bp->fw_reset_min_dsecs * 100,
2733 bp->fw_reset_max_dsecs * 100);
2734 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2735 break;
2736 }
2737 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2738 struct bnxt_fw_health *fw_health = bp->fw_health;
2739 char *status_desc = "healthy";
2740 u32 status;
2741
2742 if (!fw_health)
2743 goto async_event_process_exit;
2744
2745 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2746 fw_health->enabled = false;
2747 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2748 break;
2749 }
2750 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2751 fw_health->tmr_multiplier =
2752 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2753 bp->current_interval * 10);
2754 fw_health->tmr_counter = fw_health->tmr_multiplier;
2755 if (!fw_health->enabled)
2756 fw_health->last_fw_heartbeat =
2757 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2758 fw_health->last_fw_reset_cnt =
2759 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2760 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2761 if (status != BNXT_FW_STATUS_HEALTHY)
2762 status_desc = "unhealthy";
2763 netif_info(bp, drv, bp->dev,
2764 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2765 fw_health->primary ? "primary" : "backup", status,
2766 status_desc, fw_health->last_fw_reset_cnt);
2767 if (!fw_health->enabled) {
2768 /* Make sure tmr_counter is set and visible to
2769 * bnxt_health_check() before setting enabled to true.
2770 */
2771 smp_wmb();
2772 fw_health->enabled = true;
2773 }
2774 goto async_event_process_exit;
2775 }
2776 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2777 netif_notice(bp, hw, bp->dev,
2778 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2779 data1, data2);
2780 goto async_event_process_exit;
2781 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2782 struct bnxt_rx_ring_info *rxr;
2783 u16 grp_idx;
2784
2785 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2786 goto async_event_process_exit;
2787
2788 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2789 BNXT_EVENT_RING_TYPE(data2), data1);
2790 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2791 goto async_event_process_exit;
2792
2793 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2794 if (grp_idx == INVALID_HW_RING_ID) {
2795 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2796 data1);
2797 goto async_event_process_exit;
2798 }
2799 rxr = bp->bnapi[grp_idx]->rx_ring;
2800 bnxt_sched_reset_rxr(bp, rxr);
2801 goto async_event_process_exit;
2802 }
2803 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2804 struct bnxt_fw_health *fw_health = bp->fw_health;
2805
2806 netif_notice(bp, hw, bp->dev,
2807 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2808 data1, data2);
2809 if (fw_health) {
2810 fw_health->echo_req_data1 = data1;
2811 fw_health->echo_req_data2 = data2;
2812 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2813 break;
2814 }
2815 goto async_event_process_exit;
2816 }
2817 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2818 bnxt_ptp_pps_event(bp, data1, data2);
2819 goto async_event_process_exit;
2820 }
2821 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2822 if (bnxt_event_error_report(bp, data1, data2))
2823 break;
2824 goto async_event_process_exit;
2825 }
2826 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2827 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2828 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2829 if (BNXT_PTP_USE_RTC(bp)) {
2830 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2831 unsigned long flags;
2832 u64 ns;
2833
2834 if (!ptp)
2835 goto async_event_process_exit;
2836
2837 bnxt_ptp_update_current_time(bp);
2838 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2839 BNXT_PHC_BITS) | ptp->current_time);
2840 write_seqlock_irqsave(&ptp->ptp_lock, flags);
2841 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2842 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2843 }
2844 break;
2845 }
2846 goto async_event_process_exit;
2847 }
2848 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2849 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2850
2851 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2852 goto async_event_process_exit;
2853 }
2854 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2855 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2856 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2857
2858 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2859 goto async_event_process_exit;
2860 }
2861 default:
2862 goto async_event_process_exit;
2863 }
2864 __bnxt_queue_sp_work(bp);
2865 async_event_process_exit:
2866 bnxt_ulp_async_events(bp, cmpl);
2867 return 0;
2868 }
2869
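/* Handle HWRM completions seen on the completion ring: DONE completions
 * update the pending request token, forwarded VF requests are queued to
 * the slow-path workqueue, and async events are decoded by
 * bnxt_async_event_process().
 */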
2870 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2871 {
2872 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2873 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2874 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2875 (struct hwrm_fwd_req_cmpl *)txcmp;
2876
2877 switch (cmpl_type) {
2878 case CMPL_BASE_TYPE_HWRM_DONE:
2879 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2880 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2881 break;
2882
2883 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2884 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2885
2886 if ((vf_id < bp->pf.first_vf_id) ||
2887 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2888 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2889 vf_id);
2890 return -EINVAL;
2891 }
2892
2893 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2894 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2895 break;
2896
2897 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2898 bnxt_async_event_process(bp,
2899 (struct hwrm_async_event_cmpl *)txcmp);
2900 break;
2901
2902 default:
2903 break;
2904 }
2905
2906 return 0;
2907 }
2908
2909 static bool bnxt_vnic_is_active(struct bnxt *bp)
2910 {
2911 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2912
2913 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2914 }
2915
2916 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2917 {
2918 struct bnxt_napi *bnapi = dev_instance;
2919 struct bnxt *bp = bnapi->bp;
2920 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2921 u32 cons = RING_CMP(cpr->cp_raw_cons);
2922
2923 cpr->event_ctr++;
2924 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2925 napi_schedule(&bnapi->napi);
2926 return IRQ_HANDLED;
2927 }
2928
2929 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2930 {
2931 u32 raw_cons = cpr->cp_raw_cons;
2932 u16 cons = RING_CMP(raw_cons);
2933 struct tx_cmp *txcmp;
2934
2935 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2936
2937 return TX_CMP_VALID(txcmp, raw_cons);
2938 }
2939
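/* Core completion ring poll loop.  Process TX, RX and HWRM completions up
 * to the NAPI budget, accumulate doorbell events for later, and set
 * cpr->has_more_work if the budget was exhausted before the ring was
 * drained.
 */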
2940 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2941 int budget)
2942 {
2943 struct bnxt_napi *bnapi = cpr->bnapi;
2944 u32 raw_cons = cpr->cp_raw_cons;
2945 u32 cons;
2946 int rx_pkts = 0;
2947 u8 event = 0;
2948 struct tx_cmp *txcmp;
2949
2950 cpr->has_more_work = 0;
2951 cpr->had_work_done = 1;
2952 while (1) {
2953 u8 cmp_type;
2954 int rc;
2955
2956 cons = RING_CMP(raw_cons);
2957 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2958
2959 if (!TX_CMP_VALID(txcmp, raw_cons))
2960 break;
2961
2962 /* The valid test of the entry must be done first before
2963 * reading any further.
2964 */
2965 dma_rmb();
2966 cmp_type = TX_CMP_TYPE(txcmp);
2967 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
2968 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
2969 u32 opaque = txcmp->tx_cmp_opaque;
2970 struct bnxt_tx_ring_info *txr;
2971 u16 tx_freed;
2972
2973 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
2974 event |= BNXT_TX_CMP_EVENT;
2975 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
2976 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
2977 else
2978 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2979 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
2980 bp->tx_ring_mask;
2981 /* return full budget so NAPI will complete. */
2982 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
2983 rx_pkts = budget;
2984 raw_cons = NEXT_RAW_CMP(raw_cons);
2985 if (budget)
2986 cpr->has_more_work = 1;
2987 break;
2988 }
2989 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
2990 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
2991 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
2992 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2993 if (likely(budget))
2994 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2995 else
2996 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2997 &event);
2998 if (likely(rc >= 0))
2999 rx_pkts += rc;
3000 /* Increment rx_pkts when rc is -ENOMEM to count towards
3001 * the NAPI budget. Otherwise, we may potentially loop
3002 * here forever if we consistently cannot allocate
3003 * buffers.
3004 */
3005 else if (rc == -ENOMEM && budget)
3006 rx_pkts++;
3007 else if (rc == -EBUSY) /* partial completion */
3008 break;
3009 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3010 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3011 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3012 bnxt_hwrm_handler(bp, txcmp);
3013 }
3014 raw_cons = NEXT_RAW_CMP(raw_cons);
3015
3016 if (rx_pkts && rx_pkts == budget) {
3017 cpr->has_more_work = 1;
3018 break;
3019 }
3020 }
3021
3022 if (event & BNXT_REDIRECT_EVENT) {
3023 xdp_do_flush();
3024 event &= ~BNXT_REDIRECT_EVENT;
3025 }
3026
3027 if (event & BNXT_TX_EVENT) {
3028 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3029 u16 prod = txr->tx_prod;
3030
3031 /* Sync BD data before updating doorbell */
3032 wmb();
3033
3034 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3035 event &= ~BNXT_TX_EVENT;
3036 }
3037
3038 cpr->cp_raw_cons = raw_cons;
3039 bnapi->events |= event;
3040 return rx_pkts;
3041 }
3042
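/* Finish a poll pass: reclaim completed TX buffers and ring the RX and
 * aggregation ring doorbells for any buffers replenished during the poll.
 */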
3043 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3044 int budget)
3045 {
3046 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3047 bnapi->tx_int(bp, bnapi, budget);
3048
3049 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3050 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3051
3052 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3053 bnapi->events &= ~BNXT_RX_EVENT;
3054 }
3055 if (bnapi->events & BNXT_AGG_EVENT) {
3056 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3057
3058 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3059 bnapi->events &= ~BNXT_AGG_EVENT;
3060 }
3061 }
3062
3063 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3064 int budget)
3065 {
3066 struct bnxt_napi *bnapi = cpr->bnapi;
3067 int rx_pkts;
3068
3069 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3070
3071 /* ACK completion ring before freeing tx ring and producing new
3072 * buffers in rx/agg rings to prevent overflowing the completion
3073 * ring.
3074 */
3075 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3076
3077 __bnxt_poll_work_done(bp, bnapi, budget);
3078 return rx_pkts;
3079 }
3080
3081 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3082 {
3083 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3084 struct bnxt *bp = bnapi->bp;
3085 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3086 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3087 struct tx_cmp *txcmp;
3088 struct rx_cmp_ext *rxcmp1;
3089 u32 cp_cons, tmp_raw_cons;
3090 u32 raw_cons = cpr->cp_raw_cons;
3091 bool flush_xdp = false;
3092 u32 rx_pkts = 0;
3093 u8 event = 0;
3094
3095 while (1) {
3096 int rc;
3097
3098 cp_cons = RING_CMP(raw_cons);
3099 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3100
3101 if (!TX_CMP_VALID(txcmp, raw_cons))
3102 break;
3103
3104 /* The valid test of the entry must be done first before
3105 * reading any further.
3106 */
3107 dma_rmb();
3108 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3109 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3110 cp_cons = RING_CMP(tmp_raw_cons);
3111 rxcmp1 = (struct rx_cmp_ext *)
3112 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3113
3114 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3115 break;
3116
3117 /* force an error to recycle the buffer */
3118 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3119 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3120
3121 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3122 if (likely(rc == -EIO) && budget)
3123 rx_pkts++;
3124 else if (rc == -EBUSY) /* partial completion */
3125 break;
3126 if (event & BNXT_REDIRECT_EVENT)
3127 flush_xdp = true;
3128 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3129 CMPL_BASE_TYPE_HWRM_DONE)) {
3130 bnxt_hwrm_handler(bp, txcmp);
3131 } else {
3132 netdev_err(bp->dev,
3133 "Invalid completion received on special ring\n");
3134 }
3135 raw_cons = NEXT_RAW_CMP(raw_cons);
3136
3137 if (rx_pkts == budget)
3138 break;
3139 }
3140
3141 cpr->cp_raw_cons = raw_cons;
3142 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3143 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3144
3145 if (event & BNXT_AGG_EVENT)
3146 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3147 if (flush_xdp)
3148 xdp_do_flush();
3149
3150 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3151 napi_complete_done(napi, rx_pkts);
3152 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3153 }
3154 return rx_pkts;
3155 }
3156
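/* Main NAPI poll handler.  Loop in bnxt_poll_work() until the budget is
 * exhausted or there is no more work, then feed the DIM sample used for
 * interrupt moderation.
 */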
3157 static int bnxt_poll(struct napi_struct *napi, int budget)
3158 {
3159 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3160 struct bnxt *bp = bnapi->bp;
3161 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3162 int work_done = 0;
3163
3164 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3165 napi_complete(napi);
3166 return 0;
3167 }
3168 while (1) {
3169 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3170
3171 if (work_done >= budget) {
3172 if (!budget)
3173 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3174 break;
3175 }
3176
3177 if (!bnxt_has_work(bp, cpr)) {
3178 if (napi_complete_done(napi, work_done))
3179 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3180 break;
3181 }
3182 }
3183 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3184 struct dim_sample dim_sample = {};
3185
3186 dim_update_sample(cpr->event_ctr,
3187 cpr->rx_packets,
3188 cpr->rx_bytes,
3189 &dim_sample);
3190 net_dim(&cpr->dim, &dim_sample);
3191 }
3192 return work_done;
3193 }
3194
3195 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3196 {
3197 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3198 int i, work_done = 0;
3199
3200 for (i = 0; i < cpr->cp_ring_count; i++) {
3201 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3202
3203 if (cpr2->had_nqe_notify) {
3204 work_done += __bnxt_poll_work(bp, cpr2,
3205 budget - work_done);
3206 cpr->has_more_work |= cpr2->has_more_work;
3207 }
3208 }
3209 return work_done;
3210 }
3211
3212 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3213 u64 dbr_type, int budget)
3214 {
3215 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3216 int i;
3217
3218 for (i = 0; i < cpr->cp_ring_count; i++) {
3219 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3220 struct bnxt_db_info *db;
3221
3222 if (cpr2->had_work_done) {
3223 u32 tgl = 0;
3224
3225 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3226 cpr2->had_nqe_notify = 0;
3227 tgl = cpr2->toggle;
3228 }
3229 db = &cpr2->cp_db;
3230 bnxt_writeq(bp,
3231 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3232 DB_RING_IDX(db, cpr2->cp_raw_cons),
3233 db->doorbell);
3234 cpr2->had_work_done = 0;
3235 }
3236 }
3237 __bnxt_poll_work_done(bp, bnapi, budget);
3238 }
3239
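/* NAPI poll handler for P5 and later chips.  Walk the notification queue,
 * service each signalled completion ring, then re-arm the completion
 * rings and the NQ once all work is done.
 */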
3240 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3241 {
3242 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3243 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3244 struct bnxt_cp_ring_info *cpr_rx;
3245 u32 raw_cons = cpr->cp_raw_cons;
3246 struct bnxt *bp = bnapi->bp;
3247 struct nqe_cn *nqcmp;
3248 int work_done = 0;
3249 u32 cons;
3250
3251 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3252 napi_complete(napi);
3253 return 0;
3254 }
3255 if (cpr->has_more_work) {
3256 cpr->has_more_work = 0;
3257 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3258 }
3259 while (1) {
3260 u16 type;
3261
3262 cons = RING_CMP(raw_cons);
3263 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3264
3265 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3266 if (cpr->has_more_work)
3267 break;
3268
3269 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3270 budget);
3271 cpr->cp_raw_cons = raw_cons;
3272 if (napi_complete_done(napi, work_done))
3273 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3274 cpr->cp_raw_cons);
3275 goto poll_done;
3276 }
3277
3278 /* The valid test of the entry must be done first before
3279 * reading any further.
3280 */
3281 dma_rmb();
3282
3283 type = le16_to_cpu(nqcmp->type);
3284 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3285 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3286 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3287 struct bnxt_cp_ring_info *cpr2;
3288
3289 /* No more budget for RX work */
3290 if (budget && work_done >= budget &&
3291 cq_type == BNXT_NQ_HDL_TYPE_RX)
3292 break;
3293
3294 idx = BNXT_NQ_HDL_IDX(idx);
3295 cpr2 = &cpr->cp_ring_arr[idx];
3296 cpr2->had_nqe_notify = 1;
3297 cpr2->toggle = NQE_CN_TOGGLE(type);
3298 work_done += __bnxt_poll_work(bp, cpr2,
3299 budget - work_done);
3300 cpr->has_more_work |= cpr2->has_more_work;
3301 } else {
3302 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3303 }
3304 raw_cons = NEXT_RAW_CMP(raw_cons);
3305 }
3306 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3307 if (raw_cons != cpr->cp_raw_cons) {
3308 cpr->cp_raw_cons = raw_cons;
3309 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3310 }
3311 poll_done:
3312 cpr_rx = &cpr->cp_ring_arr[0];
3313 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3314 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3315 struct dim_sample dim_sample = {};
3316
3317 dim_update_sample(cpr->event_ctr,
3318 cpr_rx->rx_packets,
3319 cpr_rx->rx_bytes,
3320 &dim_sample);
3321 net_dim(&cpr->dim, &dim_sample);
3322 }
3323 return work_done;
3324 }
3325
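/* Free every pending TX buffer, including XDP_REDIRECT frames, unmap the
 * DMA mappings and reset the TX queues.
 */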
3326 static void bnxt_free_tx_skbs(struct bnxt *bp)
3327 {
3328 int i, max_idx;
3329 struct pci_dev *pdev = bp->pdev;
3330
3331 if (!bp->tx_ring)
3332 return;
3333
3334 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3335 for (i = 0; i < bp->tx_nr_rings; i++) {
3336 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3337 int j;
3338
3339 if (!txr->tx_buf_ring)
3340 continue;
3341
3342 for (j = 0; j < max_idx;) {
3343 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
3344 struct sk_buff *skb;
3345 int k, last;
3346
3347 if (i < bp->tx_nr_rings_xdp &&
3348 tx_buf->action == XDP_REDIRECT) {
3349 dma_unmap_single(&pdev->dev,
3350 dma_unmap_addr(tx_buf, mapping),
3351 dma_unmap_len(tx_buf, len),
3352 DMA_TO_DEVICE);
3353 xdp_return_frame(tx_buf->xdpf);
3354 tx_buf->action = 0;
3355 tx_buf->xdpf = NULL;
3356 j++;
3357 continue;
3358 }
3359
3360 skb = tx_buf->skb;
3361 if (!skb) {
3362 j++;
3363 continue;
3364 }
3365
3366 tx_buf->skb = NULL;
3367
3368 if (tx_buf->is_push) {
3369 dev_kfree_skb(skb);
3370 j += 2;
3371 continue;
3372 }
3373
3374 dma_unmap_single(&pdev->dev,
3375 dma_unmap_addr(tx_buf, mapping),
3376 skb_headlen(skb),
3377 DMA_TO_DEVICE);
3378
3379 last = tx_buf->nr_frags;
3380 j += 2;
3381 for (k = 0; k < last; k++, j++) {
3382 int ring_idx = j & bp->tx_ring_mask;
3383 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
3384
3385 tx_buf = &txr->tx_buf_ring[ring_idx];
3386 dma_unmap_page(
3387 &pdev->dev,
3388 dma_unmap_addr(tx_buf, mapping),
3389 skb_frag_size(frag), DMA_TO_DEVICE);
3390 }
3391 dev_kfree_skb(skb);
3392 }
3393 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3394 }
3395 }
3396
3397 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3398 {
3399 int i, max_idx;
3400
3401 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3402
3403 for (i = 0; i < max_idx; i++) {
3404 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3405 void *data = rx_buf->data;
3406
3407 if (!data)
3408 continue;
3409
3410 rx_buf->data = NULL;
3411 if (BNXT_RX_PAGE_MODE(bp))
3412 page_pool_recycle_direct(rxr->page_pool, data);
3413 else
3414 page_pool_free_va(rxr->head_pool, data, true);
3415 }
3416 }
3417
3418 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3419 {
3420 int i, max_idx;
3421
3422 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3423
3424 for (i = 0; i < max_idx; i++) {
3425 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3426 struct page *page = rx_agg_buf->page;
3427
3428 if (!page)
3429 continue;
3430
3431 rx_agg_buf->page = NULL;
3432 __clear_bit(i, rxr->rx_agg_bmap);
3433
3434 page_pool_recycle_direct(rxr->page_pool, page);
3435 }
3436 }
3437
3438 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3439 struct bnxt_rx_ring_info *rxr)
3440 {
3441 int i;
3442
3443 for (i = 0; i < bp->max_tpa; i++) {
3444 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3445 u8 *data = tpa_info->data;
3446
3447 if (!data)
3448 continue;
3449
3450 tpa_info->data = NULL;
3451 page_pool_free_va(rxr->head_pool, data, false);
3452 }
3453 }
3454
3455 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3456 struct bnxt_rx_ring_info *rxr)
3457 {
3458 struct bnxt_tpa_idx_map *map;
3459
3460 if (!rxr->rx_tpa)
3461 goto skip_rx_tpa_free;
3462
3463 bnxt_free_one_tpa_info_data(bp, rxr);
3464
3465 skip_rx_tpa_free:
3466 if (!rxr->rx_buf_ring)
3467 goto skip_rx_buf_free;
3468
3469 bnxt_free_one_rx_ring(bp, rxr);
3470
3471 skip_rx_buf_free:
3472 if (!rxr->rx_agg_ring)
3473 goto skip_rx_agg_free;
3474
3475 bnxt_free_one_rx_agg_ring(bp, rxr);
3476
3477 skip_rx_agg_free:
3478 map = rxr->rx_tpa_idx_map;
3479 if (map)
3480 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3481 }
3482
3483 static void bnxt_free_rx_skbs(struct bnxt *bp)
3484 {
3485 int i;
3486
3487 if (!bp->rx_ring)
3488 return;
3489
3490 for (i = 0; i < bp->rx_nr_rings; i++)
3491 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3492 }
3493
3494 static void bnxt_free_skbs(struct bnxt *bp)
3495 {
3496 bnxt_free_tx_skbs(bp);
3497 bnxt_free_rx_skbs(bp);
3498 }
3499
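/* Pre-initialize a block of firmware context memory with the init
 * value advertised by the firmware: either fill the whole block, or
 * write the value only at init_offset within each fixed-size entry.
 */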
3500 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3501 {
3502 u8 init_val = ctxm->init_value;
3503 u16 offset = ctxm->init_offset;
3504 u8 *p2 = p;
3505 int i;
3506
3507 if (!init_val)
3508 return;
3509 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3510 memset(p, init_val, len);
3511 return;
3512 }
3513 for (i = 0; i < len; i += ctxm->entry_size)
3514 *(p2 + i + offset) = init_val;
3515 }
3516
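/* Copy the bytes between 'head' and 'tail' of a paged ring's backing
 * store into a linear buffer at 'offset', one page at a time. When
 * 'buf' is NULL only the length is computed; the return value is the
 * number of bytes copied (or that would have been copied).
 */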
3517 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3518 void *buf, size_t offset, size_t head,
3519 size_t tail)
3520 {
3521 int i, head_page, start_idx, source_offset;
3522 size_t len, rem_len, total_len, max_bytes;
3523
3524 head_page = head / rmem->page_size;
3525 source_offset = head % rmem->page_size;
3526 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3527 if (!total_len)
3528 total_len = MAX_CTX_BYTES;
3529 start_idx = head_page % MAX_CTX_PAGES;
3530 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3531 source_offset;
3532 total_len = min(total_len, max_bytes);
3533 rem_len = total_len;
3534
3535 for (i = start_idx; rem_len; i++, source_offset = 0) {
3536 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3537 if (buf)
3538 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3539 len);
3540 offset += len;
3541 rem_len -= len;
3542 }
3543 return total_len;
3544 }
3545
3546 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3547 {
3548 struct pci_dev *pdev = bp->pdev;
3549 int i;
3550
3551 if (!rmem->pg_arr)
3552 goto skip_pages;
3553
3554 for (i = 0; i < rmem->nr_pages; i++) {
3555 if (!rmem->pg_arr[i])
3556 continue;
3557
3558 dma_free_coherent(&pdev->dev, rmem->page_size,
3559 rmem->pg_arr[i], rmem->dma_arr[i]);
3560
3561 rmem->pg_arr[i] = NULL;
3562 }
3563 skip_pages:
3564 if (rmem->pg_tbl) {
3565 size_t pg_tbl_size = rmem->nr_pages * 8;
3566
3567 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3568 pg_tbl_size = rmem->page_size;
3569 dma_free_coherent(&pdev->dev, pg_tbl_size,
3570 rmem->pg_tbl, rmem->pg_tbl_map);
3571 rmem->pg_tbl = NULL;
3572 }
3573 if (rmem->vmem_size && *rmem->vmem) {
3574 vfree(*rmem->vmem);
3575 *rmem->vmem = NULL;
3576 }
3577 }
3578
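/* Allocate the DMA-coherent pages backing a ring and, for multi-page
 * or indirect rings, the page table that points at them. Rings that
 * use hardware PTEs get PTU_PTE_VALID plus NEXT_TO_LAST/LAST markers
 * in the page-table entries. An optional vzalloc'ed shadow array
 * (vmem) is allocated for software bookkeeping.
 */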
3579 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3580 {
3581 struct pci_dev *pdev = bp->pdev;
3582 u64 valid_bit = 0;
3583 int i;
3584
3585 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3586 valid_bit = PTU_PTE_VALID;
3587 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3588 size_t pg_tbl_size = rmem->nr_pages * 8;
3589
3590 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3591 pg_tbl_size = rmem->page_size;
3592 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3593 &rmem->pg_tbl_map,
3594 GFP_KERNEL);
3595 if (!rmem->pg_tbl)
3596 return -ENOMEM;
3597 }
3598
3599 for (i = 0; i < rmem->nr_pages; i++) {
3600 u64 extra_bits = valid_bit;
3601
3602 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3603 rmem->page_size,
3604 &rmem->dma_arr[i],
3605 GFP_KERNEL);
3606 if (!rmem->pg_arr[i])
3607 return -ENOMEM;
3608
3609 if (rmem->ctx_mem)
3610 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3611 rmem->page_size);
3612 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3613 if (i == rmem->nr_pages - 2 &&
3614 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3615 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3616 else if (i == rmem->nr_pages - 1 &&
3617 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3618 extra_bits |= PTU_PTE_LAST;
3619 rmem->pg_tbl[i] =
3620 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3621 }
3622 }
3623
3624 if (rmem->vmem_size) {
3625 *rmem->vmem = vzalloc(rmem->vmem_size);
3626 if (!(*rmem->vmem))
3627 return -ENOMEM;
3628 }
3629 return 0;
3630 }
3631
3632 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3633 struct bnxt_rx_ring_info *rxr)
3634 {
3635 int i;
3636
3637 kfree(rxr->rx_tpa_idx_map);
3638 rxr->rx_tpa_idx_map = NULL;
3639 if (rxr->rx_tpa) {
3640 for (i = 0; i < bp->max_tpa; i++) {
3641 kfree(rxr->rx_tpa[i].agg_arr);
3642 rxr->rx_tpa[i].agg_arr = NULL;
3643 }
3644 }
3645 kfree(rxr->rx_tpa);
3646 rxr->rx_tpa = NULL;
3647 }
3648
3649 static void bnxt_free_tpa_info(struct bnxt *bp)
3650 {
3651 int i;
3652
3653 for (i = 0; i < bp->rx_nr_rings; i++) {
3654 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3655
3656 bnxt_free_one_tpa_info(bp, rxr);
3657 }
3658 }
3659
3660 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3661 struct bnxt_rx_ring_info *rxr)
3662 {
3663 struct rx_agg_cmp *agg;
3664 int i;
3665
3666 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3667 GFP_KERNEL);
3668 if (!rxr->rx_tpa)
3669 return -ENOMEM;
3670
3671 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3672 return 0;
3673 for (i = 0; i < bp->max_tpa; i++) {
3674 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3675 if (!agg)
3676 return -ENOMEM;
3677 rxr->rx_tpa[i].agg_arr = agg;
3678 }
3679 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3680 GFP_KERNEL);
3681 if (!rxr->rx_tpa_idx_map)
3682 return -ENOMEM;
3683
3684 return 0;
3685 }
3686
3687 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3688 {
3689 int i, rc;
3690
3691 bp->max_tpa = MAX_TPA;
3692 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3693 if (!bp->max_tpa_v2)
3694 return 0;
3695 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3696 }
3697
3698 for (i = 0; i < bp->rx_nr_rings; i++) {
3699 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3700
3701 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3702 if (rc)
3703 return rc;
3704 }
3705 return 0;
3706 }
3707
3708 static void bnxt_free_rx_rings(struct bnxt *bp)
3709 {
3710 int i;
3711
3712 if (!bp->rx_ring)
3713 return;
3714
3715 bnxt_free_tpa_info(bp);
3716 for (i = 0; i < bp->rx_nr_rings; i++) {
3717 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3718 struct bnxt_ring_struct *ring;
3719
3720 if (rxr->xdp_prog)
3721 bpf_prog_put(rxr->xdp_prog);
3722
3723 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3724 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3725
3726 page_pool_destroy(rxr->page_pool);
3727 if (bnxt_separate_head_pool())
3728 page_pool_destroy(rxr->head_pool);
3729 rxr->page_pool = rxr->head_pool = NULL;
3730
3731 kfree(rxr->rx_agg_bmap);
3732 rxr->rx_agg_bmap = NULL;
3733
3734 ring = &rxr->rx_ring_struct;
3735 bnxt_free_ring(bp, &ring->ring_mem);
3736
3737 ring = &rxr->rx_agg_ring_struct;
3738 bnxt_free_ring(bp, &ring->ring_mem);
3739 }
3740 }
3741
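/* Create the page_pool(s) for one RX ring on the given NUMA node. The
 * main pool backs the aggregation ring (plus the RX ring in page
 * mode); when header buffers live in a separate pool, a second pool
 * sized for the RX ring is created for them.
 */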
3742 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3743 struct bnxt_rx_ring_info *rxr,
3744 int numa_node)
3745 {
3746 struct page_pool_params pp = { 0 };
3747 struct page_pool *pool;
3748
3749 pp.pool_size = bp->rx_agg_ring_size;
3750 if (BNXT_RX_PAGE_MODE(bp))
3751 pp.pool_size += bp->rx_ring_size;
3752 pp.nid = numa_node;
3753 pp.napi = &rxr->bnapi->napi;
3754 pp.netdev = bp->dev;
3755 pp.dev = &bp->pdev->dev;
3756 pp.dma_dir = bp->rx_dir;
3757 pp.max_len = PAGE_SIZE;
3758 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3759
3760 pool = page_pool_create(&pp);
3761 if (IS_ERR(pool))
3762 return PTR_ERR(pool);
3763 rxr->page_pool = pool;
3764
3765 if (bnxt_separate_head_pool()) {
3766 pp.pool_size = max(bp->rx_ring_size, 1024);
3767 pool = page_pool_create(&pp);
3768 if (IS_ERR(pool))
3769 goto err_destroy_pp;
3770 }
3771 rxr->head_pool = pool;
3772
3773 return 0;
3774
3775 err_destroy_pp:
3776 page_pool_destroy(rxr->page_pool);
3777 rxr->page_pool = NULL;
3778 return PTR_ERR(pool);
3779 }
3780
3781 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3782 {
3783 u16 mem_size;
3784
3785 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3786 mem_size = rxr->rx_agg_bmap_size / 8;
3787 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3788 if (!rxr->rx_agg_bmap)
3789 return -ENOMEM;
3790
3791 return 0;
3792 }
3793
3794 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3795 {
3796 int numa_node = dev_to_node(&bp->pdev->dev);
3797 int i, rc = 0, agg_rings = 0, cpu;
3798
3799 if (!bp->rx_ring)
3800 return -ENOMEM;
3801
3802 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3803 agg_rings = 1;
3804
3805 for (i = 0; i < bp->rx_nr_rings; i++) {
3806 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3807 struct bnxt_ring_struct *ring;
3808 int cpu_node;
3809
3810 ring = &rxr->rx_ring_struct;
3811
3812 cpu = cpumask_local_spread(i, numa_node);
3813 cpu_node = cpu_to_node(cpu);
3814 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3815 i, cpu_node);
3816 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3817 if (rc)
3818 return rc;
3819
3820 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3821 if (rc < 0)
3822 return rc;
3823
3824 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3825 MEM_TYPE_PAGE_POOL,
3826 rxr->page_pool);
3827 if (rc) {
3828 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3829 return rc;
3830 }
3831
3832 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3833 if (rc)
3834 return rc;
3835
3836 ring->grp_idx = i;
3837 if (agg_rings) {
3838 ring = &rxr->rx_agg_ring_struct;
3839 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3840 if (rc)
3841 return rc;
3842
3843 ring->grp_idx = i;
3844 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3845 if (rc)
3846 return rc;
3847 }
3848 }
3849 if (bp->flags & BNXT_FLAG_TPA)
3850 rc = bnxt_alloc_tpa_info(bp);
3851 return rc;
3852 }
3853
3854 static void bnxt_free_tx_rings(struct bnxt *bp)
3855 {
3856 int i;
3857 struct pci_dev *pdev = bp->pdev;
3858
3859 if (!bp->tx_ring)
3860 return;
3861
3862 for (i = 0; i < bp->tx_nr_rings; i++) {
3863 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3864 struct bnxt_ring_struct *ring;
3865
3866 if (txr->tx_push) {
3867 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3868 txr->tx_push, txr->tx_push_mapping);
3869 txr->tx_push = NULL;
3870 }
3871
3872 ring = &txr->tx_ring_struct;
3873
3874 bnxt_free_ring(bp, &ring->ring_mem);
3875 }
3876 }
3877
3878 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3879 ((tc) * (bp)->tx_nr_rings_per_tc)
3880
3881 #define BNXT_RING_TO_TC_OFF(bp, tx) \
3882 ((tx) % (bp)->tx_nr_rings_per_tc)
3883
3884 #define BNXT_RING_TO_TC(bp, tx) \
3885 ((tx) / (bp)->tx_nr_rings_per_tc)
3886
3887 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3888 {
3889 int i, j, rc;
3890 struct pci_dev *pdev = bp->pdev;
3891
3892 bp->tx_push_size = 0;
3893 if (bp->tx_push_thresh) {
3894 int push_size;
3895
3896 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3897 bp->tx_push_thresh);
3898
3899 if (push_size > 256) {
3900 push_size = 0;
3901 bp->tx_push_thresh = 0;
3902 }
3903
3904 bp->tx_push_size = push_size;
3905 }
3906
3907 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3908 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3909 struct bnxt_ring_struct *ring;
3910 u8 qidx;
3911
3912 ring = &txr->tx_ring_struct;
3913
3914 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3915 if (rc)
3916 return rc;
3917
3918 ring->grp_idx = txr->bnapi->index;
3919 if (bp->tx_push_size) {
3920 dma_addr_t mapping;
3921
3922 /* One pre-allocated DMA buffer to back up
3923 * the TX push operation
3924 */
3925 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3926 bp->tx_push_size,
3927 &txr->tx_push_mapping,
3928 GFP_KERNEL);
3929
3930 if (!txr->tx_push)
3931 return -ENOMEM;
3932
3933 mapping = txr->tx_push_mapping +
3934 sizeof(struct tx_push_bd);
3935 txr->data_mapping = cpu_to_le64(mapping);
3936 }
3937 qidx = bp->tc_to_qidx[j];
3938 ring->queue_id = bp->q_info[qidx].queue_id;
3939 spin_lock_init(&txr->xdp_tx_lock);
3940 if (i < bp->tx_nr_rings_xdp)
3941 continue;
3942 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
3943 j++;
3944 }
3945 return 0;
3946 }
3947
3948 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3949 {
3950 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3951
3952 kfree(cpr->cp_desc_ring);
3953 cpr->cp_desc_ring = NULL;
3954 ring->ring_mem.pg_arr = NULL;
3955 kfree(cpr->cp_desc_mapping);
3956 cpr->cp_desc_mapping = NULL;
3957 ring->ring_mem.dma_arr = NULL;
3958 }
3959
3960 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3961 {
3962 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3963 if (!cpr->cp_desc_ring)
3964 return -ENOMEM;
3965 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3966 GFP_KERNEL);
3967 if (!cpr->cp_desc_mapping)
3968 return -ENOMEM;
3969 return 0;
3970 }
3971
3972 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3973 {
3974 int i;
3975
3976 if (!bp->bnapi)
3977 return;
3978 for (i = 0; i < bp->cp_nr_rings; i++) {
3979 struct bnxt_napi *bnapi = bp->bnapi[i];
3980
3981 if (!bnapi)
3982 continue;
3983 bnxt_free_cp_arrays(&bnapi->cp_ring);
3984 }
3985 }
3986
3987 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3988 {
3989 int i, n = bp->cp_nr_pages;
3990
3991 for (i = 0; i < bp->cp_nr_rings; i++) {
3992 struct bnxt_napi *bnapi = bp->bnapi[i];
3993 int rc;
3994
3995 if (!bnapi)
3996 continue;
3997 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3998 if (rc)
3999 return rc;
4000 }
4001 return 0;
4002 }
4003
4004 static void bnxt_free_cp_rings(struct bnxt *bp)
4005 {
4006 int i;
4007
4008 if (!bp->bnapi)
4009 return;
4010
4011 for (i = 0; i < bp->cp_nr_rings; i++) {
4012 struct bnxt_napi *bnapi = bp->bnapi[i];
4013 struct bnxt_cp_ring_info *cpr;
4014 struct bnxt_ring_struct *ring;
4015 int j;
4016
4017 if (!bnapi)
4018 continue;
4019
4020 cpr = &bnapi->cp_ring;
4021 ring = &cpr->cp_ring_struct;
4022
4023 bnxt_free_ring(bp, &ring->ring_mem);
4024
4025 if (!cpr->cp_ring_arr)
4026 continue;
4027
4028 for (j = 0; j < cpr->cp_ring_count; j++) {
4029 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4030
4031 ring = &cpr2->cp_ring_struct;
4032 bnxt_free_ring(bp, &ring->ring_mem);
4033 bnxt_free_cp_arrays(cpr2);
4034 }
4035 kfree(cpr->cp_ring_arr);
4036 cpr->cp_ring_arr = NULL;
4037 cpr->cp_ring_count = 0;
4038 }
4039 }
4040
4041 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4042 struct bnxt_cp_ring_info *cpr)
4043 {
4044 struct bnxt_ring_mem_info *rmem;
4045 struct bnxt_ring_struct *ring;
4046 int rc;
4047
4048 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4049 if (rc) {
4050 bnxt_free_cp_arrays(cpr);
4051 return -ENOMEM;
4052 }
4053 ring = &cpr->cp_ring_struct;
4054 rmem = &ring->ring_mem;
4055 rmem->nr_pages = bp->cp_nr_pages;
4056 rmem->page_size = HW_CMPD_RING_SIZE;
4057 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4058 rmem->dma_arr = cpr->cp_desc_mapping;
4059 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4060 rc = bnxt_alloc_ring(bp, rmem);
4061 if (rc) {
4062 bnxt_free_ring(bp, rmem);
4063 bnxt_free_cp_arrays(cpr);
4064 }
4065 return rc;
4066 }
4067
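/* Allocate the per-NAPI completion rings. On P5+ chips each NAPI owns
 * a notification queue (the cp_ring above) plus an array of sub
 * completion rings: one for RX if the NAPI has an RX ring, and one per
 * traffic class (or a single one for XDP) if it has TX rings.
 */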
4068 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4069 {
4070 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4071 int i, j, rc, ulp_msix;
4072 int tcs = bp->num_tc;
4073
4074 if (!tcs)
4075 tcs = 1;
4076 ulp_msix = bnxt_get_ulp_msix_num(bp);
4077 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4078 struct bnxt_napi *bnapi = bp->bnapi[i];
4079 struct bnxt_cp_ring_info *cpr, *cpr2;
4080 struct bnxt_ring_struct *ring;
4081 int cp_count = 0, k;
4082 int rx = 0, tx = 0;
4083
4084 if (!bnapi)
4085 continue;
4086
4087 cpr = &bnapi->cp_ring;
4088 cpr->bnapi = bnapi;
4089 ring = &cpr->cp_ring_struct;
4090
4091 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4092 if (rc)
4093 return rc;
4094
4095 ring->map_idx = ulp_msix + i;
4096
4097 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4098 continue;
4099
4100 if (i < bp->rx_nr_rings) {
4101 cp_count++;
4102 rx = 1;
4103 }
4104 if (i < bp->tx_nr_rings_xdp) {
4105 cp_count++;
4106 tx = 1;
4107 } else if ((sh && i < bp->tx_nr_rings) ||
4108 (!sh && i >= bp->rx_nr_rings)) {
4109 cp_count += tcs;
4110 tx = 1;
4111 }
4112
4113 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
4114 GFP_KERNEL);
4115 if (!cpr->cp_ring_arr)
4116 return -ENOMEM;
4117 cpr->cp_ring_count = cp_count;
4118
4119 for (k = 0; k < cp_count; k++) {
4120 cpr2 = &cpr->cp_ring_arr[k];
4121 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4122 if (rc)
4123 return rc;
4124 cpr2->bnapi = bnapi;
4125 cpr2->sw_stats = cpr->sw_stats;
4126 cpr2->cp_idx = k;
4127 if (!k && rx) {
4128 bp->rx_ring[i].rx_cpr = cpr2;
4129 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4130 } else {
4131 int n, tc = k - rx;
4132
4133 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4134 bp->tx_ring[n].tx_cpr = cpr2;
4135 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4136 }
4137 }
4138 if (tx)
4139 j++;
4140 }
4141 return 0;
4142 }
4143
4144 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4145 struct bnxt_rx_ring_info *rxr)
4146 {
4147 struct bnxt_ring_mem_info *rmem;
4148 struct bnxt_ring_struct *ring;
4149
4150 ring = &rxr->rx_ring_struct;
4151 rmem = &ring->ring_mem;
4152 rmem->nr_pages = bp->rx_nr_pages;
4153 rmem->page_size = HW_RXBD_RING_SIZE;
4154 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4155 rmem->dma_arr = rxr->rx_desc_mapping;
4156 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4157 rmem->vmem = (void **)&rxr->rx_buf_ring;
4158
4159 ring = &rxr->rx_agg_ring_struct;
4160 rmem = &ring->ring_mem;
4161 rmem->nr_pages = bp->rx_agg_nr_pages;
4162 rmem->page_size = HW_RXBD_RING_SIZE;
4163 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4164 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4165 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4166 rmem->vmem = (void **)&rxr->rx_agg_ring;
4167 }
4168
4169 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4170 struct bnxt_rx_ring_info *rxr)
4171 {
4172 struct bnxt_ring_mem_info *rmem;
4173 struct bnxt_ring_struct *ring;
4174 int i;
4175
4176 rxr->page_pool->p.napi = NULL;
4177 rxr->page_pool = NULL;
4178 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4179
4180 ring = &rxr->rx_ring_struct;
4181 rmem = &ring->ring_mem;
4182 rmem->pg_tbl = NULL;
4183 rmem->pg_tbl_map = 0;
4184 for (i = 0; i < rmem->nr_pages; i++) {
4185 rmem->pg_arr[i] = NULL;
4186 rmem->dma_arr[i] = 0;
4187 }
4188 *rmem->vmem = NULL;
4189
4190 ring = &rxr->rx_agg_ring_struct;
4191 rmem = &ring->ring_mem;
4192 rmem->pg_tbl = NULL;
4193 rmem->pg_tbl_map = 0;
4194 for (i = 0; i < rmem->nr_pages; i++) {
4195 rmem->pg_arr[i] = NULL;
4196 rmem->dma_arr[i] = 0;
4197 }
4198 *rmem->vmem = NULL;
4199 }
4200
4201 static void bnxt_init_ring_struct(struct bnxt *bp)
4202 {
4203 int i, j;
4204
4205 for (i = 0; i < bp->cp_nr_rings; i++) {
4206 struct bnxt_napi *bnapi = bp->bnapi[i];
4207 struct bnxt_ring_mem_info *rmem;
4208 struct bnxt_cp_ring_info *cpr;
4209 struct bnxt_rx_ring_info *rxr;
4210 struct bnxt_tx_ring_info *txr;
4211 struct bnxt_ring_struct *ring;
4212
4213 if (!bnapi)
4214 continue;
4215
4216 cpr = &bnapi->cp_ring;
4217 ring = &cpr->cp_ring_struct;
4218 rmem = &ring->ring_mem;
4219 rmem->nr_pages = bp->cp_nr_pages;
4220 rmem->page_size = HW_CMPD_RING_SIZE;
4221 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4222 rmem->dma_arr = cpr->cp_desc_mapping;
4223 rmem->vmem_size = 0;
4224
4225 rxr = bnapi->rx_ring;
4226 if (!rxr)
4227 goto skip_rx;
4228
4229 ring = &rxr->rx_ring_struct;
4230 rmem = &ring->ring_mem;
4231 rmem->nr_pages = bp->rx_nr_pages;
4232 rmem->page_size = HW_RXBD_RING_SIZE;
4233 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4234 rmem->dma_arr = rxr->rx_desc_mapping;
4235 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4236 rmem->vmem = (void **)&rxr->rx_buf_ring;
4237
4238 ring = &rxr->rx_agg_ring_struct;
4239 rmem = &ring->ring_mem;
4240 rmem->nr_pages = bp->rx_agg_nr_pages;
4241 rmem->page_size = HW_RXBD_RING_SIZE;
4242 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4243 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4244 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4245 rmem->vmem = (void **)&rxr->rx_agg_ring;
4246
4247 skip_rx:
4248 bnxt_for_each_napi_tx(j, bnapi, txr) {
4249 ring = &txr->tx_ring_struct;
4250 rmem = &ring->ring_mem;
4251 rmem->nr_pages = bp->tx_nr_pages;
4252 rmem->page_size = HW_TXBD_RING_SIZE;
4253 rmem->pg_arr = (void **)txr->tx_desc_ring;
4254 rmem->dma_arr = txr->tx_desc_mapping;
4255 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4256 rmem->vmem = (void **)&txr->tx_buf_ring;
4257 }
4258 }
4259 }
4260
4261 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4262 {
4263 int i;
4264 u32 prod;
4265 struct rx_bd **rx_buf_ring;
4266
4267 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4268 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4269 int j;
4270 struct rx_bd *rxbd;
4271
4272 rxbd = rx_buf_ring[i];
4273 if (!rxbd)
4274 continue;
4275
4276 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4277 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4278 rxbd->rx_bd_opaque = prod;
4279 }
4280 }
4281 }
4282
4283 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4284 struct bnxt_rx_ring_info *rxr,
4285 int ring_nr)
4286 {
4287 u32 prod;
4288 int i;
4289
4290 prod = rxr->rx_prod;
4291 for (i = 0; i < bp->rx_ring_size; i++) {
4292 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4293 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4294 ring_nr, i, bp->rx_ring_size);
4295 break;
4296 }
4297 prod = NEXT_RX(prod);
4298 }
4299 rxr->rx_prod = prod;
4300 }
4301
4302 static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
4303 struct bnxt_rx_ring_info *rxr,
4304 int ring_nr)
4305 {
4306 u32 prod;
4307 int i;
4308
4309 prod = rxr->rx_agg_prod;
4310 for (i = 0; i < bp->rx_agg_ring_size; i++) {
4311 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
4312 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4313 ring_nr, i, bp->rx_agg_ring_size);
4314 break;
4315 }
4316 prod = NEXT_RX_AGG(prod);
4317 }
4318 rxr->rx_agg_prod = prod;
4319 }
4320
4321 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4322 struct bnxt_rx_ring_info *rxr)
4323 {
4324 dma_addr_t mapping;
4325 u8 *data;
4326 int i;
4327
4328 for (i = 0; i < bp->max_tpa; i++) {
4329 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4330 GFP_KERNEL);
4331 if (!data)
4332 return -ENOMEM;
4333
4334 rxr->rx_tpa[i].data = data;
4335 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4336 rxr->rx_tpa[i].mapping = mapping;
4337 }
4338
4339 return 0;
4340 }
4341
4342 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4343 {
4344 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4345 int rc;
4346
4347 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4348
4349 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4350 return 0;
4351
4352 bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
4353
4354 if (rxr->rx_tpa) {
4355 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4356 if (rc)
4357 return rc;
4358 }
4359 return 0;
4360 }
4361
4362 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4363 struct bnxt_rx_ring_info *rxr)
4364 {
4365 struct bnxt_ring_struct *ring;
4366 u32 type;
4367
4368 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4369 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4370
4371 if (NET_IP_ALIGN == 2)
4372 type |= RX_BD_FLAGS_SOP;
4373
4374 ring = &rxr->rx_ring_struct;
4375 bnxt_init_rxbd_pages(ring, type);
4376 ring->fw_ring_id = INVALID_HW_RING_ID;
4377 }
4378
4379 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4380 struct bnxt_rx_ring_info *rxr)
4381 {
4382 struct bnxt_ring_struct *ring;
4383 u32 type;
4384
4385 ring = &rxr->rx_agg_ring_struct;
4386 ring->fw_ring_id = INVALID_HW_RING_ID;
4387 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4388 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4389 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4390
4391 bnxt_init_rxbd_pages(ring, type);
4392 }
4393 }
4394
4395 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4396 {
4397 struct bnxt_rx_ring_info *rxr;
4398
4399 rxr = &bp->rx_ring[ring_nr];
4400 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4401
4402 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4403 &rxr->bnapi->napi);
4404
4405 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4406 bpf_prog_add(bp->xdp_prog, 1);
4407 rxr->xdp_prog = bp->xdp_prog;
4408 }
4409
4410 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4411
4412 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4413 }
4414
4415 static void bnxt_init_cp_rings(struct bnxt *bp)
4416 {
4417 int i, j;
4418
4419 for (i = 0; i < bp->cp_nr_rings; i++) {
4420 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4421 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4422
4423 ring->fw_ring_id = INVALID_HW_RING_ID;
4424 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4425 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4426 if (!cpr->cp_ring_arr)
4427 continue;
4428 for (j = 0; j < cpr->cp_ring_count; j++) {
4429 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4430
4431 ring = &cpr2->cp_ring_struct;
4432 ring->fw_ring_id = INVALID_HW_RING_ID;
4433 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4434 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4435 }
4436 }
4437 }
4438
4439 static int bnxt_init_rx_rings(struct bnxt *bp)
4440 {
4441 int i, rc = 0;
4442
4443 if (BNXT_RX_PAGE_MODE(bp)) {
4444 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4445 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4446 } else {
4447 bp->rx_offset = BNXT_RX_OFFSET;
4448 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4449 }
4450
4451 for (i = 0; i < bp->rx_nr_rings; i++) {
4452 rc = bnxt_init_one_rx_ring(bp, i);
4453 if (rc)
4454 break;
4455 }
4456
4457 return rc;
4458 }
4459
4460 static int bnxt_init_tx_rings(struct bnxt *bp)
4461 {
4462 u16 i;
4463
4464 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4465 BNXT_MIN_TX_DESC_CNT);
4466
4467 for (i = 0; i < bp->tx_nr_rings; i++) {
4468 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4469 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4470
4471 ring->fw_ring_id = INVALID_HW_RING_ID;
4472
4473 if (i >= bp->tx_nr_rings_xdp)
4474 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4475 NETDEV_QUEUE_TYPE_TX,
4476 &txr->bnapi->napi);
4477 }
4478
4479 return 0;
4480 }
4481
4482 static void bnxt_free_ring_grps(struct bnxt *bp)
4483 {
4484 kfree(bp->grp_info);
4485 bp->grp_info = NULL;
4486 }
4487
4488 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4489 {
4490 int i;
4491
4492 if (irq_re_init) {
4493 bp->grp_info = kcalloc(bp->cp_nr_rings,
4494 sizeof(struct bnxt_ring_grp_info),
4495 GFP_KERNEL);
4496 if (!bp->grp_info)
4497 return -ENOMEM;
4498 }
4499 for (i = 0; i < bp->cp_nr_rings; i++) {
4500 if (irq_re_init)
4501 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4502 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4503 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4504 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4505 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4506 }
4507 return 0;
4508 }
4509
4510 static void bnxt_free_vnics(struct bnxt *bp)
4511 {
4512 kfree(bp->vnic_info);
4513 bp->vnic_info = NULL;
4514 bp->nr_vnics = 0;
4515 }
4516
4517 static int bnxt_alloc_vnics(struct bnxt *bp)
4518 {
4519 int num_vnics = 1;
4520
4521 #ifdef CONFIG_RFS_ACCEL
4522 if (bp->flags & BNXT_FLAG_RFS) {
4523 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4524 num_vnics++;
4525 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4526 num_vnics += bp->rx_nr_rings;
4527 }
4528 #endif
4529
4530 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4531 num_vnics++;
4532
4533 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4534 GFP_KERNEL);
4535 if (!bp->vnic_info)
4536 return -ENOMEM;
4537
4538 bp->nr_vnics = num_vnics;
4539 return 0;
4540 }
4541
4542 static void bnxt_init_vnics(struct bnxt *bp)
4543 {
4544 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4545 int i;
4546
4547 for (i = 0; i < bp->nr_vnics; i++) {
4548 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4549 int j;
4550
4551 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4552 vnic->vnic_id = i;
4553 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4554 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4555
4556 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4557
4558 if (bp->vnic_info[i].rss_hash_key) {
4559 if (i == BNXT_VNIC_DEFAULT) {
4560 u8 *key = (void *)vnic->rss_hash_key;
4561 int k;
4562
4563 if (!bp->rss_hash_key_valid &&
4564 !bp->rss_hash_key_updated) {
4565 get_random_bytes(bp->rss_hash_key,
4566 HW_HASH_KEY_SIZE);
4567 bp->rss_hash_key_updated = true;
4568 }
4569
4570 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4571 HW_HASH_KEY_SIZE);
4572
4573 if (!bp->rss_hash_key_updated)
4574 continue;
4575
4576 bp->rss_hash_key_updated = false;
4577 bp->rss_hash_key_valid = true;
4578
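/* Fold the first 8 bytes of the RSS hash key into a big-endian
 * prefix, used when the driver computes Toeplitz hashes in software.
 */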
4579 bp->toeplitz_prefix = 0;
4580 for (k = 0; k < 8; k++) {
4581 bp->toeplitz_prefix <<= 8;
4582 bp->toeplitz_prefix |= key[k];
4583 }
4584 } else {
4585 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4586 HW_HASH_KEY_SIZE);
4587 }
4588 }
4589 }
4590 }
4591
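/* Return the number of ring pages needed for 'ring_size' descriptors,
 * rounded up to the next power of two. Example: ring_size = 1024 with
 * 512 descriptors per page gives 1024 / 512 + 1 = 3, rounded up to 4.
 */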
4592 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4593 {
4594 int pages;
4595
4596 pages = ring_size / desc_per_pg;
4597
4598 if (!pages)
4599 return 1;
4600
4601 pages++;
4602
4603 while (pages & (pages - 1))
4604 pages++;
4605
4606 return pages;
4607 }
4608
4609 void bnxt_set_tpa_flags(struct bnxt *bp)
4610 {
4611 bp->flags &= ~BNXT_FLAG_TPA;
4612 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4613 return;
4614 if (bp->dev->features & NETIF_F_LRO)
4615 bp->flags |= BNXT_FLAG_LRO;
4616 else if (bp->dev->features & NETIF_F_GRO_HW)
4617 bp->flags |= BNXT_FLAG_GRO;
4618 }
4619
4620 static void bnxt_init_ring_params(struct bnxt *bp)
4621 {
4622 unsigned int rx_size;
4623
4624 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4625 /* Try to fit 4 chunks into a 4k page */
4626 rx_size = SZ_1K -
4627 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4628 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4629 }
4630
4631 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4632 * be set on entry.
4633 */
4634 void bnxt_set_ring_params(struct bnxt *bp)
4635 {
4636 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4637 u32 agg_factor = 0, agg_ring_size = 0;
4638
4639 /* 8 for CRC and VLAN */
4640 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4641
4642 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4643 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4644
4645 ring_size = bp->rx_ring_size;
4646 bp->rx_agg_ring_size = 0;
4647 bp->rx_agg_nr_pages = 0;
4648
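/* Use up to 4 aggregation buffers per RX packet; with a 4K
 * BNXT_RX_PAGE_SIZE this evaluates to min(4, 65536 / 4096) = 4.
 */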
4649 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4650 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4651
4652 bp->flags &= ~BNXT_FLAG_JUMBO;
4653 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4654 u32 jumbo_factor;
4655
4656 bp->flags |= BNXT_FLAG_JUMBO;
4657 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4658 if (jumbo_factor > agg_factor)
4659 agg_factor = jumbo_factor;
4660 }
4661 if (agg_factor) {
4662 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4663 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4664 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4665 bp->rx_ring_size, ring_size);
4666 bp->rx_ring_size = ring_size;
4667 }
4668 agg_ring_size = ring_size * agg_factor;
4669
4670 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4671 RX_DESC_CNT);
4672 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4673 u32 tmp = agg_ring_size;
4674
4675 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4676 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4677 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4678 tmp, agg_ring_size);
4679 }
4680 bp->rx_agg_ring_size = agg_ring_size;
4681 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4682
4683 if (BNXT_RX_PAGE_MODE(bp)) {
4684 rx_space = PAGE_SIZE;
4685 rx_size = PAGE_SIZE -
4686 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4687 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4688 } else {
4689 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4690 bp->rx_copybreak,
4691 bp->dev->cfg_pending->hds_thresh);
4692 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4693 rx_space = rx_size + NET_SKB_PAD +
4694 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4695 }
4696 }
4697
4698 bp->rx_buf_use_size = rx_size;
4699 bp->rx_buf_size = rx_space;
4700
4701 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4702 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4703
4704 ring_size = bp->tx_ring_size;
4705 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4706 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4707
4708 max_rx_cmpl = bp->rx_ring_size;
4709 /* MAX TPA needs to be added because TPA_START completions are
4710 * immediately recycled, so the TPA completions are not bound by
4711 * the RX ring size.
4712 */
4713 if (bp->flags & BNXT_FLAG_TPA)
4714 max_rx_cmpl += bp->max_tpa;
4715 /* RX and TPA completions are 32-byte, all others are 16-byte */
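/* Illustrative sizing with hypothetical values: a 2048-entry RX ring
 * with 256 TPA slots, an 8192-entry agg ring and a 2048-entry TX ring
 * needs (2048 + 256) * 2 + 8192 + 2048 = 14848 16-byte slots.
 */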
4716 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4717 bp->cp_ring_size = ring_size;
4718
4719 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4720 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4721 bp->cp_nr_pages = MAX_CP_PAGES;
4722 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4723 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4724 ring_size, bp->cp_ring_size);
4725 }
4726 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4727 bp->cp_ring_mask = bp->cp_bit - 1;
4728 }
4729
4730 /* Changing allocation mode of RX rings.
4731 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4732 */
4733 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4734 {
4735 struct net_device *dev = bp->dev;
4736
4737 if (page_mode) {
4738 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4739 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4740
4741 if (bp->xdp_prog->aux->xdp_has_frags)
4742 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4743 else
4744 dev->max_mtu =
4745 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4746 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4747 bp->flags |= BNXT_FLAG_JUMBO;
4748 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4749 } else {
4750 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4751 bp->rx_skb_func = bnxt_rx_page_skb;
4752 }
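/* XDP programs may rewrite packet data, so map pages bidirectionally
 * in page mode.
 */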
4753 bp->rx_dir = DMA_BIDIRECTIONAL;
4754 } else {
4755 dev->max_mtu = bp->max_mtu;
4756 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4757 bp->rx_dir = DMA_FROM_DEVICE;
4758 bp->rx_skb_func = bnxt_rx_skb;
4759 }
4760 }
4761
4762 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4763 {
4764 __bnxt_set_rx_skb_mode(bp, page_mode);
4765
4766 if (!page_mode) {
4767 int rx, tx;
4768
4769 bnxt_get_max_rings(bp, &rx, &tx, true);
4770 if (rx > 1) {
4771 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4772 bp->dev->hw_features |= NETIF_F_LRO;
4773 }
4774 }
4775
4776 /* Update LRO and GRO_HW availability */
4777 netdev_update_features(bp->dev);
4778 }
4779
4780 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4781 {
4782 int i;
4783 struct bnxt_vnic_info *vnic;
4784 struct pci_dev *pdev = bp->pdev;
4785
4786 if (!bp->vnic_info)
4787 return;
4788
4789 for (i = 0; i < bp->nr_vnics; i++) {
4790 vnic = &bp->vnic_info[i];
4791
4792 kfree(vnic->fw_grp_ids);
4793 vnic->fw_grp_ids = NULL;
4794
4795 kfree(vnic->uc_list);
4796 vnic->uc_list = NULL;
4797
4798 if (vnic->mc_list) {
4799 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4800 vnic->mc_list, vnic->mc_list_mapping);
4801 vnic->mc_list = NULL;
4802 }
4803
4804 if (vnic->rss_table) {
4805 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4806 vnic->rss_table,
4807 vnic->rss_table_dma_addr);
4808 vnic->rss_table = NULL;
4809 }
4810
4811 vnic->rss_hash_key = NULL;
4812 vnic->flags = 0;
4813 }
4814 }
4815
4816 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4817 {
4818 int i, rc = 0, size;
4819 struct bnxt_vnic_info *vnic;
4820 struct pci_dev *pdev = bp->pdev;
4821 int max_rings;
4822
4823 for (i = 0; i < bp->nr_vnics; i++) {
4824 vnic = &bp->vnic_info[i];
4825
4826 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4827 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4828
4829 if (mem_size > 0) {
4830 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4831 if (!vnic->uc_list) {
4832 rc = -ENOMEM;
4833 goto out;
4834 }
4835 }
4836 }
4837
4838 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4839 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4840 vnic->mc_list =
4841 dma_alloc_coherent(&pdev->dev,
4842 vnic->mc_list_size,
4843 &vnic->mc_list_mapping,
4844 GFP_KERNEL);
4845 if (!vnic->mc_list) {
4846 rc = -ENOMEM;
4847 goto out;
4848 }
4849 }
4850
4851 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4852 goto vnic_skip_grps;
4853
4854 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4855 max_rings = bp->rx_nr_rings;
4856 else
4857 max_rings = 1;
4858
4859 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4860 if (!vnic->fw_grp_ids) {
4861 rc = -ENOMEM;
4862 goto out;
4863 }
4864 vnic_skip_grps:
4865 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4866 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4867 continue;
4868
4869 /* Allocate rss table and hash key */
4870 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4871 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4872 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4873
4874 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4875 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4876 vnic->rss_table_size,
4877 &vnic->rss_table_dma_addr,
4878 GFP_KERNEL);
4879 if (!vnic->rss_table) {
4880 rc = -ENOMEM;
4881 goto out;
4882 }
4883
4884 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4885 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4886 }
4887 return 0;
4888
4889 out:
4890 return rc;
4891 }
4892
4893 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4894 {
4895 struct bnxt_hwrm_wait_token *token;
4896
4897 dma_pool_destroy(bp->hwrm_dma_pool);
4898 bp->hwrm_dma_pool = NULL;
4899
4900 rcu_read_lock();
4901 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4902 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4903 rcu_read_unlock();
4904 }
4905
4906 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4907 {
4908 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4909 BNXT_HWRM_DMA_SIZE,
4910 BNXT_HWRM_DMA_ALIGN, 0);
4911 if (!bp->hwrm_dma_pool)
4912 return -ENOMEM;
4913
4914 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4915
4916 return 0;
4917 }
4918
4919 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4920 {
4921 kfree(stats->hw_masks);
4922 stats->hw_masks = NULL;
4923 kfree(stats->sw_stats);
4924 stats->sw_stats = NULL;
4925 if (stats->hw_stats) {
4926 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4927 stats->hw_stats_map);
4928 stats->hw_stats = NULL;
4929 }
4930 }
4931
4932 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4933 bool alloc_masks)
4934 {
4935 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4936 &stats->hw_stats_map, GFP_KERNEL);
4937 if (!stats->hw_stats)
4938 return -ENOMEM;
4939
4940 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4941 if (!stats->sw_stats)
4942 goto stats_mem_err;
4943
4944 if (alloc_masks) {
4945 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4946 if (!stats->hw_masks)
4947 goto stats_mem_err;
4948 }
4949 return 0;
4950
4951 stats_mem_err:
4952 bnxt_free_stats_mem(bp, stats);
4953 return -ENOMEM;
4954 }
4955
4956 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4957 {
4958 int i;
4959
4960 for (i = 0; i < count; i++)
4961 mask_arr[i] = mask;
4962 }
4963
4964 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4965 {
4966 int i;
4967
4968 for (i = 0; i < count; i++)
4969 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4970 }
4971
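/* Query the firmware (HWRM_FUNC_QSTATS_EXT with the COUNTER_MASK flag)
 * for the width masks of the extended per-function counters so that
 * counter wraparound can be accounted for.
 */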
4972 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4973 struct bnxt_stats_mem *stats)
4974 {
4975 struct hwrm_func_qstats_ext_output *resp;
4976 struct hwrm_func_qstats_ext_input *req;
4977 __le64 *hw_masks;
4978 int rc;
4979
4980 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4981 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4982 return -EOPNOTSUPP;
4983
4984 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4985 if (rc)
4986 return rc;
4987
4988 req->fid = cpu_to_le16(0xffff);
4989 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4990
4991 resp = hwrm_req_hold(bp, req);
4992 rc = hwrm_req_send(bp, req);
4993 if (!rc) {
4994 hw_masks = &resp->rx_ucast_pkts;
4995 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4996 }
4997 hwrm_req_drop(bp, req);
4998 return rc;
4999 }
5000
5001 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5002 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5003
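/* Determine the rollover masks for the hardware counters. When the
 * firmware cannot report them, fall back to 48-bit ring counters on
 * P5+ chips (64-bit otherwise) and 40-bit port counters.
 */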
5004 static void bnxt_init_stats(struct bnxt *bp)
5005 {
5006 struct bnxt_napi *bnapi = bp->bnapi[0];
5007 struct bnxt_cp_ring_info *cpr;
5008 struct bnxt_stats_mem *stats;
5009 __le64 *rx_stats, *tx_stats;
5010 int rc, rx_count, tx_count;
5011 u64 *rx_masks, *tx_masks;
5012 u64 mask;
5013 u8 flags;
5014
5015 cpr = &bnapi->cp_ring;
5016 stats = &cpr->stats;
5017 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5018 if (rc) {
5019 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5020 mask = (1ULL << 48) - 1;
5021 else
5022 mask = -1ULL;
5023 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5024 }
5025 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5026 stats = &bp->port_stats;
5027 rx_stats = stats->hw_stats;
5028 rx_masks = stats->hw_masks;
5029 rx_count = sizeof(struct rx_port_stats) / 8;
5030 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5031 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5032 tx_count = sizeof(struct tx_port_stats) / 8;
5033
5034 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5035 rc = bnxt_hwrm_port_qstats(bp, flags);
5036 if (rc) {
5037 mask = (1ULL << 40) - 1;
5038
5039 bnxt_fill_masks(rx_masks, mask, rx_count);
5040 bnxt_fill_masks(tx_masks, mask, tx_count);
5041 } else {
5042 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5043 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5044 bnxt_hwrm_port_qstats(bp, 0);
5045 }
5046 }
5047 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5048 stats = &bp->rx_port_stats_ext;
5049 rx_stats = stats->hw_stats;
5050 rx_masks = stats->hw_masks;
5051 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5052 stats = &bp->tx_port_stats_ext;
5053 tx_stats = stats->hw_stats;
5054 tx_masks = stats->hw_masks;
5055 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5056
5057 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5058 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5059 if (rc) {
5060 mask = (1ULL << 40) - 1;
5061
5062 bnxt_fill_masks(rx_masks, mask, rx_count);
5063 if (tx_stats)
5064 bnxt_fill_masks(tx_masks, mask, tx_count);
5065 } else {
5066 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5067 if (tx_stats)
5068 bnxt_copy_hw_masks(tx_masks, tx_stats,
5069 tx_count);
5070 bnxt_hwrm_port_qstats_ext(bp, 0);
5071 }
5072 }
5073 }
5074
5075 static void bnxt_free_port_stats(struct bnxt *bp)
5076 {
5077 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5078 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5079
5080 bnxt_free_stats_mem(bp, &bp->port_stats);
5081 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5082 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5083 }
5084
5085 static void bnxt_free_ring_stats(struct bnxt *bp)
5086 {
5087 int i;
5088
5089 if (!bp->bnapi)
5090 return;
5091
5092 for (i = 0; i < bp->cp_nr_rings; i++) {
5093 struct bnxt_napi *bnapi = bp->bnapi[i];
5094 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5095
5096 bnxt_free_stats_mem(bp, &cpr->stats);
5097
5098 kfree(cpr->sw_stats);
5099 cpr->sw_stats = NULL;
5100 }
5101 }
5102
5103 static int bnxt_alloc_stats(struct bnxt *bp)
5104 {
5105 u32 size, i;
5106 int rc;
5107
5108 size = bp->hw_ring_stats_size;
5109
5110 for (i = 0; i < bp->cp_nr_rings; i++) {
5111 struct bnxt_napi *bnapi = bp->bnapi[i];
5112 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5113
5114 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
5115 if (!cpr->sw_stats)
5116 return -ENOMEM;
5117
5118 cpr->stats.len = size;
5119 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5120 if (rc)
5121 return rc;
5122
5123 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5124 }
5125
5126 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5127 return 0;
5128
5129 if (bp->port_stats.hw_stats)
5130 goto alloc_ext_stats;
5131
5132 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5133 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5134 if (rc)
5135 return rc;
5136
5137 bp->flags |= BNXT_FLAG_PORT_STATS;
5138
5139 alloc_ext_stats:
5140 /* Display extended statistics only if FW supports them */
5141 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5142 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5143 return 0;
5144
5145 if (bp->rx_port_stats_ext.hw_stats)
5146 goto alloc_tx_ext_stats;
5147
5148 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5149 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5150 /* Extended stats are optional */
5151 if (rc)
5152 return 0;
5153
5154 alloc_tx_ext_stats:
5155 if (bp->tx_port_stats_ext.hw_stats)
5156 return 0;
5157
5158 if (bp->hwrm_spec_code >= 0x10902 ||
5159 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5160 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5161 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5162 /* Extended stats are optional */
5163 if (rc)
5164 return 0;
5165 }
5166 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5167 return 0;
5168 }
5169
5170 static void bnxt_clear_ring_indices(struct bnxt *bp)
5171 {
5172 int i, j;
5173
5174 if (!bp->bnapi)
5175 return;
5176
5177 for (i = 0; i < bp->cp_nr_rings; i++) {
5178 struct bnxt_napi *bnapi = bp->bnapi[i];
5179 struct bnxt_cp_ring_info *cpr;
5180 struct bnxt_rx_ring_info *rxr;
5181 struct bnxt_tx_ring_info *txr;
5182
5183 if (!bnapi)
5184 continue;
5185
5186 cpr = &bnapi->cp_ring;
5187 cpr->cp_raw_cons = 0;
5188
5189 bnxt_for_each_napi_tx(j, bnapi, txr) {
5190 txr->tx_prod = 0;
5191 txr->tx_cons = 0;
5192 txr->tx_hw_cons = 0;
5193 }
5194
5195 rxr = bnapi->rx_ring;
5196 if (rxr) {
5197 rxr->rx_prod = 0;
5198 rxr->rx_agg_prod = 0;
5199 rxr->rx_sw_agg_prod = 0;
5200 rxr->rx_next_cons = 0;
5201 }
5202 bnapi->events = 0;
5203 }
5204 }
5205
5206 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5207 {
5208 u8 type = fltr->type, flags = fltr->flags;
5209
5210 INIT_LIST_HEAD(&fltr->list);
5211 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5212 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5213 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5214 }
5215
5216 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5217 {
5218 if (!list_empty(&fltr->list))
5219 list_del_init(&fltr->list);
5220 }
5221
5222 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5223 {
5224 struct bnxt_filter_base *usr_fltr, *tmp;
5225
5226 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5227 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5228 continue;
5229 bnxt_del_one_usr_fltr(bp, usr_fltr);
5230 }
5231 }
5232
5233 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5234 {
5235 hlist_del(&fltr->hash);
5236 bnxt_del_one_usr_fltr(bp, fltr);
5237 if (fltr->flags) {
5238 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5239 bp->ntp_fltr_count--;
5240 }
5241 kfree(fltr);
5242 }
5243
5244 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5245 {
5246 int i;
5247
5248 /* We hold rtnl_lock and all our NAPIs have been disabled, so it's
5249 * safe to delete the hash table entries.
5250 */
5251 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5252 struct hlist_head *head;
5253 struct hlist_node *tmp;
5254 struct bnxt_ntuple_filter *fltr;
5255
5256 head = &bp->ntp_fltr_hash_tbl[i];
5257 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5258 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5259 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5260 !list_empty(&fltr->base.list)))
5261 continue;
5262 bnxt_del_fltr(bp, &fltr->base);
5263 }
5264 }
5265 if (!all)
5266 return;
5267
5268 bitmap_free(bp->ntp_fltr_bmap);
5269 bp->ntp_fltr_bmap = NULL;
5270 bp->ntp_fltr_count = 0;
5271 }
5272
5273 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5274 {
5275 int i, rc = 0;
5276
5277 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5278 return 0;
5279
5280 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5281 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5282
5283 bp->ntp_fltr_count = 0;
5284 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5285
5286 if (!bp->ntp_fltr_bmap)
5287 rc = -ENOMEM;
5288
5289 return rc;
5290 }
5291
5292 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5293 {
5294 int i;
5295
5296 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5297 struct hlist_head *head;
5298 struct hlist_node *tmp;
5299 struct bnxt_l2_filter *fltr;
5300
5301 head = &bp->l2_fltr_hash_tbl[i];
5302 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5303 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5304 !list_empty(&fltr->base.list)))
5305 continue;
5306 bnxt_del_fltr(bp, &fltr->base);
5307 }
5308 }
5309 }
5310
5311 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5312 {
5313 int i;
5314
5315 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5316 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5317 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5318 }
5319
5320 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5321 {
5322 bnxt_free_vnic_attributes(bp);
5323 bnxt_free_tx_rings(bp);
5324 bnxt_free_rx_rings(bp);
5325 bnxt_free_cp_rings(bp);
5326 bnxt_free_all_cp_arrays(bp);
5327 bnxt_free_ntp_fltrs(bp, false);
5328 bnxt_free_l2_filters(bp, false);
5329 if (irq_re_init) {
5330 bnxt_free_ring_stats(bp);
5331 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5332 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5333 bnxt_free_port_stats(bp);
5334 bnxt_free_ring_grps(bp);
5335 bnxt_free_vnics(bp);
5336 kfree(bp->tx_ring_map);
5337 bp->tx_ring_map = NULL;
5338 kfree(bp->tx_ring);
5339 bp->tx_ring = NULL;
5340 kfree(bp->rx_ring);
5341 bp->rx_ring = NULL;
5342 kfree(bp->bnapi);
5343 bp->bnapi = NULL;
5344 } else {
5345 bnxt_clear_ring_indices(bp);
5346 }
5347 }
5348
5349 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5350 {
5351 int i, j, rc, size, arr_size;
5352 void *bnapi;
5353
5354 if (irq_re_init) {
5355 /* Allocate bnapi mem pointer array and mem block for
5356 * all queues
5357 */
5358 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5359 bp->cp_nr_rings);
5360 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5361 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5362 if (!bnapi)
5363 return -ENOMEM;
5364
5365 bp->bnapi = bnapi;
5366 bnapi += arr_size;
5367 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5368 bp->bnapi[i] = bnapi;
5369 bp->bnapi[i]->index = i;
5370 bp->bnapi[i]->bp = bp;
5371 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5372 struct bnxt_cp_ring_info *cpr =
5373 &bp->bnapi[i]->cp_ring;
5374
5375 cpr->cp_ring_struct.ring_mem.flags =
5376 BNXT_RMEM_RING_PTE_FLAG;
5377 }
5378 }
5379
5380 bp->rx_ring = kcalloc(bp->rx_nr_rings,
5381 sizeof(struct bnxt_rx_ring_info),
5382 GFP_KERNEL);
5383 if (!bp->rx_ring)
5384 return -ENOMEM;
5385
5386 for (i = 0; i < bp->rx_nr_rings; i++) {
5387 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5388
5389 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5390 rxr->rx_ring_struct.ring_mem.flags =
5391 BNXT_RMEM_RING_PTE_FLAG;
5392 rxr->rx_agg_ring_struct.ring_mem.flags =
5393 BNXT_RMEM_RING_PTE_FLAG;
5394 } else {
5395 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5396 }
5397 rxr->bnapi = bp->bnapi[i];
5398 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5399 }
5400
5401 bp->tx_ring = kcalloc(bp->tx_nr_rings,
5402 sizeof(struct bnxt_tx_ring_info),
5403 GFP_KERNEL);
5404 if (!bp->tx_ring)
5405 return -ENOMEM;
5406
5407 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5408 GFP_KERNEL);
5409
5410 if (!bp->tx_ring_map)
5411 return -ENOMEM;
5412
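/* With shared rings, the TX rings reuse the RX NAPIs starting at index 0;
 * otherwise the TX rings use the NAPIs that follow the RX rings.
 */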
5413 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5414 j = 0;
5415 else
5416 j = bp->rx_nr_rings;
5417
5418 for (i = 0; i < bp->tx_nr_rings; i++) {
5419 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5420 struct bnxt_napi *bnapi2;
5421
5422 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5423 txr->tx_ring_struct.ring_mem.flags =
5424 BNXT_RMEM_RING_PTE_FLAG;
5425 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5426 if (i >= bp->tx_nr_rings_xdp) {
5427 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5428
5429 bnapi2 = bp->bnapi[k];
5430 txr->txq_index = i - bp->tx_nr_rings_xdp;
5431 txr->tx_napi_idx =
5432 BNXT_RING_TO_TC(bp, txr->txq_index);
5433 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5434 bnapi2->tx_int = bnxt_tx_int;
5435 } else {
5436 bnapi2 = bp->bnapi[j];
5437 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5438 bnapi2->tx_ring[0] = txr;
5439 bnapi2->tx_int = bnxt_tx_int_xdp;
5440 j++;
5441 }
5442 txr->bnapi = bnapi2;
5443 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5444 txr->tx_cpr = &bnapi2->cp_ring;
5445 }
5446
5447 rc = bnxt_alloc_stats(bp);
5448 if (rc)
5449 goto alloc_mem_err;
5450 bnxt_init_stats(bp);
5451
5452 rc = bnxt_alloc_ntp_fltrs(bp);
5453 if (rc)
5454 goto alloc_mem_err;
5455
5456 rc = bnxt_alloc_vnics(bp);
5457 if (rc)
5458 goto alloc_mem_err;
5459 }
5460
5461 rc = bnxt_alloc_all_cp_arrays(bp);
5462 if (rc)
5463 goto alloc_mem_err;
5464
5465 bnxt_init_ring_struct(bp);
5466
5467 rc = bnxt_alloc_rx_rings(bp);
5468 if (rc)
5469 goto alloc_mem_err;
5470
5471 rc = bnxt_alloc_tx_rings(bp);
5472 if (rc)
5473 goto alloc_mem_err;
5474
5475 rc = bnxt_alloc_cp_rings(bp);
5476 if (rc)
5477 goto alloc_mem_err;
5478
5479 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5480 BNXT_VNIC_MCAST_FLAG |
5481 BNXT_VNIC_UCAST_FLAG;
5482 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5483 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5484 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5485
5486 rc = bnxt_alloc_vnic_attributes(bp);
5487 if (rc)
5488 goto alloc_mem_err;
5489 return 0;
5490
5491 alloc_mem_err:
5492 bnxt_free_mem(bp, true);
5493 return rc;
5494 }
5495
5496 static void bnxt_disable_int(struct bnxt *bp)
5497 {
5498 int i;
5499
5500 if (!bp->bnapi)
5501 return;
5502
5503 for (i = 0; i < bp->cp_nr_rings; i++) {
5504 struct bnxt_napi *bnapi = bp->bnapi[i];
5505 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5506 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5507
5508 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5509 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5510 }
5511 }
5512
5513 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5514 {
5515 struct bnxt_napi *bnapi = bp->bnapi[n];
5516 struct bnxt_cp_ring_info *cpr;
5517
5518 cpr = &bnapi->cp_ring;
5519 return cpr->cp_ring_struct.map_idx;
5520 }
5521
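/* Disable completion/NQ ring interrupts and wait for any in-flight IRQ
 * handlers to finish before returning.
 */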
5522 static void bnxt_disable_int_sync(struct bnxt *bp)
5523 {
5524 int i;
5525
5526 if (!bp->irq_tbl)
5527 return;
5528
5529 atomic_inc(&bp->intr_sem);
5530
5531 bnxt_disable_int(bp);
5532 for (i = 0; i < bp->cp_nr_rings; i++) {
5533 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5534
5535 synchronize_irq(bp->irq_tbl[map_idx].vector);
5536 }
5537 }
5538
5539 static void bnxt_enable_int(struct bnxt *bp)
5540 {
5541 int i;
5542
5543 atomic_set(&bp->intr_sem, 0);
5544 for (i = 0; i < bp->cp_nr_rings; i++) {
5545 struct bnxt_napi *bnapi = bp->bnapi[i];
5546 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5547
5548 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5549 }
5550 }
5551
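/* Register the driver with the firmware: advertise the driver version,
 * optional VF command forwarding (PF only), and the set of async events
 * the firmware should forward to the driver.
 */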
5552 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5553 bool async_only)
5554 {
5555 DECLARE_BITMAP(async_events_bmap, 256);
5556 u32 *events = (u32 *)async_events_bmap;
5557 struct hwrm_func_drv_rgtr_output *resp;
5558 struct hwrm_func_drv_rgtr_input *req;
5559 u32 flags;
5560 int rc, i;
5561
5562 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5563 if (rc)
5564 return rc;
5565
5566 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5567 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5568 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5569
5570 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5571 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5572 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5573 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5574 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5575 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5576 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5577 req->flags = cpu_to_le32(flags);
5578 req->ver_maj_8b = DRV_VER_MAJ;
5579 req->ver_min_8b = DRV_VER_MIN;
5580 req->ver_upd_8b = DRV_VER_UPD;
5581 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5582 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5583 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5584
5585 if (BNXT_PF(bp)) {
5586 u32 data[8];
5587 int i;
5588
5589 memset(data, 0, sizeof(data));
5590 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5591 u16 cmd = bnxt_vf_req_snif[i];
5592 unsigned int bit, idx;
5593
5594 idx = cmd / 32;
5595 bit = cmd % 32;
5596 data[idx] |= 1 << bit;
5597 }
5598
5599 for (i = 0; i < 8; i++)
5600 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5601
5602 req->enables |=
5603 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5604 }
5605
5606 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5607 req->flags |= cpu_to_le32(
5608 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5609
5610 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5611 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5612 u16 event_id = bnxt_async_events_arr[i];
5613
5614 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5615 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5616 continue;
5617 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5618 !bp->ptp_cfg)
5619 continue;
5620 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5621 }
5622 if (bmap && bmap_size) {
5623 for (i = 0; i < bmap_size; i++) {
5624 if (test_bit(i, bmap))
5625 __set_bit(i, async_events_bmap);
5626 }
5627 }
5628 for (i = 0; i < 8; i++)
5629 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5630
5631 if (async_only)
5632 req->enables =
5633 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5634
5635 resp = hwrm_req_hold(bp, req);
5636 rc = hwrm_req_send(bp, req);
5637 if (!rc) {
5638 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5639 if (resp->flags &
5640 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5641 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5642 }
5643 hwrm_req_drop(bp, req);
5644 return rc;
5645 }
5646
5647 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5648 {
5649 struct hwrm_func_drv_unrgtr_input *req;
5650 int rc;
5651
5652 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5653 return 0;
5654
5655 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5656 if (rc)
5657 return rc;
5658 return hwrm_req_send(bp, req);
5659 }
5660
5661 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5662
5663 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5664 {
5665 struct hwrm_tunnel_dst_port_free_input *req;
5666 int rc;
5667
5668 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5669 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5670 return 0;
5671 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5672 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5673 return 0;
5674
5675 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5676 if (rc)
5677 return rc;
5678
5679 req->tunnel_type = tunnel_type;
5680
5681 switch (tunnel_type) {
5682 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5683 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5684 bp->vxlan_port = 0;
5685 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5686 break;
5687 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5688 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5689 bp->nge_port = 0;
5690 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5691 break;
5692 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5693 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5694 bp->vxlan_gpe_port = 0;
5695 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5696 break;
5697 default:
5698 break;
5699 }
5700
5701 rc = hwrm_req_send(bp, req);
5702 if (rc)
5703 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5704 rc);
5705 if (bp->flags & BNXT_FLAG_TPA)
5706 bnxt_set_tpa(bp, true);
5707 return rc;
5708 }
5709
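/* Program a UDP tunnel destination port (VXLAN, GENEVE or VXLAN-GPE)
 * into the firmware, cache the returned firmware port id, and refresh
 * the TPA configuration if TPA is enabled.
 */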
5710 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5711 u8 tunnel_type)
5712 {
5713 struct hwrm_tunnel_dst_port_alloc_output *resp;
5714 struct hwrm_tunnel_dst_port_alloc_input *req;
5715 int rc;
5716
5717 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5718 if (rc)
5719 return rc;
5720
5721 req->tunnel_type = tunnel_type;
5722 req->tunnel_dst_port_val = port;
5723
5724 resp = hwrm_req_hold(bp, req);
5725 rc = hwrm_req_send(bp, req);
5726 if (rc) {
5727 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5728 rc);
5729 goto err_out;
5730 }
5731
5732 switch (tunnel_type) {
5733 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5734 bp->vxlan_port = port;
5735 bp->vxlan_fw_dst_port_id =
5736 le16_to_cpu(resp->tunnel_dst_port_id);
5737 break;
5738 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5739 bp->nge_port = port;
5740 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5741 break;
5742 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5743 bp->vxlan_gpe_port = port;
5744 bp->vxlan_gpe_fw_dst_port_id =
5745 le16_to_cpu(resp->tunnel_dst_port_id);
5746 break;
5747 default:
5748 break;
5749 }
5750 if (bp->flags & BNXT_FLAG_TPA)
5751 bnxt_set_tpa(bp, true);
5752
5753 err_out:
5754 hwrm_req_drop(bp, req);
5755 return rc;
5756 }
5757
5758 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5759 {
5760 struct hwrm_cfa_l2_set_rx_mask_input *req;
5761 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5762 int rc;
5763
5764 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5765 if (rc)
5766 return rc;
5767
5768 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5769 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5770 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5771 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5772 }
5773 req->mask = cpu_to_le32(vnic->rx_mask);
5774 return hwrm_req_send_silent(bp, req);
5775 }
5776
5777 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5778 {
5779 if (!atomic_dec_and_test(&fltr->refcnt))
5780 return;
5781 spin_lock_bh(&bp->ntp_fltr_lock);
5782 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5783 spin_unlock_bh(&bp->ntp_fltr_lock);
5784 return;
5785 }
5786 hlist_del_rcu(&fltr->base.hash);
5787 bnxt_del_one_usr_fltr(bp, &fltr->base);
5788 if (fltr->base.flags) {
5789 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5790 bp->ntp_fltr_count--;
5791 }
5792 spin_unlock_bh(&bp->ntp_fltr_lock);
5793 kfree_rcu(fltr, base.rcu);
5794 }
5795
5796 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5797 struct bnxt_l2_key *key,
5798 u32 idx)
5799 {
5800 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5801 struct bnxt_l2_filter *fltr;
5802
5803 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5804 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5805
5806 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5807 l2_key->vlan == key->vlan)
5808 return fltr;
5809 }
5810 return NULL;
5811 }
5812
5813 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5814 struct bnxt_l2_key *key,
5815 u32 idx)
5816 {
5817 struct bnxt_l2_filter *fltr = NULL;
5818
5819 rcu_read_lock();
5820 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5821 if (fltr)
5822 atomic_inc(&fltr->refcnt);
5823 rcu_read_unlock();
5824 return fltr;
5825 }
5826
5827 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5828 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5829 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5830 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5831 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5832
5833 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5834 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5835 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5836 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5837 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5838
5839 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5840 {
5841 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5842 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5843 return sizeof(fkeys->addrs.v4addrs) +
5844 sizeof(fkeys->ports);
5845
5846 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5847 return sizeof(fkeys->addrs.v4addrs);
5848 }
5849
5850 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5851 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5852 return sizeof(fkeys->addrs.v6addrs) +
5853 sizeof(fkeys->ports);
5854
5855 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5856 return sizeof(fkeys->addrs.v6addrs);
5857 }
5858
5859 return 0;
5860 }
5861
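/* Software Toeplitz hash over the flow tuple, fed with the RSS hash key
 * starting at byte 8. Only the upper 32 bits of the 64-bit accumulator
 * are valid; the result is masked to the ntuple filter hash table size.
 */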
5862 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5863 const unsigned char *key)
5864 {
5865 u64 prefix = bp->toeplitz_prefix, hash = 0;
5866 struct bnxt_ipv4_tuple tuple4;
5867 struct bnxt_ipv6_tuple tuple6;
5868 int i, j, len = 0;
5869 u8 *four_tuple;
5870
5871 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5872 if (!len)
5873 return 0;
5874
5875 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5876 tuple4.v4addrs = fkeys->addrs.v4addrs;
5877 tuple4.ports = fkeys->ports;
5878 four_tuple = (unsigned char *)&tuple4;
5879 } else {
5880 tuple6.v6addrs = fkeys->addrs.v6addrs;
5881 tuple6.ports = fkeys->ports;
5882 four_tuple = (unsigned char *)&tuple6;
5883 }
5884
5885 for (i = 0, j = 8; i < len; i++, j++) {
5886 u8 byte = four_tuple[i];
5887 int bit;
5888
5889 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5890 if (byte & 0x80)
5891 hash ^= prefix;
5892 }
5893 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5894 }
5895
5896 /* The valid part of the hash is in the upper 32 bits. */
5897 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5898 }
5899
5900 #ifdef CONFIG_RFS_ACCEL
5901 static struct bnxt_l2_filter *
5902 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5903 {
5904 struct bnxt_l2_filter *fltr;
5905 u32 idx;
5906
5907 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5908 BNXT_L2_FLTR_HASH_MASK;
5909 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5910 return fltr;
5911 }
5912 #endif
5913
5914 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5915 struct bnxt_l2_key *key, u32 idx)
5916 {
5917 struct hlist_head *head;
5918
5919 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5920 fltr->l2_key.vlan = key->vlan;
5921 fltr->base.type = BNXT_FLTR_TYPE_L2;
5922 if (fltr->base.flags) {
5923 int bit_id;
5924
5925 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5926 bp->max_fltr, 0);
5927 if (bit_id < 0)
5928 return -ENOMEM;
5929 fltr->base.sw_id = (u16)bit_id;
5930 bp->ntp_fltr_count++;
5931 }
5932 head = &bp->l2_fltr_hash_tbl[idx];
5933 hlist_add_head_rcu(&fltr->base.hash, head);
5934 bnxt_insert_usr_fltr(bp, &fltr->base);
5935 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
5936 atomic_set(&fltr->refcnt, 1);
5937 return 0;
5938 }
5939
5940 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5941 struct bnxt_l2_key *key,
5942 gfp_t gfp)
5943 {
5944 struct bnxt_l2_filter *fltr;
5945 u32 idx;
5946 int rc;
5947
5948 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5949 BNXT_L2_FLTR_HASH_MASK;
5950 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5951 if (fltr)
5952 return fltr;
5953
5954 fltr = kzalloc(sizeof(*fltr), gfp);
5955 if (!fltr)
5956 return ERR_PTR(-ENOMEM);
5957 spin_lock_bh(&bp->ntp_fltr_lock);
5958 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5959 spin_unlock_bh(&bp->ntp_fltr_lock);
5960 if (rc) {
5961 bnxt_del_l2_filter(bp, fltr);
5962 fltr = ERR_PTR(rc);
5963 }
5964 return fltr;
5965 }
5966
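/* Like bnxt_alloc_l2_filter(), but usable in atomic context and fails
 * with -EEXIST if a matching filter already exists instead of taking a
 * reference on it.
 */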
5967 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
5968 struct bnxt_l2_key *key,
5969 u16 flags)
5970 {
5971 struct bnxt_l2_filter *fltr;
5972 u32 idx;
5973 int rc;
5974
5975 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5976 BNXT_L2_FLTR_HASH_MASK;
5977 spin_lock_bh(&bp->ntp_fltr_lock);
5978 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5979 if (fltr) {
5980 fltr = ERR_PTR(-EEXIST);
5981 goto l2_filter_exit;
5982 }
5983 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
5984 if (!fltr) {
5985 fltr = ERR_PTR(-ENOMEM);
5986 goto l2_filter_exit;
5987 }
5988 fltr->base.flags = flags;
5989 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5990 if (rc) {
5991 spin_unlock_bh(&bp->ntp_fltr_lock);
5992 bnxt_del_l2_filter(bp, fltr);
5993 return ERR_PTR(rc);
5994 }
5995
5996 l2_filter_exit:
5997 spin_unlock_bh(&bp->ntp_fltr_lock);
5998 return fltr;
5999 }
6000
6001 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6002 {
6003 #ifdef CONFIG_BNXT_SRIOV
6004 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6005
6006 return vf->fw_fid;
6007 #else
6008 return INVALID_HW_RING_ID;
6009 #endif
6010 }
6011
6012 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6013 {
6014 struct hwrm_cfa_l2_filter_free_input *req;
6015 u16 target_id = 0xffff;
6016 int rc;
6017
6018 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6019 struct bnxt_pf_info *pf = &bp->pf;
6020
6021 if (fltr->base.vf_idx >= pf->active_vfs)
6022 return -EINVAL;
6023
6024 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6025 if (target_id == INVALID_HW_RING_ID)
6026 return -EINVAL;
6027 }
6028
6029 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6030 if (rc)
6031 return rc;
6032
6033 req->target_id = cpu_to_le16(target_id);
6034 req->l2_filter_id = fltr->base.filter_id;
6035 return hwrm_req_send(bp, req);
6036 }
6037
6038 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6039 {
6040 struct hwrm_cfa_l2_filter_alloc_output *resp;
6041 struct hwrm_cfa_l2_filter_alloc_input *req;
6042 u16 target_id = 0xffff;
6043 int rc;
6044
6045 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6046 struct bnxt_pf_info *pf = &bp->pf;
6047
6048 if (fltr->base.vf_idx >= pf->active_vfs)
6049 return -EINVAL;
6050
6051 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6052 }
6053 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6054 if (rc)
6055 return rc;
6056
6057 req->target_id = cpu_to_le16(target_id);
6058 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6059
6060 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6061 req->flags |=
6062 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6063 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6064 req->enables =
6065 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6066 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6067 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6068 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6069 eth_broadcast_addr(req->l2_addr_mask);
6070
6071 if (fltr->l2_key.vlan) {
6072 req->enables |=
6073 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6074 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6075 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6076 req->num_vlans = 1;
6077 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6078 req->l2_ivlan_mask = cpu_to_le16(0xfff);
6079 }
6080
6081 resp = hwrm_req_hold(bp, req);
6082 rc = hwrm_req_send(bp, req);
6083 if (!rc) {
6084 fltr->base.filter_id = resp->l2_filter_id;
6085 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6086 }
6087 hwrm_req_drop(bp, req);
6088 return rc;
6089 }
6090
6091 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6092 struct bnxt_ntuple_filter *fltr)
6093 {
6094 struct hwrm_cfa_ntuple_filter_free_input *req;
6095 int rc;
6096
6097 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6098 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6099 if (rc)
6100 return rc;
6101
6102 req->ntuple_filter_id = fltr->base.filter_id;
6103 return hwrm_req_send(bp, req);
6104 }
6105
6106 #define BNXT_NTP_FLTR_FLAGS \
6107 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
6108 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
6109 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
6110 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
6111 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
6112 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
6113 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
6114 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
6115 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
6116 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
6117 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
6118 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
6119 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6120
6121 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
6122 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6123
6124 void bnxt_fill_ipv6_mask(__be32 mask[4])
6125 {
6126 int i;
6127
6128 for (i = 0; i < 4; i++)
6129 mask[i] = cpu_to_be32(~0);
6130 }
6131
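/* Select the destination for an ntuple filter: the VNIC of a custom RSS
 * context, the dedicated ntuple VNIC plus an RFS ring table index, or the
 * RX queue index directly when the ntuple VNIC is not supported.
 */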
6132 static void
6133 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6134 struct hwrm_cfa_ntuple_filter_alloc_input *req,
6135 struct bnxt_ntuple_filter *fltr)
6136 {
6137 u16 rxq = fltr->base.rxq;
6138
6139 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6140 struct ethtool_rxfh_context *ctx;
6141 struct bnxt_rss_ctx *rss_ctx;
6142 struct bnxt_vnic_info *vnic;
6143
6144 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6145 fltr->base.fw_vnic_id);
6146 if (ctx) {
6147 rss_ctx = ethtool_rxfh_context_priv(ctx);
6148 vnic = &rss_ctx->vnic;
6149
6150 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6151 }
6152 return;
6153 }
6154 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6155 struct bnxt_vnic_info *vnic;
6156 u32 enables;
6157
6158 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6159 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6160 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6161 req->enables |= cpu_to_le32(enables);
6162 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6163 } else {
6164 u32 flags;
6165
6166 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6167 req->flags |= cpu_to_le32(flags);
6168 req->dst_id = cpu_to_le16(rxq);
6169 }
6170 }
6171
6172 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6173 struct bnxt_ntuple_filter *fltr)
6174 {
6175 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6176 struct hwrm_cfa_ntuple_filter_alloc_input *req;
6177 struct bnxt_flow_masks *masks = &fltr->fmasks;
6178 struct flow_keys *keys = &fltr->fkeys;
6179 struct bnxt_l2_filter *l2_fltr;
6180 struct bnxt_vnic_info *vnic;
6181 int rc;
6182
6183 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6184 if (rc)
6185 return rc;
6186
6187 l2_fltr = fltr->l2_fltr;
6188 req->l2_filter_id = l2_fltr->base.filter_id;
6189
6190 if (fltr->base.flags & BNXT_ACT_DROP) {
6191 req->flags =
6192 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6193 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6194 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6195 } else {
6196 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6197 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6198 }
6199 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6200
6201 req->ethertype = htons(ETH_P_IP);
6202 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6203 req->ip_protocol = keys->basic.ip_proto;
6204
6205 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6206 req->ethertype = htons(ETH_P_IPV6);
6207 req->ip_addr_type =
6208 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6209 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6210 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6211 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6212 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6213 } else {
6214 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6215 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6216 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6217 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6218 }
6219 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6220 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6221 req->tunnel_type =
6222 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6223 }
6224
6225 req->src_port = keys->ports.src;
6226 req->src_port_mask = masks->ports.src;
6227 req->dst_port = keys->ports.dst;
6228 req->dst_port_mask = masks->ports.dst;
6229
6230 resp = hwrm_req_hold(bp, req);
6231 rc = hwrm_req_send(bp, req);
6232 if (!rc)
6233 fltr->base.filter_id = resp->ntuple_filter_id;
6234 hwrm_req_drop(bp, req);
6235 return rc;
6236 }
6237
6238 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6239 const u8 *mac_addr)
6240 {
6241 struct bnxt_l2_filter *fltr;
6242 struct bnxt_l2_key key;
6243 int rc;
6244
6245 ether_addr_copy(key.dst_mac_addr, mac_addr);
6246 key.vlan = 0;
6247 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6248 if (IS_ERR(fltr))
6249 return PTR_ERR(fltr);
6250
6251 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6252 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6253 if (rc)
6254 bnxt_del_l2_filter(bp, fltr);
6255 else
6256 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6257 return rc;
6258 }
6259
6260 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6261 {
6262 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6263
6264 /* Any associated ntuple filters will also be cleared by firmware. */
6265 for (i = 0; i < num_of_vnics; i++) {
6266 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6267
6268 for (j = 0; j < vnic->uc_filter_count; j++) {
6269 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6270
6271 bnxt_hwrm_l2_filter_free(bp, fltr);
6272 bnxt_del_l2_filter(bp, fltr);
6273 }
6274 vnic->uc_filter_count = 0;
6275 }
6276 }
6277
6278 #define BNXT_DFLT_TUNL_TPA_BMAP \
6279 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6280 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6281 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6282
6283 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6284 struct hwrm_vnic_tpa_cfg_input *req)
6285 {
6286 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6287
6288 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6289 return;
6290
6291 if (bp->vxlan_port)
6292 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6293 if (bp->vxlan_gpe_port)
6294 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6295 if (bp->nge_port)
6296 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6297
6298 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6299 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6300 }
6301
6302 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6303 u32 tpa_flags)
6304 {
6305 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6306 struct hwrm_vnic_tpa_cfg_input *req;
6307 int rc;
6308
6309 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6310 return 0;
6311
6312 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6313 if (rc)
6314 return rc;
6315
6316 if (tpa_flags) {
6317 u16 mss = bp->dev->mtu - 40;
6318 u32 nsegs, n, segs = 0, flags;
6319
6320 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6321 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6322 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6323 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6324 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6325 if (tpa_flags & BNXT_FLAG_GRO)
6326 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6327
6328 req->flags = cpu_to_le32(flags);
6329
6330 req->enables =
6331 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6332 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6333 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6334
6335 /* The number of segs is in log2 units, and the first packet is not
6336 * included as part of these units.
6337 */
6338 if (mss <= BNXT_RX_PAGE_SIZE) {
6339 n = BNXT_RX_PAGE_SIZE / mss;
6340 nsegs = (MAX_SKB_FRAGS - 1) * n;
6341 } else {
6342 n = mss / BNXT_RX_PAGE_SIZE;
6343 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6344 n++;
6345 nsegs = (MAX_SKB_FRAGS - n) / n;
6346 }
6347
6348 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6349 segs = MAX_TPA_SEGS_P5;
6350 max_aggs = bp->max_tpa;
6351 } else {
6352 segs = ilog2(nsegs);
6353 }
6354 req->max_agg_segs = cpu_to_le16(segs);
6355 req->max_aggs = cpu_to_le16(max_aggs);
6356
6357 req->min_agg_len = cpu_to_le32(512);
6358 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6359 }
6360 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6361
6362 return hwrm_req_send(bp, req);
6363 }
6364
6365 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6366 {
6367 struct bnxt_ring_grp_info *grp_info;
6368
6369 grp_info = &bp->grp_info[ring->grp_idx];
6370 return grp_info->cp_fw_ring_id;
6371 }
6372
6373 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6374 {
6375 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6376 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6377 else
6378 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6379 }
6380
6381 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6382 {
6383 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6384 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6385 else
6386 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6387 }
6388
6389 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6390 {
6391 int entries;
6392
6393 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6394 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6395 else
6396 entries = HW_HASH_INDEX_SIZE;
6397
6398 bp->rss_indir_tbl_entries = entries;
6399 bp->rss_indir_tbl =
6400 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6401 if (!bp->rss_indir_tbl)
6402 return -ENOMEM;
6403
6404 return 0;
6405 }
6406
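/* Fill the default round-robin RSS indirection table, either the driver's
 * own table or that of a user-created RSS context, and zero any trailing
 * pad entries.
 */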
6407 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6408 struct ethtool_rxfh_context *rss_ctx)
6409 {
6410 u16 max_rings, max_entries, pad, i;
6411 u32 *rss_indir_tbl;
6412
6413 if (!bp->rx_nr_rings)
6414 return;
6415
6416 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6417 max_rings = bp->rx_nr_rings - 1;
6418 else
6419 max_rings = bp->rx_nr_rings;
6420
6421 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6422 if (rss_ctx)
6423 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6424 else
6425 rss_indir_tbl = &bp->rss_indir_tbl[0];
6426
6427 for (i = 0; i < max_entries; i++)
6428 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6429
6430 pad = bp->rss_indir_tbl_entries - max_entries;
6431 if (pad)
6432 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6433 }
6434
6435 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6436 {
6437 u32 i, tbl_size, max_ring = 0;
6438
6439 if (!bp->rss_indir_tbl)
6440 return 0;
6441
6442 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6443 for (i = 0; i < tbl_size; i++)
6444 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6445 return max_ring;
6446 }
6447
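/* Return the number of RSS contexts needed for the given number of RX
 * rings: one per BNXT_RSS_TABLE_ENTRIES_P5 rings on P5+ chips, two on
 * Nitro A0, and one otherwise.
 */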
6448 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6449 {
6450 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6451 if (!rx_rings)
6452 return 0;
6453 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6454 BNXT_RSS_TABLE_ENTRIES_P5);
6455 }
6456 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6457 return 2;
6458 return 1;
6459 }
6460
6461 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6462 {
6463 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6464 u16 i, j;
6465
6466 /* Fill the RSS indirection table with ring group ids */
6467 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6468 if (!no_rss)
6469 j = bp->rss_indir_tbl[i];
6470 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6471 }
6472 }
6473
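/* On P5+ chips the RSS table holds (RX ring id, completion ring id) pairs
 * taken from the indirection table instead of ring group ids.
 */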
6474 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6475 struct bnxt_vnic_info *vnic)
6476 {
6477 __le16 *ring_tbl = vnic->rss_table;
6478 struct bnxt_rx_ring_info *rxr;
6479 u16 tbl_size, i;
6480
6481 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6482
6483 for (i = 0; i < tbl_size; i++) {
6484 u16 ring_id, j;
6485
6486 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6487 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6488 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6489 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6490 else
6491 j = bp->rss_indir_tbl[i];
6492 rxr = &bp->rx_ring[j];
6493
6494 ring_id = rxr->rx_ring_struct.fw_ring_id;
6495 *ring_tbl++ = cpu_to_le16(ring_id);
6496 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6497 *ring_tbl++ = cpu_to_le16(ring_id);
6498 }
6499 }
6500
6501 static void
6502 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6503 struct bnxt_vnic_info *vnic)
6504 {
6505 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6506 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6507 if (bp->flags & BNXT_FLAG_CHIP_P7)
6508 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6509 } else {
6510 bnxt_fill_hw_rss_tbl(bp, vnic);
6511 }
6512
6513 if (bp->rss_hash_delta) {
6514 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6515 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6516 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6517 else
6518 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6519 } else {
6520 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6521 }
6522 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6523 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6524 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6525 }
6526
6527 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6528 bool set_rss)
6529 {
6530 struct hwrm_vnic_rss_cfg_input *req;
6531 int rc;
6532
6533 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6534 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6535 return 0;
6536
6537 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6538 if (rc)
6539 return rc;
6540
6541 if (set_rss)
6542 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6543 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6544 return hwrm_req_send(bp, req);
6545 }
6546
6547 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6548 struct bnxt_vnic_info *vnic, bool set_rss)
6549 {
6550 struct hwrm_vnic_rss_cfg_input *req;
6551 dma_addr_t ring_tbl_map;
6552 u32 i, nr_ctxs;
6553 int rc;
6554
6555 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6556 if (rc)
6557 return rc;
6558
6559 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6560 if (!set_rss)
6561 return hwrm_req_send(bp, req);
6562
6563 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6564 ring_tbl_map = vnic->rss_table_dma_addr;
6565 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6566
6567 hwrm_req_hold(bp, req);
6568 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6569 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6570 req->ring_table_pair_index = i;
6571 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6572 rc = hwrm_req_send(bp, req);
6573 if (rc)
6574 goto exit;
6575 }
6576
6577 exit:
6578 hwrm_req_drop(bp, req);
6579 return rc;
6580 }
6581
6582 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6583 {
6584 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6585 struct hwrm_vnic_rss_qcfg_output *resp;
6586 struct hwrm_vnic_rss_qcfg_input *req;
6587
6588 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6589 return;
6590
6591 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6592 /* All contexts are configured to the same hash_type; context zero always exists. */
6593 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6594 resp = hwrm_req_hold(bp, req);
6595 if (!hwrm_req_send(bp, req)) {
6596 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6597 bp->rss_hash_delta = 0;
6598 }
6599 hwrm_req_drop(bp, req);
6600 }
6601
6602 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6603 {
6604 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6605 struct hwrm_vnic_plcmodes_cfg_input *req;
6606 int rc;
6607
6608 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6609 if (rc)
6610 return rc;
6611
6612 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6613 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6614 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6615
6616 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6617 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6618 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6619 req->enables |=
6620 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6621 req->hds_threshold = cpu_to_le16(hds_thresh);
6622 }
6623 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6624 return hwrm_req_send(bp, req);
6625 }
6626
6627 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6628 struct bnxt_vnic_info *vnic,
6629 u16 ctx_idx)
6630 {
6631 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6632
6633 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6634 return;
6635
6636 req->rss_cos_lb_ctx_id =
6637 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6638
6639 hwrm_req_send(bp, req);
6640 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6641 }
6642
6643 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6644 {
6645 int i, j;
6646
6647 for (i = 0; i < bp->nr_vnics; i++) {
6648 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6649
6650 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6651 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6652 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6653 }
6654 }
6655 bp->rsscos_nr_ctxs = 0;
6656 }
6657
6658 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6659 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6660 {
6661 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6662 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6663 int rc;
6664
6665 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6666 if (rc)
6667 return rc;
6668
6669 resp = hwrm_req_hold(bp, req);
6670 rc = hwrm_req_send(bp, req);
6671 if (!rc)
6672 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6673 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6674 hwrm_req_drop(bp, req);
6675
6676 return rc;
6677 }
6678
6679 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6680 {
6681 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6682 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6683 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6684 }
6685
6686 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6687 {
6688 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6689 struct hwrm_vnic_cfg_input *req;
6690 unsigned int ring = 0, grp_idx;
6691 u16 def_vlan = 0;
6692 int rc;
6693
6694 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6695 if (rc)
6696 return rc;
6697
6698 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6699 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6700
6701 req->default_rx_ring_id =
6702 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6703 req->default_cmpl_ring_id =
6704 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6705 req->enables =
6706 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6707 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6708 goto vnic_mru;
6709 }
6710 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6711 /* Only RSS is supported for now. TBD: COS & LB */
6712 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6713 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6714 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6715 VNIC_CFG_REQ_ENABLES_MRU);
6716 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6717 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6718 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6719 VNIC_CFG_REQ_ENABLES_MRU);
6720 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6721 } else {
6722 req->rss_rule = cpu_to_le16(0xffff);
6723 }
6724
6725 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6726 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6727 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6728 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6729 } else {
6730 req->cos_rule = cpu_to_le16(0xffff);
6731 }
6732
6733 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6734 ring = 0;
6735 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6736 ring = vnic->vnic_id - 1;
6737 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6738 ring = bp->rx_nr_rings - 1;
6739
6740 grp_idx = bp->rx_ring[ring].bnapi->index;
6741 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6742 req->lb_rule = cpu_to_le16(0xffff);
6743 vnic_mru:
6744 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
6745 req->mru = cpu_to_le16(vnic->mru);
6746
6747 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6748 #ifdef CONFIG_BNXT_SRIOV
6749 if (BNXT_VF(bp))
6750 def_vlan = bp->vf.vlan;
6751 #endif
6752 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6753 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6754 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6755 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6756
6757 return hwrm_req_send(bp, req);
6758 }
6759
6760 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6761 struct bnxt_vnic_info *vnic)
6762 {
6763 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6764 struct hwrm_vnic_free_input *req;
6765
6766 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6767 return;
6768
6769 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6770
6771 hwrm_req_send(bp, req);
6772 vnic->fw_vnic_id = INVALID_HW_RING_ID;
6773 }
6774 }
6775
6776 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6777 {
6778 u16 i;
6779
6780 for (i = 0; i < bp->nr_vnics; i++)
6781 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6782 }
6783
6784 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6785 unsigned int start_rx_ring_idx,
6786 unsigned int nr_rings)
6787 {
6788 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6789 struct hwrm_vnic_alloc_output *resp;
6790 struct hwrm_vnic_alloc_input *req;
6791 int rc;
6792
6793 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6794 if (rc)
6795 return rc;
6796
6797 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6798 goto vnic_no_ring_grps;
6799
6800 /* map ring groups to this vnic */
6801 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6802 grp_idx = bp->rx_ring[i].bnapi->index;
6803 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6804 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6805 j, nr_rings);
6806 break;
6807 }
6808 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6809 }
6810
6811 vnic_no_ring_grps:
6812 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6813 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6814 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6815 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6816
6817 resp = hwrm_req_hold(bp, req);
6818 rc = hwrm_req_send(bp, req);
6819 if (!rc)
6820 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6821 hwrm_req_drop(bp, req);
6822 return rc;
6823 }
6824
6825 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6826 {
6827 struct hwrm_vnic_qcaps_output *resp;
6828 struct hwrm_vnic_qcaps_input *req;
6829 int rc;
6830
6831 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6832 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6833 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6834 if (bp->hwrm_spec_code < 0x10600)
6835 return 0;
6836
6837 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6838 if (rc)
6839 return rc;
6840
6841 resp = hwrm_req_hold(bp, req);
6842 rc = hwrm_req_send(bp, req);
6843 if (!rc) {
6844 u32 flags = le32_to_cpu(resp->flags);
6845
6846 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6847 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6848 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6849 if (flags &
6850 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6851 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6852
6853 /* Older P5 fw before EXT_HW_STATS support did not set
6854 * VLAN_STRIP_CAP properly.
6855 */
6856 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6857 (BNXT_CHIP_P5(bp) &&
6858 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6859 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6860 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6861 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6862 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6863 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6864 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6865 if (bp->max_tpa_v2) {
6866 if (BNXT_CHIP_P5(bp))
6867 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6868 else
6869 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6870 }
6871 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6872 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6873 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6874 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6875 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6876 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6877 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6878 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6879 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6880 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6881 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
6882 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
6883 }
6884 hwrm_req_drop(bp, req);
6885 return rc;
6886 }
6887
6888 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6889 {
6890 struct hwrm_ring_grp_alloc_output *resp;
6891 struct hwrm_ring_grp_alloc_input *req;
6892 int rc;
6893 u16 i;
6894
6895 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6896 return 0;
6897
6898 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6899 if (rc)
6900 return rc;
6901
6902 resp = hwrm_req_hold(bp, req);
6903 for (i = 0; i < bp->rx_nr_rings; i++) {
6904 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6905
6906 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6907 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6908 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6909 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6910
6911 rc = hwrm_req_send(bp, req);
6912
6913 if (rc)
6914 break;
6915
6916 bp->grp_info[grp_idx].fw_grp_id =
6917 le32_to_cpu(resp->ring_group_id);
6918 }
6919 hwrm_req_drop(bp, req);
6920 return rc;
6921 }
6922
6923 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
6924 {
6925 struct hwrm_ring_grp_free_input *req;
6926 u16 i;
6927
6928 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
6929 return;
6930
6931 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
6932 return;
6933
6934 hwrm_req_hold(bp, req);
6935 for (i = 0; i < bp->cp_nr_rings; i++) {
6936 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
6937 continue;
6938 req->ring_group_id =
6939 cpu_to_le32(bp->grp_info[i].fw_grp_id);
6940
6941 hwrm_req_send(bp, req);
6942 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
6943 }
6944 hwrm_req_drop(bp, req);
6945 }
6946
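/* Send HWRM_RING_ALLOC for the given ring type and, on success, store the
 * firmware-assigned ring id in ring->fw_ring_id.
 */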
6947 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
6948 struct bnxt_ring_struct *ring,
6949 u32 ring_type, u32 map_index)
6950 {
6951 struct hwrm_ring_alloc_output *resp;
6952 struct hwrm_ring_alloc_input *req;
6953 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
6954 struct bnxt_ring_grp_info *grp_info;
6955 int rc, err = 0;
6956 u16 ring_id;
6957
6958 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6959 if (rc)
6960 goto exit;
6961
6962 req->enables = 0;
6963 if (rmem->nr_pages > 1) {
6964 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
6965 /* Page size is in log2 units */
6966 req->page_size = BNXT_PAGE_SHIFT;
6967 req->page_tbl_depth = 1;
6968 } else {
6969 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
6970 }
6971 req->fbo = 0;
6972 /* Association of ring index with doorbell index and MSIX number */
6973 req->logical_id = cpu_to_le16(map_index);
6974
6975 switch (ring_type) {
6976 case HWRM_RING_ALLOC_TX: {
6977 struct bnxt_tx_ring_info *txr;
6978 u16 flags = 0;
6979
6980 txr = container_of(ring, struct bnxt_tx_ring_info,
6981 tx_ring_struct);
6982 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
6983 /* Association of transmit ring with completion ring */
6984 grp_info = &bp->grp_info[ring->grp_idx];
6985 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
6986 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
6987 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6988 req->queue_id = cpu_to_le16(ring->queue_id);
6989 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
6990 req->cmpl_coal_cnt =
6991 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
6992 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
6993 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
6994 req->flags = cpu_to_le16(flags);
6995 break;
6996 }
6997 case HWRM_RING_ALLOC_RX:
6998 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6999 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
7000 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7001 u16 flags = 0;
7002
7003 /* Association of rx ring with stats context */
7004 grp_info = &bp->grp_info[ring->grp_idx];
7005 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7006 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7007 req->enables |= cpu_to_le32(
7008 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
7009 if (NET_IP_ALIGN == 2)
7010 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
7011 req->flags = cpu_to_le16(flags);
7012 }
7013 break;
7014 case HWRM_RING_ALLOC_AGG:
7015 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7016 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7017 /* Association of agg ring with rx ring */
7018 grp_info = &bp->grp_info[ring->grp_idx];
7019 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7020 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
7021 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7022 req->enables |= cpu_to_le32(
7023 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
7024 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
7025 } else {
7026 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7027 }
7028 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
7029 break;
7030 case HWRM_RING_ALLOC_CMPL:
7031 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7032 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7033 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7034 /* Association of cp ring with nq */
7035 grp_info = &bp->grp_info[map_index];
7036 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7037 req->cq_handle = cpu_to_le64(ring->handle);
7038 req->enables |= cpu_to_le32(
7039 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7040 } else {
7041 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7042 }
7043 break;
7044 case HWRM_RING_ALLOC_NQ:
7045 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7046 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7047 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7048 break;
7049 default:
7050 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7051 ring_type);
7052 return -1;
7053 }
7054
7055 resp = hwrm_req_hold(bp, req);
7056 rc = hwrm_req_send(bp, req);
7057 err = le16_to_cpu(resp->error_code);
7058 ring_id = le16_to_cpu(resp->ring_id);
7059 hwrm_req_drop(bp, req);
7060
7061 exit:
7062 if (rc || err) {
7063 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7064 ring_type, rc, err);
7065 return -EIO;
7066 }
7067 ring->fw_ring_id = ring_id;
7068 return rc;
7069 }
7070
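/* Tell the firmware which completion ring (@idx) should receive async
 * event notifications.  The PF configures this through HWRM_FUNC_CFG,
 * while a VF uses HWRM_FUNC_VF_CFG.
 */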
7071 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7072 {
7073 int rc;
7074
7075 if (BNXT_PF(bp)) {
7076 struct hwrm_func_cfg_input *req;
7077
7078 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7079 if (rc)
7080 return rc;
7081
7082 req->fid = cpu_to_le16(0xffff);
7083 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7084 req->async_event_cr = cpu_to_le16(idx);
7085 return hwrm_req_send(bp, req);
7086 } else {
7087 struct hwrm_func_vf_cfg_input *req;
7088
7089 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7090 if (rc)
7091 return rc;
7092
7093 req->enables =
7094 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7095 req->async_event_cr = cpu_to_le16(idx);
7096 return hwrm_req_send(bp, req);
7097 }
7098 }
7099
7100 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7101 u32 ring_type)
7102 {
7103 switch (ring_type) {
7104 case HWRM_RING_ALLOC_TX:
7105 db->db_ring_mask = bp->tx_ring_mask;
7106 break;
7107 case HWRM_RING_ALLOC_RX:
7108 db->db_ring_mask = bp->rx_ring_mask;
7109 break;
7110 case HWRM_RING_ALLOC_AGG:
7111 db->db_ring_mask = bp->rx_agg_ring_mask;
7112 break;
7113 case HWRM_RING_ALLOC_CMPL:
7114 case HWRM_RING_ALLOC_NQ:
7115 db->db_ring_mask = bp->cp_ring_mask;
7116 break;
7117 }
7118 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7119 db->db_epoch_mask = db->db_ring_mask + 1;
7120 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7121 }
7122 }
7123
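/* Set up the doorbell info for a newly allocated ring.  P5_PLUS chips
 * use a 64-bit doorbell key that encodes the ring type and firmware
 * ring ID (XID); older chips use fixed 32-bit keys with a per-ring
 * doorbell located at map_idx * 0x80 in BAR1.
 */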
7124 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7125 u32 map_idx, u32 xid)
7126 {
7127 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7128 switch (ring_type) {
7129 case HWRM_RING_ALLOC_TX:
7130 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7131 break;
7132 case HWRM_RING_ALLOC_RX:
7133 case HWRM_RING_ALLOC_AGG:
7134 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7135 break;
7136 case HWRM_RING_ALLOC_CMPL:
7137 db->db_key64 = DBR_PATH_L2;
7138 break;
7139 case HWRM_RING_ALLOC_NQ:
7140 db->db_key64 = DBR_PATH_L2;
7141 break;
7142 }
7143 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7144
7145 if (bp->flags & BNXT_FLAG_CHIP_P7)
7146 db->db_key64 |= DBR_VALID;
7147
7148 db->doorbell = bp->bar1 + bp->db_offset;
7149 } else {
7150 db->doorbell = bp->bar1 + map_idx * 0x80;
7151 switch (ring_type) {
7152 case HWRM_RING_ALLOC_TX:
7153 db->db_key32 = DB_KEY_TX;
7154 break;
7155 case HWRM_RING_ALLOC_RX:
7156 case HWRM_RING_ALLOC_AGG:
7157 db->db_key32 = DB_KEY_RX;
7158 break;
7159 case HWRM_RING_ALLOC_CMPL:
7160 db->db_key32 = DB_KEY_CP;
7161 break;
7162 }
7163 }
7164 bnxt_set_db_mask(bp, db, ring_type);
7165 }
7166
7167 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7168 struct bnxt_rx_ring_info *rxr)
7169 {
7170 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7171 struct bnxt_napi *bnapi = rxr->bnapi;
7172 u32 type = HWRM_RING_ALLOC_RX;
7173 u32 map_idx = bnapi->index;
7174 int rc;
7175
7176 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7177 if (rc)
7178 return rc;
7179
7180 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7181 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7182
7183 return 0;
7184 }
7185
7186 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7187 struct bnxt_rx_ring_info *rxr)
7188 {
7189 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7190 u32 type = HWRM_RING_ALLOC_AGG;
7191 u32 grp_idx = ring->grp_idx;
7192 u32 map_idx;
7193 int rc;
7194
7195 map_idx = grp_idx + bp->rx_nr_rings;
7196 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7197 if (rc)
7198 return rc;
7199
7200 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7201 ring->fw_ring_id);
7202 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7203 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7204 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7205
7206 return 0;
7207 }
7208
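/* Allocate all firmware rings in dependency order: NQ rings (or L2
 * completion rings on older chips) first, then TX rings with their
 * per-ring completion rings on P5_PLUS, then RX rings, and finally
 * the RX aggregation rings.  The first NQ/CMPL ring is also set as
 * the async event completion ring.
 */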
7209 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7210 {
7211 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7212 int i, rc = 0;
7213 u32 type;
7214
7215 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7216 type = HWRM_RING_ALLOC_NQ;
7217 else
7218 type = HWRM_RING_ALLOC_CMPL;
7219 for (i = 0; i < bp->cp_nr_rings; i++) {
7220 struct bnxt_napi *bnapi = bp->bnapi[i];
7221 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7222 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7223 u32 map_idx = ring->map_idx;
7224 unsigned int vector;
7225
7226 vector = bp->irq_tbl[map_idx].vector;
7227 disable_irq_nosync(vector);
7228 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7229 if (rc) {
7230 enable_irq(vector);
7231 goto err_out;
7232 }
7233 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7234 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7235 enable_irq(vector);
7236 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7237
7238 if (!i) {
7239 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7240 if (rc)
7241 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7242 }
7243 }
7244
7245 type = HWRM_RING_ALLOC_TX;
7246 for (i = 0; i < bp->tx_nr_rings; i++) {
7247 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7248 struct bnxt_ring_struct *ring;
7249 u32 map_idx;
7250
7251 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7252 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
7253 struct bnxt_napi *bnapi = txr->bnapi;
7254 u32 type2 = HWRM_RING_ALLOC_CMPL;
7255
7256 ring = &cpr2->cp_ring_struct;
7257 ring->handle = BNXT_SET_NQ_HDL(cpr2);
7258 map_idx = bnapi->index;
7259 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
7260 if (rc)
7261 goto err_out;
7262 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
7263 ring->fw_ring_id);
7264 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
7265 }
7266 ring = &txr->tx_ring_struct;
7267 map_idx = i;
7268 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7269 if (rc)
7270 goto err_out;
7271 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
7272 }
7273
7274 for (i = 0; i < bp->rx_nr_rings; i++) {
7275 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7276
7277 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7278 if (rc)
7279 goto err_out;
7280 /* If we have agg rings, post agg buffers first. */
7281 if (!agg_rings)
7282 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7283 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7284 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
7285 struct bnxt_napi *bnapi = rxr->bnapi;
7286 u32 type2 = HWRM_RING_ALLOC_CMPL;
7287 struct bnxt_ring_struct *ring;
7288 u32 map_idx = bnapi->index;
7289
7290 ring = &cpr2->cp_ring_struct;
7291 ring->handle = BNXT_SET_NQ_HDL(cpr2);
7292 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
7293 if (rc)
7294 goto err_out;
7295 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
7296 ring->fw_ring_id);
7297 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
7298 }
7299 }
7300
7301 if (agg_rings) {
7302 for (i = 0; i < bp->rx_nr_rings; i++) {
7303 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7304 if (rc)
7305 goto err_out;
7306 }
7307 }
7308 err_out:
7309 return rc;
7310 }
7311
7312 static void bnxt_cancel_dim(struct bnxt *bp)
7313 {
7314 int i;
7315
7316 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7317 * if NAPI is enabled.
7318 */
7319 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7320 return;
7321
7322 /* Make sure NAPI sees that the VNIC is disabled */
7323 synchronize_net();
7324 for (i = 0; i < bp->rx_nr_rings; i++) {
7325 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7326 struct bnxt_napi *bnapi = rxr->bnapi;
7327
7328 cancel_work_sync(&bnapi->cp_ring.dim.work);
7329 }
7330 }
7331
7332 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7333 struct bnxt_ring_struct *ring,
7334 u32 ring_type, int cmpl_ring_id)
7335 {
7336 struct hwrm_ring_free_output *resp;
7337 struct hwrm_ring_free_input *req;
7338 u16 error_code = 0;
7339 int rc;
7340
7341 if (BNXT_NO_FW_ACCESS(bp))
7342 return 0;
7343
7344 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7345 if (rc)
7346 goto exit;
7347
7348 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7349 req->ring_type = ring_type;
7350 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7351
7352 resp = hwrm_req_hold(bp, req);
7353 rc = hwrm_req_send(bp, req);
7354 error_code = le16_to_cpu(resp->error_code);
7355 hwrm_req_drop(bp, req);
7356 exit:
7357 if (rc || error_code) {
7358 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7359 ring_type, rc, error_code);
7360 return -EIO;
7361 }
7362 return 0;
7363 }
7364
7365 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7366 struct bnxt_rx_ring_info *rxr,
7367 bool close_path)
7368 {
7369 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7370 u32 grp_idx = rxr->bnapi->index;
7371 u32 cmpl_ring_id;
7372
7373 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7374 return;
7375
7376 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7377 hwrm_ring_free_send_msg(bp, ring,
7378 RING_FREE_REQ_RING_TYPE_RX,
7379 close_path ? cmpl_ring_id :
7380 INVALID_HW_RING_ID);
7381 ring->fw_ring_id = INVALID_HW_RING_ID;
7382 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7383 }
7384
7385 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7386 struct bnxt_rx_ring_info *rxr,
7387 bool close_path)
7388 {
7389 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7390 u32 grp_idx = rxr->bnapi->index;
7391 u32 type, cmpl_ring_id;
7392
7393 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7394 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7395 else
7396 type = RING_FREE_REQ_RING_TYPE_RX;
7397
7398 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7399 return;
7400
7401 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7402 hwrm_ring_free_send_msg(bp, ring, type,
7403 close_path ? cmpl_ring_id :
7404 INVALID_HW_RING_ID);
7405 ring->fw_ring_id = INVALID_HW_RING_ID;
7406 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7407 }
7408
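/* Free all firmware rings: TX rings first, then RX and aggregation
 * rings, and the completion/NQ rings last after interrupts have been
 * disabled.
 */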
7409 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7410 {
7411 u32 type;
7412 int i;
7413
7414 if (!bp->bnapi)
7415 return;
7416
7417 for (i = 0; i < bp->tx_nr_rings; i++) {
7418 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7419 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7420
7421 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7422 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
7423
7424 hwrm_ring_free_send_msg(bp, ring,
7425 RING_FREE_REQ_RING_TYPE_TX,
7426 close_path ? cmpl_ring_id :
7427 INVALID_HW_RING_ID);
7428 ring->fw_ring_id = INVALID_HW_RING_ID;
7429 }
7430 }
7431
7432 bnxt_cancel_dim(bp);
7433 for (i = 0; i < bp->rx_nr_rings; i++) {
7434 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7435 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7436 }
7437
7438 /* The completion rings are about to be freed. After that the
7439 * IRQ doorbell will not work anymore. So we need to disable
7440 * IRQ here.
7441 */
7442 bnxt_disable_int_sync(bp);
7443
7444 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7445 type = RING_FREE_REQ_RING_TYPE_NQ;
7446 else
7447 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7448 for (i = 0; i < bp->cp_nr_rings; i++) {
7449 struct bnxt_napi *bnapi = bp->bnapi[i];
7450 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7451 struct bnxt_ring_struct *ring;
7452 int j;
7453
7454 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
7455 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
7456
7457 ring = &cpr2->cp_ring_struct;
7458 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7459 continue;
7460 hwrm_ring_free_send_msg(bp, ring,
7461 RING_FREE_REQ_RING_TYPE_L2_CMPL,
7462 INVALID_HW_RING_ID);
7463 ring->fw_ring_id = INVALID_HW_RING_ID;
7464 }
7465 ring = &cpr->cp_ring_struct;
7466 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7467 hwrm_ring_free_send_msg(bp, ring, type,
7468 INVALID_HW_RING_ID);
7469 ring->fw_ring_id = INVALID_HW_RING_ID;
7470 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7471 }
7472 }
7473 }
7474
7475 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7476 bool shared);
7477 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7478 bool shared);
7479
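/* Read back the resources currently reserved for this function via
 * HWRM_FUNC_QCFG and record them in bp->hw_resc.  On P5_PLUS chips
 * the RX/TX counts are trimmed if they exceed the reserved completion
 * rings.
 */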
7480 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7481 {
7482 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7483 struct hwrm_func_qcfg_output *resp;
7484 struct hwrm_func_qcfg_input *req;
7485 int rc;
7486
7487 if (bp->hwrm_spec_code < 0x10601)
7488 return 0;
7489
7490 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7491 if (rc)
7492 return rc;
7493
7494 req->fid = cpu_to_le16(0xffff);
7495 resp = hwrm_req_hold(bp, req);
7496 rc = hwrm_req_send(bp, req);
7497 if (rc) {
7498 hwrm_req_drop(bp, req);
7499 return rc;
7500 }
7501
7502 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7503 if (BNXT_NEW_RM(bp)) {
7504 u16 cp, stats;
7505
7506 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7507 hw_resc->resv_hw_ring_grps =
7508 le32_to_cpu(resp->alloc_hw_ring_grps);
7509 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7510 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7511 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7512 stats = le16_to_cpu(resp->alloc_stat_ctx);
7513 hw_resc->resv_irqs = cp;
7514 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7515 int rx = hw_resc->resv_rx_rings;
7516 int tx = hw_resc->resv_tx_rings;
7517
7518 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7519 rx >>= 1;
7520 if (cp < (rx + tx)) {
7521 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7522 if (rc)
7523 goto get_rings_exit;
7524 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7525 rx <<= 1;
7526 hw_resc->resv_rx_rings = rx;
7527 hw_resc->resv_tx_rings = tx;
7528 }
7529 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7530 hw_resc->resv_hw_ring_grps = rx;
7531 }
7532 hw_resc->resv_cp_rings = cp;
7533 hw_resc->resv_stat_ctxs = stats;
7534 }
7535 get_rings_exit:
7536 hwrm_req_drop(bp, req);
7537 return rc;
7538 }
7539
7540 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7541 {
7542 struct hwrm_func_qcfg_output *resp;
7543 struct hwrm_func_qcfg_input *req;
7544 int rc;
7545
7546 if (bp->hwrm_spec_code < 0x10601)
7547 return 0;
7548
7549 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7550 if (rc)
7551 return rc;
7552
7553 req->fid = cpu_to_le16(fid);
7554 resp = hwrm_req_hold(bp, req);
7555 rc = hwrm_req_send(bp, req);
7556 if (!rc)
7557 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7558
7559 hwrm_req_drop(bp, req);
7560 return rc;
7561 }
7562
7563 static bool bnxt_rfs_supported(struct bnxt *bp);
7564
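/* Build (but do not send) a FUNC_CFG request describing the rings,
 * stat contexts, VNICs, RSS contexts and MSI-X/ring groups that the
 * PF wants reserved.  The caller sends or drops the request.
 */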
7565 static struct hwrm_func_cfg_input *
7566 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7567 {
7568 struct hwrm_func_cfg_input *req;
7569 u32 enables = 0;
7570
7571 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7572 return NULL;
7573
7574 req->fid = cpu_to_le16(0xffff);
7575 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7576 req->num_tx_rings = cpu_to_le16(hwr->tx);
7577 if (BNXT_NEW_RM(bp)) {
7578 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7579 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7580 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7581 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7582 enables |= hwr->cp_p5 ?
7583 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7584 } else {
7585 enables |= hwr->cp ?
7586 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7587 enables |= hwr->grp ?
7588 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7589 }
7590 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7591 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7592 0;
7593 req->num_rx_rings = cpu_to_le16(hwr->rx);
7594 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7595 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7596 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7597 req->num_msix = cpu_to_le16(hwr->cp);
7598 } else {
7599 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7600 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7601 }
7602 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7603 req->num_vnics = cpu_to_le16(hwr->vnic);
7604 }
7605 req->enables = cpu_to_le32(enables);
7606 return req;
7607 }
7608
7609 static struct hwrm_func_vf_cfg_input *
7610 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7611 {
7612 struct hwrm_func_vf_cfg_input *req;
7613 u32 enables = 0;
7614
7615 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7616 return NULL;
7617
7618 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7619 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7620 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7621 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7622 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7623 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7624 enables |= hwr->cp_p5 ?
7625 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7626 } else {
7627 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7628 enables |= hwr->grp ?
7629 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7630 }
7631 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7632 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7633
7634 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7635 req->num_tx_rings = cpu_to_le16(hwr->tx);
7636 req->num_rx_rings = cpu_to_le16(hwr->rx);
7637 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7638 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7639 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7640 } else {
7641 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7642 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7643 }
7644 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7645 req->num_vnics = cpu_to_le16(hwr->vnic);
7646
7647 req->enables = cpu_to_le32(enables);
7648 return req;
7649 }
7650
7651 static int
7652 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7653 {
7654 struct hwrm_func_cfg_input *req;
7655 int rc;
7656
7657 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7658 if (!req)
7659 return -ENOMEM;
7660
7661 if (!req->enables) {
7662 hwrm_req_drop(bp, req);
7663 return 0;
7664 }
7665
7666 rc = hwrm_req_send(bp, req);
7667 if (rc)
7668 return rc;
7669
7670 if (bp->hwrm_spec_code < 0x10601)
7671 bp->hw_resc.resv_tx_rings = hwr->tx;
7672
7673 return bnxt_hwrm_get_rings(bp);
7674 }
7675
7676 static int
7677 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7678 {
7679 struct hwrm_func_vf_cfg_input *req;
7680 int rc;
7681
7682 if (!BNXT_NEW_RM(bp)) {
7683 bp->hw_resc.resv_tx_rings = hwr->tx;
7684 return 0;
7685 }
7686
7687 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7688 if (!req)
7689 return -ENOMEM;
7690
7691 rc = hwrm_req_send(bp, req);
7692 if (rc)
7693 return rc;
7694
7695 return bnxt_hwrm_get_rings(bp);
7696 }
7697
7698 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7699 {
7700 if (BNXT_PF(bp))
7701 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7702 else
7703 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7704 }
7705
7706 int bnxt_nq_rings_in_use(struct bnxt *bp)
7707 {
7708 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7709 }
7710
7711 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7712 {
7713 int cp;
7714
7715 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7716 return bnxt_nq_rings_in_use(bp);
7717
7718 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7719 return cp;
7720 }
7721
7722 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7723 {
7724 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7725 }
7726
7727 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7728 {
7729 if (!hwr->grp)
7730 return 0;
7731 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7732 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7733
7734 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7735 rss_ctx *= hwr->vnic;
7736 return rss_ctx;
7737 }
7738 if (BNXT_VF(bp))
7739 return BNXT_VF_MAX_RSS_CTX;
7740 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7741 return hwr->grp + 1;
7742 return 1;
7743 }
7744
7745 /* Check if a default RSS map needs to be set up. This function is only
7746 * used on older firmware that does not require reserving RX rings.
7747 */
7748 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7749 {
7750 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7751
7752 /* The RSS map is valid for RX rings set to resv_rx_rings */
7753 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7754 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7755 if (!netif_is_rxfh_configured(bp->dev))
7756 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7757 }
7758 }
7759
7760 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7761 {
7762 if (bp->flags & BNXT_FLAG_RFS) {
7763 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7764 return 2 + bp->num_rss_ctx;
7765 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7766 return rx_rings + 1;
7767 }
7768 return 1;
7769 }
7770
7771 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7772 {
7773 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7774 int cp = bnxt_cp_rings_in_use(bp);
7775 int nq = bnxt_nq_rings_in_use(bp);
7776 int rx = bp->rx_nr_rings, stat;
7777 int vnic, grp = rx;
7778
7779 /* Old firmware does not need RX ring reservations but we still
7780 * need to set up a default RSS map when needed. With new firmware
7781 * we go through RX ring reservations first and then set up the
7782 * RSS map for the successfully reserved RX rings when needed.
7783 */
7784 if (!BNXT_NEW_RM(bp))
7785 bnxt_check_rss_tbl_no_rmgr(bp);
7786
7787 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7788 bp->hwrm_spec_code >= 0x10601)
7789 return true;
7790
7791 if (!BNXT_NEW_RM(bp))
7792 return false;
7793
7794 vnic = bnxt_get_total_vnics(bp, rx);
7795
7796 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7797 rx <<= 1;
7798 stat = bnxt_get_func_stat_ctxs(bp);
7799 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7800 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7801 (hw_resc->resv_hw_ring_grps != grp &&
7802 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7803 return true;
7804 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7805 hw_resc->resv_irqs != nq)
7806 return true;
7807 return false;
7808 }
7809
7810 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7811 {
7812 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7813
7814 hwr->tx = hw_resc->resv_tx_rings;
7815 if (BNXT_NEW_RM(bp)) {
7816 hwr->rx = hw_resc->resv_rx_rings;
7817 hwr->cp = hw_resc->resv_irqs;
7818 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7819 hwr->cp_p5 = hw_resc->resv_cp_rings;
7820 hwr->grp = hw_resc->resv_hw_ring_grps;
7821 hwr->vnic = hw_resc->resv_vnics;
7822 hwr->stat = hw_resc->resv_stat_ctxs;
7823 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7824 }
7825 }
7826
7827 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7828 {
7829 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7830 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7831 }
7832
7833 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7834
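/* Reserve rings and related resources with the firmware based on the
 * current ring counts, then adjust the driver's ring counts (and the
 * RSS indirection table, if necessary) to match what was actually
 * granted.  Leftover MSI-X vectors and stat contexts are handed to
 * the ULP driver.
 */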
7835 static int __bnxt_reserve_rings(struct bnxt *bp)
7836 {
7837 struct bnxt_hw_rings hwr = {0};
7838 int rx_rings, old_rx_rings, rc;
7839 int cp = bp->cp_nr_rings;
7840 int ulp_msix = 0;
7841 bool sh = false;
7842 int tx_cp;
7843
7844 if (!bnxt_need_reserve_rings(bp))
7845 return 0;
7846
7847 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
7848 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7849 if (!ulp_msix)
7850 bnxt_set_ulp_stat_ctxs(bp, 0);
7851
7852 if (ulp_msix > bp->ulp_num_msix_want)
7853 ulp_msix = bp->ulp_num_msix_want;
7854 hwr.cp = cp + ulp_msix;
7855 } else {
7856 hwr.cp = bnxt_nq_rings_in_use(bp);
7857 }
7858
7859 hwr.tx = bp->tx_nr_rings;
7860 hwr.rx = bp->rx_nr_rings;
7861 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7862 sh = true;
7863 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7864 hwr.cp_p5 = hwr.rx + hwr.tx;
7865
7866 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7867
7868 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7869 hwr.rx <<= 1;
7870 hwr.grp = bp->rx_nr_rings;
7871 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7872 hwr.stat = bnxt_get_func_stat_ctxs(bp);
7873 old_rx_rings = bp->hw_resc.resv_rx_rings;
7874
7875 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7876 if (rc)
7877 return rc;
7878
7879 bnxt_copy_reserved_rings(bp, &hwr);
7880
7881 rx_rings = hwr.rx;
7882 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7883 if (hwr.rx >= 2) {
7884 rx_rings = hwr.rx >> 1;
7885 } else {
7886 if (netif_running(bp->dev))
7887 return -ENOMEM;
7888
7889 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7890 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7891 bp->dev->hw_features &= ~NETIF_F_LRO;
7892 bp->dev->features &= ~NETIF_F_LRO;
7893 bnxt_set_ring_params(bp);
7894 }
7895 }
7896 rx_rings = min_t(int, rx_rings, hwr.grp);
7897 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
7898 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
7899 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
7900 hwr.cp = min_t(int, hwr.cp, hwr.stat);
7901 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
7902 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7903 hwr.rx = rx_rings << 1;
7904 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
7905 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
7906 bp->tx_nr_rings = hwr.tx;
7907
7908 /* If we cannot reserve all the RX rings, reset the RSS map only
7909 * if absolutely necessary
7910 */
7911 if (rx_rings != bp->rx_nr_rings) {
7912 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
7913 rx_rings, bp->rx_nr_rings);
7914 if (netif_is_rxfh_configured(bp->dev) &&
7915 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
7916 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
7917 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
7918 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
7919 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
7920 }
7921 }
7922 bp->rx_nr_rings = rx_rings;
7923 bp->cp_nr_rings = hwr.cp;
7924
7925 if (!bnxt_rings_ok(bp, &hwr))
7926 return -ENOMEM;
7927
7928 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
7929 !netif_is_rxfh_configured(bp->dev))
7930 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7931
7932 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
7933 int resv_msix, resv_ctx, ulp_ctxs;
7934 struct bnxt_hw_resc *hw_resc;
7935
7936 hw_resc = &bp->hw_resc;
7937 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
7938 ulp_msix = min_t(int, resv_msix, ulp_msix);
7939 bnxt_set_ulp_msix_num(bp, ulp_msix);
7940 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
7941 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
7942 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
7943 }
7944
7945 return rc;
7946 }
7947
7948 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7949 {
7950 struct hwrm_func_vf_cfg_input *req;
7951 u32 flags;
7952
7953 if (!BNXT_NEW_RM(bp))
7954 return 0;
7955
7956 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7957 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
7958 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7959 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7960 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7961 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
7962 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
7963 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7964 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7965
7966 req->flags = cpu_to_le32(flags);
7967 return hwrm_req_send_silent(bp, req);
7968 }
7969
7970 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7971 {
7972 struct hwrm_func_cfg_input *req;
7973 u32 flags;
7974
7975 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7976 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
7977 if (BNXT_NEW_RM(bp)) {
7978 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7979 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7980 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7981 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
7982 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7983 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
7984 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
7985 else
7986 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7987 }
7988
7989 req->flags = cpu_to_le32(flags);
7990 return hwrm_req_send_silent(bp, req);
7991 }
7992
7993 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7994 {
7995 if (bp->hwrm_spec_code < 0x10801)
7996 return 0;
7997
7998 if (BNXT_PF(bp))
7999 return bnxt_hwrm_check_pf_rings(bp, hwr);
8000
8001 return bnxt_hwrm_check_vf_rings(bp, hwr);
8002 }
8003
8004 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8005 {
8006 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8007 struct hwrm_ring_aggint_qcaps_output *resp;
8008 struct hwrm_ring_aggint_qcaps_input *req;
8009 int rc;
8010
8011 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8012 coal_cap->num_cmpl_dma_aggr_max = 63;
8013 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8014 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8015 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8016 coal_cap->int_lat_tmr_min_max = 65535;
8017 coal_cap->int_lat_tmr_max_max = 65535;
8018 coal_cap->num_cmpl_aggr_int_max = 65535;
8019 coal_cap->timer_units = 80;
8020
8021 if (bp->hwrm_spec_code < 0x10902)
8022 return;
8023
8024 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8025 return;
8026
8027 resp = hwrm_req_hold(bp, req);
8028 rc = hwrm_req_send_silent(bp, req);
8029 if (!rc) {
8030 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8031 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8032 coal_cap->num_cmpl_dma_aggr_max =
8033 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8034 coal_cap->num_cmpl_dma_aggr_during_int_max =
8035 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8036 coal_cap->cmpl_aggr_dma_tmr_max =
8037 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8038 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8039 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8040 coal_cap->int_lat_tmr_min_max =
8041 le16_to_cpu(resp->int_lat_tmr_min_max);
8042 coal_cap->int_lat_tmr_max_max =
8043 le16_to_cpu(resp->int_lat_tmr_max_max);
8044 coal_cap->num_cmpl_aggr_int_max =
8045 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8046 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8047 }
8048 hwrm_req_drop(bp, req);
8049 }
8050
8051 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8052 {
8053 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8054
8055 return usec * 1000 / coal_cap->timer_units;
8056 }
8057
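/* Convert the driver's coalescing settings (ticks in usecs and buffer
 * counts) into HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS request fields,
 * clamped to the limits reported by HWRM_RING_AGGINT_QCAPS.
 */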
8058 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8059 struct bnxt_coal *hw_coal,
8060 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8061 {
8062 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8063 u16 val, tmr, max, flags = hw_coal->flags;
8064 u32 cmpl_params = coal_cap->cmpl_params;
8065
8066 max = hw_coal->bufs_per_record * 128;
8067 if (hw_coal->budget)
8068 max = hw_coal->bufs_per_record * hw_coal->budget;
8069 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8070
8071 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8072 req->num_cmpl_aggr_int = cpu_to_le16(val);
8073
8074 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8075 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8076
8077 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8078 coal_cap->num_cmpl_dma_aggr_during_int_max);
8079 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8080
8081 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8082 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8083 req->int_lat_tmr_max = cpu_to_le16(tmr);
8084
8085 /* min timer set to 1/2 of interrupt timer */
8086 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8087 val = tmr / 2;
8088 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8089 req->int_lat_tmr_min = cpu_to_le16(val);
8090 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8091 }
8092
8093 /* buf timer set to 1/4 of interrupt timer */
8094 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8095 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8096
8097 if (cmpl_params &
8098 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8099 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8100 val = clamp_t(u16, tmr, 1,
8101 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8102 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8103 req->enables |=
8104 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8105 }
8106
8107 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8108 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8109 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8110 req->flags = cpu_to_le16(flags);
8111 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8112 }
8113
8114 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8115 struct bnxt_coal *hw_coal)
8116 {
8117 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8118 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8119 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8120 u32 nq_params = coal_cap->nq_params;
8121 u16 tmr;
8122 int rc;
8123
8124 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8125 return 0;
8126
8127 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8128 if (rc)
8129 return rc;
8130
8131 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8132 req->flags =
8133 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8134
8135 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8136 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8137 req->int_lat_tmr_min = cpu_to_le16(tmr);
8138 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8139 return hwrm_req_send(bp, req);
8140 }
8141
8142 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8143 {
8144 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8145 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8146 struct bnxt_coal coal;
8147 int rc;
8148
8149 /* Tick values in microseconds.
8150 * 1 coal_buf x bufs_per_record = 1 completion record.
8151 */
8152 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8153
8154 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8155 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8156
8157 if (!bnapi->rx_ring)
8158 return -ENODEV;
8159
8160 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8161 if (rc)
8162 return rc;
8163
8164 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8165
8166 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8167
8168 return hwrm_req_send(bp, req_rx);
8169 }
8170
8171 static int
8172 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8173 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8174 {
8175 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8176
8177 req->ring_id = cpu_to_le16(ring_id);
8178 return hwrm_req_send(bp, req);
8179 }
8180
8181 static int
8182 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8183 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8184 {
8185 struct bnxt_tx_ring_info *txr;
8186 int i, rc;
8187
8188 bnxt_for_each_napi_tx(i, bnapi, txr) {
8189 u16 ring_id;
8190
8191 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8192 req->ring_id = cpu_to_le16(ring_id);
8193 rc = hwrm_req_send(bp, req);
8194 if (rc)
8195 return rc;
8196 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8197 return 0;
8198 }
8199 return 0;
8200 }
8201
8202 int bnxt_hwrm_set_coal(struct bnxt *bp)
8203 {
8204 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8205 int i, rc;
8206
8207 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8208 if (rc)
8209 return rc;
8210
8211 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8212 if (rc) {
8213 hwrm_req_drop(bp, req_rx);
8214 return rc;
8215 }
8216
8217 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8218 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8219
8220 hwrm_req_hold(bp, req_rx);
8221 hwrm_req_hold(bp, req_tx);
8222 for (i = 0; i < bp->cp_nr_rings; i++) {
8223 struct bnxt_napi *bnapi = bp->bnapi[i];
8224 struct bnxt_coal *hw_coal;
8225
8226 if (!bnapi->rx_ring)
8227 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8228 else
8229 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8230 if (rc)
8231 break;
8232
8233 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8234 continue;
8235
8236 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8237 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8238 if (rc)
8239 break;
8240 }
8241 if (bnapi->rx_ring)
8242 hw_coal = &bp->rx_coal;
8243 else
8244 hw_coal = &bp->tx_coal;
8245 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8246 }
8247 hwrm_req_drop(bp, req_rx);
8248 hwrm_req_drop(bp, req_tx);
8249 return rc;
8250 }
8251
8252 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8253 {
8254 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8255 struct hwrm_stat_ctx_free_input *req;
8256 int i;
8257
8258 if (!bp->bnapi)
8259 return;
8260
8261 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8262 return;
8263
8264 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8265 return;
8266 if (BNXT_FW_MAJ(bp) <= 20) {
8267 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8268 hwrm_req_drop(bp, req);
8269 return;
8270 }
8271 hwrm_req_hold(bp, req0);
8272 }
8273 hwrm_req_hold(bp, req);
8274 for (i = 0; i < bp->cp_nr_rings; i++) {
8275 struct bnxt_napi *bnapi = bp->bnapi[i];
8276 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8277
8278 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8279 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8280 if (req0) {
8281 req0->stat_ctx_id = req->stat_ctx_id;
8282 hwrm_req_send(bp, req0);
8283 }
8284 hwrm_req_send(bp, req);
8285
8286 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8287 }
8288 }
8289 hwrm_req_drop(bp, req);
8290 if (req0)
8291 hwrm_req_drop(bp, req0);
8292 }
8293
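/* Allocate one firmware statistics context per completion ring and
 * record the returned context IDs for later ring allocation.
 */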
8294 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8295 {
8296 struct hwrm_stat_ctx_alloc_output *resp;
8297 struct hwrm_stat_ctx_alloc_input *req;
8298 int rc, i;
8299
8300 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8301 return 0;
8302
8303 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8304 if (rc)
8305 return rc;
8306
8307 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8308 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8309
8310 resp = hwrm_req_hold(bp, req);
8311 for (i = 0; i < bp->cp_nr_rings; i++) {
8312 struct bnxt_napi *bnapi = bp->bnapi[i];
8313 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8314
8315 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8316
8317 rc = hwrm_req_send(bp, req);
8318 if (rc)
8319 break;
8320
8321 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8322
8323 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8324 }
8325 hwrm_req_drop(bp, req);
8326 return rc;
8327 }
8328
8329 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8330 {
8331 struct hwrm_func_qcfg_output *resp;
8332 struct hwrm_func_qcfg_input *req;
8333 u16 flags;
8334 int rc;
8335
8336 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8337 if (rc)
8338 return rc;
8339
8340 req->fid = cpu_to_le16(0xffff);
8341 resp = hwrm_req_hold(bp, req);
8342 rc = hwrm_req_send(bp, req);
8343 if (rc)
8344 goto func_qcfg_exit;
8345
8346 flags = le16_to_cpu(resp->flags);
8347 #ifdef CONFIG_BNXT_SRIOV
8348 if (BNXT_VF(bp)) {
8349 struct bnxt_vf_info *vf = &bp->vf;
8350
8351 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8352 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8353 vf->flags |= BNXT_VF_TRUST;
8354 else
8355 vf->flags &= ~BNXT_VF_TRUST;
8356 } else {
8357 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8358 }
8359 #endif
8360 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8361 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8362 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8363 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8364 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8365 }
8366 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8367 bp->flags |= BNXT_FLAG_MULTI_HOST;
8368
8369 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8370 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8371
8372 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8373 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8374
8375 switch (resp->port_partition_type) {
8376 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8377 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8378 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8379 bp->port_partition_type = resp->port_partition_type;
8380 break;
8381 }
8382 if (bp->hwrm_spec_code < 0x10707 ||
8383 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8384 bp->br_mode = BRIDGE_MODE_VEB;
8385 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8386 bp->br_mode = BRIDGE_MODE_VEPA;
8387 else
8388 bp->br_mode = BRIDGE_MODE_UNDEF;
8389
8390 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8391 if (!bp->max_mtu)
8392 bp->max_mtu = BNXT_MAX_MTU;
8393
8394 if (bp->db_size)
8395 goto func_qcfg_exit;
8396
8397 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8398 if (BNXT_CHIP_P5(bp)) {
8399 if (BNXT_PF(bp))
8400 bp->db_offset = DB_PF_OFFSET_P5;
8401 else
8402 bp->db_offset = DB_VF_OFFSET_P5;
8403 }
8404 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8405 1024);
8406 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8407 bp->db_size <= bp->db_offset)
8408 bp->db_size = pci_resource_len(bp->pdev, 2);
8409
8410 func_qcfg_exit:
8411 hwrm_req_drop(bp, req);
8412 return rc;
8413 }
8414
8415 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8416 u8 init_val, u8 init_offset,
8417 bool init_mask_set)
8418 {
8419 ctxm->init_value = init_val;
8420 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8421 if (init_mask_set)
8422 ctxm->init_offset = init_offset * 4;
8423 else
8424 ctxm->init_value = 0;
8425 }
8426
8427 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8428 {
8429 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8430 u16 type;
8431
8432 for (type = 0; type < ctx_max; type++) {
8433 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8434 int n = 1;
8435
8436 if (!ctxm->max_entries || ctxm->pg_info)
8437 continue;
8438
8439 if (ctxm->instance_bmap)
8440 n = hweight32(ctxm->instance_bmap);
8441 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8442 if (!ctxm->pg_info)
8443 return -ENOMEM;
8444 }
8445 return 0;
8446 }
8447
8448 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8449 struct bnxt_ctx_mem_type *ctxm, bool force);
8450
8451 #define BNXT_CTX_INIT_VALID(flags) \
8452 (!!((flags) & \
8453 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8454
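/* Query the V2 backing store capabilities one context type at a time,
 * following the next_valid_type chain, and record the entry size,
 * limits, split entries and initializer for each valid type.
 */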
8455 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8456 {
8457 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8458 struct hwrm_func_backing_store_qcaps_v2_input *req;
8459 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8460 u16 type;
8461 int rc;
8462
8463 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8464 if (rc)
8465 return rc;
8466
8467 if (!ctx) {
8468 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8469 if (!ctx)
8470 return -ENOMEM;
8471 bp->ctx = ctx;
8472 }
8473
8474 resp = hwrm_req_hold(bp, req);
8475
8476 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8477 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8478 u8 init_val, init_off, i;
8479 u32 max_entries;
8480 u16 entry_size;
8481 __le32 *p;
8482 u32 flags;
8483
8484 req->type = cpu_to_le16(type);
8485 rc = hwrm_req_send(bp, req);
8486 if (rc)
8487 goto ctx_done;
8488 flags = le32_to_cpu(resp->flags);
8489 type = le16_to_cpu(resp->next_valid_type);
8490 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8491 bnxt_free_one_ctx_mem(bp, ctxm, true);
8492 continue;
8493 }
8494 entry_size = le16_to_cpu(resp->entry_size);
8495 max_entries = le32_to_cpu(resp->max_num_entries);
8496 if (ctxm->mem_valid) {
8497 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8498 ctxm->entry_size != entry_size ||
8499 ctxm->max_entries != max_entries)
8500 bnxt_free_one_ctx_mem(bp, ctxm, true);
8501 else
8502 continue;
8503 }
8504 ctxm->type = le16_to_cpu(resp->type);
8505 ctxm->entry_size = entry_size;
8506 ctxm->flags = flags;
8507 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8508 ctxm->entry_multiple = resp->entry_multiple;
8509 ctxm->max_entries = max_entries;
8510 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8511 init_val = resp->ctx_init_value;
8512 init_off = resp->ctx_init_offset;
8513 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8514 BNXT_CTX_INIT_VALID(flags));
8515 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8516 BNXT_MAX_SPLIT_ENTRY);
8517 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8518 i++, p++)
8519 ctxm->split[i] = le32_to_cpu(*p);
8520 }
8521 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8522
8523 ctx_done:
8524 hwrm_req_drop(bp, req);
8525 return rc;
8526 }
8527
8528 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8529 {
8530 struct hwrm_func_backing_store_qcaps_output *resp;
8531 struct hwrm_func_backing_store_qcaps_input *req;
8532 int rc;
8533
8534 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8535 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8536 return 0;
8537
8538 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8539 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8540
8541 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8542 if (rc)
8543 return rc;
8544
8545 resp = hwrm_req_hold(bp, req);
8546 rc = hwrm_req_send_silent(bp, req);
8547 if (!rc) {
8548 struct bnxt_ctx_mem_type *ctxm;
8549 struct bnxt_ctx_mem_info *ctx;
8550 u8 init_val, init_idx = 0;
8551 u16 init_mask;
8552
8553 ctx = bp->ctx;
8554 if (!ctx) {
8555 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8556 if (!ctx) {
8557 rc = -ENOMEM;
8558 goto ctx_err;
8559 }
8560 bp->ctx = ctx;
8561 }
8562 init_val = resp->ctx_kind_initializer;
8563 init_mask = le16_to_cpu(resp->ctx_init_mask);
8564
8565 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8566 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8567 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8568 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8569 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8570 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8571 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8572 (init_mask & (1 << init_idx++)) != 0);
8573
8574 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8575 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8576 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8577 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8578 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8579 (init_mask & (1 << init_idx++)) != 0);
8580
8581 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8582 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8583 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8584 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8585 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8586 (init_mask & (1 << init_idx++)) != 0);
8587
8588 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8589 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8590 ctxm->max_entries = ctxm->vnic_entries +
8591 le16_to_cpu(resp->vnic_max_ring_table_entries);
8592 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8593 bnxt_init_ctx_initializer(ctxm, init_val,
8594 resp->vnic_init_offset,
8595 (init_mask & (1 << init_idx++)) != 0);
8596
8597 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8598 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8599 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8600 bnxt_init_ctx_initializer(ctxm, init_val,
8601 resp->stat_init_offset,
8602 (init_mask & (1 << init_idx++)) != 0);
8603
8604 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8605 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8606 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8607 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8608 ctxm->entry_multiple = resp->tqm_entries_multiple;
8609 if (!ctxm->entry_multiple)
8610 ctxm->entry_multiple = 1;
8611
8612 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8613
8614 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8615 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8616 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8617 ctxm->mrav_num_entries_units =
8618 le16_to_cpu(resp->mrav_num_entries_units);
8619 bnxt_init_ctx_initializer(ctxm, init_val,
8620 resp->mrav_init_offset,
8621 (init_mask & (1 << init_idx++)) != 0);
8622
8623 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8624 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8625 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8626
8627 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8628 if (!ctx->tqm_fp_rings_count)
8629 ctx->tqm_fp_rings_count = bp->max_q;
8630 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8631 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8632
8633 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8634 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8635 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8636
8637 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8638 } else {
8639 rc = 0;
8640 }
8641 ctx_err:
8642 hwrm_req_drop(bp, req);
8643 return rc;
8644 }
8645
8646 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8647 __le64 *pg_dir)
8648 {
8649 if (!rmem->nr_pages)
8650 return;
8651
8652 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8653 if (rmem->depth >= 1) {
8654 if (rmem->depth == 2)
8655 *pg_attr |= 2;
8656 else
8657 *pg_attr |= 1;
8658 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8659 } else {
8660 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8661 }
8662 }
8663
8664 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8665 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8666 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8667 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8668 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8669 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8670
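/* Configure the firmware backing store by sending the page table
 * addresses and entry counts for each context type selected in
 * @enables.
 */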
8671 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8672 {
8673 struct hwrm_func_backing_store_cfg_input *req;
8674 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8675 struct bnxt_ctx_pg_info *ctx_pg;
8676 struct bnxt_ctx_mem_type *ctxm;
8677 void **__req = (void **)&req;
8678 u32 req_len = sizeof(*req);
8679 __le32 *num_entries;
8680 __le64 *pg_dir;
8681 u32 flags = 0;
8682 u8 *pg_attr;
8683 u32 ena;
8684 int rc;
8685 int i;
8686
8687 if (!ctx)
8688 return 0;
8689
8690 if (req_len > bp->hwrm_max_ext_req_len)
8691 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8692 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8693 if (rc)
8694 return rc;
8695
8696 req->enables = cpu_to_le32(enables);
8697 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8698 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8699 ctx_pg = ctxm->pg_info;
8700 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8701 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8702 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8703 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8704 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8705 &req->qpc_pg_size_qpc_lvl,
8706 &req->qpc_page_dir);
8707
8708 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8709 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8710 }
8711 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8712 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8713 ctx_pg = ctxm->pg_info;
8714 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8715 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8716 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8717 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8718 &req->srq_pg_size_srq_lvl,
8719 &req->srq_page_dir);
8720 }
8721 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8722 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8723 ctx_pg = ctxm->pg_info;
8724 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8725 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8726 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8727 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8728 &req->cq_pg_size_cq_lvl,
8729 &req->cq_page_dir);
8730 }
8731 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8732 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8733 ctx_pg = ctxm->pg_info;
8734 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8735 req->vnic_num_ring_table_entries =
8736 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8737 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8738 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8739 &req->vnic_pg_size_vnic_lvl,
8740 &req->vnic_page_dir);
8741 }
8742 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8743 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8744 ctx_pg = ctxm->pg_info;
8745 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8746 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8747 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8748 &req->stat_pg_size_stat_lvl,
8749 &req->stat_page_dir);
8750 }
8751 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8752 u32 units;
8753
8754 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8755 ctx_pg = ctxm->pg_info;
8756 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8757 units = ctxm->mrav_num_entries_units;
8758 if (units) {
8759 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8760 u32 entries;
8761
8762 num_mr = ctx_pg->entries - num_ah;
8763 entries = ((num_mr / units) << 16) | (num_ah / units);
8764 req->mrav_num_entries = cpu_to_le32(entries);
8765 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8766 }
8767 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8768 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8769 &req->mrav_pg_size_mrav_lvl,
8770 &req->mrav_page_dir);
8771 }
8772 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8773 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8774 ctx_pg = ctxm->pg_info;
8775 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8776 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8777 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8778 &req->tim_pg_size_tim_lvl,
8779 &req->tim_page_dir);
8780 }
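/* The loop below walks the TQM rings: the first iteration covers the
 * slow-path TQM ring (the BNXT_CTX_STQM fields of the request), and each
 * subsequent iteration advances to the next fast-path TQM ring
 * (BNXT_CTX_FTQM pg_info[]), shifting the enable bit left by one each time.
 */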
8781 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8782 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8783 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8784 pg_dir = &req->tqm_sp_page_dir,
8785 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8786 ctx_pg = ctxm->pg_info;
8787 i < BNXT_MAX_TQM_RINGS;
8788 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8789 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8790 if (!(enables & ena))
8791 continue;
8792
8793 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8794 *num_entries = cpu_to_le32(ctx_pg->entries);
8795 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8796 }
8797 req->flags = cpu_to_le32(flags);
8798 return hwrm_req_send(bp, req);
8799 }
8800
8801 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8802 struct bnxt_ctx_pg_info *ctx_pg)
8803 {
8804 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8805
8806 rmem->page_size = BNXT_PAGE_SIZE;
8807 rmem->pg_arr = ctx_pg->ctx_pg_arr;
8808 rmem->dma_arr = ctx_pg->ctx_dma_arr;
8809 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8810 if (rmem->depth >= 1)
8811 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8812 return bnxt_alloc_ring(bp, rmem);
8813 }
8814
8815 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8816 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8817 u8 depth, struct bnxt_ctx_mem_type *ctxm)
8818 {
8819 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8820 int rc;
8821
8822 if (!mem_size)
8823 return -EINVAL;
8824
8825 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8826 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8827 ctx_pg->nr_pages = 0;
8828 return -EINVAL;
8829 }
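/* Large allocations (more than MAX_CTX_PAGES pages, or an explicitly
 * requested depth > 1) use a two-level layout below: a table of page-table
 * blocks, each covering up to MAX_CTX_PAGES context pages.  Smaller
 * allocations fall through to a single-level ring.
 */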
8830 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8831 int nr_tbls, i;
8832
8833 rmem->depth = 2;
8834 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8835 GFP_KERNEL);
8836 if (!ctx_pg->ctx_pg_tbl)
8837 return -ENOMEM;
8838 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8839 rmem->nr_pages = nr_tbls;
8840 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8841 if (rc)
8842 return rc;
8843 for (i = 0; i < nr_tbls; i++) {
8844 struct bnxt_ctx_pg_info *pg_tbl;
8845
8846 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8847 if (!pg_tbl)
8848 return -ENOMEM;
8849 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8850 rmem = &pg_tbl->ring_mem;
8851 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8852 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8853 rmem->depth = 1;
8854 rmem->nr_pages = MAX_CTX_PAGES;
8855 rmem->ctx_mem = ctxm;
8856 if (i == (nr_tbls - 1)) {
8857 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8858
8859 if (rem)
8860 rmem->nr_pages = rem;
8861 }
8862 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8863 if (rc)
8864 break;
8865 }
8866 } else {
8867 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8868 if (rmem->nr_pages > 1 || depth)
8869 rmem->depth = 1;
8870 rmem->ctx_mem = ctxm;
8871 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8872 }
8873 return rc;
8874 }
8875
8876 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
8877 struct bnxt_ctx_pg_info *ctx_pg,
8878 void *buf, size_t offset, size_t head,
8879 size_t tail)
8880 {
8881 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8882 size_t nr_pages = ctx_pg->nr_pages;
8883 int page_size = rmem->page_size;
8884 size_t len = 0, total_len = 0;
8885 u16 depth = rmem->depth;
8886
8887 tail %= nr_pages * page_size;
8888 do {
8889 if (depth > 1) {
8890 int i = head / (page_size * MAX_CTX_PAGES);
8891 struct bnxt_ctx_pg_info *pg_tbl;
8892
8893 pg_tbl = ctx_pg->ctx_pg_tbl[i];
8894 rmem = &pg_tbl->ring_mem;
8895 }
8896 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
8897 head += len;
8898 offset += len;
8899 total_len += len;
8900 if (head >= nr_pages * page_size)
8901 head = 0;
8902 } while (head != tail);
8903 return total_len;
8904 }
8905
8906 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
8907 struct bnxt_ctx_pg_info *ctx_pg)
8908 {
8909 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8910
8911 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
8912 ctx_pg->ctx_pg_tbl) {
8913 int i, nr_tbls = rmem->nr_pages;
8914
8915 for (i = 0; i < nr_tbls; i++) {
8916 struct bnxt_ctx_pg_info *pg_tbl;
8917 struct bnxt_ring_mem_info *rmem2;
8918
8919 pg_tbl = ctx_pg->ctx_pg_tbl[i];
8920 if (!pg_tbl)
8921 continue;
8922 rmem2 = &pg_tbl->ring_mem;
8923 bnxt_free_ring(bp, rmem2);
8924 ctx_pg->ctx_pg_arr[i] = NULL;
8925 kfree(pg_tbl);
8926 ctx_pg->ctx_pg_tbl[i] = NULL;
8927 }
8928 kfree(ctx_pg->ctx_pg_tbl);
8929 ctx_pg->ctx_pg_tbl = NULL;
8930 }
8931 bnxt_free_ring(bp, rmem);
8932 ctx_pg->nr_pages = 0;
8933 }
8934
8935 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
8936 struct bnxt_ctx_mem_type *ctxm, u32 entries,
8937 u8 pg_lvl)
8938 {
8939 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8940 int i, rc = 0, n = 1;
8941 u32 mem_size;
8942
8943 if (!ctxm->entry_size || !ctx_pg)
8944 return -EINVAL;
8945 if (ctxm->instance_bmap)
8946 n = hweight32(ctxm->instance_bmap);
8947 if (ctxm->entry_multiple)
8948 entries = roundup(entries, ctxm->entry_multiple);
8949 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
8950 mem_size = entries * ctxm->entry_size;
8951 for (i = 0; i < n && !rc; i++) {
8952 ctx_pg[i].entries = entries;
8953 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8954 ctxm->init_value ? ctxm : NULL);
8955 }
8956 if (!rc)
8957 ctxm->mem_valid = 1;
8958 return rc;
8959 }
8960
8961 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
8962 struct bnxt_ctx_mem_type *ctxm,
8963 bool last)
8964 {
8965 struct hwrm_func_backing_store_cfg_v2_input *req;
8966 u32 instance_bmap = ctxm->instance_bmap;
8967 int i, j, rc = 0, n = 1;
8968 __le32 *p;
8969
8970 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
8971 return 0;
8972
8973 if (instance_bmap)
8974 n = hweight32(ctxm->instance_bmap);
8975 else
8976 instance_bmap = 1;
8977
8978 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8979 if (rc)
8980 return rc;
8981 hwrm_req_hold(bp, req);
8982 req->type = cpu_to_le16(ctxm->type);
8983 req->entry_size = cpu_to_le16(ctxm->entry_size);
8984 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
8985 bnxt_bs_trace_avail(bp, ctxm->type)) {
8986 struct bnxt_bs_trace_info *bs_trace;
8987 u32 enables;
8988
8989 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
8990 req->enables = cpu_to_le32(enables);
8991 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
8992 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
8993 }
8994 req->subtype_valid_cnt = ctxm->split_entry_cnt;
8995 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
8996 p[i] = cpu_to_le32(ctxm->split[i]);
8997 for (i = 0, j = 0; j < n && !rc; i++) {
8998 struct bnxt_ctx_pg_info *ctx_pg;
8999
9000 if (!(instance_bmap & (1 << i)))
9001 continue;
9002 req->instance = cpu_to_le16(i);
9003 ctx_pg = &ctxm->pg_info[j++];
9004 if (!ctx_pg->entries)
9005 continue;
9006 req->num_entries = cpu_to_le32(ctx_pg->entries);
9007 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9008 &req->page_size_pbl_level,
9009 &req->page_dir);
9010 if (last && j == n)
9011 req->flags =
9012 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9013 rc = hwrm_req_send(bp, req);
9014 }
9015 hwrm_req_drop(bp, req);
9016 return rc;
9017 }
9018
9019 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
9020 {
9021 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9022 struct bnxt_ctx_mem_type *ctxm;
9023 u16 last_type = BNXT_CTX_INV;
9024 int rc = 0;
9025 u16 type;
9026
9027 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) {
9028 ctxm = &ctx->ctx_arr[type];
9029 if (!bnxt_bs_trace_avail(bp, type))
9030 continue;
9031 if (!ctxm->mem_valid) {
9032 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9033 ctxm->max_entries, 1);
9034 if (rc) {
9035 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9036 type);
9037 continue;
9038 }
9039 bnxt_bs_trace_init(bp, ctxm);
9040 }
9041 last_type = type;
9042 }
9043
9044 if (last_type == BNXT_CTX_INV) {
9045 if (!ena)
9046 return 0;
9047 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
9048 last_type = BNXT_CTX_MAX - 1;
9049 else
9050 last_type = BNXT_CTX_L2_MAX - 1;
9051 }
9052 ctx->ctx_arr[last_type].last = 1;
9053
9054 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9055 ctxm = &ctx->ctx_arr[type];
9056
9057 if (!ctxm->mem_valid)
9058 continue;
9059 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9060 if (rc)
9061 return rc;
9062 }
9063 return 0;
9064 }
9065
9066 /**
9067 * __bnxt_copy_ctx_mem - copy host context memory
9068 * @bp: The driver context
9069 * @ctxm: The pointer to the context memory type
9070 * @buf: The destination buffer or NULL to just obtain the length
9071 * @offset: The buffer offset to copy the data to
9072 * @head: The head offset of context memory to copy from
9073 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9074 *
9075 * This function is called for debugging purposes to dump the host context
9076 * used by the chip.
9077 *
9078 * Return: Length of memory copied
9079 */
9080 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9081 struct bnxt_ctx_mem_type *ctxm, void *buf,
9082 size_t offset, size_t head, size_t tail)
9083 {
9084 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9085 size_t len = 0, total_len = 0;
9086 int i, n = 1;
9087
9088 if (!ctx_pg)
9089 return 0;
9090
9091 if (ctxm->instance_bmap)
9092 n = hweight32(ctxm->instance_bmap);
9093 for (i = 0; i < n; i++) {
9094 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9095 tail);
9096 offset += len;
9097 total_len += len;
9098 }
9099 return total_len;
9100 }
9101
9102 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9103 void *buf, size_t offset)
9104 {
9105 size_t tail = ctxm->max_entries * ctxm->entry_size;
9106
9107 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9108 }
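/* Example usage (illustrative sketch only, not called by the driver): per
 * the kernel-doc above, a NULL buffer just returns the length, so a caller
 * could size the destination first and then perform the actual copy:
 *
 *	size_t len = bnxt_copy_ctx_mem(bp, ctxm, NULL, 0);
 *	void *buf = vmalloc(len);
 *
 *	if (buf)
 *		bnxt_copy_ctx_mem(bp, ctxm, buf, 0);
 */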
9109
9110 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9111 struct bnxt_ctx_mem_type *ctxm, bool force)
9112 {
9113 struct bnxt_ctx_pg_info *ctx_pg;
9114 int i, n = 1;
9115
9116 ctxm->last = 0;
9117
9118 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9119 return;
9120
9121 ctx_pg = ctxm->pg_info;
9122 if (ctx_pg) {
9123 if (ctxm->instance_bmap)
9124 n = hweight32(ctxm->instance_bmap);
9125 for (i = 0; i < n; i++)
9126 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9127
9128 kfree(ctx_pg);
9129 ctxm->pg_info = NULL;
9130 ctxm->mem_valid = 0;
9131 }
9132 memset(ctxm, 0, sizeof(*ctxm));
9133 }
9134
9135 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9136 {
9137 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9138 u16 type;
9139
9140 if (!ctx)
9141 return;
9142
9143 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9144 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9145
9146 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9147 if (force) {
9148 kfree(ctx);
9149 bp->ctx = NULL;
9150 }
9151 }
9152
9153 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9154 {
9155 struct bnxt_ctx_mem_type *ctxm;
9156 struct bnxt_ctx_mem_info *ctx;
9157 u32 l2_qps, qp1_qps, max_qps;
9158 u32 ena, entries_sp, entries;
9159 u32 srqs, max_srqs, min;
9160 u32 num_mr, num_ah;
9161 u32 extra_srqs = 0;
9162 u32 extra_qps = 0;
9163 u32 fast_qpmd_qps;
9164 u8 pg_lvl = 1;
9165 int i, rc;
9166
9167 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9168 if (rc) {
9169 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9170 rc);
9171 return rc;
9172 }
9173 ctx = bp->ctx;
9174 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9175 return 0;
9176
9177 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9178 l2_qps = ctxm->qp_l2_entries;
9179 qp1_qps = ctxm->qp_qp1_entries;
9180 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9181 max_qps = ctxm->max_entries;
9182 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9183 srqs = ctxm->srq_l2_entries;
9184 max_srqs = ctxm->max_entries;
9185 ena = 0;
9186 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9187 pg_lvl = 2;
9188 if (BNXT_SW_RES_LMT(bp)) {
9189 extra_qps = max_qps - l2_qps - qp1_qps;
9190 extra_srqs = max_srqs - srqs;
9191 } else {
9192 extra_qps = min_t(u32, 65536,
9193 max_qps - l2_qps - qp1_qps);
9194 /* allocate extra qps if fw supports RoCE fast qp
9195 * destroy feature
9196 */
9197 extra_qps += fast_qpmd_qps;
9198 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9199 }
9200 if (fast_qpmd_qps)
9201 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9202 }
9203
9204 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9205 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9206 pg_lvl);
9207 if (rc)
9208 return rc;
9209
9210 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9211 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9212 if (rc)
9213 return rc;
9214
9215 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9216 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9217 extra_qps * 2, pg_lvl);
9218 if (rc)
9219 return rc;
9220
9221 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9222 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9223 if (rc)
9224 return rc;
9225
9226 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9227 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9228 if (rc)
9229 return rc;
9230
9231 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9232 goto skip_rdma;
9233
9234 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9235 if (BNXT_SW_RES_LMT(bp) &&
9236 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9237 num_ah = ctxm->mrav_av_entries;
9238 num_mr = ctxm->max_entries - num_ah;
9239 } else {
9240 /* 128K extra is needed to accommodate static AH context
9241 * allocation by f/w.
9242 */
9243 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9244 num_ah = min_t(u32, num_mr, 1024 * 128);
9245 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9246 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9247 ctxm->mrav_av_entries = num_ah;
9248 }
9249
9250 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9251 if (rc)
9252 return rc;
9253 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9254
9255 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9256 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9257 if (rc)
9258 return rc;
9259 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9260
9261 skip_rdma:
9262 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9263 min = ctxm->min_entries;
9264 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9265 2 * (extra_qps + qp1_qps) + min;
9266 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9267 if (rc)
9268 return rc;
9269
9270 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9271 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9272 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9273 if (rc)
9274 return rc;
9275 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9276 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9277 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9278
9279 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9280 rc = bnxt_backing_store_cfg_v2(bp, ena);
9281 else
9282 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9283 if (rc) {
9284 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9285 rc);
9286 return rc;
9287 }
9288 ctx->flags |= BNXT_CTX_FLAG_INITED;
9289 return 0;
9290 }
9291
9292 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9293 {
9294 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9295 u16 page_attr;
9296 int rc;
9297
9298 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9299 return 0;
9300
9301 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9302 if (rc)
9303 return rc;
9304
9305 if (BNXT_PAGE_SIZE == 0x2000)
9306 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9307 else if (BNXT_PAGE_SIZE == 0x10000)
9308 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9309 else
9310 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9311 req->pg_size_lvl = cpu_to_le16(page_attr |
9312 bp->fw_crash_mem->ring_mem.depth);
9313 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9314 req->size = cpu_to_le32(bp->fw_crash_len);
9315 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9316 return hwrm_req_send(bp, req);
9317 }
9318
9319 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9320 {
9321 if (bp->fw_crash_mem) {
9322 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9323 kfree(bp->fw_crash_mem);
9324 bp->fw_crash_mem = NULL;
9325 }
9326 }
9327
9328 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9329 {
9330 u32 mem_size = 0;
9331 int rc;
9332
9333 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9334 return 0;
9335
9336 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9337 if (rc)
9338 return rc;
9339
9340 mem_size = round_up(mem_size, 4);
9341
9342 /* keep and use the existing pages */
9343 if (bp->fw_crash_mem &&
9344 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9345 goto alloc_done;
9346
9347 if (bp->fw_crash_mem)
9348 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9349 else
9350 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
9351 GFP_KERNEL);
9352 if (!bp->fw_crash_mem)
9353 return -ENOMEM;
9354
9355 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9356 if (rc) {
9357 bnxt_free_crash_dump_mem(bp);
9358 return rc;
9359 }
9360
9361 alloc_done:
9362 bp->fw_crash_len = mem_size;
9363 return 0;
9364 }
9365
9366 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9367 {
9368 struct hwrm_func_resource_qcaps_output *resp;
9369 struct hwrm_func_resource_qcaps_input *req;
9370 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9371 int rc;
9372
9373 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9374 if (rc)
9375 return rc;
9376
9377 req->fid = cpu_to_le16(0xffff);
9378 resp = hwrm_req_hold(bp, req);
9379 rc = hwrm_req_send_silent(bp, req);
9380 if (rc)
9381 goto hwrm_func_resc_qcaps_exit;
9382
9383 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9384 if (!all)
9385 goto hwrm_func_resc_qcaps_exit;
9386
9387 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9388 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9389 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9390 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9391 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9392 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9393 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9394 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9395 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9396 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9397 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9398 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9399 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9400 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9401 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9402 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9403
9404 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9405 u16 max_msix = le16_to_cpu(resp->max_msix);
9406
9407 hw_resc->max_nqs = max_msix;
9408 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9409 }
9410
9411 if (BNXT_PF(bp)) {
9412 struct bnxt_pf_info *pf = &bp->pf;
9413
9414 pf->vf_resv_strategy =
9415 le16_to_cpu(resp->vf_reservation_strategy);
9416 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9417 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9418 }
9419 hwrm_func_resc_qcaps_exit:
9420 hwrm_req_drop(bp, req);
9421 return rc;
9422 }
9423
9424 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9425 {
9426 struct hwrm_port_mac_ptp_qcfg_output *resp;
9427 struct hwrm_port_mac_ptp_qcfg_input *req;
9428 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9429 u8 flags;
9430 int rc;
9431
9432 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9433 rc = -ENODEV;
9434 goto no_ptp;
9435 }
9436
9437 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9438 if (rc)
9439 goto no_ptp;
9440
9441 req->port_id = cpu_to_le16(bp->pf.port_id);
9442 resp = hwrm_req_hold(bp, req);
9443 rc = hwrm_req_send(bp, req);
9444 if (rc)
9445 goto exit;
9446
9447 flags = resp->flags;
9448 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9449 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9450 rc = -ENODEV;
9451 goto exit;
9452 }
9453 if (!ptp) {
9454 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
9455 if (!ptp) {
9456 rc = -ENOMEM;
9457 goto exit;
9458 }
9459 ptp->bp = bp;
9460 bp->ptp_cfg = ptp;
9461 }
9462
9463 if (flags &
9464 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9465 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9466 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9467 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9468 } else if (BNXT_CHIP_P5(bp)) {
9469 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9470 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9471 } else {
9472 rc = -ENODEV;
9473 goto exit;
9474 }
9475 ptp->rtc_configured =
9476 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9477 rc = bnxt_ptp_init(bp);
9478 if (rc)
9479 netdev_warn(bp->dev, "PTP initialization failed.\n");
9480 exit:
9481 hwrm_req_drop(bp, req);
9482 if (!rc)
9483 return 0;
9484
9485 no_ptp:
9486 bnxt_ptp_clear(bp);
9487 kfree(ptp);
9488 bp->ptp_cfg = NULL;
9489 return rc;
9490 }
9491
9492 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9493 {
9494 struct hwrm_func_qcaps_output *resp;
9495 struct hwrm_func_qcaps_input *req;
9496 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9497 u32 flags, flags_ext, flags_ext2;
9498 int rc;
9499
9500 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9501 if (rc)
9502 return rc;
9503
9504 req->fid = cpu_to_le16(0xffff);
9505 resp = hwrm_req_hold(bp, req);
9506 rc = hwrm_req_send(bp, req);
9507 if (rc)
9508 goto hwrm_func_qcaps_exit;
9509
9510 flags = le32_to_cpu(resp->flags);
9511 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9512 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9513 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9514 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9515 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9516 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9517 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9518 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9519 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9520 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9521 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9522 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9523 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9524 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9525 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9526 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9527 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9528 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9529
9530 flags_ext = le32_to_cpu(resp->flags_ext);
9531 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9532 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9533 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9534 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9535 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9536 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9537 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9538 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9539 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9540 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9541 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9542 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9543 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9544 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9545 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9546 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9547
9548 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9549 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9550 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9551 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9552 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9553 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9554 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9555 if (flags_ext2 &
9556 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9557 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9558 if (BNXT_PF(bp) &&
9559 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9560 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9561
9562 bp->tx_push_thresh = 0;
9563 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9564 BNXT_FW_MAJ(bp) > 217)
9565 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9566
9567 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9568 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9569 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9570 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9571 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9572 if (!hw_resc->max_hw_ring_grps)
9573 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9574 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9575 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9576 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9577
9578 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9579 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9580 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9581 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9582 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9583 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9584
9585 if (BNXT_PF(bp)) {
9586 struct bnxt_pf_info *pf = &bp->pf;
9587
9588 pf->fw_fid = le16_to_cpu(resp->fid);
9589 pf->port_id = le16_to_cpu(resp->port_id);
9590 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9591 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9592 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9593 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9594 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9595 bp->flags |= BNXT_FLAG_WOL_CAP;
9596 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9597 bp->fw_cap |= BNXT_FW_CAP_PTP;
9598 } else {
9599 bnxt_ptp_clear(bp);
9600 kfree(bp->ptp_cfg);
9601 bp->ptp_cfg = NULL;
9602 }
9603 } else {
9604 #ifdef CONFIG_BNXT_SRIOV
9605 struct bnxt_vf_info *vf = &bp->vf;
9606
9607 vf->fw_fid = le16_to_cpu(resp->fid);
9608 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9609 #endif
9610 }
9611 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9612
9613 hwrm_func_qcaps_exit:
9614 hwrm_req_drop(bp, req);
9615 return rc;
9616 }
9617
9618 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9619 {
9620 struct hwrm_dbg_qcaps_output *resp;
9621 struct hwrm_dbg_qcaps_input *req;
9622 int rc;
9623
9624 bp->fw_dbg_cap = 0;
9625 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9626 return;
9627
9628 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9629 if (rc)
9630 return;
9631
9632 req->fid = cpu_to_le16(0xffff);
9633 resp = hwrm_req_hold(bp, req);
9634 rc = hwrm_req_send(bp, req);
9635 if (rc)
9636 goto hwrm_dbg_qcaps_exit;
9637
9638 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9639
9640 hwrm_dbg_qcaps_exit:
9641 hwrm_req_drop(bp, req);
9642 }
9643
9644 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9645
9646 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9647 {
9648 int rc;
9649
9650 rc = __bnxt_hwrm_func_qcaps(bp);
9651 if (rc)
9652 return rc;
9653
9654 bnxt_hwrm_dbg_qcaps(bp);
9655
9656 rc = bnxt_hwrm_queue_qportcfg(bp);
9657 if (rc) {
9658 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9659 return rc;
9660 }
9661 if (bp->hwrm_spec_code >= 0x10803) {
9662 rc = bnxt_alloc_ctx_mem(bp);
9663 if (rc)
9664 return rc;
9665 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9666 if (!rc)
9667 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9668 }
9669 return 0;
9670 }
9671
9672 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9673 {
9674 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9675 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9676 u32 flags;
9677 int rc;
9678
9679 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9680 return 0;
9681
9682 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9683 if (rc)
9684 return rc;
9685
9686 resp = hwrm_req_hold(bp, req);
9687 rc = hwrm_req_send(bp, req);
9688 if (rc)
9689 goto hwrm_cfa_adv_qcaps_exit;
9690
9691 flags = le32_to_cpu(resp->flags);
9692 if (flags &
9693 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9694 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9695
9696 if (flags &
9697 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9698 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9699
9700 if (flags &
9701 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9702 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9703
9704 hwrm_cfa_adv_qcaps_exit:
9705 hwrm_req_drop(bp, req);
9706 return rc;
9707 }
9708
9709 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9710 {
9711 if (bp->fw_health)
9712 return 0;
9713
9714 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9715 if (!bp->fw_health)
9716 return -ENOMEM;
9717
9718 mutex_init(&bp->fw_health->lock);
9719 return 0;
9720 }
9721
9722 static int bnxt_alloc_fw_health(struct bnxt *bp)
9723 {
9724 int rc;
9725
9726 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9727 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9728 return 0;
9729
9730 rc = __bnxt_alloc_fw_health(bp);
9731 if (rc) {
9732 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9733 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9734 return rc;
9735 }
9736
9737 return 0;
9738 }
9739
9740 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9741 {
9742 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9743 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9744 BNXT_FW_HEALTH_WIN_MAP_OFF);
9745 }
9746
9747 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9748 {
9749 struct bnxt_fw_health *fw_health = bp->fw_health;
9750 u32 reg_type;
9751
9752 if (!fw_health)
9753 return;
9754
9755 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9756 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9757 fw_health->status_reliable = false;
9758
9759 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9760 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9761 fw_health->resets_reliable = false;
9762 }
9763
9764 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9765 {
9766 void __iomem *hs;
9767 u32 status_loc;
9768 u32 reg_type;
9769 u32 sig;
9770
9771 if (bp->fw_health)
9772 bp->fw_health->status_reliable = false;
9773
9774 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9775 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9776
9777 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9778 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9779 if (!bp->chip_num) {
9780 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9781 bp->chip_num = readl(bp->bar0 +
9782 BNXT_FW_HEALTH_WIN_BASE +
9783 BNXT_GRC_REG_CHIP_NUM);
9784 }
9785 if (!BNXT_CHIP_P5_PLUS(bp))
9786 return;
9787
9788 status_loc = BNXT_GRC_REG_STATUS_P5 |
9789 BNXT_FW_HEALTH_REG_TYPE_BAR0;
9790 } else {
9791 status_loc = readl(hs + offsetof(struct hcomm_status,
9792 fw_status_loc));
9793 }
9794
9795 if (__bnxt_alloc_fw_health(bp)) {
9796 netdev_warn(bp->dev, "no memory for firmware status checks\n");
9797 return;
9798 }
9799
9800 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9801 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9802 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9803 __bnxt_map_fw_health_reg(bp, status_loc);
9804 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9805 BNXT_FW_HEALTH_WIN_OFF(status_loc);
9806 }
9807
9808 bp->fw_health->status_reliable = true;
9809 }
9810
9811 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9812 {
9813 struct bnxt_fw_health *fw_health = bp->fw_health;
9814 u32 reg_base = 0xffffffff;
9815 int i;
9816
9817 bp->fw_health->status_reliable = false;
9818 bp->fw_health->resets_reliable = false;
9819 /* Only pre-map the monitoring GRC registers using window 3 */
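/* All GRC-type health registers must share the same window base;
 * otherwise the mapping fails with -ERANGE below.
 */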
9820 for (i = 0; i < 4; i++) {
9821 u32 reg = fw_health->regs[i];
9822
9823 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9824 continue;
9825 if (reg_base == 0xffffffff)
9826 reg_base = reg & BNXT_GRC_BASE_MASK;
9827 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9828 return -ERANGE;
9829 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9830 }
9831 bp->fw_health->status_reliable = true;
9832 bp->fw_health->resets_reliable = true;
9833 if (reg_base == 0xffffffff)
9834 return 0;
9835
9836 __bnxt_map_fw_health_reg(bp, reg_base);
9837 return 0;
9838 }
9839
9840 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9841 {
9842 if (!bp->fw_health)
9843 return;
9844
9845 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9846 bp->fw_health->status_reliable = true;
9847 bp->fw_health->resets_reliable = true;
9848 } else {
9849 bnxt_try_map_fw_health_reg(bp);
9850 }
9851 }
9852
9853 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9854 {
9855 struct bnxt_fw_health *fw_health = bp->fw_health;
9856 struct hwrm_error_recovery_qcfg_output *resp;
9857 struct hwrm_error_recovery_qcfg_input *req;
9858 int rc, i;
9859
9860 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9861 return 0;
9862
9863 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9864 if (rc)
9865 return rc;
9866
9867 resp = hwrm_req_hold(bp, req);
9868 rc = hwrm_req_send(bp, req);
9869 if (rc)
9870 goto err_recovery_out;
9871 fw_health->flags = le32_to_cpu(resp->flags);
9872 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
9873 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
9874 rc = -EINVAL;
9875 goto err_recovery_out;
9876 }
9877 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
9878 fw_health->master_func_wait_dsecs =
9879 le32_to_cpu(resp->master_func_wait_period);
9880 fw_health->normal_func_wait_dsecs =
9881 le32_to_cpu(resp->normal_func_wait_period);
9882 fw_health->post_reset_wait_dsecs =
9883 le32_to_cpu(resp->master_func_wait_period_after_reset);
9884 fw_health->post_reset_max_wait_dsecs =
9885 le32_to_cpu(resp->max_bailout_time_after_reset);
9886 fw_health->regs[BNXT_FW_HEALTH_REG] =
9887 le32_to_cpu(resp->fw_health_status_reg);
9888 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
9889 le32_to_cpu(resp->fw_heartbeat_reg);
9890 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
9891 le32_to_cpu(resp->fw_reset_cnt_reg);
9892 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
9893 le32_to_cpu(resp->reset_inprogress_reg);
9894 fw_health->fw_reset_inprog_reg_mask =
9895 le32_to_cpu(resp->reset_inprogress_reg_mask);
9896 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
9897 if (fw_health->fw_reset_seq_cnt >= 16) {
9898 rc = -EINVAL;
9899 goto err_recovery_out;
9900 }
9901 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
9902 fw_health->fw_reset_seq_regs[i] =
9903 le32_to_cpu(resp->reset_reg[i]);
9904 fw_health->fw_reset_seq_vals[i] =
9905 le32_to_cpu(resp->reset_reg_val[i]);
9906 fw_health->fw_reset_seq_delay_msec[i] =
9907 resp->delay_after_reset[i];
9908 }
9909 err_recovery_out:
9910 hwrm_req_drop(bp, req);
9911 if (!rc)
9912 rc = bnxt_map_fw_health_regs(bp);
9913 if (rc)
9914 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9915 return rc;
9916 }
9917
9918 static int bnxt_hwrm_func_reset(struct bnxt *bp)
9919 {
9920 struct hwrm_func_reset_input *req;
9921 int rc;
9922
9923 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9924 if (rc)
9925 return rc;
9926
9927 req->enables = 0;
9928 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
9929 return hwrm_req_send(bp, req);
9930 }
9931
9932 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
9933 {
9934 struct hwrm_nvm_get_dev_info_output nvm_info;
9935
9936 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
9937 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9938 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
9939 nvm_info.nvm_cfg_ver_upd);
9940 }
9941
9942 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
9943 {
9944 struct hwrm_queue_qportcfg_output *resp;
9945 struct hwrm_queue_qportcfg_input *req;
9946 u8 i, j, *qptr;
9947 bool no_rdma;
9948 int rc = 0;
9949
9950 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9951 if (rc)
9952 return rc;
9953
9954 resp = hwrm_req_hold(bp, req);
9955 rc = hwrm_req_send(bp, req);
9956 if (rc)
9957 goto qportcfg_exit;
9958
9959 if (!resp->max_configurable_queues) {
9960 rc = -EINVAL;
9961 goto qportcfg_exit;
9962 }
9963 bp->max_tc = resp->max_configurable_queues;
9964 bp->max_lltc = resp->max_configurable_lossless_queues;
9965 if (bp->max_tc > BNXT_MAX_QUEUE)
9966 bp->max_tc = BNXT_MAX_QUEUE;
9967
9968 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
9969 qptr = &resp->queue_id0;
9970 for (i = 0, j = 0; i < bp->max_tc; i++) {
9971 bp->q_info[j].queue_id = *qptr;
9972 bp->q_ids[i] = *qptr++;
9973 bp->q_info[j].queue_profile = *qptr++;
9974 bp->tc_to_qidx[j] = j;
9975 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
9976 (no_rdma && BNXT_PF(bp)))
9977 j++;
9978 }
9979 bp->max_q = bp->max_tc;
9980 bp->max_tc = max_t(u8, j, 1);
9981
9982 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
9983 bp->max_tc = 1;
9984
9985 if (bp->max_lltc > bp->max_tc)
9986 bp->max_lltc = bp->max_tc;
9987
9988 qportcfg_exit:
9989 hwrm_req_drop(bp, req);
9990 return rc;
9991 }
9992
9993 static int bnxt_hwrm_poll(struct bnxt *bp)
9994 {
9995 struct hwrm_ver_get_input *req;
9996 int rc;
9997
9998 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9999 if (rc)
10000 return rc;
10001
10002 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10003 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10004 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10005
10006 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10007 rc = hwrm_req_send(bp, req);
10008 return rc;
10009 }
10010
10011 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10012 {
10013 struct hwrm_ver_get_output *resp;
10014 struct hwrm_ver_get_input *req;
10015 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10016 u32 dev_caps_cfg, hwrm_ver;
10017 int rc, len;
10018
10019 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10020 if (rc)
10021 return rc;
10022
10023 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10024 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10025 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10026 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10027 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10028
10029 resp = hwrm_req_hold(bp, req);
10030 rc = hwrm_req_send(bp, req);
10031 if (rc)
10032 goto hwrm_ver_get_exit;
10033
10034 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10035
10036 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10037 resp->hwrm_intf_min_8b << 8 |
10038 resp->hwrm_intf_upd_8b;
10039 if (resp->hwrm_intf_maj_8b < 1) {
10040 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10041 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10042 resp->hwrm_intf_upd_8b);
10043 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10044 }
10045
10046 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10047 HWRM_VERSION_UPDATE;
10048
10049 if (bp->hwrm_spec_code > hwrm_ver)
10050 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10051 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10052 HWRM_VERSION_UPDATE);
10053 else
10054 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10055 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10056 resp->hwrm_intf_upd_8b);
10057
10058 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10059 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10060 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10061 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10062 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10063 len = FW_VER_STR_LEN;
10064 } else {
10065 fw_maj = resp->hwrm_fw_maj_8b;
10066 fw_min = resp->hwrm_fw_min_8b;
10067 fw_bld = resp->hwrm_fw_bld_8b;
10068 fw_rsv = resp->hwrm_fw_rsvd_8b;
10069 len = BC_HWRM_STR_LEN;
10070 }
10071 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10072 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10073 fw_rsv);
10074
10075 if (strlen(resp->active_pkg_name)) {
10076 int fw_ver_len = strlen(bp->fw_ver_str);
10077
10078 snprintf(bp->fw_ver_str + fw_ver_len,
10079 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10080 resp->active_pkg_name);
10081 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10082 }
10083
10084 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10085 if (!bp->hwrm_cmd_timeout)
10086 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10087 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10088 if (!bp->hwrm_cmd_max_timeout)
10089 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10090 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
10091 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
10092 bp->hwrm_cmd_max_timeout / 1000);
10093
10094 if (resp->hwrm_intf_maj_8b >= 1) {
10095 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10096 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10097 }
10098 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10099 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10100
10101 bp->chip_num = le16_to_cpu(resp->chip_num);
10102 bp->chip_rev = resp->chip_rev;
10103 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10104 !resp->chip_metal)
10105 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10106
10107 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10108 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10109 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10110 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10111
10112 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10113 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10114
10115 if (dev_caps_cfg &
10116 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10117 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10118
10119 if (dev_caps_cfg &
10120 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10121 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10122
10123 if (dev_caps_cfg &
10124 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10125 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10126
10127 hwrm_ver_get_exit:
10128 hwrm_req_drop(bp, req);
10129 return rc;
10130 }
10131
10132 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10133 {
10134 struct hwrm_fw_set_time_input *req;
10135 struct tm tm;
10136 time64_t now = ktime_get_real_seconds();
10137 int rc;
10138
10139 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10140 bp->hwrm_spec_code < 0x10400)
10141 return -EOPNOTSUPP;
10142
10143 time64_to_tm(now, 0, &tm);
10144 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10145 if (rc)
10146 return rc;
10147
10148 req->year = cpu_to_le16(1900 + tm.tm_year);
10149 req->month = 1 + tm.tm_mon;
10150 req->day = tm.tm_mday;
10151 req->hour = tm.tm_hour;
10152 req->minute = tm.tm_min;
10153 req->second = tm.tm_sec;
10154 return hwrm_req_send(bp, req);
10155 }
10156
10157 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10158 {
10159 u64 sw_tmp;
10160
10161 hw &= mask;
10162 sw_tmp = (*sw & ~mask) | hw;
10163 if (hw < (*sw & mask))
10164 sw_tmp += mask + 1;
10165 WRITE_ONCE(*sw, sw_tmp);
10166 }
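/* Worked example with an illustrative 16-bit mask (the real masks come from
 * the hardware counter widths): with mask = 0xffff, *sw = 0x1fffe and a new
 * hw reading of 0x0003, the low bits have wrapped (0x0003 < 0xfffe), so
 * sw_tmp = 0x10003 + 0x10000 = 0x20003.
 */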
10167
10168 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10169 int count, bool ignore_zero)
10170 {
10171 int i;
10172
10173 for (i = 0; i < count; i++) {
10174 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10175
10176 if (ignore_zero && !hw)
10177 continue;
10178
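/* A mask of -1 means the counter is already full 64-bit and can be
 * copied as is; narrower counters go through bnxt_add_one_ctr() to
 * handle wraparound.
 */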
10179 if (masks[i] == -1ULL)
10180 sw_stats[i] = hw;
10181 else
10182 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10183 }
10184 }
10185
10186 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10187 {
10188 if (!stats->hw_stats)
10189 return;
10190
10191 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10192 stats->hw_masks, stats->len / 8, false);
10193 }
10194
10195 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10196 {
10197 struct bnxt_stats_mem *ring0_stats;
10198 bool ignore_zero = false;
10199 int i;
10200
10201 /* Chip bug. Counter intermittently becomes 0. */
10202 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10203 ignore_zero = true;
10204
10205 for (i = 0; i < bp->cp_nr_rings; i++) {
10206 struct bnxt_napi *bnapi = bp->bnapi[i];
10207 struct bnxt_cp_ring_info *cpr;
10208 struct bnxt_stats_mem *stats;
10209
10210 cpr = &bnapi->cp_ring;
10211 stats = &cpr->stats;
10212 if (!i)
10213 ring0_stats = stats;
10214 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10215 ring0_stats->hw_masks,
10216 ring0_stats->len / 8, ignore_zero);
10217 }
10218 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10219 struct bnxt_stats_mem *stats = &bp->port_stats;
10220 __le64 *hw_stats = stats->hw_stats;
10221 u64 *sw_stats = stats->sw_stats;
10222 u64 *masks = stats->hw_masks;
10223 int cnt;
10224
10225 cnt = sizeof(struct rx_port_stats) / 8;
10226 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10227
10228 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10229 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10230 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10231 cnt = sizeof(struct tx_port_stats) / 8;
10232 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10233 }
10234 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10235 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10236 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10237 }
10238 }
10239
10240 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10241 {
10242 struct hwrm_port_qstats_input *req;
10243 struct bnxt_pf_info *pf = &bp->pf;
10244 int rc;
10245
10246 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10247 return 0;
10248
10249 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10250 return -EOPNOTSUPP;
10251
10252 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10253 if (rc)
10254 return rc;
10255
10256 req->flags = flags;
10257 req->port_id = cpu_to_le16(pf->port_id);
10258 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10259 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10260 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10261 return hwrm_req_send(bp, req);
10262 }
10263
10264 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10265 {
10266 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10267 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10268 struct hwrm_port_qstats_ext_output *resp_qs;
10269 struct hwrm_port_qstats_ext_input *req_qs;
10270 struct bnxt_pf_info *pf = &bp->pf;
10271 u32 tx_stat_size;
10272 int rc;
10273
10274 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10275 return 0;
10276
10277 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10278 return -EOPNOTSUPP;
10279
10280 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10281 if (rc)
10282 return rc;
10283
10284 req_qs->flags = flags;
10285 req_qs->port_id = cpu_to_le16(pf->port_id);
10286 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10287 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10288 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10289 sizeof(struct tx_port_stats_ext) : 0;
10290 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10291 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10292 resp_qs = hwrm_req_hold(bp, req_qs);
10293 rc = hwrm_req_send(bp, req_qs);
10294 if (!rc) {
10295 bp->fw_rx_stats_ext_size =
10296 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10297 if (BNXT_FW_MAJ(bp) < 220 &&
10298 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10299 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10300
10301 bp->fw_tx_stats_ext_size = tx_stat_size ?
10302 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10303 } else {
10304 bp->fw_rx_stats_ext_size = 0;
10305 bp->fw_tx_stats_ext_size = 0;
10306 }
10307 hwrm_req_drop(bp, req_qs);
10308
10309 if (flags)
10310 return rc;
10311
10312 if (bp->fw_tx_stats_ext_size <=
10313 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10314 bp->pri2cos_valid = 0;
10315 return rc;
10316 }
10317
10318 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10319 if (rc)
10320 return rc;
10321
10322 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10323
10324 resp_qc = hwrm_req_hold(bp, req_qc);
10325 rc = hwrm_req_send(bp, req_qc);
10326 if (!rc) {
10327 u8 *pri2cos;
10328 int i, j;
10329
10330 pri2cos = &resp_qc->pri0_cos_queue_id;
10331 for (i = 0; i < 8; i++) {
10332 u8 queue_id = pri2cos[i];
10333 u8 queue_idx;
10334
10335 /* Per port queue IDs start from 0, 10, 20, etc */
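/* e.g. queue_id 12 belongs to the second port and maps to
 * per-port queue_idx 2 (12 % 10).
 */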
10336 queue_idx = queue_id % 10;
10337 if (queue_idx > BNXT_MAX_QUEUE) {
10338 bp->pri2cos_valid = false;
10339 hwrm_req_drop(bp, req_qc);
10340 return rc;
10341 }
10342 for (j = 0; j < bp->max_q; j++) {
10343 if (bp->q_ids[j] == queue_id)
10344 bp->pri2cos_idx[i] = queue_idx;
10345 }
10346 }
10347 bp->pri2cos_valid = true;
10348 }
10349 hwrm_req_drop(bp, req_qc);
10350
10351 return rc;
10352 }
10353
10354 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10355 {
10356 bnxt_hwrm_tunnel_dst_port_free(bp,
10357 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10358 bnxt_hwrm_tunnel_dst_port_free(bp,
10359 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10360 }
10361
10362 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10363 {
10364 int rc, i;
10365 u32 tpa_flags = 0;
10366
10367 if (set_tpa)
10368 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10369 else if (BNXT_NO_FW_ACCESS(bp))
10370 return 0;
10371 for (i = 0; i < bp->nr_vnics; i++) {
10372 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10373 if (rc) {
10374 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10375 i, rc);
10376 return rc;
10377 }
10378 }
10379 return 0;
10380 }
10381
10382 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10383 {
10384 int i;
10385
10386 for (i = 0; i < bp->nr_vnics; i++)
10387 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10388 }
10389
10390 static void bnxt_clear_vnic(struct bnxt *bp)
10391 {
10392 if (!bp->vnic_info)
10393 return;
10394
10395 bnxt_hwrm_clear_vnic_filter(bp);
10396 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10397 /* clear all RSS settings before freeing the vnic ctx */
10398 bnxt_hwrm_clear_vnic_rss(bp);
10399 bnxt_hwrm_vnic_ctx_free(bp);
10400 }
10401 /* before freeing the vnic, undo the vnic tpa settings */
10402 if (bp->flags & BNXT_FLAG_TPA)
10403 bnxt_set_tpa(bp, false);
10404 bnxt_hwrm_vnic_free(bp);
10405 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10406 bnxt_hwrm_vnic_ctx_free(bp);
10407 }
10408
10409 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10410 bool irq_re_init)
10411 {
10412 bnxt_clear_vnic(bp);
10413 bnxt_hwrm_ring_free(bp, close_path);
10414 bnxt_hwrm_ring_grp_free(bp);
10415 if (irq_re_init) {
10416 bnxt_hwrm_stat_ctx_free(bp);
10417 bnxt_hwrm_free_tunnel_ports(bp);
10418 }
10419 }
10420
10421 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10422 {
10423 struct hwrm_func_cfg_input *req;
10424 u8 evb_mode;
10425 int rc;
10426
10427 if (br_mode == BRIDGE_MODE_VEB)
10428 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10429 else if (br_mode == BRIDGE_MODE_VEPA)
10430 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10431 else
10432 return -EINVAL;
10433
10434 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10435 if (rc)
10436 return rc;
10437
10438 req->fid = cpu_to_le16(0xffff);
10439 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10440 req->evb_mode = evb_mode;
10441 return hwrm_req_send(bp, req);
10442 }
10443
10444 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10445 {
10446 struct hwrm_func_cfg_input *req;
10447 int rc;
10448
10449 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10450 return 0;
10451
10452 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10453 if (rc)
10454 return rc;
10455
10456 req->fid = cpu_to_le16(0xffff);
10457 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10458 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10459 if (size == 128)
10460 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10461
10462 return hwrm_req_send(bp, req);
10463 }
10464
10465 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10466 {
10467 int rc;
10468
10469 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10470 goto skip_rss_ctx;
10471
10472 /* allocate context for vnic */
10473 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10474 if (rc) {
10475 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10476 vnic->vnic_id, rc);
10477 goto vnic_setup_err;
10478 }
10479 bp->rsscos_nr_ctxs++;
10480
10481 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10482 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10483 if (rc) {
10484 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10485 vnic->vnic_id, rc);
10486 goto vnic_setup_err;
10487 }
10488 bp->rsscos_nr_ctxs++;
10489 }
10490
10491 skip_rss_ctx:
10492 /* configure default vnic, ring grp */
10493 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10494 if (rc) {
10495 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10496 vnic->vnic_id, rc);
10497 goto vnic_setup_err;
10498 }
10499
10500 /* Enable RSS hashing on vnic */
10501 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10502 if (rc) {
10503 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10504 vnic->vnic_id, rc);
10505 goto vnic_setup_err;
10506 }
10507
10508 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10509 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10510 if (rc) {
10511 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10512 vnic->vnic_id, rc);
10513 }
10514 }
10515
10516 vnic_setup_err:
10517 return rc;
10518 }
10519
10520 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10521 u8 valid)
10522 {
10523 struct hwrm_vnic_update_input *req;
10524 int rc;
10525
10526 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10527 if (rc)
10528 return rc;
10529
10530 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10531
10532 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10533 req->mru = cpu_to_le16(vnic->mru);
10534
10535 req->enables = cpu_to_le32(valid);
10536
10537 return hwrm_req_send(bp, req);
10538 }
10539
10540 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10541 {
10542 int rc;
10543
10544 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10545 if (rc) {
10546 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10547 vnic->vnic_id, rc);
10548 return rc;
10549 }
10550 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10551 if (rc)
10552 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10553 vnic->vnic_id, rc);
10554 return rc;
10555 }
10556
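/* Set up a VNIC on P5_PLUS chips: allocate one RSS context per group of RX
 * rings, program the RSS table and VNIC configuration, and enable HDS when
 * aggregation rings are used.
 */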
10557 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10558 {
10559 int rc, i, nr_ctxs;
10560
10561 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10562 for (i = 0; i < nr_ctxs; i++) {
10563 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10564 if (rc) {
10565 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10566 vnic->vnic_id, i, rc);
10567 break;
10568 }
10569 bp->rsscos_nr_ctxs++;
10570 }
10571 if (i < nr_ctxs)
10572 return -ENOMEM;
10573
10574 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10575 if (rc)
10576 return rc;
10577
10578 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10579 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10580 if (rc) {
10581 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10582 vnic->vnic_id, rc);
10583 }
10584 }
10585 return rc;
10586 }
10587
10588 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10589 {
10590 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10591 return __bnxt_setup_vnic_p5(bp, vnic);
10592 else
10593 return __bnxt_setup_vnic(bp, vnic);
10594 }
10595
10596 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10597 struct bnxt_vnic_info *vnic,
10598 u16 start_rx_ring_idx, int rx_rings)
10599 {
10600 int rc;
10601
10602 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10603 if (rc) {
10604 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10605 vnic->vnic_id, rc);
10606 return rc;
10607 }
10608 return bnxt_setup_vnic(bp, vnic);
10609 }
10610
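/* Allocate the VNICs needed for aRFS: a single ntuple VNIC covering all RX
 * rings when the chip supports it, otherwise (on older chips) one VNIC per
 * RX ring.  P5_PLUS chips without the ntuple VNIC need no extra VNICs.
 */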
10611 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10612 {
10613 struct bnxt_vnic_info *vnic;
10614 int i, rc = 0;
10615
10616 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10617 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10618 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10619 }
10620
10621 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10622 return 0;
10623
10624 for (i = 0; i < bp->rx_nr_rings; i++) {
10625 u16 vnic_id = i + 1;
10626 u16 ring_id = i;
10627
10628 if (vnic_id >= bp->nr_vnics)
10629 break;
10630
10631 vnic = &bp->vnic_info[vnic_id];
10632 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10633 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10634 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10635 if (bnxt_alloc_and_setup_vnic(bp, vnic, ring_id, 1))
10636 break;
10637 }
10638 return rc;
10639 }
10640
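/* Delete one user-created RSS context: free its VNIC and RSS contexts if
 * the device is running and, when @all is set, also remove the ntuple
 * filters referencing it and free its DMA RSS table.
 */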
10641 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10642 bool all)
10643 {
10644 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10645 struct bnxt_filter_base *usr_fltr, *tmp;
10646 struct bnxt_ntuple_filter *ntp_fltr;
10647 int i;
10648
10649 if (netif_running(bp->dev)) {
10650 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10651 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10652 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10653 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10654 }
10655 }
10656 if (!all)
10657 return;
10658
10659 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10660 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10661 usr_fltr->fw_vnic_id == rss_ctx->index) {
10662 ntp_fltr = container_of(usr_fltr,
10663 struct bnxt_ntuple_filter,
10664 base);
10665 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10666 bnxt_del_ntp_filter(bp, ntp_fltr);
10667 bnxt_del_one_usr_fltr(bp, usr_fltr);
10668 }
10669 }
10670
10671 if (vnic->rss_table)
10672 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10673 vnic->rss_table,
10674 vnic->rss_table_dma_addr);
10675 bp->num_rss_ctx--;
10676 }
10677
10678 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10679 {
10680 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10681 struct ethtool_rxfh_context *ctx;
10682 unsigned long context;
10683
10684 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10685 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10686 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10687
10688 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10689 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10690 __bnxt_setup_vnic_p5(bp, vnic)) {
10691 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10692 rss_ctx->index);
10693 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10694 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10695 }
10696 }
10697 }
10698
10699 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10700 {
10701 struct ethtool_rxfh_context *ctx;
10702 unsigned long context;
10703
10704 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10705 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10706
10707 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
10708 }
10709 }
10710
10711 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10712 static bool bnxt_promisc_ok(struct bnxt *bp)
10713 {
10714 #ifdef CONFIG_BNXT_SRIOV
10715 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10716 return false;
10717 #endif
10718 return true;
10719 }
10720
10721 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10722 {
10723 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10724 int rc;
10725
10726 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10727 if (rc) {
10728 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10729 rc);
10730 return rc;
10731 }
10732
10733 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10734 if (rc) {
10735 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10736 rc);
10737 return rc;
10738 }
10739 return rc;
10740 }
10741
10742 static int bnxt_cfg_rx_mode(struct bnxt *);
10743 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10744
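/* Bring up the chip for traffic: allocate stat contexts, rings and ring
 * groups, set up the default VNIC with RSS, RFS VNICs and TPA, program the
 * unicast filter and RX mask, and set interrupt coalescing.  On failure,
 * all FW resources allocated so far are freed.
 */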
10745 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10746 {
10747 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10748 int rc = 0;
10749 unsigned int rx_nr_rings = bp->rx_nr_rings;
10750
10751 if (irq_re_init) {
10752 rc = bnxt_hwrm_stat_ctx_alloc(bp);
10753 if (rc) {
10754 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10755 rc);
10756 goto err_out;
10757 }
10758 }
10759
10760 rc = bnxt_hwrm_ring_alloc(bp);
10761 if (rc) {
10762 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
10763 goto err_out;
10764 }
10765
10766 rc = bnxt_hwrm_ring_grp_alloc(bp);
10767 if (rc) {
10768 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
10769 goto err_out;
10770 }
10771
10772 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10773 rx_nr_rings--;
10774
10775 /* default vnic 0 */
10776 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
10777 if (rc) {
10778 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
10779 goto err_out;
10780 }
10781
10782 if (BNXT_VF(bp))
10783 bnxt_hwrm_func_qcfg(bp);
10784
10785 rc = bnxt_setup_vnic(bp, vnic);
10786 if (rc)
10787 goto err_out;
10788 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
10789 bnxt_hwrm_update_rss_hash_cfg(bp);
10790
10791 if (bp->flags & BNXT_FLAG_RFS) {
10792 rc = bnxt_alloc_rfs_vnics(bp);
10793 if (rc)
10794 goto err_out;
10795 }
10796
10797 if (bp->flags & BNXT_FLAG_TPA) {
10798 rc = bnxt_set_tpa(bp, true);
10799 if (rc)
10800 goto err_out;
10801 }
10802
10803 if (BNXT_VF(bp))
10804 bnxt_update_vf_mac(bp);
10805
10806 /* Filter for default vnic 0 */
10807 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10808 if (rc) {
10809 if (BNXT_VF(bp) && rc == -ENODEV)
10810 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
10811 else
10812 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10813 goto err_out;
10814 }
10815 vnic->uc_filter_count = 1;
10816
10817 vnic->rx_mask = 0;
10818 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
10819 goto skip_rx_mask;
10820
10821 if (bp->dev->flags & IFF_BROADCAST)
10822 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10823
10824 if (bp->dev->flags & IFF_PROMISC)
10825 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10826
10827 if (bp->dev->flags & IFF_ALLMULTI) {
10828 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10829 vnic->mc_list_count = 0;
10830 } else if (bp->dev->flags & IFF_MULTICAST) {
10831 u32 mask = 0;
10832
10833 bnxt_mc_list_updated(bp, &mask);
10834 vnic->rx_mask |= mask;
10835 }
10836
10837 rc = bnxt_cfg_rx_mode(bp);
10838 if (rc)
10839 goto err_out;
10840
10841 skip_rx_mask:
10842 rc = bnxt_hwrm_set_coal(bp);
10843 if (rc)
10844 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
10845 rc);
10846
10847 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10848 rc = bnxt_setup_nitroa0_vnic(bp);
10849 if (rc)
10850 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10851 rc);
10852 }
10853
10854 if (BNXT_VF(bp)) {
10855 bnxt_hwrm_func_qcfg(bp);
10856 netdev_update_features(bp->dev);
10857 }
10858
10859 return 0;
10860
10861 err_out:
10862 bnxt_hwrm_resource_free(bp, 0, true);
10863
10864 return rc;
10865 }
10866
10867 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
10868 {
10869 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
10870 return 0;
10871 }
10872
10873 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
10874 {
10875 bnxt_init_cp_rings(bp);
10876 bnxt_init_rx_rings(bp);
10877 bnxt_init_tx_rings(bp);
10878 bnxt_init_ring_grps(bp, irq_re_init);
10879 bnxt_init_vnics(bp);
10880
10881 return bnxt_init_chip(bp, irq_re_init);
10882 }
10883
10884 static int bnxt_set_real_num_queues(struct bnxt *bp)
10885 {
10886 int rc;
10887 struct net_device *dev = bp->dev;
10888
10889 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
10890 bp->tx_nr_rings_xdp);
10891 if (rc)
10892 return rc;
10893
10894 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10895 if (rc)
10896 return rc;
10897
10898 #ifdef CONFIG_RFS_ACCEL
10899 if (bp->flags & BNXT_FLAG_RFS)
10900 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
10901 #endif
10902
10903 return rc;
10904 }
10905
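/* Trim the RX and TX ring counts to fit within @max completion rings.  With
 * shared completion rings each count is simply capped at @max; otherwise
 * the larger of the two is reduced until the sum fits.
 */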
10906 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10907 bool shared)
10908 {
10909 int _rx = *rx, _tx = *tx;
10910
10911 if (shared) {
10912 *rx = min_t(int, _rx, max);
10913 *tx = min_t(int, _tx, max);
10914 } else {
10915 if (max < 2)
10916 return -ENOMEM;
10917
10918 while (_rx + _tx > max) {
10919 if (_rx > _tx && _rx > 1)
10920 _rx--;
10921 else if (_tx > 1)
10922 _tx--;
10923 }
10924 *rx = _rx;
10925 *tx = _tx;
10926 }
10927 return 0;
10928 }
10929
10930 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
10931 {
10932 return (tx - tx_xdp) / tx_sets + tx_xdp;
10933 }
10934
10935 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
10936 {
10937 int tcs = bp->num_tc;
10938
10939 if (!tcs)
10940 tcs = 1;
10941 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
10942 }
10943
10944 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
10945 {
10946 int tcs = bp->num_tc;
10947
10948 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
10949 bp->tx_nr_rings_xdp;
10950 }
10951
10952 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10953 bool sh)
10954 {
10955 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
10956
10957 if (tx_cp != *tx) {
10958 int tx_saved = tx_cp, rc;
10959
10960 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10961 if (rc)
10962 return rc;
10963 if (tx_cp != tx_saved)
10964 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
10965 return 0;
10966 }
10967 return __bnxt_trim_rings(bp, rx, tx, max, sh);
10968 }
10969
10970 static void bnxt_setup_msix(struct bnxt *bp)
10971 {
10972 const int len = sizeof(bp->irq_tbl[0].name);
10973 struct net_device *dev = bp->dev;
10974 int tcs, i;
10975
10976 tcs = bp->num_tc;
10977 if (tcs) {
10978 int i, off, count;
10979
10980 for (i = 0; i < tcs; i++) {
10981 count = bp->tx_nr_rings_per_tc;
10982 off = BNXT_TC_TO_RING_BASE(bp, i);
10983 netdev_set_tc_queue(dev, i, count, off);
10984 }
10985 }
10986
10987 for (i = 0; i < bp->cp_nr_rings; i++) {
10988 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10989 char *attr;
10990
10991 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10992 attr = "TxRx";
10993 else if (i < bp->rx_nr_rings)
10994 attr = "rx";
10995 else
10996 attr = "tx";
10997
10998 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
10999 attr, i);
11000 bp->irq_tbl[map_idx].handler = bnxt_msix;
11001 }
11002 }
11003
11004 static int bnxt_init_int_mode(struct bnxt *bp);
11005
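/* Dynamically grow or shrink the number of allocated MSI-X vectors to
 * @total and return the resulting vector count.
 */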
11006 static int bnxt_change_msix(struct bnxt *bp, int total)
11007 {
11008 struct msi_map map;
11009 int i;
11010
11011 /* add MSIX to the end if needed */
11012 for (i = bp->total_irqs; i < total; i++) {
11013 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11014 if (map.index < 0)
11015 return bp->total_irqs;
11016 bp->irq_tbl[i].vector = map.virq;
11017 bp->total_irqs++;
11018 }
11019
11020 /* trim MSIX from the end if needed */
11021 for (i = bp->total_irqs; i > total; i--) {
11022 map.index = i - 1;
11023 map.virq = bp->irq_tbl[i - 1].vector;
11024 pci_msix_free_irq(bp->pdev, map);
11025 bp->total_irqs--;
11026 }
11027 return bp->total_irqs;
11028 }
11029
11030 static int bnxt_setup_int_mode(struct bnxt *bp)
11031 {
11032 int rc;
11033
11034 if (!bp->irq_tbl) {
11035 rc = bnxt_init_int_mode(bp);
11036 if (rc || !bp->irq_tbl)
11037 return rc ?: -ENODEV;
11038 }
11039
11040 bnxt_setup_msix(bp);
11041
11042 rc = bnxt_set_real_num_queues(bp);
11043 return rc;
11044 }
11045
11046 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11047 {
11048 return bp->hw_resc.max_rsscos_ctxs;
11049 }
11050
11051 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11052 {
11053 return bp->hw_resc.max_vnics;
11054 }
11055
11056 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11057 {
11058 return bp->hw_resc.max_stat_ctxs;
11059 }
11060
11061 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11062 {
11063 return bp->hw_resc.max_cp_rings;
11064 }
11065
11066 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11067 {
11068 unsigned int cp = bp->hw_resc.max_cp_rings;
11069
11070 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11071 cp -= bnxt_get_ulp_msix_num(bp);
11072
11073 return cp;
11074 }
11075
11076 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11077 {
11078 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11079
11080 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11081 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11082
11083 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11084 }
11085
11086 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11087 {
11088 bp->hw_resc.max_irqs = max_irqs;
11089 }
11090
11091 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11092 {
11093 unsigned int cp;
11094
11095 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11096 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11097 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11098 else
11099 return cp - bp->cp_nr_rings;
11100 }
11101
11102 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11103 {
11104 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11105 }
11106
11107 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11108 {
11109 int max_irq = bnxt_get_max_func_irqs(bp);
11110 int total_req = bp->cp_nr_rings + num;
11111
11112 if (max_irq < total_req) {
11113 num = max_irq - bp->cp_nr_rings;
11114 if (num <= 0)
11115 return 0;
11116 }
11117 return num;
11118 }
11119
11120 static int bnxt_get_num_msix(struct bnxt *bp)
11121 {
11122 if (!BNXT_NEW_RM(bp))
11123 return bnxt_get_max_func_irqs(bp);
11124
11125 return bnxt_nq_rings_in_use(bp);
11126 }
11127
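/* Allocate MSI-X vectors, build the IRQ table, and trim the RX/TX ring
 * counts to fit the vectors actually obtained (minus those reserved for
 * the ULP driver).
 */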
11128 static int bnxt_init_int_mode(struct bnxt *bp)
11129 {
11130 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11131
11132 total_vecs = bnxt_get_num_msix(bp);
11133 max = bnxt_get_max_func_irqs(bp);
11134 if (total_vecs > max)
11135 total_vecs = max;
11136
11137 if (!total_vecs)
11138 return 0;
11139
11140 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11141 min = 2;
11142
11143 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11144 PCI_IRQ_MSIX);
11145 ulp_msix = bnxt_get_ulp_msix_num(bp);
11146 if (total_vecs < 0 || total_vecs < ulp_msix) {
11147 rc = -ENODEV;
11148 goto msix_setup_exit;
11149 }
11150
11151 tbl_size = total_vecs;
11152 if (pci_msix_can_alloc_dyn(bp->pdev))
11153 tbl_size = max;
11154 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11155 if (bp->irq_tbl) {
11156 for (i = 0; i < total_vecs; i++)
11157 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11158
11159 bp->total_irqs = total_vecs;
11160 /* Trim rings based on the number of vectors allocated */
11161 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11162 total_vecs - ulp_msix, min == 1);
11163 if (rc)
11164 goto msix_setup_exit;
11165
11166 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11167 bp->cp_nr_rings = (min == 1) ?
11168 max_t(int, tx_cp, bp->rx_nr_rings) :
11169 tx_cp + bp->rx_nr_rings;
11170
11171 } else {
11172 rc = -ENOMEM;
11173 goto msix_setup_exit;
11174 }
11175 return 0;
11176
11177 msix_setup_exit:
11178 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11179 kfree(bp->irq_tbl);
11180 bp->irq_tbl = NULL;
11181 pci_free_irq_vectors(bp->pdev);
11182 return rc;
11183 }
11184
11185 static void bnxt_clear_int_mode(struct bnxt *bp)
11186 {
11187 pci_free_irq_vectors(bp->pdev);
11188
11189 kfree(bp->irq_tbl);
11190 bp->irq_tbl = NULL;
11191 }
11192
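/* Reserve rings and related resources with the FW.  If the required number
 * of MSI-X vectors changes, either adjust them dynamically or re-initialize
 * the interrupt mode (stopping ULP IRQs around the change).
 */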
11193 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11194 {
11195 bool irq_cleared = false;
11196 bool irq_change = false;
11197 int tcs = bp->num_tc;
11198 int irqs_required;
11199 int rc;
11200
11201 if (!bnxt_need_reserve_rings(bp))
11202 return 0;
11203
11204 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11205 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11206
11207 if (ulp_msix > bp->ulp_num_msix_want)
11208 ulp_msix = bp->ulp_num_msix_want;
11209 irqs_required = ulp_msix + bp->cp_nr_rings;
11210 } else {
11211 irqs_required = bnxt_get_num_msix(bp);
11212 }
11213
11214 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11215 irq_change = true;
11216 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11217 bnxt_ulp_irq_stop(bp);
11218 bnxt_clear_int_mode(bp);
11219 irq_cleared = true;
11220 }
11221 }
11222 rc = __bnxt_reserve_rings(bp);
11223 if (irq_cleared) {
11224 if (!rc)
11225 rc = bnxt_init_int_mode(bp);
11226 bnxt_ulp_irq_restart(bp, rc);
11227 } else if (irq_change && !rc) {
11228 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11229 rc = -ENOSPC;
11230 }
11231 if (rc) {
11232 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11233 return rc;
11234 }
11235 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11236 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11237 netdev_err(bp->dev, "tx ring reservation failure\n");
11238 netdev_reset_tc(bp->dev);
11239 bp->num_tc = 0;
11240 if (bp->tx_nr_rings_xdp)
11241 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11242 else
11243 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11244 return -ENOMEM;
11245 }
11246 return 0;
11247 }
11248
11249 static void bnxt_free_irq(struct bnxt *bp)
11250 {
11251 struct bnxt_irq *irq;
11252 int i;
11253
11254 #ifdef CONFIG_RFS_ACCEL
11255 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11256 bp->dev->rx_cpu_rmap = NULL;
11257 #endif
11258 if (!bp->irq_tbl || !bp->bnapi)
11259 return;
11260
11261 for (i = 0; i < bp->cp_nr_rings; i++) {
11262 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11263
11264 irq = &bp->irq_tbl[map_idx];
11265 if (irq->requested) {
11266 if (irq->have_cpumask) {
11267 irq_update_affinity_hint(irq->vector, NULL);
11268 free_cpumask_var(irq->cpu_mask);
11269 irq->have_cpumask = 0;
11270 }
11271 free_irq(irq->vector, bp->bnapi[i]);
11272 }
11273
11274 irq->requested = 0;
11275 }
11276 }
11277
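/* Set up the interrupt mode and request one IRQ per completion ring, adding
 * each vector to the aRFS CPU rmap when enabled and spreading affinity
 * hints across the local NUMA node.
 */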
11278 static int bnxt_request_irq(struct bnxt *bp)
11279 {
11280 int i, j, rc = 0;
11281 unsigned long flags = 0;
11282 #ifdef CONFIG_RFS_ACCEL
11283 struct cpu_rmap *rmap;
11284 #endif
11285
11286 rc = bnxt_setup_int_mode(bp);
11287 if (rc) {
11288 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11289 rc);
11290 return rc;
11291 }
11292 #ifdef CONFIG_RFS_ACCEL
11293 rmap = bp->dev->rx_cpu_rmap;
11294 #endif
11295 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11296 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11297 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11298
11299 #ifdef CONFIG_RFS_ACCEL
11300 if (rmap && bp->bnapi[i]->rx_ring) {
11301 rc = irq_cpu_rmap_add(rmap, irq->vector);
11302 if (rc)
11303 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11304 j);
11305 j++;
11306 }
11307 #endif
11308 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11309 bp->bnapi[i]);
11310 if (rc)
11311 break;
11312
11313 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
11314 irq->requested = 1;
11315
11316 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11317 int numa_node = dev_to_node(&bp->pdev->dev);
11318
11319 irq->have_cpumask = 1;
11320 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11321 irq->cpu_mask);
11322 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11323 if (rc) {
11324 netdev_warn(bp->dev,
11325 "Update affinity hint failed, IRQ = %d\n",
11326 irq->vector);
11327 break;
11328 }
11329 }
11330 }
11331 return rc;
11332 }
11333
11334 static void bnxt_del_napi(struct bnxt *bp)
11335 {
11336 int i;
11337
11338 if (!bp->bnapi)
11339 return;
11340
11341 for (i = 0; i < bp->rx_nr_rings; i++)
11342 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11343 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11344 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11345
11346 for (i = 0; i < bp->cp_nr_rings; i++) {
11347 struct bnxt_napi *bnapi = bp->bnapi[i];
11348
11349 __netif_napi_del(&bnapi->napi);
11350 }
11351 /* We called __netif_napi_del(), so we need to respect an RCU grace
11352 * period before freeing the napi structures.
11353 */
11354 synchronize_net();
11355 }
11356
11357 static void bnxt_init_napi(struct bnxt *bp)
11358 {
11359 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11360 unsigned int cp_nr_rings = bp->cp_nr_rings;
11361 struct bnxt_napi *bnapi;
11362 int i;
11363
11364 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11365 poll_fn = bnxt_poll_p5;
11366 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11367 cp_nr_rings--;
11368 for (i = 0; i < cp_nr_rings; i++) {
11369 bnapi = bp->bnapi[i];
11370 netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
11371 bnapi->index);
11372 }
11373 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11374 bnapi = bp->bnapi[cp_nr_rings];
11375 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11376 }
11377 }
11378
11379 static void bnxt_disable_napi(struct bnxt *bp)
11380 {
11381 int i;
11382
11383 if (!bp->bnapi ||
11384 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11385 return;
11386
11387 for (i = 0; i < bp->cp_nr_rings; i++) {
11388 struct bnxt_napi *bnapi = bp->bnapi[i];
11389 struct bnxt_cp_ring_info *cpr;
11390
11391 cpr = &bnapi->cp_ring;
11392 if (bnapi->tx_fault)
11393 cpr->sw_stats->tx.tx_resets++;
11394 if (bnapi->in_reset)
11395 cpr->sw_stats->rx.rx_resets++;
11396 napi_disable(&bnapi->napi);
11397 }
11398 }
11399
11400 static void bnxt_enable_napi(struct bnxt *bp)
11401 {
11402 int i;
11403
11404 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11405 for (i = 0; i < bp->cp_nr_rings; i++) {
11406 struct bnxt_napi *bnapi = bp->bnapi[i];
11407 struct bnxt_cp_ring_info *cpr;
11408
11409 bnapi->tx_fault = 0;
11410
11411 cpr = &bnapi->cp_ring;
11412 bnapi->in_reset = false;
11413
11414 if (bnapi->rx_ring) {
11415 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11416 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11417 }
11418 napi_enable(&bnapi->napi);
11419 }
11420 }
11421
11422 void bnxt_tx_disable(struct bnxt *bp)
11423 {
11424 int i;
11425 struct bnxt_tx_ring_info *txr;
11426
11427 if (bp->tx_ring) {
11428 for (i = 0; i < bp->tx_nr_rings; i++) {
11429 txr = &bp->tx_ring[i];
11430 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11431 }
11432 }
11433 /* Make sure napi polls see @dev_state change */
11434 synchronize_net();
11435 /* Drop carrier first to prevent TX timeout */
11436 netif_carrier_off(bp->dev);
11437 /* Stop all TX queues */
11438 netif_tx_disable(bp->dev);
11439 }
11440
11441 void bnxt_tx_enable(struct bnxt *bp)
11442 {
11443 int i;
11444 struct bnxt_tx_ring_info *txr;
11445
11446 for (i = 0; i < bp->tx_nr_rings; i++) {
11447 txr = &bp->tx_ring[i];
11448 WRITE_ONCE(txr->dev_state, 0);
11449 }
11450 /* Make sure napi polls see @dev_state change */
11451 synchronize_net();
11452 netif_tx_wake_all_queues(bp->dev);
11453 if (BNXT_LINK_IS_UP(bp))
11454 netif_carrier_on(bp->dev);
11455 }
11456
11457 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11458 {
11459 u8 active_fec = link_info->active_fec_sig_mode &
11460 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11461
11462 switch (active_fec) {
11463 default:
11464 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11465 return "None";
11466 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11467 return "Clause 74 BaseR";
11468 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11469 return "Clause 91 RS(528,514)";
11470 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11471 return "Clause 91 RS544_1XN";
11472 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11473 return "Clause 91 RS(544,514)";
11474 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11475 return "Clause 91 RS272_1XN";
11476 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11477 return "Clause 91 RS(272,257)";
11478 }
11479 }
11480
11481 void bnxt_report_link(struct bnxt *bp)
11482 {
11483 if (BNXT_LINK_IS_UP(bp)) {
11484 const char *signal = "";
11485 const char *flow_ctrl;
11486 const char *duplex;
11487 u32 speed;
11488 u16 fec;
11489
11490 netif_carrier_on(bp->dev);
11491 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11492 if (speed == SPEED_UNKNOWN) {
11493 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11494 return;
11495 }
11496 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11497 duplex = "full";
11498 else
11499 duplex = "half";
11500 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11501 flow_ctrl = "ON - receive & transmit";
11502 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11503 flow_ctrl = "ON - transmit";
11504 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
11505 flow_ctrl = "ON - receive";
11506 else
11507 flow_ctrl = "none";
11508 if (bp->link_info.phy_qcfg_resp.option_flags &
11509 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
11510 u8 sig_mode = bp->link_info.active_fec_sig_mode &
11511 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
11512 switch (sig_mode) {
11513 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
11514 signal = "(NRZ) ";
11515 break;
11516 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
11517 signal = "(PAM4 56Gbps) ";
11518 break;
11519 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
11520 signal = "(PAM4 112Gbps) ";
11521 break;
11522 default:
11523 break;
11524 }
11525 }
11526 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
11527 speed, signal, duplex, flow_ctrl);
11528 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
11529 netdev_info(bp->dev, "EEE is %s\n",
11530 bp->eee.eee_active ? "active" :
11531 "not active");
11532 fec = bp->link_info.fec_cfg;
11533 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
11534 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
11535 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
11536 bnxt_report_fec(&bp->link_info));
11537 } else {
11538 netif_carrier_off(bp->dev);
11539 netdev_err(bp->dev, "NIC Link is Down\n");
11540 }
11541 }
11542
11543 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
11544 {
11545 if (!resp->supported_speeds_auto_mode &&
11546 !resp->supported_speeds_force_mode &&
11547 !resp->supported_pam4_speeds_auto_mode &&
11548 !resp->supported_pam4_speeds_force_mode &&
11549 !resp->supported_speeds2_auto_mode &&
11550 !resp->supported_speeds2_force_mode)
11551 return true;
11552 return false;
11553 }
11554
11555 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
11556 {
11557 struct bnxt_link_info *link_info = &bp->link_info;
11558 struct hwrm_port_phy_qcaps_output *resp;
11559 struct hwrm_port_phy_qcaps_input *req;
11560 int rc = 0;
11561
11562 if (bp->hwrm_spec_code < 0x10201)
11563 return 0;
11564
11565 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
11566 if (rc)
11567 return rc;
11568
11569 resp = hwrm_req_hold(bp, req);
11570 rc = hwrm_req_send(bp, req);
11571 if (rc)
11572 goto hwrm_phy_qcaps_exit;
11573
11574 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
11575 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
11576 struct ethtool_keee *eee = &bp->eee;
11577 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
11578
11579 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
11580 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
11581 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
11582 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
11583 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
11584 }
11585
11586 if (bp->hwrm_spec_code >= 0x10a01) {
11587 if (bnxt_phy_qcaps_no_speed(resp)) {
11588 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
11589 netdev_warn(bp->dev, "Ethernet link disabled\n");
11590 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
11591 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
11592 netdev_info(bp->dev, "Ethernet link enabled\n");
11593 /* Phy re-enabled, reprobe the speeds */
11594 link_info->support_auto_speeds = 0;
11595 link_info->support_pam4_auto_speeds = 0;
11596 link_info->support_auto_speeds2 = 0;
11597 }
11598 }
11599 if (resp->supported_speeds_auto_mode)
11600 link_info->support_auto_speeds =
11601 le16_to_cpu(resp->supported_speeds_auto_mode);
11602 if (resp->supported_pam4_speeds_auto_mode)
11603 link_info->support_pam4_auto_speeds =
11604 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
11605 if (resp->supported_speeds2_auto_mode)
11606 link_info->support_auto_speeds2 =
11607 le16_to_cpu(resp->supported_speeds2_auto_mode);
11608
11609 bp->port_count = resp->port_cnt;
11610
11611 hwrm_phy_qcaps_exit:
11612 hwrm_req_drop(bp, req);
11613 return rc;
11614 }
11615
11616 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
11617 {
11618 struct hwrm_port_mac_qcaps_output *resp;
11619 struct hwrm_port_mac_qcaps_input *req;
11620 int rc;
11621
11622 if (bp->hwrm_spec_code < 0x10a03)
11623 return;
11624
11625 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
11626 if (rc)
11627 return;
11628
11629 resp = hwrm_req_hold(bp, req);
11630 rc = hwrm_req_send_silent(bp, req);
11631 if (!rc)
11632 bp->mac_flags = resp->flags;
11633 hwrm_req_drop(bp, req);
11634 }
11635
11636 static bool bnxt_support_dropped(u16 advertising, u16 supported)
11637 {
11638 u16 diff = advertising ^ supported;
11639
11640 return ((supported | diff) != supported);
11641 }
11642
11643 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
11644 {
11645 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
11646
11647 /* Check if any advertised speeds are no longer supported. The caller
11648 * holds the link_lock mutex, so we can modify link_info settings.
11649 */
11650 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11651 if (bnxt_support_dropped(link_info->advertising,
11652 link_info->support_auto_speeds2)) {
11653 link_info->advertising = link_info->support_auto_speeds2;
11654 return true;
11655 }
11656 return false;
11657 }
11658 if (bnxt_support_dropped(link_info->advertising,
11659 link_info->support_auto_speeds)) {
11660 link_info->advertising = link_info->support_auto_speeds;
11661 return true;
11662 }
11663 if (bnxt_support_dropped(link_info->advertising_pam4,
11664 link_info->support_pam4_auto_speeds)) {
11665 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
11666 return true;
11667 }
11668 return false;
11669 }
11670
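/* Query PORT_PHY_QCFG and refresh the cached link_info (speed, pause, FEC,
 * EEE, supported/advertised speeds).  When @chng_link_state is set, update
 * the link state and report any change; also re-advertise if previously
 * advertised speeds are no longer supported.
 */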
11671 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
11672 {
11673 struct bnxt_link_info *link_info = &bp->link_info;
11674 struct hwrm_port_phy_qcfg_output *resp;
11675 struct hwrm_port_phy_qcfg_input *req;
11676 u8 link_state = link_info->link_state;
11677 bool support_changed;
11678 int rc;
11679
11680 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
11681 if (rc)
11682 return rc;
11683
11684 resp = hwrm_req_hold(bp, req);
11685 rc = hwrm_req_send(bp, req);
11686 if (rc) {
11687 hwrm_req_drop(bp, req);
11688 if (BNXT_VF(bp) && rc == -ENODEV) {
11689 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
11690 rc = 0;
11691 }
11692 return rc;
11693 }
11694
11695 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
11696 link_info->phy_link_status = resp->link;
11697 link_info->duplex = resp->duplex_cfg;
11698 if (bp->hwrm_spec_code >= 0x10800)
11699 link_info->duplex = resp->duplex_state;
11700 link_info->pause = resp->pause;
11701 link_info->auto_mode = resp->auto_mode;
11702 link_info->auto_pause_setting = resp->auto_pause;
11703 link_info->lp_pause = resp->link_partner_adv_pause;
11704 link_info->force_pause_setting = resp->force_pause;
11705 link_info->duplex_setting = resp->duplex_cfg;
11706 if (link_info->phy_link_status == BNXT_LINK_LINK) {
11707 link_info->link_speed = le16_to_cpu(resp->link_speed);
11708 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
11709 link_info->active_lanes = resp->active_lanes;
11710 } else {
11711 link_info->link_speed = 0;
11712 link_info->active_lanes = 0;
11713 }
11714 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
11715 link_info->force_pam4_link_speed =
11716 le16_to_cpu(resp->force_pam4_link_speed);
11717 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
11718 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
11719 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
11720 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
11721 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
11722 link_info->auto_pam4_link_speeds =
11723 le16_to_cpu(resp->auto_pam4_link_speed_mask);
11724 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
11725 link_info->lp_auto_link_speeds =
11726 le16_to_cpu(resp->link_partner_adv_speeds);
11727 link_info->lp_auto_pam4_link_speeds =
11728 resp->link_partner_pam4_adv_speeds;
11729 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
11730 link_info->phy_ver[0] = resp->phy_maj;
11731 link_info->phy_ver[1] = resp->phy_min;
11732 link_info->phy_ver[2] = resp->phy_bld;
11733 link_info->media_type = resp->media_type;
11734 link_info->phy_type = resp->phy_type;
11735 link_info->transceiver = resp->xcvr_pkg_type;
11736 link_info->phy_addr = resp->eee_config_phy_addr &
11737 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
11738 link_info->module_status = resp->module_status;
11739
11740 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
11741 struct ethtool_keee *eee = &bp->eee;
11742 u16 fw_speeds;
11743
11744 eee->eee_active = 0;
11745 if (resp->eee_config_phy_addr &
11746 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
11747 eee->eee_active = 1;
11748 fw_speeds = le16_to_cpu(
11749 resp->link_partner_adv_eee_link_speed_mask);
11750 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
11751 }
11752
11753 /* Pull initial EEE config */
11754 if (!chng_link_state) {
11755 if (resp->eee_config_phy_addr &
11756 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
11757 eee->eee_enabled = 1;
11758
11759 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
11760 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
11761
11762 if (resp->eee_config_phy_addr &
11763 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
11764 __le32 tmr;
11765
11766 eee->tx_lpi_enabled = 1;
11767 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
11768 eee->tx_lpi_timer = le32_to_cpu(tmr) &
11769 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
11770 }
11771 }
11772 }
11773
11774 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
11775 if (bp->hwrm_spec_code >= 0x10504) {
11776 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
11777 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
11778 }
11779 /* TODO: need to add more logic to report VF link */
11780 if (chng_link_state) {
11781 if (link_info->phy_link_status == BNXT_LINK_LINK)
11782 link_info->link_state = BNXT_LINK_STATE_UP;
11783 else
11784 link_info->link_state = BNXT_LINK_STATE_DOWN;
11785 if (link_state != link_info->link_state)
11786 bnxt_report_link(bp);
11787 } else {
11788 /* Always report link down if not required to update the link state */
11789 link_info->link_state = BNXT_LINK_STATE_DOWN;
11790 }
11791 hwrm_req_drop(bp, req);
11792
11793 if (!BNXT_PHY_CFG_ABLE(bp))
11794 return 0;
11795
11796 support_changed = bnxt_support_speed_dropped(link_info);
11797 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
11798 bnxt_hwrm_set_link_setting(bp, true, false);
11799 return 0;
11800 }
11801
11802 static void bnxt_get_port_module_status(struct bnxt *bp)
11803 {
11804 struct bnxt_link_info *link_info = &bp->link_info;
11805 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
11806 u8 module_status;
11807
11808 if (bnxt_update_link(bp, true))
11809 return;
11810
11811 module_status = link_info->module_status;
11812 switch (module_status) {
11813 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
11814 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
11815 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
11816 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
11817 bp->pf.port_id);
11818 if (bp->hwrm_spec_code >= 0x10201) {
11819 netdev_warn(bp->dev, "Module part number %s\n",
11820 resp->phy_vendor_partnumber);
11821 }
11822 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
11823 netdev_warn(bp->dev, "TX is disabled\n");
11824 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
11825 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
11826 }
11827 }
11828
11829 static void
11830 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11831 {
11832 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
11833 if (bp->hwrm_spec_code >= 0x10201)
11834 req->auto_pause =
11835 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
11836 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11837 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
11838 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11839 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
11840 req->enables |=
11841 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11842 } else {
11843 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11844 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
11845 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11846 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
11847 req->enables |=
11848 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
11849 if (bp->hwrm_spec_code >= 0x10201) {
11850 req->auto_pause = req->force_pause;
11851 req->enables |= cpu_to_le32(
11852 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11853 }
11854 }
11855 }
11856
11857 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11858 {
11859 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
11860 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
11861 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11862 req->enables |=
11863 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
11864 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
11865 } else if (bp->link_info.advertising) {
11866 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
11867 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
11868 }
11869 if (bp->link_info.advertising_pam4) {
11870 req->enables |=
11871 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
11872 req->auto_link_pam4_speed_mask =
11873 cpu_to_le16(bp->link_info.advertising_pam4);
11874 }
11875 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
11876 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
11877 } else {
11878 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
11879 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11880 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
11881 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
11882 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
11883 (u32)bp->link_info.req_link_speed);
11884 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
11885 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11886 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
11887 } else {
11888 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11889 }
11890 }
11891
11892 /* tell chimp that the setting takes effect immediately */
11893 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
11894 }
11895
11896 int bnxt_hwrm_set_pause(struct bnxt *bp)
11897 {
11898 struct hwrm_port_phy_cfg_input *req;
11899 int rc;
11900
11901 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11902 if (rc)
11903 return rc;
11904
11905 bnxt_hwrm_set_pause_common(bp, req);
11906
11907 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
11908 bp->link_info.force_link_chng)
11909 bnxt_hwrm_set_link_common(bp, req);
11910
11911 rc = hwrm_req_send(bp, req);
11912 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
11913 /* Since changing the pause setting doesn't trigger any link
11914 * change event, the driver needs to update the current pause
11915 * result upon successful return of the phy_cfg command.
11916 */
11917 bp->link_info.pause =
11918 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
11919 bp->link_info.auto_pause_setting = 0;
11920 if (!bp->link_info.force_link_chng)
11921 bnxt_report_link(bp);
11922 }
11923 bp->link_info.force_link_chng = false;
11924 return rc;
11925 }
11926
11927 static void bnxt_hwrm_set_eee(struct bnxt *bp,
11928 struct hwrm_port_phy_cfg_input *req)
11929 {
11930 struct ethtool_keee *eee = &bp->eee;
11931
11932 if (eee->eee_enabled) {
11933 u16 eee_speeds;
11934 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
11935
11936 if (eee->tx_lpi_enabled)
11937 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
11938 else
11939 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
11940
11941 req->flags |= cpu_to_le32(flags);
11942 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
11943 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
11944 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
11945 } else {
11946 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
11947 }
11948 }
11949
11950 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
11951 {
11952 struct hwrm_port_phy_cfg_input *req;
11953 int rc;
11954
11955 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11956 if (rc)
11957 return rc;
11958
11959 if (set_pause)
11960 bnxt_hwrm_set_pause_common(bp, req);
11961
11962 bnxt_hwrm_set_link_common(bp, req);
11963
11964 if (set_eee)
11965 bnxt_hwrm_set_eee(bp, req);
11966 return hwrm_req_send(bp, req);
11967 }
11968
11969 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
11970 {
11971 struct hwrm_port_phy_cfg_input *req;
11972 int rc;
11973
11974 if (!BNXT_SINGLE_PF(bp))
11975 return 0;
11976
11977 if (pci_num_vf(bp->pdev) &&
11978 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
11979 return 0;
11980
11981 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11982 if (rc)
11983 return rc;
11984
11985 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
11986 rc = hwrm_req_send(bp, req);
11987 if (!rc) {
11988 mutex_lock(&bp->link_lock);
11989 /* The device is not obliged to bring the link down in certain
11990 * scenarios, even when forced. Setting the state to unknown is
11991 * consistent with driver startup and forces the link state to be
11992 * reported during a subsequent open based on PORT_PHY_QCFG.
11993 */
11994 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
11995 mutex_unlock(&bp->link_lock);
11996 }
11997 return rc;
11998 }
11999
12000 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12001 {
12002 #ifdef CONFIG_TEE_BNXT_FW
12003 int rc = tee_bnxt_fw_load();
12004
12005 if (rc)
12006 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12007
12008 return rc;
12009 #else
12010 netdev_err(bp->dev, "OP-TEE not supported\n");
12011 return -ENODEV;
12012 #endif
12013 }
12014
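/* Poll the FW health status while it is booting or recovering.  If the FW
 * crashed with no master function, attempt a reset via OP-TEE; otherwise
 * return the result of the health poll.
 */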
12015 static int bnxt_try_recover_fw(struct bnxt *bp)
12016 {
12017 if (bp->fw_health && bp->fw_health->status_reliable) {
12018 int retry = 0, rc;
12019 u32 sts;
12020
12021 do {
12022 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12023 rc = bnxt_hwrm_poll(bp);
12024 if (!BNXT_FW_IS_BOOTING(sts) &&
12025 !BNXT_FW_IS_RECOVERING(sts))
12026 break;
12027 retry++;
12028 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12029
12030 if (!BNXT_FW_IS_HEALTHY(sts)) {
12031 netdev_err(bp->dev,
12032 "Firmware not responding, status: 0x%x\n",
12033 sts);
12034 rc = -ENODEV;
12035 }
12036 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12037 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12038 return bnxt_fw_reset_via_optee(bp);
12039 }
12040 return rc;
12041 }
12042
12043 return -ENODEV;
12044 }
12045
12046 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12047 {
12048 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12049
12050 if (!BNXT_NEW_RM(bp))
12051 return; /* no resource reservations required */
12052
12053 hw_resc->resv_cp_rings = 0;
12054 hw_resc->resv_stat_ctxs = 0;
12055 hw_resc->resv_irqs = 0;
12056 hw_resc->resv_tx_rings = 0;
12057 hw_resc->resv_rx_rings = 0;
12058 hw_resc->resv_hw_ring_grps = 0;
12059 hw_resc->resv_vnics = 0;
12060 hw_resc->resv_rsscos_ctxs = 0;
12061 if (!fw_reset) {
12062 bp->tx_nr_rings = 0;
12063 bp->rx_nr_rings = 0;
12064 }
12065 }
12066
12067 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12068 {
12069 int rc;
12070
12071 if (!BNXT_NEW_RM(bp))
12072 return 0; /* no resource reservations required */
12073
12074 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12075 if (rc)
12076 netdev_err(bp->dev, "resc_qcaps failed\n");
12077
12078 bnxt_clear_reservations(bp, fw_reset);
12079
12080 return rc;
12081 }
12082
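/* Notify the FW that the interface is going up or down.  On the up path,
 * detect whether the FW has reset or resources have changed since the last
 * open, and if so re-initialize FW state, interrupt mode and reservations.
 */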
12083 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12084 {
12085 struct hwrm_func_drv_if_change_output *resp;
12086 struct hwrm_func_drv_if_change_input *req;
12087 bool fw_reset = !bp->irq_tbl;
12088 bool resc_reinit = false;
12089 int rc, retry = 0;
12090 u32 flags = 0;
12091
12092 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12093 return 0;
12094
12095 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12096 if (rc)
12097 return rc;
12098
12099 if (up)
12100 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12101 resp = hwrm_req_hold(bp, req);
12102
12103 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12104 while (retry < BNXT_FW_IF_RETRY) {
12105 rc = hwrm_req_send(bp, req);
12106 if (rc != -EAGAIN)
12107 break;
12108
12109 msleep(50);
12110 retry++;
12111 }
12112
12113 if (rc == -EAGAIN) {
12114 hwrm_req_drop(bp, req);
12115 return rc;
12116 } else if (!rc) {
12117 flags = le32_to_cpu(resp->flags);
12118 } else if (up) {
12119 rc = bnxt_try_recover_fw(bp);
12120 fw_reset = true;
12121 }
12122 hwrm_req_drop(bp, req);
12123 if (rc)
12124 return rc;
12125
12126 if (!up) {
12127 bnxt_inv_fw_health_reg(bp);
12128 return 0;
12129 }
12130
12131 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12132 resc_reinit = true;
12133 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12134 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12135 fw_reset = true;
12136 else
12137 bnxt_remap_fw_health_regs(bp);
12138
12139 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12140 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12141 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12142 return -ENODEV;
12143 }
12144 if (resc_reinit || fw_reset) {
12145 if (fw_reset) {
12146 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12147 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12148 bnxt_ulp_irq_stop(bp);
12149 bnxt_free_ctx_mem(bp, false);
12150 bnxt_dcb_free(bp);
12151 rc = bnxt_fw_init_one(bp);
12152 if (rc) {
12153 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12154 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12155 return rc;
12156 }
12157 bnxt_clear_int_mode(bp);
12158 rc = bnxt_init_int_mode(bp);
12159 if (rc) {
12160 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12161 netdev_err(bp->dev, "init int mode failed\n");
12162 return rc;
12163 }
12164 }
12165 rc = bnxt_cancel_reservations(bp, fw_reset);
12166 }
12167 return rc;
12168 }
12169
12170 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12171 {
12172 struct hwrm_port_led_qcaps_output *resp;
12173 struct hwrm_port_led_qcaps_input *req;
12174 struct bnxt_pf_info *pf = &bp->pf;
12175 int rc;
12176
12177 bp->num_leds = 0;
12178 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12179 return 0;
12180
12181 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12182 if (rc)
12183 return rc;
12184
12185 req->port_id = cpu_to_le16(pf->port_id);
12186 resp = hwrm_req_hold(bp, req);
12187 rc = hwrm_req_send(bp, req);
12188 if (rc) {
12189 hwrm_req_drop(bp, req);
12190 return rc;
12191 }
12192 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12193 int i;
12194
12195 bp->num_leds = resp->num_leds;
12196 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12197 bp->num_leds);
12198 for (i = 0; i < bp->num_leds; i++) {
12199 struct bnxt_led_info *led = &bp->leds[i];
12200 __le16 caps = led->led_state_caps;
12201
12202 if (!led->led_group_id ||
12203 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12204 bp->num_leds = 0;
12205 break;
12206 }
12207 }
12208 }
12209 hwrm_req_drop(bp, req);
12210 return 0;
12211 }
12212
12213 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12214 {
12215 struct hwrm_wol_filter_alloc_output *resp;
12216 struct hwrm_wol_filter_alloc_input *req;
12217 int rc;
12218
12219 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12220 if (rc)
12221 return rc;
12222
12223 req->port_id = cpu_to_le16(bp->pf.port_id);
12224 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12225 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12226 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12227
12228 resp = hwrm_req_hold(bp, req);
12229 rc = hwrm_req_send(bp, req);
12230 if (!rc)
12231 bp->wol_filter_id = resp->wol_filter_id;
12232 hwrm_req_drop(bp, req);
12233 return rc;
12234 }
12235
12236 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12237 {
12238 struct hwrm_wol_filter_free_input *req;
12239 int rc;
12240
12241 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12242 if (rc)
12243 return rc;
12244
12245 req->port_id = cpu_to_le16(bp->pf.port_id);
12246 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12247 req->wol_filter_id = bp->wol_filter_id;
12248
12249 return hwrm_req_send(bp, req);
12250 }
12251
12252 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12253 {
12254 struct hwrm_wol_filter_qcfg_output *resp;
12255 struct hwrm_wol_filter_qcfg_input *req;
12256 u16 next_handle = 0;
12257 int rc;
12258
12259 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12260 if (rc)
12261 return 0;
12262
12263 req->port_id = cpu_to_le16(bp->pf.port_id);
12264 req->handle = cpu_to_le16(handle);
12265 resp = hwrm_req_hold(bp, req);
12266 rc = hwrm_req_send(bp, req);
12267 if (!rc) {
12268 next_handle = le16_to_cpu(resp->next_handle);
12269 if (next_handle != 0) {
12270 if (resp->wol_type ==
12271 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12272 bp->wol = 1;
12273 bp->wol_filter_id = resp->wol_filter_id;
12274 }
12275 }
12276 }
12277 hwrm_req_drop(bp, req);
12278 return next_handle;
12279 }
12280
12281 static void bnxt_get_wol_settings(struct bnxt *bp)
12282 {
12283 u16 handle = 0;
12284
12285 bp->wol = 0;
12286 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12287 return;
12288
12289 do {
12290 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12291 } while (handle && handle != 0xffff);
12292 }
12293
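/* Validate the EEE configuration against the current link settings: EEE
 * requires autoneg and the EEE advertisement must be a subset of the
 * advertised link modes. Returns false if the settings had to be fixed up.
 */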
12294 static bool bnxt_eee_config_ok(struct bnxt *bp)
12295 {
12296 struct ethtool_keee *eee = &bp->eee;
12297 struct bnxt_link_info *link_info = &bp->link_info;
12298
12299 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12300 return true;
12301
12302 if (eee->eee_enabled) {
12303 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12304 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12305
12306 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12307
12308 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12309 eee->eee_enabled = 0;
12310 return false;
12311 }
12312 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12313 linkmode_and(eee->advertised, advertising,
12314 eee->supported);
12315 return false;
12316 }
12317 }
12318 return true;
12319 }
12320
12321 static int bnxt_update_phy_setting(struct bnxt *bp)
12322 {
12323 int rc;
12324 bool update_link = false;
12325 bool update_pause = false;
12326 bool update_eee = false;
12327 struct bnxt_link_info *link_info = &bp->link_info;
12328
12329 rc = bnxt_update_link(bp, true);
12330 if (rc) {
12331 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12332 rc);
12333 return rc;
12334 }
12335 if (!BNXT_SINGLE_PF(bp))
12336 return 0;
12337
12338 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12339 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12340 link_info->req_flow_ctrl)
12341 update_pause = true;
12342 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12343 link_info->force_pause_setting != link_info->req_flow_ctrl)
12344 update_pause = true;
12345 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12346 if (BNXT_AUTO_MODE(link_info->auto_mode))
12347 update_link = true;
12348 if (bnxt_force_speed_updated(link_info))
12349 update_link = true;
12350 if (link_info->req_duplex != link_info->duplex_setting)
12351 update_link = true;
12352 } else {
12353 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12354 update_link = true;
12355 if (bnxt_auto_speed_updated(link_info))
12356 update_link = true;
12357 }
12358
12359 /* The last close may have shut down the link, so we need to call
12360 * PHY_CFG to bring it back up.
12361 */
12362 if (!BNXT_LINK_IS_UP(bp))
12363 update_link = true;
12364
12365 if (!bnxt_eee_config_ok(bp))
12366 update_eee = true;
12367
12368 if (update_link)
12369 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12370 else if (update_pause)
12371 rc = bnxt_hwrm_set_pause(bp);
12372 if (rc) {
12373 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12374 rc);
12375 return rc;
12376 }
12377
12378 return rc;
12379 }
12380
12381 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12382
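/* Try to recover a device left in the ABORT_ERR state by reinitializing the
 * firmware interface and the interrupt mode, so that a subsequent open can
 * proceed.
 */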
12383 static int bnxt_reinit_after_abort(struct bnxt *bp)
12384 {
12385 int rc;
12386
12387 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12388 return -EBUSY;
12389
12390 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12391 return -ENODEV;
12392
12393 rc = bnxt_fw_init_one(bp);
12394 if (!rc) {
12395 bnxt_clear_int_mode(bp);
12396 rc = bnxt_init_int_mode(bp);
12397 if (!rc) {
12398 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12399 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12400 }
12401 }
12402 return rc;
12403 }
12404
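/* Reprogram one user-created ntuple or L2 filter into the hardware after the
 * NIC has been reinitialized. The filter is deleted if the firmware call
 * fails.
 */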
12405 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12406 {
12407 struct bnxt_ntuple_filter *ntp_fltr;
12408 struct bnxt_l2_filter *l2_fltr;
12409
12410 if (list_empty(&fltr->list))
12411 return;
12412
12413 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12414 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12415 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12416 atomic_inc(&l2_fltr->refcnt);
12417 ntp_fltr->l2_fltr = l2_fltr;
12418 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12419 bnxt_del_ntp_filter(bp, ntp_fltr);
12420 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12421 fltr->sw_id);
12422 }
12423 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12424 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12425 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12426 bnxt_del_l2_filter(bp, l2_fltr);
12427 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12428 fltr->sw_id);
12429 }
12430 }
12431 }
12432
12433 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12434 {
12435 struct bnxt_filter_base *usr_fltr, *tmp;
12436
12437 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12438 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12439 }
12440
12441 static int bnxt_set_xps_mapping(struct bnxt *bp)
12442 {
12443 int numa_node = dev_to_node(&bp->pdev->dev);
12444 unsigned int q_idx, map_idx, cpu, i;
12445 const struct cpumask *cpu_mask_ptr;
12446 int nr_cpus = num_online_cpus();
12447 cpumask_t *q_map;
12448 int rc = 0;
12449
12450 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12451 if (!q_map)
12452 return -ENOMEM;
12453
12454 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
12455 * Each TC has the same number of TX queues. The nth TX queue for each
12456 * TC will have the same CPU mask.
12457 */
12458 for (i = 0; i < nr_cpus; i++) {
12459 map_idx = i % bp->tx_nr_rings_per_tc;
12460 cpu = cpumask_local_spread(i, numa_node);
12461 cpu_mask_ptr = get_cpu_mask(cpu);
12462 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12463 }
12464
12465 /* Register CPU mask for each TX queue except the ones marked for XDP */
12466 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12467 map_idx = q_idx % bp->tx_nr_rings_per_tc;
12468 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12469 if (rc) {
12470 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12471 q_idx);
12472 break;
12473 }
12474 }
12475
12476 kfree(q_map);
12477
12478 return rc;
12479 }
12480
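/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * program the NIC, update the PHY if requested, then enable interrupts and
 * the TX queues.
 */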
12481 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12482 {
12483 int rc = 0;
12484
12485 netif_carrier_off(bp->dev);
12486 if (irq_re_init) {
12487 /* Reserve rings now if none were reserved at driver probe. */
12488 rc = bnxt_init_dflt_ring_mode(bp);
12489 if (rc) {
12490 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
12491 return rc;
12492 }
12493 }
12494 rc = bnxt_reserve_rings(bp, irq_re_init);
12495 if (rc)
12496 return rc;
12497
12498 rc = bnxt_alloc_mem(bp, irq_re_init);
12499 if (rc) {
12500 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12501 goto open_err_free_mem;
12502 }
12503
12504 if (irq_re_init) {
12505 bnxt_init_napi(bp);
12506 rc = bnxt_request_irq(bp);
12507 if (rc) {
12508 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
12509 goto open_err_irq;
12510 }
12511 }
12512
12513 rc = bnxt_init_nic(bp, irq_re_init);
12514 if (rc) {
12515 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12516 goto open_err_irq;
12517 }
12518
12519 bnxt_enable_napi(bp);
12520 bnxt_debug_dev_init(bp);
12521
12522 if (link_re_init) {
12523 mutex_lock(&bp->link_lock);
12524 rc = bnxt_update_phy_setting(bp);
12525 mutex_unlock(&bp->link_lock);
12526 if (rc) {
12527 netdev_warn(bp->dev, "failed to update phy settings\n");
12528 if (BNXT_SINGLE_PF(bp)) {
12529 bp->link_info.phy_retry = true;
12530 bp->link_info.phy_retry_expires =
12531 jiffies + 5 * HZ;
12532 }
12533 }
12534 }
12535
12536 if (irq_re_init) {
12537 udp_tunnel_nic_reset_ntf(bp->dev);
12538 rc = bnxt_set_xps_mapping(bp);
12539 if (rc)
12540 netdev_warn(bp->dev, "failed to set xps mapping\n");
12541 }
12542
12543 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
12544 if (!static_key_enabled(&bnxt_xdp_locking_key))
12545 static_branch_enable(&bnxt_xdp_locking_key);
12546 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
12547 static_branch_disable(&bnxt_xdp_locking_key);
12548 }
12549 set_bit(BNXT_STATE_OPEN, &bp->state);
12550 bnxt_enable_int(bp);
12551 /* Enable TX queues */
12552 bnxt_tx_enable(bp);
12553 mod_timer(&bp->timer, jiffies + bp->current_interval);
12554 /* Poll link status and check for SFP+ module status */
12555 mutex_lock(&bp->link_lock);
12556 bnxt_get_port_module_status(bp);
12557 mutex_unlock(&bp->link_lock);
12558
12559 /* VF-reps may need to be re-opened after the PF is re-opened */
12560 if (BNXT_PF(bp))
12561 bnxt_vf_reps_open(bp);
12562 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
12563 WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
12564 bnxt_ptp_init_rtc(bp, true);
12565 bnxt_ptp_cfg_tstamp_filters(bp);
12566 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12567 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
12568 bnxt_cfg_usr_fltrs(bp);
12569 return 0;
12570
12571 open_err_irq:
12572 bnxt_del_napi(bp);
12573
12574 open_err_free_mem:
12575 bnxt_free_skbs(bp);
12576 bnxt_free_irq(bp);
12577 bnxt_free_mem(bp, true);
12578 return rc;
12579 }
12580
12581 /* rtnl_lock held */
12582 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12583 {
12584 int rc = 0;
12585
12586 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
12587 rc = -EIO;
12588 if (!rc)
12589 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
12590 if (rc) {
12591 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
12592 dev_close(bp->dev);
12593 }
12594 return rc;
12595 }
12596
12597 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
12598 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
12599 * self tests.
12600 */
12601 int bnxt_half_open_nic(struct bnxt *bp)
12602 {
12603 int rc = 0;
12604
12605 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12606 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
12607 rc = -ENODEV;
12608 goto half_open_err;
12609 }
12610
12611 rc = bnxt_alloc_mem(bp, true);
12612 if (rc) {
12613 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12614 goto half_open_err;
12615 }
12616 bnxt_init_napi(bp);
12617 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12618 rc = bnxt_init_nic(bp, true);
12619 if (rc) {
12620 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12621 bnxt_del_napi(bp);
12622 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12623 goto half_open_err;
12624 }
12625 return 0;
12626
12627 half_open_err:
12628 bnxt_free_skbs(bp);
12629 bnxt_free_mem(bp, true);
12630 dev_close(bp->dev);
12631 return rc;
12632 }
12633
12634 /* rtnl_lock held, this call can only be made after a previous successful
12635 * call to bnxt_half_open_nic().
12636 */
12637 void bnxt_half_close_nic(struct bnxt *bp)
12638 {
12639 bnxt_hwrm_resource_free(bp, false, true);
12640 bnxt_del_napi(bp);
12641 bnxt_free_skbs(bp);
12642 bnxt_free_mem(bp, true);
12643 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
12644 }
12645
12646 void bnxt_reenable_sriov(struct bnxt *bp)
12647 {
12648 if (BNXT_PF(bp)) {
12649 struct bnxt_pf_info *pf = &bp->pf;
12650 int n = pf->active_vfs;
12651
12652 if (n)
12653 bnxt_cfg_hw_sriov(bp, &n, true);
12654 }
12655 }
12656
12657 static int bnxt_open(struct net_device *dev)
12658 {
12659 struct bnxt *bp = netdev_priv(dev);
12660 int rc;
12661
12662 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12663 rc = bnxt_reinit_after_abort(bp);
12664 if (rc) {
12665 if (rc == -EBUSY)
12666 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
12667 else
12668 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
12669 return -ENODEV;
12670 }
12671 }
12672
12673 rc = bnxt_hwrm_if_change(bp, true);
12674 if (rc)
12675 return rc;
12676
12677 rc = __bnxt_open_nic(bp, true, true);
12678 if (rc) {
12679 bnxt_hwrm_if_change(bp, false);
12680 } else {
12681 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12682 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12683 bnxt_queue_sp_work(bp,
12684 BNXT_RESTART_ULP_SP_EVENT);
12685 }
12686 }
12687
12688 return rc;
12689 }
12690
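/* True while a stats read or the slow path task is still referencing the
 * device; the close path polls this after clearing BNXT_STATE_OPEN.
 */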
12691 static bool bnxt_drv_busy(struct bnxt *bp)
12692 {
12693 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
12694 test_bit(BNXT_STATE_READ_STATS, &bp->state));
12695 }
12696
12697 static void bnxt_get_ring_stats(struct bnxt *bp,
12698 struct rtnl_link_stats64 *stats);
12699
12700 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
12701 bool link_re_init)
12702 {
12703 /* Close the VF-reps before closing PF */
12704 if (BNXT_PF(bp))
12705 bnxt_vf_reps_close(bp);
12706
12707 /* Change device state to avoid TX queue wake-ups */
12708 bnxt_tx_disable(bp);
12709
12710 clear_bit(BNXT_STATE_OPEN, &bp->state);
12711 smp_mb__after_atomic();
12712 while (bnxt_drv_busy(bp))
12713 msleep(20);
12714
12715 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12716 bnxt_clear_rss_ctxs(bp);
12717 /* Flush rings and disable interrupts */
12718 bnxt_shutdown_nic(bp, irq_re_init);
12719
12720 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
12721
12722 bnxt_debug_dev_exit(bp);
12723 bnxt_disable_napi(bp);
12724 del_timer_sync(&bp->timer);
12725 bnxt_free_skbs(bp);
12726
12727 /* Save ring stats before shutdown */
12728 if (bp->bnapi && irq_re_init) {
12729 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
12730 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
12731 }
12732 if (irq_re_init) {
12733 bnxt_free_irq(bp);
12734 bnxt_del_napi(bp);
12735 }
12736 bnxt_free_mem(bp, irq_re_init);
12737 }
12738
12739 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12740 {
12741 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12742 /* If we get here, it means firmware reset is in progress
12743 * while we are trying to close. We can safely proceed with
12744 * the close because we are holding rtnl_lock(). Some firmware
12745 * messages may fail as we proceed to close. We set the
12746 * ABORT_ERR flag here so that the FW reset thread will later
12747 * abort when it gets the rtnl_lock() and sees the flag.
12748 */
12749 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
12750 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12751 }
12752
12753 #ifdef CONFIG_BNXT_SRIOV
12754 if (bp->sriov_cfg) {
12755 int rc;
12756
12757 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
12758 !bp->sriov_cfg,
12759 BNXT_SRIOV_CFG_WAIT_TMO);
12760 if (!rc)
12761 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
12762 else if (rc < 0)
12763 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
12764 }
12765 #endif
12766 __bnxt_close_nic(bp, irq_re_init, link_re_init);
12767 }
12768
12769 static int bnxt_close(struct net_device *dev)
12770 {
12771 struct bnxt *bp = netdev_priv(dev);
12772
12773 bnxt_close_nic(bp, true, true);
12774 bnxt_hwrm_shutdown_link(bp);
12775 bnxt_hwrm_if_change(bp, false);
12776 return 0;
12777 }
12778
12779 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
12780 u16 *val)
12781 {
12782 struct hwrm_port_phy_mdio_read_output *resp;
12783 struct hwrm_port_phy_mdio_read_input *req;
12784 int rc;
12785
12786 if (bp->hwrm_spec_code < 0x10a00)
12787 return -EOPNOTSUPP;
12788
12789 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
12790 if (rc)
12791 return rc;
12792
12793 req->port_id = cpu_to_le16(bp->pf.port_id);
12794 req->phy_addr = phy_addr;
12795 req->reg_addr = cpu_to_le16(reg & 0x1f);
12796 if (mdio_phy_id_is_c45(phy_addr)) {
12797 req->cl45_mdio = 1;
12798 req->phy_addr = mdio_phy_id_prtad(phy_addr);
12799 req->dev_addr = mdio_phy_id_devad(phy_addr);
12800 req->reg_addr = cpu_to_le16(reg);
12801 }
12802
12803 resp = hwrm_req_hold(bp, req);
12804 rc = hwrm_req_send(bp, req);
12805 if (!rc)
12806 *val = le16_to_cpu(resp->reg_data);
12807 hwrm_req_drop(bp, req);
12808 return rc;
12809 }
12810
12811 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
12812 u16 val)
12813 {
12814 struct hwrm_port_phy_mdio_write_input *req;
12815 int rc;
12816
12817 if (bp->hwrm_spec_code < 0x10a00)
12818 return -EOPNOTSUPP;
12819
12820 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
12821 if (rc)
12822 return rc;
12823
12824 req->port_id = cpu_to_le16(bp->pf.port_id);
12825 req->phy_addr = phy_addr;
12826 req->reg_addr = cpu_to_le16(reg & 0x1f);
12827 if (mdio_phy_id_is_c45(phy_addr)) {
12828 req->cl45_mdio = 1;
12829 req->phy_addr = mdio_phy_id_prtad(phy_addr);
12830 req->dev_addr = mdio_phy_id_devad(phy_addr);
12831 req->reg_addr = cpu_to_le16(reg);
12832 }
12833 req->reg_data = cpu_to_le16(val);
12834
12835 return hwrm_req_send(bp, req);
12836 }
12837
12838 /* rtnl_lock held */
12839 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12840 {
12841 struct mii_ioctl_data *mdio = if_mii(ifr);
12842 struct bnxt *bp = netdev_priv(dev);
12843 int rc;
12844
12845 switch (cmd) {
12846 case SIOCGMIIPHY:
12847 mdio->phy_id = bp->link_info.phy_addr;
12848
12849 fallthrough;
12850 case SIOCGMIIREG: {
12851 u16 mii_regval = 0;
12852
12853 if (!netif_running(dev))
12854 return -EAGAIN;
12855
12856 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
12857 &mii_regval);
12858 mdio->val_out = mii_regval;
12859 return rc;
12860 }
12861
12862 case SIOCSMIIREG:
12863 if (!netif_running(dev))
12864 return -EAGAIN;
12865
12866 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
12867 mdio->val_in);
12868
12869 case SIOCSHWTSTAMP:
12870 return bnxt_hwtstamp_set(dev, ifr);
12871
12872 case SIOCGHWTSTAMP:
12873 return bnxt_hwtstamp_get(dev, ifr);
12874
12875 default:
12876 /* do nothing */
12877 break;
12878 }
12879 return -EOPNOTSUPP;
12880 }
12881
12882 static void bnxt_get_ring_stats(struct bnxt *bp,
12883 struct rtnl_link_stats64 *stats)
12884 {
12885 int i;
12886
12887 for (i = 0; i < bp->cp_nr_rings; i++) {
12888 struct bnxt_napi *bnapi = bp->bnapi[i];
12889 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12890 u64 *sw = cpr->stats.sw_stats;
12891
12892 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
12893 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12894 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
12895
12896 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
12897 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
12898 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
12899
12900 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
12901 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
12902 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
12903
12904 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
12905 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
12906 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
12907
12908 stats->rx_missed_errors +=
12909 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
12910
12911 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12912
12913 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
12914
12915 stats->rx_dropped +=
12916 cpr->sw_stats->rx.rx_netpoll_discards +
12917 cpr->sw_stats->rx.rx_oom_discards;
12918 }
12919 }
12920
12921 static void bnxt_add_prev_stats(struct bnxt *bp,
12922 struct rtnl_link_stats64 *stats)
12923 {
12924 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
12925
12926 stats->rx_packets += prev_stats->rx_packets;
12927 stats->tx_packets += prev_stats->tx_packets;
12928 stats->rx_bytes += prev_stats->rx_bytes;
12929 stats->tx_bytes += prev_stats->tx_bytes;
12930 stats->rx_missed_errors += prev_stats->rx_missed_errors;
12931 stats->multicast += prev_stats->multicast;
12932 stats->rx_dropped += prev_stats->rx_dropped;
12933 stats->tx_dropped += prev_stats->tx_dropped;
12934 }
12935
12936 static void
12937 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
12938 {
12939 struct bnxt *bp = netdev_priv(dev);
12940
12941 set_bit(BNXT_STATE_READ_STATS, &bp->state);
12942 /* Make sure bnxt_close_nic() sees that we are reading stats before
12943 * we check the BNXT_STATE_OPEN flag.
12944 */
12945 smp_mb__after_atomic();
12946 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12947 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12948 *stats = bp->net_stats_prev;
12949 return;
12950 }
12951
12952 bnxt_get_ring_stats(bp, stats);
12953 bnxt_add_prev_stats(bp, stats);
12954
12955 if (bp->flags & BNXT_FLAG_PORT_STATS) {
12956 u64 *rx = bp->port_stats.sw_stats;
12957 u64 *tx = bp->port_stats.sw_stats +
12958 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
12959
12960 stats->rx_crc_errors =
12961 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
12962 stats->rx_frame_errors =
12963 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
12964 stats->rx_length_errors =
12965 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
12966 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
12967 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
12968 stats->rx_errors =
12969 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
12970 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
12971 stats->collisions =
12972 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
12973 stats->tx_fifo_errors =
12974 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
12975 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
12976 }
12977 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12978 }
12979
12980 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
12981 struct bnxt_total_ring_err_stats *stats,
12982 struct bnxt_cp_ring_info *cpr)
12983 {
12984 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
12985 u64 *hw_stats = cpr->stats.sw_stats;
12986
12987 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
12988 stats->rx_total_resets += sw_stats->rx.rx_resets;
12989 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
12990 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
12991 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
12992 stats->rx_total_ring_discards +=
12993 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
12994 stats->tx_total_resets += sw_stats->tx.tx_resets;
12995 stats->tx_total_ring_discards +=
12996 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
12997 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
12998 }
12999
13000 void bnxt_get_ring_err_stats(struct bnxt *bp,
13001 struct bnxt_total_ring_err_stats *stats)
13002 {
13003 int i;
13004
13005 for (i = 0; i < bp->cp_nr_rings; i++)
13006 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13007 }
13008
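/* Copy the netdev multicast list into the default VNIC and report whether it
 * changed. Falls back to the ALL_MCAST rx mask if the list exceeds
 * BNXT_MAX_MC_ADDRS.
 */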
13009 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13010 {
13011 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13012 struct net_device *dev = bp->dev;
13013 struct netdev_hw_addr *ha;
13014 u8 *haddr;
13015 int mc_count = 0;
13016 bool update = false;
13017 int off = 0;
13018
13019 netdev_for_each_mc_addr(ha, dev) {
13020 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13021 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13022 vnic->mc_list_count = 0;
13023 return false;
13024 }
13025 haddr = ha->addr;
13026 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13027 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13028 update = true;
13029 }
13030 off += ETH_ALEN;
13031 mc_count++;
13032 }
13033 if (mc_count)
13034 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13035
13036 if (mc_count != vnic->mc_list_count) {
13037 vnic->mc_list_count = mc_count;
13038 update = true;
13039 }
13040 return update;
13041 }
13042
13043 static bool bnxt_uc_list_updated(struct bnxt *bp)
13044 {
13045 struct net_device *dev = bp->dev;
13046 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13047 struct netdev_hw_addr *ha;
13048 int off = 0;
13049
13050 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13051 return true;
13052
13053 netdev_for_each_uc_addr(ha, dev) {
13054 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13055 return true;
13056
13057 off += ETH_ALEN;
13058 }
13059 return false;
13060 }
13061
13062 static void bnxt_set_rx_mode(struct net_device *dev)
13063 {
13064 struct bnxt *bp = netdev_priv(dev);
13065 struct bnxt_vnic_info *vnic;
13066 bool mc_update = false;
13067 bool uc_update;
13068 u32 mask;
13069
13070 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13071 return;
13072
13073 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13074 mask = vnic->rx_mask;
13075 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13076 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13077 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13078 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13079
13080 if (dev->flags & IFF_PROMISC)
13081 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13082
13083 uc_update = bnxt_uc_list_updated(bp);
13084
13085 if (dev->flags & IFF_BROADCAST)
13086 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13087 if (dev->flags & IFF_ALLMULTI) {
13088 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13089 vnic->mc_list_count = 0;
13090 } else if (dev->flags & IFF_MULTICAST) {
13091 mc_update = bnxt_mc_list_updated(bp, &mask);
13092 }
13093
13094 if (mask != vnic->rx_mask || uc_update || mc_update) {
13095 vnic->rx_mask = mask;
13096
13097 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13098 }
13099 }
13100
13101 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13102 {
13103 struct net_device *dev = bp->dev;
13104 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13105 struct netdev_hw_addr *ha;
13106 int i, off = 0, rc;
13107 bool uc_update;
13108
13109 netif_addr_lock_bh(dev);
13110 uc_update = bnxt_uc_list_updated(bp);
13111 netif_addr_unlock_bh(dev);
13112
13113 if (!uc_update)
13114 goto skip_uc;
13115
13116 for (i = 1; i < vnic->uc_filter_count; i++) {
13117 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13118
13119 bnxt_hwrm_l2_filter_free(bp, fltr);
13120 bnxt_del_l2_filter(bp, fltr);
13121 }
13122
13123 vnic->uc_filter_count = 1;
13124
13125 netif_addr_lock_bh(dev);
13126 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13127 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13128 } else {
13129 netdev_for_each_uc_addr(ha, dev) {
13130 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13131 off += ETH_ALEN;
13132 vnic->uc_filter_count++;
13133 }
13134 }
13135 netif_addr_unlock_bh(dev);
13136
13137 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13138 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13139 if (rc) {
13140 if (BNXT_VF(bp) && rc == -ENODEV) {
13141 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13142 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13143 else
13144 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13145 rc = 0;
13146 } else {
13147 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13148 }
13149 vnic->uc_filter_count = i;
13150 return rc;
13151 }
13152 }
13153 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13154 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13155
13156 skip_uc:
13157 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13158 !bnxt_promisc_ok(bp))
13159 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13160 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13161 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13162 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13163 rc);
13164 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13165 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13166 vnic->mc_list_count = 0;
13167 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13168 }
13169 if (rc)
13170 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13171 rc);
13172
13173 return rc;
13174 }
13175
13176 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13177 {
13178 #ifdef CONFIG_BNXT_SRIOV
13179 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13180 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13181
13182 /* No minimum rings were provisioned by the PF. Don't
13183 * reserve rings by default when device is down.
13184 */
13185 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13186 return true;
13187
13188 if (!netif_running(bp->dev))
13189 return false;
13190 }
13191 #endif
13192 return true;
13193 }
13194
13195 /* If the chip and firmware supports RFS */
13196 static bool bnxt_rfs_supported(struct bnxt *bp)
13197 {
13198 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13199 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13200 return true;
13201 return false;
13202 }
13203 /* 212 firmware is broken for aRFS */
13204 if (BNXT_FW_MAJ(bp) == 212)
13205 return false;
13206 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13207 return true;
13208 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13209 return true;
13210 return false;
13211 }
13212
13213 /* If runtime conditions support RFS */
13214 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13215 {
13216 struct bnxt_hw_rings hwr = {0};
13217 int max_vnics, max_rss_ctxs;
13218
13219 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13220 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13221 return bnxt_rfs_supported(bp);
13222
13223 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13224 return false;
13225
13226 hwr.grp = bp->rx_nr_rings;
13227 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13228 if (new_rss_ctx)
13229 hwr.vnic++;
13230 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13231 max_vnics = bnxt_get_max_func_vnics(bp);
13232 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13233
13234 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13235 if (bp->rx_nr_rings > 1)
13236 netdev_warn(bp->dev,
13237 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13238 min(max_rss_ctxs - 1, max_vnics - 1));
13239 return false;
13240 }
13241
13242 if (!BNXT_NEW_RM(bp))
13243 return true;
13244
13245 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13246 * issue that will mess up the default VNIC if we reduce the
13247 * reservations.
13248 */
13249 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13250 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13251 return true;
13252
13253 bnxt_hwrm_reserve_rings(bp, &hwr);
13254 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13255 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13256 return true;
13257
13258 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13259 hwr.vnic = 1;
13260 hwr.rss_ctx = 0;
13261 bnxt_hwrm_reserve_rings(bp, &hwr);
13262 return false;
13263 }
13264
13265 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13266 netdev_features_t features)
13267 {
13268 struct bnxt *bp = netdev_priv(dev);
13269 netdev_features_t vlan_features;
13270
13271 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13272 features &= ~NETIF_F_NTUPLE;
13273
13274 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13275 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13276
13277 if (!(features & NETIF_F_GRO))
13278 features &= ~NETIF_F_GRO_HW;
13279
13280 if (features & NETIF_F_GRO_HW)
13281 features &= ~NETIF_F_LRO;
13282
13283 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13284 * turned on or off together.
13285 */
13286 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13287 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13288 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13289 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13290 else if (vlan_features)
13291 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13292 }
13293 #ifdef CONFIG_BNXT_SRIOV
13294 if (BNXT_VF(bp) && bp->vf.vlan)
13295 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13296 #endif
13297 return features;
13298 }
13299
13300 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13301 bool link_re_init, u32 flags, bool update_tpa)
13302 {
13303 bnxt_close_nic(bp, irq_re_init, link_re_init);
13304 bp->flags = flags;
13305 if (update_tpa)
13306 bnxt_set_ring_params(bp);
13307 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13308 }
13309
13310 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13311 {
13312 bool update_tpa = false, update_ntuple = false;
13313 struct bnxt *bp = netdev_priv(dev);
13314 u32 flags = bp->flags;
13315 u32 changes;
13316 int rc = 0;
13317 bool re_init = false;
13318
13319 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13320 if (features & NETIF_F_GRO_HW)
13321 flags |= BNXT_FLAG_GRO;
13322 else if (features & NETIF_F_LRO)
13323 flags |= BNXT_FLAG_LRO;
13324
13325 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13326 flags &= ~BNXT_FLAG_TPA;
13327
13328 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13329 flags |= BNXT_FLAG_STRIP_VLAN;
13330
13331 if (features & NETIF_F_NTUPLE)
13332 flags |= BNXT_FLAG_RFS;
13333 else
13334 bnxt_clear_usr_fltrs(bp, true);
13335
13336 changes = flags ^ bp->flags;
13337 if (changes & BNXT_FLAG_TPA) {
13338 update_tpa = true;
13339 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13340 (flags & BNXT_FLAG_TPA) == 0 ||
13341 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13342 re_init = true;
13343 }
13344
13345 if (changes & ~BNXT_FLAG_TPA)
13346 re_init = true;
13347
13348 if (changes & BNXT_FLAG_RFS)
13349 update_ntuple = true;
13350
13351 if (flags != bp->flags) {
13352 u32 old_flags = bp->flags;
13353
13354 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13355 bp->flags = flags;
13356 if (update_tpa)
13357 bnxt_set_ring_params(bp);
13358 return rc;
13359 }
13360
13361 if (update_ntuple)
13362 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13363
13364 if (re_init)
13365 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13366
13367 if (update_tpa) {
13368 bp->flags = flags;
13369 rc = bnxt_set_tpa(bp,
13370 (flags & BNXT_FLAG_TPA) ?
13371 true : false);
13372 if (rc)
13373 bp->flags = old_flags;
13374 }
13375 }
13376 return rc;
13377 }
13378
13379 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13380 u8 **nextp)
13381 {
13382 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13383 struct hop_jumbo_hdr *jhdr;
13384 int hdr_count = 0;
13385 u8 *nexthdr;
13386 int start;
13387
13388 /* Check that there are at most 2 IPv6 extension headers, no
13389 * fragment header, and each is <= 64 bytes.
13390 */
13391 start = nw_off + sizeof(*ip6h);
13392 nexthdr = &ip6h->nexthdr;
13393 while (ipv6_ext_hdr(*nexthdr)) {
13394 struct ipv6_opt_hdr *hp;
13395 int hdrlen;
13396
13397 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13398 *nexthdr == NEXTHDR_FRAGMENT)
13399 return false;
13400 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13401 skb_headlen(skb), NULL);
13402 if (!hp)
13403 return false;
13404 if (*nexthdr == NEXTHDR_AUTH)
13405 hdrlen = ipv6_authlen(hp);
13406 else
13407 hdrlen = ipv6_optlen(hp);
13408
13409 if (hdrlen > 64)
13410 return false;
13411
13412 /* The ext header may be a hop-by-hop header inserted for
13413 * big TCP purposes. This will be removed before sending
13414 * from NIC, so do not count it.
13415 */
13416 if (*nexthdr == NEXTHDR_HOP) {
13417 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
13418 goto increment_hdr;
13419
13420 jhdr = (struct hop_jumbo_hdr *)hp;
13421 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
13422 jhdr->nexthdr != IPPROTO_TCP)
13423 goto increment_hdr;
13424
13425 goto next_hdr;
13426 }
13427 increment_hdr:
13428 hdr_count++;
13429 next_hdr:
13430 nexthdr = &hp->nexthdr;
13431 start += hdrlen;
13432 }
13433 if (nextp) {
13434 /* Caller will check inner protocol */
13435 if (skb->encapsulation) {
13436 *nextp = nexthdr;
13437 return true;
13438 }
13439 *nextp = NULL;
13440 }
13441 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13442 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13443 }
13444
13445 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
13446 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13447 {
13448 struct udphdr *uh = udp_hdr(skb);
13449 __be16 udp_port = uh->dest;
13450
13451 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13452 udp_port != bp->vxlan_gpe_port)
13453 return false;
13454 if (skb->inner_protocol == htons(ETH_P_TEB)) {
13455 struct ethhdr *eh = inner_eth_hdr(skb);
13456
13457 switch (eh->h_proto) {
13458 case htons(ETH_P_IP):
13459 return true;
13460 case htons(ETH_P_IPV6):
13461 return bnxt_exthdr_check(bp, skb,
13462 skb_inner_network_offset(skb),
13463 NULL);
13464 }
13465 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13466 return true;
13467 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13468 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13469 NULL);
13470 }
13471 return false;
13472 }
13473
13474 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13475 {
13476 switch (l4_proto) {
13477 case IPPROTO_UDP:
13478 return bnxt_udp_tunl_check(bp, skb);
13479 case IPPROTO_IPIP:
13480 return true;
13481 case IPPROTO_GRE: {
13482 switch (skb->inner_protocol) {
13483 default:
13484 return false;
13485 case htons(ETH_P_IP):
13486 return true;
13487 case htons(ETH_P_IPV6):
13488 fallthrough;
13489 }
13490 }
13491 case IPPROTO_IPV6:
13492 /* Check ext headers of inner ipv6 */
13493 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13494 NULL);
13495 }
13496 return false;
13497 }
13498
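/* Drop checksum and GSO offloads for tunneled packets whose headers the
 * hardware cannot parse, e.g. unknown UDP tunnel ports or too many IPv6
 * extension headers.
 */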
13499 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13500 struct net_device *dev,
13501 netdev_features_t features)
13502 {
13503 struct bnxt *bp = netdev_priv(dev);
13504 u8 *l4_proto;
13505
13506 features = vlan_features_check(skb, features);
13507 switch (vlan_get_protocol(skb)) {
13508 case htons(ETH_P_IP):
13509 if (!skb->encapsulation)
13510 return features;
13511 l4_proto = &ip_hdr(skb)->protocol;
13512 if (bnxt_tunl_check(bp, skb, *l4_proto))
13513 return features;
13514 break;
13515 case htons(ETH_P_IPV6):
13516 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
13517 &l4_proto))
13518 break;
13519 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
13520 return features;
13521 break;
13522 }
13523 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13524 }
13525
13526 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
13527 u32 *reg_buf)
13528 {
13529 struct hwrm_dbg_read_direct_output *resp;
13530 struct hwrm_dbg_read_direct_input *req;
13531 __le32 *dbg_reg_buf;
13532 dma_addr_t mapping;
13533 int rc, i;
13534
13535 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
13536 if (rc)
13537 return rc;
13538
13539 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
13540 &mapping);
13541 if (!dbg_reg_buf) {
13542 rc = -ENOMEM;
13543 goto dbg_rd_reg_exit;
13544 }
13545
13546 req->host_dest_addr = cpu_to_le64(mapping);
13547
13548 resp = hwrm_req_hold(bp, req);
13549 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
13550 req->read_len32 = cpu_to_le32(num_words);
13551
13552 rc = hwrm_req_send(bp, req);
13553 if (rc || resp->error_code) {
13554 rc = -EIO;
13555 goto dbg_rd_reg_exit;
13556 }
13557 for (i = 0; i < num_words; i++)
13558 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
13559
13560 dbg_rd_reg_exit:
13561 hwrm_req_drop(bp, req);
13562 return rc;
13563 }
13564
13565 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
13566 u32 ring_id, u32 *prod, u32 *cons)
13567 {
13568 struct hwrm_dbg_ring_info_get_output *resp;
13569 struct hwrm_dbg_ring_info_get_input *req;
13570 int rc;
13571
13572 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
13573 if (rc)
13574 return rc;
13575
13576 req->ring_type = ring_type;
13577 req->fw_ring_id = cpu_to_le32(ring_id);
13578 resp = hwrm_req_hold(bp, req);
13579 rc = hwrm_req_send(bp, req);
13580 if (!rc) {
13581 *prod = le32_to_cpu(resp->producer_index);
13582 *cons = le32_to_cpu(resp->consumer_index);
13583 }
13584 hwrm_req_drop(bp, req);
13585 return rc;
13586 }
13587
13588 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
13589 {
13590 struct bnxt_tx_ring_info *txr;
13591 int i = bnapi->index, j;
13592
13593 bnxt_for_each_napi_tx(j, bnapi, txr)
13594 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
13595 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
13596 txr->tx_cons);
13597 }
13598
13599 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
13600 {
13601 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
13602 int i = bnapi->index;
13603
13604 if (!rxr)
13605 return;
13606
13607 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
13608 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
13609 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
13610 rxr->rx_sw_agg_prod);
13611 }
13612
13613 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
13614 {
13615 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13616 int i = bnapi->index;
13617
13618 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
13619 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
13620 }
13621
13622 static void bnxt_dbg_dump_states(struct bnxt *bp)
13623 {
13624 int i;
13625 struct bnxt_napi *bnapi;
13626
13627 for (i = 0; i < bp->cp_nr_rings; i++) {
13628 bnapi = bp->bnapi[i];
13629 if (netif_msg_drv(bp)) {
13630 bnxt_dump_tx_sw_state(bnapi);
13631 bnxt_dump_rx_sw_state(bnapi);
13632 bnxt_dump_cp_sw_state(bnapi);
13633 }
13634 }
13635 }
13636
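/* Ask firmware to reset the RX ring group for ring_nr. Sent silently since
 * the caller falls back to a full reset if the command is not supported.
 */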
13637 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
13638 {
13639 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
13640 struct hwrm_ring_reset_input *req;
13641 struct bnxt_napi *bnapi = rxr->bnapi;
13642 struct bnxt_cp_ring_info *cpr;
13643 u16 cp_ring_id;
13644 int rc;
13645
13646 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
13647 if (rc)
13648 return rc;
13649
13650 cpr = &bnapi->cp_ring;
13651 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
13652 req->cmpl_ring = cpu_to_le16(cp_ring_id);
13653 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
13654 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
13655 return hwrm_req_send_silent(bp, req);
13656 }
13657
13658 static void bnxt_reset_task(struct bnxt *bp, bool silent)
13659 {
13660 if (!silent)
13661 bnxt_dbg_dump_states(bp);
13662 if (netif_running(bp->dev)) {
13663 bnxt_close_nic(bp, !silent, false);
13664 bnxt_open_nic(bp, !silent, false);
13665 }
13666 }
13667
13668 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
13669 {
13670 struct bnxt *bp = netdev_priv(dev);
13671
13672 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
13673 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
13674 }
13675
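/* Periodic firmware health poll driven by bnxt_timer(). A stalled heartbeat
 * register or an unexpected change of the reset counter schedules the
 * firmware exception handler.
 */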
13676 static void bnxt_fw_health_check(struct bnxt *bp)
13677 {
13678 struct bnxt_fw_health *fw_health = bp->fw_health;
13679 struct pci_dev *pdev = bp->pdev;
13680 u32 val;
13681
13682 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13683 return;
13684
13685 /* Make sure it is enabled before checking the tmr_counter. */
13686 smp_rmb();
13687 if (fw_health->tmr_counter) {
13688 fw_health->tmr_counter--;
13689 return;
13690 }
13691
13692 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13693 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
13694 fw_health->arrests++;
13695 goto fw_reset;
13696 }
13697
13698 fw_health->last_fw_heartbeat = val;
13699
13700 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13701 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
13702 fw_health->discoveries++;
13703 goto fw_reset;
13704 }
13705
13706 fw_health->tmr_counter = fw_health->tmr_multiplier;
13707 return;
13708
13709 fw_reset:
13710 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
13711 }
13712
13713 static void bnxt_timer(struct timer_list *t)
13714 {
13715 struct bnxt *bp = from_timer(bp, t, timer);
13716 struct net_device *dev = bp->dev;
13717
13718 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
13719 return;
13720
13721 if (atomic_read(&bp->intr_sem) != 0)
13722 goto bnxt_restart_timer;
13723
13724 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
13725 bnxt_fw_health_check(bp);
13726
13727 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
13728 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
13729
13730 if (bnxt_tc_flower_enabled(bp))
13731 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
13732
13733 #ifdef CONFIG_RFS_ACCEL
13734 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
13735 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
13736 #endif /*CONFIG_RFS_ACCEL*/
13737
13738 if (bp->link_info.phy_retry) {
13739 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
13740 bp->link_info.phy_retry = false;
13741 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
13742 } else {
13743 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
13744 }
13745 }
13746
13747 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13748 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13749
13750 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
13751 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
13752
13753 bnxt_restart_timer:
13754 mod_timer(&bp->timer, jiffies + bp->current_interval);
13755 }
13756
13757 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
13758 {
13759 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
13760 * set. If the device is being closed, bnxt_close() may be holding
13761 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
13762 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
13763 */
13764 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13765 rtnl_lock();
13766 }
13767
13768 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
13769 {
13770 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13771 rtnl_unlock();
13772 }
13773
13774 /* Only called from bnxt_sp_task() */
13775 static void bnxt_reset(struct bnxt *bp, bool silent)
13776 {
13777 bnxt_rtnl_lock_sp(bp);
13778 if (test_bit(BNXT_STATE_OPEN, &bp->state))
13779 bnxt_reset_task(bp, silent);
13780 bnxt_rtnl_unlock_sp(bp);
13781 }
13782
13783 /* Only called from bnxt_sp_task() */
13784 static void bnxt_rx_ring_reset(struct bnxt *bp)
13785 {
13786 int i;
13787
13788 bnxt_rtnl_lock_sp(bp);
13789 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13790 bnxt_rtnl_unlock_sp(bp);
13791 return;
13792 }
13793 /* Disable and flush TPA before resetting the RX ring */
13794 if (bp->flags & BNXT_FLAG_TPA)
13795 bnxt_set_tpa(bp, false);
13796 for (i = 0; i < bp->rx_nr_rings; i++) {
13797 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
13798 struct bnxt_cp_ring_info *cpr;
13799 int rc;
13800
13801 if (!rxr->bnapi->in_reset)
13802 continue;
13803
13804 rc = bnxt_hwrm_rx_ring_reset(bp, i);
13805 if (rc) {
13806 if (rc == -EINVAL || rc == -EOPNOTSUPP)
13807 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
13808 else
13809 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
13810 rc);
13811 bnxt_reset_task(bp, true);
13812 break;
13813 }
13814 bnxt_free_one_rx_ring_skbs(bp, rxr);
13815 rxr->rx_prod = 0;
13816 rxr->rx_agg_prod = 0;
13817 rxr->rx_sw_agg_prod = 0;
13818 rxr->rx_next_cons = 0;
13819 rxr->bnapi->in_reset = false;
13820 bnxt_alloc_one_rx_ring(bp, i);
13821 cpr = &rxr->bnapi->cp_ring;
13822 cpr->sw_stats->rx.rx_resets++;
13823 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13824 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
13825 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
13826 }
13827 if (bp->flags & BNXT_FLAG_TPA)
13828 bnxt_set_tpa(bp, true);
13829 bnxt_rtnl_unlock_sp(bp);
13830 }
13831
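/* Quiesce the data path and disable the PCI device when firmware is in a
 * fatal state, so that no further DMA can occur while memory is freed.
 */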
13832 static void bnxt_fw_fatal_close(struct bnxt *bp)
13833 {
13834 bnxt_tx_disable(bp);
13835 bnxt_disable_napi(bp);
13836 bnxt_disable_int_sync(bp);
13837 bnxt_free_irq(bp);
13838 bnxt_clear_int_mode(bp);
13839 pci_disable_device(bp->pdev);
13840 }
13841
13842 static void bnxt_fw_reset_close(struct bnxt *bp)
13843 {
13844 /* When firmware is in fatal state, quiesce device and disable
13845 * bus master to prevent any potential bad DMAs before freeing
13846 * kernel memory.
13847 */
13848 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
13849 u16 val = 0;
13850
13851 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13852 if (val == 0xffff)
13853 bp->fw_reset_min_dsecs = 0;
13854 bnxt_fw_fatal_close(bp);
13855 }
13856 __bnxt_close_nic(bp, true, false);
13857 bnxt_vf_reps_free(bp);
13858 bnxt_clear_int_mode(bp);
13859 bnxt_hwrm_func_drv_unrgtr(bp);
13860 if (pci_is_enabled(bp->pdev))
13861 pci_disable_device(bp->pdev);
13862 bnxt_free_ctx_mem(bp, false);
13863 }
13864
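/* Returns true if the firmware heartbeat is still advancing and the reset
 * counter has changed since last recorded, i.e. firmware went through a
 * reset and is running again.
 */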
13865 static bool is_bnxt_fw_ok(struct bnxt *bp)
13866 {
13867 struct bnxt_fw_health *fw_health = bp->fw_health;
13868 bool no_heartbeat = false, has_reset = false;
13869 u32 val;
13870
13871 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13872 if (val == fw_health->last_fw_heartbeat)
13873 no_heartbeat = true;
13874
13875 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13876 if (val != fw_health->last_fw_reset_cnt)
13877 has_reset = true;
13878
13879 if (!no_heartbeat && has_reset)
13880 return true;
13881
13882 return false;
13883 }
13884
13885 /* rtnl_lock is acquired before calling this function */
13886 static void bnxt_force_fw_reset(struct bnxt *bp)
13887 {
13888 struct bnxt_fw_health *fw_health = bp->fw_health;
13889 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13890 u32 wait_dsecs;
13891
13892 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
13893 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13894 return;
13895
13896 /* we have to serialize with bnxt_refclk_read() */
13897 if (ptp) {
13898 unsigned long flags;
13899
13900 write_seqlock_irqsave(&ptp->ptp_lock, flags);
13901 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13902 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
13903 } else {
13904 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13905 }
13906 bnxt_fw_reset_close(bp);
13907 wait_dsecs = fw_health->master_func_wait_dsecs;
13908 if (fw_health->primary) {
13909 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
13910 wait_dsecs = 0;
13911 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13912 } else {
13913 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
13914 wait_dsecs = fw_health->normal_func_wait_dsecs;
13915 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13916 }
13917
13918 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
13919 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
13920 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13921 }
13922
13923 void bnxt_fw_exception(struct bnxt *bp)
13924 {
13925 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
13926 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13927 bnxt_ulp_stop(bp);
13928 bnxt_rtnl_lock_sp(bp);
13929 bnxt_force_fw_reset(bp);
13930 bnxt_rtnl_unlock_sp(bp);
13931 }
13932
13933 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
13934 * < 0 on error.
13935 */
13936 static int bnxt_get_registered_vfs(struct bnxt *bp)
13937 {
13938 #ifdef CONFIG_BNXT_SRIOV
13939 int rc;
13940
13941 if (!BNXT_PF(bp))
13942 return 0;
13943
13944 rc = bnxt_hwrm_func_qcfg(bp);
13945 if (rc) {
13946 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
13947 return rc;
13948 }
13949 if (bp->pf.registered_vfs)
13950 return bp->pf.registered_vfs;
13951 if (bp->sriov_cfg)
13952 return 1;
13953 #endif
13954 return 0;
13955 }
13956
13957 void bnxt_fw_reset(struct bnxt *bp)
13958 {
13959 bnxt_ulp_stop(bp);
13960 bnxt_rtnl_lock_sp(bp);
13961 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
13962 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13963 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13964 int n = 0, tmo;
13965
13966 /* we have to serialize with bnxt_refclk_read() */
13967 if (ptp) {
13968 unsigned long flags;
13969
13970 write_seqlock_irqsave(&ptp->ptp_lock, flags);
13971 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13972 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
13973 } else {
13974 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13975 }
13976 if (bp->pf.active_vfs &&
13977 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
13978 n = bnxt_get_registered_vfs(bp);
13979 if (n < 0) {
13980 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
13981 n);
13982 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13983 dev_close(bp->dev);
13984 goto fw_reset_exit;
13985 } else if (n > 0) {
13986 u16 vf_tmo_dsecs = n * 10;
13987
13988 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
13989 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
13990 bp->fw_reset_state =
13991 BNXT_FW_RESET_STATE_POLL_VF;
13992 bnxt_queue_fw_reset_work(bp, HZ / 10);
13993 goto fw_reset_exit;
13994 }
13995 bnxt_fw_reset_close(bp);
13996 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13997 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13998 tmo = HZ / 10;
13999 } else {
14000 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14001 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14002 }
14003 bnxt_queue_fw_reset_work(bp, tmo);
14004 }
14005 fw_reset_exit:
14006 bnxt_rtnl_unlock_sp(bp);
14007 }
14008
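/* On P5 and newer chips, scan the completion rings for ones that have
 * pending work but whose consumer index has not moved since the last check,
 * query the ring state from firmware and count a missed IRQ.
 */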
14009 static void bnxt_chk_missed_irq(struct bnxt *bp)
14010 {
14011 int i;
14012
14013 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14014 return;
14015
14016 for (i = 0; i < bp->cp_nr_rings; i++) {
14017 struct bnxt_napi *bnapi = bp->bnapi[i];
14018 struct bnxt_cp_ring_info *cpr;
14019 u32 fw_ring_id;
14020 int j;
14021
14022 if (!bnapi)
14023 continue;
14024
14025 cpr = &bnapi->cp_ring;
14026 for (j = 0; j < cpr->cp_ring_count; j++) {
14027 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14028 u32 val[2];
14029
14030 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14031 continue;
14032
14033 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14034 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14035 continue;
14036 }
14037 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14038 bnxt_dbg_hwrm_ring_info_get(bp,
14039 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14040 fw_ring_id, &val[0], &val[1]);
14041 cpr->sw_stats->cmn.missed_irqs++;
14042 }
14043 }
14044 }
14045
14046 static void bnxt_cfg_ntp_filters(struct bnxt *);
14047
14048 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14049 {
14050 struct bnxt_link_info *link_info = &bp->link_info;
14051
14052 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14053 link_info->autoneg = BNXT_AUTONEG_SPEED;
14054 if (bp->hwrm_spec_code >= 0x10201) {
14055 if (link_info->auto_pause_setting &
14056 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14057 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14058 } else {
14059 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14060 }
14061 bnxt_set_auto_speed(link_info);
14062 } else {
14063 bnxt_set_force_speed(link_info);
14064 link_info->req_duplex = link_info->duplex_setting;
14065 }
14066 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14067 link_info->req_flow_ctrl =
14068 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14069 else
14070 link_info->req_flow_ctrl = link_info->force_pause_setting;
14071 }
14072
14073 static void bnxt_fw_echo_reply(struct bnxt *bp)
14074 {
14075 struct bnxt_fw_health *fw_health = bp->fw_health;
14076 struct hwrm_func_echo_response_input *req;
14077 int rc;
14078
14079 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14080 if (rc)
14081 return;
14082 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14083 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14084 hwrm_req_send(bp, req);
14085 }
14086
14087 static void bnxt_ulp_restart(struct bnxt *bp)
14088 {
14089 bnxt_ulp_stop(bp);
14090 bnxt_ulp_start(bp, 0);
14091 }
14092
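/* Slow-path workqueue handler.  Processes the deferred events queued in
 * bp->sp_event while holding BNXT_STATE_IN_SP_TASK; the reset-type events
 * are handled last because they clear that state bit themselves.
 */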
14093 static void bnxt_sp_task(struct work_struct *work)
14094 {
14095 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14096
14097 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14098 smp_mb__after_atomic();
14099 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14100 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14101 return;
14102 }
14103
14104 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14105 bnxt_ulp_restart(bp);
14106 bnxt_reenable_sriov(bp);
14107 }
14108
14109 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14110 bnxt_cfg_rx_mode(bp);
14111
14112 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14113 bnxt_cfg_ntp_filters(bp);
14114 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14115 bnxt_hwrm_exec_fwd_req(bp);
14116 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14117 netdev_info(bp->dev, "Receive PF driver unload event!\n");
14118 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14119 bnxt_hwrm_port_qstats(bp, 0);
14120 bnxt_hwrm_port_qstats_ext(bp, 0);
14121 bnxt_accumulate_all_stats(bp);
14122 }
14123
14124 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14125 int rc;
14126
14127 mutex_lock(&bp->link_lock);
14128 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14129 &bp->sp_event))
14130 bnxt_hwrm_phy_qcaps(bp);
14131
14132 rc = bnxt_update_link(bp, true);
14133 if (rc)
14134 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14135 rc);
14136
14137 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14138 &bp->sp_event))
14139 bnxt_init_ethtool_link_settings(bp);
14140 mutex_unlock(&bp->link_lock);
14141 }
14142 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14143 int rc;
14144
14145 mutex_lock(&bp->link_lock);
14146 rc = bnxt_update_phy_setting(bp);
14147 mutex_unlock(&bp->link_lock);
14148 if (rc) {
14149 netdev_warn(bp->dev, "update phy settings retry failed\n");
14150 } else {
14151 bp->link_info.phy_retry = false;
14152 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14153 }
14154 }
14155 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14156 mutex_lock(&bp->link_lock);
14157 bnxt_get_port_module_status(bp);
14158 mutex_unlock(&bp->link_lock);
14159 }
14160
14161 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14162 bnxt_tc_flow_stats_work(bp);
14163
14164 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14165 bnxt_chk_missed_irq(bp);
14166
14167 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14168 bnxt_fw_echo_reply(bp);
14169
14170 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14171 bnxt_hwmon_notify_event(bp);
14172
14173 /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
14174 * must be the last functions called before exiting.
14175 */
14176 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14177 bnxt_reset(bp, false);
14178
14179 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14180 bnxt_reset(bp, true);
14181
14182 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14183 bnxt_rx_ring_reset(bp);
14184
14185 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14186 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14187 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14188 bnxt_devlink_health_fw_report(bp);
14189 else
14190 bnxt_fw_reset(bp);
14191 }
14192
14193 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14194 if (!is_bnxt_fw_ok(bp))
14195 bnxt_devlink_health_fw_report(bp);
14196 }
14197
14198 smp_mb__before_atomic();
14199 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14200 }
14201
14202 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14203 int *max_cp);
14204
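/* Check whether the requested ring configuration (tx/rx rings, TCs, XDP
 * rings, shared or separate completion rings) fits within the hardware
 * resource and MSI-X limits.  Returns 0 if it fits, -ENOMEM or -ENOSPC
 * otherwise.
 */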
14205 /* Under rtnl_lock */
14206 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14207 int tx_xdp)
14208 {
14209 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14210 struct bnxt_hw_rings hwr = {0};
14211 int rx_rings = rx;
14212 int rc;
14213
14214 if (tcs)
14215 tx_sets = tcs;
14216
14217 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14218
14219 if (max_rx < rx_rings)
14220 return -ENOMEM;
14221
14222 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14223 rx_rings <<= 1;
14224
14225 hwr.rx = rx_rings;
14226 hwr.tx = tx * tx_sets + tx_xdp;
14227 if (max_tx < hwr.tx)
14228 return -ENOMEM;
14229
14230 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14231
14232 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14233 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14234 if (max_cp < hwr.cp)
14235 return -ENOMEM;
14236 hwr.stat = hwr.cp;
14237 if (BNXT_NEW_RM(bp)) {
14238 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14239 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14240 hwr.grp = rx;
14241 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14242 }
14243 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14244 hwr.cp_p5 = hwr.tx + rx;
14245 rc = bnxt_hwrm_check_rings(bp, &hwr);
14246 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14247 if (!bnxt_ulp_registered(bp->edev)) {
14248 hwr.cp += bnxt_get_ulp_msix_num(bp);
14249 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14250 }
14251 if (hwr.cp > bp->total_irqs) {
14252 int total_msix = bnxt_change_msix(bp, hwr.cp);
14253
14254 if (total_msix < hwr.cp) {
14255 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14256 hwr.cp, total_msix);
14257 rc = -ENOSPC;
14258 }
14259 }
14260 }
14261 return rc;
14262 }
14263
14264 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14265 {
14266 if (bp->bar2) {
14267 pci_iounmap(pdev, bp->bar2);
14268 bp->bar2 = NULL;
14269 }
14270
14271 if (bp->bar1) {
14272 pci_iounmap(pdev, bp->bar1);
14273 bp->bar1 = NULL;
14274 }
14275
14276 if (bp->bar0) {
14277 pci_iounmap(pdev, bp->bar0);
14278 bp->bar0 = NULL;
14279 }
14280 }
14281
14282 static void bnxt_cleanup_pci(struct bnxt *bp)
14283 {
14284 bnxt_unmap_bars(bp, bp->pdev);
14285 pci_release_regions(bp->pdev);
14286 if (pci_is_enabled(bp->pdev))
14287 pci_disable_device(bp->pdev);
14288 }
14289
14290 static void bnxt_init_dflt_coal(struct bnxt *bp)
14291 {
14292 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14293 struct bnxt_coal *coal;
14294 u16 flags = 0;
14295
14296 if (coal_cap->cmpl_params &
14297 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14298 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14299
14300 /* Tick values in microseconds.
14301 * 1 coal_buf x bufs_per_record = 1 completion record.
14302 */
14303 coal = &bp->rx_coal;
14304 coal->coal_ticks = 10;
14305 coal->coal_bufs = 30;
14306 coal->coal_ticks_irq = 1;
14307 coal->coal_bufs_irq = 2;
14308 coal->idle_thresh = 50;
14309 coal->bufs_per_record = 2;
14310 coal->budget = 64; /* NAPI budget */
14311 coal->flags = flags;
14312
14313 coal = &bp->tx_coal;
14314 coal->coal_ticks = 28;
14315 coal->coal_bufs = 30;
14316 coal->coal_ticks_irq = 2;
14317 coal->coal_bufs_irq = 2;
14318 coal->bufs_per_record = 1;
14319 coal->flags = flags;
14320
14321 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14322 }
14323
14324 /* FW that pre-reserves 1 VNIC per function */
14325 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14326 {
14327 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14328
14329 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14330 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14331 return true;
14332 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14333 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14334 return true;
14335 return false;
14336 }
14337
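/* Firmware init phase 1: query the firmware version (retrying through
 * firmware recovery if it is unresponsive after an FLR), read the NVM
 * config version, and reset the function.
 */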
14338 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14339 {
14340 int rc;
14341
14342 bp->fw_cap = 0;
14343 rc = bnxt_hwrm_ver_get(bp);
14344 /* FW may be unresponsive after FLR.  FLR must complete within 100 msec,
14345 * so wait before continuing with recovery.
14346 */
14347 if (rc)
14348 msleep(100);
14349 bnxt_try_map_fw_health_reg(bp);
14350 if (rc) {
14351 rc = bnxt_try_recover_fw(bp);
14352 if (rc)
14353 return rc;
14354 rc = bnxt_hwrm_ver_get(bp);
14355 if (rc)
14356 return rc;
14357 }
14358
14359 bnxt_nvm_cfg_ver_get(bp);
14360
14361 rc = bnxt_hwrm_func_reset(bp);
14362 if (rc)
14363 return -ENODEV;
14364
14365 bnxt_hwrm_fw_set_time(bp);
14366 return 0;
14367 }
14368
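/* Firmware init phase 2: query device capabilities, set up error
 * recovery and crash dump memory, register the driver with firmware,
 * and initialize ethtool, DCB, PTP, and hwmon support.
 */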
14369 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14370 {
14371 int rc;
14372
14373 /* Get the MAX capabilities for this function */
14374 rc = bnxt_hwrm_func_qcaps(bp);
14375 if (rc) {
14376 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14377 rc);
14378 return -ENODEV;
14379 }
14380
14381 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14382 if (rc)
14383 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14384 rc);
14385
14386 if (bnxt_alloc_fw_health(bp)) {
14387 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14388 } else {
14389 rc = bnxt_hwrm_error_recovery_qcfg(bp);
14390 if (rc)
14391 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14392 rc);
14393 }
14394
14395 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14396 if (rc)
14397 return -ENODEV;
14398
14399 rc = bnxt_alloc_crash_dump_mem(bp);
14400 if (rc)
14401 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14402 rc);
14403 if (!rc) {
14404 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14405 if (rc) {
14406 bnxt_free_crash_dump_mem(bp);
14407 netdev_warn(bp->dev,
14408 "hwrm crash dump mem failure rc: %d\n", rc);
14409 }
14410 }
14411
14412 if (bnxt_fw_pre_resv_vnics(bp))
14413 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14414
14415 bnxt_hwrm_func_qcfg(bp);
14416 bnxt_hwrm_vnic_qcaps(bp);
14417 bnxt_hwrm_port_led_qcaps(bp);
14418 bnxt_ethtool_init(bp);
14419 if (bp->fw_cap & BNXT_FW_CAP_PTP)
14420 __bnxt_hwrm_ptp_qcfg(bp);
14421 bnxt_dcb_init(bp);
14422 bnxt_hwmon_init(bp);
14423 return 0;
14424 }
14425
14426 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14427 {
14428 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14429 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14430 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14431 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14432 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14433 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14434 bp->rss_hash_delta = bp->rss_hash_cfg;
14435 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14436 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14437 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14438 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14439 }
14440 }
14441
14442 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14443 {
14444 struct net_device *dev = bp->dev;
14445
14446 dev->hw_features &= ~NETIF_F_NTUPLE;
14447 dev->features &= ~NETIF_F_NTUPLE;
14448 bp->flags &= ~BNXT_FLAG_RFS;
14449 if (bnxt_rfs_supported(bp)) {
14450 dev->hw_features |= NETIF_F_NTUPLE;
14451 if (bnxt_rfs_capable(bp, false)) {
14452 bp->flags |= BNXT_FLAG_RFS;
14453 dev->features |= NETIF_F_NTUPLE;
14454 }
14455 }
14456 }
14457
14458 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14459 {
14460 struct pci_dev *pdev = bp->pdev;
14461
14462 bnxt_set_dflt_rss_hash_type(bp);
14463 bnxt_set_dflt_rfs(bp);
14464
14465 bnxt_get_wol_settings(bp);
14466 if (bp->flags & BNXT_FLAG_WOL_CAP)
14467 device_set_wakeup_enable(&pdev->dev, bp->wol);
14468 else
14469 device_set_wakeup_capable(&pdev->dev, false);
14470
14471 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14472 bnxt_hwrm_coal_params_qcaps(bp);
14473 }
14474
14475 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14476
14477 int bnxt_fw_init_one(struct bnxt *bp)
14478 {
14479 int rc;
14480
14481 rc = bnxt_fw_init_one_p1(bp);
14482 if (rc) {
14483 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
14484 return rc;
14485 }
14486 rc = bnxt_fw_init_one_p2(bp);
14487 if (rc) {
14488 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
14489 return rc;
14490 }
14491 rc = bnxt_probe_phy(bp, false);
14492 if (rc)
14493 return rc;
14494 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
14495 if (rc)
14496 return rc;
14497
14498 bnxt_fw_init_one_p3(bp);
14499 return 0;
14500 }
14501
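/* Execute one step of the host-driven firmware reset sequence: write the
 * configured value to a PCI config, GRC, BAR0, or BAR1 register and apply
 * the optional per-step delay.
 */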
14502 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
14503 {
14504 struct bnxt_fw_health *fw_health = bp->fw_health;
14505 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
14506 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
14507 u32 reg_type, reg_off, delay_msecs;
14508
14509 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
14510 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
14511 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
14512 switch (reg_type) {
14513 case BNXT_FW_HEALTH_REG_TYPE_CFG:
14514 pci_write_config_dword(bp->pdev, reg_off, val);
14515 break;
14516 case BNXT_FW_HEALTH_REG_TYPE_GRC:
14517 writel(reg_off & BNXT_GRC_BASE_MASK,
14518 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
14519 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
14520 fallthrough;
14521 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
14522 writel(val, bp->bar0 + reg_off);
14523 break;
14524 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
14525 writel(val, bp->bar1 + reg_off);
14526 break;
14527 }
14528 if (delay_msecs) {
14529 pci_read_config_dword(bp->pdev, 0, &val);
14530 msleep(delay_msecs);
14531 }
14532 }
14533
14534 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
14535 {
14536 struct hwrm_func_qcfg_output *resp;
14537 struct hwrm_func_qcfg_input *req;
14538 bool result = true; /* firmware will enforce if unknown */
14539
14540 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
14541 return result;
14542
14543 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
14544 return result;
14545
14546 req->fid = cpu_to_le16(0xffff);
14547 resp = hwrm_req_hold(bp, req);
14548 if (!hwrm_req_send(bp, req))
14549 result = !!(le16_to_cpu(resp->flags) &
14550 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
14551 hwrm_req_drop(bp, req);
14552 return result;
14553 }
14554
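/* Perform the actual firmware reset, either through OP-TEE, by writing
 * the host reset register sequence, or by sending HWRM_FW_RESET to the
 * co-processor, and record the reset timestamp.
 */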
14555 static void bnxt_reset_all(struct bnxt *bp)
14556 {
14557 struct bnxt_fw_health *fw_health = bp->fw_health;
14558 int i, rc;
14559
14560 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14561 bnxt_fw_reset_via_optee(bp);
14562 bp->fw_reset_timestamp = jiffies;
14563 return;
14564 }
14565
14566 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
14567 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
14568 bnxt_fw_reset_writel(bp, i);
14569 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
14570 struct hwrm_fw_reset_input *req;
14571
14572 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
14573 if (!rc) {
14574 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
14575 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
14576 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
14577 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
14578 rc = hwrm_req_send(bp, req);
14579 }
14580 if (rc != -ENODEV)
14581 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
14582 }
14583 bp->fw_reset_timestamp = jiffies;
14584 }
14585
14586 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
14587 {
14588 return time_after(jiffies, bp->fw_reset_timestamp +
14589 (bp->fw_reset_max_dsecs * HZ / 10));
14590 }
14591
14592 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
14593 {
14594 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14595 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
14596 bnxt_dl_health_fw_status_update(bp, false);
14597 bp->fw_reset_state = 0;
14598 dev_close(bp->dev);
14599 }
14600
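/* Delayed-work state machine that drives a firmware reset through the
 * POLL_VF, POLL_FW_DOWN, RESET_FW, ENABLE_DEV, POLL_FW, and OPENING
 * states, re-opening the device once firmware is back up.
 */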
14601 static void bnxt_fw_reset_task(struct work_struct *work)
14602 {
14603 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
14604 int rc = 0;
14605
14606 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14607 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
14608 return;
14609 }
14610
14611 switch (bp->fw_reset_state) {
14612 case BNXT_FW_RESET_STATE_POLL_VF: {
14613 int n = bnxt_get_registered_vfs(bp);
14614 int tmo;
14615
14616 if (n < 0) {
14617 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
14618 n, jiffies_to_msecs(jiffies -
14619 bp->fw_reset_timestamp));
14620 goto fw_reset_abort;
14621 } else if (n > 0) {
14622 if (bnxt_fw_reset_timeout(bp)) {
14623 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14624 bp->fw_reset_state = 0;
14625 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
14626 n);
14627 goto ulp_start;
14628 }
14629 bnxt_queue_fw_reset_work(bp, HZ / 10);
14630 return;
14631 }
14632 bp->fw_reset_timestamp = jiffies;
14633 rtnl_lock();
14634 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
14635 bnxt_fw_reset_abort(bp, rc);
14636 rtnl_unlock();
14637 goto ulp_start;
14638 }
14639 bnxt_fw_reset_close(bp);
14640 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14641 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14642 tmo = HZ / 10;
14643 } else {
14644 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14645 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14646 }
14647 rtnl_unlock();
14648 bnxt_queue_fw_reset_work(bp, tmo);
14649 return;
14650 }
14651 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
14652 u32 val;
14653
14654 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
14655 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
14656 !bnxt_fw_reset_timeout(bp)) {
14657 bnxt_queue_fw_reset_work(bp, HZ / 5);
14658 return;
14659 }
14660
14661 if (!bp->fw_health->primary) {
14662 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
14663
14664 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14665 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14666 return;
14667 }
14668 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14669 }
14670 fallthrough;
14671 case BNXT_FW_RESET_STATE_RESET_FW:
14672 bnxt_reset_all(bp);
14673 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14674 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
14675 return;
14676 case BNXT_FW_RESET_STATE_ENABLE_DEV:
14677 bnxt_inv_fw_health_reg(bp);
14678 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
14679 !bp->fw_reset_min_dsecs) {
14680 u16 val;
14681
14682 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14683 if (val == 0xffff) {
14684 if (bnxt_fw_reset_timeout(bp)) {
14685 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
14686 rc = -ETIMEDOUT;
14687 goto fw_reset_abort;
14688 }
14689 bnxt_queue_fw_reset_work(bp, HZ / 1000);
14690 return;
14691 }
14692 }
14693 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14694 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
14695 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
14696 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
14697 bnxt_dl_remote_reload(bp);
14698 if (pci_enable_device(bp->pdev)) {
14699 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
14700 rc = -ENODEV;
14701 goto fw_reset_abort;
14702 }
14703 pci_set_master(bp->pdev);
14704 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
14705 fallthrough;
14706 case BNXT_FW_RESET_STATE_POLL_FW:
14707 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
14708 rc = bnxt_hwrm_poll(bp);
14709 if (rc) {
14710 if (bnxt_fw_reset_timeout(bp)) {
14711 netdev_err(bp->dev, "Firmware reset aborted\n");
14712 goto fw_reset_abort_status;
14713 }
14714 bnxt_queue_fw_reset_work(bp, HZ / 5);
14715 return;
14716 }
14717 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
14718 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
14719 fallthrough;
14720 case BNXT_FW_RESET_STATE_OPENING:
14721 while (!rtnl_trylock()) {
14722 bnxt_queue_fw_reset_work(bp, HZ / 10);
14723 return;
14724 }
14725 rc = bnxt_open(bp->dev);
14726 if (rc) {
14727 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
14728 bnxt_fw_reset_abort(bp, rc);
14729 rtnl_unlock();
14730 goto ulp_start;
14731 }
14732
14733 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
14734 bp->fw_health->enabled) {
14735 bp->fw_health->last_fw_reset_cnt =
14736 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14737 }
14738 bp->fw_reset_state = 0;
14739 /* Make sure fw_reset_state is 0 before clearing the flag */
14740 smp_mb__before_atomic();
14741 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14742 bnxt_ptp_reapply_pps(bp);
14743 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
14744 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
14745 bnxt_dl_health_fw_recovery_done(bp);
14746 bnxt_dl_health_fw_status_update(bp, true);
14747 }
14748 rtnl_unlock();
14749 bnxt_ulp_start(bp, 0);
14750 bnxt_reenable_sriov(bp);
14751 rtnl_lock();
14752 bnxt_vf_reps_alloc(bp);
14753 bnxt_vf_reps_open(bp);
14754 rtnl_unlock();
14755 break;
14756 }
14757 return;
14758
14759 fw_reset_abort_status:
14760 if (bp->fw_health->status_reliable ||
14761 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
14762 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
14763
14764 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
14765 }
14766 fw_reset_abort:
14767 rtnl_lock();
14768 bnxt_fw_reset_abort(bp, rc);
14769 rtnl_unlock();
14770 ulp_start:
14771 bnxt_ulp_start(bp, rc);
14772 }
14773
14774 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
14775 {
14776 int rc;
14777 struct bnxt *bp = netdev_priv(dev);
14778
14779 SET_NETDEV_DEV(dev, &pdev->dev);
14780
14781 /* enable device (incl. PCI PM wakeup), and bus-mastering */
14782 rc = pci_enable_device(pdev);
14783 if (rc) {
14784 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14785 goto init_err;
14786 }
14787
14788 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
14789 dev_err(&pdev->dev,
14790 "Cannot find PCI device base address, aborting\n");
14791 rc = -ENODEV;
14792 goto init_err_disable;
14793 }
14794
14795 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
14796 if (rc) {
14797 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14798 goto init_err_disable;
14799 }
14800
14801 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
14802 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
14803 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
14804 rc = -EIO;
14805 goto init_err_release;
14806 }
14807
14808 pci_set_master(pdev);
14809
14810 bp->dev = dev;
14811 bp->pdev = pdev;
14812
14813 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
14814 * determines the BAR size.
14815 */
14816 bp->bar0 = pci_ioremap_bar(pdev, 0);
14817 if (!bp->bar0) {
14818 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14819 rc = -ENOMEM;
14820 goto init_err_release;
14821 }
14822
14823 bp->bar2 = pci_ioremap_bar(pdev, 4);
14824 if (!bp->bar2) {
14825 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
14826 rc = -ENOMEM;
14827 goto init_err_release;
14828 }
14829
14830 INIT_WORK(&bp->sp_task, bnxt_sp_task);
14831 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
14832
14833 spin_lock_init(&bp->ntp_fltr_lock);
14834 #if BITS_PER_LONG == 32
14835 spin_lock_init(&bp->db_lock);
14836 #endif
14837
14838 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
14839 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
14840
14841 timer_setup(&bp->timer, bnxt_timer, 0);
14842 bp->current_interval = BNXT_TIMER_INTERVAL;
14843
14844 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
14845 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
14846
14847 clear_bit(BNXT_STATE_OPEN, &bp->state);
14848 return 0;
14849
14850 init_err_release:
14851 bnxt_unmap_bars(bp, pdev);
14852 pci_release_regions(pdev);
14853
14854 init_err_disable:
14855 pci_disable_device(pdev);
14856
14857 init_err:
14858 return rc;
14859 }
14860
14861 /* rtnl_lock held */
14862 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
14863 {
14864 struct sockaddr *addr = p;
14865 struct bnxt *bp = netdev_priv(dev);
14866 int rc = 0;
14867
14868 if (!is_valid_ether_addr(addr->sa_data))
14869 return -EADDRNOTAVAIL;
14870
14871 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
14872 return 0;
14873
14874 rc = bnxt_approve_mac(bp, addr->sa_data, true);
14875 if (rc)
14876 return rc;
14877
14878 eth_hw_addr_set(dev, addr->sa_data);
14879 bnxt_clear_usr_fltrs(bp, true);
14880 if (netif_running(dev)) {
14881 bnxt_close_nic(bp, false, false);
14882 rc = bnxt_open_nic(bp, false, false);
14883 }
14884
14885 return rc;
14886 }
14887
14888 /* rtnl_lock held */
14889 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
14890 {
14891 struct bnxt *bp = netdev_priv(dev);
14892
14893 if (netif_running(dev))
14894 bnxt_close_nic(bp, true, false);
14895
14896 WRITE_ONCE(dev->mtu, new_mtu);
14897
14898 /* MTU change may change the AGG ring settings if an XDP multi-buffer
14899 * program is attached. We need to set the AGG ring settings and
14900 * rx_skb_func accordingly.
14901 */
14902 if (READ_ONCE(bp->xdp_prog))
14903 bnxt_set_rx_skb_mode(bp, true);
14904
14905 bnxt_set_ring_params(bp);
14906
14907 if (netif_running(dev))
14908 return bnxt_open_nic(bp, true, false);
14909
14910 return 0;
14911 }
14912
14913 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
14914 {
14915 struct bnxt *bp = netdev_priv(dev);
14916 bool sh = false;
14917 int rc, tx_cp;
14918
14919 if (tc > bp->max_tc) {
14920 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
14921 tc, bp->max_tc);
14922 return -EINVAL;
14923 }
14924
14925 if (bp->num_tc == tc)
14926 return 0;
14927
14928 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
14929 sh = true;
14930
14931 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
14932 sh, tc, bp->tx_nr_rings_xdp);
14933 if (rc)
14934 return rc;
14935
14936 /* Need to close the device and redo hw resource allocation */
14937 if (netif_running(bp->dev))
14938 bnxt_close_nic(bp, true, false);
14939
14940 if (tc) {
14941 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
14942 netdev_set_num_tc(dev, tc);
14943 bp->num_tc = tc;
14944 } else {
14945 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14946 netdev_reset_tc(dev);
14947 bp->num_tc = 0;
14948 }
14949 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
14950 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
14951 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
14952 tx_cp + bp->rx_nr_rings;
14953
14954 if (netif_running(bp->dev))
14955 return bnxt_open_nic(bp, true, false);
14956
14957 return 0;
14958 }
14959
14960 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
14961 void *cb_priv)
14962 {
14963 struct bnxt *bp = cb_priv;
14964
14965 if (!bnxt_tc_flower_enabled(bp) ||
14966 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
14967 return -EOPNOTSUPP;
14968
14969 switch (type) {
14970 case TC_SETUP_CLSFLOWER:
14971 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
14972 default:
14973 return -EOPNOTSUPP;
14974 }
14975 }
14976
14977 LIST_HEAD(bnxt_block_cb_list);
14978
14979 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
14980 void *type_data)
14981 {
14982 struct bnxt *bp = netdev_priv(dev);
14983
14984 switch (type) {
14985 case TC_SETUP_BLOCK:
14986 return flow_block_cb_setup_simple(type_data,
14987 &bnxt_block_cb_list,
14988 bnxt_setup_tc_block_cb,
14989 bp, bp, true);
14990 case TC_SETUP_QDISC_MQPRIO: {
14991 struct tc_mqprio_qopt *mqprio = type_data;
14992
14993 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
14994
14995 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
14996 }
14997 default:
14998 return -EOPNOTSUPP;
14999 }
15000 }
15001
15002 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15003 const struct sk_buff *skb)
15004 {
15005 struct bnxt_vnic_info *vnic;
15006
15007 if (skb)
15008 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15009
15010 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15011 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15012 }
15013
15014 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15015 u32 idx)
15016 {
15017 struct hlist_head *head;
15018 int bit_id;
15019
15020 spin_lock_bh(&bp->ntp_fltr_lock);
15021 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15022 if (bit_id < 0) {
15023 spin_unlock_bh(&bp->ntp_fltr_lock);
15024 return -ENOMEM;
15025 }
15026
15027 fltr->base.sw_id = (u16)bit_id;
15028 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15029 fltr->base.flags |= BNXT_ACT_RING_DST;
15030 head = &bp->ntp_fltr_hash_tbl[idx];
15031 hlist_add_head_rcu(&fltr->base.hash, head);
15032 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15033 bnxt_insert_usr_fltr(bp, &fltr->base);
15034 bp->ntp_fltr_count++;
15035 spin_unlock_bh(&bp->ntp_fltr_lock);
15036 return 0;
15037 }
15038
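/* Return true if two ntuple filters match on protocol, addresses, ports,
 * masks, control flags, and the underlying L2 filter.
 */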
15039 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15040 struct bnxt_ntuple_filter *f2)
15041 {
15042 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15043 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15044 struct flow_keys *keys1 = &f1->fkeys;
15045 struct flow_keys *keys2 = &f2->fkeys;
15046
15047 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15048 keys1->basic.ip_proto != keys2->basic.ip_proto)
15049 return false;
15050
15051 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15052 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15053 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15054 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15055 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15056 return false;
15057 } else {
15058 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15059 &keys2->addrs.v6addrs.src) ||
15060 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15061 &masks2->addrs.v6addrs.src) ||
15062 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15063 &keys2->addrs.v6addrs.dst) ||
15064 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15065 &masks2->addrs.v6addrs.dst))
15066 return false;
15067 }
15068
15069 return keys1->ports.src == keys2->ports.src &&
15070 masks1->ports.src == masks2->ports.src &&
15071 keys1->ports.dst == keys2->ports.dst &&
15072 masks1->ports.dst == masks2->ports.dst &&
15073 keys1->control.flags == keys2->control.flags &&
15074 f1->l2_fltr == f2->l2_fltr;
15075 }
15076
15077 struct bnxt_ntuple_filter *
15078 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15079 struct bnxt_ntuple_filter *fltr, u32 idx)
15080 {
15081 struct bnxt_ntuple_filter *f;
15082 struct hlist_head *head;
15083
15084 head = &bp->ntp_fltr_hash_tbl[idx];
15085 hlist_for_each_entry_rcu(f, head, base.hash) {
15086 if (bnxt_fltr_match(f, fltr))
15087 return f;
15088 }
15089 return NULL;
15090 }
15091
15092 #ifdef CONFIG_RFS_ACCEL
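/* aRFS flow steering handler: dissect the flow, build an ntuple filter
 * that references the matching L2 filter, and insert it if an equivalent
 * filter is not already present; the SP task then programs it into
 * hardware.
 */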
15093 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15094 u16 rxq_index, u32 flow_id)
15095 {
15096 struct bnxt *bp = netdev_priv(dev);
15097 struct bnxt_ntuple_filter *fltr, *new_fltr;
15098 struct flow_keys *fkeys;
15099 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15100 struct bnxt_l2_filter *l2_fltr;
15101 int rc = 0, idx;
15102 u32 flags;
15103
15104 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15105 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15106 atomic_inc(&l2_fltr->refcnt);
15107 } else {
15108 struct bnxt_l2_key key;
15109
15110 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15111 key.vlan = 0;
15112 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15113 if (!l2_fltr)
15114 return -EINVAL;
15115 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15116 bnxt_del_l2_filter(bp, l2_fltr);
15117 return -EINVAL;
15118 }
15119 }
15120 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15121 if (!new_fltr) {
15122 bnxt_del_l2_filter(bp, l2_fltr);
15123 return -ENOMEM;
15124 }
15125
15126 fkeys = &new_fltr->fkeys;
15127 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15128 rc = -EPROTONOSUPPORT;
15129 goto err_free;
15130 }
15131
15132 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15133 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15134 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15135 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15136 rc = -EPROTONOSUPPORT;
15137 goto err_free;
15138 }
15139 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15140 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15141 if (bp->hwrm_spec_code < 0x10601) {
15142 rc = -EPROTONOSUPPORT;
15143 goto err_free;
15144 }
15145 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15146 }
15147 flags = fkeys->control.flags;
15148 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15149 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15150 rc = -EPROTONOSUPPORT;
15151 goto err_free;
15152 }
15153 new_fltr->l2_fltr = l2_fltr;
15154
15155 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15156 rcu_read_lock();
15157 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15158 if (fltr) {
15159 rc = fltr->base.sw_id;
15160 rcu_read_unlock();
15161 goto err_free;
15162 }
15163 rcu_read_unlock();
15164
15165 new_fltr->flow_id = flow_id;
15166 new_fltr->base.rxq = rxq_index;
15167 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15168 if (!rc) {
15169 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15170 return new_fltr->base.sw_id;
15171 }
15172
15173 err_free:
15174 bnxt_del_l2_filter(bp, l2_fltr);
15175 kfree(new_fltr);
15176 return rc;
15177 }
15178 #endif
15179
15180 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15181 {
15182 spin_lock_bh(&bp->ntp_fltr_lock);
15183 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15184 spin_unlock_bh(&bp->ntp_fltr_lock);
15185 return;
15186 }
15187 hlist_del_rcu(&fltr->base.hash);
15188 bnxt_del_one_usr_fltr(bp, &fltr->base);
15189 bp->ntp_fltr_count--;
15190 spin_unlock_bh(&bp->ntp_fltr_lock);
15191 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15192 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15193 kfree_rcu(fltr, base.rcu);
15194 }
15195
15196 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15197 {
15198 #ifdef CONFIG_RFS_ACCEL
15199 int i;
15200
15201 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15202 struct hlist_head *head;
15203 struct hlist_node *tmp;
15204 struct bnxt_ntuple_filter *fltr;
15205 int rc;
15206
15207 head = &bp->ntp_fltr_hash_tbl[i];
15208 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15209 bool del = false;
15210
15211 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15212 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15213 continue;
15214 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15215 fltr->flow_id,
15216 fltr->base.sw_id)) {
15217 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15218 fltr);
15219 del = true;
15220 }
15221 } else {
15222 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15223 fltr);
15224 if (rc)
15225 del = true;
15226 else
15227 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15228 }
15229
15230 if (del)
15231 bnxt_del_ntp_filter(bp, fltr);
15232 }
15233 }
15234 #endif
15235 }
15236
15237 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15238 unsigned int entry, struct udp_tunnel_info *ti)
15239 {
15240 struct bnxt *bp = netdev_priv(netdev);
15241 unsigned int cmd;
15242
15243 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15244 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15245 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15246 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15247 else
15248 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15249
15250 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15251 }
15252
15253 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15254 unsigned int entry, struct udp_tunnel_info *ti)
15255 {
15256 struct bnxt *bp = netdev_priv(netdev);
15257 unsigned int cmd;
15258
15259 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15260 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15261 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15262 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15263 else
15264 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15265
15266 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15267 }
15268
15269 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15270 .set_port = bnxt_udp_tunnel_set_port,
15271 .unset_port = bnxt_udp_tunnel_unset_port,
15272 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
15273 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15274 .tables = {
15275 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15276 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15277 },
15278 }, bnxt_udp_tunnels_p7 = {
15279 .set_port = bnxt_udp_tunnel_set_port,
15280 .unset_port = bnxt_udp_tunnel_unset_port,
15281 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
15282 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15283 .tables = {
15284 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15285 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15286 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15287 },
15288 };
15289
15290 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15291 struct net_device *dev, u32 filter_mask,
15292 int nlflags)
15293 {
15294 struct bnxt *bp = netdev_priv(dev);
15295
15296 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15297 nlflags, filter_mask, NULL);
15298 }
15299
15300 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15301 u16 flags, struct netlink_ext_ack *extack)
15302 {
15303 struct bnxt *bp = netdev_priv(dev);
15304 struct nlattr *attr, *br_spec;
15305 int rem, rc = 0;
15306
15307 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15308 return -EOPNOTSUPP;
15309
15310 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15311 if (!br_spec)
15312 return -EINVAL;
15313
15314 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15315 u16 mode;
15316
15317 mode = nla_get_u16(attr);
15318 if (mode == bp->br_mode)
15319 break;
15320
15321 rc = bnxt_hwrm_set_br_mode(bp, mode);
15322 if (!rc)
15323 bp->br_mode = mode;
15324 break;
15325 }
15326 return rc;
15327 }
15328
15329 int bnxt_get_port_parent_id(struct net_device *dev,
15330 struct netdev_phys_item_id *ppid)
15331 {
15332 struct bnxt *bp = netdev_priv(dev);
15333
15334 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15335 return -EOPNOTSUPP;
15336
15337 /* The PF and its VF-reps only support the switchdev framework */
15338 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15339 return -EOPNOTSUPP;
15340
15341 ppid->id_len = sizeof(bp->dsn);
15342 memcpy(ppid->id, bp->dsn, ppid->id_len);
15343
15344 return 0;
15345 }
15346
15347 static const struct net_device_ops bnxt_netdev_ops = {
15348 .ndo_open = bnxt_open,
15349 .ndo_start_xmit = bnxt_start_xmit,
15350 .ndo_stop = bnxt_close,
15351 .ndo_get_stats64 = bnxt_get_stats64,
15352 .ndo_set_rx_mode = bnxt_set_rx_mode,
15353 .ndo_eth_ioctl = bnxt_ioctl,
15354 .ndo_validate_addr = eth_validate_addr,
15355 .ndo_set_mac_address = bnxt_change_mac_addr,
15356 .ndo_change_mtu = bnxt_change_mtu,
15357 .ndo_fix_features = bnxt_fix_features,
15358 .ndo_set_features = bnxt_set_features,
15359 .ndo_features_check = bnxt_features_check,
15360 .ndo_tx_timeout = bnxt_tx_timeout,
15361 #ifdef CONFIG_BNXT_SRIOV
15362 .ndo_get_vf_config = bnxt_get_vf_config,
15363 .ndo_set_vf_mac = bnxt_set_vf_mac,
15364 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
15365 .ndo_set_vf_rate = bnxt_set_vf_bw,
15366 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
15367 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
15368 .ndo_set_vf_trust = bnxt_set_vf_trust,
15369 #endif
15370 .ndo_setup_tc = bnxt_setup_tc,
15371 #ifdef CONFIG_RFS_ACCEL
15372 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
15373 #endif
15374 .ndo_bpf = bnxt_xdp,
15375 .ndo_xdp_xmit = bnxt_xdp_xmit,
15376 .ndo_bridge_getlink = bnxt_bridge_getlink,
15377 .ndo_bridge_setlink = bnxt_bridge_setlink,
15378 };
15379
15380 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15381 struct netdev_queue_stats_rx *stats)
15382 {
15383 struct bnxt *bp = netdev_priv(dev);
15384 struct bnxt_cp_ring_info *cpr;
15385 u64 *sw;
15386
15387 if (!bp->bnapi)
15388 return;
15389
15390 cpr = &bp->bnapi[i]->cp_ring;
15391 sw = cpr->stats.sw_stats;
15392
15393 stats->packets = 0;
15394 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15395 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15396 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15397
15398 stats->bytes = 0;
15399 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15400 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15401 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15402
15403 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15404 }
15405
15406 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15407 struct netdev_queue_stats_tx *stats)
15408 {
15409 struct bnxt *bp = netdev_priv(dev);
15410 struct bnxt_napi *bnapi;
15411 u64 *sw;
15412
15413 if (!bp->tx_ring)
15414 return;
15415
15416 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15417 sw = bnapi->cp_ring.stats.sw_stats;
15418
15419 stats->packets = 0;
15420 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15421 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15422 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15423
15424 stats->bytes = 0;
15425 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15426 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15427 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15428 }
15429
15430 static void bnxt_get_base_stats(struct net_device *dev,
15431 struct netdev_queue_stats_rx *rx,
15432 struct netdev_queue_stats_tx *tx)
15433 {
15434 struct bnxt *bp = netdev_priv(dev);
15435
15436 rx->packets = bp->net_stats_prev.rx_packets;
15437 rx->bytes = bp->net_stats_prev.rx_bytes;
15438 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15439
15440 tx->packets = bp->net_stats_prev.tx_packets;
15441 tx->bytes = bp->net_stats_prev.tx_bytes;
15442 }
15443
15444 static const struct netdev_stat_ops bnxt_stat_ops = {
15445 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
15446 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
15447 .get_base_stats = bnxt_get_base_stats,
15448 };
15449
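/* Queue management hook: allocate a shadow copy of an RX ring (page
 * pools, XDP rxq info, ring memory, aggregation bitmap, and TPA info)
 * so the queue can later be restarted without disturbing the live ring.
 */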
15450 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
15451 {
15452 struct bnxt_rx_ring_info *rxr, *clone;
15453 struct bnxt *bp = netdev_priv(dev);
15454 struct bnxt_ring_struct *ring;
15455 int rc;
15456
15457 if (!bp->rx_ring)
15458 return -ENETDOWN;
15459
15460 rxr = &bp->rx_ring[idx];
15461 clone = qmem;
15462 memcpy(clone, rxr, sizeof(*rxr));
15463 bnxt_init_rx_ring_struct(bp, clone);
15464 bnxt_reset_rx_ring_struct(bp, clone);
15465
15466 clone->rx_prod = 0;
15467 clone->rx_agg_prod = 0;
15468 clone->rx_sw_agg_prod = 0;
15469 clone->rx_next_cons = 0;
15470
15471 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
15472 if (rc)
15473 return rc;
15474
15475 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
15476 if (rc < 0)
15477 goto err_page_pool_destroy;
15478
15479 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
15480 MEM_TYPE_PAGE_POOL,
15481 clone->page_pool);
15482 if (rc)
15483 goto err_rxq_info_unreg;
15484
15485 ring = &clone->rx_ring_struct;
15486 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15487 if (rc)
15488 goto err_free_rx_ring;
15489
15490 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
15491 ring = &clone->rx_agg_ring_struct;
15492 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15493 if (rc)
15494 goto err_free_rx_agg_ring;
15495
15496 rc = bnxt_alloc_rx_agg_bmap(bp, clone);
15497 if (rc)
15498 goto err_free_rx_agg_ring;
15499 }
15500
15501 if (bp->flags & BNXT_FLAG_TPA) {
15502 rc = bnxt_alloc_one_tpa_info(bp, clone);
15503 if (rc)
15504 goto err_free_tpa_info;
15505 }
15506
15507 bnxt_init_one_rx_ring_rxbd(bp, clone);
15508 bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
15509
15510 bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
15511 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15512 bnxt_alloc_one_rx_ring_page(bp, clone, idx);
15513 if (bp->flags & BNXT_FLAG_TPA)
15514 bnxt_alloc_one_tpa_info_data(bp, clone);
15515
15516 return 0;
15517
15518 err_free_tpa_info:
15519 bnxt_free_one_tpa_info(bp, clone);
15520 err_free_rx_agg_ring:
15521 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
15522 err_free_rx_ring:
15523 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
15524 err_rxq_info_unreg:
15525 xdp_rxq_info_unreg(&clone->xdp_rxq);
15526 err_page_pool_destroy:
15527 page_pool_destroy(clone->page_pool);
15528 if (bnxt_separate_head_pool())
15529 page_pool_destroy(clone->head_pool);
15530 clone->page_pool = NULL;
15531 clone->head_pool = NULL;
15532 return rc;
15533 }
15534
15535 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
15536 {
15537 struct bnxt_rx_ring_info *rxr = qmem;
15538 struct bnxt *bp = netdev_priv(dev);
15539 struct bnxt_ring_struct *ring;
15540
15541 bnxt_free_one_rx_ring_skbs(bp, rxr);
15542 bnxt_free_one_tpa_info(bp, rxr);
15543
15544 xdp_rxq_info_unreg(&rxr->xdp_rxq);
15545
15546 page_pool_destroy(rxr->page_pool);
15547 if (bnxt_separate_head_pool())
15548 page_pool_destroy(rxr->head_pool);
15549 rxr->page_pool = NULL;
15550 rxr->head_pool = NULL;
15551
15552 ring = &rxr->rx_ring_struct;
15553 bnxt_free_ring(bp, &ring->ring_mem);
15554
15555 ring = &rxr->rx_agg_ring_struct;
15556 bnxt_free_ring(bp, &ring->ring_mem);
15557
15558 kfree(rxr->rx_agg_bmap);
15559 rxr->rx_agg_bmap = NULL;
15560 }
15561
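/* Copy the ring memory descriptors (page table, page/DMA arrays, vmem)
 * from src to dst for the RX ring and, if aggregation rings are enabled,
 * the aggregation ring as well.
 */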
15562 static void bnxt_copy_rx_ring(struct bnxt *bp,
15563 struct bnxt_rx_ring_info *dst,
15564 struct bnxt_rx_ring_info *src)
15565 {
15566 struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
15567 struct bnxt_ring_struct *dst_ring, *src_ring;
15568 int i;
15569
15570 dst_ring = &dst->rx_ring_struct;
15571 dst_rmem = &dst_ring->ring_mem;
15572 src_ring = &src->rx_ring_struct;
15573 src_rmem = &src_ring->ring_mem;
15574
15575 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
15576 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
15577 WARN_ON(dst_rmem->flags != src_rmem->flags);
15578 WARN_ON(dst_rmem->depth != src_rmem->depth);
15579 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
15580 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
15581
15582 dst_rmem->pg_tbl = src_rmem->pg_tbl;
15583 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
15584 *dst_rmem->vmem = *src_rmem->vmem;
15585 for (i = 0; i < dst_rmem->nr_pages; i++) {
15586 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
15587 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
15588 }
15589
15590 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
15591 return;
15592
15593 dst_ring = &dst->rx_agg_ring_struct;
15594 dst_rmem = &dst_ring->ring_mem;
15595 src_ring = &src->rx_agg_ring_struct;
15596 src_rmem = &src_ring->ring_mem;
15597
15598 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
15599 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
15600 WARN_ON(dst_rmem->flags != src_rmem->flags);
15601 WARN_ON(dst_rmem->depth != src_rmem->depth);
15602 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
15603 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
15604 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
15605
15606 dst_rmem->pg_tbl = src_rmem->pg_tbl;
15607 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
15608 *dst_rmem->vmem = *src_rmem->vmem;
15609 for (i = 0; i < dst_rmem->nr_pages; i++) {
15610 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
15611 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
15612 }
15613
15614 dst->rx_agg_bmap = src->rx_agg_bmap;
15615 }
15616
15617 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
15618 {
15619 struct bnxt *bp = netdev_priv(dev);
15620 struct bnxt_rx_ring_info *rxr, *clone;
15621 struct bnxt_cp_ring_info *cpr;
15622 struct bnxt_vnic_info *vnic;
15623 int i, rc;
15624
15625 rxr = &bp->rx_ring[idx];
15626 clone = qmem;
15627
15628 rxr->rx_prod = clone->rx_prod;
15629 rxr->rx_agg_prod = clone->rx_agg_prod;
15630 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
15631 rxr->rx_next_cons = clone->rx_next_cons;
15632 rxr->rx_tpa = clone->rx_tpa;
15633 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
15634 rxr->page_pool = clone->page_pool;
15635 rxr->head_pool = clone->head_pool;
15636 rxr->xdp_rxq = clone->xdp_rxq;
15637
15638 bnxt_copy_rx_ring(bp, rxr, clone);
15639
15640 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
15641 if (rc)
15642 return rc;
15643 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
15644 if (rc)
15645 goto err_free_hwrm_rx_ring;
15646
15647 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
15648 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15649 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
15650
15651 cpr = &rxr->bnapi->cp_ring;
15652 cpr->sw_stats->rx.rx_resets++;
15653
15654 for (i = 0; i <= bp->nr_vnics; i++) {
15655 vnic = &bp->vnic_info[i];
15656
15657 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
15658 if (rc) {
15659 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
15660 vnic->vnic_id, rc);
15661 return rc;
15662 }
15663 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
15664 bnxt_hwrm_vnic_update(bp, vnic,
15665 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
15666 }
15667
15668 return 0;
15669
15670 err_free_hwrm_rx_ring:
15671 bnxt_hwrm_rx_ring_free(bp, rxr, false);
15672 return rc;
15673 }
15674
15675 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
15676 {
15677 struct bnxt *bp = netdev_priv(dev);
15678 struct bnxt_rx_ring_info *rxr;
15679 struct bnxt_vnic_info *vnic;
15680 int i;
15681
15682 for (i = 0; i <= bp->nr_vnics; i++) {
15683 vnic = &bp->vnic_info[i];
15684 vnic->mru = 0;
15685 bnxt_hwrm_vnic_update(bp, vnic,
15686 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
15687 }
15688 /* Make sure NAPI sees that the VNIC is disabled */
15689 synchronize_net();
15690 rxr = &bp->rx_ring[idx];
15691 cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
15692 bnxt_hwrm_rx_ring_free(bp, rxr, false);
15693 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
15694 rxr->rx_next_cons = 0;
15695 page_pool_disable_direct_recycling(rxr->page_pool);
15696 if (bnxt_separate_head_pool())
15697 page_pool_disable_direct_recycling(rxr->head_pool);
15698
15699 memcpy(qmem, rxr, sizeof(*rxr));
15700 bnxt_init_rx_ring_struct(bp, qmem);
15701
15702 return 0;
15703 }
15704
15705 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
15706 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
15707 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
15708 .ndo_queue_mem_free = bnxt_queue_mem_free,
15709 .ndo_queue_start = bnxt_queue_start,
15710 .ndo_queue_stop = bnxt_queue_stop,
15711 };
15712
15713 static void bnxt_remove_one(struct pci_dev *pdev)
15714 {
15715 struct net_device *dev = pci_get_drvdata(pdev);
15716 struct bnxt *bp = netdev_priv(dev);
15717
15718 if (BNXT_PF(bp))
15719 bnxt_sriov_disable(bp);
15720
15721 bnxt_rdma_aux_device_del(bp);
15722
15723 bnxt_ptp_clear(bp);
15724 unregister_netdev(dev);
15725
15726 bnxt_rdma_aux_device_uninit(bp);
15727
15728 bnxt_free_l2_filters(bp, true);
15729 bnxt_free_ntp_fltrs(bp, true);
15730 WARN_ON(bp->num_rss_ctx);
15731 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15732 /* Flush any pending tasks */
15733 cancel_work_sync(&bp->sp_task);
15734 cancel_delayed_work_sync(&bp->fw_reset_task);
15735 bp->sp_event = 0;
15736
15737 bnxt_dl_fw_reporters_destroy(bp);
15738 bnxt_dl_unregister(bp);
15739 bnxt_shutdown_tc(bp);
15740
15741 bnxt_clear_int_mode(bp);
15742 bnxt_hwrm_func_drv_unrgtr(bp);
15743 bnxt_free_hwrm_resources(bp);
15744 bnxt_hwmon_uninit(bp);
15745 bnxt_ethtool_free(bp);
15746 bnxt_dcb_free(bp);
15747 kfree(bp->ptp_cfg);
15748 bp->ptp_cfg = NULL;
15749 kfree(bp->fw_health);
15750 bp->fw_health = NULL;
15751 bnxt_cleanup_pci(bp);
15752 bnxt_free_ctx_mem(bp, true);
15753 bnxt_free_crash_dump_mem(bp);
15754 kfree(bp->rss_indir_tbl);
15755 bp->rss_indir_tbl = NULL;
15756 bnxt_free_port_stats(bp);
15757 free_netdev(dev);
15758 }
15759
15760 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
15761 {
15762 int rc = 0;
15763 struct bnxt_link_info *link_info = &bp->link_info;
15764
15765 bp->phy_flags = 0;
15766 rc = bnxt_hwrm_phy_qcaps(bp);
15767 if (rc) {
15768 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
15769 rc);
15770 return rc;
15771 }
15772 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
15773 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
15774 else
15775 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
15776
15777 bp->mac_flags = 0;
15778 bnxt_hwrm_mac_qcaps(bp);
15779
15780 if (!fw_dflt)
15781 return 0;
15782
15783 mutex_lock(&bp->link_lock);
15784 rc = bnxt_update_link(bp, false);
15785 if (rc) {
15786 mutex_unlock(&bp->link_lock);
15787 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
15788 rc);
15789 return rc;
15790 }
15791
15792 /* Older firmware does not have supported_auto_speeds, so assume
15793 * that all supported speeds can be autonegotiated.
15794 */
15795 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
15796 link_info->support_auto_speeds = link_info->support_speeds;
15797
15798 bnxt_init_ethtool_link_settings(bp);
15799 mutex_unlock(&bp->link_lock);
15800 return 0;
15801 }
15802
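/* Return the number of MSI-X vectors the device supports, based on the
 * MSI-X capability table size, or 1 if MSI-X is not available.
 */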
15803 static int bnxt_get_max_irq(struct pci_dev *pdev)
15804 {
15805 u16 ctrl;
15806
15807 if (!pdev->msix_cap)
15808 return 1;
15809
15810 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
15811 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
15812 }
15813
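/* Compute the maximum usable RX, TX, and completion rings from the
 * firmware-reported hardware resources, accounting for MSI-X vectors
 * and stat contexts reserved for ULPs, aggregation rings, and
 * chip-specific differences.
 */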
15814 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
15815 int *max_cp)
15816 {
15817 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
15818 int max_ring_grps = 0, max_irq;
15819
15820 *max_tx = hw_resc->max_tx_rings;
15821 *max_rx = hw_resc->max_rx_rings;
15822 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
15823 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
15824 bnxt_get_ulp_msix_num_in_use(bp),
15825 hw_resc->max_stat_ctxs -
15826 bnxt_get_ulp_stat_ctxs_in_use(bp));
15827 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
15828 *max_cp = min_t(int, *max_cp, max_irq);
15829 max_ring_grps = hw_resc->max_hw_ring_grps;
15830 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
15831 *max_cp -= 1;
15832 *max_rx -= 2;
15833 }
15834 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15835 *max_rx >>= 1;
15836 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
15837 int rc;
15838
15839 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
15840 if (rc) {
15841 *max_rx = 0;
15842 *max_tx = 0;
15843 }
15844 /* On P5 chips, max_cp output param should be available NQs */
15845 *max_cp = max_irq;
15846 }
15847 *max_rx = min_t(int, *max_rx, max_ring_grps);
15848 }
15849
15850 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
15851 {
15852 int rx, tx, cp;
15853
15854 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
15855 *max_rx = rx;
15856 *max_tx = tx;
15857 if (!rx || !tx || !cp)
15858 return -ENOMEM;
15859
15860 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
15861 }
15862
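/* Get the ring limits for the default configuration.  If there are not
 * enough rings, retry with aggregation rings disabled, and leave
 * headroom for minimal RoCE resources when the RoCE capability is set.
 */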
15863 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
15864 bool shared)
15865 {
15866 int rc;
15867
15868 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
15869 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
15870 /* Not enough rings, try disabling agg rings. */
15871 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
15872 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
15873 if (rc) {
15874 /* set BNXT_FLAG_AGG_RINGS back for consistency */
15875 bp->flags |= BNXT_FLAG_AGG_RINGS;
15876 return rc;
15877 }
15878 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
15879 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
15880 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
15881 bnxt_set_ring_params(bp);
15882 }
15883
15884 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
15885 int max_cp, max_stat, max_irq;
15886
15887 /* Reserve minimum resources for RoCE */
15888 max_cp = bnxt_get_max_func_cp_rings(bp);
15889 max_stat = bnxt_get_max_func_stat_ctxs(bp);
15890 max_irq = bnxt_get_max_func_irqs(bp);
15891 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
15892 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
15893 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
15894 return 0;
15895
15896 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
15897 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
15898 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
15899 max_cp = min_t(int, max_cp, max_irq);
15900 max_cp = min_t(int, max_cp, max_stat);
15901 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
15902 if (rc)
15903 rc = 0;
15904 }
15905 return rc;
15906 }
15907
15908 /* In the initial default shared ring setting, each shared ring must
15909 * have an RX/TX ring pair.
15910 */
15911 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
15912 {
15913 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
15914 bp->rx_nr_rings = bp->cp_nr_rings;
15915 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
15916 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15917 }
15918
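/* Choose the default ring counts from the CPU count, port count, and
 * the limits returned by bnxt_get_dflt_rings(), set aside MSI-X vectors
 * for ULPs when enough are available, then reserve the rings with
 * firmware.
 */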
15919 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
15920 {
15921 int dflt_rings, max_rx_rings, max_tx_rings, rc;
15922 int avail_msix;
15923
15924 if (!bnxt_can_reserve_rings(bp))
15925 return 0;
15926
15927 if (sh)
15928 bp->flags |= BNXT_FLAG_SHARED_RINGS;
15929 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
15930 /* Reduce default rings on multi-port cards so that total default
15931 * rings do not exceed CPU count.
15932 */
15933 if (bp->port_count > 1) {
15934 int max_rings =
15935 max_t(int, num_online_cpus() / bp->port_count, 1);
15936
15937 dflt_rings = min_t(int, dflt_rings, max_rings);
15938 }
15939 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
15940 if (rc)
15941 return rc;
15942 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
15943 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
15944 if (sh)
15945 bnxt_trim_dflt_sh_rings(bp);
15946 else
15947 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
15948 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15949
15950 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
15951 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
15952 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
15953
15954 bnxt_set_ulp_msix_num(bp, ulp_num_msix);
15955 bnxt_set_dflt_ulp_stat_ctxs(bp);
15956 }
15957
15958 rc = __bnxt_reserve_rings(bp);
15959 if (rc && rc != -ENODEV)
15960 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
15961 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15962 if (sh)
15963 bnxt_trim_dflt_sh_rings(bp);
15964
15965 /* Rings may have been trimmed, re-reserve the trimmed rings. */
15966 if (bnxt_need_reserve_rings(bp)) {
15967 rc = __bnxt_reserve_rings(bp);
15968 if (rc && rc != -ENODEV)
15969 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
15970 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15971 }
15972 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
15973 bp->rx_nr_rings++;
15974 bp->cp_nr_rings++;
15975 }
15976 if (rc) {
15977 bp->tx_nr_rings = 0;
15978 bp->rx_nr_rings = 0;
15979 }
15980 return rc;
15981 }
15982
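/* Set up the default rings and interrupt mode if no TX rings have been
 * configured yet, pausing ULP interrupts while the vectors change.
 */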
15983 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
15984 {
15985 int rc;
15986
15987 if (bp->tx_nr_rings)
15988 return 0;
15989
15990 bnxt_ulp_irq_stop(bp);
15991 bnxt_clear_int_mode(bp);
15992 rc = bnxt_set_dflt_rings(bp, true);
15993 if (rc) {
15994 if (BNXT_VF(bp) && rc == -ENODEV)
15995 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
15996 else
15997 netdev_err(bp->dev, "Not enough rings available.\n");
15998 goto init_dflt_ring_err;
15999 }
16000 rc = bnxt_init_int_mode(bp);
16001 if (rc)
16002 goto init_dflt_ring_err;
16003
16004 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16005
16006 bnxt_set_dflt_rfs(bp);
16007
16008 init_dflt_ring_err:
16009 bnxt_ulp_irq_restart(bp, rc);
16010 return rc;
16011 }
16012
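/* Re-query function resources from firmware and re-initialize the
 * interrupt mode, closing and reopening the NIC if it was running.
 * Must be called with the RTNL lock held.
 */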
16013 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16014 {
16015 int rc;
16016
16017 ASSERT_RTNL();
16018 bnxt_hwrm_func_qcaps(bp);
16019
16020 if (netif_running(bp->dev))
16021 __bnxt_close_nic(bp, true, false);
16022
16023 bnxt_ulp_irq_stop(bp);
16024 bnxt_clear_int_mode(bp);
16025 rc = bnxt_init_int_mode(bp);
16026 bnxt_ulp_irq_restart(bp, rc);
16027
16028 if (netif_running(bp->dev)) {
16029 if (rc)
16030 dev_close(bp->dev);
16031 else
16032 rc = bnxt_open_nic(bp, true, false);
16033 }
16034
16035 return rc;
16036 }
16037
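/* Set the netdev MAC address: the PF uses the firmware-provided MAC,
 * while a VF uses the admin-assigned MAC if valid (or a random one)
 * and then asks the PF to approve it.
 */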
16038 static int bnxt_init_mac_addr(struct bnxt *bp)
16039 {
16040 int rc = 0;
16041
16042 if (BNXT_PF(bp)) {
16043 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16044 } else {
16045 #ifdef CONFIG_BNXT_SRIOV
16046 struct bnxt_vf_info *vf = &bp->vf;
16047 bool strict_approval = true;
16048
16049 if (is_valid_ether_addr(vf->mac_addr)) {
16050 /* overwrite netdev dev_addr with admin VF MAC */
16051 eth_hw_addr_set(bp->dev, vf->mac_addr);
16052 /* Older PF driver or firmware may not approve this
16053 * correctly.
16054 */
16055 strict_approval = false;
16056 } else {
16057 eth_hw_addr_random(bp->dev);
16058 }
16059 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16060 #endif
16061 }
16062 return rc;
16063 }
16064
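/* Read the board part number and serial number from the PCI VPD. */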
16065 static void bnxt_vpd_read_info(struct bnxt *bp)
16066 {
16067 struct pci_dev *pdev = bp->pdev;
16068 unsigned int vpd_size, kw_len;
16069 int pos, size;
16070 u8 *vpd_data;
16071
16072 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16073 if (IS_ERR(vpd_data)) {
16074 pci_warn(pdev, "Unable to read VPD\n");
16075 return;
16076 }
16077
16078 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16079 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16080 if (pos < 0)
16081 goto read_sn;
16082
16083 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16084 memcpy(bp->board_partno, &vpd_data[pos], size);
16085
16086 read_sn:
16087 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16088 PCI_VPD_RO_KEYWORD_SERIALNO,
16089 &kw_len);
16090 if (pos < 0)
16091 goto exit;
16092
16093 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16094 memcpy(bp->board_serialno, &vpd_data[pos], size);
16095 exit:
16096 kfree(vpd_data);
16097 }
16098
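/* Read the PCIe Device Serial Number into @dsn and mark it valid. */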
16099 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16100 {
16101 struct pci_dev *pdev = bp->pdev;
16102 u64 qword;
16103
16104 qword = pci_get_dsn(pdev);
16105 if (!qword) {
16106 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16107 return -EOPNOTSUPP;
16108 }
16109
16110 put_unaligned_le64(qword, dsn);
16111
16112 bp->flags |= BNXT_FLAG_DSN_VALID;
16113 return 0;
16114 }
16115
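/* Map the doorbell BAR (BAR 2) using the firmware-reported doorbell
 * size.
 */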
16116 static int bnxt_map_db_bar(struct bnxt *bp)
16117 {
16118 if (!bp->db_size)
16119 return -ENODEV;
16120 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16121 if (!bp->bar1)
16122 return -ENOMEM;
16123 return 0;
16124 }
16125
16126 void bnxt_print_device_info(struct bnxt *bp)
16127 {
16128 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16129 board_info[bp->board_idx].name,
16130 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16131
16132 pcie_print_link_status(bp->pdev);
16133 }
16134
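/* PCI .probe callback: allocate the netdev, initialize firmware and
 * hardware resources, set up netdev features, default rings and
 * interrupts, register with devlink, and finally register the netdev.
 */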
16135 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16136 {
16137 struct bnxt_hw_resc *hw_resc;
16138 struct net_device *dev;
16139 struct bnxt *bp;
16140 int rc, max_irqs;
16141
16142 if (pci_is_bridge(pdev))
16143 return -ENODEV;
16144
16145 if (!pdev->msix_cap) {
16146 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16147 return -ENODEV;
16148 }
16149
16150 /* Clear any pending DMA transactions from crash kernel
16151 * while loading driver in capture kernel.
16152 */
16153 if (is_kdump_kernel()) {
16154 pci_clear_master(pdev);
16155 pcie_flr(pdev);
16156 }
16157
16158 max_irqs = bnxt_get_max_irq(pdev);
16159 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16160 max_irqs);
16161 if (!dev)
16162 return -ENOMEM;
16163
16164 bp = netdev_priv(dev);
16165 bp->board_idx = ent->driver_data;
16166 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16167 bnxt_set_max_func_irqs(bp, max_irqs);
16168
16169 if (bnxt_vf_pciid(bp->board_idx))
16170 bp->flags |= BNXT_FLAG_VF;
16171
16172 /* No devlink port registration in case of a VF */
16173 if (BNXT_PF(bp))
16174 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16175
16176 rc = bnxt_init_board(pdev, dev);
16177 if (rc < 0)
16178 goto init_err_free;
16179
16180 dev->netdev_ops = &bnxt_netdev_ops;
16181 dev->stat_ops = &bnxt_stat_ops;
16182 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16183 dev->ethtool_ops = &bnxt_ethtool_ops;
16184 pci_set_drvdata(pdev, dev);
16185
16186 rc = bnxt_alloc_hwrm_resources(bp);
16187 if (rc)
16188 goto init_err_pci_clean;
16189
16190 mutex_init(&bp->hwrm_cmd_lock);
16191 mutex_init(&bp->link_lock);
16192
16193 rc = bnxt_fw_init_one_p1(bp);
16194 if (rc)
16195 goto init_err_pci_clean;
16196
16197 if (BNXT_PF(bp))
16198 bnxt_vpd_read_info(bp);
16199
16200 if (BNXT_CHIP_P5_PLUS(bp)) {
16201 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16202 if (BNXT_CHIP_P7(bp))
16203 bp->flags |= BNXT_FLAG_CHIP_P7;
16204 }
16205
16206 rc = bnxt_alloc_rss_indir_tbl(bp);
16207 if (rc)
16208 goto init_err_pci_clean;
16209
16210 rc = bnxt_fw_init_one_p2(bp);
16211 if (rc)
16212 goto init_err_pci_clean;
16213
16214 rc = bnxt_map_db_bar(bp);
16215 if (rc) {
16216 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16217 rc);
16218 goto init_err_pci_clean;
16219 }
16220
16221 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16222 NETIF_F_TSO | NETIF_F_TSO6 |
16223 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16224 NETIF_F_GSO_IPXIP4 |
16225 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16226 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16227 NETIF_F_RXCSUM | NETIF_F_GRO;
16228 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16229 dev->hw_features |= NETIF_F_GSO_UDP_L4;
16230
16231 if (BNXT_SUPPORTS_TPA(bp))
16232 dev->hw_features |= NETIF_F_LRO;
16233
16234 dev->hw_enc_features =
16235 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16236 NETIF_F_TSO | NETIF_F_TSO6 |
16237 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16238 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16239 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16240 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16241 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16242 if (bp->flags & BNXT_FLAG_CHIP_P7)
16243 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16244 else
16245 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16246
16247 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16248 NETIF_F_GSO_GRE_CSUM;
16249 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16250 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16251 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16252 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16253 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16254 if (BNXT_SUPPORTS_TPA(bp))
16255 dev->hw_features |= NETIF_F_GRO_HW;
16256 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16257 if (dev->features & NETIF_F_GRO_HW)
16258 dev->features &= ~NETIF_F_LRO;
16259 dev->priv_flags |= IFF_UNICAST_FLT;
16260
16261 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16262 if (bp->tso_max_segs)
16263 netif_set_tso_max_segs(dev, bp->tso_max_segs);
16264
16265 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16266 NETDEV_XDP_ACT_RX_SG;
16267
16268 #ifdef CONFIG_BNXT_SRIOV
16269 init_waitqueue_head(&bp->sriov_cfg_wait);
16270 #endif
16271 if (BNXT_SUPPORTS_TPA(bp)) {
16272 bp->gro_func = bnxt_gro_func_5730x;
16273 if (BNXT_CHIP_P4(bp))
16274 bp->gro_func = bnxt_gro_func_5731x;
16275 else if (BNXT_CHIP_P5_PLUS(bp))
16276 bp->gro_func = bnxt_gro_func_5750x;
16277 }
16278 if (!BNXT_CHIP_P4_PLUS(bp))
16279 bp->flags |= BNXT_FLAG_DOUBLE_DB;
16280
16281 rc = bnxt_init_mac_addr(bp);
16282 if (rc) {
16283 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16284 rc = -EADDRNOTAVAIL;
16285 goto init_err_pci_clean;
16286 }
16287
16288 if (BNXT_PF(bp)) {
16289 /* Read the adapter's DSN to use as the eswitch switch_id */
16290 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16291 }
16292
16293 /* MTU range: 60 - FW defined max */
16294 dev->min_mtu = ETH_ZLEN;
16295 dev->max_mtu = bp->max_mtu;
16296
16297 rc = bnxt_probe_phy(bp, true);
16298 if (rc)
16299 goto init_err_pci_clean;
16300
16301 hw_resc = &bp->hw_resc;
16302 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16303 BNXT_L2_FLTR_MAX_FLTR;
16304 /* Older firmware may not report these filters properly */
16305 if (bp->max_fltr < BNXT_MAX_FLTR)
16306 bp->max_fltr = BNXT_MAX_FLTR;
16307 bnxt_init_l2_fltr_tbl(bp);
16308 __bnxt_set_rx_skb_mode(bp, false);
16309 bnxt_set_tpa_flags(bp);
16310 bnxt_init_ring_params(bp);
16311 bnxt_set_ring_params(bp);
16312 bnxt_rdma_aux_device_init(bp);
16313 rc = bnxt_set_dflt_rings(bp, true);
16314 if (rc) {
16315 if (BNXT_VF(bp) && rc == -ENODEV) {
16316 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16317 } else {
16318 netdev_err(bp->dev, "Not enough rings available.\n");
16319 rc = -ENOMEM;
16320 }
16321 goto init_err_pci_clean;
16322 }
16323
16324 bnxt_fw_init_one_p3(bp);
16325
16326 bnxt_init_dflt_coal(bp);
16327
16328 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16329 bp->flags |= BNXT_FLAG_STRIP_VLAN;
16330
16331 rc = bnxt_init_int_mode(bp);
16332 if (rc)
16333 goto init_err_pci_clean;
16334
16335 /* No TC has been set yet and rings may have been trimmed due to
16336 * limited MSIX, so we re-initialize the TX rings per TC.
16337 */
16338 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16339
16340 if (BNXT_PF(bp)) {
16341 if (!bnxt_pf_wq) {
16342 bnxt_pf_wq =
16343 create_singlethread_workqueue("bnxt_pf_wq");
16344 if (!bnxt_pf_wq) {
16345 dev_err(&pdev->dev, "Unable to create workqueue.\n");
16346 rc = -ENOMEM;
16347 goto init_err_pci_clean;
16348 }
16349 }
16350 rc = bnxt_init_tc(bp);
16351 if (rc)
16352 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16353 rc);
16354 }
16355
16356 bnxt_inv_fw_health_reg(bp);
16357 rc = bnxt_dl_register(bp);
16358 if (rc)
16359 goto init_err_dl;
16360
16361 INIT_LIST_HEAD(&bp->usr_fltr_list);
16362
16363 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16364 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16365 if (BNXT_SUPPORTS_QUEUE_API(bp))
16366 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16367
16368 rc = register_netdev(dev);
16369 if (rc)
16370 goto init_err_cleanup;
16371
16372 bnxt_dl_fw_reporters_create(bp);
16373
16374 bnxt_rdma_aux_device_add(bp);
16375
16376 bnxt_print_device_info(bp);
16377
16378 pci_save_state(pdev);
16379
16380 return 0;
16381 init_err_cleanup:
16382 bnxt_rdma_aux_device_uninit(bp);
16383 bnxt_dl_unregister(bp);
16384 init_err_dl:
16385 bnxt_shutdown_tc(bp);
16386 bnxt_clear_int_mode(bp);
16387
16388 init_err_pci_clean:
16389 bnxt_hwrm_func_drv_unrgtr(bp);
16390 bnxt_free_hwrm_resources(bp);
16391 bnxt_hwmon_uninit(bp);
16392 bnxt_ethtool_free(bp);
16393 bnxt_ptp_clear(bp);
16394 kfree(bp->ptp_cfg);
16395 bp->ptp_cfg = NULL;
16396 kfree(bp->fw_health);
16397 bp->fw_health = NULL;
16398 bnxt_cleanup_pci(bp);
16399 bnxt_free_ctx_mem(bp, true);
16400 bnxt_free_crash_dump_mem(bp);
16401 kfree(bp->rss_indir_tbl);
16402 bp->rss_indir_tbl = NULL;
16403
16404 init_err_free:
16405 free_netdev(dev);
16406 return rc;
16407 }
16408
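/* PCI .shutdown callback: close the netdev, clear the interrupt mode,
 * and arm Wake-on-LAN before system power-off if configured.
 */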
16409 static void bnxt_shutdown(struct pci_dev *pdev)
16410 {
16411 struct net_device *dev = pci_get_drvdata(pdev);
16412 struct bnxt *bp;
16413
16414 if (!dev)
16415 return;
16416
16417 rtnl_lock();
16418 bp = netdev_priv(dev);
16419 if (!bp)
16420 goto shutdown_exit;
16421
16422 if (netif_running(dev))
16423 dev_close(dev);
16424
16425 bnxt_ptp_clear(bp);
16426 bnxt_clear_int_mode(bp);
16427 pci_disable_device(pdev);
16428
16429 if (system_state == SYSTEM_POWER_OFF) {
16430 pci_wake_from_d3(pdev, bp->wol);
16431 pci_set_power_state(pdev, PCI_D3hot);
16432 }
16433
16434 shutdown_exit:
16435 rtnl_unlock();
16436 }
16437
16438 #ifdef CONFIG_PM_SLEEP
16439 static int bnxt_suspend(struct device *device)
16440 {
16441 struct net_device *dev = dev_get_drvdata(device);
16442 struct bnxt *bp = netdev_priv(dev);
16443 int rc = 0;
16444
16445 bnxt_ulp_stop(bp);
16446
16447 rtnl_lock();
16448 if (netif_running(dev)) {
16449 netif_device_detach(dev);
16450 rc = bnxt_close(dev);
16451 }
16452 bnxt_hwrm_func_drv_unrgtr(bp);
16453 bnxt_ptp_clear(bp);
16454 pci_disable_device(bp->pdev);
16455 bnxt_free_ctx_mem(bp, false);
16456 rtnl_unlock();
16457 return rc;
16458 }
16459
16460 static int bnxt_resume(struct device *device)
16461 {
16462 struct net_device *dev = dev_get_drvdata(device);
16463 struct bnxt *bp = netdev_priv(dev);
16464 int rc = 0;
16465
16466 rtnl_lock();
16467 rc = pci_enable_device(bp->pdev);
16468 if (rc) {
16469 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
16470 rc);
16471 goto resume_exit;
16472 }
16473 pci_set_master(bp->pdev);
16474 if (bnxt_hwrm_ver_get(bp)) {
16475 rc = -ENODEV;
16476 goto resume_exit;
16477 }
16478 rc = bnxt_hwrm_func_reset(bp);
16479 if (rc) {
16480 rc = -EBUSY;
16481 goto resume_exit;
16482 }
16483
16484 rc = bnxt_hwrm_func_qcaps(bp);
16485 if (rc)
16486 goto resume_exit;
16487
16488 bnxt_clear_reservations(bp, true);
16489
16490 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
16491 rc = -ENODEV;
16492 goto resume_exit;
16493 }
16494 if (bp->fw_crash_mem)
16495 bnxt_hwrm_crash_dump_mem_cfg(bp);
16496
16497 if (bnxt_ptp_init(bp)) {
16498 kfree(bp->ptp_cfg);
16499 bp->ptp_cfg = NULL;
16500 }
16501 bnxt_get_wol_settings(bp);
16502 if (netif_running(dev)) {
16503 rc = bnxt_open(dev);
16504 if (!rc)
16505 netif_device_attach(dev);
16506 }
16507
16508 resume_exit:
16509 rtnl_unlock();
16510 bnxt_ulp_start(bp, rc);
16511 if (!rc)
16512 bnxt_reenable_sriov(bp);
16513 return rc;
16514 }
16515
16516 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
16517 #define BNXT_PM_OPS (&bnxt_pm_ops)
16518
16519 #else
16520
16521 #define BNXT_PM_OPS NULL
16522
16523 #endif /* CONFIG_PM_SLEEP */
16524
16525 /**
16526 * bnxt_io_error_detected - called when PCI error is detected
16527 * @pdev: Pointer to PCI device
16528 * @state: The current pci connection state
16529 *
16530 * This function is called after a PCI bus error affecting
16531 * this device has been detected.
16532 */
16533 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
16534 pci_channel_state_t state)
16535 {
16536 struct net_device *netdev = pci_get_drvdata(pdev);
16537 struct bnxt *bp = netdev_priv(netdev);
16538 bool abort = false;
16539
16540 netdev_info(netdev, "PCI I/O error detected\n");
16541
16542 bnxt_ulp_stop(bp);
16543
16544 rtnl_lock();
16545 netif_device_detach(netdev);
16546
16547 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
16548 netdev_err(bp->dev, "Firmware reset already in progress\n");
16549 abort = true;
16550 }
16551
16552 if (abort || state == pci_channel_io_perm_failure) {
16553 rtnl_unlock();
16554 return PCI_ERS_RESULT_DISCONNECT;
16555 }
16556
16557 /* Link is not reliable anymore if state is pci_channel_io_frozen
16558 * so we disable bus master to prevent any potential bad DMAs before
16559 * freeing kernel memory.
16560 */
16561 if (state == pci_channel_io_frozen) {
16562 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
16563 bnxt_fw_fatal_close(bp);
16564 }
16565
16566 if (netif_running(netdev))
16567 __bnxt_close_nic(bp, true, true);
16568
16569 if (pci_is_enabled(pdev))
16570 pci_disable_device(pdev);
16571 bnxt_free_ctx_mem(bp, false);
16572 rtnl_unlock();
16573
16574 	/* Request a slot reset. */
16575 return PCI_ERS_RESULT_NEED_RESET;
16576 }
16577
16578 /**
16579 * bnxt_io_slot_reset - called after the pci bus has been reset.
16580 * @pdev: Pointer to PCI device
16581 *
16582 * Restart the card from scratch, as if from a cold-boot.
16583 * At this point, the card has experienced a hard reset,
16584 * followed by fixups by BIOS, and has its config space
16585 * set up identically to what it was at cold boot.
16586 */
16587 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
16588 {
16589 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
16590 struct net_device *netdev = pci_get_drvdata(pdev);
16591 struct bnxt *bp = netdev_priv(netdev);
16592 int retry = 0;
16593 int err = 0;
16594 int off;
16595
16596 netdev_info(bp->dev, "PCI Slot Reset\n");
16597
16598 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
16599 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
16600 msleep(900);
16601
16602 rtnl_lock();
16603
16604 if (pci_enable_device(pdev)) {
16605 dev_err(&pdev->dev,
16606 "Cannot re-enable PCI device after reset.\n");
16607 } else {
16608 pci_set_master(pdev);
16609 		/* Upon a fatal error, the device's internal logic that latches
16610 		 * the BAR values is reset and is restored only by rewriting the
16611 		 * BARs.
16612 		 *
16613 		 * Since pci_restore_state() does not rewrite a BAR whose value
16614 		 * matches the previously saved value, the driver must first
16615 		 * write the BARs to 0 to force a restore after a fatal error.
16616 */
16617 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
16618 &bp->state)) {
16619 for (off = PCI_BASE_ADDRESS_0;
16620 off <= PCI_BASE_ADDRESS_5; off += 4)
16621 pci_write_config_dword(bp->pdev, off, 0);
16622 }
16623 pci_restore_state(pdev);
16624 pci_save_state(pdev);
16625
16626 bnxt_inv_fw_health_reg(bp);
16627 bnxt_try_map_fw_health_reg(bp);
16628
16629 /* In some PCIe AER scenarios, firmware may take up to
16630 * 10 seconds to become ready in the worst case.
16631 */
16632 do {
16633 err = bnxt_try_recover_fw(bp);
16634 if (!err)
16635 break;
16636 retry++;
16637 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
16638
16639 if (err) {
16640 dev_err(&pdev->dev, "Firmware not ready\n");
16641 goto reset_exit;
16642 }
16643
16644 err = bnxt_hwrm_func_reset(bp);
16645 if (!err)
16646 result = PCI_ERS_RESULT_RECOVERED;
16647
16648 bnxt_ulp_irq_stop(bp);
16649 bnxt_clear_int_mode(bp);
16650 err = bnxt_init_int_mode(bp);
16651 bnxt_ulp_irq_restart(bp, err);
16652 }
16653
16654 reset_exit:
16655 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16656 bnxt_clear_reservations(bp, true);
16657 rtnl_unlock();
16658
16659 return result;
16660 }
16661
16662 /**
16663 * bnxt_io_resume - called when traffic can start flowing again.
16664 * @pdev: Pointer to PCI device
16665 *
16666 * This callback is called when the error recovery driver tells
16667 * us that it's OK to resume normal operation.
16668 */
16669 static void bnxt_io_resume(struct pci_dev *pdev)
16670 {
16671 struct net_device *netdev = pci_get_drvdata(pdev);
16672 struct bnxt *bp = netdev_priv(netdev);
16673 int err;
16674
16675 netdev_info(bp->dev, "PCI Slot Resume\n");
16676 rtnl_lock();
16677
16678 err = bnxt_hwrm_func_qcaps(bp);
16679 if (!err) {
16680 if (netif_running(netdev))
16681 err = bnxt_open(netdev);
16682 else
16683 err = bnxt_reserve_rings(bp, true);
16684 }
16685
16686 if (!err)
16687 netif_device_attach(netdev);
16688
16689 rtnl_unlock();
16690 bnxt_ulp_start(bp, err);
16691 if (!err)
16692 bnxt_reenable_sriov(bp);
16693 }
16694
16695 static const struct pci_error_handlers bnxt_err_handler = {
16696 .error_detected = bnxt_io_error_detected,
16697 .slot_reset = bnxt_io_slot_reset,
16698 .resume = bnxt_io_resume
16699 };
16700
16701 static struct pci_driver bnxt_pci_driver = {
16702 .name = DRV_MODULE_NAME,
16703 .id_table = bnxt_pci_tbl,
16704 .probe = bnxt_init_one,
16705 .remove = bnxt_remove_one,
16706 .shutdown = bnxt_shutdown,
16707 .driver.pm = BNXT_PM_OPS,
16708 .err_handler = &bnxt_err_handler,
16709 #if defined(CONFIG_BNXT_SRIOV)
16710 .sriov_configure = bnxt_sriov_configure,
16711 #endif
16712 };
16713
16714 static int __init bnxt_init(void)
16715 {
16716 int err;
16717
16718 bnxt_debug_init();
16719 err = pci_register_driver(&bnxt_pci_driver);
16720 if (err) {
16721 bnxt_debug_exit();
16722 return err;
16723 }
16724
16725 return 0;
16726 }
16727
16728 static void __exit bnxt_exit(void)
16729 {
16730 pci_unregister_driver(&bnxt_pci_driver);
16731 if (bnxt_pf_wq)
16732 destroy_workqueue(bnxt_pf_wq);
16733 bnxt_debug_exit();
16734 }
16735
16736 module_init(bnxt_init);
16737 module_exit(bnxt_exit);
16738