1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62
63 #include "bnxt.h"
64 #include "bnxt_hwrm.h"
65 #include "bnxt_ulp.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77
78 #define BNXT_TX_TIMEOUT (5 * HZ)
79 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
80 NETIF_MSG_TX_ERR)
81
82 MODULE_IMPORT_NS("NETDEV_INTERNAL");
83 MODULE_LICENSE("GPL");
84 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
85
86 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
87 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
88
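/* Threshold for the TX push path: packets no longer than this (with no
 * special lflags and an otherwise empty ring) are copied into the push
 * buffer and written directly through the doorbell BAR in bnxt_start_xmit()
 * instead of being DMA mapped.
 */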
89 #define BNXT_TX_PUSH_THRESH 164
90
91 /* indexed by enum board_idx */
92 static const struct {
93 char *name;
94 } board_info[] = {
95 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
96 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
97 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
98 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
99 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
100 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
101 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
102 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
103 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
104 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
105 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
106 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
108 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
109 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
110 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
111 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
112 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
113 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
114 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
115 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
116 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
117 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
118 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
119 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
120 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
121 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
122 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
123 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
124 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
126 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
127 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
128 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
129 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
130 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
131 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
132 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
133 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
134 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
135 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
136 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
137 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
138 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
139 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
140 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
141 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
142 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
143 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
144 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
145 [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
146 };
147
148 static const struct pci_device_id bnxt_pci_tbl[] = {
149 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
150 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
151 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
152 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
153 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
154 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
155 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
156 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
157 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
158 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
159 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
160 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
161 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
162 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
163 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
164 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
165 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
166 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
167 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
168 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
169 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
171 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
172 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
173 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
175 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
176 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
178 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
179 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
183 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
184 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
185 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
186 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
187 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
188 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
189 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
190 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
191 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
193 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
194 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
197 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
198 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
199 #ifdef CONFIG_BNXT_SRIOV
200 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
201 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
204 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
207 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
208 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
209 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
210 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
216 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
217 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
218 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
219 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
220 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
221 { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
222 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
223 #endif
224 { 0 }
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
228
229 static const u16 bnxt_vf_req_snif[] = {
230 HWRM_FUNC_CFG,
231 HWRM_FUNC_VF_CFG,
232 HWRM_PORT_PHY_QCFG,
233 HWRM_CFA_L2_FILTER_ALLOC,
234 };
235
236 static const u16 bnxt_async_events_arr[] = {
237 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
238 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
239 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
240 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
241 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
242 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
243 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
244 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
245 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
246 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
247 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
248 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
249 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
250 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
251 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
252 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
253 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
254 };
255
256 const u16 bnxt_bstore_to_trace[] = {
257 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
258 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
259 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
260 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
261 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
262 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
263 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
264 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
265 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
266 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
267 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
268 [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
269 [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
270 };
271
272 static struct workqueue_struct *bnxt_pf_wq;
273
274 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
275 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
276 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
277
278 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
279 .ports = {
280 .src = 0,
281 .dst = 0,
282 },
283 .addrs = {
284 .v6addrs = {
285 .src = BNXT_IPV6_MASK_NONE,
286 .dst = BNXT_IPV6_MASK_NONE,
287 },
288 },
289 };
290
291 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
292 .ports = {
293 .src = cpu_to_be16(0xffff),
294 .dst = cpu_to_be16(0xffff),
295 },
296 .addrs = {
297 .v6addrs = {
298 .src = BNXT_IPV6_MASK_ALL,
299 .dst = BNXT_IPV6_MASK_ALL,
300 },
301 },
302 };
303
304 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
305 .ports = {
306 .src = cpu_to_be16(0xffff),
307 .dst = cpu_to_be16(0xffff),
308 },
309 .addrs = {
310 .v4addrs = {
311 .src = cpu_to_be32(0xffffffff),
312 .dst = cpu_to_be32(0xffffffff),
313 },
314 },
315 };
316
static bool bnxt_vf_pciid(enum board_idx idx)
318 {
319 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
320 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
321 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
322 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
323 idx == NETXTREME_E_P7_VF_HV);
324 }
325
326 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
327 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
328
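/* Doorbell helpers: P5+ chips use 64-bit NQ/CQ doorbells written with
 * bnxt_writeq(), while older chips use 32-bit completion ring doorbells
 * written with writel().  P7 additionally uses DBR_TYPE_NQ_MASK for the NQ.
 */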
329 #define BNXT_DB_CQ(db, idx) \
330 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
331
332 #define BNXT_DB_NQ_P5(db, idx) \
333 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
334 (db)->doorbell)
335
336 #define BNXT_DB_NQ_P7(db, idx) \
337 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
338 DB_RING_IDX(db, idx), (db)->doorbell)
339
340 #define BNXT_DB_CQ_ARM(db, idx) \
341 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
342
343 #define BNXT_DB_NQ_ARM_P5(db, idx) \
344 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
345 DB_RING_IDX(db, idx), (db)->doorbell)
346
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
348 {
349 if (bp->flags & BNXT_FLAG_CHIP_P7)
350 BNXT_DB_NQ_P7(db, idx);
351 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
352 BNXT_DB_NQ_P5(db, idx);
353 else
354 BNXT_DB_CQ(db, idx);
355 }
356
static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
358 {
359 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
360 BNXT_DB_NQ_ARM_P5(db, idx);
361 else
362 BNXT_DB_CQ_ARM(db, idx);
363 }
364
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
366 {
367 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
368 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
369 DB_RING_IDX(db, idx), db->doorbell);
370 else
371 BNXT_DB_CQ(db, idx);
372 }
373
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
375 {
376 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
377 return;
378
379 if (BNXT_PF(bp))
380 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
381 else
382 schedule_delayed_work(&bp->fw_reset_task, delay);
383 }
384
static void __bnxt_queue_sp_work(struct bnxt *bp)
386 {
387 if (BNXT_PF(bp))
388 queue_work(bnxt_pf_wq, &bp->sp_task);
389 else
390 schedule_work(&bp->sp_task);
391 }
392
static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
394 {
395 set_bit(event, &bp->sp_event);
396 __bnxt_queue_sp_work(bp);
397 }
398
static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
400 {
401 if (!rxr->bnapi->in_reset) {
402 rxr->bnapi->in_reset = true;
403 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
404 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
405 else
406 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
407 __bnxt_queue_sp_work(bp);
408 }
409 rxr->rx_next_cons = 0xffff;
410 }
411
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
414 {
415 struct bnxt_napi *bnapi = txr->bnapi;
416
417 if (bnapi->tx_fault)
418 return;
419
420 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
421 txr->txq_index, txr->tx_hw_cons,
422 txr->tx_cons, txr->tx_prod, curr);
423 WARN_ON_ONCE(1);
424 bnapi->tx_fault = 1;
425 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
426 }
427
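/* TX length hint lookup table, indexed by packet length >> 9: index 0 is
 * 512 bytes and smaller, index 1 is 512-1023, indexes 2-3 are 1024-2047,
 * and everything above maps to the 2048-and-larger hint.
 */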
428 const u16 bnxt_lhint_arr[] = {
429 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
430 TX_BD_FLAGS_LHINT_512_TO_1023,
431 TX_BD_FLAGS_LHINT_1024_TO_2047,
432 TX_BD_FLAGS_LHINT_1024_TO_2047,
433 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
444 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
445 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
446 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
447 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
448 };
449
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
451 {
452 struct metadata_dst *md_dst = skb_metadata_dst(skb);
453
454 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
455 return 0;
456
457 return md_dst->u.port_info.port_id;
458 }
459
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
462 {
463 /* Sync BD data before updating doorbell */
464 wmb();
465 bnxt_db_write(bp, &txr->tx_db, prod);
466 txr->kick_pending = 0;
467 }
468
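/* Main transmit routine.  Small packets may take the inline push path;
 * otherwise the head and each fragment are DMA mapped into long TX BDs and
 * the doorbell is rung (or deferred via kick_pending when xmit_more is set).
 */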
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
470 {
471 struct bnxt *bp = netdev_priv(dev);
472 struct tx_bd *txbd, *txbd0;
473 struct tx_bd_ext *txbd1;
474 struct netdev_queue *txq;
475 int i;
476 dma_addr_t mapping;
477 unsigned int length, pad = 0;
478 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
479 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
480 struct pci_dev *pdev = bp->pdev;
481 u16 prod, last_frag, txts_prod;
482 struct bnxt_tx_ring_info *txr;
483 struct bnxt_sw_tx_bd *tx_buf;
484 __le32 lflags = 0;
485 skb_frag_t *frag;
486
487 i = skb_get_queue_mapping(skb);
488 if (unlikely(i >= bp->tx_nr_rings)) {
489 dev_kfree_skb_any(skb);
490 dev_core_stats_tx_dropped_inc(dev);
491 return NETDEV_TX_OK;
492 }
493
494 txq = netdev_get_tx_queue(dev, i);
495 txr = &bp->tx_ring[bp->tx_ring_map[i]];
496 prod = txr->tx_prod;
497
498 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
499 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
500 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
501 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
502 if (skb_linearize(skb)) {
503 dev_kfree_skb_any(skb);
504 dev_core_stats_tx_dropped_inc(dev);
505 return NETDEV_TX_OK;
506 }
507 }
508 #endif
509 free_size = bnxt_tx_avail(bp, txr);
510 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
511 /* We must have raced with NAPI cleanup */
512 if (net_ratelimit() && txr->kick_pending)
513 netif_warn(bp, tx_err, dev,
514 "bnxt: ring busy w/ flush pending!\n");
515 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
516 bp->tx_wake_thresh))
517 return NETDEV_TX_BUSY;
518 }
519
520 if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
521 goto tx_free;
522
523 length = skb->len;
524 len = skb_headlen(skb);
525 last_frag = skb_shinfo(skb)->nr_frags;
526
527 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
528
529 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
530 tx_buf->skb = skb;
531 tx_buf->nr_frags = last_frag;
532
533 vlan_tag_flags = 0;
534 cfa_action = bnxt_xmit_get_cfa_action(skb);
535 if (skb_vlan_tag_present(skb)) {
536 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
537 skb_vlan_tag_get(skb);
538 /* Currently supports 8021Q, 8021AD vlan offloads
539 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
540 */
541 if (skb->vlan_proto == htons(ETH_P_8021Q))
542 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
543 }
544
545 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
546 ptp->tx_tstamp_en) {
547 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
548 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
549 tx_buf->is_ts_pkt = 1;
550 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
551 } else if (!skb_is_gso(skb)) {
552 u16 seq_id, hdr_off;
553
554 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
555 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
556 if (vlan_tag_flags)
557 hdr_off += VLAN_HLEN;
558 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
559 tx_buf->is_ts_pkt = 1;
560 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
561
562 ptp->txts_req[txts_prod].tx_seqid = seq_id;
563 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
564 tx_buf->txts_prod = txts_prod;
565 }
566 }
567 }
568 if (unlikely(skb->no_fcs))
569 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
570
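	/* Push path: the ring is completely empty, the packet fits in the
	 * push buffer, its frags are readable and no special lflags are
	 * needed, so copy it into the push BDs and write it straight
	 * through the doorbell.
	 */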
571 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
572 skb_frags_readable(skb) && !lflags) {
573 struct tx_push_buffer *tx_push_buf = txr->tx_push;
574 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
575 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
576 void __iomem *db = txr->tx_db.doorbell;
577 void *pdata = tx_push_buf->data;
578 u64 *end;
579 int j, push_len;
580
581 /* Set COAL_NOW to be ready quickly for the next push */
582 tx_push->tx_bd_len_flags_type =
583 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
584 TX_BD_TYPE_LONG_TX_BD |
585 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
586 TX_BD_FLAGS_COAL_NOW |
587 TX_BD_FLAGS_PACKET_END |
588 TX_BD_CNT(2));
589
590 if (skb->ip_summed == CHECKSUM_PARTIAL)
591 tx_push1->tx_bd_hsize_lflags =
592 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
593 else
594 tx_push1->tx_bd_hsize_lflags = 0;
595
596 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
597 tx_push1->tx_bd_cfa_action =
598 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
599
600 end = pdata + length;
601 end = PTR_ALIGN(end, 8) - 1;
602 *end = 0;
603
604 skb_copy_from_linear_data(skb, pdata, len);
605 pdata += len;
606 for (j = 0; j < last_frag; j++) {
607 void *fptr;
608
609 frag = &skb_shinfo(skb)->frags[j];
610 fptr = skb_frag_address_safe(frag);
611 if (!fptr)
612 goto normal_tx;
613
614 memcpy(pdata, fptr, skb_frag_size(frag));
615 pdata += skb_frag_size(frag);
616 }
617
618 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
619 txbd->tx_bd_haddr = txr->data_mapping;
620 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
621 prod = NEXT_TX(prod);
622 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
623 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
624 memcpy(txbd, tx_push1, sizeof(*txbd));
625 prod = NEXT_TX(prod);
626 tx_push->doorbell =
627 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
628 DB_RING_IDX(&txr->tx_db, prod));
629 WRITE_ONCE(txr->tx_prod, prod);
630
631 tx_buf->is_push = 1;
632 netdev_tx_sent_queue(txq, skb->len);
633 wmb(); /* Sync is_push and byte queue before pushing data */
634
635 push_len = (length + sizeof(*tx_push) + 7) / 8;
636 if (push_len > 16) {
637 __iowrite64_copy(db, tx_push_buf, 16);
638 __iowrite32_copy(db + 4, tx_push_buf + 1,
639 (push_len - 16) << 1);
640 } else {
641 __iowrite64_copy(db, tx_push_buf, push_len);
642 }
643
644 goto tx_done;
645 }
646
647 normal_tx:
648 if (length < BNXT_MIN_PKT_SIZE) {
649 pad = BNXT_MIN_PKT_SIZE - length;
650 if (skb_pad(skb, pad))
651 /* SKB already freed. */
652 goto tx_kick_pending;
653 length = BNXT_MIN_PKT_SIZE;
654 }
655
656 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
657
658 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
659 goto tx_free;
660
661 dma_unmap_addr_set(tx_buf, mapping, mapping);
662 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
663 TX_BD_CNT(last_frag + 2);
664
665 txbd->tx_bd_haddr = cpu_to_le64(mapping);
666 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
667
668 prod = NEXT_TX(prod);
669 txbd1 = (struct tx_bd_ext *)
670 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
671
672 txbd1->tx_bd_hsize_lflags = lflags;
673 if (skb_is_gso(skb)) {
674 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
675 u32 hdr_len;
676
677 if (skb->encapsulation) {
678 if (udp_gso)
679 hdr_len = skb_inner_transport_offset(skb) +
680 sizeof(struct udphdr);
681 else
682 hdr_len = skb_inner_tcp_all_headers(skb);
683 } else if (udp_gso) {
684 hdr_len = skb_transport_offset(skb) +
685 sizeof(struct udphdr);
686 } else {
687 hdr_len = skb_tcp_all_headers(skb);
688 }
689
690 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
691 TX_BD_FLAGS_T_IPID |
692 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
693 length = skb_shinfo(skb)->gso_size;
694 txbd1->tx_bd_mss = cpu_to_le32(length);
695 length += hdr_len;
696 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
697 txbd1->tx_bd_hsize_lflags |=
698 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
699 txbd1->tx_bd_mss = 0;
700 }
701
702 length >>= 9;
703 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
704 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
705 skb->len);
706 i = 0;
707 goto tx_dma_error;
708 }
709 flags |= bnxt_lhint_arr[length];
710 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
711
712 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
713 txbd1->tx_bd_cfa_action =
714 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
715 txbd0 = txbd;
716 for (i = 0; i < last_frag; i++) {
717 frag = &skb_shinfo(skb)->frags[i];
718 prod = NEXT_TX(prod);
719 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
720
721 len = skb_frag_size(frag);
722 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
723 DMA_TO_DEVICE);
724
725 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
726 goto tx_dma_error;
727
728 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
729 netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
730 mapping, mapping);
731
732 txbd->tx_bd_haddr = cpu_to_le64(mapping);
733
734 flags = len << TX_BD_LEN_SHIFT;
735 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
736 }
737
738 flags &= ~TX_BD_LEN;
739 txbd->tx_bd_len_flags_type =
740 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
741 TX_BD_FLAGS_PACKET_END);
742
743 netdev_tx_sent_queue(txq, skb->len);
744
745 skb_tx_timestamp(skb);
746
747 prod = NEXT_TX(prod);
748 WRITE_ONCE(txr->tx_prod, prod);
749
750 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
751 bnxt_txr_db_kick(bp, txr, prod);
752 } else {
753 if (free_size >= bp->tx_wake_thresh)
754 txbd0->tx_bd_len_flags_type |=
755 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
756 txr->kick_pending = 1;
757 }
758
759 tx_done:
760
761 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
762 if (netdev_xmit_more() && !tx_buf->is_push) {
763 txbd0->tx_bd_len_flags_type &=
764 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
765 bnxt_txr_db_kick(bp, txr, prod);
766 }
767
768 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
769 bp->tx_wake_thresh);
770 }
771 return NETDEV_TX_OK;
772
773 tx_dma_error:
774 last_frag = i;
775
776 /* start back at beginning and unmap skb */
777 prod = txr->tx_prod;
778 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
779 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
780 skb_headlen(skb), DMA_TO_DEVICE);
781 prod = NEXT_TX(prod);
782
783 /* unmap remaining mapped pages */
784 for (i = 0; i < last_frag; i++) {
785 prod = NEXT_TX(prod);
786 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
787 frag = &skb_shinfo(skb)->frags[i];
788 netmem_dma_unmap_page_attrs(&pdev->dev,
789 dma_unmap_addr(tx_buf, mapping),
790 skb_frag_size(frag),
791 DMA_TO_DEVICE, 0);
792 }
793
794 tx_free:
795 dev_kfree_skb_any(skb);
796 tx_kick_pending:
797 if (BNXT_TX_PTP_IS_SET(lflags)) {
798 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
799 atomic64_inc(&bp->ptp_cfg->stats.ts_err);
800 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
801 /* set SKB to err so PTP worker will clean up */
802 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
803 }
804 if (txr->kick_pending)
805 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
806 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
807 dev_core_stats_tx_dropped_inc(dev);
808 return NETDEV_TX_OK;
809 }
810
/* Returns true if some TX completions were left unprocessed. */
static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int budget)
814 {
815 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
816 struct pci_dev *pdev = bp->pdev;
817 u16 hw_cons = txr->tx_hw_cons;
818 unsigned int tx_bytes = 0;
819 u16 cons = txr->tx_cons;
820 skb_frag_t *frag;
821 int tx_pkts = 0;
822 bool rc = false;
823
824 while (RING_TX(bp, cons) != hw_cons) {
825 struct bnxt_sw_tx_bd *tx_buf;
826 struct sk_buff *skb;
827 bool is_ts_pkt;
828 int j, last;
829
830 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
831 skb = tx_buf->skb;
832
833 if (unlikely(!skb)) {
834 bnxt_sched_reset_txr(bp, txr, cons);
835 return rc;
836 }
837
838 is_ts_pkt = tx_buf->is_ts_pkt;
839 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
840 rc = true;
841 break;
842 }
843
844 cons = NEXT_TX(cons);
845 tx_pkts++;
846 tx_bytes += skb->len;
847 tx_buf->skb = NULL;
848 tx_buf->is_ts_pkt = 0;
849
850 if (tx_buf->is_push) {
851 tx_buf->is_push = 0;
852 goto next_tx_int;
853 }
854
855 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
856 skb_headlen(skb), DMA_TO_DEVICE);
857 last = tx_buf->nr_frags;
858
859 for (j = 0; j < last; j++) {
860 frag = &skb_shinfo(skb)->frags[j];
861 cons = NEXT_TX(cons);
862 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
863 netmem_dma_unmap_page_attrs(&pdev->dev,
864 dma_unmap_addr(tx_buf,
865 mapping),
866 skb_frag_size(frag),
867 DMA_TO_DEVICE, 0);
868 }
869 if (unlikely(is_ts_pkt)) {
870 if (BNXT_CHIP_P5(bp)) {
871 /* PTP worker takes ownership of the skb */
872 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
873 skb = NULL;
874 }
875 }
876
877 next_tx_int:
878 cons = NEXT_TX(cons);
879
880 dev_consume_skb_any(skb);
881 }
882
883 WRITE_ONCE(txr->tx_cons, cons);
884
885 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
886 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
887 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
888
889 return rc;
890 }
891
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
893 {
894 struct bnxt_tx_ring_info *txr;
895 bool more = false;
896 int i;
897
898 bnxt_for_each_napi_tx(i, bnapi, txr) {
899 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
900 more |= __bnxt_tx_int(bp, txr, budget);
901 }
902 if (!more)
903 bnapi->events &= ~BNXT_TX_CMP_EVENT;
904 }
905
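/* RX header buffers come from a separate head_pool when the ring requests
 * one or when the system page size exceeds BNXT_RX_PAGE_SIZE.
 */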
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
907 {
908 return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
909 }
910
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
915 {
916 struct page *page;
917
918 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
919 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
920 BNXT_RX_PAGE_SIZE);
921 } else {
922 page = page_pool_dev_alloc_pages(rxr->page_pool);
923 *offset = 0;
924 }
925 if (!page)
926 return NULL;
927
928 *mapping = page_pool_get_dma_addr(page) + *offset;
929 return page;
930 }
931
static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
936 {
937 netmem_ref netmem;
938
939 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
940 netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
941 } else {
942 netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
943 *offset = 0;
944 }
945 if (!netmem)
946 return 0;
947
948 *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
949 return netmem;
950 }
951
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       struct bnxt_rx_ring_info *rxr,
				       gfp_t gfp)
955 {
956 unsigned int offset;
957 struct page *page;
958
959 page = page_pool_alloc_frag(rxr->head_pool, &offset,
960 bp->rx_buf_size, gfp);
961 if (!page)
962 return NULL;
963
964 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
965 return page_address(page) + offset;
966 }
967
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
970 {
971 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
972 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
973 dma_addr_t mapping;
974
975 if (BNXT_RX_PAGE_MODE(bp)) {
976 unsigned int offset;
977 struct page *page =
978 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
979
980 if (!page)
981 return -ENOMEM;
982
983 mapping += bp->rx_dma_offset;
984 rx_buf->data = page;
985 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
986 } else {
987 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
988
989 if (!data)
990 return -ENOMEM;
991
992 rx_buf->data = data;
993 rx_buf->data_ptr = data + bp->rx_offset;
994 }
995 rx_buf->mapping = mapping;
996
997 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
998 return 0;
999 }
1000
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
1002 {
1003 u16 prod = rxr->rx_prod;
1004 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1005 struct bnxt *bp = rxr->bnapi->bp;
1006 struct rx_bd *cons_bd, *prod_bd;
1007
1008 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1009 cons_rx_buf = &rxr->rx_buf_ring[cons];
1010
1011 prod_rx_buf->data = data;
1012 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1013
1014 prod_rx_buf->mapping = cons_rx_buf->mapping;
1015
1016 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1017 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1018
1019 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1020 }
1021
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1023 {
1024 u16 next, max = rxr->rx_agg_bmap_size;
1025
1026 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1027 if (next >= max)
1028 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1029 return next;
1030 }
1031
static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
				u16 prod, gfp_t gfp)
1034 {
1035 struct rx_bd *rxbd =
1036 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1037 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1038 u16 sw_prod = rxr->rx_sw_agg_prod;
1039 unsigned int offset = 0;
1040 dma_addr_t mapping;
1041 netmem_ref netmem;
1042
1043 netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
1044 if (!netmem)
1045 return -ENOMEM;
1046
1047 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1048 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1049
1050 __set_bit(sw_prod, rxr->rx_agg_bmap);
1051 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1052 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1053
1054 rx_agg_buf->netmem = netmem;
1055 rx_agg_buf->offset = offset;
1056 rx_agg_buf->mapping = mapping;
1057 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1058 rxbd->rx_bd_opaque = sw_prod;
1059 return 0;
1060 }
1061
static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
1065 {
1066 struct rx_agg_cmp *agg;
1067
1068 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1069 agg = (struct rx_agg_cmp *)
1070 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1071 return agg;
1072 }
1073
static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
1077 {
1078 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1079
1080 return &tpa_info->agg_arr[curr];
1081 }
1082
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
1085 {
1086 struct bnxt_napi *bnapi = cpr->bnapi;
1087 struct bnxt *bp = bnapi->bp;
1088 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1089 u16 prod = rxr->rx_agg_prod;
1090 u16 sw_prod = rxr->rx_sw_agg_prod;
1091 bool p5_tpa = false;
1092 u32 i;
1093
1094 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1095 p5_tpa = true;
1096
1097 for (i = 0; i < agg_bufs; i++) {
1098 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1099 struct rx_agg_cmp *agg;
1100 struct rx_bd *prod_bd;
1101 netmem_ref netmem;
1102 u16 cons;
1103
1104 if (p5_tpa)
1105 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1106 else
1107 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1108 cons = agg->rx_agg_cmp_opaque;
1109 __clear_bit(cons, rxr->rx_agg_bmap);
1110
1111 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1112 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1113
1114 __set_bit(sw_prod, rxr->rx_agg_bmap);
1115 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1116 cons_rx_buf = &rxr->rx_agg_ring[cons];
1117
1118 /* It is possible for sw_prod to be equal to cons, so
1119 * set cons_rx_buf->netmem to 0 first.
1120 */
1121 netmem = cons_rx_buf->netmem;
1122 cons_rx_buf->netmem = 0;
1123 prod_rx_buf->netmem = netmem;
1124 prod_rx_buf->offset = cons_rx_buf->offset;
1125
1126 prod_rx_buf->mapping = cons_rx_buf->mapping;
1127
1128 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1129
1130 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1131 prod_bd->rx_bd_opaque = sw_prod;
1132
1133 prod = NEXT_RX_AGG(prod);
1134 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1135 }
1136 rxr->rx_agg_prod = prod;
1137 rxr->rx_sw_agg_prod = sw_prod;
1138 }
1139
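/* Build an skb around a full RX page (page mode).  A replacement buffer is
 * allocated first; on failure the received buffer is returned to the ring
 * and NULL is returned so the packet is dropped.
 */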
static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
1145 {
1146 unsigned int len = offset_and_len & 0xffff;
1147 struct page *page = data;
1148 u16 prod = rxr->rx_prod;
1149 struct sk_buff *skb;
1150 int err;
1151
1152 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1153 if (unlikely(err)) {
1154 bnxt_reuse_rx_data(rxr, cons, data);
1155 return NULL;
1156 }
1157 dma_addr -= bp->rx_dma_offset;
1158 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1159 bp->rx_dir);
1160 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1161 if (!skb) {
1162 page_pool_recycle_direct(rxr->page_pool, page);
1163 return NULL;
1164 }
1165 skb_mark_for_recycle(skb);
1166 skb_reserve(skb, bp->rx_offset);
1167 __skb_put(skb, len);
1168
1169 return skb;
1170 }
1171
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
1177 {
1178 unsigned int payload = offset_and_len >> 16;
1179 unsigned int len = offset_and_len & 0xffff;
1180 skb_frag_t *frag;
1181 struct page *page = data;
1182 u16 prod = rxr->rx_prod;
1183 struct sk_buff *skb;
1184 int off, err;
1185
1186 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1187 if (unlikely(err)) {
1188 bnxt_reuse_rx_data(rxr, cons, data);
1189 return NULL;
1190 }
1191 dma_addr -= bp->rx_dma_offset;
1192 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1193 bp->rx_dir);
1194
1195 if (unlikely(!payload))
1196 payload = eth_get_headlen(bp->dev, data_ptr, len);
1197
1198 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1199 if (!skb) {
1200 page_pool_recycle_direct(rxr->page_pool, page);
1201 return NULL;
1202 }
1203
1204 skb_mark_for_recycle(skb);
1205 off = (void *)data_ptr - page_address(page);
1206 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1207 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1208 payload + NET_IP_ALIGN);
1209
1210 frag = &skb_shinfo(skb)->frags[0];
1211 skb_frag_size_sub(frag, payload);
1212 skb_frag_off_add(frag, payload);
1213 skb->data_len -= payload;
1214 skb->tail += payload;
1215
1216 return skb;
1217 }
1218
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
1224 {
1225 u16 prod = rxr->rx_prod;
1226 struct sk_buff *skb;
1227 int err;
1228
1229 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1230 if (unlikely(err)) {
1231 bnxt_reuse_rx_data(rxr, cons, data);
1232 return NULL;
1233 }
1234
1235 skb = napi_build_skb(data, bp->rx_buf_size);
1236 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1237 bp->rx_dir);
1238 if (!skb) {
1239 page_pool_free_va(rxr->head_pool, data, true);
1240 return NULL;
1241 }
1242
1243 skb_mark_for_recycle(skb);
1244 skb_reserve(skb, bp->rx_offset);
1245 skb_put(skb, offset_and_len & 0xffff);
1246 return skb;
1247 }
1248
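/* Attach the aggregation buffers for one packet to either an skb or an
 * xdp_buff as netmem frags, refilling the aggregation ring as we go.  If a
 * refill allocation fails, the remaining completions are recycled with
 * bnxt_reuse_rx_agg_bufs() and 0 is returned so the caller can drop the
 * packet.
 */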
static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u16 idx, u32 agg_bufs, bool tpa,
				 struct sk_buff *skb,
				 struct xdp_buff *xdp)
1254 {
1255 struct bnxt_napi *bnapi = cpr->bnapi;
1256 struct skb_shared_info *shinfo;
1257 struct bnxt_rx_ring_info *rxr;
1258 u32 i, total_frag_len = 0;
1259 bool p5_tpa = false;
1260 u16 prod;
1261
1262 rxr = bnapi->rx_ring;
1263 prod = rxr->rx_agg_prod;
1264
1265 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1266 p5_tpa = true;
1267
1268 if (skb)
1269 shinfo = skb_shinfo(skb);
1270 else
1271 shinfo = xdp_get_shared_info_from_buff(xdp);
1272
1273 for (i = 0; i < agg_bufs; i++) {
1274 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1275 struct rx_agg_cmp *agg;
1276 u16 cons, frag_len;
1277 netmem_ref netmem;
1278
1279 if (p5_tpa)
1280 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1281 else
1282 agg = bnxt_get_agg(bp, cpr, idx, i);
1283 cons = agg->rx_agg_cmp_opaque;
1284 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1285 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1286
1287 cons_rx_buf = &rxr->rx_agg_ring[cons];
1288 if (skb) {
1289 skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1290 cons_rx_buf->offset,
1291 frag_len, BNXT_RX_PAGE_SIZE);
1292 } else {
1293 skb_frag_t *frag = &shinfo->frags[i];
1294
1295 skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1296 cons_rx_buf->offset,
1297 frag_len);
1298 shinfo->nr_frags = i + 1;
1299 }
1300 __clear_bit(cons, rxr->rx_agg_bmap);
1301
1302 /* It is possible for bnxt_alloc_rx_netmem() to allocate
1303 * a sw_prod index that equals the cons index, so we
1304 * need to clear the cons entry now.
1305 */
1306 netmem = cons_rx_buf->netmem;
1307 cons_rx_buf->netmem = 0;
1308
1309 if (xdp && netmem_is_pfmemalloc(netmem))
1310 xdp_buff_set_frag_pfmemalloc(xdp);
1311
1312 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1313 if (skb) {
1314 skb->len -= frag_len;
1315 skb->data_len -= frag_len;
1316 skb->truesize -= BNXT_RX_PAGE_SIZE;
1317 }
1318
1319 --shinfo->nr_frags;
1320 cons_rx_buf->netmem = netmem;
1321
1322 /* Update prod since possibly some netmems have been
1323 * allocated already.
1324 */
1325 rxr->rx_agg_prod = prod;
1326 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1327 return 0;
1328 }
1329
1330 page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1331 BNXT_RX_PAGE_SIZE);
1332
1333 total_frag_len += frag_len;
1334 prod = NEXT_RX_AGG(prod);
1335 }
1336 rxr->rx_agg_prod = prod;
1337 return total_frag_len;
1338 }
1339
static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
					       struct bnxt_cp_ring_info *cpr,
					       struct sk_buff *skb, u16 idx,
					       u32 agg_bufs, bool tpa)
1344 {
1345 u32 total_frag_len = 0;
1346
1347 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1348 skb, NULL);
1349 if (!total_frag_len) {
1350 skb_mark_for_recycle(skb);
1351 dev_kfree_skb(skb);
1352 return NULL;
1353 }
1354
1355 return skb;
1356 }
1357
static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
				   struct bnxt_cp_ring_info *cpr,
				   struct xdp_buff *xdp, u16 idx,
				   u32 agg_bufs, bool tpa)
1362 {
1363 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1364 u32 total_frag_len = 0;
1365
1366 if (!xdp_buff_has_frags(xdp))
1367 shinfo->nr_frags = 0;
1368
1369 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1370 NULL, xdp);
1371 if (total_frag_len) {
1372 xdp_buff_set_frags_flag(xdp);
1373 shinfo->nr_frags = agg_bufs;
1374 shinfo->xdp_frags_size = total_frag_len;
1375 }
1376 return total_frag_len;
1377 }
1378
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
1381 {
1382 u16 last;
1383 struct rx_agg_cmp *agg;
1384
1385 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1386 last = RING_CMP(*raw_cons);
1387 agg = (struct rx_agg_cmp *)
1388 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1389 return RX_AGG_CMP_VALID(agg, *raw_cons);
1390 }
1391
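/* Copybreak path: copy a small received packet into a freshly allocated
 * skb so the original RX buffer can be left in place.  The buffer is synced
 * for the CPU before the copy and handed back to the device afterwards.
 */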
static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
				      unsigned int len,
				      dma_addr_t mapping)
1395 {
1396 struct bnxt *bp = bnapi->bp;
1397 struct pci_dev *pdev = bp->pdev;
1398 struct sk_buff *skb;
1399
1400 skb = napi_alloc_skb(&bnapi->napi, len);
1401 if (!skb)
1402 return NULL;
1403
1404 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1405 bp->rx_dir);
1406
1407 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1408 len + NET_IP_ALIGN);
1409
1410 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1411 bp->rx_dir);
1412
1413 skb_put(skb, len);
1414
1415 return skb;
1416 }
1417
static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
				     unsigned int len,
				     dma_addr_t mapping)
1421 {
1422 return bnxt_copy_data(bnapi, data, len, mapping);
1423 }
1424
static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
				     struct xdp_buff *xdp,
				     unsigned int len,
				     dma_addr_t mapping)
1429 {
1430 unsigned int metasize = 0;
1431 u8 *data = xdp->data;
1432 struct sk_buff *skb;
1433
1434 len = xdp->data_end - xdp->data_meta;
1435 metasize = xdp->data - xdp->data_meta;
1436 data = xdp->data_meta;
1437
1438 skb = bnxt_copy_data(bnapi, data, len, mapping);
1439 if (!skb)
1440 return skb;
1441
1442 if (metasize) {
1443 skb_metadata_set(skb, metasize);
1444 __skb_pull(skb, metasize);
1445 }
1446
1447 return skb;
1448 }
1449
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
1452 {
1453 struct rx_cmp *rxcmp = cmp;
1454 u32 tmp_raw_cons = *raw_cons;
1455 u8 cmp_type, agg_bufs = 0;
1456
1457 cmp_type = RX_CMP_TYPE(rxcmp);
1458
1459 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1460 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1461 RX_CMP_AGG_BUFS) >>
1462 RX_CMP_AGG_BUFS_SHIFT;
1463 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1464 struct rx_tpa_end_cmp *tpa_end = cmp;
1465
1466 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1467 return 0;
1468
1469 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1470 }
1471
1472 if (agg_bufs) {
1473 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1474 return -EBUSY;
1475 }
1476 *raw_cons = tmp_raw_cons;
1477 return 0;
1478 }
1479
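/* P5+ TPA aggregation IDs from hardware are remapped to a free software
 * index tracked in agg_idx_bmap; the mapping is recorded in agg_id_tbl so
 * later TPA completions can find the same bnxt_tpa_info slot.
 */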
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1481 {
1482 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1483 u16 idx = agg_id & MAX_TPA_P5_MASK;
1484
1485 if (test_bit(idx, map->agg_idx_bmap))
1486 idx = find_first_zero_bit(map->agg_idx_bmap,
1487 BNXT_AGG_IDX_BMAP_SIZE);
1488 __set_bit(idx, map->agg_idx_bmap);
1489 map->agg_id_tbl[agg_id] = idx;
1490 return idx;
1491 }
1492
static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1494 {
1495 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1496
1497 __clear_bit(idx, map->agg_idx_bmap);
1498 }
1499
static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1501 {
1502 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1503
1504 return map->agg_id_tbl[agg_id];
1505 }
1506
static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
			      struct rx_tpa_start_cmp *tpa_start,
			      struct rx_tpa_start_cmp_ext *tpa_start1)
1510 {
1511 tpa_info->cfa_code_valid = 1;
1512 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1513 tpa_info->vlan_valid = 0;
1514 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1515 tpa_info->vlan_valid = 1;
1516 tpa_info->metadata =
1517 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1518 }
1519 }
1520
static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
				 struct rx_tpa_start_cmp *tpa_start,
				 struct rx_tpa_start_cmp_ext *tpa_start1)
1524 {
1525 tpa_info->vlan_valid = 0;
1526 if (TPA_START_VLAN_VALID(tpa_start)) {
1527 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1528 u32 vlan_proto = ETH_P_8021Q;
1529
1530 tpa_info->vlan_valid = 1;
1531 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1532 vlan_proto = ETH_P_8021AD;
1533 tpa_info->metadata = vlan_proto << 16 |
1534 TPA_START_METADATA0_TCI(tpa_start1);
1535 }
1536 }
1537
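/* Handle a TPA_START completion: swap the ring buffer with the buffer held
 * in the per-aggregation tpa_info, record length, RSS hash, GSO type and
 * VLAN metadata, and schedule an RX ring reset if the completion's opaque
 * index does not match the expected consumer index.
 */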
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
1541 {
1542 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1543 struct bnxt_tpa_info *tpa_info;
1544 u16 cons, prod, agg_id;
1545 struct rx_bd *prod_bd;
1546 dma_addr_t mapping;
1547
1548 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1549 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1550 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1551 } else {
1552 agg_id = TPA_START_AGG_ID(tpa_start);
1553 }
1554 cons = tpa_start->rx_tpa_start_cmp_opaque;
1555 prod = rxr->rx_prod;
1556 cons_rx_buf = &rxr->rx_buf_ring[cons];
1557 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1558 tpa_info = &rxr->rx_tpa[agg_id];
1559
1560 if (unlikely(cons != rxr->rx_next_cons ||
1561 TPA_START_ERROR(tpa_start))) {
1562 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1563 cons, rxr->rx_next_cons,
1564 TPA_START_ERROR_CODE(tpa_start1));
1565 bnxt_sched_reset_rxr(bp, rxr);
1566 return;
1567 }
1568 prod_rx_buf->data = tpa_info->data;
1569 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1570
1571 mapping = tpa_info->mapping;
1572 prod_rx_buf->mapping = mapping;
1573
1574 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1575
1576 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1577
1578 tpa_info->data = cons_rx_buf->data;
1579 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1580 cons_rx_buf->data = NULL;
1581 tpa_info->mapping = cons_rx_buf->mapping;
1582
1583 tpa_info->len =
1584 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1585 RX_TPA_START_CMP_LEN_SHIFT;
1586 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1587 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1588 tpa_info->gso_type = SKB_GSO_TCPV4;
1589 if (TPA_START_IS_IPV6(tpa_start1))
1590 tpa_info->gso_type = SKB_GSO_TCPV6;
1591 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1592 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1593 TPA_START_HASH_TYPE(tpa_start) == 3)
1594 tpa_info->gso_type = SKB_GSO_TCPV6;
1595 tpa_info->rss_hash =
1596 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1597 } else {
1598 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1599 tpa_info->gso_type = 0;
1600 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1601 }
1602 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1603 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1604 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1605 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1606 else
1607 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1608 tpa_info->agg_count = 0;
1609
1610 rxr->rx_prod = NEXT_RX(prod);
1611 cons = RING_RX(bp, NEXT_RX(cons));
1612 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1613 cons_rx_buf = &rxr->rx_buf_ring[cons];
1614
1615 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1616 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1617 cons_rx_buf->data = NULL;
1618 }
1619
1620 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1621 {
1622 if (agg_bufs)
1623 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1624 }
1625
1626 #ifdef CONFIG_INET
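/* Set the UDP tunnel GSO type (with or without outer checksum) based
 * on the outer IP/UDP headers at skb->data.
 */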
1627 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1628 {
1629 struct udphdr *uh = NULL;
1630
1631 if (ip_proto == htons(ETH_P_IP)) {
1632 struct iphdr *iph = (struct iphdr *)skb->data;
1633
1634 if (iph->protocol == IPPROTO_UDP)
1635 uh = (struct udphdr *)(iph + 1);
1636 } else {
1637 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1638
1639 if (iph->nexthdr == IPPROTO_UDP)
1640 uh = (struct udphdr *)(iph + 1);
1641 }
1642 if (uh) {
1643 if (uh->check)
1644 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1645 else
1646 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1647 }
1648 }
1649 #endif
1650
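/* GRO setup for 5731X-class chips: locate the inner IP and TCP headers
 * using the offsets in hdr_info (adjusting for internal loopback
 * packets), seed the TCP pseudo-header checksum, and flag tunneled
 * packets for GRO.
 */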
1651 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1652 int payload_off, int tcp_ts,
1653 struct sk_buff *skb)
1654 {
1655 #ifdef CONFIG_INET
1656 struct tcphdr *th;
1657 int len, nw_off;
1658 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1659 u32 hdr_info = tpa_info->hdr_info;
1660 bool loopback = false;
1661
1662 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1663 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1664 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1665
1666 /* If the packet is an internal loopback packet, the offsets will
1667 * have an extra 4 bytes.
1668 */
1669 if (inner_mac_off == 4) {
1670 loopback = true;
1671 } else if (inner_mac_off > 4) {
1672 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1673 ETH_HLEN - 2));
1674
1675 		/* We only support inner IPv4/IPv6.  If we don't see the
1676 * correct protocol ID, it must be a loopback packet where
1677 * the offsets are off by 4.
1678 */
1679 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1680 loopback = true;
1681 }
1682 if (loopback) {
1683 /* internal loopback packet, subtract all offsets by 4 */
1684 inner_ip_off -= 4;
1685 inner_mac_off -= 4;
1686 outer_ip_off -= 4;
1687 }
1688
1689 nw_off = inner_ip_off - ETH_HLEN;
1690 skb_set_network_header(skb, nw_off);
1691 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1692 struct ipv6hdr *iph = ipv6_hdr(skb);
1693
1694 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1695 len = skb->len - skb_transport_offset(skb);
1696 th = tcp_hdr(skb);
1697 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1698 } else {
1699 struct iphdr *iph = ip_hdr(skb);
1700
1701 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1702 len = skb->len - skb_transport_offset(skb);
1703 th = tcp_hdr(skb);
1704 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1705 }
1706
1707 if (inner_mac_off) { /* tunnel */
1708 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1709 ETH_HLEN - 2));
1710
1711 bnxt_gro_tunnel(skb, proto);
1712 }
1713 #endif
1714 return skb;
1715 }
1716
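/* GRO setup for 5750X-class chips: the inner header offsets come
 * straight from hdr_info; unlike the 5731x path, no TCP pseudo-header
 * checksum fixup is done here.
 */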
1717 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1718 int payload_off, int tcp_ts,
1719 struct sk_buff *skb)
1720 {
1721 #ifdef CONFIG_INET
1722 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1723 u32 hdr_info = tpa_info->hdr_info;
1724 int iphdr_len, nw_off;
1725
1726 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1727 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1728 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1729
1730 nw_off = inner_ip_off - ETH_HLEN;
1731 skb_set_network_header(skb, nw_off);
1732 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1733 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1734 skb_set_transport_header(skb, nw_off + iphdr_len);
1735
1736 if (inner_mac_off) { /* tunnel */
1737 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1738 ETH_HLEN - 2));
1739
1740 bnxt_gro_tunnel(skb, proto);
1741 }
1742 #endif
1743 return skb;
1744 }
1745
1746 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1747 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1748
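/* GRO setup for 5730X-class chips: reconstruct the header offsets from
 * the TPA payload offset and the fixed IPv4/IPv6 + TCP header sizes
 * (plus the TCP timestamp option when present), then seed the TCP
 * pseudo-header checksum.
 */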
1749 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1750 int payload_off, int tcp_ts,
1751 struct sk_buff *skb)
1752 {
1753 #ifdef CONFIG_INET
1754 struct tcphdr *th;
1755 int len, nw_off, tcp_opt_len = 0;
1756
1757 if (tcp_ts)
1758 tcp_opt_len = 12;
1759
1760 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1761 struct iphdr *iph;
1762
1763 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1764 ETH_HLEN;
1765 skb_set_network_header(skb, nw_off);
1766 iph = ip_hdr(skb);
1767 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1768 len = skb->len - skb_transport_offset(skb);
1769 th = tcp_hdr(skb);
1770 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1771 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1772 struct ipv6hdr *iph;
1773
1774 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1775 ETH_HLEN;
1776 skb_set_network_header(skb, nw_off);
1777 iph = ipv6_hdr(skb);
1778 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1779 len = skb->len - skb_transport_offset(skb);
1780 th = tcp_hdr(skb);
1781 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1782 } else {
1783 dev_kfree_skb_any(skb);
1784 return NULL;
1785 }
1786
1787 if (nw_off) /* tunnel */
1788 bnxt_gro_tunnel(skb, skb->protocol);
1789 #endif
1790 return skb;
1791 }
1792
1793 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1794 struct bnxt_tpa_info *tpa_info,
1795 struct rx_tpa_end_cmp *tpa_end,
1796 struct rx_tpa_end_cmp_ext *tpa_end1,
1797 struct sk_buff *skb)
1798 {
1799 #ifdef CONFIG_INET
1800 int payload_off;
1801 u16 segs;
1802
1803 segs = TPA_END_TPA_SEGS(tpa_end);
1804 if (segs == 1)
1805 return skb;
1806
1807 NAPI_GRO_CB(skb)->count = segs;
1808 skb_shinfo(skb)->gso_size =
1809 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1810 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1811 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1812 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1813 else
1814 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1815 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1816 if (likely(skb))
1817 tcp_gro_complete(skb);
1818 #endif
1819 return skb;
1820 }
1821
1822 /* Given the cfa_code of a received packet, determine which
1823 * netdev (vf-rep or PF) the packet is destined to.
1824 */
1825 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1826 {
1827 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1828
1829 /* if vf-rep dev is NULL, it must belong to the PF */
1830 return dev ? dev : bp->dev;
1831 }
1832
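/* Handle a TPA_END completion: reclaim the aggregation context, build
 * an skb from the TPA buffer (copying small packets, otherwise handing
 * the buffer to the skb and replacing it with a fresh one), attach any
 * aggregation fragments, then apply VLAN/RSS/checksum metadata and
 * optionally finish GRO.
 */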
1833 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1834 struct bnxt_cp_ring_info *cpr,
1835 u32 *raw_cons,
1836 struct rx_tpa_end_cmp *tpa_end,
1837 struct rx_tpa_end_cmp_ext *tpa_end1,
1838 u8 *event)
1839 {
1840 struct bnxt_napi *bnapi = cpr->bnapi;
1841 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1842 struct net_device *dev = bp->dev;
1843 u8 *data_ptr, agg_bufs;
1844 unsigned int len;
1845 struct bnxt_tpa_info *tpa_info;
1846 dma_addr_t mapping;
1847 struct sk_buff *skb;
1848 u16 idx = 0, agg_id;
1849 void *data;
1850 bool gro;
1851
1852 if (unlikely(bnapi->in_reset)) {
1853 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1854
1855 if (rc < 0)
1856 return ERR_PTR(-EBUSY);
1857 return NULL;
1858 }
1859
1860 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1861 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1862 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1863 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1864 tpa_info = &rxr->rx_tpa[agg_id];
1865 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1866 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1867 agg_bufs, tpa_info->agg_count);
1868 agg_bufs = tpa_info->agg_count;
1869 }
1870 tpa_info->agg_count = 0;
1871 *event |= BNXT_AGG_EVENT;
1872 bnxt_free_agg_idx(rxr, agg_id);
1873 idx = agg_id;
1874 gro = !!(bp->flags & BNXT_FLAG_GRO);
1875 } else {
1876 agg_id = TPA_END_AGG_ID(tpa_end);
1877 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1878 tpa_info = &rxr->rx_tpa[agg_id];
1879 idx = RING_CMP(*raw_cons);
1880 if (agg_bufs) {
1881 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1882 return ERR_PTR(-EBUSY);
1883
1884 *event |= BNXT_AGG_EVENT;
1885 idx = NEXT_CMP(idx);
1886 }
1887 gro = !!TPA_END_GRO(tpa_end);
1888 }
1889 data = tpa_info->data;
1890 data_ptr = tpa_info->data_ptr;
1891 prefetch(data_ptr);
1892 len = tpa_info->len;
1893 mapping = tpa_info->mapping;
1894
1895 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1896 bnxt_abort_tpa(cpr, idx, agg_bufs);
1897 if (agg_bufs > MAX_SKB_FRAGS)
1898 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1899 agg_bufs, (int)MAX_SKB_FRAGS);
1900 return NULL;
1901 }
1902
1903 if (len <= bp->rx_copybreak) {
1904 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1905 if (!skb) {
1906 bnxt_abort_tpa(cpr, idx, agg_bufs);
1907 cpr->sw_stats->rx.rx_oom_discards += 1;
1908 return NULL;
1909 }
1910 } else {
1911 u8 *new_data;
1912 dma_addr_t new_mapping;
1913
1914 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1915 GFP_ATOMIC);
1916 if (!new_data) {
1917 bnxt_abort_tpa(cpr, idx, agg_bufs);
1918 cpr->sw_stats->rx.rx_oom_discards += 1;
1919 return NULL;
1920 }
1921
1922 tpa_info->data = new_data;
1923 tpa_info->data_ptr = new_data + bp->rx_offset;
1924 tpa_info->mapping = new_mapping;
1925
1926 skb = napi_build_skb(data, bp->rx_buf_size);
1927 dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1928 bp->rx_buf_use_size, bp->rx_dir);
1929
1930 if (!skb) {
1931 page_pool_free_va(rxr->head_pool, data, true);
1932 bnxt_abort_tpa(cpr, idx, agg_bufs);
1933 cpr->sw_stats->rx.rx_oom_discards += 1;
1934 return NULL;
1935 }
1936 skb_mark_for_recycle(skb);
1937 skb_reserve(skb, bp->rx_offset);
1938 skb_put(skb, len);
1939 }
1940
1941 if (agg_bufs) {
1942 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1943 true);
1944 if (!skb) {
1945 /* Page reuse already handled by bnxt_rx_pages(). */
1946 cpr->sw_stats->rx.rx_oom_discards += 1;
1947 return NULL;
1948 }
1949 }
1950
1951 if (tpa_info->cfa_code_valid)
1952 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1953 skb->protocol = eth_type_trans(skb, dev);
1954
1955 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1956 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1957
1958 if (tpa_info->vlan_valid &&
1959 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1960 __be16 vlan_proto = htons(tpa_info->metadata >>
1961 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1962 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1963
1964 if (eth_type_vlan(vlan_proto)) {
1965 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1966 } else {
1967 dev_kfree_skb(skb);
1968 return NULL;
1969 }
1970 }
1971
1972 skb_checksum_none_assert(skb);
1973 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1974 skb->ip_summed = CHECKSUM_UNNECESSARY;
1975 skb->csum_level =
1976 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1977 }
1978
1979 if (gro)
1980 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1981
1982 return skb;
1983 }
1984
1985 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1986 struct rx_agg_cmp *rx_agg)
1987 {
1988 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1989 struct bnxt_tpa_info *tpa_info;
1990
1991 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1992 tpa_info = &rxr->rx_tpa[agg_id];
1993 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1994 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1995 }
1996
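/* Hand a completed RX skb to the stack, or to the VF representor if it
 * was received on behalf of a VF.
 */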
1997 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1998 struct sk_buff *skb)
1999 {
2000 skb_mark_for_recycle(skb);
2001
2002 if (skb->dev != bp->dev) {
2003 /* this packet belongs to a vf-rep */
2004 bnxt_vf_rep_rx(bp, skb);
2005 return;
2006 }
2007 skb_record_rx_queue(skb, bnapi->index);
2008 napi_gro_receive(&bnapi->napi, skb);
2009 }
2010
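/* Return true and report the completion timestamp if the RX completion
 * carries a valid PTP timestamp, or a non-zero timestamp when
 * timestamping of all RX packets is enabled.
 */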
2011 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2012 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2013 {
2014 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2015
2016 if (BNXT_PTP_RX_TS_VALID(flags))
2017 goto ts_valid;
2018 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2019 return false;
2020
2021 ts_valid:
2022 *cmpl_ts = ts;
2023 return true;
2024 }
2025
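/* Extract the VLAN TPID and TCI from the RX completion (legacy or V3
 * format) and attach the tag to the skb; drop the packet if the TPID
 * is not a recognized VLAN ethertype.
 */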
2026 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2027 struct rx_cmp *rxcmp,
2028 struct rx_cmp_ext *rxcmp1)
2029 {
2030 __be16 vlan_proto;
2031 u16 vtag;
2032
2033 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2034 __le32 flags2 = rxcmp1->rx_cmp_flags2;
2035 u32 meta_data;
2036
2037 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2038 return skb;
2039
2040 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2041 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2042 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2043 if (eth_type_vlan(vlan_proto))
2044 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2045 else
2046 goto vlan_err;
2047 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2048 if (RX_CMP_VLAN_VALID(rxcmp)) {
2049 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2050
2051 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2052 vlan_proto = htons(ETH_P_8021Q);
2053 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2054 vlan_proto = htons(ETH_P_8021AD);
2055 else
2056 goto vlan_err;
2057 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2058 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2059 }
2060 }
2061 return skb;
2062 vlan_err:
2063 skb_mark_for_recycle(skb);
2064 dev_kfree_skb(skb);
2065 return NULL;
2066 }
2067
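/* Map the RSS hash profile (extended op) reported in a V3 RX
 * completion to PKT_HASH_TYPE_L4 or PKT_HASH_TYPE_L3 for
 * skb_set_hash().
 */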
2068 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2069 struct rx_cmp *rxcmp)
2070 {
2071 u8 ext_op;
2072
2073 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2074 switch (ext_op) {
2075 case EXT_OP_INNER_4:
2076 case EXT_OP_OUTER_4:
2077 case EXT_OP_INNFL_3:
2078 case EXT_OP_OUTFL_3:
2079 return PKT_HASH_TYPE_L4;
2080 default:
2081 return PKT_HASH_TYPE_L3;
2082 }
2083 }
2084
2085 /* returns the following:
2086 * 1 - 1 packet successfully received
2087 * 0 - successful TPA_START, packet not completed yet
2088 * -EBUSY - completion ring does not have all the agg buffers yet
2089 * -ENOMEM - packet aborted due to out of memory
2090 * -EIO - packet aborted due to hw error indicated in BD
2091 */
2092 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2093 u32 *raw_cons, u8 *event)
2094 {
2095 struct bnxt_napi *bnapi = cpr->bnapi;
2096 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2097 struct net_device *dev = bp->dev;
2098 struct rx_cmp *rxcmp;
2099 struct rx_cmp_ext *rxcmp1;
2100 u32 tmp_raw_cons = *raw_cons;
2101 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2102 struct skb_shared_info *sinfo;
2103 struct bnxt_sw_rx_bd *rx_buf;
2104 unsigned int len;
2105 u8 *data_ptr, agg_bufs, cmp_type;
2106 bool xdp_active = false;
2107 dma_addr_t dma_addr;
2108 struct sk_buff *skb;
2109 struct xdp_buff xdp;
2110 u32 flags, misc;
2111 u32 cmpl_ts;
2112 void *data;
2113 int rc = 0;
2114
2115 rxcmp = (struct rx_cmp *)
2116 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2117
2118 cmp_type = RX_CMP_TYPE(rxcmp);
2119
2120 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2121 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2122 goto next_rx_no_prod_no_len;
2123 }
2124
2125 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2126 cp_cons = RING_CMP(tmp_raw_cons);
2127 rxcmp1 = (struct rx_cmp_ext *)
2128 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2129
2130 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2131 return -EBUSY;
2132
2133 /* The valid test of the entry must be done first before
2134 * reading any further.
2135 */
2136 dma_rmb();
2137 prod = rxr->rx_prod;
2138
2139 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2140 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2141 bnxt_tpa_start(bp, rxr, cmp_type,
2142 (struct rx_tpa_start_cmp *)rxcmp,
2143 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2144
2145 *event |= BNXT_RX_EVENT;
2146 goto next_rx_no_prod_no_len;
2147
2148 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2149 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2150 (struct rx_tpa_end_cmp *)rxcmp,
2151 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2152
2153 if (IS_ERR(skb))
2154 return -EBUSY;
2155
2156 rc = -ENOMEM;
2157 if (likely(skb)) {
2158 bnxt_deliver_skb(bp, bnapi, skb);
2159 rc = 1;
2160 }
2161 *event |= BNXT_RX_EVENT;
2162 goto next_rx_no_prod_no_len;
2163 }
2164
2165 cons = rxcmp->rx_cmp_opaque;
2166 if (unlikely(cons != rxr->rx_next_cons)) {
2167 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2168
2169 		/* 0xffff is a forced error, don't print it */
2170 if (rxr->rx_next_cons != 0xffff)
2171 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2172 cons, rxr->rx_next_cons);
2173 bnxt_sched_reset_rxr(bp, rxr);
2174 if (rc1)
2175 return rc1;
2176 goto next_rx_no_prod_no_len;
2177 }
2178 rx_buf = &rxr->rx_buf_ring[cons];
2179 data = rx_buf->data;
2180 data_ptr = rx_buf->data_ptr;
2181 prefetch(data_ptr);
2182
2183 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2184 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2185
2186 if (agg_bufs) {
2187 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2188 return -EBUSY;
2189
2190 cp_cons = NEXT_CMP(cp_cons);
2191 *event |= BNXT_AGG_EVENT;
2192 }
2193 *event |= BNXT_RX_EVENT;
2194
2195 rx_buf->data = NULL;
2196 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2197 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2198
2199 bnxt_reuse_rx_data(rxr, cons, data);
2200 if (agg_bufs)
2201 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2202 false);
2203
2204 rc = -EIO;
2205 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2206 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2207 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2208 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2209 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2210 rx_err);
2211 bnxt_sched_reset_rxr(bp, rxr);
2212 }
2213 }
2214 goto next_rx_no_len;
2215 }
2216
2217 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2218 len = flags >> RX_CMP_LEN_SHIFT;
2219 dma_addr = rx_buf->mapping;
2220
2221 if (bnxt_xdp_attached(bp, rxr)) {
2222 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2223 if (agg_bufs) {
2224 u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
2225 cp_cons,
2226 agg_bufs,
2227 false);
2228 if (!frag_len)
2229 goto oom_next_rx;
2230
2231 }
2232 xdp_active = true;
2233 }
2234
2235 if (xdp_active) {
2236 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2237 rc = 1;
2238 goto next_rx;
2239 }
2240 if (xdp_buff_has_frags(&xdp)) {
2241 sinfo = xdp_get_shared_info_from_buff(&xdp);
2242 agg_bufs = sinfo->nr_frags;
2243 } else {
2244 agg_bufs = 0;
2245 }
2246 }
2247
2248 if (len <= bp->rx_copybreak) {
2249 if (!xdp_active)
2250 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2251 else
2252 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2253 bnxt_reuse_rx_data(rxr, cons, data);
2254 if (!skb) {
2255 if (agg_bufs) {
2256 if (!xdp_active)
2257 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2258 agg_bufs, false);
2259 else
2260 bnxt_xdp_buff_frags_free(rxr, &xdp);
2261 }
2262 goto oom_next_rx;
2263 }
2264 } else {
2265 u32 payload;
2266
2267 if (rx_buf->data_ptr == data_ptr)
2268 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2269 else
2270 payload = 0;
2271 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2272 payload | len);
2273 if (!skb)
2274 goto oom_next_rx;
2275 }
2276
2277 if (agg_bufs) {
2278 if (!xdp_active) {
2279 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2280 agg_bufs, false);
2281 if (!skb)
2282 goto oom_next_rx;
2283 } else {
2284 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2285 rxr->page_pool, &xdp);
2286 if (!skb) {
2287 /* we should be able to free the old skb here */
2288 bnxt_xdp_buff_frags_free(rxr, &xdp);
2289 goto oom_next_rx;
2290 }
2291 }
2292 }
2293
2294 if (RX_CMP_HASH_VALID(rxcmp)) {
2295 enum pkt_hash_types type;
2296
2297 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2298 type = bnxt_rss_ext_op(bp, rxcmp);
2299 } else {
2300 u32 itypes = RX_CMP_ITYPES(rxcmp);
2301
2302 if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2303 itypes == RX_CMP_FLAGS_ITYPE_UDP)
2304 type = PKT_HASH_TYPE_L4;
2305 else
2306 type = PKT_HASH_TYPE_L3;
2307 }
2308 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2309 }
2310
2311 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2312 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2313 skb->protocol = eth_type_trans(skb, dev);
2314
2315 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2316 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2317 if (!skb)
2318 goto next_rx;
2319 }
2320
2321 skb_checksum_none_assert(skb);
2322 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2323 if (dev->features & NETIF_F_RXCSUM) {
2324 skb->ip_summed = CHECKSUM_UNNECESSARY;
2325 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2326 }
2327 } else {
2328 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2329 if (dev->features & NETIF_F_RXCSUM)
2330 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2331 }
2332 }
2333
2334 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2335 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2336 u64 ns, ts;
2337
2338 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2339 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2340
2341 ns = bnxt_timecounter_cyc2time(ptp, ts);
2342 memset(skb_hwtstamps(skb), 0,
2343 sizeof(*skb_hwtstamps(skb)));
2344 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2345 }
2346 }
2347 }
2348 bnxt_deliver_skb(bp, bnapi, skb);
2349 rc = 1;
2350
2351 next_rx:
2352 cpr->rx_packets += 1;
2353 cpr->rx_bytes += len;
2354
2355 next_rx_no_len:
2356 rxr->rx_prod = NEXT_RX(prod);
2357 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2358
2359 next_rx_no_prod_no_len:
2360 *raw_cons = tmp_raw_cons;
2361
2362 return rc;
2363
2364 oom_next_rx:
2365 cpr->sw_stats->rx.rx_oom_discards += 1;
2366 rc = -ENOMEM;
2367 goto next_rx;
2368 }
2369
2370 /* In netpoll mode, if we are using a combined completion ring, we need to
2371 * discard the rx packets and recycle the buffers.
2372 */
2373 static int bnxt_force_rx_discard(struct bnxt *bp,
2374 struct bnxt_cp_ring_info *cpr,
2375 u32 *raw_cons, u8 *event)
2376 {
2377 u32 tmp_raw_cons = *raw_cons;
2378 struct rx_cmp_ext *rxcmp1;
2379 struct rx_cmp *rxcmp;
2380 u16 cp_cons;
2381 u8 cmp_type;
2382 int rc;
2383
2384 cp_cons = RING_CMP(tmp_raw_cons);
2385 rxcmp = (struct rx_cmp *)
2386 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2387
2388 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2389 cp_cons = RING_CMP(tmp_raw_cons);
2390 rxcmp1 = (struct rx_cmp_ext *)
2391 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2392
2393 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2394 return -EBUSY;
2395
2396 /* The valid test of the entry must be done first before
2397 * reading any further.
2398 */
2399 dma_rmb();
2400 cmp_type = RX_CMP_TYPE(rxcmp);
2401 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2402 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2403 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2404 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2405 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2406 struct rx_tpa_end_cmp_ext *tpa_end1;
2407
2408 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2409 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2410 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2411 }
2412 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2413 if (rc && rc != -EBUSY)
2414 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2415 return rc;
2416 }
2417
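/* Read a firmware health register, dispatching on whether it lives in
 * PCI config space, GRC space (via the mapped window), or directly in
 * BAR0/BAR1.
 */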
2418 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2419 {
2420 struct bnxt_fw_health *fw_health = bp->fw_health;
2421 u32 reg = fw_health->regs[reg_idx];
2422 u32 reg_type, reg_off, val = 0;
2423
2424 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2425 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2426 switch (reg_type) {
2427 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2428 pci_read_config_dword(bp->pdev, reg_off, &val);
2429 break;
2430 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2431 reg_off = fw_health->mapped_regs[reg_idx];
2432 fallthrough;
2433 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2434 val = readl(bp->bar0 + reg_off);
2435 break;
2436 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2437 val = readl(bp->bar1 + reg_off);
2438 break;
2439 }
2440 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2441 val &= fw_health->fw_reset_inprog_reg_mask;
2442 return val;
2443 }
2444
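/* Translate a firmware aggregation ring ID to the ring group index
 * that owns it, or INVALID_HW_RING_ID if no group matches.
 */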
2445 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2446 {
2447 int i;
2448
2449 for (i = 0; i < bp->rx_nr_rings; i++) {
2450 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2451 struct bnxt_ring_grp_info *grp_info;
2452
2453 grp_info = &bp->grp_info[grp_idx];
2454 if (grp_info->agg_fw_ring_id == ring_id)
2455 return grp_idx;
2456 }
2457 return INVALID_HW_RING_ID;
2458 }
2459
2460 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2461 {
2462 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2463
2464 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2465 return link_info->force_link_speed2;
2466 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2467 return link_info->force_pam4_link_speed;
2468 return link_info->force_link_speed;
2469 }
2470
2471 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2472 {
2473 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2474
2475 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2476 link_info->req_link_speed = link_info->force_link_speed2;
2477 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2478 switch (link_info->req_link_speed) {
2479 case BNXT_LINK_SPEED_50GB_PAM4:
2480 case BNXT_LINK_SPEED_100GB_PAM4:
2481 case BNXT_LINK_SPEED_200GB_PAM4:
2482 case BNXT_LINK_SPEED_400GB_PAM4:
2483 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2484 break;
2485 case BNXT_LINK_SPEED_100GB_PAM4_112:
2486 case BNXT_LINK_SPEED_200GB_PAM4_112:
2487 case BNXT_LINK_SPEED_400GB_PAM4_112:
2488 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2489 break;
2490 default:
2491 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2492 }
2493 return;
2494 }
2495 link_info->req_link_speed = link_info->force_link_speed;
2496 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2497 if (link_info->force_pam4_link_speed) {
2498 link_info->req_link_speed = link_info->force_pam4_link_speed;
2499 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2500 }
2501 }
2502
2503 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2504 {
2505 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2506
2507 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2508 link_info->advertising = link_info->auto_link_speeds2;
2509 return;
2510 }
2511 link_info->advertising = link_info->auto_link_speeds;
2512 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2513 }
2514
2515 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2516 {
2517 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2518
2519 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2520 if (link_info->req_link_speed != link_info->force_link_speed2)
2521 return true;
2522 return false;
2523 }
2524 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2525 link_info->req_link_speed != link_info->force_link_speed)
2526 return true;
2527 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2528 link_info->req_link_speed != link_info->force_pam4_link_speed)
2529 return true;
2530 return false;
2531 }
2532
2533 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2534 {
2535 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2536
2537 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2538 if (link_info->advertising != link_info->auto_link_speeds2)
2539 return true;
2540 return false;
2541 }
2542 if (link_info->advertising != link_info->auto_link_speeds ||
2543 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2544 return true;
2545 return false;
2546 }
2547
2548 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2549 {
2550 u32 flags = bp->ctx->ctx_arr[type].flags;
2551
2552 return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2553 ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2554 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2555 }
2556
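/* Place a magic byte at the last byte of the backing-store trace
 * buffer so later processing can tell whether the firmware has wrapped
 * past the end of the buffer.
 */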
2557 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2558 {
2559 u32 mem_size, pages, rem_bytes, magic_byte_offset;
2560 u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2561 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2562 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2563 struct bnxt_bs_trace_info *bs_trace;
2564 int last_pg;
2565
2566 if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2567 return;
2568
2569 mem_size = ctxm->max_entries * ctxm->entry_size;
2570 rem_bytes = mem_size % BNXT_PAGE_SIZE;
2571 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2572
2573 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2574 magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2575
2576 rmem = &ctx_pg[0].ring_mem;
2577 bs_trace = &bp->bs_trace[trace_type];
2578 bs_trace->ctx_type = ctxm->type;
2579 bs_trace->trace_type = trace_type;
2580 if (pages > MAX_CTX_PAGES) {
2581 int last_pg_dir = rmem->nr_pages - 1;
2582
2583 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2584 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2585 } else {
2586 bs_trace->magic_byte = rmem->pg_arr[last_pg];
2587 }
2588 bs_trace->magic_byte += magic_byte_offset;
2589 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2590 }
2591
2592 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
2593 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2594 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2595
2596 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
2597 (((data2) & \
2598 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2599 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2600
2601 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2602 ((data2) & \
2603 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2604
2605 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2606 (((data2) & \
2607 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2608 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2609
2610 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2611 ((data1) & \
2612 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2613
2614 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2615 (((data1) & \
2616 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2617 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2618
2619 /* Return true if the workqueue has to be scheduled */
2620 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2621 {
2622 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2623
2624 switch (err_type) {
2625 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2626 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2627 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2628 break;
2629 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2630 netdev_warn(bp->dev, "Pause Storm detected!\n");
2631 break;
2632 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2633 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2634 break;
2635 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2636 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2637 char *threshold_type;
2638 bool notify = false;
2639 char *dir_str;
2640
2641 switch (type) {
2642 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2643 threshold_type = "warning";
2644 break;
2645 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2646 threshold_type = "critical";
2647 break;
2648 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2649 threshold_type = "fatal";
2650 break;
2651 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2652 threshold_type = "shutdown";
2653 break;
2654 default:
2655 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2656 return false;
2657 }
2658 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2659 dir_str = "above";
2660 notify = true;
2661 } else {
2662 dir_str = "below";
2663 }
2664 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2665 dir_str, threshold_type);
2666 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2667 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2668 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2669 if (notify) {
2670 bp->thermal_threshold_type = type;
2671 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2672 return true;
2673 }
2674 return false;
2675 }
2676 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2677 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2678 break;
2679 default:
2680 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2681 err_type);
2682 break;
2683 }
2684 return false;
2685 }
2686
2687 #define BNXT_GET_EVENT_PORT(data) \
2688 ((data) & \
2689 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2690
2691 #define BNXT_EVENT_RING_TYPE(data2) \
2692 ((data2) & \
2693 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2694
2695 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2696 (BNXT_EVENT_RING_TYPE(data2) == \
2697 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2698
2699 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2700 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2701 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2702
2703 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2704 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2705 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2706
2707 #define BNXT_PHC_BITS 48
2708
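/* Decode a firmware asynchronous event completion and schedule the
 * matching slow-path work (link changes, firmware reset/recovery,
 * ring monitor, PTP and debug events, etc.).
 */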
2709 static int bnxt_async_event_process(struct bnxt *bp,
2710 struct hwrm_async_event_cmpl *cmpl)
2711 {
2712 u16 event_id = le16_to_cpu(cmpl->event_id);
2713 u32 data1 = le32_to_cpu(cmpl->event_data1);
2714 u32 data2 = le32_to_cpu(cmpl->event_data2);
2715
2716 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2717 event_id, data1, data2);
2718
2719 	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2720 switch (event_id) {
2721 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2722 struct bnxt_link_info *link_info = &bp->link_info;
2723
2724 if (BNXT_VF(bp))
2725 goto async_event_process_exit;
2726
2727 /* print unsupported speed warning in forced speed mode only */
2728 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2729 (data1 & 0x20000)) {
2730 u16 fw_speed = bnxt_get_force_speed(link_info);
2731 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2732
2733 if (speed != SPEED_UNKNOWN)
2734 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2735 speed);
2736 }
2737 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2738 }
2739 fallthrough;
2740 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2741 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2742 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2743 fallthrough;
2744 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2745 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2746 break;
2747 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2748 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2749 break;
2750 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2751 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2752
2753 if (BNXT_VF(bp))
2754 break;
2755
2756 if (bp->pf.port_id != port_id)
2757 break;
2758
2759 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2760 break;
2761 }
2762 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2763 if (BNXT_PF(bp))
2764 goto async_event_process_exit;
2765 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2766 break;
2767 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2768 char *type_str = "Solicited";
2769
2770 if (!bp->fw_health)
2771 goto async_event_process_exit;
2772
2773 bp->fw_reset_timestamp = jiffies;
2774 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2775 if (!bp->fw_reset_min_dsecs)
2776 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2777 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2778 if (!bp->fw_reset_max_dsecs)
2779 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2780 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2781 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2782 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2783 type_str = "Fatal";
2784 bp->fw_health->fatalities++;
2785 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2786 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2787 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2788 type_str = "Non-fatal";
2789 bp->fw_health->survivals++;
2790 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2791 }
2792 netif_warn(bp, hw, bp->dev,
2793 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2794 type_str, data1, data2,
2795 bp->fw_reset_min_dsecs * 100,
2796 bp->fw_reset_max_dsecs * 100);
2797 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2798 break;
2799 }
2800 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2801 struct bnxt_fw_health *fw_health = bp->fw_health;
2802 char *status_desc = "healthy";
2803 u32 status;
2804
2805 if (!fw_health)
2806 goto async_event_process_exit;
2807
2808 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2809 fw_health->enabled = false;
2810 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2811 break;
2812 }
2813 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2814 fw_health->tmr_multiplier =
2815 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2816 bp->current_interval * 10);
2817 fw_health->tmr_counter = fw_health->tmr_multiplier;
2818 if (!fw_health->enabled)
2819 fw_health->last_fw_heartbeat =
2820 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2821 fw_health->last_fw_reset_cnt =
2822 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2823 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2824 if (status != BNXT_FW_STATUS_HEALTHY)
2825 status_desc = "unhealthy";
2826 netif_info(bp, drv, bp->dev,
2827 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2828 fw_health->primary ? "primary" : "backup", status,
2829 status_desc, fw_health->last_fw_reset_cnt);
2830 if (!fw_health->enabled) {
2831 /* Make sure tmr_counter is set and visible to
2832 * bnxt_health_check() before setting enabled to true.
2833 */
2834 smp_wmb();
2835 fw_health->enabled = true;
2836 }
2837 goto async_event_process_exit;
2838 }
2839 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2840 netif_notice(bp, hw, bp->dev,
2841 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2842 data1, data2);
2843 goto async_event_process_exit;
2844 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2845 struct bnxt_rx_ring_info *rxr;
2846 u16 grp_idx;
2847
2848 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2849 goto async_event_process_exit;
2850
2851 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2852 BNXT_EVENT_RING_TYPE(data2), data1);
2853 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2854 goto async_event_process_exit;
2855
2856 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2857 if (grp_idx == INVALID_HW_RING_ID) {
2858 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2859 data1);
2860 goto async_event_process_exit;
2861 }
2862 rxr = bp->bnapi[grp_idx]->rx_ring;
2863 bnxt_sched_reset_rxr(bp, rxr);
2864 goto async_event_process_exit;
2865 }
2866 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2867 struct bnxt_fw_health *fw_health = bp->fw_health;
2868
2869 netif_notice(bp, hw, bp->dev,
2870 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2871 data1, data2);
2872 if (fw_health) {
2873 fw_health->echo_req_data1 = data1;
2874 fw_health->echo_req_data2 = data2;
2875 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2876 break;
2877 }
2878 goto async_event_process_exit;
2879 }
2880 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2881 bnxt_ptp_pps_event(bp, data1, data2);
2882 goto async_event_process_exit;
2883 }
2884 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2885 if (bnxt_event_error_report(bp, data1, data2))
2886 break;
2887 goto async_event_process_exit;
2888 }
2889 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2890 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2891 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2892 if (BNXT_PTP_USE_RTC(bp)) {
2893 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2894 unsigned long flags;
2895 u64 ns;
2896
2897 if (!ptp)
2898 goto async_event_process_exit;
2899
2900 bnxt_ptp_update_current_time(bp);
2901 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2902 BNXT_PHC_BITS) | ptp->current_time);
2903 write_seqlock_irqsave(&ptp->ptp_lock, flags);
2904 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2905 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2906 }
2907 break;
2908 }
2909 goto async_event_process_exit;
2910 }
2911 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2912 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2913
2914 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2915 goto async_event_process_exit;
2916 }
2917 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2918 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2919 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2920
2921 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2922 goto async_event_process_exit;
2923 }
2924 default:
2925 goto async_event_process_exit;
2926 }
2927 __bnxt_queue_sp_work(bp);
2928 async_event_process_exit:
2929 bnxt_ulp_async_events(bp, cmpl);
2930 return 0;
2931 }
2932
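/* Dispatch HWRM-related completions: command done notifications,
 * forwarded VF requests, and asynchronous events.
 */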
2933 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2934 {
2935 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2936 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2937 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2938 (struct hwrm_fwd_req_cmpl *)txcmp;
2939
2940 switch (cmpl_type) {
2941 case CMPL_BASE_TYPE_HWRM_DONE:
2942 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2943 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2944 break;
2945
2946 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2947 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2948
2949 if ((vf_id < bp->pf.first_vf_id) ||
2950 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2951 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2952 vf_id);
2953 return -EINVAL;
2954 }
2955
2956 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2957 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2958 break;
2959
2960 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2961 bnxt_async_event_process(bp,
2962 (struct hwrm_async_event_cmpl *)txcmp);
2963 break;
2964
2965 default:
2966 break;
2967 }
2968
2969 return 0;
2970 }
2971
2972 static bool bnxt_vnic_is_active(struct bnxt *bp)
2973 {
2974 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2975
2976 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2977 }
2978
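/* MSI-X interrupt handler: prefetch the next completion descriptor and
 * let NAPI do the actual work.
 */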
2979 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2980 {
2981 struct bnxt_napi *bnapi = dev_instance;
2982 struct bnxt *bp = bnapi->bp;
2983 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2984 u32 cons = RING_CMP(cpr->cp_raw_cons);
2985
2986 cpr->event_ctr++;
2987 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2988 napi_schedule(&bnapi->napi);
2989 return IRQ_HANDLED;
2990 }
2991
2992 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2993 {
2994 u32 raw_cons = cpr->cp_raw_cons;
2995 u16 cons = RING_CMP(raw_cons);
2996 struct tx_cmp *txcmp;
2997
2998 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2999
3000 return TX_CMP_VALID(txcmp, raw_cons);
3001 }
3002
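/* Process completion ring entries until the ring is empty or the NAPI
 * budget is exhausted.  TX completions are recorded for tx_int() and
 * the RX/AGG producer doorbells are written later in
 * __bnxt_poll_work_done().
 */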
3003 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3004 int budget)
3005 {
3006 struct bnxt_napi *bnapi = cpr->bnapi;
3007 u32 raw_cons = cpr->cp_raw_cons;
3008 bool flush_xdp = false;
3009 u32 cons;
3010 int rx_pkts = 0;
3011 u8 event = 0;
3012 struct tx_cmp *txcmp;
3013
3014 cpr->has_more_work = 0;
3015 cpr->had_work_done = 1;
3016 while (1) {
3017 u8 cmp_type;
3018 int rc;
3019
3020 cons = RING_CMP(raw_cons);
3021 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3022
3023 if (!TX_CMP_VALID(txcmp, raw_cons))
3024 break;
3025
3026 /* The valid test of the entry must be done first before
3027 * reading any further.
3028 */
3029 dma_rmb();
3030 cmp_type = TX_CMP_TYPE(txcmp);
3031 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3032 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3033 u32 opaque = txcmp->tx_cmp_opaque;
3034 struct bnxt_tx_ring_info *txr;
3035 u16 tx_freed;
3036
3037 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3038 event |= BNXT_TX_CMP_EVENT;
3039 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3040 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3041 else
3042 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3043 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3044 bp->tx_ring_mask;
3045 /* return full budget so NAPI will complete. */
3046 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3047 rx_pkts = budget;
3048 raw_cons = NEXT_RAW_CMP(raw_cons);
3049 if (budget)
3050 cpr->has_more_work = 1;
3051 break;
3052 }
3053 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3054 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3055 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3056 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3057 if (likely(budget))
3058 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3059 else
3060 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3061 &event);
3062 if (event & BNXT_REDIRECT_EVENT)
3063 flush_xdp = true;
3064 if (likely(rc >= 0))
3065 rx_pkts += rc;
3066 /* Increment rx_pkts when rc is -ENOMEM to count towards
3067 * the NAPI budget. Otherwise, we may potentially loop
3068 * here forever if we consistently cannot allocate
3069 * buffers.
3070 */
3071 else if (rc == -ENOMEM && budget)
3072 rx_pkts++;
3073 else if (rc == -EBUSY) /* partial completion */
3074 break;
3075 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3076 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3077 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3078 bnxt_hwrm_handler(bp, txcmp);
3079 }
3080 raw_cons = NEXT_RAW_CMP(raw_cons);
3081
3082 if (rx_pkts && rx_pkts == budget) {
3083 cpr->has_more_work = 1;
3084 break;
3085 }
3086 }
3087
3088 if (flush_xdp) {
3089 xdp_do_flush();
3090 event &= ~BNXT_REDIRECT_EVENT;
3091 }
3092
3093 if (event & BNXT_TX_EVENT) {
3094 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3095 u16 prod = txr->tx_prod;
3096
3097 /* Sync BD data before updating doorbell */
3098 wmb();
3099
3100 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3101 event &= ~BNXT_TX_EVENT;
3102 }
3103
3104 cpr->cp_raw_cons = raw_cons;
3105 bnapi->events |= event;
3106 return rx_pkts;
3107 }
3108
3109 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3110 int budget)
3111 {
3112 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3113 bnapi->tx_int(bp, bnapi, budget);
3114
3115 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3116 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3117
3118 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3119 bnapi->events &= ~BNXT_RX_EVENT;
3120 }
3121 if (bnapi->events & BNXT_AGG_EVENT) {
3122 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3123
3124 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3125 bnapi->events &= ~BNXT_AGG_EVENT;
3126 }
3127 }
3128
3129 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3130 int budget)
3131 {
3132 struct bnxt_napi *bnapi = cpr->bnapi;
3133 int rx_pkts;
3134
3135 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3136
3137 /* ACK completion ring before freeing tx ring and producing new
3138 * buffers in rx/agg rings to prevent overflowing the completion
3139 * ring.
3140 */
3141 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3142
3143 __bnxt_poll_work_done(bp, bnapi, budget);
3144 return rx_pkts;
3145 }
3146
3147 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3148 {
3149 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3150 struct bnxt *bp = bnapi->bp;
3151 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3152 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3153 struct tx_cmp *txcmp;
3154 struct rx_cmp_ext *rxcmp1;
3155 u32 cp_cons, tmp_raw_cons;
3156 u32 raw_cons = cpr->cp_raw_cons;
3157 bool flush_xdp = false;
3158 u32 rx_pkts = 0;
3159 u8 event = 0;
3160
3161 while (1) {
3162 int rc;
3163
3164 cp_cons = RING_CMP(raw_cons);
3165 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3166
3167 if (!TX_CMP_VALID(txcmp, raw_cons))
3168 break;
3169
3170 /* The valid test of the entry must be done first before
3171 * reading any further.
3172 */
3173 dma_rmb();
3174 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3175 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3176 cp_cons = RING_CMP(tmp_raw_cons);
3177 rxcmp1 = (struct rx_cmp_ext *)
3178 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3179
3180 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3181 break;
3182
3183 /* force an error to recycle the buffer */
3184 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3185 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3186
3187 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3188 if (likely(rc == -EIO) && budget)
3189 rx_pkts++;
3190 else if (rc == -EBUSY) /* partial completion */
3191 break;
3192 if (event & BNXT_REDIRECT_EVENT)
3193 flush_xdp = true;
3194 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3195 CMPL_BASE_TYPE_HWRM_DONE)) {
3196 bnxt_hwrm_handler(bp, txcmp);
3197 } else {
3198 netdev_err(bp->dev,
3199 "Invalid completion received on special ring\n");
3200 }
3201 raw_cons = NEXT_RAW_CMP(raw_cons);
3202
3203 if (rx_pkts == budget)
3204 break;
3205 }
3206
3207 cpr->cp_raw_cons = raw_cons;
3208 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3209 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3210
3211 if (event & BNXT_AGG_EVENT)
3212 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3213 if (flush_xdp)
3214 xdp_do_flush();
3215
3216 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3217 napi_complete_done(napi, rx_pkts);
3218 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3219 }
3220 return rx_pkts;
3221 }
3222
3223 static int bnxt_poll(struct napi_struct *napi, int budget)
3224 {
3225 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3226 struct bnxt *bp = bnapi->bp;
3227 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3228 int work_done = 0;
3229
3230 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3231 napi_complete(napi);
3232 return 0;
3233 }
3234 while (1) {
3235 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3236
3237 if (work_done >= budget) {
3238 if (!budget)
3239 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3240 break;
3241 }
3242
3243 if (!bnxt_has_work(bp, cpr)) {
3244 if (napi_complete_done(napi, work_done))
3245 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3246 break;
3247 }
3248 }
3249 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3250 struct dim_sample dim_sample = {};
3251
3252 dim_update_sample(cpr->event_ctr,
3253 cpr->rx_packets,
3254 cpr->rx_bytes,
3255 &dim_sample);
3256 net_dim(&cpr->dim, &dim_sample);
3257 }
3258 return work_done;
3259 }
3260
3261 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3262 {
3263 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3264 int i, work_done = 0;
3265
3266 for (i = 0; i < cpr->cp_ring_count; i++) {
3267 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3268
3269 if (cpr2->had_nqe_notify) {
3270 work_done += __bnxt_poll_work(bp, cpr2,
3271 budget - work_done);
3272 cpr->has_more_work |= cpr2->has_more_work;
3273 }
3274 }
3275 return work_done;
3276 }
3277
3278 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3279 u64 dbr_type, int budget)
3280 {
3281 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3282 int i;
3283
3284 for (i = 0; i < cpr->cp_ring_count; i++) {
3285 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3286 struct bnxt_db_info *db;
3287
3288 if (cpr2->had_work_done) {
3289 u32 tgl = 0;
3290
3291 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3292 cpr2->had_nqe_notify = 0;
3293 tgl = cpr2->toggle;
3294 }
3295 db = &cpr2->cp_db;
3296 bnxt_writeq(bp,
3297 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3298 DB_RING_IDX(db, cpr2->cp_raw_cons),
3299 db->doorbell);
3300 cpr2->had_work_done = 0;
3301 }
3302 }
3303 __bnxt_poll_work_done(bp, bnapi, budget);
3304 }
3305
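/* NAPI poll for P5+ chips: drain the notification queue (NQ), polling
 * each completion ring referenced by an NQ entry, and re-arm the CQ
 * and NQ doorbells when all work is done.
 */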
3306 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3307 {
3308 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3309 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3310 struct bnxt_cp_ring_info *cpr_rx;
3311 u32 raw_cons = cpr->cp_raw_cons;
3312 struct bnxt *bp = bnapi->bp;
3313 struct nqe_cn *nqcmp;
3314 int work_done = 0;
3315 u32 cons;
3316
3317 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3318 napi_complete(napi);
3319 return 0;
3320 }
3321 if (cpr->has_more_work) {
3322 cpr->has_more_work = 0;
3323 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3324 }
3325 while (1) {
3326 u16 type;
3327
3328 cons = RING_CMP(raw_cons);
3329 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3330
3331 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3332 if (cpr->has_more_work)
3333 break;
3334
3335 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3336 budget);
3337 cpr->cp_raw_cons = raw_cons;
3338 if (napi_complete_done(napi, work_done))
3339 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3340 cpr->cp_raw_cons);
3341 goto poll_done;
3342 }
3343
3344 /* The valid test of the entry must be done first before
3345 * reading any further.
3346 */
3347 dma_rmb();
3348
3349 type = le16_to_cpu(nqcmp->type);
3350 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3351 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3352 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3353 struct bnxt_cp_ring_info *cpr2;
3354
3355 /* No more budget for RX work */
3356 if (budget && work_done >= budget &&
3357 cq_type == BNXT_NQ_HDL_TYPE_RX)
3358 break;
3359
3360 idx = BNXT_NQ_HDL_IDX(idx);
3361 cpr2 = &cpr->cp_ring_arr[idx];
3362 cpr2->had_nqe_notify = 1;
3363 cpr2->toggle = NQE_CN_TOGGLE(type);
3364 work_done += __bnxt_poll_work(bp, cpr2,
3365 budget - work_done);
3366 cpr->has_more_work |= cpr2->has_more_work;
3367 } else {
3368 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3369 }
3370 raw_cons = NEXT_RAW_CMP(raw_cons);
3371 }
3372 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3373 if (raw_cons != cpr->cp_raw_cons) {
3374 cpr->cp_raw_cons = raw_cons;
3375 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3376 }
3377 poll_done:
3378 cpr_rx = &cpr->cp_ring_arr[0];
3379 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3380 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3381 struct dim_sample dim_sample = {};
3382
3383 dim_update_sample(cpr->event_ctr,
3384 cpr_rx->rx_packets,
3385 cpr_rx->rx_bytes,
3386 &dim_sample);
3387 net_dim(&cpr->dim, &dim_sample);
3388 }
3389 return work_done;
3390 }
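/* Release every pending TX buffer on one ring: XDP_REDIRECT frames are
 * unmapped and returned via xdp_return_frame(), push packets only need
 * their skb freed, and regular packets have the head and all fragments
 * unmapped before the skb is freed.  The BQL state of the queue is
 * reset at the end.
 */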
3391
3392 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3393 struct bnxt_tx_ring_info *txr, int idx)
3394 {
3395 int i, max_idx;
3396 struct pci_dev *pdev = bp->pdev;
3397
3398 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3399
3400 for (i = 0; i < max_idx;) {
3401 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3402 struct sk_buff *skb;
3403 int j, last;
3404
3405 if (idx < bp->tx_nr_rings_xdp &&
3406 tx_buf->action == XDP_REDIRECT) {
3407 dma_unmap_single(&pdev->dev,
3408 dma_unmap_addr(tx_buf, mapping),
3409 dma_unmap_len(tx_buf, len),
3410 DMA_TO_DEVICE);
3411 xdp_return_frame(tx_buf->xdpf);
3412 tx_buf->action = 0;
3413 tx_buf->xdpf = NULL;
3414 i++;
3415 continue;
3416 }
3417
3418 skb = tx_buf->skb;
3419 if (!skb) {
3420 i++;
3421 continue;
3422 }
3423
3424 tx_buf->skb = NULL;
3425
3426 if (tx_buf->is_push) {
3427 dev_kfree_skb(skb);
3428 i += 2;
3429 continue;
3430 }
3431
3432 dma_unmap_single(&pdev->dev,
3433 dma_unmap_addr(tx_buf, mapping),
3434 skb_headlen(skb),
3435 DMA_TO_DEVICE);
3436
3437 last = tx_buf->nr_frags;
3438 i += 2;
3439 for (j = 0; j < last; j++, i++) {
3440 int ring_idx = i & bp->tx_ring_mask;
3441 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3442
3443 tx_buf = &txr->tx_buf_ring[ring_idx];
3444 netmem_dma_unmap_page_attrs(&pdev->dev,
3445 dma_unmap_addr(tx_buf,
3446 mapping),
3447 skb_frag_size(frag),
3448 DMA_TO_DEVICE, 0);
3449 }
3450 dev_kfree_skb(skb);
3451 }
3452 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3453 }
3454
3455 static void bnxt_free_tx_skbs(struct bnxt *bp)
3456 {
3457 int i;
3458
3459 if (!bp->tx_ring)
3460 return;
3461
3462 for (i = 0; i < bp->tx_nr_rings; i++) {
3463 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3464
3465 if (!txr->tx_buf_ring)
3466 continue;
3467
3468 bnxt_free_one_tx_ring_skbs(bp, txr, i);
3469 }
3470
3471 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3472 bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3473 }
3474
3475 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3476 {
3477 int i, max_idx;
3478
3479 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3480
3481 for (i = 0; i < max_idx; i++) {
3482 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3483 void *data = rx_buf->data;
3484
3485 if (!data)
3486 continue;
3487
3488 rx_buf->data = NULL;
3489 if (BNXT_RX_PAGE_MODE(bp))
3490 page_pool_recycle_direct(rxr->page_pool, data);
3491 else
3492 page_pool_free_va(rxr->head_pool, data, true);
3493 }
3494 }
3495
3496 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3497 {
3498 int i, max_idx;
3499
3500 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3501
3502 for (i = 0; i < max_idx; i++) {
3503 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3504 netmem_ref netmem = rx_agg_buf->netmem;
3505
3506 if (!netmem)
3507 continue;
3508
3509 rx_agg_buf->netmem = 0;
3510 __clear_bit(i, rxr->rx_agg_bmap);
3511
3512 page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3513 }
3514 }
3515
3516 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3517 struct bnxt_rx_ring_info *rxr)
3518 {
3519 int i;
3520
3521 for (i = 0; i < bp->max_tpa; i++) {
3522 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3523 u8 *data = tpa_info->data;
3524
3525 if (!data)
3526 continue;
3527
3528 tpa_info->data = NULL;
3529 page_pool_free_va(rxr->head_pool, data, false);
3530 }
3531 }
3532
3533 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3534 struct bnxt_rx_ring_info *rxr)
3535 {
3536 struct bnxt_tpa_idx_map *map;
3537
3538 if (!rxr->rx_tpa)
3539 goto skip_rx_tpa_free;
3540
3541 bnxt_free_one_tpa_info_data(bp, rxr);
3542
3543 skip_rx_tpa_free:
3544 if (!rxr->rx_buf_ring)
3545 goto skip_rx_buf_free;
3546
3547 bnxt_free_one_rx_ring(bp, rxr);
3548
3549 skip_rx_buf_free:
3550 if (!rxr->rx_agg_ring)
3551 goto skip_rx_agg_free;
3552
3553 bnxt_free_one_rx_agg_ring(bp, rxr);
3554
3555 skip_rx_agg_free:
3556 map = rxr->rx_tpa_idx_map;
3557 if (map)
3558 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3559 }
3560
3561 static void bnxt_free_rx_skbs(struct bnxt *bp)
3562 {
3563 int i;
3564
3565 if (!bp->rx_ring)
3566 return;
3567
3568 for (i = 0; i < bp->rx_nr_rings; i++)
3569 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3570 }
3571
3572 static void bnxt_free_skbs(struct bnxt *bp)
3573 {
3574 bnxt_free_tx_skbs(bp);
3575 bnxt_free_rx_skbs(bp);
3576 }
3577
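/* Pre-initialize a block of firmware context memory.  If the firmware
 * did not report a per-entry init offset, the whole block is filled
 * with the init value; otherwise only the byte at init_offset inside
 * each entry is set.
 */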
3578 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3579 {
3580 u8 init_val = ctxm->init_value;
3581 u16 offset = ctxm->init_offset;
3582 u8 *p2 = p;
3583 int i;
3584
3585 if (!init_val)
3586 return;
3587 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3588 memset(p, init_val, len);
3589 return;
3590 }
3591 for (i = 0; i < len; i += ctxm->entry_size)
3592 *(p2 + i + offset) = init_val;
3593 }
3594
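/* Copy a head..tail region of a paged ring into a flat buffer.  The
 * length is derived modulo MAX_CTX_BYTES and clamped to what is
 * available from the starting page to the end of the page array; the
 * number of bytes copied (or that would be copied when buf is NULL)
 * is returned.
 */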
3595 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3596 void *buf, size_t offset, size_t head,
3597 size_t tail)
3598 {
3599 int i, head_page, start_idx, source_offset;
3600 size_t len, rem_len, total_len, max_bytes;
3601
3602 head_page = head / rmem->page_size;
3603 source_offset = head % rmem->page_size;
3604 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3605 if (!total_len)
3606 total_len = MAX_CTX_BYTES;
3607 start_idx = head_page % MAX_CTX_PAGES;
3608 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3609 source_offset;
3610 total_len = min(total_len, max_bytes);
3611 rem_len = total_len;
3612
3613 for (i = start_idx; rem_len; i++, source_offset = 0) {
3614 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3615 if (buf)
3616 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3617 len);
3618 offset += len;
3619 rem_len -= len;
3620 }
3621 return total_len;
3622 }
3623
3624 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3625 {
3626 struct pci_dev *pdev = bp->pdev;
3627 int i;
3628
3629 if (!rmem->pg_arr)
3630 goto skip_pages;
3631
3632 for (i = 0; i < rmem->nr_pages; i++) {
3633 if (!rmem->pg_arr[i])
3634 continue;
3635
3636 dma_free_coherent(&pdev->dev, rmem->page_size,
3637 rmem->pg_arr[i], rmem->dma_arr[i]);
3638
3639 rmem->pg_arr[i] = NULL;
3640 }
3641 skip_pages:
3642 if (rmem->pg_tbl) {
3643 size_t pg_tbl_size = rmem->nr_pages * 8;
3644
3645 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3646 pg_tbl_size = rmem->page_size;
3647 dma_free_coherent(&pdev->dev, pg_tbl_size,
3648 rmem->pg_tbl, rmem->pg_tbl_map);
3649 rmem->pg_tbl = NULL;
3650 }
3651 if (rmem->vmem_size && *rmem->vmem) {
3652 vfree(*rmem->vmem);
3653 *rmem->vmem = NULL;
3654 }
3655 }
3656
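/* Allocate the DMA pages that back a ring described by
 * bnxt_ring_mem_info.  Rings with more than one page (or depth > 0)
 * also get a page table; when the ring PTE flag is set its entries
 * carry the PTU valid/next-to-last/last bits.  An optional vzalloc'ed
 * shadow array (vmem) holds software per-descriptor state.
 */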
3657 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3658 {
3659 struct pci_dev *pdev = bp->pdev;
3660 u64 valid_bit = 0;
3661 int i;
3662
3663 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3664 valid_bit = PTU_PTE_VALID;
3665 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3666 size_t pg_tbl_size = rmem->nr_pages * 8;
3667
3668 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3669 pg_tbl_size = rmem->page_size;
3670 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3671 &rmem->pg_tbl_map,
3672 GFP_KERNEL);
3673 if (!rmem->pg_tbl)
3674 return -ENOMEM;
3675 }
3676
3677 for (i = 0; i < rmem->nr_pages; i++) {
3678 u64 extra_bits = valid_bit;
3679
3680 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3681 rmem->page_size,
3682 &rmem->dma_arr[i],
3683 GFP_KERNEL);
3684 if (!rmem->pg_arr[i])
3685 return -ENOMEM;
3686
3687 if (rmem->ctx_mem)
3688 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3689 rmem->page_size);
3690 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3691 if (i == rmem->nr_pages - 2 &&
3692 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3693 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3694 else if (i == rmem->nr_pages - 1 &&
3695 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3696 extra_bits |= PTU_PTE_LAST;
3697 rmem->pg_tbl[i] =
3698 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3699 }
3700 }
3701
3702 if (rmem->vmem_size) {
3703 *rmem->vmem = vzalloc(rmem->vmem_size);
3704 if (!(*rmem->vmem))
3705 return -ENOMEM;
3706 }
3707 return 0;
3708 }
3709
3710 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3711 struct bnxt_rx_ring_info *rxr)
3712 {
3713 int i;
3714
3715 kfree(rxr->rx_tpa_idx_map);
3716 rxr->rx_tpa_idx_map = NULL;
3717 if (rxr->rx_tpa) {
3718 for (i = 0; i < bp->max_tpa; i++) {
3719 kfree(rxr->rx_tpa[i].agg_arr);
3720 rxr->rx_tpa[i].agg_arr = NULL;
3721 }
3722 }
3723 kfree(rxr->rx_tpa);
3724 rxr->rx_tpa = NULL;
3725 }
3726
3727 static void bnxt_free_tpa_info(struct bnxt *bp)
3728 {
3729 int i;
3730
3731 for (i = 0; i < bp->rx_nr_rings; i++) {
3732 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3733
3734 bnxt_free_one_tpa_info(bp, rxr);
3735 }
3736 }
3737
3738 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3739 struct bnxt_rx_ring_info *rxr)
3740 {
3741 struct rx_agg_cmp *agg;
3742 int i;
3743
3744 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3745 GFP_KERNEL);
3746 if (!rxr->rx_tpa)
3747 return -ENOMEM;
3748
3749 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3750 return 0;
3751 for (i = 0; i < bp->max_tpa; i++) {
3752 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3753 if (!agg)
3754 return -ENOMEM;
3755 rxr->rx_tpa[i].agg_arr = agg;
3756 }
3757 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3758 GFP_KERNEL);
3759 if (!rxr->rx_tpa_idx_map)
3760 return -ENOMEM;
3761
3762 return 0;
3763 }
3764
3765 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3766 {
3767 int i, rc;
3768
3769 bp->max_tpa = MAX_TPA;
3770 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3771 if (!bp->max_tpa_v2)
3772 return 0;
3773 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3774 }
3775
3776 for (i = 0; i < bp->rx_nr_rings; i++) {
3777 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3778
3779 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3780 if (rc)
3781 return rc;
3782 }
3783 return 0;
3784 }
3785
3786 static void bnxt_free_rx_rings(struct bnxt *bp)
3787 {
3788 int i;
3789
3790 if (!bp->rx_ring)
3791 return;
3792
3793 bnxt_free_tpa_info(bp);
3794 for (i = 0; i < bp->rx_nr_rings; i++) {
3795 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3796 struct bnxt_ring_struct *ring;
3797
3798 if (rxr->xdp_prog)
3799 bpf_prog_put(rxr->xdp_prog);
3800
3801 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3802 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3803
3804 page_pool_destroy(rxr->page_pool);
3805 page_pool_destroy(rxr->head_pool);
3806 rxr->page_pool = rxr->head_pool = NULL;
3807
3808 kfree(rxr->rx_agg_bmap);
3809 rxr->rx_agg_bmap = NULL;
3810
3811 ring = &rxr->rx_ring_struct;
3812 bnxt_free_ring(bp, &ring->ring_mem);
3813
3814 ring = &rxr->rx_agg_ring_struct;
3815 bnxt_free_ring(bp, &ring->ring_mem);
3816 }
3817 }
3818
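/* Create the page pool(s) for an RX ring.  The main pool is allowed to
 * hand out unreadable (device memory) netmem; if it does, a separate
 * pool of normal host pages is created for packet headers, otherwise
 * head_pool just takes an extra reference on the main pool.
 */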
3819 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3820 struct bnxt_rx_ring_info *rxr,
3821 int numa_node)
3822 {
3823 const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
3824 const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3825 struct page_pool_params pp = { 0 };
3826 struct page_pool *pool;
3827
3828 pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
3829 if (BNXT_RX_PAGE_MODE(bp))
3830 pp.pool_size += bp->rx_ring_size / rx_size_fac;
3831 pp.nid = numa_node;
3832 pp.netdev = bp->dev;
3833 pp.dev = &bp->pdev->dev;
3834 pp.dma_dir = bp->rx_dir;
3835 pp.max_len = PAGE_SIZE;
3836 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3837 PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3838 pp.queue_idx = rxr->bnapi->index;
3839
3840 pool = page_pool_create(&pp);
3841 if (IS_ERR(pool))
3842 return PTR_ERR(pool);
3843 rxr->page_pool = pool;
3844
3845 rxr->need_head_pool = page_pool_is_unreadable(pool);
3846 if (bnxt_separate_head_pool(rxr)) {
3847 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3848 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3849 pool = page_pool_create(&pp);
3850 if (IS_ERR(pool))
3851 goto err_destroy_pp;
3852 } else {
3853 page_pool_get(pool);
3854 }
3855 rxr->head_pool = pool;
3856
3857 return 0;
3858
3859 err_destroy_pp:
3860 page_pool_destroy(rxr->page_pool);
3861 rxr->page_pool = NULL;
3862 return PTR_ERR(pool);
3863 }
3864
3865 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3866 {
3867 page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3868 page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3869 }
3870
3871 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3872 {
3873 u16 mem_size;
3874
3875 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3876 mem_size = rxr->rx_agg_bmap_size / 8;
3877 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3878 if (!rxr->rx_agg_bmap)
3879 return -ENOMEM;
3880
3881 return 0;
3882 }
3883
3884 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3885 {
3886 int numa_node = dev_to_node(&bp->pdev->dev);
3887 int i, rc = 0, agg_rings = 0, cpu;
3888
3889 if (!bp->rx_ring)
3890 return -ENOMEM;
3891
3892 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3893 agg_rings = 1;
3894
3895 for (i = 0; i < bp->rx_nr_rings; i++) {
3896 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3897 struct bnxt_ring_struct *ring;
3898 int cpu_node;
3899
3900 ring = &rxr->rx_ring_struct;
3901
3902 cpu = cpumask_local_spread(i, numa_node);
3903 cpu_node = cpu_to_node(cpu);
3904 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3905 i, cpu_node);
3906 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3907 if (rc)
3908 return rc;
3909 bnxt_enable_rx_page_pool(rxr);
3910
3911 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3912 if (rc < 0)
3913 return rc;
3914
3915 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3916 MEM_TYPE_PAGE_POOL,
3917 rxr->page_pool);
3918 if (rc) {
3919 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3920 return rc;
3921 }
3922
3923 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3924 if (rc)
3925 return rc;
3926
3927 ring->grp_idx = i;
3928 if (agg_rings) {
3929 ring = &rxr->rx_agg_ring_struct;
3930 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3931 if (rc)
3932 return rc;
3933
3934 ring->grp_idx = i;
3935 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3936 if (rc)
3937 return rc;
3938 }
3939 }
3940 if (bp->flags & BNXT_FLAG_TPA)
3941 rc = bnxt_alloc_tpa_info(bp);
3942 return rc;
3943 }
3944
3945 static void bnxt_free_tx_rings(struct bnxt *bp)
3946 {
3947 int i;
3948 struct pci_dev *pdev = bp->pdev;
3949
3950 if (!bp->tx_ring)
3951 return;
3952
3953 for (i = 0; i < bp->tx_nr_rings; i++) {
3954 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3955 struct bnxt_ring_struct *ring;
3956
3957 if (txr->tx_push) {
3958 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3959 txr->tx_push, txr->tx_push_mapping);
3960 txr->tx_push = NULL;
3961 }
3962
3963 ring = &txr->tx_ring_struct;
3964
3965 bnxt_free_ring(bp, &ring->ring_mem);
3966 }
3967 }
3968
3969 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3970 ((tc) * (bp)->tx_nr_rings_per_tc)
3971
3972 #define BNXT_RING_TO_TC_OFF(bp, tx) \
3973 ((tx) % (bp)->tx_nr_rings_per_tc)
3974
3975 #define BNXT_RING_TO_TC(bp, tx) \
3976 ((tx) / (bp)->tx_nr_rings_per_tc)
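/* Example: with tx_nr_rings_per_tc == 4, TX ring 6 maps to TC 1
 * (BNXT_RING_TO_TC), at offset 2 within that TC (BNXT_RING_TO_TC_OFF),
 * and TC 1's rings start at ring 4 (BNXT_TC_TO_RING_BASE).
 */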
3977
3978 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3979 {
3980 int i, j, rc;
3981 struct pci_dev *pdev = bp->pdev;
3982
3983 bp->tx_push_size = 0;
3984 if (bp->tx_push_thresh) {
3985 int push_size;
3986
3987 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3988 bp->tx_push_thresh);
3989
3990 if (push_size > 256) {
3991 push_size = 0;
3992 bp->tx_push_thresh = 0;
3993 }
3994
3995 bp->tx_push_size = push_size;
3996 }
3997
3998 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3999 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4000 struct bnxt_ring_struct *ring;
4001 u8 qidx;
4002
4003 ring = &txr->tx_ring_struct;
4004
4005 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4006 if (rc)
4007 return rc;
4008
4009 ring->grp_idx = txr->bnapi->index;
4010 if (bp->tx_push_size) {
4011 dma_addr_t mapping;
4012
4013 /* One pre-allocated DMA buffer to back up the
4014 * TX push operation
4015 */
4016 txr->tx_push = dma_alloc_coherent(&pdev->dev,
4017 bp->tx_push_size,
4018 &txr->tx_push_mapping,
4019 GFP_KERNEL);
4020
4021 if (!txr->tx_push)
4022 return -ENOMEM;
4023
4024 mapping = txr->tx_push_mapping +
4025 sizeof(struct tx_push_bd);
4026 txr->data_mapping = cpu_to_le64(mapping);
4027 }
4028 qidx = bp->tc_to_qidx[j];
4029 ring->queue_id = bp->q_info[qidx].queue_id;
4030 spin_lock_init(&txr->xdp_tx_lock);
4031 if (i < bp->tx_nr_rings_xdp)
4032 continue;
4033 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4034 j++;
4035 }
4036 return 0;
4037 }
4038
4039 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4040 {
4041 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4042
4043 kfree(cpr->cp_desc_ring);
4044 cpr->cp_desc_ring = NULL;
4045 ring->ring_mem.pg_arr = NULL;
4046 kfree(cpr->cp_desc_mapping);
4047 cpr->cp_desc_mapping = NULL;
4048 ring->ring_mem.dma_arr = NULL;
4049 }
4050
4051 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4052 {
4053 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
4054 if (!cpr->cp_desc_ring)
4055 return -ENOMEM;
4056 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
4057 GFP_KERNEL);
4058 if (!cpr->cp_desc_mapping)
4059 return -ENOMEM;
4060 return 0;
4061 }
4062
4063 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4064 {
4065 int i;
4066
4067 if (!bp->bnapi)
4068 return;
4069 for (i = 0; i < bp->cp_nr_rings; i++) {
4070 struct bnxt_napi *bnapi = bp->bnapi[i];
4071
4072 if (!bnapi)
4073 continue;
4074 bnxt_free_cp_arrays(&bnapi->cp_ring);
4075 }
4076 }
4077
4078 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4079 {
4080 int i, n = bp->cp_nr_pages;
4081
4082 for (i = 0; i < bp->cp_nr_rings; i++) {
4083 struct bnxt_napi *bnapi = bp->bnapi[i];
4084 int rc;
4085
4086 if (!bnapi)
4087 continue;
4088 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4089 if (rc)
4090 return rc;
4091 }
4092 return 0;
4093 }
4094
4095 static void bnxt_free_cp_rings(struct bnxt *bp)
4096 {
4097 int i;
4098
4099 if (!bp->bnapi)
4100 return;
4101
4102 for (i = 0; i < bp->cp_nr_rings; i++) {
4103 struct bnxt_napi *bnapi = bp->bnapi[i];
4104 struct bnxt_cp_ring_info *cpr;
4105 struct bnxt_ring_struct *ring;
4106 int j;
4107
4108 if (!bnapi)
4109 continue;
4110
4111 cpr = &bnapi->cp_ring;
4112 ring = &cpr->cp_ring_struct;
4113
4114 bnxt_free_ring(bp, &ring->ring_mem);
4115
4116 if (!cpr->cp_ring_arr)
4117 continue;
4118
4119 for (j = 0; j < cpr->cp_ring_count; j++) {
4120 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4121
4122 ring = &cpr2->cp_ring_struct;
4123 bnxt_free_ring(bp, &ring->ring_mem);
4124 bnxt_free_cp_arrays(cpr2);
4125 }
4126 kfree(cpr->cp_ring_arr);
4127 cpr->cp_ring_arr = NULL;
4128 cpr->cp_ring_count = 0;
4129 }
4130 }
4131
4132 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4133 struct bnxt_cp_ring_info *cpr)
4134 {
4135 struct bnxt_ring_mem_info *rmem;
4136 struct bnxt_ring_struct *ring;
4137 int rc;
4138
4139 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4140 if (rc) {
4141 bnxt_free_cp_arrays(cpr);
4142 return -ENOMEM;
4143 }
4144 ring = &cpr->cp_ring_struct;
4145 rmem = &ring->ring_mem;
4146 rmem->nr_pages = bp->cp_nr_pages;
4147 rmem->page_size = HW_CMPD_RING_SIZE;
4148 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4149 rmem->dma_arr = cpr->cp_desc_mapping;
4150 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4151 rc = bnxt_alloc_ring(bp, rmem);
4152 if (rc) {
4153 bnxt_free_ring(bp, rmem);
4154 bnxt_free_cp_arrays(cpr);
4155 }
4156 return rc;
4157 }
4158
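/* Allocate the per-vector NQ ring and, on P5+ chips, the array of
 * completion sub-rings hanging off it: one for RX when the vector
 * services an RX ring, plus one for an XDP TX ring or one per TC for
 * regular TX rings.  Each sub-ring is linked back to its owner through
 * rx_cpr/tx_cpr.
 */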
4159 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4160 {
4161 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4162 int i, j, rc, ulp_msix;
4163 int tcs = bp->num_tc;
4164
4165 if (!tcs)
4166 tcs = 1;
4167 ulp_msix = bnxt_get_ulp_msix_num(bp);
4168 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4169 struct bnxt_napi *bnapi = bp->bnapi[i];
4170 struct bnxt_cp_ring_info *cpr, *cpr2;
4171 struct bnxt_ring_struct *ring;
4172 int cp_count = 0, k;
4173 int rx = 0, tx = 0;
4174
4175 if (!bnapi)
4176 continue;
4177
4178 cpr = &bnapi->cp_ring;
4179 cpr->bnapi = bnapi;
4180 ring = &cpr->cp_ring_struct;
4181
4182 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4183 if (rc)
4184 return rc;
4185
4186 ring->map_idx = ulp_msix + i;
4187
4188 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4189 continue;
4190
4191 if (i < bp->rx_nr_rings) {
4192 cp_count++;
4193 rx = 1;
4194 }
4195 if (i < bp->tx_nr_rings_xdp) {
4196 cp_count++;
4197 tx = 1;
4198 } else if ((sh && i < bp->tx_nr_rings) ||
4199 (!sh && i >= bp->rx_nr_rings)) {
4200 cp_count += tcs;
4201 tx = 1;
4202 }
4203
4204 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
4205 GFP_KERNEL);
4206 if (!cpr->cp_ring_arr)
4207 return -ENOMEM;
4208 cpr->cp_ring_count = cp_count;
4209
4210 for (k = 0; k < cp_count; k++) {
4211 cpr2 = &cpr->cp_ring_arr[k];
4212 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4213 if (rc)
4214 return rc;
4215 cpr2->bnapi = bnapi;
4216 cpr2->sw_stats = cpr->sw_stats;
4217 cpr2->cp_idx = k;
4218 if (!k && rx) {
4219 bp->rx_ring[i].rx_cpr = cpr2;
4220 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4221 } else {
4222 int n, tc = k - rx;
4223
4224 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4225 bp->tx_ring[n].tx_cpr = cpr2;
4226 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4227 }
4228 }
4229 if (tx)
4230 j++;
4231 }
4232 return 0;
4233 }
4234
4235 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4236 struct bnxt_rx_ring_info *rxr)
4237 {
4238 struct bnxt_ring_mem_info *rmem;
4239 struct bnxt_ring_struct *ring;
4240
4241 ring = &rxr->rx_ring_struct;
4242 rmem = &ring->ring_mem;
4243 rmem->nr_pages = bp->rx_nr_pages;
4244 rmem->page_size = HW_RXBD_RING_SIZE;
4245 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4246 rmem->dma_arr = rxr->rx_desc_mapping;
4247 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4248 rmem->vmem = (void **)&rxr->rx_buf_ring;
4249
4250 ring = &rxr->rx_agg_ring_struct;
4251 rmem = &ring->ring_mem;
4252 rmem->nr_pages = bp->rx_agg_nr_pages;
4253 rmem->page_size = HW_RXBD_RING_SIZE;
4254 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4255 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4256 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4257 rmem->vmem = (void **)&rxr->rx_agg_ring;
4258 }
4259
4260 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4261 struct bnxt_rx_ring_info *rxr)
4262 {
4263 struct bnxt_ring_mem_info *rmem;
4264 struct bnxt_ring_struct *ring;
4265 int i;
4266
4267 rxr->page_pool->p.napi = NULL;
4268 rxr->page_pool = NULL;
4269 rxr->head_pool->p.napi = NULL;
4270 rxr->head_pool = NULL;
4271 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4272
4273 ring = &rxr->rx_ring_struct;
4274 rmem = &ring->ring_mem;
4275 rmem->pg_tbl = NULL;
4276 rmem->pg_tbl_map = 0;
4277 for (i = 0; i < rmem->nr_pages; i++) {
4278 rmem->pg_arr[i] = NULL;
4279 rmem->dma_arr[i] = 0;
4280 }
4281 *rmem->vmem = NULL;
4282
4283 ring = &rxr->rx_agg_ring_struct;
4284 rmem = &ring->ring_mem;
4285 rmem->pg_tbl = NULL;
4286 rmem->pg_tbl_map = 0;
4287 for (i = 0; i < rmem->nr_pages; i++) {
4288 rmem->pg_arr[i] = NULL;
4289 rmem->dma_arr[i] = 0;
4290 }
4291 *rmem->vmem = NULL;
4292 }
4293
4294 static void bnxt_init_ring_struct(struct bnxt *bp)
4295 {
4296 int i, j;
4297
4298 for (i = 0; i < bp->cp_nr_rings; i++) {
4299 struct bnxt_napi *bnapi = bp->bnapi[i];
4300 struct bnxt_ring_mem_info *rmem;
4301 struct bnxt_cp_ring_info *cpr;
4302 struct bnxt_rx_ring_info *rxr;
4303 struct bnxt_tx_ring_info *txr;
4304 struct bnxt_ring_struct *ring;
4305
4306 if (!bnapi)
4307 continue;
4308
4309 cpr = &bnapi->cp_ring;
4310 ring = &cpr->cp_ring_struct;
4311 rmem = &ring->ring_mem;
4312 rmem->nr_pages = bp->cp_nr_pages;
4313 rmem->page_size = HW_CMPD_RING_SIZE;
4314 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4315 rmem->dma_arr = cpr->cp_desc_mapping;
4316 rmem->vmem_size = 0;
4317
4318 rxr = bnapi->rx_ring;
4319 if (!rxr)
4320 goto skip_rx;
4321
4322 ring = &rxr->rx_ring_struct;
4323 rmem = &ring->ring_mem;
4324 rmem->nr_pages = bp->rx_nr_pages;
4325 rmem->page_size = HW_RXBD_RING_SIZE;
4326 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4327 rmem->dma_arr = rxr->rx_desc_mapping;
4328 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4329 rmem->vmem = (void **)&rxr->rx_buf_ring;
4330
4331 ring = &rxr->rx_agg_ring_struct;
4332 rmem = &ring->ring_mem;
4333 rmem->nr_pages = bp->rx_agg_nr_pages;
4334 rmem->page_size = HW_RXBD_RING_SIZE;
4335 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4336 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4337 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4338 rmem->vmem = (void **)&rxr->rx_agg_ring;
4339
4340 skip_rx:
4341 bnxt_for_each_napi_tx(j, bnapi, txr) {
4342 ring = &txr->tx_ring_struct;
4343 rmem = &ring->ring_mem;
4344 rmem->nr_pages = bp->tx_nr_pages;
4345 rmem->page_size = HW_TXBD_RING_SIZE;
4346 rmem->pg_arr = (void **)txr->tx_desc_ring;
4347 rmem->dma_arr = txr->tx_desc_mapping;
4348 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4349 rmem->vmem = (void **)&txr->tx_buf_ring;
4350 }
4351 }
4352 }
4353
4354 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4355 {
4356 int i;
4357 u32 prod;
4358 struct rx_bd **rx_buf_ring;
4359
4360 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4361 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4362 int j;
4363 struct rx_bd *rxbd;
4364
4365 rxbd = rx_buf_ring[i];
4366 if (!rxbd)
4367 continue;
4368
4369 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4370 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4371 rxbd->rx_bd_opaque = prod;
4372 }
4373 }
4374 }
4375
4376 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4377 struct bnxt_rx_ring_info *rxr,
4378 int ring_nr)
4379 {
4380 u32 prod;
4381 int i;
4382
4383 prod = rxr->rx_prod;
4384 for (i = 0; i < bp->rx_ring_size; i++) {
4385 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4386 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4387 ring_nr, i, bp->rx_ring_size);
4388 break;
4389 }
4390 prod = NEXT_RX(prod);
4391 }
4392 rxr->rx_prod = prod;
4393 }
4394
4395 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4396 struct bnxt_rx_ring_info *rxr,
4397 int ring_nr)
4398 {
4399 u32 prod;
4400 int i;
4401
4402 prod = rxr->rx_agg_prod;
4403 for (i = 0; i < bp->rx_agg_ring_size; i++) {
4404 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4405 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4406 ring_nr, i, bp->rx_agg_ring_size);
4407 break;
4408 }
4409 prod = NEXT_RX_AGG(prod);
4410 }
4411 rxr->rx_agg_prod = prod;
4412 }
4413
4414 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4415 struct bnxt_rx_ring_info *rxr)
4416 {
4417 dma_addr_t mapping;
4418 u8 *data;
4419 int i;
4420
4421 for (i = 0; i < bp->max_tpa; i++) {
4422 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4423 GFP_KERNEL);
4424 if (!data)
4425 return -ENOMEM;
4426
4427 rxr->rx_tpa[i].data = data;
4428 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4429 rxr->rx_tpa[i].mapping = mapping;
4430 }
4431
4432 return 0;
4433 }
4434
4435 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4436 {
4437 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4438 int rc;
4439
4440 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4441
4442 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4443 return 0;
4444
4445 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4446
4447 if (rxr->rx_tpa) {
4448 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4449 if (rc)
4450 return rc;
4451 }
4452 return 0;
4453 }
4454
4455 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4456 struct bnxt_rx_ring_info *rxr)
4457 {
4458 struct bnxt_ring_struct *ring;
4459 u32 type;
4460
4461 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4462 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4463
4464 if (NET_IP_ALIGN == 2)
4465 type |= RX_BD_FLAGS_SOP;
4466
4467 ring = &rxr->rx_ring_struct;
4468 bnxt_init_rxbd_pages(ring, type);
4469 ring->fw_ring_id = INVALID_HW_RING_ID;
4470 }
4471
4472 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4473 struct bnxt_rx_ring_info *rxr)
4474 {
4475 struct bnxt_ring_struct *ring;
4476 u32 type;
4477
4478 ring = &rxr->rx_agg_ring_struct;
4479 ring->fw_ring_id = INVALID_HW_RING_ID;
4480 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4481 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4482 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4483
4484 bnxt_init_rxbd_pages(ring, type);
4485 }
4486 }
4487
4488 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4489 {
4490 struct bnxt_rx_ring_info *rxr;
4491
4492 rxr = &bp->rx_ring[ring_nr];
4493 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4494
4495 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4496 &rxr->bnapi->napi);
4497
4498 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4499 bpf_prog_add(bp->xdp_prog, 1);
4500 rxr->xdp_prog = bp->xdp_prog;
4501 }
4502
4503 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4504
4505 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4506 }
4507
4508 static void bnxt_init_cp_rings(struct bnxt *bp)
4509 {
4510 int i, j;
4511
4512 for (i = 0; i < bp->cp_nr_rings; i++) {
4513 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4514 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4515
4516 ring->fw_ring_id = INVALID_HW_RING_ID;
4517 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4518 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4519 if (!cpr->cp_ring_arr)
4520 continue;
4521 for (j = 0; j < cpr->cp_ring_count; j++) {
4522 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4523
4524 ring = &cpr2->cp_ring_struct;
4525 ring->fw_ring_id = INVALID_HW_RING_ID;
4526 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4527 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4528 }
4529 }
4530 }
4531
4532 static int bnxt_init_rx_rings(struct bnxt *bp)
4533 {
4534 int i, rc = 0;
4535
4536 if (BNXT_RX_PAGE_MODE(bp)) {
4537 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4538 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4539 } else {
4540 bp->rx_offset = BNXT_RX_OFFSET;
4541 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4542 }
4543
4544 for (i = 0; i < bp->rx_nr_rings; i++) {
4545 rc = bnxt_init_one_rx_ring(bp, i);
4546 if (rc)
4547 break;
4548 }
4549
4550 return rc;
4551 }
4552
4553 static int bnxt_init_tx_rings(struct bnxt *bp)
4554 {
4555 u16 i;
4556
4557 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4558 BNXT_MIN_TX_DESC_CNT);
4559
4560 for (i = 0; i < bp->tx_nr_rings; i++) {
4561 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4562 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4563
4564 ring->fw_ring_id = INVALID_HW_RING_ID;
4565
4566 if (i >= bp->tx_nr_rings_xdp)
4567 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4568 NETDEV_QUEUE_TYPE_TX,
4569 &txr->bnapi->napi);
4570 }
4571
4572 return 0;
4573 }
4574
4575 static void bnxt_free_ring_grps(struct bnxt *bp)
4576 {
4577 kfree(bp->grp_info);
4578 bp->grp_info = NULL;
4579 }
4580
4581 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4582 {
4583 int i;
4584
4585 if (irq_re_init) {
4586 bp->grp_info = kcalloc(bp->cp_nr_rings,
4587 sizeof(struct bnxt_ring_grp_info),
4588 GFP_KERNEL);
4589 if (!bp->grp_info)
4590 return -ENOMEM;
4591 }
4592 for (i = 0; i < bp->cp_nr_rings; i++) {
4593 if (irq_re_init)
4594 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4595 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4596 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4597 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4598 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4599 }
4600 return 0;
4601 }
4602
4603 static void bnxt_free_vnics(struct bnxt *bp)
4604 {
4605 kfree(bp->vnic_info);
4606 bp->vnic_info = NULL;
4607 bp->nr_vnics = 0;
4608 }
4609
4610 static int bnxt_alloc_vnics(struct bnxt *bp)
4611 {
4612 int num_vnics = 1;
4613
4614 #ifdef CONFIG_RFS_ACCEL
4615 if (bp->flags & BNXT_FLAG_RFS) {
4616 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4617 num_vnics++;
4618 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4619 num_vnics += bp->rx_nr_rings;
4620 }
4621 #endif
4622
4623 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4624 num_vnics++;
4625
4626 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4627 GFP_KERNEL);
4628 if (!bp->vnic_info)
4629 return -ENOMEM;
4630
4631 bp->nr_vnics = num_vnics;
4632 return 0;
4633 }
4634
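/* Initialize software VNIC state.  The default VNIC's RSS hash key is
 * randomized only if it has been neither validated nor updated by the
 * user; its first 8 bytes are also cached as the Toeplitz prefix used
 * for software hash calculations, and every other VNIC simply copies
 * the default VNIC's key.
 */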
4635 static void bnxt_init_vnics(struct bnxt *bp)
4636 {
4637 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4638 int i;
4639
4640 for (i = 0; i < bp->nr_vnics; i++) {
4641 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4642 int j;
4643
4644 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4645 vnic->vnic_id = i;
4646 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4647 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4648
4649 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4650
4651 if (bp->vnic_info[i].rss_hash_key) {
4652 if (i == BNXT_VNIC_DEFAULT) {
4653 u8 *key = (void *)vnic->rss_hash_key;
4654 int k;
4655
4656 if (!bp->rss_hash_key_valid &&
4657 !bp->rss_hash_key_updated) {
4658 get_random_bytes(bp->rss_hash_key,
4659 HW_HASH_KEY_SIZE);
4660 bp->rss_hash_key_updated = true;
4661 }
4662
4663 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4664 HW_HASH_KEY_SIZE);
4665
4666 if (!bp->rss_hash_key_updated)
4667 continue;
4668
4669 bp->rss_hash_key_updated = false;
4670 bp->rss_hash_key_valid = true;
4671
4672 bp->toeplitz_prefix = 0;
4673 for (k = 0; k < 8; k++) {
4674 bp->toeplitz_prefix <<= 8;
4675 bp->toeplitz_prefix |= key[k];
4676 }
4677 } else {
4678 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4679 HW_HASH_KEY_SIZE);
4680 }
4681 }
4682 }
4683 }
4684
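/* Number of ring pages needed for ring_size descriptors: the count of
 * full pages is incremented once and then raised to a power of two.
 * E.g. with 128 descriptors per page, a 1000-entry ring gives 7 full
 * pages and ends up using 8.
 */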
4685 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4686 {
4687 int pages;
4688
4689 pages = ring_size / desc_per_pg;
4690
4691 if (!pages)
4692 return 1;
4693
4694 pages++;
4695
4696 while (pages & (pages - 1))
4697 pages++;
4698
4699 return pages;
4700 }
4701
4702 void bnxt_set_tpa_flags(struct bnxt *bp)
4703 {
4704 bp->flags &= ~BNXT_FLAG_TPA;
4705 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4706 return;
4707 if (bp->dev->features & NETIF_F_LRO)
4708 bp->flags |= BNXT_FLAG_LRO;
4709 else if (bp->dev->features & NETIF_F_GRO_HW)
4710 bp->flags |= BNXT_FLAG_GRO;
4711 }
4712
4713 static void bnxt_init_ring_params(struct bnxt *bp)
4714 {
4715 unsigned int rx_size;
4716
4717 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4718 /* Try to fit 4 chunks into a 4k page */
4719 rx_size = SZ_1K -
4720 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4721 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4722 }
4723
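/* Derive the ring geometry from the configured ring sizes and MTU: RX
 * buffer size and headroom, whether the aggregation (jumbo) ring is
 * needed and how big it must be, and the completion ring size, which
 * must cover worst-case RX/TPA completions (32 bytes each) plus TX
 * completions.
 */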
4724 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4725 * be set on entry.
4726 */
4727 void bnxt_set_ring_params(struct bnxt *bp)
4728 {
4729 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4730 u32 agg_factor = 0, agg_ring_size = 0;
4731
4732 /* 8 for CRC and VLAN */
4733 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4734
4735 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4736 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4737
4738 ring_size = bp->rx_ring_size;
4739 bp->rx_agg_ring_size = 0;
4740 bp->rx_agg_nr_pages = 0;
4741
4742 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4743 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4744
4745 bp->flags &= ~BNXT_FLAG_JUMBO;
4746 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4747 u32 jumbo_factor;
4748
4749 bp->flags |= BNXT_FLAG_JUMBO;
4750 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4751 if (jumbo_factor > agg_factor)
4752 agg_factor = jumbo_factor;
4753 }
4754 if (agg_factor) {
4755 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4756 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4757 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4758 bp->rx_ring_size, ring_size);
4759 bp->rx_ring_size = ring_size;
4760 }
4761 agg_ring_size = ring_size * agg_factor;
4762
4763 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4764 RX_DESC_CNT);
4765 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4766 u32 tmp = agg_ring_size;
4767
4768 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4769 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4770 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4771 tmp, agg_ring_size);
4772 }
4773 bp->rx_agg_ring_size = agg_ring_size;
4774 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4775
4776 if (BNXT_RX_PAGE_MODE(bp)) {
4777 rx_space = PAGE_SIZE;
4778 rx_size = PAGE_SIZE -
4779 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4780 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4781 } else {
4782 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4783 bp->rx_copybreak,
4784 bp->dev->cfg_pending->hds_thresh);
4785 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4786 rx_space = rx_size + NET_SKB_PAD +
4787 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4788 }
4789 }
4790
4791 bp->rx_buf_use_size = rx_size;
4792 bp->rx_buf_size = rx_space;
4793
4794 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4795 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4796
4797 ring_size = bp->tx_ring_size;
4798 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4799 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4800
4801 max_rx_cmpl = bp->rx_ring_size;
4802 /* MAX TPA needs to be added because TPA_START completions are
4803 * immediately recycled, so the TPA completions are not bound by
4804 * the RX ring size.
4805 */
4806 if (bp->flags & BNXT_FLAG_TPA)
4807 max_rx_cmpl += bp->max_tpa;
4808 /* RX and TPA completions are 32-byte, all others are 16-byte */
4809 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4810 bp->cp_ring_size = ring_size;
4811
4812 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4813 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4814 bp->cp_nr_pages = MAX_CP_PAGES;
4815 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4816 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4817 ring_size, bp->cp_ring_size);
4818 }
4819 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4820 bp->cp_ring_mask = bp->cp_bit - 1;
4821 }
4822
4823 /* Changing allocation mode of RX rings.
4824 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4825 */
4826 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4827 {
4828 struct net_device *dev = bp->dev;
4829
4830 if (page_mode) {
4831 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4832 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4833
4834 if (bp->xdp_prog->aux->xdp_has_frags)
4835 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4836 else
4837 dev->max_mtu =
4838 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4839 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4840 bp->flags |= BNXT_FLAG_JUMBO;
4841 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4842 } else {
4843 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4844 bp->rx_skb_func = bnxt_rx_page_skb;
4845 }
4846 bp->rx_dir = DMA_BIDIRECTIONAL;
4847 } else {
4848 dev->max_mtu = bp->max_mtu;
4849 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4850 bp->rx_dir = DMA_FROM_DEVICE;
4851 bp->rx_skb_func = bnxt_rx_skb;
4852 }
4853 }
4854
4855 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4856 {
4857 __bnxt_set_rx_skb_mode(bp, page_mode);
4858
4859 if (!page_mode) {
4860 int rx, tx;
4861
4862 bnxt_get_max_rings(bp, &rx, &tx, true);
4863 if (rx > 1) {
4864 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4865 bp->dev->hw_features |= NETIF_F_LRO;
4866 }
4867 }
4868
4869 /* Update LRO and GRO_HW availability */
4870 netdev_update_features(bp->dev);
4871 }
4872
4873 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4874 {
4875 int i;
4876 struct bnxt_vnic_info *vnic;
4877 struct pci_dev *pdev = bp->pdev;
4878
4879 if (!bp->vnic_info)
4880 return;
4881
4882 for (i = 0; i < bp->nr_vnics; i++) {
4883 vnic = &bp->vnic_info[i];
4884
4885 kfree(vnic->fw_grp_ids);
4886 vnic->fw_grp_ids = NULL;
4887
4888 kfree(vnic->uc_list);
4889 vnic->uc_list = NULL;
4890
4891 if (vnic->mc_list) {
4892 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4893 vnic->mc_list, vnic->mc_list_mapping);
4894 vnic->mc_list = NULL;
4895 }
4896
4897 if (vnic->rss_table) {
4898 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4899 vnic->rss_table,
4900 vnic->rss_table_dma_addr);
4901 vnic->rss_table = NULL;
4902 }
4903
4904 vnic->rss_hash_key = NULL;
4905 vnic->flags = 0;
4906 }
4907 }
4908
4909 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4910 {
4911 int i, rc = 0, size;
4912 struct bnxt_vnic_info *vnic;
4913 struct pci_dev *pdev = bp->pdev;
4914 int max_rings;
4915
4916 for (i = 0; i < bp->nr_vnics; i++) {
4917 vnic = &bp->vnic_info[i];
4918
4919 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4920 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4921
4922 if (mem_size > 0) {
4923 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4924 if (!vnic->uc_list) {
4925 rc = -ENOMEM;
4926 goto out;
4927 }
4928 }
4929 }
4930
4931 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4932 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4933 vnic->mc_list =
4934 dma_alloc_coherent(&pdev->dev,
4935 vnic->mc_list_size,
4936 &vnic->mc_list_mapping,
4937 GFP_KERNEL);
4938 if (!vnic->mc_list) {
4939 rc = -ENOMEM;
4940 goto out;
4941 }
4942 }
4943
4944 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4945 goto vnic_skip_grps;
4946
4947 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4948 max_rings = bp->rx_nr_rings;
4949 else
4950 max_rings = 1;
4951
4952 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4953 if (!vnic->fw_grp_ids) {
4954 rc = -ENOMEM;
4955 goto out;
4956 }
4957 vnic_skip_grps:
4958 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4959 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4960 continue;
4961
4962 /* Allocate rss table and hash key */
4963 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4964 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4965 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4966
4967 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4968 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4969 vnic->rss_table_size,
4970 &vnic->rss_table_dma_addr,
4971 GFP_KERNEL);
4972 if (!vnic->rss_table) {
4973 rc = -ENOMEM;
4974 goto out;
4975 }
4976
4977 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4978 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4979 }
4980 return 0;
4981
4982 out:
4983 return rc;
4984 }
4985
4986 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4987 {
4988 struct bnxt_hwrm_wait_token *token;
4989
4990 dma_pool_destroy(bp->hwrm_dma_pool);
4991 bp->hwrm_dma_pool = NULL;
4992
4993 rcu_read_lock();
4994 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4995 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4996 rcu_read_unlock();
4997 }
4998
4999 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5000 {
5001 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5002 BNXT_HWRM_DMA_SIZE,
5003 BNXT_HWRM_DMA_ALIGN, 0);
5004 if (!bp->hwrm_dma_pool)
5005 return -ENOMEM;
5006
5007 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5008
5009 return 0;
5010 }
5011
5012 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5013 {
5014 kfree(stats->hw_masks);
5015 stats->hw_masks = NULL;
5016 kfree(stats->sw_stats);
5017 stats->sw_stats = NULL;
5018 if (stats->hw_stats) {
5019 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5020 stats->hw_stats_map);
5021 stats->hw_stats = NULL;
5022 }
5023 }
5024
5025 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5026 bool alloc_masks)
5027 {
5028 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5029 &stats->hw_stats_map, GFP_KERNEL);
5030 if (!stats->hw_stats)
5031 return -ENOMEM;
5032
5033 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5034 if (!stats->sw_stats)
5035 goto stats_mem_err;
5036
5037 if (alloc_masks) {
5038 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5039 if (!stats->hw_masks)
5040 goto stats_mem_err;
5041 }
5042 return 0;
5043
5044 stats_mem_err:
5045 bnxt_free_stats_mem(bp, stats);
5046 return -ENOMEM;
5047 }
5048
5049 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5050 {
5051 int i;
5052
5053 for (i = 0; i < count; i++)
5054 mask_arr[i] = mask;
5055 }
5056
5057 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5058 {
5059 int i;
5060
5061 for (i = 0; i < count; i++)
5062 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5063 }
5064
5065 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5066 struct bnxt_stats_mem *stats)
5067 {
5068 struct hwrm_func_qstats_ext_output *resp;
5069 struct hwrm_func_qstats_ext_input *req;
5070 __le64 *hw_masks;
5071 int rc;
5072
5073 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5074 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5075 return -EOPNOTSUPP;
5076
5077 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5078 if (rc)
5079 return rc;
5080
5081 req->fid = cpu_to_le16(0xffff);
5082 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5083
5084 resp = hwrm_req_hold(bp, req);
5085 rc = hwrm_req_send(bp, req);
5086 if (!rc) {
5087 hw_masks = &resp->rx_ucast_pkts;
5088 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5089 }
5090 hwrm_req_drop(bp, req);
5091 return rc;
5092 }
5093
5094 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5095 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5096
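/* Query the firmware for the effective width of each hardware counter
 * (the COUNTER_MASK variants of the stats requests) so that counter
 * wrap can be accounted for.  When the query is unsupported or fails,
 * conservative defaults are used: 48-bit masks for P5+ ring counters,
 * 64-bit otherwise, and 40-bit masks for the port counters.
 */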
5097 static void bnxt_init_stats(struct bnxt *bp)
5098 {
5099 struct bnxt_napi *bnapi = bp->bnapi[0];
5100 struct bnxt_cp_ring_info *cpr;
5101 struct bnxt_stats_mem *stats;
5102 __le64 *rx_stats, *tx_stats;
5103 int rc, rx_count, tx_count;
5104 u64 *rx_masks, *tx_masks;
5105 u64 mask;
5106 u8 flags;
5107
5108 cpr = &bnapi->cp_ring;
5109 stats = &cpr->stats;
5110 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5111 if (rc) {
5112 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5113 mask = (1ULL << 48) - 1;
5114 else
5115 mask = -1ULL;
5116 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5117 }
5118 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5119 stats = &bp->port_stats;
5120 rx_stats = stats->hw_stats;
5121 rx_masks = stats->hw_masks;
5122 rx_count = sizeof(struct rx_port_stats) / 8;
5123 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5124 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5125 tx_count = sizeof(struct tx_port_stats) / 8;
5126
5127 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5128 rc = bnxt_hwrm_port_qstats(bp, flags);
5129 if (rc) {
5130 mask = (1ULL << 40) - 1;
5131
5132 bnxt_fill_masks(rx_masks, mask, rx_count);
5133 bnxt_fill_masks(tx_masks, mask, tx_count);
5134 } else {
5135 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5136 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5137 bnxt_hwrm_port_qstats(bp, 0);
5138 }
5139 }
5140 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5141 stats = &bp->rx_port_stats_ext;
5142 rx_stats = stats->hw_stats;
5143 rx_masks = stats->hw_masks;
5144 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5145 stats = &bp->tx_port_stats_ext;
5146 tx_stats = stats->hw_stats;
5147 tx_masks = stats->hw_masks;
5148 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5149
5150 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5151 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5152 if (rc) {
5153 mask = (1ULL << 40) - 1;
5154
5155 bnxt_fill_masks(rx_masks, mask, rx_count);
5156 if (tx_stats)
5157 bnxt_fill_masks(tx_masks, mask, tx_count);
5158 } else {
5159 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5160 if (tx_stats)
5161 bnxt_copy_hw_masks(tx_masks, tx_stats,
5162 tx_count);
5163 bnxt_hwrm_port_qstats_ext(bp, 0);
5164 }
5165 }
5166 }
5167
5168 static void bnxt_free_port_stats(struct bnxt *bp)
5169 {
5170 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5171 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5172
5173 bnxt_free_stats_mem(bp, &bp->port_stats);
5174 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5175 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5176 }
5177
5178 static void bnxt_free_ring_stats(struct bnxt *bp)
5179 {
5180 int i;
5181
5182 if (!bp->bnapi)
5183 return;
5184
5185 for (i = 0; i < bp->cp_nr_rings; i++) {
5186 struct bnxt_napi *bnapi = bp->bnapi[i];
5187 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5188
5189 bnxt_free_stats_mem(bp, &cpr->stats);
5190
5191 kfree(cpr->sw_stats);
5192 cpr->sw_stats = NULL;
5193 }
5194 }
5195
5196 static int bnxt_alloc_stats(struct bnxt *bp)
5197 {
5198 u32 size, i;
5199 int rc;
5200
5201 size = bp->hw_ring_stats_size;
5202
5203 for (i = 0; i < bp->cp_nr_rings; i++) {
5204 struct bnxt_napi *bnapi = bp->bnapi[i];
5205 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5206
5207 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
5208 if (!cpr->sw_stats)
5209 return -ENOMEM;
5210
5211 cpr->stats.len = size;
5212 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5213 if (rc)
5214 return rc;
5215
5216 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5217 }
5218
5219 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5220 return 0;
5221
5222 if (bp->port_stats.hw_stats)
5223 goto alloc_ext_stats;
5224
5225 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5226 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5227 if (rc)
5228 return rc;
5229
5230 bp->flags |= BNXT_FLAG_PORT_STATS;
5231
5232 alloc_ext_stats:
5233 /* Display extended statistics only if FW supports it */
5234 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5235 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5236 return 0;
5237
5238 if (bp->rx_port_stats_ext.hw_stats)
5239 goto alloc_tx_ext_stats;
5240
5241 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5242 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5243 /* Extended stats are optional */
5244 if (rc)
5245 return 0;
5246
5247 alloc_tx_ext_stats:
5248 if (bp->tx_port_stats_ext.hw_stats)
5249 return 0;
5250
5251 if (bp->hwrm_spec_code >= 0x10902 ||
5252 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5253 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5254 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5255 /* Extended stats are optional */
5256 if (rc)
5257 return 0;
5258 }
5259 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5260 return 0;
5261 }
5262
5263 static void bnxt_clear_ring_indices(struct bnxt *bp)
5264 {
5265 int i, j;
5266
5267 if (!bp->bnapi)
5268 return;
5269
5270 for (i = 0; i < bp->cp_nr_rings; i++) {
5271 struct bnxt_napi *bnapi = bp->bnapi[i];
5272 struct bnxt_cp_ring_info *cpr;
5273 struct bnxt_rx_ring_info *rxr;
5274 struct bnxt_tx_ring_info *txr;
5275
5276 if (!bnapi)
5277 continue;
5278
5279 cpr = &bnapi->cp_ring;
5280 cpr->cp_raw_cons = 0;
5281
5282 bnxt_for_each_napi_tx(j, bnapi, txr) {
5283 txr->tx_prod = 0;
5284 txr->tx_cons = 0;
5285 txr->tx_hw_cons = 0;
5286 }
5287
5288 rxr = bnapi->rx_ring;
5289 if (rxr) {
5290 rxr->rx_prod = 0;
5291 rxr->rx_agg_prod = 0;
5292 rxr->rx_sw_agg_prod = 0;
5293 rxr->rx_next_cons = 0;
5294 }
5295 bnapi->events = 0;
5296 }
5297 }
5298
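/* User-created filters are tracked on usr_fltr_list: L2 filters with an
 * explicit ring destination and ntuple filters marked no-aging.  This
 * lets the driver find and re-install them later, unlike auto-created
 * aRFS filters which are simply aged out.
 */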
5299 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5300 {
5301 u8 type = fltr->type, flags = fltr->flags;
5302
5303 INIT_LIST_HEAD(&fltr->list);
5304 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5305 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5306 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5307 }
5308
5309 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5310 {
5311 if (!list_empty(&fltr->list))
5312 list_del_init(&fltr->list);
5313 }
5314
5315 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5316 {
5317 struct bnxt_filter_base *usr_fltr, *tmp;
5318
5319 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5320 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5321 continue;
5322 bnxt_del_one_usr_fltr(bp, usr_fltr);
5323 }
5324 }
5325
5326 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5327 {
5328 hlist_del(&fltr->hash);
5329 bnxt_del_one_usr_fltr(bp, fltr);
5330 if (fltr->flags) {
5331 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5332 bp->ntp_fltr_count--;
5333 }
5334 kfree(fltr);
5335 }
5336
5337 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5338 {
5339 int i;
5340
5341 netdev_assert_locked_or_invisible(bp->dev);
5342
5343 /* We hold the netdev instance lock and all our NAPIs have been
5344 * disabled, so it's safe to delete the hash table.
5345 */
5346 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5347 struct hlist_head *head;
5348 struct hlist_node *tmp;
5349 struct bnxt_ntuple_filter *fltr;
5350
5351 head = &bp->ntp_fltr_hash_tbl[i];
5352 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5353 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5354 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5355 !list_empty(&fltr->base.list)))
5356 continue;
5357 bnxt_del_fltr(bp, &fltr->base);
5358 }
5359 }
5360 if (!all)
5361 return;
5362
5363 bitmap_free(bp->ntp_fltr_bmap);
5364 bp->ntp_fltr_bmap = NULL;
5365 bp->ntp_fltr_count = 0;
5366 }
5367
5368 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5369 {
5370 int i, rc = 0;
5371
5372 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5373 return 0;
5374
5375 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5376 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5377
5378 bp->ntp_fltr_count = 0;
5379 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5380
5381 if (!bp->ntp_fltr_bmap)
5382 rc = -ENOMEM;
5383
5384 return rc;
5385 }
5386
5387 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5388 {
5389 int i;
5390
5391 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5392 struct hlist_head *head;
5393 struct hlist_node *tmp;
5394 struct bnxt_l2_filter *fltr;
5395
5396 head = &bp->l2_fltr_hash_tbl[i];
5397 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5398 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5399 !list_empty(&fltr->base.list)))
5400 continue;
5401 bnxt_del_fltr(bp, &fltr->base);
5402 }
5403 }
5404 }
5405
5406 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5407 {
5408 int i;
5409
5410 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5411 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5412 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5413 }
5414
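/* Free VNIC attributes, rings and filters.  With irq_re_init, also
 * free the stats, ring groups, VNICs and the bnapi/ring arrays;
 * otherwise only reset the ring indices for reuse.
 */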
5415 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5416 {
5417 bnxt_free_vnic_attributes(bp);
5418 bnxt_free_tx_rings(bp);
5419 bnxt_free_rx_rings(bp);
5420 bnxt_free_cp_rings(bp);
5421 bnxt_free_all_cp_arrays(bp);
5422 bnxt_free_ntp_fltrs(bp, false);
5423 bnxt_free_l2_filters(bp, false);
5424 if (irq_re_init) {
5425 bnxt_free_ring_stats(bp);
5426 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5427 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5428 bnxt_free_port_stats(bp);
5429 bnxt_free_ring_grps(bp);
5430 bnxt_free_vnics(bp);
5431 kfree(bp->tx_ring_map);
5432 bp->tx_ring_map = NULL;
5433 kfree(bp->tx_ring);
5434 bp->tx_ring = NULL;
5435 kfree(bp->rx_ring);
5436 bp->rx_ring = NULL;
5437 kfree(bp->bnapi);
5438 bp->bnapi = NULL;
5439 } else {
5440 bnxt_clear_ring_indices(bp);
5441 }
5442 }
5443
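/* Allocate the bnapi array, RX/TX ring arrays, stats, filter tables
 * and VNICs (when irq_re_init), then the completion ring arrays, ring
 * memory and VNIC attributes.  On failure everything is freed again.
 */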
5444 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5445 {
5446 int i, j, rc, size, arr_size;
5447 void *bnapi;
5448
5449 if (irq_re_init) {
5450 /* Allocate the bnapi pointer array and the memory block for
5451 * all queues
5452 */
5453 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5454 bp->cp_nr_rings);
5455 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5456 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5457 if (!bnapi)
5458 return -ENOMEM;
5459
5460 bp->bnapi = bnapi;
5461 bnapi += arr_size;
5462 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5463 bp->bnapi[i] = bnapi;
5464 bp->bnapi[i]->index = i;
5465 bp->bnapi[i]->bp = bp;
5466 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5467 struct bnxt_cp_ring_info *cpr =
5468 &bp->bnapi[i]->cp_ring;
5469
5470 cpr->cp_ring_struct.ring_mem.flags =
5471 BNXT_RMEM_RING_PTE_FLAG;
5472 }
5473 }
5474
5475 bp->rx_ring = kcalloc(bp->rx_nr_rings,
5476 sizeof(struct bnxt_rx_ring_info),
5477 GFP_KERNEL);
5478 if (!bp->rx_ring)
5479 return -ENOMEM;
5480
5481 for (i = 0; i < bp->rx_nr_rings; i++) {
5482 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5483
5484 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5485 rxr->rx_ring_struct.ring_mem.flags =
5486 BNXT_RMEM_RING_PTE_FLAG;
5487 rxr->rx_agg_ring_struct.ring_mem.flags =
5488 BNXT_RMEM_RING_PTE_FLAG;
5489 } else {
5490 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5491 }
5492 rxr->bnapi = bp->bnapi[i];
5493 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5494 }
5495
5496 bp->tx_ring = kcalloc(bp->tx_nr_rings,
5497 sizeof(struct bnxt_tx_ring_info),
5498 GFP_KERNEL);
5499 if (!bp->tx_ring)
5500 return -ENOMEM;
5501
5502 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5503 GFP_KERNEL);
5504
5505 if (!bp->tx_ring_map)
5506 return -ENOMEM;
5507
5508 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5509 j = 0;
5510 else
5511 j = bp->rx_nr_rings;
5512
5513 for (i = 0; i < bp->tx_nr_rings; i++) {
5514 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5515 struct bnxt_napi *bnapi2;
5516
5517 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5518 txr->tx_ring_struct.ring_mem.flags =
5519 BNXT_RMEM_RING_PTE_FLAG;
5520 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5521 if (i >= bp->tx_nr_rings_xdp) {
5522 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5523
5524 bnapi2 = bp->bnapi[k];
5525 txr->txq_index = i - bp->tx_nr_rings_xdp;
5526 txr->tx_napi_idx =
5527 BNXT_RING_TO_TC(bp, txr->txq_index);
5528 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5529 bnapi2->tx_int = bnxt_tx_int;
5530 } else {
5531 bnapi2 = bp->bnapi[j];
5532 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5533 bnapi2->tx_ring[0] = txr;
5534 bnapi2->tx_int = bnxt_tx_int_xdp;
5535 j++;
5536 }
5537 txr->bnapi = bnapi2;
5538 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5539 txr->tx_cpr = &bnapi2->cp_ring;
5540 }
5541
5542 rc = bnxt_alloc_stats(bp);
5543 if (rc)
5544 goto alloc_mem_err;
5545 bnxt_init_stats(bp);
5546
5547 rc = bnxt_alloc_ntp_fltrs(bp);
5548 if (rc)
5549 goto alloc_mem_err;
5550
5551 rc = bnxt_alloc_vnics(bp);
5552 if (rc)
5553 goto alloc_mem_err;
5554 }
5555
5556 rc = bnxt_alloc_all_cp_arrays(bp);
5557 if (rc)
5558 goto alloc_mem_err;
5559
5560 bnxt_init_ring_struct(bp);
5561
5562 rc = bnxt_alloc_rx_rings(bp);
5563 if (rc)
5564 goto alloc_mem_err;
5565
5566 rc = bnxt_alloc_tx_rings(bp);
5567 if (rc)
5568 goto alloc_mem_err;
5569
5570 rc = bnxt_alloc_cp_rings(bp);
5571 if (rc)
5572 goto alloc_mem_err;
5573
5574 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5575 BNXT_VNIC_MCAST_FLAG |
5576 BNXT_VNIC_UCAST_FLAG;
5577 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5578 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5579 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5580
5581 rc = bnxt_alloc_vnic_attributes(bp);
5582 if (rc)
5583 goto alloc_mem_err;
5584 return 0;
5585
5586 alloc_mem_err:
5587 bnxt_free_mem(bp, true);
5588 return rc;
5589 }
5590
5591 static void bnxt_disable_int(struct bnxt *bp)
5592 {
5593 int i;
5594
5595 if (!bp->bnapi)
5596 return;
5597
5598 for (i = 0; i < bp->cp_nr_rings; i++) {
5599 struct bnxt_napi *bnapi = bp->bnapi[i];
5600 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5601 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5602
5603 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5604 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5605 }
5606 }
5607
5608 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5609 {
5610 struct bnxt_napi *bnapi = bp->bnapi[n];
5611 struct bnxt_cp_ring_info *cpr;
5612
5613 cpr = &bnapi->cp_ring;
5614 return cpr->cp_ring_struct.map_idx;
5615 }
5616
5617 static void bnxt_disable_int_sync(struct bnxt *bp)
5618 {
5619 int i;
5620
5621 if (!bp->irq_tbl)
5622 return;
5623
5624 atomic_inc(&bp->intr_sem);
5625
5626 bnxt_disable_int(bp);
5627 for (i = 0; i < bp->cp_nr_rings; i++) {
5628 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5629
5630 synchronize_irq(bp->irq_tbl[map_idx].vector);
5631 }
5632 }
5633
5634 static void bnxt_enable_int(struct bnxt *bp)
5635 {
5636 int i;
5637
5638 atomic_set(&bp->intr_sem, 0);
5639 for (i = 0; i < bp->cp_nr_rings; i++) {
5640 struct bnxt_napi *bnapi = bp->bnapi[i];
5641 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5642
5643 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5644 }
5645 }
5646
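/* Register the driver with the firmware: advertise the OS type and
 * driver version, the VF commands forwarded to the PF, and the async
 * events the driver wants to receive.
 */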
5647 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5648 bool async_only)
5649 {
5650 DECLARE_BITMAP(async_events_bmap, 256);
5651 u32 *events = (u32 *)async_events_bmap;
5652 struct hwrm_func_drv_rgtr_output *resp;
5653 struct hwrm_func_drv_rgtr_input *req;
5654 u32 flags;
5655 int rc, i;
5656
5657 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5658 if (rc)
5659 return rc;
5660
5661 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5662 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5663 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5664
5665 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5666 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5667 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5668 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5669 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5670 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5671 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5672 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5673 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5674 req->flags = cpu_to_le32(flags);
5675 req->ver_maj_8b = DRV_VER_MAJ;
5676 req->ver_min_8b = DRV_VER_MIN;
5677 req->ver_upd_8b = DRV_VER_UPD;
5678 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5679 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5680 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5681
5682 if (BNXT_PF(bp)) {
5683 u32 data[8];
5684 int i;
5685
5686 memset(data, 0, sizeof(data));
5687 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5688 u16 cmd = bnxt_vf_req_snif[i];
5689 unsigned int bit, idx;
5690
5691 idx = cmd / 32;
5692 bit = cmd % 32;
5693 data[idx] |= 1 << bit;
5694 }
5695
5696 for (i = 0; i < 8; i++)
5697 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5698
5699 req->enables |=
5700 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5701 }
5702
5703 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5704 req->flags |= cpu_to_le32(
5705 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5706
5707 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5708 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5709 u16 event_id = bnxt_async_events_arr[i];
5710
5711 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5712 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5713 continue;
5714 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5715 !bp->ptp_cfg)
5716 continue;
5717 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5718 }
5719 if (bmap && bmap_size) {
5720 for (i = 0; i < bmap_size; i++) {
5721 if (test_bit(i, bmap))
5722 __set_bit(i, async_events_bmap);
5723 }
5724 }
5725 for (i = 0; i < 8; i++)
5726 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5727
5728 if (async_only)
5729 req->enables =
5730 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5731
5732 resp = hwrm_req_hold(bp, req);
5733 rc = hwrm_req_send(bp, req);
5734 if (!rc) {
5735 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5736 if (resp->flags &
5737 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5738 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5739 }
5740 hwrm_req_drop(bp, req);
5741 return rc;
5742 }
5743
5744 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5745 {
5746 struct hwrm_func_drv_unrgtr_input *req;
5747 int rc;
5748
5749 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5750 return 0;
5751
5752 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5753 if (rc)
5754 return rc;
5755 return hwrm_req_send(bp, req);
5756 }
5757
5758 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5759
5760 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5761 {
5762 struct hwrm_tunnel_dst_port_free_input *req;
5763 int rc;
5764
5765 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5766 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5767 return 0;
5768 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5769 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5770 return 0;
5771
5772 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5773 if (rc)
5774 return rc;
5775
5776 req->tunnel_type = tunnel_type;
5777
5778 switch (tunnel_type) {
5779 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5780 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5781 bp->vxlan_port = 0;
5782 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5783 break;
5784 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5785 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5786 bp->nge_port = 0;
5787 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5788 break;
5789 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5790 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5791 bp->vxlan_gpe_port = 0;
5792 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5793 break;
5794 default:
5795 break;
5796 }
5797
5798 rc = hwrm_req_send(bp, req);
5799 if (rc)
5800 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5801 rc);
5802 if (bp->flags & BNXT_FLAG_TPA)
5803 bnxt_set_tpa(bp, true);
5804 return rc;
5805 }
5806
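/* Program a UDP tunnel destination port (VXLAN, GENEVE or VXLAN-GPE)
 * into the firmware and record the returned firmware port ID.
 */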
5807 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5808 u8 tunnel_type)
5809 {
5810 struct hwrm_tunnel_dst_port_alloc_output *resp;
5811 struct hwrm_tunnel_dst_port_alloc_input *req;
5812 int rc;
5813
5814 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5815 if (rc)
5816 return rc;
5817
5818 req->tunnel_type = tunnel_type;
5819 req->tunnel_dst_port_val = port;
5820
5821 resp = hwrm_req_hold(bp, req);
5822 rc = hwrm_req_send(bp, req);
5823 if (rc) {
5824 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5825 rc);
5826 goto err_out;
5827 }
5828
5829 switch (tunnel_type) {
5830 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5831 bp->vxlan_port = port;
5832 bp->vxlan_fw_dst_port_id =
5833 le16_to_cpu(resp->tunnel_dst_port_id);
5834 break;
5835 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5836 bp->nge_port = port;
5837 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5838 break;
5839 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5840 bp->vxlan_gpe_port = port;
5841 bp->vxlan_gpe_fw_dst_port_id =
5842 le16_to_cpu(resp->tunnel_dst_port_id);
5843 break;
5844 default:
5845 break;
5846 }
5847 if (bp->flags & BNXT_FLAG_TPA)
5848 bnxt_set_tpa(bp, true);
5849
5850 err_out:
5851 hwrm_req_drop(bp, req);
5852 return rc;
5853 }
5854
5855 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5856 {
5857 struct hwrm_cfa_l2_set_rx_mask_input *req;
5858 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5859 int rc;
5860
5861 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5862 if (rc)
5863 return rc;
5864
5865 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5866 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5867 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5868 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5869 }
5870 req->mask = cpu_to_le32(vnic->rx_mask);
5871 return hwrm_req_send_silent(bp, req);
5872 }
5873
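/* Drop a reference on an L2 filter.  On the last reference, unhash
 * it, release its reserved bitmap slot if any, and free it via RCU.
 */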
5874 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5875 {
5876 if (!atomic_dec_and_test(&fltr->refcnt))
5877 return;
5878 spin_lock_bh(&bp->ntp_fltr_lock);
5879 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5880 spin_unlock_bh(&bp->ntp_fltr_lock);
5881 return;
5882 }
5883 hlist_del_rcu(&fltr->base.hash);
5884 bnxt_del_one_usr_fltr(bp, &fltr->base);
5885 if (fltr->base.flags) {
5886 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5887 bp->ntp_fltr_count--;
5888 }
5889 spin_unlock_bh(&bp->ntp_fltr_lock);
5890 kfree_rcu(fltr, base.rcu);
5891 }
5892
5893 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5894 struct bnxt_l2_key *key,
5895 u32 idx)
5896 {
5897 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5898 struct bnxt_l2_filter *fltr;
5899
5900 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5901 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5902
5903 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5904 l2_key->vlan == key->vlan)
5905 return fltr;
5906 }
5907 return NULL;
5908 }
5909
5910 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5911 struct bnxt_l2_key *key,
5912 u32 idx)
5913 {
5914 struct bnxt_l2_filter *fltr = NULL;
5915
5916 rcu_read_lock();
5917 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5918 if (fltr)
5919 atomic_inc(&fltr->refcnt);
5920 rcu_read_unlock();
5921 return fltr;
5922 }
5923
5924 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5925 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5926 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5927 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5928 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5929
5930 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5931 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5932 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5933 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5934 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5935
5936 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5937 {
5938 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5939 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5940 return sizeof(fkeys->addrs.v4addrs) +
5941 sizeof(fkeys->ports);
5942
5943 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5944 return sizeof(fkeys->addrs.v4addrs);
5945 }
5946
5947 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5948 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5949 return sizeof(fkeys->addrs.v6addrs) +
5950 sizeof(fkeys->ports);
5951
5952 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5953 return sizeof(fkeys->addrs.v6addrs);
5954 }
5955
5956 return 0;
5957 }
5958
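/* Compute the Toeplitz hash over the flow's address/port tuple using
 * the per-device RSS hash key.  Only the upper 32 bits of the running
 * 64-bit hash are valid.
 */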
5959 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5960 const unsigned char *key)
5961 {
5962 u64 prefix = bp->toeplitz_prefix, hash = 0;
5963 struct bnxt_ipv4_tuple tuple4;
5964 struct bnxt_ipv6_tuple tuple6;
5965 int i, j, len = 0;
5966 u8 *four_tuple;
5967
5968 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5969 if (!len)
5970 return 0;
5971
5972 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5973 tuple4.v4addrs = fkeys->addrs.v4addrs;
5974 tuple4.ports = fkeys->ports;
5975 four_tuple = (unsigned char *)&tuple4;
5976 } else {
5977 tuple6.v6addrs = fkeys->addrs.v6addrs;
5978 tuple6.ports = fkeys->ports;
5979 four_tuple = (unsigned char *)&tuple6;
5980 }
5981
5982 for (i = 0, j = 8; i < len; i++, j++) {
5983 u8 byte = four_tuple[i];
5984 int bit;
5985
5986 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5987 if (byte & 0x80)
5988 hash ^= prefix;
5989 }
5990 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5991 }
5992
5993 /* The valid part of the hash is in the upper 32 bits. */
5994 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
5995 }
5996
5997 #ifdef CONFIG_RFS_ACCEL
5998 static struct bnxt_l2_filter *
5999 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
6000 {
6001 struct bnxt_l2_filter *fltr;
6002 u32 idx;
6003
6004 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6005 BNXT_L2_FLTR_HASH_MASK;
6006 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6007 return fltr;
6008 }
6009 #endif
6010
6011 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6012 struct bnxt_l2_key *key, u32 idx)
6013 {
6014 struct hlist_head *head;
6015
6016 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6017 fltr->l2_key.vlan = key->vlan;
6018 fltr->base.type = BNXT_FLTR_TYPE_L2;
6019 if (fltr->base.flags) {
6020 int bit_id;
6021
6022 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6023 bp->max_fltr, 0);
6024 if (bit_id < 0)
6025 return -ENOMEM;
6026 fltr->base.sw_id = (u16)bit_id;
6027 bp->ntp_fltr_count++;
6028 }
6029 head = &bp->l2_fltr_hash_tbl[idx];
6030 hlist_add_head_rcu(&fltr->base.hash, head);
6031 bnxt_insert_usr_fltr(bp, &fltr->base);
6032 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6033 atomic_set(&fltr->refcnt, 1);
6034 return 0;
6035 }
6036
6037 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6038 struct bnxt_l2_key *key,
6039 gfp_t gfp)
6040 {
6041 struct bnxt_l2_filter *fltr;
6042 u32 idx;
6043 int rc;
6044
6045 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6046 BNXT_L2_FLTR_HASH_MASK;
6047 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6048 if (fltr)
6049 return fltr;
6050
6051 fltr = kzalloc(sizeof(*fltr), gfp);
6052 if (!fltr)
6053 return ERR_PTR(-ENOMEM);
6054 spin_lock_bh(&bp->ntp_fltr_lock);
6055 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6056 spin_unlock_bh(&bp->ntp_fltr_lock);
6057 if (rc) {
6058 bnxt_del_l2_filter(bp, fltr);
6059 fltr = ERR_PTR(rc);
6060 }
6061 return fltr;
6062 }
6063
6064 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6065 struct bnxt_l2_key *key,
6066 u16 flags)
6067 {
6068 struct bnxt_l2_filter *fltr;
6069 u32 idx;
6070 int rc;
6071
6072 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6073 BNXT_L2_FLTR_HASH_MASK;
6074 spin_lock_bh(&bp->ntp_fltr_lock);
6075 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6076 if (fltr) {
6077 fltr = ERR_PTR(-EEXIST);
6078 goto l2_filter_exit;
6079 }
6080 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
6081 if (!fltr) {
6082 fltr = ERR_PTR(-ENOMEM);
6083 goto l2_filter_exit;
6084 }
6085 fltr->base.flags = flags;
6086 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6087 if (rc) {
6088 spin_unlock_bh(&bp->ntp_fltr_lock);
6089 bnxt_del_l2_filter(bp, fltr);
6090 return ERR_PTR(rc);
6091 }
6092
6093 l2_filter_exit:
6094 spin_unlock_bh(&bp->ntp_fltr_lock);
6095 return fltr;
6096 }
6097
6098 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6099 {
6100 #ifdef CONFIG_BNXT_SRIOV
6101 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6102
6103 return vf->fw_fid;
6104 #else
6105 return INVALID_HW_RING_ID;
6106 #endif
6107 }
6108
6109 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6110 {
6111 struct hwrm_cfa_l2_filter_free_input *req;
6112 u16 target_id = 0xffff;
6113 int rc;
6114
6115 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6116 struct bnxt_pf_info *pf = &bp->pf;
6117
6118 if (fltr->base.vf_idx >= pf->active_vfs)
6119 return -EINVAL;
6120
6121 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6122 if (target_id == INVALID_HW_RING_ID)
6123 return -EINVAL;
6124 }
6125
6126 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6127 if (rc)
6128 return rc;
6129
6130 req->target_id = cpu_to_le16(target_id);
6131 req->l2_filter_id = fltr->base.filter_id;
6132 return hwrm_req_send(bp, req);
6133 }
6134
6135 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6136 {
6137 struct hwrm_cfa_l2_filter_alloc_output *resp;
6138 struct hwrm_cfa_l2_filter_alloc_input *req;
6139 u16 target_id = 0xffff;
6140 int rc;
6141
6142 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6143 struct bnxt_pf_info *pf = &bp->pf;
6144
6145 if (fltr->base.vf_idx >= pf->active_vfs)
6146 return -EINVAL;
6147
6148 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6149 }
6150 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6151 if (rc)
6152 return rc;
6153
6154 req->target_id = cpu_to_le16(target_id);
6155 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6156
6157 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6158 req->flags |=
6159 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6160 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6161 req->enables =
6162 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6163 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6164 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6165 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6166 eth_broadcast_addr(req->l2_addr_mask);
6167
6168 if (fltr->l2_key.vlan) {
6169 req->enables |=
6170 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6171 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6172 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6173 req->num_vlans = 1;
6174 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6175 req->l2_ivlan_mask = cpu_to_le16(0xfff);
6176 }
6177
6178 resp = hwrm_req_hold(bp, req);
6179 rc = hwrm_req_send(bp, req);
6180 if (!rc) {
6181 fltr->base.filter_id = resp->l2_filter_id;
6182 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6183 }
6184 hwrm_req_drop(bp, req);
6185 return rc;
6186 }
6187
6188 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6189 struct bnxt_ntuple_filter *fltr)
6190 {
6191 struct hwrm_cfa_ntuple_filter_free_input *req;
6192 int rc;
6193
6194 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6195 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6196 if (rc)
6197 return rc;
6198
6199 req->ntuple_filter_id = fltr->base.filter_id;
6200 return hwrm_req_send(bp, req);
6201 }
6202
6203 #define BNXT_NTP_FLTR_FLAGS \
6204 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
6205 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
6206 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
6207 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
6208 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
6209 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
6210 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
6211 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
6212 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
6213 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
6214 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
6215 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
6216 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6217
6218 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
6219 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6220
6221 void bnxt_fill_ipv6_mask(__be32 mask[4])
6222 {
6223 int i;
6224
6225 for (i = 0; i < 4; i++)
6226 mask[i] = cpu_to_be32(~0);
6227 }
6228
6229 static void
6230 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6231 struct hwrm_cfa_ntuple_filter_alloc_input *req,
6232 struct bnxt_ntuple_filter *fltr)
6233 {
6234 u16 rxq = fltr->base.rxq;
6235
6236 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6237 struct ethtool_rxfh_context *ctx;
6238 struct bnxt_rss_ctx *rss_ctx;
6239 struct bnxt_vnic_info *vnic;
6240
6241 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6242 fltr->base.fw_vnic_id);
6243 if (ctx) {
6244 rss_ctx = ethtool_rxfh_context_priv(ctx);
6245 vnic = &rss_ctx->vnic;
6246
6247 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6248 }
6249 return;
6250 }
6251 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6252 struct bnxt_vnic_info *vnic;
6253 u32 enables;
6254
6255 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6256 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6257 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6258 req->enables |= cpu_to_le32(enables);
6259 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6260 } else {
6261 u32 flags;
6262
6263 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6264 req->flags |= cpu_to_le32(flags);
6265 req->dst_id = cpu_to_le16(rxq);
6266 }
6267 }
6268
6269 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6270 struct bnxt_ntuple_filter *fltr)
6271 {
6272 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6273 struct hwrm_cfa_ntuple_filter_alloc_input *req;
6274 struct bnxt_flow_masks *masks = &fltr->fmasks;
6275 struct flow_keys *keys = &fltr->fkeys;
6276 struct bnxt_l2_filter *l2_fltr;
6277 struct bnxt_vnic_info *vnic;
6278 int rc;
6279
6280 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6281 if (rc)
6282 return rc;
6283
6284 l2_fltr = fltr->l2_fltr;
6285 req->l2_filter_id = l2_fltr->base.filter_id;
6286
6287 if (fltr->base.flags & BNXT_ACT_DROP) {
6288 req->flags =
6289 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6290 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6291 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6292 } else {
6293 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6294 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6295 }
6296 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6297
6298 req->ethertype = htons(ETH_P_IP);
6299 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6300 req->ip_protocol = keys->basic.ip_proto;
6301
6302 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6303 req->ethertype = htons(ETH_P_IPV6);
6304 req->ip_addr_type =
6305 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6306 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6307 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6308 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6309 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6310 } else {
6311 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6312 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6313 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6314 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6315 }
6316 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6317 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6318 req->tunnel_type =
6319 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6320 }
6321
6322 req->src_port = keys->ports.src;
6323 req->src_port_mask = masks->ports.src;
6324 req->dst_port = keys->ports.dst;
6325 req->dst_port_mask = masks->ports.dst;
6326
6327 resp = hwrm_req_hold(bp, req);
6328 rc = hwrm_req_send(bp, req);
6329 if (!rc)
6330 fltr->base.filter_id = resp->ntuple_filter_id;
6331 hwrm_req_drop(bp, req);
6332 return rc;
6333 }
6334
6335 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6336 const u8 *mac_addr)
6337 {
6338 struct bnxt_l2_filter *fltr;
6339 struct bnxt_l2_key key;
6340 int rc;
6341
6342 ether_addr_copy(key.dst_mac_addr, mac_addr);
6343 key.vlan = 0;
6344 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6345 if (IS_ERR(fltr))
6346 return PTR_ERR(fltr);
6347
6348 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6349 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6350 if (rc)
6351 bnxt_del_l2_filter(bp, fltr);
6352 else
6353 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6354 return rc;
6355 }
6356
6357 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6358 {
6359 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6360
6361 /* Any associated ntuple filters will also be cleared by firmware. */
6362 for (i = 0; i < num_of_vnics; i++) {
6363 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6364
6365 for (j = 0; j < vnic->uc_filter_count; j++) {
6366 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6367
6368 bnxt_hwrm_l2_filter_free(bp, fltr);
6369 bnxt_del_l2_filter(bp, fltr);
6370 }
6371 vnic->uc_filter_count = 0;
6372 }
6373 }
6374
6375 #define BNXT_DFLT_TUNL_TPA_BMAP \
6376 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6377 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6378 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6379
6380 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6381 struct hwrm_vnic_tpa_cfg_input *req)
6382 {
6383 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6384
6385 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6386 return;
6387
6388 if (bp->vxlan_port)
6389 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6390 if (bp->vxlan_gpe_port)
6391 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6392 if (bp->nge_port)
6393 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6394
6395 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6396 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6397 }
6398
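/* Enable or disable TPA (HW GRO/LRO) on a VNIC.  The maximum number
 * of aggregation segments is derived from the MTU and RX page size on
 * older chips, or taken from the chip maximum on P5+.
 */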
6399 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6400 u32 tpa_flags)
6401 {
6402 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6403 struct hwrm_vnic_tpa_cfg_input *req;
6404 int rc;
6405
6406 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6407 return 0;
6408
6409 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6410 if (rc)
6411 return rc;
6412
6413 if (tpa_flags) {
6414 u16 mss = bp->dev->mtu - 40;
6415 u32 nsegs, n, segs = 0, flags;
6416
6417 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6418 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6419 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6420 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6421 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6422 if (tpa_flags & BNXT_FLAG_GRO)
6423 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6424
6425 req->flags = cpu_to_le32(flags);
6426
6427 req->enables =
6428 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6429 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6430 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6431
6432 /* The number of segs is in log2 units, and the first packet is
6433 * not counted in these units.
6434 */
6435 if (mss <= BNXT_RX_PAGE_SIZE) {
6436 n = BNXT_RX_PAGE_SIZE / mss;
6437 nsegs = (MAX_SKB_FRAGS - 1) * n;
6438 } else {
6439 n = mss / BNXT_RX_PAGE_SIZE;
6440 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6441 n++;
6442 nsegs = (MAX_SKB_FRAGS - n) / n;
6443 }
6444
6445 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6446 segs = MAX_TPA_SEGS_P5;
6447 max_aggs = bp->max_tpa;
6448 } else {
6449 segs = ilog2(nsegs);
6450 }
6451 req->max_agg_segs = cpu_to_le16(segs);
6452 req->max_aggs = cpu_to_le16(max_aggs);
6453
6454 req->min_agg_len = cpu_to_le32(512);
6455 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6456 }
6457 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6458
6459 return hwrm_req_send(bp, req);
6460 }
6461
6462 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6463 {
6464 struct bnxt_ring_grp_info *grp_info;
6465
6466 grp_info = &bp->grp_info[ring->grp_idx];
6467 return grp_info->cp_fw_ring_id;
6468 }
6469
6470 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6471 {
6472 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6473 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6474 else
6475 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6476 }
6477
6478 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6479 {
6480 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6481 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6482 else
6483 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6484 }
6485
6486 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6487 {
6488 int entries;
6489
6490 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6491 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6492 else
6493 entries = HW_HASH_INDEX_SIZE;
6494
6495 bp->rss_indir_tbl_entries = entries;
6496 bp->rss_indir_tbl =
6497 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6498 if (!bp->rss_indir_tbl)
6499 return -ENOMEM;
6500
6501 return 0;
6502 }
6503
6504 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6505 struct ethtool_rxfh_context *rss_ctx)
6506 {
6507 u16 max_rings, max_entries, pad, i;
6508 u32 *rss_indir_tbl;
6509
6510 if (!bp->rx_nr_rings)
6511 return;
6512
6513 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6514 max_rings = bp->rx_nr_rings - 1;
6515 else
6516 max_rings = bp->rx_nr_rings;
6517
6518 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6519 if (rss_ctx)
6520 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6521 else
6522 rss_indir_tbl = &bp->rss_indir_tbl[0];
6523
6524 for (i = 0; i < max_entries; i++)
6525 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6526
6527 pad = bp->rss_indir_tbl_entries - max_entries;
6528 if (pad)
6529 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6530 }
6531
6532 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6533 {
6534 u32 i, tbl_size, max_ring = 0;
6535
6536 if (!bp->rss_indir_tbl)
6537 return 0;
6538
6539 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6540 for (i = 0; i < tbl_size; i++)
6541 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6542 return max_ring;
6543 }
6544
6545 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6546 {
6547 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6548 if (!rx_rings)
6549 return 0;
6550 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6551 BNXT_RSS_TABLE_ENTRIES_P5);
6552 }
6553 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6554 return 2;
6555 return 1;
6556 }
6557
6558 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6559 {
6560 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6561 u16 i, j;
6562
6563 /* Fill the RSS indirection table with ring group ids */
6564 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6565 if (!no_rss)
6566 j = bp->rss_indir_tbl[i];
6567 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6568 }
6569 }
6570
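/* On P5+ chips each RSS indirection table entry holds a pair of ring
 * IDs: the RX ring and its associated completion ring.
 */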
6571 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6572 struct bnxt_vnic_info *vnic)
6573 {
6574 __le16 *ring_tbl = vnic->rss_table;
6575 struct bnxt_rx_ring_info *rxr;
6576 u16 tbl_size, i;
6577
6578 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6579
6580 for (i = 0; i < tbl_size; i++) {
6581 u16 ring_id, j;
6582
6583 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6584 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6585 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6586 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6587 else
6588 j = bp->rss_indir_tbl[i];
6589 rxr = &bp->rx_ring[j];
6590
6591 ring_id = rxr->rx_ring_struct.fw_ring_id;
6592 *ring_tbl++ = cpu_to_le16(ring_id);
6593 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6594 *ring_tbl++ = cpu_to_le16(ring_id);
6595 }
6596 }
6597
6598 static void
6599 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6600 struct bnxt_vnic_info *vnic)
6601 {
6602 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6603 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6604 if (bp->flags & BNXT_FLAG_CHIP_P7)
6605 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6606 } else {
6607 bnxt_fill_hw_rss_tbl(bp, vnic);
6608 }
6609
6610 if (bp->rss_hash_delta) {
6611 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6612 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6613 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6614 else
6615 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6616 } else {
6617 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6618 }
6619 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6620 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6621 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6622 }
6623
6624 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6625 bool set_rss)
6626 {
6627 struct hwrm_vnic_rss_cfg_input *req;
6628 int rc;
6629
6630 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6631 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6632 return 0;
6633
6634 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6635 if (rc)
6636 return rc;
6637
6638 if (set_rss)
6639 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6640 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6641 return hwrm_req_send(bp, req);
6642 }
6643
6644 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6645 struct bnxt_vnic_info *vnic, bool set_rss)
6646 {
6647 struct hwrm_vnic_rss_cfg_input *req;
6648 dma_addr_t ring_tbl_map;
6649 u32 i, nr_ctxs;
6650 int rc;
6651
6652 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6653 if (rc)
6654 return rc;
6655
6656 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6657 if (!set_rss)
6658 return hwrm_req_send(bp, req);
6659
6660 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6661 ring_tbl_map = vnic->rss_table_dma_addr;
6662 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6663
6664 hwrm_req_hold(bp, req);
6665 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6666 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6667 req->ring_table_pair_index = i;
6668 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6669 rc = hwrm_req_send(bp, req);
6670 if (rc)
6671 goto exit;
6672 }
6673
6674 exit:
6675 hwrm_req_drop(bp, req);
6676 return rc;
6677 }
6678
6679 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6680 {
6681 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6682 struct hwrm_vnic_rss_qcfg_output *resp;
6683 struct hwrm_vnic_rss_qcfg_input *req;
6684
6685 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6686 return;
6687
6688 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6689 /* All contexts are configured with the same hash_type; context zero always exists */
6690 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6691 resp = hwrm_req_hold(bp, req);
6692 if (!hwrm_req_send(bp, req)) {
6693 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6694 bp->rss_hash_delta = 0;
6695 }
6696 hwrm_req_drop(bp, req);
6697 }
6698
6699 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6700 {
6701 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6702 struct hwrm_vnic_plcmodes_cfg_input *req;
6703 int rc;
6704
6705 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6706 if (rc)
6707 return rc;
6708
6709 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6710 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6711 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6712
6713 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6714 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6715 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6716 req->enables |=
6717 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6718 req->hds_threshold = cpu_to_le16(hds_thresh);
6719 }
6720 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6721 return hwrm_req_send(bp, req);
6722 }
6723
6724 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6725 struct bnxt_vnic_info *vnic,
6726 u16 ctx_idx)
6727 {
6728 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6729
6730 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6731 return;
6732
6733 req->rss_cos_lb_ctx_id =
6734 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6735
6736 hwrm_req_send(bp, req);
6737 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6738 }
6739
6740 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6741 {
6742 int i, j;
6743
6744 for (i = 0; i < bp->nr_vnics; i++) {
6745 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6746
6747 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6748 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6749 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6750 }
6751 }
6752 bp->rsscos_nr_ctxs = 0;
6753 }
6754
6755 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6756 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6757 {
6758 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6759 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6760 int rc;
6761
6762 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6763 if (rc)
6764 return rc;
6765
6766 resp = hwrm_req_hold(bp, req);
6767 rc = hwrm_req_send(bp, req);
6768 if (!rc)
6769 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6770 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6771 hwrm_req_drop(bp, req);
6772
6773 return rc;
6774 }
6775
6776 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6777 {
6778 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6779 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6780 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6781 }
6782
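/* Configure a VNIC with its default rings (P5+) or ring group and
 * RSS/COS rules (older chips), the MRU, VLAN stripping and the RoCE
 * VNIC mode when RDMA is registered.
 */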
6783 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6784 {
6785 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6786 struct hwrm_vnic_cfg_input *req;
6787 unsigned int ring = 0, grp_idx;
6788 u16 def_vlan = 0;
6789 int rc;
6790
6791 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6792 if (rc)
6793 return rc;
6794
6795 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6796 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6797
6798 req->default_rx_ring_id =
6799 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6800 req->default_cmpl_ring_id =
6801 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6802 req->enables =
6803 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6804 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6805 goto vnic_mru;
6806 }
6807 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6808 /* Only RSS is supported for now.  TBD: COS & LB */
6809 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6810 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6811 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6812 VNIC_CFG_REQ_ENABLES_MRU);
6813 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6814 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6815 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6816 VNIC_CFG_REQ_ENABLES_MRU);
6817 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6818 } else {
6819 req->rss_rule = cpu_to_le16(0xffff);
6820 }
6821
6822 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6823 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6824 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6825 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6826 } else {
6827 req->cos_rule = cpu_to_le16(0xffff);
6828 }
6829
6830 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6831 ring = 0;
6832 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6833 ring = vnic->vnic_id - 1;
6834 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6835 ring = bp->rx_nr_rings - 1;
6836
6837 grp_idx = bp->rx_ring[ring].bnapi->index;
6838 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6839 req->lb_rule = cpu_to_le16(0xffff);
6840 vnic_mru:
6841 vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
6842 req->mru = cpu_to_le16(vnic->mru);
6843
6844 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6845 #ifdef CONFIG_BNXT_SRIOV
6846 if (BNXT_VF(bp))
6847 def_vlan = bp->vf.vlan;
6848 #endif
6849 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6850 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6851 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6852 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6853
6854 return hwrm_req_send(bp, req);
6855 }
6856
6857 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6858 struct bnxt_vnic_info *vnic)
6859 {
6860 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6861 struct hwrm_vnic_free_input *req;
6862
6863 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6864 return;
6865
6866 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6867
6868 hwrm_req_send(bp, req);
6869 vnic->fw_vnic_id = INVALID_HW_RING_ID;
6870 }
6871 }
6872
6873 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6874 {
6875 u16 i;
6876
6877 for (i = 0; i < bp->nr_vnics; i++)
6878 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6879 }
6880
6881 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6882 unsigned int start_rx_ring_idx,
6883 unsigned int nr_rings)
6884 {
6885 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6886 struct hwrm_vnic_alloc_output *resp;
6887 struct hwrm_vnic_alloc_input *req;
6888 int rc;
6889
6890 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6891 if (rc)
6892 return rc;
6893
6894 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6895 goto vnic_no_ring_grps;
6896
6897 /* map ring groups to this vnic */
6898 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6899 grp_idx = bp->rx_ring[i].bnapi->index;
6900 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6901 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6902 j, nr_rings);
6903 break;
6904 }
6905 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6906 }
6907
6908 vnic_no_ring_grps:
6909 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6910 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6911 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6912 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6913
6914 resp = hwrm_req_hold(bp, req);
6915 rc = hwrm_req_send(bp, req);
6916 if (!rc)
6917 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6918 hwrm_req_drop(bp, req);
6919 return rc;
6920 }
6921
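/* Query VNIC capabilities from the firmware and translate them into
 * the driver's rss_cap/fw_cap flags and the HW ring stats size.
 */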
6922 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6923 {
6924 struct hwrm_vnic_qcaps_output *resp;
6925 struct hwrm_vnic_qcaps_input *req;
6926 int rc;
6927
6928 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6929 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6930 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6931 if (bp->hwrm_spec_code < 0x10600)
6932 return 0;
6933
6934 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6935 if (rc)
6936 return rc;
6937
6938 resp = hwrm_req_hold(bp, req);
6939 rc = hwrm_req_send(bp, req);
6940 if (!rc) {
6941 u32 flags = le32_to_cpu(resp->flags);
6942
6943 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6944 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6945 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6946 if (flags &
6947 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6948 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6949
6950 /* Older P5 fw before EXT_HW_STATS support did not set
6951 * VLAN_STRIP_CAP properly.
6952 */
6953 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6954 (BNXT_CHIP_P5(bp) &&
6955 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6956 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6957 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6958 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6959 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6960 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6961 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6962 if (bp->max_tpa_v2) {
6963 if (BNXT_CHIP_P5(bp))
6964 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6965 else
6966 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6967 }
6968 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6969 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6970 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6971 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6972 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6973 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6974 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6975 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6976 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6977 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6978 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
6979 bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
6980 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
6981 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
6982 }
6983 hwrm_req_drop(bp, req);
6984 return rc;
6985 }
6986
6987 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6988 {
6989 struct hwrm_ring_grp_alloc_output *resp;
6990 struct hwrm_ring_grp_alloc_input *req;
6991 int rc;
6992 u16 i;
6993
6994 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6995 return 0;
6996
6997 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6998 if (rc)
6999 return rc;
7000
7001 resp = hwrm_req_hold(bp, req);
7002 for (i = 0; i < bp->rx_nr_rings; i++) {
7003 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7004
7005 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7006 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7007 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7008 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7009
7010 rc = hwrm_req_send(bp, req);
7011
7012 if (rc)
7013 break;
7014
7015 bp->grp_info[grp_idx].fw_grp_id =
7016 le32_to_cpu(resp->ring_group_id);
7017 }
7018 hwrm_req_drop(bp, req);
7019 return rc;
7020 }
7021
7022 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7023 {
7024 struct hwrm_ring_grp_free_input *req;
7025 u16 i;
7026
7027 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7028 return;
7029
7030 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7031 return;
7032
7033 hwrm_req_hold(bp, req);
7034 for (i = 0; i < bp->cp_nr_rings; i++) {
7035 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7036 continue;
7037 req->ring_group_id =
7038 cpu_to_le32(bp->grp_info[i].fw_grp_id);
7039
7040 hwrm_req_send(bp, req);
7041 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7042 }
7043 hwrm_req_drop(bp, req);
7044 }
7045
7046 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7047 struct hwrm_ring_alloc_input *req,
7048 struct bnxt_ring_struct *ring)
7049 {
7050 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7051 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7052 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7053
7054 if (ring_type == HWRM_RING_ALLOC_AGG) {
7055 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7056 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7057 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
7058 enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7059 } else {
7060 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7061 if (NET_IP_ALIGN == 2)
7062 req->flags =
7063 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7064 }
7065 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7066 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7067 req->enables |= cpu_to_le32(enables);
7068 }
7069
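/* Build and send a HWRM_RING_ALLOC request for the given ring type
 * (TX, RX, RX aggregation, completion or NQ).  On success the
 * firmware-assigned ring ID is stored in ring->fw_ring_id.
 */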
7070 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7071 struct bnxt_ring_struct *ring,
7072 u32 ring_type, u32 map_index)
7073 {
7074 struct hwrm_ring_alloc_output *resp;
7075 struct hwrm_ring_alloc_input *req;
7076 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7077 struct bnxt_ring_grp_info *grp_info;
7078 int rc, err = 0;
7079 u16 ring_id;
7080
7081 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7082 if (rc)
7083 goto exit;
7084
7085 req->enables = 0;
7086 if (rmem->nr_pages > 1) {
7087 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7088 /* Page size is in log2 units */
7089 req->page_size = BNXT_PAGE_SHIFT;
7090 req->page_tbl_depth = 1;
7091 } else {
7092 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7093 }
7094 req->fbo = 0;
7095 /* Association of ring index with doorbell index and MSIX number */
7096 req->logical_id = cpu_to_le16(map_index);
7097
7098 switch (ring_type) {
7099 case HWRM_RING_ALLOC_TX: {
7100 struct bnxt_tx_ring_info *txr;
7101 u16 flags = 0;
7102
7103 txr = container_of(ring, struct bnxt_tx_ring_info,
7104 tx_ring_struct);
7105 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7106 /* Association of transmit ring with completion ring */
7107 grp_info = &bp->grp_info[ring->grp_idx];
7108 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7109 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7110 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7111 req->queue_id = cpu_to_le16(ring->queue_id);
7112 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7113 req->cmpl_coal_cnt =
7114 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7115 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7116 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7117 req->flags = cpu_to_le16(flags);
7118 break;
7119 }
7120 case HWRM_RING_ALLOC_RX:
7121 case HWRM_RING_ALLOC_AGG:
7122 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7123 req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7124 cpu_to_le32(bp->rx_ring_mask + 1) :
7125 cpu_to_le32(bp->rx_agg_ring_mask + 1);
7126 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7127 bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
7128 break;
7129 case HWRM_RING_ALLOC_CMPL:
7130 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7131 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7132 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7133 /* Association of cp ring with nq */
7134 grp_info = &bp->grp_info[map_index];
7135 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7136 req->cq_handle = cpu_to_le64(ring->handle);
7137 req->enables |= cpu_to_le32(
7138 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7139 } else {
7140 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7141 }
7142 break;
7143 case HWRM_RING_ALLOC_NQ:
7144 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7145 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7146 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7147 break;
7148 default:
7149 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7150 ring_type);
7151 return -EINVAL;
7152 }
7153
7154 resp = hwrm_req_hold(bp, req);
7155 rc = hwrm_req_send(bp, req);
7156 err = le16_to_cpu(resp->error_code);
7157 ring_id = le16_to_cpu(resp->ring_id);
7158 hwrm_req_drop(bp, req);
7159
7160 exit:
7161 if (rc || err) {
7162 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7163 ring_type, rc, err);
7164 return -EIO;
7165 }
7166 ring->fw_ring_id = ring_id;
7167 return rc;
7168 }
7169
7170 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7171 {
7172 int rc;
7173
7174 if (BNXT_PF(bp)) {
7175 struct hwrm_func_cfg_input *req;
7176
7177 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7178 if (rc)
7179 return rc;
7180
7181 req->fid = cpu_to_le16(0xffff);
7182 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7183 req->async_event_cr = cpu_to_le16(idx);
7184 return hwrm_req_send(bp, req);
7185 } else {
7186 struct hwrm_func_vf_cfg_input *req;
7187
7188 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7189 if (rc)
7190 return rc;
7191
7192 req->enables =
7193 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7194 req->async_event_cr = cpu_to_le16(idx);
7195 return hwrm_req_send(bp, req);
7196 }
7197 }
7198
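/* Set the doorbell ring mask for the given ring type.  On P7 chips also
 * precompute the epoch bit mask and shift derived from the ring size.
 */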
7199 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7200 u32 ring_type)
7201 {
7202 switch (ring_type) {
7203 case HWRM_RING_ALLOC_TX:
7204 db->db_ring_mask = bp->tx_ring_mask;
7205 break;
7206 case HWRM_RING_ALLOC_RX:
7207 db->db_ring_mask = bp->rx_ring_mask;
7208 break;
7209 case HWRM_RING_ALLOC_AGG:
7210 db->db_ring_mask = bp->rx_agg_ring_mask;
7211 break;
7212 case HWRM_RING_ALLOC_CMPL:
7213 case HWRM_RING_ALLOC_NQ:
7214 db->db_ring_mask = bp->cp_ring_mask;
7215 break;
7216 }
7217 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7218 db->db_epoch_mask = db->db_ring_mask + 1;
7219 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7220 }
7221 }
7222
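/* Initialize the doorbell for a newly allocated ring.  P5_PLUS chips use
 * 64-bit doorbell keys (path, type and firmware ring ID) at a common BAR1
 * offset; older chips use 32-bit keys at a per-ring 0x80 byte stride.
 */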
7223 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7224 u32 map_idx, u32 xid)
7225 {
7226 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7227 switch (ring_type) {
7228 case HWRM_RING_ALLOC_TX:
7229 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7230 break;
7231 case HWRM_RING_ALLOC_RX:
7232 case HWRM_RING_ALLOC_AGG:
7233 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7234 break;
7235 case HWRM_RING_ALLOC_CMPL:
7236 db->db_key64 = DBR_PATH_L2;
7237 break;
7238 case HWRM_RING_ALLOC_NQ:
7239 db->db_key64 = DBR_PATH_L2;
7240 break;
7241 }
7242 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7243
7244 if (bp->flags & BNXT_FLAG_CHIP_P7)
7245 db->db_key64 |= DBR_VALID;
7246
7247 db->doorbell = bp->bar1 + bp->db_offset;
7248 } else {
7249 db->doorbell = bp->bar1 + map_idx * 0x80;
7250 switch (ring_type) {
7251 case HWRM_RING_ALLOC_TX:
7252 db->db_key32 = DB_KEY_TX;
7253 break;
7254 case HWRM_RING_ALLOC_RX:
7255 case HWRM_RING_ALLOC_AGG:
7256 db->db_key32 = DB_KEY_RX;
7257 break;
7258 case HWRM_RING_ALLOC_CMPL:
7259 db->db_key32 = DB_KEY_CP;
7260 break;
7261 }
7262 }
7263 bnxt_set_db_mask(bp, db, ring_type);
7264 }
7265
7266 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7267 struct bnxt_rx_ring_info *rxr)
7268 {
7269 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7270 struct bnxt_napi *bnapi = rxr->bnapi;
7271 u32 type = HWRM_RING_ALLOC_RX;
7272 u32 map_idx = bnapi->index;
7273 int rc;
7274
7275 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7276 if (rc)
7277 return rc;
7278
7279 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7280 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7281
7282 return 0;
7283 }
7284
7285 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7286 struct bnxt_rx_ring_info *rxr)
7287 {
7288 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7289 u32 type = HWRM_RING_ALLOC_AGG;
7290 u32 grp_idx = ring->grp_idx;
7291 u32 map_idx;
7292 int rc;
7293
7294 map_idx = grp_idx + bp->rx_nr_rings;
7295 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7296 if (rc)
7297 return rc;
7298
7299 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7300 ring->fw_ring_id);
7301 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7302 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7303 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7304
7305 return 0;
7306 }
7307
7308 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7309 struct bnxt_cp_ring_info *cpr)
7310 {
7311 const u32 type = HWRM_RING_ALLOC_CMPL;
7312 struct bnxt_napi *bnapi = cpr->bnapi;
7313 struct bnxt_ring_struct *ring;
7314 u32 map_idx = bnapi->index;
7315 int rc;
7316
7317 ring = &cpr->cp_ring_struct;
7318 ring->handle = BNXT_SET_NQ_HDL(cpr);
7319 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7320 if (rc)
7321 return rc;
7322 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7323 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7324 return 0;
7325 }
7326
7327 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7328 struct bnxt_tx_ring_info *txr, u32 tx_idx)
7329 {
7330 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7331 const u32 type = HWRM_RING_ALLOC_TX;
7332 int rc;
7333
7334 rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
7335 if (rc)
7336 return rc;
7337 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7338 return 0;
7339 }
7340
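/* Allocate all firmware rings in order: NQs (or L2 completion rings on
 * older chips) first, with each ring's IRQ temporarily disabled until its
 * doorbell is initialized, then TX, RX and RX aggregation rings.  On
 * P5_PLUS chips a dedicated completion ring is allocated for each TX and
 * RX ring.  The first NQ/completion ring is also registered as the async
 * event completion ring.
 */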
7341 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7342 {
7343 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7344 int i, rc = 0;
7345 u32 type;
7346
7347 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7348 type = HWRM_RING_ALLOC_NQ;
7349 else
7350 type = HWRM_RING_ALLOC_CMPL;
7351 for (i = 0; i < bp->cp_nr_rings; i++) {
7352 struct bnxt_napi *bnapi = bp->bnapi[i];
7353 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7354 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7355 u32 map_idx = ring->map_idx;
7356 unsigned int vector;
7357
7358 vector = bp->irq_tbl[map_idx].vector;
7359 disable_irq_nosync(vector);
7360 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7361 if (rc) {
7362 enable_irq(vector);
7363 goto err_out;
7364 }
7365 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7366 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7367 enable_irq(vector);
7368 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7369
7370 if (!i) {
7371 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7372 if (rc)
7373 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7374 }
7375 }
7376
7377 for (i = 0; i < bp->tx_nr_rings; i++) {
7378 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7379
7380 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7381 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7382 if (rc)
7383 goto err_out;
7384 }
7385 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7386 if (rc)
7387 goto err_out;
7388 }
7389
7390 for (i = 0; i < bp->rx_nr_rings; i++) {
7391 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7392
7393 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7394 if (rc)
7395 goto err_out;
7396 /* If we have agg rings, post agg buffers first. */
7397 if (!agg_rings)
7398 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7399 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7400 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7401 if (rc)
7402 goto err_out;
7403 }
7404 }
7405
7406 if (agg_rings) {
7407 for (i = 0; i < bp->rx_nr_rings; i++) {
7408 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7409 if (rc)
7410 goto err_out;
7411 }
7412 }
7413 err_out:
7414 return rc;
7415 }
7416
7417 static void bnxt_cancel_dim(struct bnxt *bp)
7418 {
7419 int i;
7420
7421 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7422 * if NAPI is enabled.
7423 */
7424 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7425 return;
7426
7427 /* Make sure NAPI sees that the VNIC is disabled */
7428 synchronize_net();
7429 for (i = 0; i < bp->rx_nr_rings; i++) {
7430 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7431 struct bnxt_napi *bnapi = rxr->bnapi;
7432
7433 cancel_work_sync(&bnapi->cp_ring.dim.work);
7434 }
7435 }
7436
7437 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7438 struct bnxt_ring_struct *ring,
7439 u32 ring_type, int cmpl_ring_id)
7440 {
7441 struct hwrm_ring_free_output *resp;
7442 struct hwrm_ring_free_input *req;
7443 u16 error_code = 0;
7444 int rc;
7445
7446 if (BNXT_NO_FW_ACCESS(bp))
7447 return 0;
7448
7449 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7450 if (rc)
7451 goto exit;
7452
7453 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7454 req->ring_type = ring_type;
7455 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7456
7457 resp = hwrm_req_hold(bp, req);
7458 rc = hwrm_req_send(bp, req);
7459 error_code = le16_to_cpu(resp->error_code);
7460 hwrm_req_drop(bp, req);
7461 exit:
7462 if (rc || error_code) {
7463 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7464 ring_type, rc, error_code);
7465 return -EIO;
7466 }
7467 return 0;
7468 }
7469
7470 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7471 struct bnxt_tx_ring_info *txr,
7472 bool close_path)
7473 {
7474 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7475 u32 cmpl_ring_id;
7476
7477 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7478 return;
7479
7480 cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7481 INVALID_HW_RING_ID;
7482 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7483 cmpl_ring_id);
7484 ring->fw_ring_id = INVALID_HW_RING_ID;
7485 }
7486
7487 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7488 struct bnxt_rx_ring_info *rxr,
7489 bool close_path)
7490 {
7491 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7492 u32 grp_idx = rxr->bnapi->index;
7493 u32 cmpl_ring_id;
7494
7495 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7496 return;
7497
7498 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7499 hwrm_ring_free_send_msg(bp, ring,
7500 RING_FREE_REQ_RING_TYPE_RX,
7501 close_path ? cmpl_ring_id :
7502 INVALID_HW_RING_ID);
7503 ring->fw_ring_id = INVALID_HW_RING_ID;
7504 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7505 }
7506
7507 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7508 struct bnxt_rx_ring_info *rxr,
7509 bool close_path)
7510 {
7511 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7512 u32 grp_idx = rxr->bnapi->index;
7513 u32 type, cmpl_ring_id;
7514
7515 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7516 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7517 else
7518 type = RING_FREE_REQ_RING_TYPE_RX;
7519
7520 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7521 return;
7522
7523 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7524 hwrm_ring_free_send_msg(bp, ring, type,
7525 close_path ? cmpl_ring_id :
7526 INVALID_HW_RING_ID);
7527 ring->fw_ring_id = INVALID_HW_RING_ID;
7528 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7529 }
7530
7531 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7532 struct bnxt_cp_ring_info *cpr)
7533 {
7534 struct bnxt_ring_struct *ring;
7535
7536 ring = &cpr->cp_ring_struct;
7537 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7538 return;
7539
7540 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7541 INVALID_HW_RING_ID);
7542 ring->fw_ring_id = INVALID_HW_RING_ID;
7543 }
7544
7545 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7546 {
7547 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7548 int i, size = ring->ring_mem.page_size;
7549
7550 cpr->cp_raw_cons = 0;
7551 cpr->toggle = 0;
7552
7553 for (i = 0; i < bp->cp_nr_pages; i++)
7554 if (cpr->cp_desc_ring[i])
7555 memset(cpr->cp_desc_ring[i], 0, size);
7556 }
7557
7558 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7559 {
7560 u32 type;
7561 int i;
7562
7563 if (!bp->bnapi)
7564 return;
7565
7566 for (i = 0; i < bp->tx_nr_rings; i++)
7567 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7568
7569 bnxt_cancel_dim(bp);
7570 for (i = 0; i < bp->rx_nr_rings; i++) {
7571 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7572 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7573 }
7574
7575 /* The completion rings are about to be freed. After that the
7576 * IRQ doorbell will not work anymore. So we need to disable
7577 * IRQ here.
7578 */
7579 bnxt_disable_int_sync(bp);
7580
7581 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7582 type = RING_FREE_REQ_RING_TYPE_NQ;
7583 else
7584 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7585 for (i = 0; i < bp->cp_nr_rings; i++) {
7586 struct bnxt_napi *bnapi = bp->bnapi[i];
7587 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7588 struct bnxt_ring_struct *ring;
7589 int j;
7590
7591 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7592 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7593
7594 ring = &cpr->cp_ring_struct;
7595 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7596 hwrm_ring_free_send_msg(bp, ring, type,
7597 INVALID_HW_RING_ID);
7598 ring->fw_ring_id = INVALID_HW_RING_ID;
7599 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7600 }
7601 }
7602 }
7603
7604 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7605 bool shared);
7606 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7607 bool shared);
7608
7609 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7610 {
7611 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7612 struct hwrm_func_qcfg_output *resp;
7613 struct hwrm_func_qcfg_input *req;
7614 int rc;
7615
7616 if (bp->hwrm_spec_code < 0x10601)
7617 return 0;
7618
7619 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7620 if (rc)
7621 return rc;
7622
7623 req->fid = cpu_to_le16(0xffff);
7624 resp = hwrm_req_hold(bp, req);
7625 rc = hwrm_req_send(bp, req);
7626 if (rc) {
7627 hwrm_req_drop(bp, req);
7628 return rc;
7629 }
7630
7631 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7632 if (BNXT_NEW_RM(bp)) {
7633 u16 cp, stats;
7634
7635 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7636 hw_resc->resv_hw_ring_grps =
7637 le32_to_cpu(resp->alloc_hw_ring_grps);
7638 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7639 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7640 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7641 stats = le16_to_cpu(resp->alloc_stat_ctx);
7642 hw_resc->resv_irqs = cp;
7643 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7644 int rx = hw_resc->resv_rx_rings;
7645 int tx = hw_resc->resv_tx_rings;
7646
7647 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7648 rx >>= 1;
7649 if (cp < (rx + tx)) {
7650 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7651 if (rc)
7652 goto get_rings_exit;
7653 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7654 rx <<= 1;
7655 hw_resc->resv_rx_rings = rx;
7656 hw_resc->resv_tx_rings = tx;
7657 }
7658 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7659 hw_resc->resv_hw_ring_grps = rx;
7660 }
7661 hw_resc->resv_cp_rings = cp;
7662 hw_resc->resv_stat_ctxs = stats;
7663 }
7664 get_rings_exit:
7665 hwrm_req_drop(bp, req);
7666 return rc;
7667 }
7668
7669 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7670 {
7671 struct hwrm_func_qcfg_output *resp;
7672 struct hwrm_func_qcfg_input *req;
7673 int rc;
7674
7675 if (bp->hwrm_spec_code < 0x10601)
7676 return 0;
7677
7678 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7679 if (rc)
7680 return rc;
7681
7682 req->fid = cpu_to_le16(fid);
7683 resp = hwrm_req_hold(bp, req);
7684 rc = hwrm_req_send(bp, req);
7685 if (!rc)
7686 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7687
7688 hwrm_req_drop(bp, req);
7689 return rc;
7690 }
7691
7692 static bool bnxt_rfs_supported(struct bnxt *bp);
7693
7694 static struct hwrm_func_cfg_input *
7695 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7696 {
7697 struct hwrm_func_cfg_input *req;
7698 u32 enables = 0;
7699
7700 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7701 return NULL;
7702
7703 req->fid = cpu_to_le16(0xffff);
7704 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7705 req->num_tx_rings = cpu_to_le16(hwr->tx);
7706 if (BNXT_NEW_RM(bp)) {
7707 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7708 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7709 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7710 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7711 enables |= hwr->cp_p5 ?
7712 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7713 } else {
7714 enables |= hwr->cp ?
7715 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7716 enables |= hwr->grp ?
7717 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7718 }
7719 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7720 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7721 0;
7722 req->num_rx_rings = cpu_to_le16(hwr->rx);
7723 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7724 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7725 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7726 req->num_msix = cpu_to_le16(hwr->cp);
7727 } else {
7728 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7729 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7730 }
7731 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7732 req->num_vnics = cpu_to_le16(hwr->vnic);
7733 }
7734 req->enables = cpu_to_le32(enables);
7735 return req;
7736 }
7737
7738 static struct hwrm_func_vf_cfg_input *
7739 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7740 {
7741 struct hwrm_func_vf_cfg_input *req;
7742 u32 enables = 0;
7743
7744 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7745 return NULL;
7746
7747 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7748 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7749 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7750 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7751 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7752 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7753 enables |= hwr->cp_p5 ?
7754 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7755 } else {
7756 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7757 enables |= hwr->grp ?
7758 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7759 }
7760 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7761 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7762
7763 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7764 req->num_tx_rings = cpu_to_le16(hwr->tx);
7765 req->num_rx_rings = cpu_to_le16(hwr->rx);
7766 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7767 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7768 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7769 } else {
7770 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7771 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7772 }
7773 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7774 req->num_vnics = cpu_to_le16(hwr->vnic);
7775
7776 req->enables = cpu_to_le32(enables);
7777 return req;
7778 }
7779
7780 static int
7781 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7782 {
7783 struct hwrm_func_cfg_input *req;
7784 int rc;
7785
7786 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7787 if (!req)
7788 return -ENOMEM;
7789
7790 if (!req->enables) {
7791 hwrm_req_drop(bp, req);
7792 return 0;
7793 }
7794
7795 rc = hwrm_req_send(bp, req);
7796 if (rc)
7797 return rc;
7798
7799 if (bp->hwrm_spec_code < 0x10601)
7800 bp->hw_resc.resv_tx_rings = hwr->tx;
7801
7802 return bnxt_hwrm_get_rings(bp);
7803 }
7804
7805 static int
7806 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7807 {
7808 struct hwrm_func_vf_cfg_input *req;
7809 int rc;
7810
7811 if (!BNXT_NEW_RM(bp)) {
7812 bp->hw_resc.resv_tx_rings = hwr->tx;
7813 return 0;
7814 }
7815
7816 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7817 if (!req)
7818 return -ENOMEM;
7819
7820 rc = hwrm_req_send(bp, req);
7821 if (rc)
7822 return rc;
7823
7824 return bnxt_hwrm_get_rings(bp);
7825 }
7826
7827 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7828 {
7829 if (BNXT_PF(bp))
7830 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7831 else
7832 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7833 }
7834
7835 int bnxt_nq_rings_in_use(struct bnxt *bp)
7836 {
7837 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7838 }
7839
7840 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7841 {
7842 int cp;
7843
7844 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7845 return bnxt_nq_rings_in_use(bp);
7846
7847 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7848 return cp;
7849 }
7850
7851 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7852 {
7853 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7854 }
7855
7856 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7857 {
7858 if (!hwr->grp)
7859 return 0;
7860 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7861 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7862
7863 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7864 rss_ctx *= hwr->vnic;
7865 return rss_ctx;
7866 }
7867 if (BNXT_VF(bp))
7868 return BNXT_VF_MAX_RSS_CTX;
7869 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7870 return hwr->grp + 1;
7871 return 1;
7872 }
7873
7874 /* Check if a default RSS map needs to be setup. This function is only
7875 * used on older firmware that does not require reserving RX rings.
7876 */
7877 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7878 {
7879 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7880
7881 /* The RSS map is valid for RX rings set to resv_rx_rings */
7882 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7883 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7884 if (!netif_is_rxfh_configured(bp->dev))
7885 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7886 }
7887 }
7888
7889 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7890 {
7891 if (bp->flags & BNXT_FLAG_RFS) {
7892 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7893 return 2 + bp->num_rss_ctx;
7894 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7895 return rx_rings + 1;
7896 }
7897 return 1;
7898 }
7899
7900 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7901 {
7902 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7903 int cp = bnxt_cp_rings_in_use(bp);
7904 int nq = bnxt_nq_rings_in_use(bp);
7905 int rx = bp->rx_nr_rings, stat;
7906 int vnic, grp = rx;
7907
7908 /* Old firmware does not need RX ring reservations but we still
7909 * need to setup a default RSS map when needed. With new firmware
7910 * we go through RX ring reservations first and then set up the
7911 * RSS map for the successfully reserved RX rings when needed.
7912 */
7913 if (!BNXT_NEW_RM(bp))
7914 bnxt_check_rss_tbl_no_rmgr(bp);
7915
7916 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7917 bp->hwrm_spec_code >= 0x10601)
7918 return true;
7919
7920 if (!BNXT_NEW_RM(bp))
7921 return false;
7922
7923 vnic = bnxt_get_total_vnics(bp, rx);
7924
7925 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7926 rx <<= 1;
7927 stat = bnxt_get_func_stat_ctxs(bp);
7928 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7929 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7930 (hw_resc->resv_hw_ring_grps != grp &&
7931 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7932 return true;
7933 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7934 hw_resc->resv_irqs != nq)
7935 return true;
7936 return false;
7937 }
7938
7939 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7940 {
7941 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7942
7943 hwr->tx = hw_resc->resv_tx_rings;
7944 if (BNXT_NEW_RM(bp)) {
7945 hwr->rx = hw_resc->resv_rx_rings;
7946 hwr->cp = hw_resc->resv_irqs;
7947 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7948 hwr->cp_p5 = hw_resc->resv_cp_rings;
7949 hwr->grp = hw_resc->resv_hw_ring_grps;
7950 hwr->vnic = hw_resc->resv_vnics;
7951 hwr->stat = hw_resc->resv_stat_ctxs;
7952 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7953 }
7954 }
7955
7956 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7957 {
7958 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7959 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7960 }
7961
7962 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7963
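/* Reserve TX, RX, completion rings, VNICs, stats contexts and RSS contexts
 * with the firmware based on the current ring counts, then trim the
 * driver's ring counts down to what was actually granted.  If aggregation
 * rings are enabled but fewer than 2 RX rings are granted, aggregation
 * rings and LRO are turned off (or -ENOMEM is returned if the device is
 * already up).  The RSS indirection table is reset to the default when the
 * reserved RX ring count changes and no user RSS map is configured.
 */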
7964 static int __bnxt_reserve_rings(struct bnxt *bp)
7965 {
7966 struct bnxt_hw_rings hwr = {0};
7967 int rx_rings, old_rx_rings, rc;
7968 int cp = bp->cp_nr_rings;
7969 int ulp_msix = 0;
7970 bool sh = false;
7971 int tx_cp;
7972
7973 if (!bnxt_need_reserve_rings(bp))
7974 return 0;
7975
7976 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
7977 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7978 if (!ulp_msix)
7979 bnxt_set_ulp_stat_ctxs(bp, 0);
7980
7981 if (ulp_msix > bp->ulp_num_msix_want)
7982 ulp_msix = bp->ulp_num_msix_want;
7983 hwr.cp = cp + ulp_msix;
7984 } else {
7985 hwr.cp = bnxt_nq_rings_in_use(bp);
7986 }
7987
7988 hwr.tx = bp->tx_nr_rings;
7989 hwr.rx = bp->rx_nr_rings;
7990 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7991 sh = true;
7992 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7993 hwr.cp_p5 = hwr.rx + hwr.tx;
7994
7995 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7996
7997 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7998 hwr.rx <<= 1;
7999 hwr.grp = bp->rx_nr_rings;
8000 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8001 hwr.stat = bnxt_get_func_stat_ctxs(bp);
8002 old_rx_rings = bp->hw_resc.resv_rx_rings;
8003
8004 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8005 if (rc)
8006 return rc;
8007
8008 bnxt_copy_reserved_rings(bp, &hwr);
8009
8010 rx_rings = hwr.rx;
8011 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8012 if (hwr.rx >= 2) {
8013 rx_rings = hwr.rx >> 1;
8014 } else {
8015 if (netif_running(bp->dev))
8016 return -ENOMEM;
8017
8018 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8019 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8020 bp->dev->hw_features &= ~NETIF_F_LRO;
8021 bp->dev->features &= ~NETIF_F_LRO;
8022 bnxt_set_ring_params(bp);
8023 }
8024 }
8025 rx_rings = min_t(int, rx_rings, hwr.grp);
8026 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8027 if (bnxt_ulp_registered(bp->edev) &&
8028 hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8029 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8030 hwr.cp = min_t(int, hwr.cp, hwr.stat);
8031 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8032 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8033 hwr.rx = rx_rings << 1;
8034 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8035 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8036 if (hwr.tx != bp->tx_nr_rings) {
8037 netdev_warn(bp->dev,
8038 "Able to reserve only %d out of %d requested TX rings\n",
8039 hwr.tx, bp->tx_nr_rings);
8040 }
8041 bp->tx_nr_rings = hwr.tx;
8042
8043 /* If we cannot reserve all the RX rings, reset the RSS map only
8044 * if absolutely necessary
8045 */
8046 if (rx_rings != bp->rx_nr_rings) {
8047 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8048 rx_rings, bp->rx_nr_rings);
8049 if (netif_is_rxfh_configured(bp->dev) &&
8050 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8051 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8052 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8053 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
8054 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
8055 }
8056 }
8057 bp->rx_nr_rings = rx_rings;
8058 bp->cp_nr_rings = hwr.cp;
8059
8060 if (!bnxt_rings_ok(bp, &hwr))
8061 return -ENOMEM;
8062
8063 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8064 !netif_is_rxfh_configured(bp->dev))
8065 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8066
8067 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8068 int resv_msix, resv_ctx, ulp_ctxs;
8069 struct bnxt_hw_resc *hw_resc;
8070
8071 hw_resc = &bp->hw_resc;
8072 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8073 ulp_msix = min_t(int, resv_msix, ulp_msix);
8074 bnxt_set_ulp_msix_num(bp, ulp_msix);
8075 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8076 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8077 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8078 }
8079
8080 return rc;
8081 }
8082
8083 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8084 {
8085 struct hwrm_func_vf_cfg_input *req;
8086 u32 flags;
8087
8088 if (!BNXT_NEW_RM(bp))
8089 return 0;
8090
8091 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8092 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8093 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8094 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8095 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8096 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8097 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8098 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8099 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8100
8101 req->flags = cpu_to_le32(flags);
8102 return hwrm_req_send_silent(bp, req);
8103 }
8104
8105 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8106 {
8107 struct hwrm_func_cfg_input *req;
8108 u32 flags;
8109
8110 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8111 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8112 if (BNXT_NEW_RM(bp)) {
8113 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8114 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8115 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8116 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8117 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8118 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8119 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8120 else
8121 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8122 }
8123
8124 req->flags = cpu_to_le32(flags);
8125 return hwrm_req_send_silent(bp, req);
8126 }
8127
8128 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8129 {
8130 if (bp->hwrm_spec_code < 0x10801)
8131 return 0;
8132
8133 if (BNXT_PF(bp))
8134 return bnxt_hwrm_check_pf_rings(bp, hwr);
8135
8136 return bnxt_hwrm_check_vf_rings(bp, hwr);
8137 }
8138
8139 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8140 {
8141 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8142 struct hwrm_ring_aggint_qcaps_output *resp;
8143 struct hwrm_ring_aggint_qcaps_input *req;
8144 int rc;
8145
8146 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8147 coal_cap->num_cmpl_dma_aggr_max = 63;
8148 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8149 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8150 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8151 coal_cap->int_lat_tmr_min_max = 65535;
8152 coal_cap->int_lat_tmr_max_max = 65535;
8153 coal_cap->num_cmpl_aggr_int_max = 65535;
8154 coal_cap->timer_units = 80;
8155
8156 if (bp->hwrm_spec_code < 0x10902)
8157 return;
8158
8159 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8160 return;
8161
8162 resp = hwrm_req_hold(bp, req);
8163 rc = hwrm_req_send_silent(bp, req);
8164 if (!rc) {
8165 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8166 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8167 coal_cap->num_cmpl_dma_aggr_max =
8168 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8169 coal_cap->num_cmpl_dma_aggr_during_int_max =
8170 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8171 coal_cap->cmpl_aggr_dma_tmr_max =
8172 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8173 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8174 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8175 coal_cap->int_lat_tmr_min_max =
8176 le16_to_cpu(resp->int_lat_tmr_min_max);
8177 coal_cap->int_lat_tmr_max_max =
8178 le16_to_cpu(resp->int_lat_tmr_max_max);
8179 coal_cap->num_cmpl_aggr_int_max =
8180 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8181 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8182 }
8183 hwrm_req_drop(bp, req);
8184 }
8185
8186 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8187 {
8188 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8189
8190 return usec * 1000 / coal_cap->timer_units;
8191 }
8192
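/* Translate the driver's interrupt coalescing settings into HWRM aggint
 * parameters, clamping each value to the limits reported by
 * HWRM_RING_AGGINT_QCAPS.  The min latency timer is programmed to 1/2 and
 * the DMA buffer timer to 1/4 of the interrupt latency timer.
 */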
8193 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8194 struct bnxt_coal *hw_coal,
8195 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8196 {
8197 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8198 u16 val, tmr, max, flags = hw_coal->flags;
8199 u32 cmpl_params = coal_cap->cmpl_params;
8200
8201 max = hw_coal->bufs_per_record * 128;
8202 if (hw_coal->budget)
8203 max = hw_coal->bufs_per_record * hw_coal->budget;
8204 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8205
8206 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8207 req->num_cmpl_aggr_int = cpu_to_le16(val);
8208
8209 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8210 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8211
8212 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8213 coal_cap->num_cmpl_dma_aggr_during_int_max);
8214 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8215
8216 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8217 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8218 req->int_lat_tmr_max = cpu_to_le16(tmr);
8219
8220 /* min timer set to 1/2 of interrupt timer */
8221 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8222 val = tmr / 2;
8223 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8224 req->int_lat_tmr_min = cpu_to_le16(val);
8225 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8226 }
8227
8228 /* buf timer set to 1/4 of interrupt timer */
8229 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8230 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8231
8232 if (cmpl_params &
8233 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8234 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8235 val = clamp_t(u16, tmr, 1,
8236 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8237 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8238 req->enables |=
8239 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8240 }
8241
8242 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8243 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8244 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8245 req->flags = cpu_to_le16(flags);
8246 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8247 }
8248
8249 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8250 struct bnxt_coal *hw_coal)
8251 {
8252 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8253 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8254 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8255 u32 nq_params = coal_cap->nq_params;
8256 u16 tmr;
8257 int rc;
8258
8259 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8260 return 0;
8261
8262 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8263 if (rc)
8264 return rc;
8265
8266 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8267 req->flags =
8268 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8269
8270 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8271 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8272 req->int_lat_tmr_min = cpu_to_le16(tmr);
8273 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8274 return hwrm_req_send(bp, req);
8275 }
8276
8277 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8278 {
8279 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8280 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8281 struct bnxt_coal coal;
8282 int rc;
8283
8284 /* Tick values in micro seconds.
8285 * 1 coal_buf x bufs_per_record = 1 completion record.
8286 */
8287 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8288
8289 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8290 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8291
8292 if (!bnapi->rx_ring)
8293 return -ENODEV;
8294
8295 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8296 if (rc)
8297 return rc;
8298
8299 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8300
8301 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8302
8303 return hwrm_req_send(bp, req_rx);
8304 }
8305
8306 static int
8307 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8308 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8309 {
8310 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8311
8312 req->ring_id = cpu_to_le16(ring_id);
8313 return hwrm_req_send(bp, req);
8314 }
8315
8316 static int
8317 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8318 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8319 {
8320 struct bnxt_tx_ring_info *txr;
8321 int i, rc;
8322
8323 bnxt_for_each_napi_tx(i, bnapi, txr) {
8324 u16 ring_id;
8325
8326 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8327 req->ring_id = cpu_to_le16(ring_id);
8328 rc = hwrm_req_send(bp, req);
8329 if (rc)
8330 return rc;
8331 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8332 return 0;
8333 }
8334 return 0;
8335 }
8336
8337 int bnxt_hwrm_set_coal(struct bnxt *bp)
8338 {
8339 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8340 int i, rc;
8341
8342 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8343 if (rc)
8344 return rc;
8345
8346 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8347 if (rc) {
8348 hwrm_req_drop(bp, req_rx);
8349 return rc;
8350 }
8351
8352 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8353 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8354
8355 hwrm_req_hold(bp, req_rx);
8356 hwrm_req_hold(bp, req_tx);
8357 for (i = 0; i < bp->cp_nr_rings; i++) {
8358 struct bnxt_napi *bnapi = bp->bnapi[i];
8359 struct bnxt_coal *hw_coal;
8360
8361 if (!bnapi->rx_ring)
8362 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8363 else
8364 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8365 if (rc)
8366 break;
8367
8368 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8369 continue;
8370
8371 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8372 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8373 if (rc)
8374 break;
8375 }
8376 if (bnapi->rx_ring)
8377 hw_coal = &bp->rx_coal;
8378 else
8379 hw_coal = &bp->tx_coal;
8380 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8381 }
8382 hwrm_req_drop(bp, req_rx);
8383 hwrm_req_drop(bp, req_tx);
8384 return rc;
8385 }
8386
8387 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8388 {
8389 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8390 struct hwrm_stat_ctx_free_input *req;
8391 int i;
8392
8393 if (!bp->bnapi)
8394 return;
8395
8396 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8397 return;
8398
8399 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8400 return;
8401 if (BNXT_FW_MAJ(bp) <= 20) {
8402 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8403 hwrm_req_drop(bp, req);
8404 return;
8405 }
8406 hwrm_req_hold(bp, req0);
8407 }
8408 hwrm_req_hold(bp, req);
8409 for (i = 0; i < bp->cp_nr_rings; i++) {
8410 struct bnxt_napi *bnapi = bp->bnapi[i];
8411 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8412
8413 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8414 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8415 if (req0) {
8416 req0->stat_ctx_id = req->stat_ctx_id;
8417 hwrm_req_send(bp, req0);
8418 }
8419 hwrm_req_send(bp, req);
8420
8421 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8422 }
8423 }
8424 hwrm_req_drop(bp, req);
8425 if (req0)
8426 hwrm_req_drop(bp, req0);
8427 }
8428
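/* Allocate one firmware statistics context per completion ring and save
 * the returned context ID in both the ring and its ring group info.
 */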
8429 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8430 {
8431 struct hwrm_stat_ctx_alloc_output *resp;
8432 struct hwrm_stat_ctx_alloc_input *req;
8433 int rc, i;
8434
8435 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8436 return 0;
8437
8438 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8439 if (rc)
8440 return rc;
8441
8442 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8443 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8444
8445 resp = hwrm_req_hold(bp, req);
8446 for (i = 0; i < bp->cp_nr_rings; i++) {
8447 struct bnxt_napi *bnapi = bp->bnapi[i];
8448 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8449
8450 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8451
8452 rc = hwrm_req_send(bp, req);
8453 if (rc)
8454 break;
8455
8456 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8457
8458 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8459 }
8460 hwrm_req_drop(bp, req);
8461 return rc;
8462 }
8463
8464 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8465 {
8466 struct hwrm_func_qcfg_output *resp;
8467 struct hwrm_func_qcfg_input *req;
8468 u16 flags;
8469 int rc;
8470
8471 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8472 if (rc)
8473 return rc;
8474
8475 req->fid = cpu_to_le16(0xffff);
8476 resp = hwrm_req_hold(bp, req);
8477 rc = hwrm_req_send(bp, req);
8478 if (rc)
8479 goto func_qcfg_exit;
8480
8481 flags = le16_to_cpu(resp->flags);
8482 #ifdef CONFIG_BNXT_SRIOV
8483 if (BNXT_VF(bp)) {
8484 struct bnxt_vf_info *vf = &bp->vf;
8485
8486 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8487 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8488 vf->flags |= BNXT_VF_TRUST;
8489 else
8490 vf->flags &= ~BNXT_VF_TRUST;
8491 } else {
8492 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8493 }
8494 #endif
8495 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8496 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8497 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8498 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8499 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8500 }
8501 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8502 bp->flags |= BNXT_FLAG_MULTI_HOST;
8503
8504 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8505 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8506
8507 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8508 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8509
8510 switch (resp->port_partition_type) {
8511 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8512 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8513 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8514 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8515 bp->port_partition_type = resp->port_partition_type;
8516 break;
8517 }
8518 if (bp->hwrm_spec_code < 0x10707 ||
8519 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8520 bp->br_mode = BRIDGE_MODE_VEB;
8521 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8522 bp->br_mode = BRIDGE_MODE_VEPA;
8523 else
8524 bp->br_mode = BRIDGE_MODE_UNDEF;
8525
8526 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8527 if (!bp->max_mtu)
8528 bp->max_mtu = BNXT_MAX_MTU;
8529
8530 if (bp->db_size)
8531 goto func_qcfg_exit;
8532
8533 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8534 if (BNXT_CHIP_P5(bp)) {
8535 if (BNXT_PF(bp))
8536 bp->db_offset = DB_PF_OFFSET_P5;
8537 else
8538 bp->db_offset = DB_VF_OFFSET_P5;
8539 }
8540 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8541 1024);
8542 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8543 bp->db_size <= bp->db_offset)
8544 bp->db_size = pci_resource_len(bp->pdev, 2);
8545
8546 func_qcfg_exit:
8547 hwrm_req_drop(bp, req);
8548 return rc;
8549 }
8550
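/* Record the context memory init value and byte offset (the firmware
 * reports the offset in 4-byte units).  If the init offset is not valid,
 * clear the init value so the memory is not initialized.
 */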
8551 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8552 u8 init_val, u8 init_offset,
8553 bool init_mask_set)
8554 {
8555 ctxm->init_value = init_val;
8556 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8557 if (init_mask_set)
8558 ctxm->init_offset = init_offset * 4;
8559 else
8560 ctxm->init_value = 0;
8561 }
8562
8563 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8564 {
8565 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8566 u16 type;
8567
8568 for (type = 0; type < ctx_max; type++) {
8569 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8570 int n = 1;
8571
8572 if (!ctxm->max_entries || ctxm->pg_info)
8573 continue;
8574
8575 if (ctxm->instance_bmap)
8576 n = hweight32(ctxm->instance_bmap);
8577 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8578 if (!ctxm->pg_info)
8579 return -ENOMEM;
8580 }
8581 return 0;
8582 }
8583
8584 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8585 struct bnxt_ctx_mem_type *ctxm, bool force);
8586
8587 #define BNXT_CTX_INIT_VALID(flags) \
8588 (!!((flags) & \
8589 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8590
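/* Query backing store capabilities one context type at a time using the
 * V2 interface, following the next_valid_type chain.  Previously allocated
 * context memory is freed unless the firmware still marks it valid and
 * persistent and its entry size and max entries are unchanged.
 */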
8591 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8592 {
8593 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8594 struct hwrm_func_backing_store_qcaps_v2_input *req;
8595 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8596 u16 type;
8597 int rc;
8598
8599 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8600 if (rc)
8601 return rc;
8602
8603 if (!ctx) {
8604 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8605 if (!ctx)
8606 return -ENOMEM;
8607 bp->ctx = ctx;
8608 }
8609
8610 resp = hwrm_req_hold(bp, req);
8611
8612 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8613 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8614 u8 init_val, init_off, i;
8615 u32 max_entries;
8616 u16 entry_size;
8617 __le32 *p;
8618 u32 flags;
8619
8620 req->type = cpu_to_le16(type);
8621 rc = hwrm_req_send(bp, req);
8622 if (rc)
8623 goto ctx_done;
8624 flags = le32_to_cpu(resp->flags);
8625 type = le16_to_cpu(resp->next_valid_type);
8626 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8627 bnxt_free_one_ctx_mem(bp, ctxm, true);
8628 continue;
8629 }
8630 entry_size = le16_to_cpu(resp->entry_size);
8631 max_entries = le32_to_cpu(resp->max_num_entries);
8632 if (ctxm->mem_valid) {
8633 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8634 ctxm->entry_size != entry_size ||
8635 ctxm->max_entries != max_entries)
8636 bnxt_free_one_ctx_mem(bp, ctxm, true);
8637 else
8638 continue;
8639 }
8640 ctxm->type = le16_to_cpu(resp->type);
8641 ctxm->entry_size = entry_size;
8642 ctxm->flags = flags;
8643 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8644 ctxm->entry_multiple = resp->entry_multiple;
8645 ctxm->max_entries = max_entries;
8646 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8647 init_val = resp->ctx_init_value;
8648 init_off = resp->ctx_init_offset;
8649 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8650 BNXT_CTX_INIT_VALID(flags));
8651 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8652 BNXT_MAX_SPLIT_ENTRY);
8653 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8654 i++, p++)
8655 ctxm->split[i] = le32_to_cpu(*p);
8656 }
8657 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8658
8659 ctx_done:
8660 hwrm_req_drop(bp, req);
8661 return rc;
8662 }
8663
8664 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8665 {
8666 struct hwrm_func_backing_store_qcaps_output *resp;
8667 struct hwrm_func_backing_store_qcaps_input *req;
8668 int rc;
8669
8670 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8671 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8672 return 0;
8673
8674 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8675 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8676
8677 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8678 if (rc)
8679 return rc;
8680
8681 resp = hwrm_req_hold(bp, req);
8682 rc = hwrm_req_send_silent(bp, req);
8683 if (!rc) {
8684 struct bnxt_ctx_mem_type *ctxm;
8685 struct bnxt_ctx_mem_info *ctx;
8686 u8 init_val, init_idx = 0;
8687 u16 init_mask;
8688
8689 ctx = bp->ctx;
8690 if (!ctx) {
8691 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8692 if (!ctx) {
8693 rc = -ENOMEM;
8694 goto ctx_err;
8695 }
8696 bp->ctx = ctx;
8697 }
8698 init_val = resp->ctx_kind_initializer;
8699 init_mask = le16_to_cpu(resp->ctx_init_mask);
8700
8701 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8702 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8703 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8704 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8705 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8706 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8707 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8708 (init_mask & (1 << init_idx++)) != 0);
8709
8710 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8711 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8712 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8713 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8714 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8715 (init_mask & (1 << init_idx++)) != 0);
8716
8717 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8718 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8719 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8720 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8721 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8722 (init_mask & (1 << init_idx++)) != 0);
8723
8724 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8725 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8726 ctxm->max_entries = ctxm->vnic_entries +
8727 le16_to_cpu(resp->vnic_max_ring_table_entries);
8728 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8729 bnxt_init_ctx_initializer(ctxm, init_val,
8730 resp->vnic_init_offset,
8731 (init_mask & (1 << init_idx++)) != 0);
8732
8733 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8734 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8735 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8736 bnxt_init_ctx_initializer(ctxm, init_val,
8737 resp->stat_init_offset,
8738 (init_mask & (1 << init_idx++)) != 0);
8739
8740 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8741 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8742 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8743 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8744 ctxm->entry_multiple = resp->tqm_entries_multiple;
8745 if (!ctxm->entry_multiple)
8746 ctxm->entry_multiple = 1;
8747
8748 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8749
8750 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8751 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8752 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8753 ctxm->mrav_num_entries_units =
8754 le16_to_cpu(resp->mrav_num_entries_units);
8755 bnxt_init_ctx_initializer(ctxm, init_val,
8756 resp->mrav_init_offset,
8757 (init_mask & (1 << init_idx++)) != 0);
8758
8759 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8760 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8761 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8762
8763 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8764 if (!ctx->tqm_fp_rings_count)
8765 ctx->tqm_fp_rings_count = bp->max_q;
8766 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8767 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8768
8769 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8770 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8771 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8772
8773 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8774 } else {
8775 rc = 0;
8776 }
8777 ctx_err:
8778 hwrm_req_drop(bp, req);
8779 return rc;
8780 }
8781
8782 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8783 __le64 *pg_dir)
8784 {
8785 if (!rmem->nr_pages)
8786 return;
8787
8788 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8789 if (rmem->depth >= 1) {
8790 if (rmem->depth == 2)
8791 *pg_attr |= 2;
8792 else
8793 *pg_attr |= 1;
8794 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8795 } else {
8796 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8797 }
8798 }
8799
8800 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8801 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8802 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8803 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8804 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8805 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8806
8807 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8808 {
8809 struct hwrm_func_backing_store_cfg_input *req;
8810 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8811 struct bnxt_ctx_pg_info *ctx_pg;
8812 struct bnxt_ctx_mem_type *ctxm;
8813 void **__req = (void **)&req;
8814 u32 req_len = sizeof(*req);
8815 __le32 *num_entries;
8816 __le64 *pg_dir;
8817 u32 flags = 0;
8818 u8 *pg_attr;
8819 u32 ena;
8820 int rc;
8821 int i;
8822
8823 if (!ctx)
8824 return 0;
8825
8826 if (req_len > bp->hwrm_max_ext_req_len)
8827 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8828 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8829 if (rc)
8830 return rc;
8831
8832 req->enables = cpu_to_le32(enables);
8833 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8834 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8835 ctx_pg = ctxm->pg_info;
8836 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8837 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8838 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8839 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8840 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8841 &req->qpc_pg_size_qpc_lvl,
8842 &req->qpc_page_dir);
8843
8844 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8845 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8846 }
8847 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8848 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8849 ctx_pg = ctxm->pg_info;
8850 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8851 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8852 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8853 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8854 &req->srq_pg_size_srq_lvl,
8855 &req->srq_page_dir);
8856 }
8857 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8858 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8859 ctx_pg = ctxm->pg_info;
8860 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8861 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8862 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8863 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8864 &req->cq_pg_size_cq_lvl,
8865 &req->cq_page_dir);
8866 }
8867 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8868 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8869 ctx_pg = ctxm->pg_info;
8870 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8871 req->vnic_num_ring_table_entries =
8872 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8873 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8874 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8875 &req->vnic_pg_size_vnic_lvl,
8876 &req->vnic_page_dir);
8877 }
8878 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8879 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8880 ctx_pg = ctxm->pg_info;
8881 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8882 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8883 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8884 &req->stat_pg_size_stat_lvl,
8885 &req->stat_page_dir);
8886 }
8887 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8888 u32 units;
8889
8890 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8891 ctx_pg = ctxm->pg_info;
8892 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8893 units = ctxm->mrav_num_entries_units;
8894 if (units) {
8895 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8896 u32 entries;
8897
8898 num_mr = ctx_pg->entries - num_ah;
8899 entries = ((num_mr / units) << 16) | (num_ah / units);
8900 req->mrav_num_entries = cpu_to_le32(entries);
8901 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8902 }
8903 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8904 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8905 &req->mrav_pg_size_mrav_lvl,
8906 &req->mrav_page_dir);
8907 }
8908 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8909 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8910 ctx_pg = ctxm->pg_info;
8911 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8912 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8913 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8914 &req->tim_pg_size_tim_lvl,
8915 &req->tim_page_dir);
8916 }
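	/* Configure the slow-path TQM ring first, then each enabled fast-path
	 * TQM ring, advancing the request's per-ring fields in lockstep.
	 */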
8917 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8918 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8919 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8920 pg_dir = &req->tqm_sp_page_dir,
8921 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8922 ctx_pg = ctxm->pg_info;
8923 i < BNXT_MAX_TQM_RINGS;
8924 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8925 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8926 if (!(enables & ena))
8927 continue;
8928
8929 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8930 *num_entries = cpu_to_le32(ctx_pg->entries);
8931 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8932 }
8933 req->flags = cpu_to_le32(flags);
8934 return hwrm_req_send(bp, req);
8935 }
8936
8937 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8938 struct bnxt_ctx_pg_info *ctx_pg)
8939 {
8940 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8941
8942 rmem->page_size = BNXT_PAGE_SIZE;
8943 rmem->pg_arr = ctx_pg->ctx_pg_arr;
8944 rmem->dma_arr = ctx_pg->ctx_dma_arr;
8945 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8946 if (rmem->depth >= 1)
8947 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8948 return bnxt_alloc_ring(bp, rmem);
8949 }
8950
8951 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8952 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8953 u8 depth, struct bnxt_ctx_mem_type *ctxm)
8954 {
8955 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8956 int rc;
8957
8958 if (!mem_size)
8959 return -EINVAL;
8960
8961 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8962 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8963 ctx_pg->nr_pages = 0;
8964 return -EINVAL;
8965 }
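	/* Use a two-level page table when the context memory does not fit
	 * in a single level of MAX_CTX_PAGES pages.
	 */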
8966 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8967 int nr_tbls, i;
8968
8969 rmem->depth = 2;
8970 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8971 GFP_KERNEL);
8972 if (!ctx_pg->ctx_pg_tbl)
8973 return -ENOMEM;
8974 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8975 rmem->nr_pages = nr_tbls;
8976 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8977 if (rc)
8978 return rc;
8979 for (i = 0; i < nr_tbls; i++) {
8980 struct bnxt_ctx_pg_info *pg_tbl;
8981
8982 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8983 if (!pg_tbl)
8984 return -ENOMEM;
8985 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8986 rmem = &pg_tbl->ring_mem;
8987 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8988 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8989 rmem->depth = 1;
8990 rmem->nr_pages = MAX_CTX_PAGES;
8991 rmem->ctx_mem = ctxm;
8992 if (i == (nr_tbls - 1)) {
8993 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8994
8995 if (rem)
8996 rmem->nr_pages = rem;
8997 }
8998 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8999 if (rc)
9000 break;
9001 }
9002 } else {
9003 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9004 if (rmem->nr_pages > 1 || depth)
9005 rmem->depth = 1;
9006 rmem->ctx_mem = ctxm;
9007 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9008 }
9009 return rc;
9010 }
9011
9012 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9013 struct bnxt_ctx_pg_info *ctx_pg,
9014 void *buf, size_t offset, size_t head,
9015 size_t tail)
9016 {
9017 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9018 size_t nr_pages = ctx_pg->nr_pages;
9019 int page_size = rmem->page_size;
9020 size_t len = 0, total_len = 0;
9021 u16 depth = rmem->depth;
9022
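	/* Copy the [head, tail) range, wrapping back to offset 0 at the end
	 * of the context memory; for depth > 1, select the second-level page
	 * table that covers the current head offset.
	 */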
9023 tail %= nr_pages * page_size;
9024 do {
9025 if (depth > 1) {
9026 int i = head / (page_size * MAX_CTX_PAGES);
9027 struct bnxt_ctx_pg_info *pg_tbl;
9028
9029 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9030 rmem = &pg_tbl->ring_mem;
9031 }
9032 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9033 head += len;
9034 offset += len;
9035 total_len += len;
9036 if (head >= nr_pages * page_size)
9037 head = 0;
9038 } while (head != tail);
9039 return total_len;
9040 }
9041
9042 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9043 struct bnxt_ctx_pg_info *ctx_pg)
9044 {
9045 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9046
9047 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9048 ctx_pg->ctx_pg_tbl) {
9049 int i, nr_tbls = rmem->nr_pages;
9050
9051 for (i = 0; i < nr_tbls; i++) {
9052 struct bnxt_ctx_pg_info *pg_tbl;
9053 struct bnxt_ring_mem_info *rmem2;
9054
9055 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9056 if (!pg_tbl)
9057 continue;
9058 rmem2 = &pg_tbl->ring_mem;
9059 bnxt_free_ring(bp, rmem2);
9060 ctx_pg->ctx_pg_arr[i] = NULL;
9061 kfree(pg_tbl);
9062 ctx_pg->ctx_pg_tbl[i] = NULL;
9063 }
9064 kfree(ctx_pg->ctx_pg_tbl);
9065 ctx_pg->ctx_pg_tbl = NULL;
9066 }
9067 bnxt_free_ring(bp, rmem);
9068 ctx_pg->nr_pages = 0;
9069 }
9070
9071 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9072 struct bnxt_ctx_mem_type *ctxm, u32 entries,
9073 u8 pg_lvl)
9074 {
9075 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9076 int i, rc = 0, n = 1;
9077 u32 mem_size;
9078
9079 if (!ctxm->entry_size || !ctx_pg)
9080 return -EINVAL;
9081 if (ctxm->instance_bmap)
9082 n = hweight32(ctxm->instance_bmap);
9083 if (ctxm->entry_multiple)
9084 entries = roundup(entries, ctxm->entry_multiple);
9085 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9086 mem_size = entries * ctxm->entry_size;
9087 for (i = 0; i < n && !rc; i++) {
9088 ctx_pg[i].entries = entries;
9089 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9090 ctxm->init_value ? ctxm : NULL);
9091 }
9092 if (!rc)
9093 ctxm->mem_valid = 1;
9094 return rc;
9095 }
9096
9097 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9098 struct bnxt_ctx_mem_type *ctxm,
9099 bool last)
9100 {
9101 struct hwrm_func_backing_store_cfg_v2_input *req;
9102 u32 instance_bmap = ctxm->instance_bmap;
9103 int i, j, rc = 0, n = 1;
9104 __le32 *p;
9105
9106 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9107 return 0;
9108
9109 if (instance_bmap)
9110 n = hweight32(ctxm->instance_bmap);
9111 else
9112 instance_bmap = 1;
9113
9114 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9115 if (rc)
9116 return rc;
9117 hwrm_req_hold(bp, req);
9118 req->type = cpu_to_le16(ctxm->type);
9119 req->entry_size = cpu_to_le16(ctxm->entry_size);
9120 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9121 bnxt_bs_trace_avail(bp, ctxm->type)) {
9122 struct bnxt_bs_trace_info *bs_trace;
9123 u32 enables;
9124
9125 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9126 req->enables = cpu_to_le32(enables);
9127 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9128 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9129 }
9130 req->subtype_valid_cnt = ctxm->split_entry_cnt;
9131 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9132 p[i] = cpu_to_le32(ctxm->split[i]);
9133 for (i = 0, j = 0; j < n && !rc; i++) {
9134 struct bnxt_ctx_pg_info *ctx_pg;
9135
9136 if (!(instance_bmap & (1 << i)))
9137 continue;
9138 req->instance = cpu_to_le16(i);
9139 ctx_pg = &ctxm->pg_info[j++];
9140 if (!ctx_pg->entries)
9141 continue;
9142 req->num_entries = cpu_to_le32(ctx_pg->entries);
9143 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9144 &req->page_size_pbl_level,
9145 &req->page_dir);
9146 if (last && j == n)
9147 req->flags =
9148 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9149 rc = hwrm_req_send(bp, req);
9150 }
9151 hwrm_req_drop(bp, req);
9152 return rc;
9153 }
9154
9155 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9156 {
9157 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9158 struct bnxt_ctx_mem_type *ctxm;
9159 u16 last_type = BNXT_CTX_INV;
9160 int rc = 0;
9161 u16 type;
9162
9163 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
9164 ctxm = &ctx->ctx_arr[type];
9165 if (!bnxt_bs_trace_avail(bp, type))
9166 continue;
9167 if (!ctxm->mem_valid) {
9168 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9169 ctxm->max_entries, 1);
9170 if (rc) {
9171 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9172 type);
9173 continue;
9174 }
9175 bnxt_bs_trace_init(bp, ctxm);
9176 }
9177 last_type = type;
9178 }
9179
9180 if (last_type == BNXT_CTX_INV) {
9181 for (type = 0; type < BNXT_CTX_MAX; type++) {
9182 ctxm = &ctx->ctx_arr[type];
9183 if (ctxm->mem_valid)
9184 last_type = type;
9185 }
9186 if (last_type == BNXT_CTX_INV)
9187 return 0;
9188 }
9189 ctx->ctx_arr[last_type].last = 1;
9190
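	/* Send one FUNC_BACKING_STORE_CFG_V2 request per valid context type;
	 * the type marked 'last' tells the firmware that backing store
	 * configuration is complete.
	 */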
9191 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9192 ctxm = &ctx->ctx_arr[type];
9193
9194 if (!ctxm->mem_valid)
9195 continue;
9196 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9197 if (rc)
9198 return rc;
9199 }
9200 return 0;
9201 }
9202
9203 /**
9204 * __bnxt_copy_ctx_mem - copy host context memory
9205 * @bp: The driver context
9206 * @ctxm: The pointer to the context memory type
9207 * @buf: The destination buffer or NULL to just obtain the length
9208 * @offset: The buffer offset to copy the data to
9209 * @head: The head offset of context memory to copy from
9210 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9211 *
9212 * This function is called for debugging purposes to dump the host context
9213 * used by the chip.
9214 *
9215 * Return: Length of memory copied
9216 */
9217 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9218 struct bnxt_ctx_mem_type *ctxm, void *buf,
9219 size_t offset, size_t head, size_t tail)
9220 {
9221 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9222 size_t len = 0, total_len = 0;
9223 int i, n = 1;
9224
9225 if (!ctx_pg)
9226 return 0;
9227
9228 if (ctxm->instance_bmap)
9229 n = hweight32(ctxm->instance_bmap);
9230 for (i = 0; i < n; i++) {
9231 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9232 tail);
9233 offset += len;
9234 total_len += len;
9235 }
9236 return total_len;
9237 }
9238
9239 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9240 void *buf, size_t offset)
9241 {
9242 size_t tail = ctxm->max_entries * ctxm->entry_size;
9243
9244 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9245 }
9246
9247 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9248 struct bnxt_ctx_mem_type *ctxm, bool force)
9249 {
9250 struct bnxt_ctx_pg_info *ctx_pg;
9251 int i, n = 1;
9252
9253 ctxm->last = 0;
9254
9255 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9256 return;
9257
9258 ctx_pg = ctxm->pg_info;
9259 if (ctx_pg) {
9260 if (ctxm->instance_bmap)
9261 n = hweight32(ctxm->instance_bmap);
9262 for (i = 0; i < n; i++)
9263 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9264
9265 kfree(ctx_pg);
9266 ctxm->pg_info = NULL;
9267 ctxm->mem_valid = 0;
9268 }
9269 memset(ctxm, 0, sizeof(*ctxm));
9270 }
9271
9272 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9273 {
9274 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9275 u16 type;
9276
9277 if (!ctx)
9278 return;
9279
9280 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9281 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9282
9283 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9284 if (force) {
9285 kfree(ctx);
9286 bp->ctx = NULL;
9287 }
9288 }
9289
9290 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9291 {
9292 struct bnxt_ctx_mem_type *ctxm;
9293 struct bnxt_ctx_mem_info *ctx;
9294 u32 l2_qps, qp1_qps, max_qps;
9295 u32 ena, entries_sp, entries;
9296 u32 srqs, max_srqs, min;
9297 u32 num_mr, num_ah;
9298 u32 extra_srqs = 0;
9299 u32 extra_qps = 0;
9300 u32 fast_qpmd_qps;
9301 u8 pg_lvl = 1;
9302 int i, rc;
9303
9304 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9305 if (rc) {
9306 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9307 rc);
9308 return rc;
9309 }
9310 ctx = bp->ctx;
9311 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9312 return 0;
9313
9314 ena = 0;
9315 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9316 goto skip_legacy;
9317
9318 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9319 l2_qps = ctxm->qp_l2_entries;
9320 qp1_qps = ctxm->qp_qp1_entries;
9321 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9322 max_qps = ctxm->max_entries;
9323 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9324 srqs = ctxm->srq_l2_entries;
9325 max_srqs = ctxm->max_entries;
9326 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9327 pg_lvl = 2;
9328 if (BNXT_SW_RES_LMT(bp)) {
9329 extra_qps = max_qps - l2_qps - qp1_qps;
9330 extra_srqs = max_srqs - srqs;
9331 } else {
9332 extra_qps = min_t(u32, 65536,
9333 max_qps - l2_qps - qp1_qps);
9334 /* allocate extra qps if fw supports RoCE fast qp
9335 * destroy feature
9336 */
9337 extra_qps += fast_qpmd_qps;
9338 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9339 }
9340 if (fast_qpmd_qps)
9341 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9342 }
9343
9344 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9345 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9346 pg_lvl);
9347 if (rc)
9348 return rc;
9349
9350 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9351 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9352 if (rc)
9353 return rc;
9354
9355 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9356 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9357 extra_qps * 2, pg_lvl);
9358 if (rc)
9359 return rc;
9360
9361 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9362 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9363 if (rc)
9364 return rc;
9365
9366 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9367 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9368 if (rc)
9369 return rc;
9370
9371 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9372 goto skip_rdma;
9373
9374 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9375 if (BNXT_SW_RES_LMT(bp) &&
9376 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9377 num_ah = ctxm->mrav_av_entries;
9378 num_mr = ctxm->max_entries - num_ah;
9379 } else {
9380 /* 128K extra is needed to accommodate static AH context
9381 * allocation by f/w.
9382 */
9383 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9384 num_ah = min_t(u32, num_mr, 1024 * 128);
9385 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9386 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9387 ctxm->mrav_av_entries = num_ah;
9388 }
9389
9390 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9391 if (rc)
9392 return rc;
9393 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9394
9395 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9396 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9397 if (rc)
9398 return rc;
9399 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9400
9401 skip_rdma:
9402 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9403 min = ctxm->min_entries;
9404 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9405 2 * (extra_qps + qp1_qps) + min;
9406 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9407 if (rc)
9408 return rc;
9409
9410 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9411 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9412 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9413 if (rc)
9414 return rc;
9415 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9416 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9417 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9418
9419 skip_legacy:
9420 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9421 rc = bnxt_backing_store_cfg_v2(bp);
9422 else
9423 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9424 if (rc) {
9425 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9426 rc);
9427 return rc;
9428 }
9429 ctx->flags |= BNXT_CTX_FLAG_INITED;
9430 return 0;
9431 }
9432
9433 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9434 {
9435 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9436 u16 page_attr;
9437 int rc;
9438
9439 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9440 return 0;
9441
9442 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9443 if (rc)
9444 return rc;
9445
9446 if (BNXT_PAGE_SIZE == 0x2000)
9447 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9448 else if (BNXT_PAGE_SIZE == 0x10000)
9449 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9450 else
9451 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9452 req->pg_size_lvl = cpu_to_le16(page_attr |
9453 bp->fw_crash_mem->ring_mem.depth);
9454 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9455 req->size = cpu_to_le32(bp->fw_crash_len);
9456 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9457 return hwrm_req_send(bp, req);
9458 }
9459
9460 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9461 {
9462 if (bp->fw_crash_mem) {
9463 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9464 kfree(bp->fw_crash_mem);
9465 bp->fw_crash_mem = NULL;
9466 }
9467 }
9468
9469 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9470 {
9471 u32 mem_size = 0;
9472 int rc;
9473
9474 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9475 return 0;
9476
9477 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9478 if (rc)
9479 return rc;
9480
9481 mem_size = round_up(mem_size, 4);
9482
9483 /* keep and use the existing pages */
9484 if (bp->fw_crash_mem &&
9485 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9486 goto alloc_done;
9487
9488 if (bp->fw_crash_mem)
9489 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9490 else
9491 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
9492 GFP_KERNEL);
9493 if (!bp->fw_crash_mem)
9494 return -ENOMEM;
9495
9496 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9497 if (rc) {
9498 bnxt_free_crash_dump_mem(bp);
9499 return rc;
9500 }
9501
9502 alloc_done:
9503 bp->fw_crash_len = mem_size;
9504 return 0;
9505 }
9506
9507 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9508 {
9509 struct hwrm_func_resource_qcaps_output *resp;
9510 struct hwrm_func_resource_qcaps_input *req;
9511 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9512 int rc;
9513
9514 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9515 if (rc)
9516 return rc;
9517
9518 req->fid = cpu_to_le16(0xffff);
9519 resp = hwrm_req_hold(bp, req);
9520 rc = hwrm_req_send_silent(bp, req);
9521 if (rc)
9522 goto hwrm_func_resc_qcaps_exit;
9523
9524 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9525 if (!all)
9526 goto hwrm_func_resc_qcaps_exit;
9527
9528 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9529 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9530 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9531 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9532 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9533 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9534 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9535 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9536 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9537 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9538 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9539 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9540 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9541 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9542 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9543 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9544
9545 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9546 u16 max_msix = le16_to_cpu(resp->max_msix);
9547
9548 hw_resc->max_nqs = max_msix;
9549 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9550 }
9551
9552 if (BNXT_PF(bp)) {
9553 struct bnxt_pf_info *pf = &bp->pf;
9554
9555 pf->vf_resv_strategy =
9556 le16_to_cpu(resp->vf_reservation_strategy);
9557 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9558 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9559 }
9560 hwrm_func_resc_qcaps_exit:
9561 hwrm_req_drop(bp, req);
9562 return rc;
9563 }
9564
9565 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9566 {
9567 struct hwrm_port_mac_ptp_qcfg_output *resp;
9568 struct hwrm_port_mac_ptp_qcfg_input *req;
9569 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9570 u8 flags;
9571 int rc;
9572
9573 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9574 rc = -ENODEV;
9575 goto no_ptp;
9576 }
9577
9578 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9579 if (rc)
9580 goto no_ptp;
9581
9582 req->port_id = cpu_to_le16(bp->pf.port_id);
9583 resp = hwrm_req_hold(bp, req);
9584 rc = hwrm_req_send(bp, req);
9585 if (rc)
9586 goto exit;
9587
9588 flags = resp->flags;
9589 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9590 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9591 rc = -ENODEV;
9592 goto exit;
9593 }
9594 if (!ptp) {
9595 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
9596 if (!ptp) {
9597 rc = -ENOMEM;
9598 goto exit;
9599 }
9600 ptp->bp = bp;
9601 bp->ptp_cfg = ptp;
9602 }
9603
9604 if (flags &
9605 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9606 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9607 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9608 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9609 } else if (BNXT_CHIP_P5(bp)) {
9610 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9611 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9612 } else {
9613 rc = -ENODEV;
9614 goto exit;
9615 }
9616 ptp->rtc_configured =
9617 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9618 rc = bnxt_ptp_init(bp);
9619 if (rc)
9620 netdev_warn(bp->dev, "PTP initialization failed.\n");
9621 exit:
9622 hwrm_req_drop(bp, req);
9623 if (!rc)
9624 return 0;
9625
9626 no_ptp:
9627 bnxt_ptp_clear(bp);
9628 kfree(ptp);
9629 bp->ptp_cfg = NULL;
9630 return rc;
9631 }
9632
9633 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9634 {
9635 u32 flags, flags_ext, flags_ext2, flags_ext3;
9636 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9637 struct hwrm_func_qcaps_output *resp;
9638 struct hwrm_func_qcaps_input *req;
9639 int rc;
9640
9641 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9642 if (rc)
9643 return rc;
9644
9645 req->fid = cpu_to_le16(0xffff);
9646 resp = hwrm_req_hold(bp, req);
9647 rc = hwrm_req_send(bp, req);
9648 if (rc)
9649 goto hwrm_func_qcaps_exit;
9650
9651 flags = le32_to_cpu(resp->flags);
9652 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9653 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9654 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9655 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9656 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9657 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9658 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9659 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9660 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9661 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9662 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9663 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9664 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9665 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9666 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9667 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9668 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9669 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9670
9671 flags_ext = le32_to_cpu(resp->flags_ext);
9672 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9673 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9674 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9675 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9676 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9677 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9678 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9679 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9680 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9681 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9682 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9683 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9684 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9685 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9686 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9687 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9688 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9689 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9690
9691 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9692 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9693 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9694 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9695 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9696 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9697 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9698 if (flags_ext2 &
9699 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9700 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9701 if (BNXT_PF(bp) &&
9702 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9703 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9704
9705 flags_ext3 = le32_to_cpu(resp->flags_ext3);
9706 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9707 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9708 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9709 bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9710
9711 bp->tx_push_thresh = 0;
9712 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9713 BNXT_FW_MAJ(bp) > 217)
9714 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9715
9716 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9717 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9718 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9719 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9720 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9721 if (!hw_resc->max_hw_ring_grps)
9722 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9723 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9724 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9725 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9726
9727 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9728 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9729 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9730 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9731 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9732 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9733
9734 if (BNXT_PF(bp)) {
9735 struct bnxt_pf_info *pf = &bp->pf;
9736
9737 pf->fw_fid = le16_to_cpu(resp->fid);
9738 pf->port_id = le16_to_cpu(resp->port_id);
9739 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9740 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9741 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9742 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9743 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9744 bp->flags |= BNXT_FLAG_WOL_CAP;
9745 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9746 bp->fw_cap |= BNXT_FW_CAP_PTP;
9747 } else {
9748 bnxt_ptp_clear(bp);
9749 kfree(bp->ptp_cfg);
9750 bp->ptp_cfg = NULL;
9751 }
9752 } else {
9753 #ifdef CONFIG_BNXT_SRIOV
9754 struct bnxt_vf_info *vf = &bp->vf;
9755
9756 vf->fw_fid = le16_to_cpu(resp->fid);
9757 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9758 #endif
9759 }
9760 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9761
9762 hwrm_func_qcaps_exit:
9763 hwrm_req_drop(bp, req);
9764 return rc;
9765 }
9766
9767 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9768 {
9769 struct hwrm_dbg_qcaps_output *resp;
9770 struct hwrm_dbg_qcaps_input *req;
9771 int rc;
9772
9773 bp->fw_dbg_cap = 0;
9774 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9775 return;
9776
9777 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9778 if (rc)
9779 return;
9780
9781 req->fid = cpu_to_le16(0xffff);
9782 resp = hwrm_req_hold(bp, req);
9783 rc = hwrm_req_send(bp, req);
9784 if (rc)
9785 goto hwrm_dbg_qcaps_exit;
9786
9787 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9788
9789 hwrm_dbg_qcaps_exit:
9790 hwrm_req_drop(bp, req);
9791 }
9792
9793 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9794
9795 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9796 {
9797 int rc;
9798
9799 rc = __bnxt_hwrm_func_qcaps(bp);
9800 if (rc)
9801 return rc;
9802
9803 bnxt_hwrm_dbg_qcaps(bp);
9804
9805 rc = bnxt_hwrm_queue_qportcfg(bp);
9806 if (rc) {
9807 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9808 return rc;
9809 }
9810 if (bp->hwrm_spec_code >= 0x10803) {
9811 rc = bnxt_alloc_ctx_mem(bp);
9812 if (rc)
9813 return rc;
9814 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9815 if (!rc)
9816 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9817 }
9818 return 0;
9819 }
9820
9821 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9822 {
9823 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9824 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9825 u32 flags;
9826 int rc;
9827
9828 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9829 return 0;
9830
9831 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9832 if (rc)
9833 return rc;
9834
9835 resp = hwrm_req_hold(bp, req);
9836 rc = hwrm_req_send(bp, req);
9837 if (rc)
9838 goto hwrm_cfa_adv_qcaps_exit;
9839
9840 flags = le32_to_cpu(resp->flags);
9841 if (flags &
9842 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9843 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9844
9845 if (flags &
9846 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9847 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9848
9849 if (flags &
9850 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9851 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9852
9853 hwrm_cfa_adv_qcaps_exit:
9854 hwrm_req_drop(bp, req);
9855 return rc;
9856 }
9857
9858 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9859 {
9860 if (bp->fw_health)
9861 return 0;
9862
9863 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9864 if (!bp->fw_health)
9865 return -ENOMEM;
9866
9867 mutex_init(&bp->fw_health->lock);
9868 return 0;
9869 }
9870
9871 static int bnxt_alloc_fw_health(struct bnxt *bp)
9872 {
9873 int rc;
9874
9875 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9876 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9877 return 0;
9878
9879 rc = __bnxt_alloc_fw_health(bp);
9880 if (rc) {
9881 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9882 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9883 return rc;
9884 }
9885
9886 return 0;
9887 }
9888
9889 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9890 {
9891 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9892 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9893 BNXT_FW_HEALTH_WIN_MAP_OFF);
9894 }
9895
9896 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9897 {
9898 struct bnxt_fw_health *fw_health = bp->fw_health;
9899 u32 reg_type;
9900
9901 if (!fw_health)
9902 return;
9903
9904 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9905 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9906 fw_health->status_reliable = false;
9907
9908 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9909 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9910 fw_health->resets_reliable = false;
9911 }
9912
9913 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9914 {
9915 void __iomem *hs;
9916 u32 status_loc;
9917 u32 reg_type;
9918 u32 sig;
9919
9920 if (bp->fw_health)
9921 bp->fw_health->status_reliable = false;
9922
9923 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9924 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9925
9926 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9927 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9928 if (!bp->chip_num) {
9929 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9930 bp->chip_num = readl(bp->bar0 +
9931 BNXT_FW_HEALTH_WIN_BASE +
9932 BNXT_GRC_REG_CHIP_NUM);
9933 }
9934 if (!BNXT_CHIP_P5_PLUS(bp))
9935 return;
9936
9937 status_loc = BNXT_GRC_REG_STATUS_P5 |
9938 BNXT_FW_HEALTH_REG_TYPE_BAR0;
9939 } else {
9940 status_loc = readl(hs + offsetof(struct hcomm_status,
9941 fw_status_loc));
9942 }
9943
9944 if (__bnxt_alloc_fw_health(bp)) {
9945 netdev_warn(bp->dev, "no memory for firmware status checks\n");
9946 return;
9947 }
9948
9949 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9950 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9951 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9952 __bnxt_map_fw_health_reg(bp, status_loc);
9953 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9954 BNXT_FW_HEALTH_WIN_OFF(status_loc);
9955 }
9956
9957 bp->fw_health->status_reliable = true;
9958 }
9959
9960 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9961 {
9962 struct bnxt_fw_health *fw_health = bp->fw_health;
9963 u32 reg_base = 0xffffffff;
9964 int i;
9965
9966 bp->fw_health->status_reliable = false;
9967 bp->fw_health->resets_reliable = false;
9968 /* Only pre-map the monitoring GRC registers using window 3 */
9969 for (i = 0; i < 4; i++) {
9970 u32 reg = fw_health->regs[i];
9971
9972 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9973 continue;
9974 if (reg_base == 0xffffffff)
9975 reg_base = reg & BNXT_GRC_BASE_MASK;
9976 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9977 return -ERANGE;
9978 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9979 }
9980 bp->fw_health->status_reliable = true;
9981 bp->fw_health->resets_reliable = true;
9982 if (reg_base == 0xffffffff)
9983 return 0;
9984
9985 __bnxt_map_fw_health_reg(bp, reg_base);
9986 return 0;
9987 }
9988
9989 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9990 {
9991 if (!bp->fw_health)
9992 return;
9993
9994 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9995 bp->fw_health->status_reliable = true;
9996 bp->fw_health->resets_reliable = true;
9997 } else {
9998 bnxt_try_map_fw_health_reg(bp);
9999 }
10000 }
10001
10002 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10003 {
10004 struct bnxt_fw_health *fw_health = bp->fw_health;
10005 struct hwrm_error_recovery_qcfg_output *resp;
10006 struct hwrm_error_recovery_qcfg_input *req;
10007 int rc, i;
10008
10009 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10010 return 0;
10011
10012 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10013 if (rc)
10014 return rc;
10015
10016 resp = hwrm_req_hold(bp, req);
10017 rc = hwrm_req_send(bp, req);
10018 if (rc)
10019 goto err_recovery_out;
10020 fw_health->flags = le32_to_cpu(resp->flags);
10021 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10022 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10023 rc = -EINVAL;
10024 goto err_recovery_out;
10025 }
10026 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10027 fw_health->master_func_wait_dsecs =
10028 le32_to_cpu(resp->master_func_wait_period);
10029 fw_health->normal_func_wait_dsecs =
10030 le32_to_cpu(resp->normal_func_wait_period);
10031 fw_health->post_reset_wait_dsecs =
10032 le32_to_cpu(resp->master_func_wait_period_after_reset);
10033 fw_health->post_reset_max_wait_dsecs =
10034 le32_to_cpu(resp->max_bailout_time_after_reset);
10035 fw_health->regs[BNXT_FW_HEALTH_REG] =
10036 le32_to_cpu(resp->fw_health_status_reg);
10037 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10038 le32_to_cpu(resp->fw_heartbeat_reg);
10039 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10040 le32_to_cpu(resp->fw_reset_cnt_reg);
10041 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10042 le32_to_cpu(resp->reset_inprogress_reg);
10043 fw_health->fw_reset_inprog_reg_mask =
10044 le32_to_cpu(resp->reset_inprogress_reg_mask);
10045 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10046 if (fw_health->fw_reset_seq_cnt >= 16) {
10047 rc = -EINVAL;
10048 goto err_recovery_out;
10049 }
10050 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10051 fw_health->fw_reset_seq_regs[i] =
10052 le32_to_cpu(resp->reset_reg[i]);
10053 fw_health->fw_reset_seq_vals[i] =
10054 le32_to_cpu(resp->reset_reg_val[i]);
10055 fw_health->fw_reset_seq_delay_msec[i] =
10056 resp->delay_after_reset[i];
10057 }
10058 err_recovery_out:
10059 hwrm_req_drop(bp, req);
10060 if (!rc)
10061 rc = bnxt_map_fw_health_regs(bp);
10062 if (rc)
10063 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10064 return rc;
10065 }
10066
10067 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10068 {
10069 struct hwrm_func_reset_input *req;
10070 int rc;
10071
10072 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10073 if (rc)
10074 return rc;
10075
10076 req->enables = 0;
10077 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10078 return hwrm_req_send(bp, req);
10079 }
10080
10081 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10082 {
10083 struct hwrm_nvm_get_dev_info_output nvm_info;
10084
10085 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10086 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10087 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10088 nvm_info.nvm_cfg_ver_upd);
10089 }
10090
10091 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10092 {
10093 struct hwrm_queue_qportcfg_output *resp;
10094 struct hwrm_queue_qportcfg_input *req;
10095 u8 i, j, *qptr;
10096 bool no_rdma;
10097 int rc = 0;
10098
10099 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10100 if (rc)
10101 return rc;
10102
10103 resp = hwrm_req_hold(bp, req);
10104 rc = hwrm_req_send(bp, req);
10105 if (rc)
10106 goto qportcfg_exit;
10107
10108 if (!resp->max_configurable_queues) {
10109 rc = -EINVAL;
10110 goto qportcfg_exit;
10111 }
10112 bp->max_tc = resp->max_configurable_queues;
10113 bp->max_lltc = resp->max_configurable_lossless_queues;
10114 if (bp->max_tc > BNXT_MAX_QUEUE)
10115 bp->max_tc = BNXT_MAX_QUEUE;
10116
10117 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10118 qptr = &resp->queue_id0;
10119 for (i = 0, j = 0; i < bp->max_tc; i++) {
10120 bp->q_info[j].queue_id = *qptr;
10121 bp->q_ids[i] = *qptr++;
10122 bp->q_info[j].queue_profile = *qptr++;
10123 bp->tc_to_qidx[j] = j;
10124 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10125 (no_rdma && BNXT_PF(bp)))
10126 j++;
10127 }
10128 bp->max_q = bp->max_tc;
10129 bp->max_tc = max_t(u8, j, 1);
10130
10131 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10132 bp->max_tc = 1;
10133
10134 if (bp->max_lltc > bp->max_tc)
10135 bp->max_lltc = bp->max_tc;
10136
10137 qportcfg_exit:
10138 hwrm_req_drop(bp, req);
10139 return rc;
10140 }
10141
10142 static int bnxt_hwrm_poll(struct bnxt *bp)
10143 {
10144 struct hwrm_ver_get_input *req;
10145 int rc;
10146
10147 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10148 if (rc)
10149 return rc;
10150
10151 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10152 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10153 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10154
10155 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10156 rc = hwrm_req_send(bp, req);
10157 return rc;
10158 }
10159
10160 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10161 {
10162 struct hwrm_ver_get_output *resp;
10163 struct hwrm_ver_get_input *req;
10164 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10165 u32 dev_caps_cfg, hwrm_ver;
10166 int rc, len, max_tmo_secs;
10167
10168 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10169 if (rc)
10170 return rc;
10171
10172 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10173 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10174 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10175 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10176 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10177
10178 resp = hwrm_req_hold(bp, req);
10179 rc = hwrm_req_send(bp, req);
10180 if (rc)
10181 goto hwrm_ver_get_exit;
10182
10183 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10184
10185 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10186 resp->hwrm_intf_min_8b << 8 |
10187 resp->hwrm_intf_upd_8b;
10188 if (resp->hwrm_intf_maj_8b < 1) {
10189 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10190 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10191 resp->hwrm_intf_upd_8b);
10192 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10193 }
10194
10195 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10196 HWRM_VERSION_UPDATE;
10197
10198 if (bp->hwrm_spec_code > hwrm_ver)
10199 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10200 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10201 HWRM_VERSION_UPDATE);
10202 else
10203 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10204 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10205 resp->hwrm_intf_upd_8b);
10206
10207 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10208 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10209 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10210 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10211 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10212 len = FW_VER_STR_LEN;
10213 } else {
10214 fw_maj = resp->hwrm_fw_maj_8b;
10215 fw_min = resp->hwrm_fw_min_8b;
10216 fw_bld = resp->hwrm_fw_bld_8b;
10217 fw_rsv = resp->hwrm_fw_rsvd_8b;
10218 len = BC_HWRM_STR_LEN;
10219 }
10220 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10221 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10222 fw_rsv);
10223
10224 if (strlen(resp->active_pkg_name)) {
10225 int fw_ver_len = strlen(bp->fw_ver_str);
10226
10227 snprintf(bp->fw_ver_str + fw_ver_len,
10228 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10229 resp->active_pkg_name);
10230 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10231 }
10232
10233 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10234 if (!bp->hwrm_cmd_timeout)
10235 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10236 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10237 if (!bp->hwrm_cmd_max_timeout)
10238 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10239 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10240 #ifdef CONFIG_DETECT_HUNG_TASK
10241 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10242 max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10243 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10244 max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10245 }
10246 #endif
10247
10248 if (resp->hwrm_intf_maj_8b >= 1) {
10249 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10250 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10251 }
10252 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10253 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10254
10255 bp->chip_num = le16_to_cpu(resp->chip_num);
10256 bp->chip_rev = resp->chip_rev;
10257 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10258 !resp->chip_metal)
10259 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10260
10261 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10262 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10263 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10264 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10265
10266 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10267 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10268
10269 if (dev_caps_cfg &
10270 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10271 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10272
10273 if (dev_caps_cfg &
10274 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10275 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10276
10277 if (dev_caps_cfg &
10278 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10279 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10280
10281 hwrm_ver_get_exit:
10282 hwrm_req_drop(bp, req);
10283 return rc;
10284 }
10285
10286 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10287 {
10288 struct hwrm_fw_set_time_input *req;
10289 struct tm tm;
10290 time64_t now = ktime_get_real_seconds();
10291 int rc;
10292
10293 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10294 bp->hwrm_spec_code < 0x10400)
10295 return -EOPNOTSUPP;
10296
10297 time64_to_tm(now, 0, &tm);
10298 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10299 if (rc)
10300 return rc;
10301
10302 req->year = cpu_to_le16(1900 + tm.tm_year);
10303 req->month = 1 + tm.tm_mon;
10304 req->day = tm.tm_mday;
10305 req->hour = tm.tm_hour;
10306 req->minute = tm.tm_min;
10307 req->second = tm.tm_sec;
10308 return hwrm_req_send(bp, req);
10309 }
10310
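/* Fold a hardware counter snapshot into the wider software counter.
 * Only the bits covered by @mask are valid in @hw; if the masked value
 * has wrapped since the last snapshot, add one full wrap to @sw.
 */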
10311 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10312 {
10313 u64 sw_tmp;
10314
10315 hw &= mask;
10316 sw_tmp = (*sw & ~mask) | hw;
10317 if (hw < (*sw & mask))
10318 sw_tmp += mask + 1;
10319 WRITE_ONCE(*sw, sw_tmp);
10320 }
10321
10322 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10323 int count, bool ignore_zero)
10324 {
10325 int i;
10326
10327 for (i = 0; i < count; i++) {
10328 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10329
10330 if (ignore_zero && !hw)
10331 continue;
10332
10333 if (masks[i] == -1ULL)
10334 sw_stats[i] = hw;
10335 else
10336 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10337 }
10338 }
10339
10340 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10341 {
10342 if (!stats->hw_stats)
10343 return;
10344
10345 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10346 stats->hw_masks, stats->len / 8, false);
10347 }
10348
10349 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10350 {
10351 struct bnxt_stats_mem *ring0_stats;
10352 bool ignore_zero = false;
10353 int i;
10354
10355 /* Chip bug. Counter intermittently becomes 0. */
10356 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10357 ignore_zero = true;
10358
10359 for (i = 0; i < bp->cp_nr_rings; i++) {
10360 struct bnxt_napi *bnapi = bp->bnapi[i];
10361 struct bnxt_cp_ring_info *cpr;
10362 struct bnxt_stats_mem *stats;
10363
10364 cpr = &bnapi->cp_ring;
10365 stats = &cpr->stats;
10366 if (!i)
10367 ring0_stats = stats;
10368 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10369 ring0_stats->hw_masks,
10370 ring0_stats->len / 8, ignore_zero);
10371 }
10372 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10373 struct bnxt_stats_mem *stats = &bp->port_stats;
10374 __le64 *hw_stats = stats->hw_stats;
10375 u64 *sw_stats = stats->sw_stats;
10376 u64 *masks = stats->hw_masks;
10377 int cnt;
10378
10379 cnt = sizeof(struct rx_port_stats) / 8;
10380 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10381
10382 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10383 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10384 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10385 cnt = sizeof(struct tx_port_stats) / 8;
10386 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10387 }
10388 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10389 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10390 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10391 }
10392 }
10393
10394 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10395 {
10396 struct hwrm_port_qstats_input *req;
10397 struct bnxt_pf_info *pf = &bp->pf;
10398 int rc;
10399
10400 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10401 return 0;
10402
10403 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10404 return -EOPNOTSUPP;
10405
10406 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10407 if (rc)
10408 return rc;
10409
10410 req->flags = flags;
10411 req->port_id = cpu_to_le16(pf->port_id);
10412 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10413 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10414 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10415 return hwrm_req_send(bp, req);
10416 }
10417
10418 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10419 {
10420 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10421 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10422 struct hwrm_port_qstats_ext_output *resp_qs;
10423 struct hwrm_port_qstats_ext_input *req_qs;
10424 struct bnxt_pf_info *pf = &bp->pf;
10425 u32 tx_stat_size;
10426 int rc;
10427
10428 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10429 return 0;
10430
10431 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10432 return -EOPNOTSUPP;
10433
10434 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10435 if (rc)
10436 return rc;
10437
10438 req_qs->flags = flags;
10439 req_qs->port_id = cpu_to_le16(pf->port_id);
10440 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10441 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10442 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10443 sizeof(struct tx_port_stats_ext) : 0;
10444 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10445 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10446 resp_qs = hwrm_req_hold(bp, req_qs);
10447 rc = hwrm_req_send(bp, req_qs);
10448 if (!rc) {
10449 bp->fw_rx_stats_ext_size =
10450 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10451 if (BNXT_FW_MAJ(bp) < 220 &&
10452 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10453 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10454
10455 bp->fw_tx_stats_ext_size = tx_stat_size ?
10456 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10457 } else {
10458 bp->fw_rx_stats_ext_size = 0;
10459 bp->fw_tx_stats_ext_size = 0;
10460 }
10461 hwrm_req_drop(bp, req_qs);
10462
10463 if (flags)
10464 return rc;
10465
10466 if (bp->fw_tx_stats_ext_size <=
10467 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10468 bp->pri2cos_valid = 0;
10469 return rc;
10470 }
10471
10472 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10473 if (rc)
10474 return rc;
10475
10476 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10477
10478 resp_qc = hwrm_req_hold(bp, req_qc);
10479 rc = hwrm_req_send(bp, req_qc);
10480 if (!rc) {
10481 u8 *pri2cos;
10482 int i, j;
10483
10484 pri2cos = &resp_qc->pri0_cos_queue_id;
10485 for (i = 0; i < 8; i++) {
10486 u8 queue_id = pri2cos[i];
10487 u8 queue_idx;
10488
10489 /* Per port queue IDs start from 0, 10, 20, etc */
10490 queue_idx = queue_id % 10;
10491 if (queue_idx > BNXT_MAX_QUEUE) {
10492 bp->pri2cos_valid = false;
10493 hwrm_req_drop(bp, req_qc);
10494 return rc;
10495 }
10496 for (j = 0; j < bp->max_q; j++) {
10497 if (bp->q_ids[j] == queue_id)
10498 bp->pri2cos_idx[i] = queue_idx;
10499 }
10500 }
10501 bp->pri2cos_valid = true;
10502 }
10503 hwrm_req_drop(bp, req_qc);
10504
10505 return rc;
10506 }
10507
10508 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10509 {
10510 bnxt_hwrm_tunnel_dst_port_free(bp,
10511 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10512 bnxt_hwrm_tunnel_dst_port_free(bp,
10513 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10514 }
10515
10516 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10517 {
10518 int rc, i;
10519 u32 tpa_flags = 0;
10520
10521 if (set_tpa)
10522 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10523 else if (BNXT_NO_FW_ACCESS(bp))
10524 return 0;
10525 for (i = 0; i < bp->nr_vnics; i++) {
10526 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10527 if (rc) {
10528 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10529 i, rc);
10530 return rc;
10531 }
10532 }
10533 return 0;
10534 }
10535
10536 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10537 {
10538 int i;
10539
10540 for (i = 0; i < bp->nr_vnics; i++)
10541 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10542 }
10543
10544 static void bnxt_clear_vnic(struct bnxt *bp)
10545 {
10546 if (!bp->vnic_info)
10547 return;
10548
10549 bnxt_hwrm_clear_vnic_filter(bp);
10550 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10551 /* clear all RSS settings before freeing the vnic ctx */
10552 bnxt_hwrm_clear_vnic_rss(bp);
10553 bnxt_hwrm_vnic_ctx_free(bp);
10554 }
10555 /* before freeing the vnic, undo the vnic TPA settings */
10556 if (bp->flags & BNXT_FLAG_TPA)
10557 bnxt_set_tpa(bp, false);
10558 bnxt_hwrm_vnic_free(bp);
10559 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10560 bnxt_hwrm_vnic_ctx_free(bp);
10561 }
10562
10563 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10564 bool irq_re_init)
10565 {
10566 bnxt_clear_vnic(bp);
10567 bnxt_hwrm_ring_free(bp, close_path);
10568 bnxt_hwrm_ring_grp_free(bp);
10569 if (irq_re_init) {
10570 bnxt_hwrm_stat_ctx_free(bp);
10571 bnxt_hwrm_free_tunnel_ports(bp);
10572 }
10573 }
10574
10575 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10576 {
10577 struct hwrm_func_cfg_input *req;
10578 u8 evb_mode;
10579 int rc;
10580
10581 if (br_mode == BRIDGE_MODE_VEB)
10582 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10583 else if (br_mode == BRIDGE_MODE_VEPA)
10584 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10585 else
10586 return -EINVAL;
10587
10588 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10589 if (rc)
10590 return rc;
10591
10592 req->fid = cpu_to_le16(0xffff);
10593 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10594 req->evb_mode = evb_mode;
10595 return hwrm_req_send(bp, req);
10596 }
10597
10598 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10599 {
10600 struct hwrm_func_cfg_input *req;
10601 int rc;
10602
10603 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10604 return 0;
10605
10606 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10607 if (rc)
10608 return rc;
10609
10610 req->fid = cpu_to_le16(0xffff);
10611 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10612 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10613 if (size == 128)
10614 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10615
10616 return hwrm_req_send(bp, req);
10617 }
10618
10619 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10620 {
10621 int rc;
10622
10623 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10624 goto skip_rss_ctx;
10625
10626 /* allocate context for vnic */
10627 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10628 if (rc) {
10629 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10630 vnic->vnic_id, rc);
10631 goto vnic_setup_err;
10632 }
10633 bp->rsscos_nr_ctxs++;
10634
10635 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10636 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10637 if (rc) {
10638 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10639 vnic->vnic_id, rc);
10640 goto vnic_setup_err;
10641 }
10642 bp->rsscos_nr_ctxs++;
10643 }
10644
10645 skip_rss_ctx:
10646 /* configure default vnic, ring grp */
10647 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10648 if (rc) {
10649 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10650 vnic->vnic_id, rc);
10651 goto vnic_setup_err;
10652 }
10653
10654 /* Enable RSS hashing on vnic */
10655 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10656 if (rc) {
10657 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10658 vnic->vnic_id, rc);
10659 goto vnic_setup_err;
10660 }
10661
10662 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10663 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10664 if (rc) {
10665 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10666 vnic->vnic_id, rc);
10667 }
10668 }
10669
10670 vnic_setup_err:
10671 return rc;
10672 }
10673
10674 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10675 u8 valid)
10676 {
10677 struct hwrm_vnic_update_input *req;
10678 int rc;
10679
10680 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10681 if (rc)
10682 return rc;
10683
10684 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10685
10686 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10687 req->mru = cpu_to_le16(vnic->mru);
10688
10689 req->enables = cpu_to_le32(valid);
10690
10691 return hwrm_req_send(bp, req);
10692 }
10693
10694 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10695 {
10696 int rc;
10697
10698 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10699 if (rc) {
10700 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10701 vnic->vnic_id, rc);
10702 return rc;
10703 }
10704 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10705 if (rc)
10706 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10707 vnic->vnic_id, rc);
10708 return rc;
10709 }
10710
10711 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10712 {
10713 int rc, i, nr_ctxs;
10714
10715 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10716 for (i = 0; i < nr_ctxs; i++) {
10717 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10718 if (rc) {
10719 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10720 vnic->vnic_id, i, rc);
10721 break;
10722 }
10723 bp->rsscos_nr_ctxs++;
10724 }
10725 if (i < nr_ctxs)
10726 return -ENOMEM;
10727
10728 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10729 if (rc)
10730 return rc;
10731
10732 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10733 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10734 if (rc) {
10735 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10736 vnic->vnic_id, rc);
10737 }
10738 }
10739 return rc;
10740 }
10741
10742 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10743 {
10744 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10745 return __bnxt_setup_vnic_p5(bp, vnic);
10746 else
10747 return __bnxt_setup_vnic(bp, vnic);
10748 }
10749
10750 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10751 struct bnxt_vnic_info *vnic,
10752 u16 start_rx_ring_idx, int rx_rings)
10753 {
10754 int rc;
10755
10756 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10757 if (rc) {
10758 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10759 vnic->vnic_id, rc);
10760 return rc;
10761 }
10762 return bnxt_setup_vnic(bp, vnic);
10763 }
10764
10765 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10766 {
10767 struct bnxt_vnic_info *vnic;
10768 int i, rc = 0;
10769
10770 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10771 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10772 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10773 }
10774
10775 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10776 return 0;
10777
10778 for (i = 0; i < bp->rx_nr_rings; i++) {
10779 u16 vnic_id = i + 1;
10780 u16 ring_id = i;
10781
10782 if (vnic_id >= bp->nr_vnics)
10783 break;
10784
10785 vnic = &bp->vnic_info[vnic_id];
10786 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10787 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10788 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10789 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10790 break;
10791 }
10792 return rc;
10793 }
10794
10795 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10796 bool all)
10797 {
10798 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10799 struct bnxt_filter_base *usr_fltr, *tmp;
10800 struct bnxt_ntuple_filter *ntp_fltr;
10801 int i;
10802
10803 if (netif_running(bp->dev)) {
10804 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10805 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10806 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10807 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10808 }
10809 }
10810 if (!all)
10811 return;
10812
10813 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10814 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10815 usr_fltr->fw_vnic_id == rss_ctx->index) {
10816 ntp_fltr = container_of(usr_fltr,
10817 struct bnxt_ntuple_filter,
10818 base);
10819 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10820 bnxt_del_ntp_filter(bp, ntp_fltr);
10821 bnxt_del_one_usr_fltr(bp, usr_fltr);
10822 }
10823 }
10824
10825 if (vnic->rss_table)
10826 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10827 vnic->rss_table,
10828 vnic->rss_table_dma_addr);
10829 bp->num_rss_ctx--;
10830 }
10831
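/* Return true if @rxr_id is used by this VNIC, i.e. the ring appears in its
 * RSS indirection table (the ntuple VNIC always covers all RX rings).
 */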
10832 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10833 int rxr_id)
10834 {
10835 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10836 int i, vnic_rx;
10837
10838 /* The ntuple VNIC always has all the rx rings, so any change of
10839 * ring id is relevant because a future filter may use it.
10840 */
10841 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10842 return true;
10843
10844 for (i = 0; i < tbl_size; i++) {
10845 if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10846 vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10847 else
10848 vnic_rx = bp->rss_indir_tbl[i];
10849
10850 if (rxr_id == vnic_rx)
10851 return true;
10852 }
10853
10854 return false;
10855 }
10856
10857 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10858 u16 mru, int rxr_id)
10859 {
10860 int rc;
10861
10862 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10863 return 0;
10864
10865 if (mru) {
10866 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10867 if (rc) {
10868 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10869 vnic->vnic_id, rc);
10870 return rc;
10871 }
10872 }
10873 vnic->mru = mru;
10874 bnxt_hwrm_vnic_update(bp, vnic,
10875 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10876
10877 return 0;
10878 }
10879
10880 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10881 {
10882 struct ethtool_rxfh_context *ctx;
10883 unsigned long context;
10884 int rc;
10885
10886 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10887 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10888 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10889
10890 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10891 if (rc)
10892 return rc;
10893 }
10894
10895 return 0;
10896 }
10897
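/* Re-create the VNIC, TPA and RSS state for every user-created RSS context.
 * A context that cannot be restored is deleted and reported lost to ethtool.
 */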
10898 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10899 {
10900 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10901 struct ethtool_rxfh_context *ctx;
10902 unsigned long context;
10903
10904 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10905 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10906 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10907
10908 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10909 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10910 __bnxt_setup_vnic_p5(bp, vnic)) {
10911 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10912 rss_ctx->index);
10913 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10914 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10915 }
10916 }
10917 }
10918
10919 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10920 {
10921 struct ethtool_rxfh_context *ctx;
10922 unsigned long context;
10923
10924 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10925 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10926
10927 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
10928 }
10929 }
10930
10931 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10932 static bool bnxt_promisc_ok(struct bnxt *bp)
10933 {
10934 #ifdef CONFIG_BNXT_SRIOV
10935 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10936 return false;
10937 #endif
10938 return true;
10939 }
10940
10941 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10942 {
10943 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10944 unsigned int rc = 0;
10945
10946 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10947 if (rc) {
10948 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10949 rc);
10950 return rc;
10951 }
10952
10953 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10954 if (rc) {
10955 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10956 rc);
10957 return rc;
10958 }
10959 return rc;
10960 }
10961
10962 static int bnxt_cfg_rx_mode(struct bnxt *);
10963 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10964
10965 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10966 {
10967 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10968 int rc = 0;
10969 unsigned int rx_nr_rings = bp->rx_nr_rings;
10970
10971 if (irq_re_init) {
10972 rc = bnxt_hwrm_stat_ctx_alloc(bp);
10973 if (rc) {
10974 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10975 rc);
10976 goto err_out;
10977 }
10978 }
10979
10980 rc = bnxt_hwrm_ring_alloc(bp);
10981 if (rc) {
10982 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
10983 goto err_out;
10984 }
10985
10986 rc = bnxt_hwrm_ring_grp_alloc(bp);
10987 if (rc) {
10988 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
10989 goto err_out;
10990 }
10991
10992 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10993 rx_nr_rings--;
10994
10995 /* default vnic 0 */
10996 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
10997 if (rc) {
10998 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
10999 goto err_out;
11000 }
11001
11002 if (BNXT_VF(bp))
11003 bnxt_hwrm_func_qcfg(bp);
11004
11005 rc = bnxt_setup_vnic(bp, vnic);
11006 if (rc)
11007 goto err_out;
11008 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11009 bnxt_hwrm_update_rss_hash_cfg(bp);
11010
11011 if (bp->flags & BNXT_FLAG_RFS) {
11012 rc = bnxt_alloc_rfs_vnics(bp);
11013 if (rc)
11014 goto err_out;
11015 }
11016
11017 if (bp->flags & BNXT_FLAG_TPA) {
11018 rc = bnxt_set_tpa(bp, true);
11019 if (rc)
11020 goto err_out;
11021 }
11022
11023 if (BNXT_VF(bp))
11024 bnxt_update_vf_mac(bp);
11025
11026 /* Filter for default vnic 0 */
11027 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11028 if (rc) {
11029 if (BNXT_VF(bp) && rc == -ENODEV)
11030 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11031 else
11032 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11033 goto err_out;
11034 }
11035 vnic->uc_filter_count = 1;
11036
11037 vnic->rx_mask = 0;
11038 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11039 goto skip_rx_mask;
11040
11041 if (bp->dev->flags & IFF_BROADCAST)
11042 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11043
11044 if (bp->dev->flags & IFF_PROMISC)
11045 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11046
11047 if (bp->dev->flags & IFF_ALLMULTI) {
11048 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11049 vnic->mc_list_count = 0;
11050 } else if (bp->dev->flags & IFF_MULTICAST) {
11051 u32 mask = 0;
11052
11053 bnxt_mc_list_updated(bp, &mask);
11054 vnic->rx_mask |= mask;
11055 }
11056
11057 rc = bnxt_cfg_rx_mode(bp);
11058 if (rc)
11059 goto err_out;
11060
11061 skip_rx_mask:
11062 rc = bnxt_hwrm_set_coal(bp);
11063 if (rc)
11064 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11065 rc);
11066
11067 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11068 rc = bnxt_setup_nitroa0_vnic(bp);
11069 if (rc)
11070 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11071 rc);
11072 }
11073
11074 if (BNXT_VF(bp)) {
11075 bnxt_hwrm_func_qcfg(bp);
11076 netdev_update_features(bp->dev);
11077 }
11078
11079 return 0;
11080
11081 err_out:
11082 bnxt_hwrm_resource_free(bp, 0, true);
11083
11084 return rc;
11085 }
11086
11087 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11088 {
11089 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11090 return 0;
11091 }
11092
11093 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11094 {
11095 bnxt_init_cp_rings(bp);
11096 bnxt_init_rx_rings(bp);
11097 bnxt_init_tx_rings(bp);
11098 bnxt_init_ring_grps(bp, irq_re_init);
11099 bnxt_init_vnics(bp);
11100
11101 return bnxt_init_chip(bp, irq_re_init);
11102 }
11103
11104 static int bnxt_set_real_num_queues(struct bnxt *bp)
11105 {
11106 int rc;
11107 struct net_device *dev = bp->dev;
11108
11109 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11110 bp->tx_nr_rings_xdp);
11111 if (rc)
11112 return rc;
11113
11114 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11115 if (rc)
11116 return rc;
11117
11118 #ifdef CONFIG_RFS_ACCEL
11119 if (bp->flags & BNXT_FLAG_RFS)
11120 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11121 #endif
11122
11123 return rc;
11124 }
11125
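/* Trim *rx and *tx to fit within @max.  With shared rings each count is
 * simply capped at @max; otherwise rings are taken from the larger of the
 * two until rx + tx <= max.
 */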
11126 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11127 bool shared)
11128 {
11129 int _rx = *rx, _tx = *tx;
11130
11131 if (shared) {
11132 *rx = min_t(int, _rx, max);
11133 *tx = min_t(int, _tx, max);
11134 } else {
11135 if (max < 2)
11136 return -ENOMEM;
11137
11138 while (_rx + _tx > max) {
11139 if (_rx > _tx && _rx > 1)
11140 _rx--;
11141 else if (_tx > 1)
11142 _tx--;
11143 }
11144 *rx = _rx;
11145 *tx = _tx;
11146 }
11147 return 0;
11148 }
11149
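/* Convert a TX ring count to the number of TX completion rings needed.
 * The TX rings of the @tx_sets traffic classes in a ring group share one
 * completion ring, while each XDP TX ring needs its own.
 */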
11150 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11151 {
11152 return (tx - tx_xdp) / tx_sets + tx_xdp;
11153 }
11154
11155 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11156 {
11157 int tcs = bp->num_tc;
11158
11159 if (!tcs)
11160 tcs = 1;
11161 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11162 }
11163
11164 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11165 {
11166 int tcs = bp->num_tc;
11167
11168 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11169 bp->tx_nr_rings_xdp;
11170 }
11171
11172 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11173 bool sh)
11174 {
11175 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11176
11177 if (tx_cp != *tx) {
11178 int tx_saved = tx_cp, rc;
11179
11180 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11181 if (rc)
11182 return rc;
11183 if (tx_cp != tx_saved)
11184 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
11185 return 0;
11186 }
11187 return __bnxt_trim_rings(bp, rx, tx, max, sh);
11188 }
11189
11190 static void bnxt_setup_msix(struct bnxt *bp)
11191 {
11192 const int len = sizeof(bp->irq_tbl[0].name);
11193 struct net_device *dev = bp->dev;
11194 int tcs, i;
11195
11196 tcs = bp->num_tc;
11197 if (tcs) {
11198 int i, off, count;
11199
11200 for (i = 0; i < tcs; i++) {
11201 count = bp->tx_nr_rings_per_tc;
11202 off = BNXT_TC_TO_RING_BASE(bp, i);
11203 netdev_set_tc_queue(dev, i, count, off);
11204 }
11205 }
11206
11207 for (i = 0; i < bp->cp_nr_rings; i++) {
11208 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11209 char *attr;
11210
11211 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11212 attr = "TxRx";
11213 else if (i < bp->rx_nr_rings)
11214 attr = "rx";
11215 else
11216 attr = "tx";
11217
11218 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11219 attr, i);
11220 bp->irq_tbl[map_idx].handler = bnxt_msix;
11221 }
11222 }
11223
11224 static int bnxt_init_int_mode(struct bnxt *bp);
11225
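/* Grow or shrink the dynamic MSI-X allocation to @total vectors and return
 * the resulting total; the caller must check that the requested count was
 * actually reached.
 */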
11226 static int bnxt_change_msix(struct bnxt *bp, int total)
11227 {
11228 struct msi_map map;
11229 int i;
11230
11231 /* add MSIX to the end if needed */
11232 for (i = bp->total_irqs; i < total; i++) {
11233 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11234 if (map.index < 0)
11235 return bp->total_irqs;
11236 bp->irq_tbl[i].vector = map.virq;
11237 bp->total_irqs++;
11238 }
11239
11240 /* trim MSIX from the end if needed */
11241 for (i = bp->total_irqs; i > total; i--) {
11242 map.index = i - 1;
11243 map.virq = bp->irq_tbl[i - 1].vector;
11244 pci_msix_free_irq(bp->pdev, map);
11245 bp->total_irqs--;
11246 }
11247 return bp->total_irqs;
11248 }
11249
11250 static int bnxt_setup_int_mode(struct bnxt *bp)
11251 {
11252 int rc;
11253
11254 if (!bp->irq_tbl) {
11255 rc = bnxt_init_int_mode(bp);
11256 if (rc || !bp->irq_tbl)
11257 return rc ?: -ENODEV;
11258 }
11259
11260 bnxt_setup_msix(bp);
11261
11262 rc = bnxt_set_real_num_queues(bp);
11263 return rc;
11264 }
11265
11266 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11267 {
11268 return bp->hw_resc.max_rsscos_ctxs;
11269 }
11270
11271 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11272 {
11273 return bp->hw_resc.max_vnics;
11274 }
11275
11276 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11277 {
11278 return bp->hw_resc.max_stat_ctxs;
11279 }
11280
11281 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11282 {
11283 return bp->hw_resc.max_cp_rings;
11284 }
11285
11286 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11287 {
11288 unsigned int cp = bp->hw_resc.max_cp_rings;
11289
11290 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11291 cp -= bnxt_get_ulp_msix_num(bp);
11292
11293 return cp;
11294 }
11295
11296 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11297 {
11298 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11299
11300 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11301 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11302
11303 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11304 }
11305
11306 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11307 {
11308 bp->hw_resc.max_irqs = max_irqs;
11309 }
11310
11311 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11312 {
11313 unsigned int cp;
11314
11315 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11316 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11317 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11318 else
11319 return cp - bp->cp_nr_rings;
11320 }
11321
11322 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11323 {
11324 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11325 }
11326
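/* Clamp a request for @num additional MSI-X vectors to what is still
 * available beyond the completion rings already in use.
 */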
11327 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11328 {
11329 int max_irq = bnxt_get_max_func_irqs(bp);
11330 int total_req = bp->cp_nr_rings + num;
11331
11332 if (max_irq < total_req) {
11333 num = max_irq - bp->cp_nr_rings;
11334 if (num <= 0)
11335 return 0;
11336 }
11337 return num;
11338 }
11339
11340 static int bnxt_get_num_msix(struct bnxt *bp)
11341 {
11342 if (!BNXT_NEW_RM(bp))
11343 return bnxt_get_max_func_irqs(bp);
11344
11345 return bnxt_nq_rings_in_use(bp);
11346 }
11347
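/* Allocate MSI-X vectors, build the IRQ table and trim the RX/TX ring
 * counts to fit the number of vectors actually granted.
 */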
11348 static int bnxt_init_int_mode(struct bnxt *bp)
11349 {
11350 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11351
11352 total_vecs = bnxt_get_num_msix(bp);
11353 max = bnxt_get_max_func_irqs(bp);
11354 if (total_vecs > max)
11355 total_vecs = max;
11356
11357 if (!total_vecs)
11358 return 0;
11359
11360 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11361 min = 2;
11362
11363 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11364 PCI_IRQ_MSIX);
11365 ulp_msix = bnxt_get_ulp_msix_num(bp);
11366 if (total_vecs < 0 || total_vecs < ulp_msix) {
11367 rc = -ENODEV;
11368 goto msix_setup_exit;
11369 }
11370
11371 tbl_size = total_vecs;
11372 if (pci_msix_can_alloc_dyn(bp->pdev))
11373 tbl_size = max;
11374 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11375 if (bp->irq_tbl) {
11376 for (i = 0; i < total_vecs; i++)
11377 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11378
11379 bp->total_irqs = total_vecs;
11380 /* Trim rings based upon num of vectors allocated */
11381 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11382 total_vecs - ulp_msix, min == 1);
11383 if (rc)
11384 goto msix_setup_exit;
11385
11386 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11387 bp->cp_nr_rings = (min == 1) ?
11388 max_t(int, tx_cp, bp->rx_nr_rings) :
11389 tx_cp + bp->rx_nr_rings;
11390
11391 } else {
11392 rc = -ENOMEM;
11393 goto msix_setup_exit;
11394 }
11395 return 0;
11396
11397 msix_setup_exit:
11398 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11399 kfree(bp->irq_tbl);
11400 bp->irq_tbl = NULL;
11401 pci_free_irq_vectors(bp->pdev);
11402 return rc;
11403 }
11404
11405 static void bnxt_clear_int_mode(struct bnxt *bp)
11406 {
11407 pci_free_irq_vectors(bp->pdev);
11408
11409 kfree(bp->irq_tbl);
11410 bp->irq_tbl = NULL;
11411 }
11412
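/* Reserve rings and other resources for the current configuration,
 * re-initializing or re-sizing the MSI-X allocation when the required IRQ
 * count has changed.
 */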
11413 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11414 {
11415 bool irq_cleared = false;
11416 bool irq_change = false;
11417 int tcs = bp->num_tc;
11418 int irqs_required;
11419 int rc;
11420
11421 if (!bnxt_need_reserve_rings(bp))
11422 return 0;
11423
11424 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11425 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11426
11427 if (ulp_msix > bp->ulp_num_msix_want)
11428 ulp_msix = bp->ulp_num_msix_want;
11429 irqs_required = ulp_msix + bp->cp_nr_rings;
11430 } else {
11431 irqs_required = bnxt_get_num_msix(bp);
11432 }
11433
11434 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11435 irq_change = true;
11436 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11437 bnxt_ulp_irq_stop(bp);
11438 bnxt_clear_int_mode(bp);
11439 irq_cleared = true;
11440 }
11441 }
11442 rc = __bnxt_reserve_rings(bp);
11443 if (irq_cleared) {
11444 if (!rc)
11445 rc = bnxt_init_int_mode(bp);
11446 bnxt_ulp_irq_restart(bp, rc);
11447 } else if (irq_change && !rc) {
11448 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11449 rc = -ENOSPC;
11450 }
11451 if (rc) {
11452 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11453 return rc;
11454 }
11455 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11456 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11457 netdev_err(bp->dev, "tx ring reservation failure\n");
11458 netdev_reset_tc(bp->dev);
11459 bp->num_tc = 0;
11460 if (bp->tx_nr_rings_xdp)
11461 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11462 else
11463 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11464 return -ENOMEM;
11465 }
11466 return 0;
11467 }
11468
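/* Quiesce all TX rings of NAPI @idx.  When TPH is active, also free the HW
 * TX and completion rings so that bnxt_tx_queue_start() can re-allocate them.
 */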
11469 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11470 {
11471 struct bnxt_tx_ring_info *txr;
11472 struct netdev_queue *txq;
11473 struct bnxt_napi *bnapi;
11474 int i;
11475
11476 bnapi = bp->bnapi[idx];
11477 bnxt_for_each_napi_tx(i, bnapi, txr) {
11478 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11479 synchronize_net();
11480
11481 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11482 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11483 if (txq) {
11484 __netif_tx_lock_bh(txq);
11485 netif_tx_stop_queue(txq);
11486 __netif_tx_unlock_bh(txq);
11487 }
11488 }
11489
11490 if (!bp->tph_mode)
11491 continue;
11492
11493 bnxt_hwrm_tx_ring_free(bp, txr, true);
11494 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11495 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11496 bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11497 }
11498 }
11499
11500 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11501 {
11502 struct bnxt_tx_ring_info *txr;
11503 struct netdev_queue *txq;
11504 struct bnxt_napi *bnapi;
11505 int rc, i;
11506
11507 bnapi = bp->bnapi[idx];
11508 /* All rings have been reserved and previously allocated.
11509 * Reallocating with the same parameters should never fail.
11510 */
11511 bnxt_for_each_napi_tx(i, bnapi, txr) {
11512 if (!bp->tph_mode)
11513 goto start_tx;
11514
11515 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11516 if (rc)
11517 return rc;
11518
11519 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11520 if (rc)
11521 return rc;
11522
11523 txr->tx_prod = 0;
11524 txr->tx_cons = 0;
11525 txr->tx_hw_cons = 0;
11526 start_tx:
11527 WRITE_ONCE(txr->dev_state, 0);
11528 synchronize_net();
11529
11530 if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11531 continue;
11532
11533 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11534 if (txq)
11535 netif_tx_start_queue(txq);
11536 }
11537
11538 return 0;
11539 }
11540
11541 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11542 const cpumask_t *mask)
11543 {
11544 struct bnxt_irq *irq;
11545 u16 tag;
11546 int err;
11547
11548 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11549
11550 if (!irq->bp->tph_mode)
11551 return;
11552
11553 cpumask_copy(irq->cpu_mask, mask);
11554
11555 if (irq->ring_nr >= irq->bp->rx_nr_rings)
11556 return;
11557
11558 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11559 cpumask_first(irq->cpu_mask), &tag))
11560 return;
11561
11562 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11563 return;
11564
11565 netdev_lock(irq->bp->dev);
11566 if (netif_running(irq->bp->dev)) {
11567 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11568 if (err)
11569 netdev_err(irq->bp->dev,
11570 "RX queue restart failed: err=%d\n", err);
11571 }
11572 netdev_unlock(irq->bp->dev);
11573 }
11574
11575 static void bnxt_irq_affinity_release(struct kref *ref)
11576 {
11577 struct irq_affinity_notify *notify =
11578 container_of(ref, struct irq_affinity_notify, kref);
11579 struct bnxt_irq *irq;
11580
11581 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11582
11583 if (!irq->bp->tph_mode)
11584 return;
11585
11586 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11587 netdev_err(irq->bp->dev,
11588 "Setting ST=0 for MSIX entry %d failed\n",
11589 irq->msix_nr);
11590 return;
11591 }
11592 }
11593
11594 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11595 {
11596 irq_set_affinity_notifier(irq->vector, NULL);
11597 }
11598
11599 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11600 {
11601 struct irq_affinity_notify *notify;
11602
11603 irq->bp = bp;
11604
11605 /* Nothing to do if TPH is not enabled */
11606 if (!bp->tph_mode)
11607 return;
11608
11609 /* Register IRQ affinity notifier */
11610 notify = &irq->affinity_notify;
11611 notify->irq = irq->vector;
11612 notify->notify = bnxt_irq_affinity_notify;
11613 notify->release = bnxt_irq_affinity_release;
11614
11615 irq_set_affinity_notifier(irq->vector, notify);
11616 }
11617
11618 static void bnxt_free_irq(struct bnxt *bp)
11619 {
11620 struct bnxt_irq *irq;
11621 int i;
11622
11623 #ifdef CONFIG_RFS_ACCEL
11624 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11625 bp->dev->rx_cpu_rmap = NULL;
11626 #endif
11627 if (!bp->irq_tbl || !bp->bnapi)
11628 return;
11629
11630 for (i = 0; i < bp->cp_nr_rings; i++) {
11631 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11632
11633 irq = &bp->irq_tbl[map_idx];
11634 if (irq->requested) {
11635 if (irq->have_cpumask) {
11636 irq_update_affinity_hint(irq->vector, NULL);
11637 free_cpumask_var(irq->cpu_mask);
11638 irq->have_cpumask = 0;
11639 }
11640
11641 bnxt_release_irq_notifier(irq);
11642
11643 free_irq(irq->vector, bp->bnapi[i]);
11644 }
11645
11646 irq->requested = 0;
11647 }
11648
11649 /* Disable TPH support */
11650 pcie_disable_tph(bp->pdev);
11651 bp->tph_mode = 0;
11652 }
11653
11654 static int bnxt_request_irq(struct bnxt *bp)
11655 {
11656 struct cpu_rmap *rmap = NULL;
11657 int i, j, rc = 0;
11658 unsigned long flags = 0;
11659
11660 rc = bnxt_setup_int_mode(bp);
11661 if (rc) {
11662 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11663 rc);
11664 return rc;
11665 }
11666 #ifdef CONFIG_RFS_ACCEL
11667 rmap = bp->dev->rx_cpu_rmap;
11668 #endif
11669
11670 /* Enable TPH support as part of IRQ request */
11671 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11672 if (!rc)
11673 bp->tph_mode = PCI_TPH_ST_IV_MODE;
11674
11675 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11676 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11677 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11678
11679 if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11680 rmap && bp->bnapi[i]->rx_ring) {
11681 rc = irq_cpu_rmap_add(rmap, irq->vector);
11682 if (rc)
11683 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11684 j);
11685 j++;
11686 }
11687
11688 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11689 bp->bnapi[i]);
11690 if (rc)
11691 break;
11692
11693 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11694 irq->requested = 1;
11695
11696 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11697 int numa_node = dev_to_node(&bp->pdev->dev);
11698 u16 tag;
11699
11700 irq->have_cpumask = 1;
11701 irq->msix_nr = map_idx;
11702 irq->ring_nr = i;
11703 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11704 irq->cpu_mask);
11705 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11706 if (rc) {
11707 netdev_warn(bp->dev,
11708 "Update affinity hint failed, IRQ = %d\n",
11709 irq->vector);
11710 break;
11711 }
11712
11713 bnxt_register_irq_notifier(bp, irq);
11714
11715 /* Init ST table entry */
11716 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11717 cpumask_first(irq->cpu_mask),
11718 &tag))
11719 continue;
11720
11721 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11722 }
11723 }
11724 return rc;
11725 }
11726
11727 static void bnxt_del_napi(struct bnxt *bp)
11728 {
11729 int i;
11730
11731 if (!bp->bnapi)
11732 return;
11733
11734 for (i = 0; i < bp->rx_nr_rings; i++)
11735 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11736 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11737 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11738
11739 for (i = 0; i < bp->cp_nr_rings; i++) {
11740 struct bnxt_napi *bnapi = bp->bnapi[i];
11741
11742 __netif_napi_del_locked(&bnapi->napi);
11743 }
11744 /* We called __netif_napi_del_locked(), we need
11745 * to respect an RCU grace period before freeing napi structures.
11746 */
11747 synchronize_net();
11748 }
11749
11750 static void bnxt_init_napi(struct bnxt *bp)
11751 {
11752 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11753 unsigned int cp_nr_rings = bp->cp_nr_rings;
11754 struct bnxt_napi *bnapi;
11755 int i;
11756
11757 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11758 poll_fn = bnxt_poll_p5;
11759 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11760 cp_nr_rings--;
11761
11762 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11763
11764 for (i = 0; i < cp_nr_rings; i++) {
11765 bnapi = bp->bnapi[i];
11766 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11767 bnapi->index);
11768 }
11769 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11770 bnapi = bp->bnapi[cp_nr_rings];
11771 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11772 }
11773 }
11774
11775 static void bnxt_disable_napi(struct bnxt *bp)
11776 {
11777 int i;
11778
11779 if (!bp->bnapi ||
11780 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11781 return;
11782
11783 for (i = 0; i < bp->cp_nr_rings; i++) {
11784 struct bnxt_napi *bnapi = bp->bnapi[i];
11785 struct bnxt_cp_ring_info *cpr;
11786
11787 cpr = &bnapi->cp_ring;
11788 if (bnapi->tx_fault)
11789 cpr->sw_stats->tx.tx_resets++;
11790 if (bnapi->in_reset)
11791 cpr->sw_stats->rx.rx_resets++;
11792 napi_disable_locked(&bnapi->napi);
11793 }
11794 }
11795
11796 static void bnxt_enable_napi(struct bnxt *bp)
11797 {
11798 int i;
11799
11800 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11801 for (i = 0; i < bp->cp_nr_rings; i++) {
11802 struct bnxt_napi *bnapi = bp->bnapi[i];
11803 struct bnxt_cp_ring_info *cpr;
11804
11805 bnapi->tx_fault = 0;
11806
11807 cpr = &bnapi->cp_ring;
11808 bnapi->in_reset = false;
11809
11810 if (bnapi->rx_ring) {
11811 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11812 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11813 }
11814 napi_enable_locked(&bnapi->napi);
11815 }
11816 }
11817
11818 void bnxt_tx_disable(struct bnxt *bp)
11819 {
11820 int i;
11821 struct bnxt_tx_ring_info *txr;
11822
11823 if (bp->tx_ring) {
11824 for (i = 0; i < bp->tx_nr_rings; i++) {
11825 txr = &bp->tx_ring[i];
11826 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11827 }
11828 }
11829 /* Make sure napi polls see @dev_state change */
11830 synchronize_net();
11831 /* Drop carrier first to prevent TX timeout */
11832 netif_carrier_off(bp->dev);
11833 /* Stop all TX queues */
11834 netif_tx_disable(bp->dev);
11835 }
11836
11837 void bnxt_tx_enable(struct bnxt *bp)
11838 {
11839 int i;
11840 struct bnxt_tx_ring_info *txr;
11841
11842 for (i = 0; i < bp->tx_nr_rings; i++) {
11843 txr = &bp->tx_ring[i];
11844 WRITE_ONCE(txr->dev_state, 0);
11845 }
11846 /* Make sure napi polls see @dev_state change */
11847 synchronize_net();
11848 netif_tx_wake_all_queues(bp->dev);
11849 if (BNXT_LINK_IS_UP(bp))
11850 netif_carrier_on(bp->dev);
11851 }
11852
11853 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11854 {
11855 u8 active_fec = link_info->active_fec_sig_mode &
11856 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11857
11858 switch (active_fec) {
11859 default:
11860 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11861 return "None";
11862 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11863 return "Clause 74 BaseR";
11864 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11865 return "Clause 91 RS(528,514)";
11866 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11867 return "Clause 91 RS544_1XN";
11868 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11869 return "Clause 91 RS(544,514)";
11870 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11871 return "Clause 91 RS272_1XN";
11872 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11873 return "Clause 91 RS(272,257)";
11874 }
11875 }
11876
11877 void bnxt_report_link(struct bnxt *bp)
11878 {
11879 if (BNXT_LINK_IS_UP(bp)) {
11880 const char *signal = "";
11881 const char *flow_ctrl;
11882 const char *duplex;
11883 u32 speed;
11884 u16 fec;
11885
11886 netif_carrier_on(bp->dev);
11887 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11888 if (speed == SPEED_UNKNOWN) {
11889 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11890 return;
11891 }
11892 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11893 duplex = "full";
11894 else
11895 duplex = "half";
11896 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11897 flow_ctrl = "ON - receive & transmit";
11898 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11899 flow_ctrl = "ON - transmit";
11900 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
11901 flow_ctrl = "ON - receive";
11902 else
11903 flow_ctrl = "none";
11904 if (bp->link_info.phy_qcfg_resp.option_flags &
11905 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
11906 u8 sig_mode = bp->link_info.active_fec_sig_mode &
11907 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
11908 switch (sig_mode) {
11909 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
11910 signal = "(NRZ) ";
11911 break;
11912 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
11913 signal = "(PAM4 56Gbps) ";
11914 break;
11915 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
11916 signal = "(PAM4 112Gbps) ";
11917 break;
11918 default:
11919 break;
11920 }
11921 }
11922 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
11923 speed, signal, duplex, flow_ctrl);
11924 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
11925 netdev_info(bp->dev, "EEE is %s\n",
11926 bp->eee.eee_active ? "active" :
11927 "not active");
11928 fec = bp->link_info.fec_cfg;
11929 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
11930 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
11931 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
11932 bnxt_report_fec(&bp->link_info));
11933 } else {
11934 netif_carrier_off(bp->dev);
11935 netdev_err(bp->dev, "NIC Link is Down\n");
11936 }
11937 }
11938
11939 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
11940 {
11941 if (!resp->supported_speeds_auto_mode &&
11942 !resp->supported_speeds_force_mode &&
11943 !resp->supported_pam4_speeds_auto_mode &&
11944 !resp->supported_pam4_speeds_force_mode &&
11945 !resp->supported_speeds2_auto_mode &&
11946 !resp->supported_speeds2_force_mode)
11947 return true;
11948 return false;
11949 }
11950
11951 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
11952 {
11953 struct bnxt_link_info *link_info = &bp->link_info;
11954 struct hwrm_port_phy_qcaps_output *resp;
11955 struct hwrm_port_phy_qcaps_input *req;
11956 int rc = 0;
11957
11958 if (bp->hwrm_spec_code < 0x10201)
11959 return 0;
11960
11961 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
11962 if (rc)
11963 return rc;
11964
11965 resp = hwrm_req_hold(bp, req);
11966 rc = hwrm_req_send(bp, req);
11967 if (rc)
11968 goto hwrm_phy_qcaps_exit;
11969
11970 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
11971 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
11972 struct ethtool_keee *eee = &bp->eee;
11973 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
11974
11975 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
11976 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
11977 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
11978 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
11979 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
11980 }
11981
11982 if (bp->hwrm_spec_code >= 0x10a01) {
11983 if (bnxt_phy_qcaps_no_speed(resp)) {
11984 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
11985 netdev_warn(bp->dev, "Ethernet link disabled\n");
11986 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
11987 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
11988 netdev_info(bp->dev, "Ethernet link enabled\n");
11989 /* Phy re-enabled, reprobe the speeds */
11990 link_info->support_auto_speeds = 0;
11991 link_info->support_pam4_auto_speeds = 0;
11992 link_info->support_auto_speeds2 = 0;
11993 }
11994 }
11995 if (resp->supported_speeds_auto_mode)
11996 link_info->support_auto_speeds =
11997 le16_to_cpu(resp->supported_speeds_auto_mode);
11998 if (resp->supported_pam4_speeds_auto_mode)
11999 link_info->support_pam4_auto_speeds =
12000 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12001 if (resp->supported_speeds2_auto_mode)
12002 link_info->support_auto_speeds2 =
12003 le16_to_cpu(resp->supported_speeds2_auto_mode);
12004
12005 bp->port_count = resp->port_cnt;
12006
12007 hwrm_phy_qcaps_exit:
12008 hwrm_req_drop(bp, req);
12009 return rc;
12010 }
12011
12012 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12013 {
12014 struct hwrm_port_mac_qcaps_output *resp;
12015 struct hwrm_port_mac_qcaps_input *req;
12016 int rc;
12017
12018 if (bp->hwrm_spec_code < 0x10a03)
12019 return;
12020
12021 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12022 if (rc)
12023 return;
12024
12025 resp = hwrm_req_hold(bp, req);
12026 rc = hwrm_req_send_silent(bp, req);
12027 if (!rc)
12028 bp->mac_flags = resp->flags;
12029 hwrm_req_drop(bp, req);
12030 }
12031
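/* Return true if @advertising contains any speed bit that is not present in
 * @supported.
 */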
12032 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12033 {
12034 u16 diff = advertising ^ supported;
12035
12036 return ((supported | diff) != supported);
12037 }
12038
12039 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12040 {
12041 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12042
12043 /* Check if any advertised speeds are no longer supported. The caller
12044 * holds the link_lock mutex, so we can modify link_info settings.
12045 */
12046 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12047 if (bnxt_support_dropped(link_info->advertising,
12048 link_info->support_auto_speeds2)) {
12049 link_info->advertising = link_info->support_auto_speeds2;
12050 return true;
12051 }
12052 return false;
12053 }
12054 if (bnxt_support_dropped(link_info->advertising,
12055 link_info->support_auto_speeds)) {
12056 link_info->advertising = link_info->support_auto_speeds;
12057 return true;
12058 }
12059 if (bnxt_support_dropped(link_info->advertising_pam4,
12060 link_info->support_pam4_auto_speeds)) {
12061 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12062 return true;
12063 }
12064 return false;
12065 }
12066
12067 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12068 {
12069 struct bnxt_link_info *link_info = &bp->link_info;
12070 struct hwrm_port_phy_qcfg_output *resp;
12071 struct hwrm_port_phy_qcfg_input *req;
12072 u8 link_state = link_info->link_state;
12073 bool support_changed;
12074 int rc;
12075
12076 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12077 if (rc)
12078 return rc;
12079
12080 resp = hwrm_req_hold(bp, req);
12081 rc = hwrm_req_send(bp, req);
12082 if (rc) {
12083 hwrm_req_drop(bp, req);
12084 if (BNXT_VF(bp) && rc == -ENODEV) {
12085 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12086 rc = 0;
12087 }
12088 return rc;
12089 }
12090
12091 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12092 link_info->phy_link_status = resp->link;
12093 link_info->duplex = resp->duplex_cfg;
12094 if (bp->hwrm_spec_code >= 0x10800)
12095 link_info->duplex = resp->duplex_state;
12096 link_info->pause = resp->pause;
12097 link_info->auto_mode = resp->auto_mode;
12098 link_info->auto_pause_setting = resp->auto_pause;
12099 link_info->lp_pause = resp->link_partner_adv_pause;
12100 link_info->force_pause_setting = resp->force_pause;
12101 link_info->duplex_setting = resp->duplex_cfg;
12102 if (link_info->phy_link_status == BNXT_LINK_LINK) {
12103 link_info->link_speed = le16_to_cpu(resp->link_speed);
12104 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12105 link_info->active_lanes = resp->active_lanes;
12106 } else {
12107 link_info->link_speed = 0;
12108 link_info->active_lanes = 0;
12109 }
12110 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12111 link_info->force_pam4_link_speed =
12112 le16_to_cpu(resp->force_pam4_link_speed);
12113 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12114 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12115 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12116 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12117 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12118 link_info->auto_pam4_link_speeds =
12119 le16_to_cpu(resp->auto_pam4_link_speed_mask);
12120 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12121 link_info->lp_auto_link_speeds =
12122 le16_to_cpu(resp->link_partner_adv_speeds);
12123 link_info->lp_auto_pam4_link_speeds =
12124 resp->link_partner_pam4_adv_speeds;
12125 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12126 link_info->phy_ver[0] = resp->phy_maj;
12127 link_info->phy_ver[1] = resp->phy_min;
12128 link_info->phy_ver[2] = resp->phy_bld;
12129 link_info->media_type = resp->media_type;
12130 link_info->phy_type = resp->phy_type;
12131 link_info->transceiver = resp->xcvr_pkg_type;
12132 link_info->phy_addr = resp->eee_config_phy_addr &
12133 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12134 link_info->module_status = resp->module_status;
12135
12136 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12137 struct ethtool_keee *eee = &bp->eee;
12138 u16 fw_speeds;
12139
12140 eee->eee_active = 0;
12141 if (resp->eee_config_phy_addr &
12142 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12143 eee->eee_active = 1;
12144 fw_speeds = le16_to_cpu(
12145 resp->link_partner_adv_eee_link_speed_mask);
12146 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12147 }
12148
12149 /* Pull initial EEE config */
12150 if (!chng_link_state) {
12151 if (resp->eee_config_phy_addr &
12152 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12153 eee->eee_enabled = 1;
12154
12155 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12156 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12157
12158 if (resp->eee_config_phy_addr &
12159 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12160 __le32 tmr;
12161
12162 eee->tx_lpi_enabled = 1;
12163 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12164 eee->tx_lpi_timer = le32_to_cpu(tmr) &
12165 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12166 }
12167 }
12168 }
12169
12170 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12171 if (bp->hwrm_spec_code >= 0x10504) {
12172 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12173 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12174 }
12175 /* TODO: need to add more logic to report VF link */
12176 if (chng_link_state) {
12177 if (link_info->phy_link_status == BNXT_LINK_LINK)
12178 link_info->link_state = BNXT_LINK_STATE_UP;
12179 else
12180 link_info->link_state = BNXT_LINK_STATE_DOWN;
12181 if (link_state != link_info->link_state)
12182 bnxt_report_link(bp);
12183 } else {
12184 /* always report link down if not required to update link state */
12185 link_info->link_state = BNXT_LINK_STATE_DOWN;
12186 }
12187 hwrm_req_drop(bp, req);
12188
12189 if (!BNXT_PHY_CFG_ABLE(bp))
12190 return 0;
12191
12192 support_changed = bnxt_support_speed_dropped(link_info);
12193 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12194 bnxt_hwrm_set_link_setting(bp, true, false);
12195 return 0;
12196 }
12197
12198 static void bnxt_get_port_module_status(struct bnxt *bp)
12199 {
12200 struct bnxt_link_info *link_info = &bp->link_info;
12201 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12202 u8 module_status;
12203
12204 if (bnxt_update_link(bp, true))
12205 return;
12206
12207 module_status = link_info->module_status;
12208 switch (module_status) {
12209 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12210 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12211 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12212 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12213 bp->pf.port_id);
12214 if (bp->hwrm_spec_code >= 0x10201) {
12215 netdev_warn(bp->dev, "Module part number %s\n",
12216 resp->phy_vendor_partnumber);
12217 }
12218 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12219 netdev_warn(bp->dev, "TX is disabled\n");
12220 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12221 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12222 }
12223 }
12224
12225 static void
12226 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12227 {
12228 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12229 if (bp->hwrm_spec_code >= 0x10201)
12230 req->auto_pause =
12231 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12232 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12233 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12234 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12235 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12236 req->enables |=
12237 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12238 } else {
12239 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12240 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12241 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12242 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12243 req->enables |=
12244 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12245 if (bp->hwrm_spec_code >= 0x10201) {
12246 req->auto_pause = req->force_pause;
12247 req->enables |= cpu_to_le32(
12248 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12249 }
12250 }
12251 }
12252
12253 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12254 {
12255 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12256 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12257 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12258 req->enables |=
12259 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12260 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12261 } else if (bp->link_info.advertising) {
12262 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12263 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12264 }
12265 if (bp->link_info.advertising_pam4) {
12266 req->enables |=
12267 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12268 req->auto_link_pam4_speed_mask =
12269 cpu_to_le16(bp->link_info.advertising_pam4);
12270 }
12271 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12272 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12273 } else {
12274 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12275 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12276 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12277 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12278 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12279 (u32)bp->link_info.req_link_speed);
12280 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12281 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12282 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12283 } else {
12284 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12285 }
12286 }
12287
12288 /* tell chimp that the setting takes effect immediately */
12289 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12290 }
12291
12292 int bnxt_hwrm_set_pause(struct bnxt *bp)
12293 {
12294 struct hwrm_port_phy_cfg_input *req;
12295 int rc;
12296
12297 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12298 if (rc)
12299 return rc;
12300
12301 bnxt_hwrm_set_pause_common(bp, req);
12302
12303 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12304 bp->link_info.force_link_chng)
12305 bnxt_hwrm_set_link_common(bp, req);
12306
12307 rc = hwrm_req_send(bp, req);
12308 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12309 /* since changing the pause setting doesn't trigger any link
12310 * change event, the driver needs to update the current pause
12311 * result upon successful return of the phy_cfg command
12312 */
12313 bp->link_info.pause =
12314 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12315 bp->link_info.auto_pause_setting = 0;
12316 if (!bp->link_info.force_link_chng)
12317 bnxt_report_link(bp);
12318 }
12319 bp->link_info.force_link_chng = false;
12320 return rc;
12321 }
12322
12323 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12324 struct hwrm_port_phy_cfg_input *req)
12325 {
12326 struct ethtool_keee *eee = &bp->eee;
12327
12328 if (eee->eee_enabled) {
12329 u16 eee_speeds;
12330 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12331
12332 if (eee->tx_lpi_enabled)
12333 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12334 else
12335 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12336
12337 req->flags |= cpu_to_le32(flags);
12338 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12339 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12340 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12341 } else {
12342 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12343 }
12344 }
12345
12346 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12347 {
12348 struct hwrm_port_phy_cfg_input *req;
12349 int rc;
12350
12351 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12352 if (rc)
12353 return rc;
12354
12355 if (set_pause)
12356 bnxt_hwrm_set_pause_common(bp, req);
12357
12358 bnxt_hwrm_set_link_common(bp, req);
12359
12360 if (set_eee)
12361 bnxt_hwrm_set_eee(bp, req);
12362 return hwrm_req_send(bp, req);
12363 }
12364
12365 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12366 {
12367 struct hwrm_port_phy_cfg_input *req;
12368 int rc;
12369
12370 if (!BNXT_SINGLE_PF(bp))
12371 return 0;
12372
12373 if (pci_num_vf(bp->pdev) &&
12374 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12375 return 0;
12376
12377 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12378 if (rc)
12379 return rc;
12380
12381 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12382 rc = hwrm_req_send(bp, req);
12383 if (!rc) {
12384 mutex_lock(&bp->link_lock);
12385 /* Device is not obliged to bring the link down in certain scenarios,
12386 * even when forced. Setting the state to unknown is consistent with
12387 * driver startup and will force the link state to be reported
12388 * during the subsequent open based on PORT_PHY_QCFG.
12389 */
12390 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12391 mutex_unlock(&bp->link_lock);
12392 }
12393 return rc;
12394 }
12395
12396 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12397 {
12398 #ifdef CONFIG_TEE_BNXT_FW
12399 int rc = tee_bnxt_fw_load();
12400
12401 if (rc)
12402 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12403
12404 return rc;
12405 #else
12406 netdev_err(bp->dev, "OP-TEE not supported\n");
12407 return -ENODEV;
12408 #endif
12409 }
12410
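/* Wait for a booting or recovering firmware by polling the health
 * status register.  Fail if the firmware does not become healthy, and
 * attempt a reset via OP-TEE if it crashed without a master function.
 */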
12411 static int bnxt_try_recover_fw(struct bnxt *bp)
12412 {
12413 if (bp->fw_health && bp->fw_health->status_reliable) {
12414 int retry = 0, rc;
12415 u32 sts;
12416
12417 do {
12418 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12419 rc = bnxt_hwrm_poll(bp);
12420 if (!BNXT_FW_IS_BOOTING(sts) &&
12421 !BNXT_FW_IS_RECOVERING(sts))
12422 break;
12423 retry++;
12424 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12425
12426 if (!BNXT_FW_IS_HEALTHY(sts)) {
12427 netdev_err(bp->dev,
12428 "Firmware not responding, status: 0x%x\n",
12429 sts);
12430 rc = -ENODEV;
12431 }
12432 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12433 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12434 return bnxt_fw_reset_via_optee(bp);
12435 }
12436 return rc;
12437 }
12438
12439 return -ENODEV;
12440 }
12441
12442 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12443 {
12444 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12445
12446 if (!BNXT_NEW_RM(bp))
12447 return; /* no resource reservations required */
12448
12449 hw_resc->resv_cp_rings = 0;
12450 hw_resc->resv_stat_ctxs = 0;
12451 hw_resc->resv_irqs = 0;
12452 hw_resc->resv_tx_rings = 0;
12453 hw_resc->resv_rx_rings = 0;
12454 hw_resc->resv_hw_ring_grps = 0;
12455 hw_resc->resv_vnics = 0;
12456 hw_resc->resv_rsscos_ctxs = 0;
12457 if (!fw_reset) {
12458 bp->tx_nr_rings = 0;
12459 bp->rx_nr_rings = 0;
12460 }
12461 }
12462
12463 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12464 {
12465 int rc;
12466
12467 if (!BNXT_NEW_RM(bp))
12468 return 0; /* no resource reservations required */
12469
12470 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12471 if (rc)
12472 netdev_err(bp->dev, "resc_qcaps failed\n");
12473
12474 bnxt_clear_reservations(bp, fw_reset);
12475
12476 return rc;
12477 }
12478
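/* Notify the firmware that the interface is going up or down.  On the
 * up transition, use the response flags to detect resource changes,
 * capability changes or a completed firmware reset and re-initialize
 * the driver state accordingly.
 */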
12479 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12480 {
12481 struct hwrm_func_drv_if_change_output *resp;
12482 struct hwrm_func_drv_if_change_input *req;
12483 bool resc_reinit = false;
12484 bool caps_change = false;
12485 int rc, retry = 0;
12486 bool fw_reset;
12487 u32 flags = 0;
12488
12489 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12490 bp->fw_reset_state = 0;
12491
12492 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12493 return 0;
12494
12495 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12496 if (rc)
12497 return rc;
12498
12499 if (up)
12500 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12501 resp = hwrm_req_hold(bp, req);
12502
12503 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12504 while (retry < BNXT_FW_IF_RETRY) {
12505 rc = hwrm_req_send(bp, req);
12506 if (rc != -EAGAIN)
12507 break;
12508
12509 msleep(50);
12510 retry++;
12511 }
12512
12513 if (rc == -EAGAIN) {
12514 hwrm_req_drop(bp, req);
12515 return rc;
12516 } else if (!rc) {
12517 flags = le32_to_cpu(resp->flags);
12518 } else if (up) {
12519 rc = bnxt_try_recover_fw(bp);
12520 fw_reset = true;
12521 }
12522 hwrm_req_drop(bp, req);
12523 if (rc)
12524 return rc;
12525
12526 if (!up) {
12527 bnxt_inv_fw_health_reg(bp);
12528 return 0;
12529 }
12530
12531 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12532 resc_reinit = true;
12533 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12534 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12535 fw_reset = true;
12536 else
12537 bnxt_remap_fw_health_regs(bp);
12538
12539 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12540 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12541 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12542 return -ENODEV;
12543 }
12544 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12545 caps_change = true;
12546
12547 if (resc_reinit || fw_reset || caps_change) {
12548 if (fw_reset || caps_change) {
12549 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12550 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12551 bnxt_ulp_irq_stop(bp);
12552 bnxt_free_ctx_mem(bp, false);
12553 bnxt_dcb_free(bp);
12554 rc = bnxt_fw_init_one(bp);
12555 if (rc) {
12556 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12557 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12558 return rc;
12559 }
12560 /* IRQ will be initialized later in bnxt_request_irq() */
12561 bnxt_clear_int_mode(bp);
12562 }
12563 rc = bnxt_cancel_reservations(bp, fw_reset);
12564 }
12565 return rc;
12566 }
12567
12568 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12569 {
12570 struct hwrm_port_led_qcaps_output *resp;
12571 struct hwrm_port_led_qcaps_input *req;
12572 struct bnxt_pf_info *pf = &bp->pf;
12573 int rc;
12574
12575 bp->num_leds = 0;
12576 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12577 return 0;
12578
12579 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12580 if (rc)
12581 return rc;
12582
12583 req->port_id = cpu_to_le16(pf->port_id);
12584 resp = hwrm_req_hold(bp, req);
12585 rc = hwrm_req_send(bp, req);
12586 if (rc) {
12587 hwrm_req_drop(bp, req);
12588 return rc;
12589 }
12590 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12591 int i;
12592
12593 bp->num_leds = resp->num_leds;
12594 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12595 bp->num_leds);
12596 for (i = 0; i < bp->num_leds; i++) {
12597 struct bnxt_led_info *led = &bp->leds[i];
12598 __le16 caps = led->led_state_caps;
12599
12600 if (!led->led_group_id ||
12601 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12602 bp->num_leds = 0;
12603 break;
12604 }
12605 }
12606 }
12607 hwrm_req_drop(bp, req);
12608 return 0;
12609 }
12610
12611 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12612 {
12613 struct hwrm_wol_filter_alloc_output *resp;
12614 struct hwrm_wol_filter_alloc_input *req;
12615 int rc;
12616
12617 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12618 if (rc)
12619 return rc;
12620
12621 req->port_id = cpu_to_le16(bp->pf.port_id);
12622 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12623 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12624 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12625
12626 resp = hwrm_req_hold(bp, req);
12627 rc = hwrm_req_send(bp, req);
12628 if (!rc)
12629 bp->wol_filter_id = resp->wol_filter_id;
12630 hwrm_req_drop(bp, req);
12631 return rc;
12632 }
12633
12634 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12635 {
12636 struct hwrm_wol_filter_free_input *req;
12637 int rc;
12638
12639 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12640 if (rc)
12641 return rc;
12642
12643 req->port_id = cpu_to_le16(bp->pf.port_id);
12644 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12645 req->wol_filter_id = bp->wol_filter_id;
12646
12647 return hwrm_req_send(bp, req);
12648 }
12649
12650 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12651 {
12652 struct hwrm_wol_filter_qcfg_output *resp;
12653 struct hwrm_wol_filter_qcfg_input *req;
12654 u16 next_handle = 0;
12655 int rc;
12656
12657 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12658 if (rc)
12659 return rc;
12660
12661 req->port_id = cpu_to_le16(bp->pf.port_id);
12662 req->handle = cpu_to_le16(handle);
12663 resp = hwrm_req_hold(bp, req);
12664 rc = hwrm_req_send(bp, req);
12665 if (!rc) {
12666 next_handle = le16_to_cpu(resp->next_handle);
12667 if (next_handle != 0) {
12668 if (resp->wol_type ==
12669 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12670 bp->wol = 1;
12671 bp->wol_filter_id = resp->wol_filter_id;
12672 }
12673 }
12674 }
12675 hwrm_req_drop(bp, req);
12676 return next_handle;
12677 }
12678
12679 static void bnxt_get_wol_settings(struct bnxt *bp)
12680 {
12681 u16 handle = 0;
12682
12683 bp->wol = 0;
12684 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12685 return;
12686
12687 do {
12688 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12689 } while (handle && handle != 0xffff);
12690 }
12691
12692 static bool bnxt_eee_config_ok(struct bnxt *bp)
12693 {
12694 struct ethtool_keee *eee = &bp->eee;
12695 struct bnxt_link_info *link_info = &bp->link_info;
12696
12697 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12698 return true;
12699
12700 if (eee->eee_enabled) {
12701 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12702 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12703
12704 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12705
12706 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12707 eee->eee_enabled = 0;
12708 return false;
12709 }
12710 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12711 linkmode_and(eee->advertised, advertising,
12712 eee->supported);
12713 return false;
12714 }
12715 }
12716 return true;
12717 }
12718
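/* Compare the PHY configuration reported by the firmware with the
 * driver's requested settings and reconfigure the link, pause or EEE
 * parameters if they differ.
 */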
12719 static int bnxt_update_phy_setting(struct bnxt *bp)
12720 {
12721 int rc;
12722 bool update_link = false;
12723 bool update_pause = false;
12724 bool update_eee = false;
12725 struct bnxt_link_info *link_info = &bp->link_info;
12726
12727 rc = bnxt_update_link(bp, true);
12728 if (rc) {
12729 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12730 rc);
12731 return rc;
12732 }
12733 if (!BNXT_SINGLE_PF(bp))
12734 return 0;
12735
12736 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12737 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12738 link_info->req_flow_ctrl)
12739 update_pause = true;
12740 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12741 link_info->force_pause_setting != link_info->req_flow_ctrl)
12742 update_pause = true;
12743 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12744 if (BNXT_AUTO_MODE(link_info->auto_mode))
12745 update_link = true;
12746 if (bnxt_force_speed_updated(link_info))
12747 update_link = true;
12748 if (link_info->req_duplex != link_info->duplex_setting)
12749 update_link = true;
12750 } else {
12751 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12752 update_link = true;
12753 if (bnxt_auto_speed_updated(link_info))
12754 update_link = true;
12755 }
12756
12757 /* The last close may have shut down the link, so we need to call
12758 * PHY_CFG to bring it back up.
12759 */
12760 if (!BNXT_LINK_IS_UP(bp))
12761 update_link = true;
12762
12763 if (!bnxt_eee_config_ok(bp))
12764 update_eee = true;
12765
12766 if (update_link)
12767 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12768 else if (update_pause)
12769 rc = bnxt_hwrm_set_pause(bp);
12770 if (rc) {
12771 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12772 rc);
12773 return rc;
12774 }
12775
12776 return rc;
12777 }
12778
12779 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12780
12781 static int bnxt_reinit_after_abort(struct bnxt *bp)
12782 {
12783 int rc;
12784
12785 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12786 return -EBUSY;
12787
12788 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12789 return -ENODEV;
12790
12791 rc = bnxt_fw_init_one(bp);
12792 if (!rc) {
12793 bnxt_clear_int_mode(bp);
12794 rc = bnxt_init_int_mode(bp);
12795 if (!rc) {
12796 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12797 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12798 }
12799 }
12800 return rc;
12801 }
12802
12803 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12804 {
12805 struct bnxt_ntuple_filter *ntp_fltr;
12806 struct bnxt_l2_filter *l2_fltr;
12807
12808 if (list_empty(&fltr->list))
12809 return;
12810
12811 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12812 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12813 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12814 atomic_inc(&l2_fltr->refcnt);
12815 ntp_fltr->l2_fltr = l2_fltr;
12816 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12817 bnxt_del_ntp_filter(bp, ntp_fltr);
12818 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12819 fltr->sw_id);
12820 }
12821 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12822 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12823 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12824 bnxt_del_l2_filter(bp, l2_fltr);
12825 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12826 fltr->sw_id);
12827 }
12828 }
12829 }
12830
12831 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12832 {
12833 struct bnxt_filter_base *usr_fltr, *tmp;
12834
12835 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12836 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12837 }
12838
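/* Build a CPU mask for each TX queue, spreading CPUs near the device's
 * NUMA node across the per-TC queues, and register the masks with XPS.
 */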
12839 static int bnxt_set_xps_mapping(struct bnxt *bp)
12840 {
12841 int numa_node = dev_to_node(&bp->pdev->dev);
12842 unsigned int q_idx, map_idx, cpu, i;
12843 const struct cpumask *cpu_mask_ptr;
12844 int nr_cpus = num_online_cpus();
12845 cpumask_t *q_map;
12846 int rc = 0;
12847
12848 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12849 if (!q_map)
12850 return -ENOMEM;
12851
12852 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
12853 * Each TC has the same number of TX queues. The nth TX queue for each
12854 * TC will have the same CPU mask.
12855 */
12856 for (i = 0; i < nr_cpus; i++) {
12857 map_idx = i % bp->tx_nr_rings_per_tc;
12858 cpu = cpumask_local_spread(i, numa_node);
12859 cpu_mask_ptr = get_cpu_mask(cpu);
12860 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12861 }
12862
12863 /* Register CPU mask for each TX queue except the ones marked for XDP */
12864 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12865 map_idx = q_idx % bp->tx_nr_rings_per_tc;
12866 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12867 if (rc) {
12868 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12869 q_idx);
12870 break;
12871 }
12872 }
12873
12874 kfree(q_map);
12875
12876 return rc;
12877 }
12878
12879 static int bnxt_tx_nr_rings(struct bnxt *bp)
12880 {
12881 return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
12882 bp->tx_nr_rings_per_tc;
12883 }
12884
12885 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
12886 {
12887 return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
12888 }
12889
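/* Core open path: reserve rings, allocate memory, set up NAPI and
 * IRQs, initialize the NIC, bring up the link, and enable interrupts
 * and TX before restoring RSS contexts and user filters.
 */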
12890 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12891 {
12892 int rc = 0;
12893
12894 netif_carrier_off(bp->dev);
12895 if (irq_re_init) {
12896 /* Reserve rings now if none were reserved at driver probe. */
12897 rc = bnxt_init_dflt_ring_mode(bp);
12898 if (rc) {
12899 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
12900 return rc;
12901 }
12902 }
12903 rc = bnxt_reserve_rings(bp, irq_re_init);
12904 if (rc)
12905 return rc;
12906
12907 /* Make adjustments if the reserved TX rings are fewer than requested */
12908 bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
12909 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
12910 if (bp->tx_nr_rings_xdp) {
12911 bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
12912 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12913 }
12914 rc = bnxt_alloc_mem(bp, irq_re_init);
12915 if (rc) {
12916 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12917 goto open_err_free_mem;
12918 }
12919
12920 if (irq_re_init) {
12921 bnxt_init_napi(bp);
12922 rc = bnxt_request_irq(bp);
12923 if (rc) {
12924 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
12925 goto open_err_irq;
12926 }
12927 }
12928
12929 rc = bnxt_init_nic(bp, irq_re_init);
12930 if (rc) {
12931 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12932 goto open_err_irq;
12933 }
12934
12935 bnxt_enable_napi(bp);
12936 bnxt_debug_dev_init(bp);
12937
12938 if (link_re_init) {
12939 mutex_lock(&bp->link_lock);
12940 rc = bnxt_update_phy_setting(bp);
12941 mutex_unlock(&bp->link_lock);
12942 if (rc) {
12943 netdev_warn(bp->dev, "failed to update phy settings\n");
12944 if (BNXT_SINGLE_PF(bp)) {
12945 bp->link_info.phy_retry = true;
12946 bp->link_info.phy_retry_expires =
12947 jiffies + 5 * HZ;
12948 }
12949 }
12950 }
12951
12952 if (irq_re_init) {
12953 udp_tunnel_nic_reset_ntf(bp->dev);
12954 rc = bnxt_set_xps_mapping(bp);
12955 if (rc)
12956 netdev_warn(bp->dev, "failed to set xps mapping\n");
12957 }
12958
12959 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
12960 if (!static_key_enabled(&bnxt_xdp_locking_key))
12961 static_branch_enable(&bnxt_xdp_locking_key);
12962 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
12963 static_branch_disable(&bnxt_xdp_locking_key);
12964 }
12965 set_bit(BNXT_STATE_OPEN, &bp->state);
12966 bnxt_enable_int(bp);
12967 /* Enable TX queues */
12968 bnxt_tx_enable(bp);
12969 mod_timer(&bp->timer, jiffies + bp->current_interval);
12970 /* Poll link status and check for SFP+ module status */
12971 mutex_lock(&bp->link_lock);
12972 bnxt_get_port_module_status(bp);
12973 mutex_unlock(&bp->link_lock);
12974
12975 /* VF-reps may need to be re-opened after the PF is re-opened */
12976 if (BNXT_PF(bp))
12977 bnxt_vf_reps_open(bp);
12978 bnxt_ptp_init_rtc(bp, true);
12979 bnxt_ptp_cfg_tstamp_filters(bp);
12980 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12981 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
12982 bnxt_cfg_usr_fltrs(bp);
12983 return 0;
12984
12985 open_err_irq:
12986 bnxt_del_napi(bp);
12987
12988 open_err_free_mem:
12989 bnxt_free_skbs(bp);
12990 bnxt_free_irq(bp);
12991 bnxt_free_mem(bp, true);
12992 return rc;
12993 }
12994
12995 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12996 {
12997 int rc = 0;
12998
12999 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13000 rc = -EIO;
13001 if (!rc)
13002 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13003 if (rc) {
13004 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13005 netif_close(bp->dev);
13006 }
13007 return rc;
13008 }
13009
13010 /* netdev instance lock held, open the NIC halfway by allocating all
13011 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
13012 * for offline self-tests.
13013 */
13014 int bnxt_half_open_nic(struct bnxt *bp)
13015 {
13016 int rc = 0;
13017
13018 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13019 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13020 rc = -ENODEV;
13021 goto half_open_err;
13022 }
13023
13024 rc = bnxt_alloc_mem(bp, true);
13025 if (rc) {
13026 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13027 goto half_open_err;
13028 }
13029 bnxt_init_napi(bp);
13030 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13031 rc = bnxt_init_nic(bp, true);
13032 if (rc) {
13033 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13034 bnxt_del_napi(bp);
13035 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13036 goto half_open_err;
13037 }
13038 return 0;
13039
13040 half_open_err:
13041 bnxt_free_skbs(bp);
13042 bnxt_free_mem(bp, true);
13043 netif_close(bp->dev);
13044 return rc;
13045 }
13046
13047 /* netdev instance lock held, this call can only be made after a previous
13048 * successful call to bnxt_half_open_nic().
13049 */
13050 void bnxt_half_close_nic(struct bnxt *bp)
13051 {
13052 bnxt_hwrm_resource_free(bp, false, true);
13053 bnxt_del_napi(bp);
13054 bnxt_free_skbs(bp);
13055 bnxt_free_mem(bp, true);
13056 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13057 }
13058
13059 void bnxt_reenable_sriov(struct bnxt *bp)
13060 {
13061 if (BNXT_PF(bp)) {
13062 struct bnxt_pf_info *pf = &bp->pf;
13063 int n = pf->active_vfs;
13064
13065 if (n)
13066 bnxt_cfg_hw_sriov(bp, &n, true);
13067 }
13068 }
13069
13070 static int bnxt_open(struct net_device *dev)
13071 {
13072 struct bnxt *bp = netdev_priv(dev);
13073 int rc;
13074
13075 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13076 rc = bnxt_reinit_after_abort(bp);
13077 if (rc) {
13078 if (rc == -EBUSY)
13079 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13080 else
13081 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13082 return -ENODEV;
13083 }
13084 }
13085
13086 rc = bnxt_hwrm_if_change(bp, true);
13087 if (rc)
13088 return rc;
13089
13090 rc = __bnxt_open_nic(bp, true, true);
13091 if (rc) {
13092 bnxt_hwrm_if_change(bp, false);
13093 } else {
13094 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13095 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13096 bnxt_queue_sp_work(bp,
13097 BNXT_RESTART_ULP_SP_EVENT);
13098 }
13099 }
13100
13101 return rc;
13102 }
13103
13104 static bool bnxt_drv_busy(struct bnxt *bp)
13105 {
13106 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13107 test_bit(BNXT_STATE_READ_STATS, &bp->state));
13108 }
13109
13110 static void bnxt_get_ring_stats(struct bnxt *bp,
13111 struct rtnl_link_stats64 *stats);
13112
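/* Core close path: stop TX, wait for in-progress work to finish, shut
 * down the rings and interrupts, save the ring statistics and free the
 * driver resources.
 */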
13113 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13114 bool link_re_init)
13115 {
13116 /* Close the VF-reps before closing PF */
13117 if (BNXT_PF(bp))
13118 bnxt_vf_reps_close(bp);
13119
13120 /* Change device state to avoid TX queue wake-ups */
13121 bnxt_tx_disable(bp);
13122
13123 clear_bit(BNXT_STATE_OPEN, &bp->state);
13124 smp_mb__after_atomic();
13125 while (bnxt_drv_busy(bp))
13126 msleep(20);
13127
13128 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13129 bnxt_clear_rss_ctxs(bp);
13130 /* Flush rings and disable interrupts */
13131 bnxt_shutdown_nic(bp, irq_re_init);
13132
13133 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13134
13135 bnxt_debug_dev_exit(bp);
13136 bnxt_disable_napi(bp);
13137 timer_delete_sync(&bp->timer);
13138 bnxt_free_skbs(bp);
13139
13140 /* Save ring stats before shutdown */
13141 if (bp->bnapi && irq_re_init) {
13142 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13143 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
13144 }
13145 if (irq_re_init) {
13146 bnxt_free_irq(bp);
13147 bnxt_del_napi(bp);
13148 }
13149 bnxt_free_mem(bp, irq_re_init);
13150 }
13151
13152 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13153 {
13154 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13155 /* If we get here, it means firmware reset is in progress
13156 * while we are trying to close. We can safely proceed with
13157 * the close because we are holding netdev instance lock.
13158 * Some firmware messages may fail as we proceed to close.
13159 * We set the ABORT_ERR flag here so that the FW reset thread
13160 * will later abort when it gets the netdev instance lock
13161 * and sees the flag.
13162 */
13163 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13164 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13165 }
13166
13167 #ifdef CONFIG_BNXT_SRIOV
13168 if (bp->sriov_cfg) {
13169 int rc;
13170
13171 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13172 !bp->sriov_cfg,
13173 BNXT_SRIOV_CFG_WAIT_TMO);
13174 if (!rc)
13175 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13176 else if (rc < 0)
13177 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13178 }
13179 #endif
13180 __bnxt_close_nic(bp, irq_re_init, link_re_init);
13181 }
13182
13183 static int bnxt_close(struct net_device *dev)
13184 {
13185 struct bnxt *bp = netdev_priv(dev);
13186
13187 bnxt_close_nic(bp, true, true);
13188 bnxt_hwrm_shutdown_link(bp);
13189 bnxt_hwrm_if_change(bp, false);
13190 return 0;
13191 }
13192
13193 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13194 u16 *val)
13195 {
13196 struct hwrm_port_phy_mdio_read_output *resp;
13197 struct hwrm_port_phy_mdio_read_input *req;
13198 int rc;
13199
13200 if (bp->hwrm_spec_code < 0x10a00)
13201 return -EOPNOTSUPP;
13202
13203 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13204 if (rc)
13205 return rc;
13206
13207 req->port_id = cpu_to_le16(bp->pf.port_id);
13208 req->phy_addr = phy_addr;
13209 req->reg_addr = cpu_to_le16(reg & 0x1f);
13210 if (mdio_phy_id_is_c45(phy_addr)) {
13211 req->cl45_mdio = 1;
13212 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13213 req->dev_addr = mdio_phy_id_devad(phy_addr);
13214 req->reg_addr = cpu_to_le16(reg);
13215 }
13216
13217 resp = hwrm_req_hold(bp, req);
13218 rc = hwrm_req_send(bp, req);
13219 if (!rc)
13220 *val = le16_to_cpu(resp->reg_data);
13221 hwrm_req_drop(bp, req);
13222 return rc;
13223 }
13224
13225 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13226 u16 val)
13227 {
13228 struct hwrm_port_phy_mdio_write_input *req;
13229 int rc;
13230
13231 if (bp->hwrm_spec_code < 0x10a00)
13232 return -EOPNOTSUPP;
13233
13234 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13235 if (rc)
13236 return rc;
13237
13238 req->port_id = cpu_to_le16(bp->pf.port_id);
13239 req->phy_addr = phy_addr;
13240 req->reg_addr = cpu_to_le16(reg & 0x1f);
13241 if (mdio_phy_id_is_c45(phy_addr)) {
13242 req->cl45_mdio = 1;
13243 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13244 req->dev_addr = mdio_phy_id_devad(phy_addr);
13245 req->reg_addr = cpu_to_le16(reg);
13246 }
13247 req->reg_data = cpu_to_le16(val);
13248
13249 return hwrm_req_send(bp, req);
13250 }
13251
13252 /* netdev instance lock held */
13253 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13254 {
13255 struct mii_ioctl_data *mdio = if_mii(ifr);
13256 struct bnxt *bp = netdev_priv(dev);
13257 int rc;
13258
13259 switch (cmd) {
13260 case SIOCGMIIPHY:
13261 mdio->phy_id = bp->link_info.phy_addr;
13262
13263 fallthrough;
13264 case SIOCGMIIREG: {
13265 u16 mii_regval = 0;
13266
13267 if (!netif_running(dev))
13268 return -EAGAIN;
13269
13270 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13271 &mii_regval);
13272 mdio->val_out = mii_regval;
13273 return rc;
13274 }
13275
13276 case SIOCSMIIREG:
13277 if (!netif_running(dev))
13278 return -EAGAIN;
13279
13280 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13281 mdio->val_in);
13282
13283 default:
13284 /* do nothing */
13285 break;
13286 }
13287 return -EOPNOTSUPP;
13288 }
13289
13290 static void bnxt_get_ring_stats(struct bnxt *bp,
13291 struct rtnl_link_stats64 *stats)
13292 {
13293 int i;
13294
13295 for (i = 0; i < bp->cp_nr_rings; i++) {
13296 struct bnxt_napi *bnapi = bp->bnapi[i];
13297 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13298 u64 *sw = cpr->stats.sw_stats;
13299
13300 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13301 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13302 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13303
13304 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13305 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13306 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13307
13308 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13309 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13310 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13311
13312 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13313 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13314 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13315
13316 stats->rx_missed_errors +=
13317 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13318
13319 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13320
13321 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13322
13323 stats->rx_dropped +=
13324 cpr->sw_stats->rx.rx_netpoll_discards +
13325 cpr->sw_stats->rx.rx_oom_discards;
13326 }
13327 }
13328
13329 static void bnxt_add_prev_stats(struct bnxt *bp,
13330 struct rtnl_link_stats64 *stats)
13331 {
13332 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13333
13334 stats->rx_packets += prev_stats->rx_packets;
13335 stats->tx_packets += prev_stats->tx_packets;
13336 stats->rx_bytes += prev_stats->rx_bytes;
13337 stats->tx_bytes += prev_stats->tx_bytes;
13338 stats->rx_missed_errors += prev_stats->rx_missed_errors;
13339 stats->multicast += prev_stats->multicast;
13340 stats->rx_dropped += prev_stats->rx_dropped;
13341 stats->tx_dropped += prev_stats->tx_dropped;
13342 }
13343
13344 static void
13345 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13346 {
13347 struct bnxt *bp = netdev_priv(dev);
13348
13349 set_bit(BNXT_STATE_READ_STATS, &bp->state);
13350 /* Make sure bnxt_close_nic() sees that we are reading stats before
13351 * we check the BNXT_STATE_OPEN flag.
13352 */
13353 smp_mb__after_atomic();
13354 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13355 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13356 *stats = bp->net_stats_prev;
13357 return;
13358 }
13359
13360 bnxt_get_ring_stats(bp, stats);
13361 bnxt_add_prev_stats(bp, stats);
13362
13363 if (bp->flags & BNXT_FLAG_PORT_STATS) {
13364 u64 *rx = bp->port_stats.sw_stats;
13365 u64 *tx = bp->port_stats.sw_stats +
13366 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13367
13368 stats->rx_crc_errors =
13369 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13370 stats->rx_frame_errors =
13371 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13372 stats->rx_length_errors =
13373 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13374 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13375 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13376 stats->rx_errors =
13377 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13378 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13379 stats->collisions =
13380 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13381 stats->tx_fifo_errors =
13382 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13383 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13384 }
13385 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13386 }
13387
13388 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
13389 struct bnxt_total_ring_err_stats *stats,
13390 struct bnxt_cp_ring_info *cpr)
13391 {
13392 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13393 u64 *hw_stats = cpr->stats.sw_stats;
13394
13395 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13396 stats->rx_total_resets += sw_stats->rx.rx_resets;
13397 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13398 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13399 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13400 stats->rx_total_ring_discards +=
13401 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13402 stats->tx_total_resets += sw_stats->tx.tx_resets;
13403 stats->tx_total_ring_discards +=
13404 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13405 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13406 }
13407
13408 void bnxt_get_ring_err_stats(struct bnxt *bp,
13409 struct bnxt_total_ring_err_stats *stats)
13410 {
13411 int i;
13412
13413 for (i = 0; i < bp->cp_nr_rings; i++)
13414 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13415 }
13416
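/* Copy the netdev multicast list into the default VNIC and report
 * whether it changed; fall back to ALL_MCAST if the list is too long.
 */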
13417 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13418 {
13419 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13420 struct net_device *dev = bp->dev;
13421 struct netdev_hw_addr *ha;
13422 u8 *haddr;
13423 int mc_count = 0;
13424 bool update = false;
13425 int off = 0;
13426
13427 netdev_for_each_mc_addr(ha, dev) {
13428 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13429 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13430 vnic->mc_list_count = 0;
13431 return false;
13432 }
13433 haddr = ha->addr;
13434 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13435 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13436 update = true;
13437 }
13438 off += ETH_ALEN;
13439 mc_count++;
13440 }
13441 if (mc_count)
13442 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13443
13444 if (mc_count != vnic->mc_list_count) {
13445 vnic->mc_list_count = mc_count;
13446 update = true;
13447 }
13448 return update;
13449 }
13450
13451 static bool bnxt_uc_list_updated(struct bnxt *bp)
13452 {
13453 struct net_device *dev = bp->dev;
13454 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13455 struct netdev_hw_addr *ha;
13456 int off = 0;
13457
13458 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13459 return true;
13460
13461 netdev_for_each_uc_addr(ha, dev) {
13462 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13463 return true;
13464
13465 off += ETH_ALEN;
13466 }
13467 return false;
13468 }
13469
13470 static void bnxt_set_rx_mode(struct net_device *dev)
13471 {
13472 struct bnxt *bp = netdev_priv(dev);
13473 struct bnxt_vnic_info *vnic;
13474 bool mc_update = false;
13475 bool uc_update;
13476 u32 mask;
13477
13478 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13479 return;
13480
13481 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13482 mask = vnic->rx_mask;
13483 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13484 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13485 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13486 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13487
13488 if (dev->flags & IFF_PROMISC)
13489 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13490
13491 uc_update = bnxt_uc_list_updated(bp);
13492
13493 if (dev->flags & IFF_BROADCAST)
13494 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13495 if (dev->flags & IFF_ALLMULTI) {
13496 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13497 vnic->mc_list_count = 0;
13498 } else if (dev->flags & IFF_MULTICAST) {
13499 mc_update = bnxt_mc_list_updated(bp, &mask);
13500 }
13501
13502 if (mask != vnic->rx_mask || uc_update || mc_update) {
13503 vnic->rx_mask = mask;
13504
13505 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13506 }
13507 }
13508
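/* Reprogram the unicast L2 filters and the VNIC RX mask after a change
 * to the netdev address lists or flags, falling back to promiscuous
 * mode when there are too many unicast addresses and to ALL_MCAST when
 * the multicast filters cannot be set.
 */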
13509 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13510 {
13511 struct net_device *dev = bp->dev;
13512 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13513 struct netdev_hw_addr *ha;
13514 int i, off = 0, rc;
13515 bool uc_update;
13516
13517 netif_addr_lock_bh(dev);
13518 uc_update = bnxt_uc_list_updated(bp);
13519 netif_addr_unlock_bh(dev);
13520
13521 if (!uc_update)
13522 goto skip_uc;
13523
13524 for (i = 1; i < vnic->uc_filter_count; i++) {
13525 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13526
13527 bnxt_hwrm_l2_filter_free(bp, fltr);
13528 bnxt_del_l2_filter(bp, fltr);
13529 }
13530
13531 vnic->uc_filter_count = 1;
13532
13533 netif_addr_lock_bh(dev);
13534 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13535 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13536 } else {
13537 netdev_for_each_uc_addr(ha, dev) {
13538 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13539 off += ETH_ALEN;
13540 vnic->uc_filter_count++;
13541 }
13542 }
13543 netif_addr_unlock_bh(dev);
13544
13545 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13546 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13547 if (rc) {
13548 if (BNXT_VF(bp) && rc == -ENODEV) {
13549 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13550 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13551 else
13552 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13553 rc = 0;
13554 } else {
13555 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13556 }
13557 vnic->uc_filter_count = i;
13558 return rc;
13559 }
13560 }
13561 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13562 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13563
13564 skip_uc:
13565 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13566 !bnxt_promisc_ok(bp))
13567 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13568 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13569 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13570 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13571 rc);
13572 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13573 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13574 vnic->mc_list_count = 0;
13575 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13576 }
13577 if (rc)
13578 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13579 rc);
13580
13581 return rc;
13582 }
13583
13584 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13585 {
13586 #ifdef CONFIG_BNXT_SRIOV
13587 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13588 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13589
13590 /* No minimum rings were provisioned by the PF. Don't
13591 * reserve rings by default when device is down.
13592 */
13593 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13594 return true;
13595
13596 if (!netif_running(bp->dev))
13597 return false;
13598 }
13599 #endif
13600 return true;
13601 }
13602
13603 /* If the chip and firmware support RFS */
13604 static bool bnxt_rfs_supported(struct bnxt *bp)
13605 {
13606 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13607 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13608 return true;
13609 return false;
13610 }
13611 /* 212 firmware is broken for aRFS */
13612 if (BNXT_FW_MAJ(bp) == 212)
13613 return false;
13614 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13615 return true;
13616 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13617 return true;
13618 return false;
13619 }
13620
13621 /* If runtime conditions support RFS */
13622 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13623 {
13624 struct bnxt_hw_rings hwr = {0};
13625 int max_vnics, max_rss_ctxs;
13626
13627 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13628 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13629 return bnxt_rfs_supported(bp);
13630
13631 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13632 return false;
13633
13634 hwr.grp = bp->rx_nr_rings;
13635 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13636 if (new_rss_ctx)
13637 hwr.vnic++;
13638 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13639 max_vnics = bnxt_get_max_func_vnics(bp);
13640 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13641
13642 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13643 if (bp->rx_nr_rings > 1)
13644 netdev_warn(bp->dev,
13645 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13646 min(max_rss_ctxs - 1, max_vnics - 1));
13647 return false;
13648 }
13649
13650 if (!BNXT_NEW_RM(bp))
13651 return true;
13652
13653 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13654 * issue that will mess up the default VNIC if we reduce the
13655 * reservations.
13656 */
13657 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13658 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13659 return true;
13660
13661 bnxt_hwrm_reserve_rings(bp, &hwr);
13662 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13663 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13664 return true;
13665
13666 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13667 hwr.vnic = 1;
13668 hwr.rss_ctx = 0;
13669 bnxt_hwrm_reserve_rings(bp, &hwr);
13670 return false;
13671 }
13672
13673 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13674 netdev_features_t features)
13675 {
13676 struct bnxt *bp = netdev_priv(dev);
13677 netdev_features_t vlan_features;
13678
13679 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13680 features &= ~NETIF_F_NTUPLE;
13681
13682 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13683 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13684
13685 if (!(features & NETIF_F_GRO))
13686 features &= ~NETIF_F_GRO_HW;
13687
13688 if (features & NETIF_F_GRO_HW)
13689 features &= ~NETIF_F_LRO;
13690
13691 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13692 * turned on or off together.
13693 */
13694 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13695 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13696 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13697 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13698 else if (vlan_features)
13699 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13700 }
13701 #ifdef CONFIG_BNXT_SRIOV
13702 if (BNXT_VF(bp) && bp->vf.vlan)
13703 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13704 #endif
13705 return features;
13706 }
13707
13708 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13709 bool link_re_init, u32 flags, bool update_tpa)
13710 {
13711 bnxt_close_nic(bp, irq_re_init, link_re_init);
13712 bp->flags = flags;
13713 if (update_tpa)
13714 bnxt_set_ring_params(bp);
13715 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13716 }
13717
13718 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13719 {
13720 bool update_tpa = false, update_ntuple = false;
13721 struct bnxt *bp = netdev_priv(dev);
13722 u32 flags = bp->flags;
13723 u32 changes;
13724 int rc = 0;
13725 bool re_init = false;
13726
13727 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13728 if (features & NETIF_F_GRO_HW)
13729 flags |= BNXT_FLAG_GRO;
13730 else if (features & NETIF_F_LRO)
13731 flags |= BNXT_FLAG_LRO;
13732
13733 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13734 flags &= ~BNXT_FLAG_TPA;
13735
13736 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13737 flags |= BNXT_FLAG_STRIP_VLAN;
13738
13739 if (features & NETIF_F_NTUPLE)
13740 flags |= BNXT_FLAG_RFS;
13741 else
13742 bnxt_clear_usr_fltrs(bp, true);
13743
13744 changes = flags ^ bp->flags;
13745 if (changes & BNXT_FLAG_TPA) {
13746 update_tpa = true;
13747 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13748 (flags & BNXT_FLAG_TPA) == 0 ||
13749 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13750 re_init = true;
13751 }
13752
13753 if (changes & ~BNXT_FLAG_TPA)
13754 re_init = true;
13755
13756 if (changes & BNXT_FLAG_RFS)
13757 update_ntuple = true;
13758
13759 if (flags != bp->flags) {
13760 u32 old_flags = bp->flags;
13761
13762 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13763 bp->flags = flags;
13764 if (update_tpa)
13765 bnxt_set_ring_params(bp);
13766 return rc;
13767 }
13768
13769 if (update_ntuple)
13770 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13771
13772 if (re_init)
13773 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13774
13775 if (update_tpa) {
13776 bp->flags = flags;
13777 rc = bnxt_set_tpa(bp,
13778 (flags & BNXT_FLAG_TPA) ?
13779 true : false);
13780 if (rc)
13781 bp->flags = old_flags;
13782 }
13783 }
13784 return rc;
13785 }
13786
13787 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13788 u8 **nextp)
13789 {
13790 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13791 struct hop_jumbo_hdr *jhdr;
13792 int hdr_count = 0;
13793 u8 *nexthdr;
13794 int start;
13795
13796 /* Check that there are at most 2 IPv6 extension headers, no
13797 * fragment header, and each is <= 64 bytes.
13798 */
13799 start = nw_off + sizeof(*ip6h);
13800 nexthdr = &ip6h->nexthdr;
13801 while (ipv6_ext_hdr(*nexthdr)) {
13802 struct ipv6_opt_hdr *hp;
13803 int hdrlen;
13804
13805 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13806 *nexthdr == NEXTHDR_FRAGMENT)
13807 return false;
13808 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13809 skb_headlen(skb), NULL);
13810 if (!hp)
13811 return false;
13812 if (*nexthdr == NEXTHDR_AUTH)
13813 hdrlen = ipv6_authlen(hp);
13814 else
13815 hdrlen = ipv6_optlen(hp);
13816
13817 if (hdrlen > 64)
13818 return false;
13819
13820 /* The ext header may be a hop-by-hop header inserted for
13821 * big TCP purposes. This will be removed before sending
13822 * from the NIC, so do not count it.
13823 */
13824 if (*nexthdr == NEXTHDR_HOP) {
13825 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
13826 goto increment_hdr;
13827
13828 jhdr = (struct hop_jumbo_hdr *)hp;
13829 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
13830 jhdr->nexthdr != IPPROTO_TCP)
13831 goto increment_hdr;
13832
13833 goto next_hdr;
13834 }
13835 increment_hdr:
13836 hdr_count++;
13837 next_hdr:
13838 nexthdr = &hp->nexthdr;
13839 start += hdrlen;
13840 }
13841 if (nextp) {
13842 /* Caller will check inner protocol */
13843 if (skb->encapsulation) {
13844 *nextp = nexthdr;
13845 return true;
13846 }
13847 *nextp = NULL;
13848 }
13849 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13850 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13851 }
13852
13853 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
13854 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13855 {
13856 struct udphdr *uh = udp_hdr(skb);
13857 __be16 udp_port = uh->dest;
13858
13859 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13860 udp_port != bp->vxlan_gpe_port)
13861 return false;
13862 if (skb->inner_protocol == htons(ETH_P_TEB)) {
13863 struct ethhdr *eh = inner_eth_hdr(skb);
13864
13865 switch (eh->h_proto) {
13866 case htons(ETH_P_IP):
13867 return true;
13868 case htons(ETH_P_IPV6):
13869 return bnxt_exthdr_check(bp, skb,
13870 skb_inner_network_offset(skb),
13871 NULL);
13872 }
13873 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13874 return true;
13875 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13876 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13877 NULL);
13878 }
13879 return false;
13880 }
13881
13882 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13883 {
13884 switch (l4_proto) {
13885 case IPPROTO_UDP:
13886 return bnxt_udp_tunl_check(bp, skb);
13887 case IPPROTO_IPIP:
13888 return true;
13889 case IPPROTO_GRE: {
13890 switch (skb->inner_protocol) {
13891 default:
13892 return false;
13893 case htons(ETH_P_IP):
13894 return true;
13895 case htons(ETH_P_IPV6):
13896 fallthrough;
13897 }
13898 }
13899 case IPPROTO_IPV6:
13900 /* Check ext headers of inner ipv6 */
13901 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13902 NULL);
13903 }
13904 return false;
13905 }
13906
13907 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13908 struct net_device *dev,
13909 netdev_features_t features)
13910 {
13911 struct bnxt *bp = netdev_priv(dev);
13912 u8 *l4_proto;
13913
13914 features = vlan_features_check(skb, features);
13915 switch (vlan_get_protocol(skb)) {
13916 case htons(ETH_P_IP):
13917 if (!skb->encapsulation)
13918 return features;
13919 l4_proto = &ip_hdr(skb)->protocol;
13920 if (bnxt_tunl_check(bp, skb, *l4_proto))
13921 return features;
13922 break;
13923 case htons(ETH_P_IPV6):
13924 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
13925 &l4_proto))
13926 break;
13927 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
13928 return features;
13929 break;
13930 }
13931 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13932 }
13933
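/* Read num_words 32-bit words of chip register space starting at
 * reg_off into reg_buf using the DBG_READ_DIRECT firmware command and
 * an intermediate DMA buffer.
 */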
13934 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
13935 u32 *reg_buf)
13936 {
13937 struct hwrm_dbg_read_direct_output *resp;
13938 struct hwrm_dbg_read_direct_input *req;
13939 __le32 *dbg_reg_buf;
13940 dma_addr_t mapping;
13941 int rc, i;
13942
13943 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
13944 if (rc)
13945 return rc;
13946
13947 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
13948 &mapping);
13949 if (!dbg_reg_buf) {
13950 rc = -ENOMEM;
13951 goto dbg_rd_reg_exit;
13952 }
13953
13954 req->host_dest_addr = cpu_to_le64(mapping);
13955
13956 resp = hwrm_req_hold(bp, req);
13957 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
13958 req->read_len32 = cpu_to_le32(num_words);
13959
13960 rc = hwrm_req_send(bp, req);
13961 if (rc || resp->error_code) {
13962 rc = -EIO;
13963 goto dbg_rd_reg_exit;
13964 }
13965 for (i = 0; i < num_words; i++)
13966 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
13967
13968 dbg_rd_reg_exit:
13969 hwrm_req_drop(bp, req);
13970 return rc;
13971 }
13972
13973 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
13974 u32 ring_id, u32 *prod, u32 *cons)
13975 {
13976 struct hwrm_dbg_ring_info_get_output *resp;
13977 struct hwrm_dbg_ring_info_get_input *req;
13978 int rc;
13979
13980 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
13981 if (rc)
13982 return rc;
13983
13984 req->ring_type = ring_type;
13985 req->fw_ring_id = cpu_to_le32(ring_id);
13986 resp = hwrm_req_hold(bp, req);
13987 rc = hwrm_req_send(bp, req);
13988 if (!rc) {
13989 *prod = le32_to_cpu(resp->producer_index);
13990 *cons = le32_to_cpu(resp->consumer_index);
13991 }
13992 hwrm_req_drop(bp, req);
13993 return rc;
13994 }
13995
13996 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
13997 {
13998 struct bnxt_tx_ring_info *txr;
13999 int i = bnapi->index, j;
14000
14001 bnxt_for_each_napi_tx(j, bnapi, txr)
14002 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14003 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14004 txr->tx_cons);
14005 }
14006
14007 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14008 {
14009 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14010 int i = bnapi->index;
14011
14012 if (!rxr)
14013 return;
14014
14015 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14016 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14017 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14018 rxr->rx_sw_agg_prod);
14019 }
14020
14021 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14022 {
14023 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
14024 int i = bnapi->index;
14025
14026 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14027 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14028 }
14029
14030 static void bnxt_dbg_dump_states(struct bnxt *bp)
14031 {
14032 int i;
14033 struct bnxt_napi *bnapi;
14034
14035 for (i = 0; i < bp->cp_nr_rings; i++) {
14036 bnapi = bp->bnapi[i];
14037 if (netif_msg_drv(bp)) {
14038 bnxt_dump_tx_sw_state(bnapi);
14039 bnxt_dump_rx_sw_state(bnapi);
14040 bnxt_dump_cp_sw_state(bnapi);
14041 }
14042 }
14043 }
14044
14045 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14046 {
14047 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14048 struct hwrm_ring_reset_input *req;
14049 struct bnxt_napi *bnapi = rxr->bnapi;
14050 struct bnxt_cp_ring_info *cpr;
14051 u16 cp_ring_id;
14052 int rc;
14053
14054 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14055 if (rc)
14056 return rc;
14057
14058 cpr = &bnapi->cp_ring;
14059 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14060 req->cmpl_ring = cpu_to_le16(cp_ring_id);
14061 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14062 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14063 return hwrm_req_send_silent(bp, req);
14064 }
14065
14066 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14067 {
14068 if (!silent)
14069 bnxt_dbg_dump_states(bp);
14070 if (netif_running(bp->dev)) {
14071 bnxt_close_nic(bp, !silent, false);
14072 bnxt_open_nic(bp, !silent, false);
14073 }
14074 }
14075
14076 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14077 {
14078 struct bnxt *bp = netdev_priv(dev);
14079
14080 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14081 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14082 }
14083
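/* Periodic firmware health poll, called from bnxt_timer().  Once the
 * tmr_counter countdown expires, compare the heartbeat and reset count
 * registers against the last recorded values and queue
 * BNXT_FW_EXCEPTION_SP_EVENT if the heartbeat has stalled or the reset
 * count has changed.
 */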
14084 static void bnxt_fw_health_check(struct bnxt *bp)
14085 {
14086 struct bnxt_fw_health *fw_health = bp->fw_health;
14087 struct pci_dev *pdev = bp->pdev;
14088 u32 val;
14089
14090 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14091 return;
14092
14093 /* Make sure it is enabled before checking the tmr_counter. */
14094 smp_rmb();
14095 if (fw_health->tmr_counter) {
14096 fw_health->tmr_counter--;
14097 return;
14098 }
14099
14100 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14101 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14102 fw_health->arrests++;
14103 goto fw_reset;
14104 }
14105
14106 fw_health->last_fw_heartbeat = val;
14107
14108 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14109 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14110 fw_health->discoveries++;
14111 goto fw_reset;
14112 }
14113
14114 fw_health->tmr_counter = fw_health->tmr_multiplier;
14115 return;
14116
14117 fw_reset:
14118 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14119 }
14120
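/* Per-device periodic timer.  Queues deferred work for the slow path task
 * (stats refresh, PHY retries, NTP filter aging, health checks, etc.) and
 * re-arms itself while the device remains open.
 */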
14121 static void bnxt_timer(struct timer_list *t)
14122 {
14123 struct bnxt *bp = timer_container_of(bp, t, timer);
14124 struct net_device *dev = bp->dev;
14125
14126 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14127 return;
14128
14129 if (atomic_read(&bp->intr_sem) != 0)
14130 goto bnxt_restart_timer;
14131
14132 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14133 bnxt_fw_health_check(bp);
14134
14135 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14136 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14137
14138 if (bnxt_tc_flower_enabled(bp))
14139 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14140
14141 #ifdef CONFIG_RFS_ACCEL
14142 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14143 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14144 #endif /*CONFIG_RFS_ACCEL*/
14145
14146 if (bp->link_info.phy_retry) {
14147 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14148 bp->link_info.phy_retry = false;
14149 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14150 } else {
14151 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14152 }
14153 }
14154
14155 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14156 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14157
14158 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14159 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14160
14161 bnxt_restart_timer:
14162 mod_timer(&bp->timer, jiffies + bp->current_interval);
14163 }
14164
14165 static void bnxt_lock_sp(struct bnxt *bp)
14166 {
14167 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14168 * set. If the device is being closed, bnxt_close() may be holding
14169 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14170 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14171 * instance lock.
14172 */
14173 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14174 netdev_lock(bp->dev);
14175 }
14176
14177 static void bnxt_unlock_sp(struct bnxt *bp)
14178 {
14179 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14180 netdev_unlock(bp->dev);
14181 }
14182
14183 /* Only called from bnxt_sp_task() */
14184 static void bnxt_reset(struct bnxt *bp, bool silent)
14185 {
14186 bnxt_lock_sp(bp);
14187 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14188 bnxt_reset_task(bp, silent);
14189 bnxt_unlock_sp(bp);
14190 }
14191
14192 /* Only called from bnxt_sp_task() */
14193 static void bnxt_rx_ring_reset(struct bnxt *bp)
14194 {
14195 int i;
14196
14197 bnxt_lock_sp(bp);
14198 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14199 bnxt_unlock_sp(bp);
14200 return;
14201 }
14202 /* Disable and flush TPA before resetting the RX ring */
14203 if (bp->flags & BNXT_FLAG_TPA)
14204 bnxt_set_tpa(bp, false);
14205 for (i = 0; i < bp->rx_nr_rings; i++) {
14206 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14207 struct bnxt_cp_ring_info *cpr;
14208 int rc;
14209
14210 if (!rxr->bnapi->in_reset)
14211 continue;
14212
14213 rc = bnxt_hwrm_rx_ring_reset(bp, i);
14214 if (rc) {
14215 if (rc == -EINVAL || rc == -EOPNOTSUPP)
14216 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14217 else
14218 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14219 rc);
14220 bnxt_reset_task(bp, true);
14221 break;
14222 }
14223 bnxt_free_one_rx_ring_skbs(bp, rxr);
14224 rxr->rx_prod = 0;
14225 rxr->rx_agg_prod = 0;
14226 rxr->rx_sw_agg_prod = 0;
14227 rxr->rx_next_cons = 0;
14228 rxr->bnapi->in_reset = false;
14229 bnxt_alloc_one_rx_ring(bp, i);
14230 cpr = &rxr->bnapi->cp_ring;
14231 cpr->sw_stats->rx.rx_resets++;
14232 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14233 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14234 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14235 }
14236 if (bp->flags & BNXT_FLAG_TPA)
14237 bnxt_set_tpa(bp, true);
14238 bnxt_unlock_sp(bp);
14239 }
14240
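/* Quiesce the device after a fatal firmware error: stop TX and NAPI,
 * disable and free interrupts, and disable the PCI device.
 */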
14241 static void bnxt_fw_fatal_close(struct bnxt *bp)
14242 {
14243 bnxt_tx_disable(bp);
14244 bnxt_disable_napi(bp);
14245 bnxt_disable_int_sync(bp);
14246 bnxt_free_irq(bp);
14247 bnxt_clear_int_mode(bp);
14248 pci_disable_device(bp->pdev);
14249 }
14250
14251 static void bnxt_fw_reset_close(struct bnxt *bp)
14252 {
14253 /* When firmware is in fatal state, quiesce device and disable
14254 * bus master to prevent any potential bad DMAs before freeing
14255 * kernel memory.
14256 */
14257 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14258 u16 val = 0;
14259
14260 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14261 if (val == 0xffff)
14262 bp->fw_reset_min_dsecs = 0;
14263 bnxt_fw_fatal_close(bp);
14264 }
14265 __bnxt_close_nic(bp, true, false);
14266 bnxt_vf_reps_free(bp);
14267 bnxt_clear_int_mode(bp);
14268 bnxt_hwrm_func_drv_unrgtr(bp);
14269 if (pci_is_enabled(bp->pdev))
14270 pci_disable_device(bp->pdev);
14271 bnxt_free_ctx_mem(bp, false);
14272 }
14273
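/* Firmware is considered healthy again only if the heartbeat register has
 * advanced and the reset counter shows that firmware has gone through a
 * reset since the last check.
 */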
14274 static bool is_bnxt_fw_ok(struct bnxt *bp)
14275 {
14276 struct bnxt_fw_health *fw_health = bp->fw_health;
14277 bool no_heartbeat = false, has_reset = false;
14278 u32 val;
14279
14280 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14281 if (val == fw_health->last_fw_heartbeat)
14282 no_heartbeat = true;
14283
14284 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14285 if (val != fw_health->last_fw_reset_cnt)
14286 has_reset = true;
14287
14288 if (!no_heartbeat && has_reset)
14289 return true;
14290
14291 return false;
14292 }
14293
14294 /* netdev instance lock is acquired before calling this function */
14295 static void bnxt_force_fw_reset(struct bnxt *bp)
14296 {
14297 struct bnxt_fw_health *fw_health = bp->fw_health;
14298 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14299 u32 wait_dsecs;
14300
14301 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14302 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14303 return;
14304
14305 /* We have to serialize with bnxt_refclk_read() */
14306 if (ptp) {
14307 unsigned long flags;
14308
14309 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14310 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14311 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14312 } else {
14313 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14314 }
14315 bnxt_fw_reset_close(bp);
14316 wait_dsecs = fw_health->master_func_wait_dsecs;
14317 if (fw_health->primary) {
14318 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14319 wait_dsecs = 0;
14320 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14321 } else {
14322 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14323 wait_dsecs = fw_health->normal_func_wait_dsecs;
14324 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14325 }
14326
14327 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14328 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14329 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14330 }
14331
14332 void bnxt_fw_exception(struct bnxt *bp)
14333 {
14334 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14335 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14336 bnxt_ulp_stop(bp);
14337 bnxt_lock_sp(bp);
14338 bnxt_force_fw_reset(bp);
14339 bnxt_unlock_sp(bp);
14340 }
14341
14342 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14343 * < 0 on error.
14344 */
14345 static int bnxt_get_registered_vfs(struct bnxt *bp)
14346 {
14347 #ifdef CONFIG_BNXT_SRIOV
14348 int rc;
14349
14350 if (!BNXT_PF(bp))
14351 return 0;
14352
14353 rc = bnxt_hwrm_func_qcfg(bp);
14354 if (rc) {
14355 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14356 return rc;
14357 }
14358 if (bp->pf.registered_vfs)
14359 return bp->pf.registered_vfs;
14360 if (bp->sriov_cfg)
14361 return 1;
14362 #endif
14363 return 0;
14364 }
14365
14366 void bnxt_fw_reset(struct bnxt *bp)
14367 {
14368 bnxt_ulp_stop(bp);
14369 bnxt_lock_sp(bp);
14370 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14371 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14372 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14373 int n = 0, tmo;
14374
14375 /* We have to serialize with bnxt_refclk_read() */
14376 if (ptp) {
14377 unsigned long flags;
14378
14379 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14380 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14381 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14382 } else {
14383 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14384 }
14385 if (bp->pf.active_vfs &&
14386 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14387 n = bnxt_get_registered_vfs(bp);
14388 if (n < 0) {
14389 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14390 n);
14391 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14392 netif_close(bp->dev);
14393 goto fw_reset_exit;
14394 } else if (n > 0) {
14395 u16 vf_tmo_dsecs = n * 10;
14396
14397 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14398 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14399 bp->fw_reset_state =
14400 BNXT_FW_RESET_STATE_POLL_VF;
14401 bnxt_queue_fw_reset_work(bp, HZ / 10);
14402 goto fw_reset_exit;
14403 }
14404 bnxt_fw_reset_close(bp);
14405 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14406 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14407 tmo = HZ / 10;
14408 } else {
14409 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14410 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14411 }
14412 bnxt_queue_fw_reset_work(bp, tmo);
14413 }
14414 fw_reset_exit:
14415 bnxt_unlock_sp(bp);
14416 }
14417
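/* On P5+ chips, look for completion rings that have work pending but whose
 * consumer index has not advanced since the previous check.  Such rings are
 * queried via HWRM_DBG_RING_INFO_GET and counted in the missed_irqs stat.
 */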
14418 static void bnxt_chk_missed_irq(struct bnxt *bp)
14419 {
14420 int i;
14421
14422 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14423 return;
14424
14425 for (i = 0; i < bp->cp_nr_rings; i++) {
14426 struct bnxt_napi *bnapi = bp->bnapi[i];
14427 struct bnxt_cp_ring_info *cpr;
14428 u32 fw_ring_id;
14429 int j;
14430
14431 if (!bnapi)
14432 continue;
14433
14434 cpr = &bnapi->cp_ring;
14435 for (j = 0; j < cpr->cp_ring_count; j++) {
14436 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14437 u32 val[2];
14438
14439 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14440 continue;
14441
14442 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14443 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14444 continue;
14445 }
14446 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14447 bnxt_dbg_hwrm_ring_info_get(bp,
14448 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14449 fw_ring_id, &val[0], &val[1]);
14450 cpr->sw_stats->cmn.missed_irqs++;
14451 }
14452 }
14453 }
14454
14455 static void bnxt_cfg_ntp_filters(struct bnxt *);
14456
14457 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14458 {
14459 struct bnxt_link_info *link_info = &bp->link_info;
14460
14461 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14462 link_info->autoneg = BNXT_AUTONEG_SPEED;
14463 if (bp->hwrm_spec_code >= 0x10201) {
14464 if (link_info->auto_pause_setting &
14465 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14466 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14467 } else {
14468 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14469 }
14470 bnxt_set_auto_speed(link_info);
14471 } else {
14472 bnxt_set_force_speed(link_info);
14473 link_info->req_duplex = link_info->duplex_setting;
14474 }
14475 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14476 link_info->req_flow_ctrl =
14477 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14478 else
14479 link_info->req_flow_ctrl = link_info->force_pause_setting;
14480 }
14481
14482 static void bnxt_fw_echo_reply(struct bnxt *bp)
14483 {
14484 struct bnxt_fw_health *fw_health = bp->fw_health;
14485 struct hwrm_func_echo_response_input *req;
14486 int rc;
14487
14488 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14489 if (rc)
14490 return;
14491 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14492 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14493 hwrm_req_send(bp, req);
14494 }
14495
14496 static void bnxt_ulp_restart(struct bnxt *bp)
14497 {
14498 bnxt_ulp_stop(bp);
14499 bnxt_ulp_start(bp, 0);
14500 }
14501
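/* Slow path workqueue handler.  Runs with BNXT_STATE_IN_SP_TASK set and
 * services every event flagged in bp->sp_event; the reset handlers near the
 * end temporarily drop BNXT_STATE_IN_SP_TASK via bnxt_lock_sp().
 */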
14502 static void bnxt_sp_task(struct work_struct *work)
14503 {
14504 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14505
14506 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14507 smp_mb__after_atomic();
14508 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14509 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14510 return;
14511 }
14512
14513 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14514 bnxt_ulp_restart(bp);
14515 bnxt_reenable_sriov(bp);
14516 }
14517
14518 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14519 bnxt_cfg_rx_mode(bp);
14520
14521 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14522 bnxt_cfg_ntp_filters(bp);
14523 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14524 bnxt_hwrm_exec_fwd_req(bp);
14525 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14526 netdev_info(bp->dev, "Received PF driver unload event!\n");
14527 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14528 bnxt_hwrm_port_qstats(bp, 0);
14529 bnxt_hwrm_port_qstats_ext(bp, 0);
14530 bnxt_accumulate_all_stats(bp);
14531 }
14532
14533 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14534 int rc;
14535
14536 mutex_lock(&bp->link_lock);
14537 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14538 &bp->sp_event))
14539 bnxt_hwrm_phy_qcaps(bp);
14540
14541 rc = bnxt_update_link(bp, true);
14542 if (rc)
14543 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14544 rc);
14545
14546 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14547 &bp->sp_event))
14548 bnxt_init_ethtool_link_settings(bp);
14549 mutex_unlock(&bp->link_lock);
14550 }
14551 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14552 int rc;
14553
14554 mutex_lock(&bp->link_lock);
14555 rc = bnxt_update_phy_setting(bp);
14556 mutex_unlock(&bp->link_lock);
14557 if (rc) {
14558 netdev_warn(bp->dev, "update phy settings retry failed\n");
14559 } else {
14560 bp->link_info.phy_retry = false;
14561 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14562 }
14563 }
14564 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14565 mutex_lock(&bp->link_lock);
14566 bnxt_get_port_module_status(bp);
14567 mutex_unlock(&bp->link_lock);
14568 }
14569
14570 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14571 bnxt_tc_flow_stats_work(bp);
14572
14573 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14574 bnxt_chk_missed_irq(bp);
14575
14576 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14577 bnxt_fw_echo_reply(bp);
14578
14579 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14580 bnxt_hwmon_notify_event(bp);
14581
14582 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
14583 * must be the last functions to be called before exiting.
14584 */
14585 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14586 bnxt_reset(bp, false);
14587
14588 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14589 bnxt_reset(bp, true);
14590
14591 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14592 bnxt_rx_ring_reset(bp);
14593
14594 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14595 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14596 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14597 bnxt_devlink_health_fw_report(bp);
14598 else
14599 bnxt_fw_reset(bp);
14600 }
14601
14602 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14603 if (!is_bnxt_fw_ok(bp))
14604 bnxt_devlink_health_fw_report(bp);
14605 }
14606
14607 smp_mb__before_atomic();
14608 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14609 }
14610
14611 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14612 int *max_cp);
14613
14614 /* Under netdev instance lock */
14615 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14616 int tx_xdp)
14617 {
14618 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14619 struct bnxt_hw_rings hwr = {0};
14620 int rx_rings = rx;
14621 int rc;
14622
14623 if (tcs)
14624 tx_sets = tcs;
14625
14626 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14627
14628 if (max_rx < rx_rings)
14629 return -ENOMEM;
14630
14631 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14632 rx_rings <<= 1;
14633
14634 hwr.rx = rx_rings;
14635 hwr.tx = tx * tx_sets + tx_xdp;
14636 if (max_tx < hwr.tx)
14637 return -ENOMEM;
14638
14639 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14640
14641 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14642 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14643 if (max_cp < hwr.cp)
14644 return -ENOMEM;
14645 hwr.stat = hwr.cp;
14646 if (BNXT_NEW_RM(bp)) {
14647 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14648 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14649 hwr.grp = rx;
14650 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14651 }
14652 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14653 hwr.cp_p5 = hwr.tx + rx;
14654 rc = bnxt_hwrm_check_rings(bp, &hwr);
14655 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14656 if (!bnxt_ulp_registered(bp->edev)) {
14657 hwr.cp += bnxt_get_ulp_msix_num(bp);
14658 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14659 }
14660 if (hwr.cp > bp->total_irqs) {
14661 int total_msix = bnxt_change_msix(bp, hwr.cp);
14662
14663 if (total_msix < hwr.cp) {
14664 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14665 hwr.cp, total_msix);
14666 rc = -ENOSPC;
14667 }
14668 }
14669 }
14670 return rc;
14671 }
14672
14673 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14674 {
14675 if (bp->bar2) {
14676 pci_iounmap(pdev, bp->bar2);
14677 bp->bar2 = NULL;
14678 }
14679
14680 if (bp->bar1) {
14681 pci_iounmap(pdev, bp->bar1);
14682 bp->bar1 = NULL;
14683 }
14684
14685 if (bp->bar0) {
14686 pci_iounmap(pdev, bp->bar0);
14687 bp->bar0 = NULL;
14688 }
14689 }
14690
14691 static void bnxt_cleanup_pci(struct bnxt *bp)
14692 {
14693 bnxt_unmap_bars(bp, bp->pdev);
14694 pci_release_regions(bp->pdev);
14695 if (pci_is_enabled(bp->pdev))
14696 pci_disable_device(bp->pdev);
14697 }
14698
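/* Set the driver's default RX and TX interrupt coalescing parameters and
 * the default statistics refresh interval.
 */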
14699 static void bnxt_init_dflt_coal(struct bnxt *bp)
14700 {
14701 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14702 struct bnxt_coal *coal;
14703 u16 flags = 0;
14704
14705 if (coal_cap->cmpl_params &
14706 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14707 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14708
14709 /* Tick values in microseconds.
14710 * 1 coal_buf x bufs_per_record = 1 completion record.
14711 */
14712 coal = &bp->rx_coal;
14713 coal->coal_ticks = 10;
14714 coal->coal_bufs = 30;
14715 coal->coal_ticks_irq = 1;
14716 coal->coal_bufs_irq = 2;
14717 coal->idle_thresh = 50;
14718 coal->bufs_per_record = 2;
14719 coal->budget = 64; /* NAPI budget */
14720 coal->flags = flags;
14721
14722 coal = &bp->tx_coal;
14723 coal->coal_ticks = 28;
14724 coal->coal_bufs = 30;
14725 coal->coal_ticks_irq = 2;
14726 coal->coal_bufs_irq = 2;
14727 coal->bufs_per_record = 1;
14728 coal->flags = flags;
14729
14730 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14731 }
14732
14733 /* FW that pre-reserves 1 VNIC per function */
14734 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14735 {
14736 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14737
14738 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14739 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14740 return true;
14741 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14742 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14743 return true;
14744 return false;
14745 }
14746
14747 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14748 {
14749 struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14750 struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14751 int rc;
14752
14753 bp->max_pfcwd_tmo_ms = 0;
14754 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14755 if (rc)
14756 return;
14757 resp = hwrm_req_hold(bp, req);
14758 rc = hwrm_req_send_silent(bp, req);
14759 if (!rc)
14760 bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14761 hwrm_req_drop(bp, req);
14762 }
14763
14764 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14765 {
14766 int rc;
14767
14768 bp->fw_cap = 0;
14769 rc = bnxt_hwrm_ver_get(bp);
14770 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
14771 * so wait before continuing with recovery.
14772 */
14773 if (rc)
14774 msleep(100);
14775 bnxt_try_map_fw_health_reg(bp);
14776 if (rc) {
14777 rc = bnxt_try_recover_fw(bp);
14778 if (rc)
14779 return rc;
14780 rc = bnxt_hwrm_ver_get(bp);
14781 if (rc)
14782 return rc;
14783 }
14784
14785 bnxt_nvm_cfg_ver_get(bp);
14786
14787 rc = bnxt_hwrm_func_reset(bp);
14788 if (rc)
14789 return -ENODEV;
14790
14791 bnxt_hwrm_fw_set_time(bp);
14792 return 0;
14793 }
14794
14795 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14796 {
14797 int rc;
14798
14799 /* Get the MAX capabilities for this function */
14800 rc = bnxt_hwrm_func_qcaps(bp);
14801 if (rc) {
14802 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14803 rc);
14804 return -ENODEV;
14805 }
14806
14807 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14808 if (rc)
14809 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14810 rc);
14811
14812 if (bnxt_alloc_fw_health(bp)) {
14813 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14814 } else {
14815 rc = bnxt_hwrm_error_recovery_qcfg(bp);
14816 if (rc)
14817 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14818 rc);
14819 }
14820
14821 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14822 if (rc)
14823 return -ENODEV;
14824
14825 rc = bnxt_alloc_crash_dump_mem(bp);
14826 if (rc)
14827 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14828 rc);
14829 if (!rc) {
14830 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14831 if (rc) {
14832 bnxt_free_crash_dump_mem(bp);
14833 netdev_warn(bp->dev,
14834 "hwrm crash dump mem failure rc: %d\n", rc);
14835 }
14836 }
14837
14838 if (bnxt_fw_pre_resv_vnics(bp))
14839 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14840
14841 bnxt_hwrm_pfcwd_qcaps(bp);
14842 bnxt_hwrm_func_qcfg(bp);
14843 bnxt_hwrm_vnic_qcaps(bp);
14844 bnxt_hwrm_port_led_qcaps(bp);
14845 bnxt_ethtool_init(bp);
14846 if (bp->fw_cap & BNXT_FW_CAP_PTP)
14847 __bnxt_hwrm_ptp_qcfg(bp);
14848 bnxt_dcb_init(bp);
14849 bnxt_hwmon_init(bp);
14850 return 0;
14851 }
14852
14853 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14854 {
14855 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14856 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14857 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14858 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14859 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14860 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14861 bp->rss_hash_delta = bp->rss_hash_cfg;
14862 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14863 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14864 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14865 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14866 }
14867 }
14868
14869 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14870 {
14871 struct net_device *dev = bp->dev;
14872
14873 dev->hw_features &= ~NETIF_F_NTUPLE;
14874 dev->features &= ~NETIF_F_NTUPLE;
14875 bp->flags &= ~BNXT_FLAG_RFS;
14876 if (bnxt_rfs_supported(bp)) {
14877 dev->hw_features |= NETIF_F_NTUPLE;
14878 if (bnxt_rfs_capable(bp, false)) {
14879 bp->flags |= BNXT_FLAG_RFS;
14880 dev->features |= NETIF_F_NTUPLE;
14881 }
14882 }
14883 }
14884
14885 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14886 {
14887 struct pci_dev *pdev = bp->pdev;
14888
14889 bnxt_set_dflt_rss_hash_type(bp);
14890 bnxt_set_dflt_rfs(bp);
14891
14892 bnxt_get_wol_settings(bp);
14893 if (bp->flags & BNXT_FLAG_WOL_CAP)
14894 device_set_wakeup_enable(&pdev->dev, bp->wol);
14895 else
14896 device_set_wakeup_capable(&pdev->dev, false);
14897
14898 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14899 bnxt_hwrm_coal_params_qcaps(bp);
14900 }
14901
14902 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14903
14904 int bnxt_fw_init_one(struct bnxt *bp)
14905 {
14906 int rc;
14907
14908 rc = bnxt_fw_init_one_p1(bp);
14909 if (rc) {
14910 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
14911 return rc;
14912 }
14913 rc = bnxt_fw_init_one_p2(bp);
14914 if (rc) {
14915 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
14916 return rc;
14917 }
14918 rc = bnxt_probe_phy(bp, false);
14919 if (rc)
14920 return rc;
14921 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
14922 if (rc)
14923 return rc;
14924
14925 bnxt_fw_init_one_p3(bp);
14926 return 0;
14927 }
14928
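/* Execute one step of the firmware-provided reset sequence: write the value
 * for @reg_idx to the config space, GRC window, or BAR register it
 * describes, then apply any per-step delay.
 */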
14929 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
14930 {
14931 struct bnxt_fw_health *fw_health = bp->fw_health;
14932 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
14933 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
14934 u32 reg_type, reg_off, delay_msecs;
14935
14936 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
14937 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
14938 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
14939 switch (reg_type) {
14940 case BNXT_FW_HEALTH_REG_TYPE_CFG:
14941 pci_write_config_dword(bp->pdev, reg_off, val);
14942 break;
14943 case BNXT_FW_HEALTH_REG_TYPE_GRC:
14944 writel(reg_off & BNXT_GRC_BASE_MASK,
14945 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
14946 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
14947 fallthrough;
14948 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
14949 writel(val, bp->bar0 + reg_off);
14950 break;
14951 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
14952 writel(val, bp->bar1 + reg_off);
14953 break;
14954 }
14955 if (delay_msecs) {
14956 pci_read_config_dword(bp->pdev, 0, &val);
14957 msleep(delay_msecs);
14958 }
14959 }
14960
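/* Ask the firmware (HWRM_FUNC_QCFG) whether a hot reset is currently allowed
 * for this function.  Defaults to true when the capability is not supported
 * or the query fails, letting firmware enforce the policy.
 */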
14961 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
14962 {
14963 struct hwrm_func_qcfg_output *resp;
14964 struct hwrm_func_qcfg_input *req;
14965 bool result = true; /* firmware will enforce if unknown */
14966
14967 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
14968 return result;
14969
14970 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
14971 return result;
14972
14973 req->fid = cpu_to_le16(0xffff);
14974 resp = hwrm_req_hold(bp, req);
14975 if (!hwrm_req_send(bp, req))
14976 result = !!(le16_to_cpu(resp->flags) &
14977 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
14978 hwrm_req_drop(bp, req);
14979 return result;
14980 }
14981
14982 static void bnxt_reset_all(struct bnxt *bp)
14983 {
14984 struct bnxt_fw_health *fw_health = bp->fw_health;
14985 int i, rc;
14986
14987 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14988 bnxt_fw_reset_via_optee(bp);
14989 bp->fw_reset_timestamp = jiffies;
14990 return;
14991 }
14992
14993 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
14994 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
14995 bnxt_fw_reset_writel(bp, i);
14996 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
14997 struct hwrm_fw_reset_input *req;
14998
14999 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15000 if (!rc) {
15001 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15002 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15003 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15004 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15005 rc = hwrm_req_send(bp, req);
15006 }
15007 if (rc != -ENODEV)
15008 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15009 }
15010 bp->fw_reset_timestamp = jiffies;
15011 }
15012
15013 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15014 {
15015 return time_after(jiffies, bp->fw_reset_timestamp +
15016 (bp->fw_reset_max_dsecs * HZ / 10));
15017 }
15018
15019 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15020 {
15021 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15022 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15023 bnxt_dl_health_fw_status_update(bp, false);
15024 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15025 netif_close(bp->dev);
15026 }
15027
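/* Delayed work implementing the firmware reset state machine.  Each state
 * either advances bp->fw_reset_state and requeues itself, or aborts the
 * reset and closes the device on timeout or error.
 */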
15028 static void bnxt_fw_reset_task(struct work_struct *work)
15029 {
15030 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15031 int rc = 0;
15032
15033 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15034 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15035 return;
15036 }
15037
15038 switch (bp->fw_reset_state) {
15039 case BNXT_FW_RESET_STATE_POLL_VF: {
15040 int n = bnxt_get_registered_vfs(bp);
15041 int tmo;
15042
15043 if (n < 0) {
15044 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15045 n, jiffies_to_msecs(jiffies -
15046 bp->fw_reset_timestamp));
15047 goto fw_reset_abort;
15048 } else if (n > 0) {
15049 if (bnxt_fw_reset_timeout(bp)) {
15050 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15051 bp->fw_reset_state = 0;
15052 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15053 n);
15054 goto ulp_start;
15055 }
15056 bnxt_queue_fw_reset_work(bp, HZ / 10);
15057 return;
15058 }
15059 bp->fw_reset_timestamp = jiffies;
15060 netdev_lock(bp->dev);
15061 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15062 bnxt_fw_reset_abort(bp, rc);
15063 netdev_unlock(bp->dev);
15064 goto ulp_start;
15065 }
15066 bnxt_fw_reset_close(bp);
15067 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15068 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15069 tmo = HZ / 10;
15070 } else {
15071 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15072 tmo = bp->fw_reset_min_dsecs * HZ / 10;
15073 }
15074 netdev_unlock(bp->dev);
15075 bnxt_queue_fw_reset_work(bp, tmo);
15076 return;
15077 }
15078 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15079 u32 val;
15080
15081 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15082 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15083 !bnxt_fw_reset_timeout(bp)) {
15084 bnxt_queue_fw_reset_work(bp, HZ / 5);
15085 return;
15086 }
15087
15088 if (!bp->fw_health->primary) {
15089 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15090
15091 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15092 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15093 return;
15094 }
15095 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15096 }
15097 fallthrough;
15098 case BNXT_FW_RESET_STATE_RESET_FW:
15099 bnxt_reset_all(bp);
15100 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15101 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15102 return;
15103 case BNXT_FW_RESET_STATE_ENABLE_DEV:
15104 bnxt_inv_fw_health_reg(bp);
15105 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15106 !bp->fw_reset_min_dsecs) {
15107 u16 val;
15108
15109 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15110 if (val == 0xffff) {
15111 if (bnxt_fw_reset_timeout(bp)) {
15112 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15113 rc = -ETIMEDOUT;
15114 goto fw_reset_abort;
15115 }
15116 bnxt_queue_fw_reset_work(bp, HZ / 1000);
15117 return;
15118 }
15119 }
15120 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15121 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15122 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15123 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15124 bnxt_dl_remote_reload(bp);
15125 if (pci_enable_device(bp->pdev)) {
15126 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15127 rc = -ENODEV;
15128 goto fw_reset_abort;
15129 }
15130 pci_set_master(bp->pdev);
15131 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15132 fallthrough;
15133 case BNXT_FW_RESET_STATE_POLL_FW:
15134 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15135 rc = bnxt_hwrm_poll(bp);
15136 if (rc) {
15137 if (bnxt_fw_reset_timeout(bp)) {
15138 netdev_err(bp->dev, "Firmware reset aborted\n");
15139 goto fw_reset_abort_status;
15140 }
15141 bnxt_queue_fw_reset_work(bp, HZ / 5);
15142 return;
15143 }
15144 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15145 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15146 fallthrough;
15147 case BNXT_FW_RESET_STATE_OPENING:
15148 while (!netdev_trylock(bp->dev)) {
15149 bnxt_queue_fw_reset_work(bp, HZ / 10);
15150 return;
15151 }
15152 rc = bnxt_open(bp->dev);
15153 if (rc) {
15154 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15155 bnxt_fw_reset_abort(bp, rc);
15156 netdev_unlock(bp->dev);
15157 goto ulp_start;
15158 }
15159
15160 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15161 bp->fw_health->enabled) {
15162 bp->fw_health->last_fw_reset_cnt =
15163 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15164 }
15165 bp->fw_reset_state = 0;
15166 /* Make sure fw_reset_state is 0 before clearing the flag */
15167 smp_mb__before_atomic();
15168 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15169 bnxt_ptp_reapply_pps(bp);
15170 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15171 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15172 bnxt_dl_health_fw_recovery_done(bp);
15173 bnxt_dl_health_fw_status_update(bp, true);
15174 }
15175 netdev_unlock(bp->dev);
15176 bnxt_ulp_start(bp, 0);
15177 bnxt_reenable_sriov(bp);
15178 netdev_lock(bp->dev);
15179 bnxt_vf_reps_alloc(bp);
15180 bnxt_vf_reps_open(bp);
15181 netdev_unlock(bp->dev);
15182 break;
15183 }
15184 return;
15185
15186 fw_reset_abort_status:
15187 if (bp->fw_health->status_reliable ||
15188 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15189 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15190
15191 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15192 }
15193 fw_reset_abort:
15194 netdev_lock(bp->dev);
15195 bnxt_fw_reset_abort(bp, rc);
15196 netdev_unlock(bp->dev);
15197 ulp_start:
15198 bnxt_ulp_start(bp, rc);
15199 }
15200
15201 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15202 {
15203 int rc;
15204 struct bnxt *bp = netdev_priv(dev);
15205
15206 SET_NETDEV_DEV(dev, &pdev->dev);
15207
15208 /* enable device (incl. PCI PM wakeup), and bus-mastering */
15209 rc = pci_enable_device(pdev);
15210 if (rc) {
15211 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15212 goto init_err;
15213 }
15214
15215 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15216 dev_err(&pdev->dev,
15217 "Cannot find PCI device base address, aborting\n");
15218 rc = -ENODEV;
15219 goto init_err_disable;
15220 }
15221
15222 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15223 if (rc) {
15224 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15225 goto init_err_disable;
15226 }
15227
15228 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15229 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15230 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15231 rc = -EIO;
15232 goto init_err_release;
15233 }
15234
15235 pci_set_master(pdev);
15236
15237 bp->dev = dev;
15238 bp->pdev = pdev;
15239
15240 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15241 * determines the BAR size.
15242 */
15243 bp->bar0 = pci_ioremap_bar(pdev, 0);
15244 if (!bp->bar0) {
15245 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15246 rc = -ENOMEM;
15247 goto init_err_release;
15248 }
15249
15250 bp->bar2 = pci_ioremap_bar(pdev, 4);
15251 if (!bp->bar2) {
15252 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15253 rc = -ENOMEM;
15254 goto init_err_release;
15255 }
15256
15257 INIT_WORK(&bp->sp_task, bnxt_sp_task);
15258 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15259
15260 spin_lock_init(&bp->ntp_fltr_lock);
15261 #if BITS_PER_LONG == 32
15262 spin_lock_init(&bp->db_lock);
15263 #endif
15264
15265 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15266 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15267
15268 timer_setup(&bp->timer, bnxt_timer, 0);
15269 bp->current_interval = BNXT_TIMER_INTERVAL;
15270
15271 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15272 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15273
15274 clear_bit(BNXT_STATE_OPEN, &bp->state);
15275 return 0;
15276
15277 init_err_release:
15278 bnxt_unmap_bars(bp, pdev);
15279 pci_release_regions(pdev);
15280
15281 init_err_disable:
15282 pci_disable_device(pdev);
15283
15284 init_err:
15285 return rc;
15286 }
15287
15288 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15289 {
15290 struct sockaddr *addr = p;
15291 struct bnxt *bp = netdev_priv(dev);
15292 int rc = 0;
15293
15294 netdev_assert_locked(dev);
15295
15296 if (!is_valid_ether_addr(addr->sa_data))
15297 return -EADDRNOTAVAIL;
15298
15299 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15300 return 0;
15301
15302 rc = bnxt_approve_mac(bp, addr->sa_data, true);
15303 if (rc)
15304 return rc;
15305
15306 eth_hw_addr_set(dev, addr->sa_data);
15307 bnxt_clear_usr_fltrs(bp, true);
15308 if (netif_running(dev)) {
15309 bnxt_close_nic(bp, false, false);
15310 rc = bnxt_open_nic(bp, false, false);
15311 }
15312
15313 return rc;
15314 }
15315
15316 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15317 {
15318 struct bnxt *bp = netdev_priv(dev);
15319
15320 netdev_assert_locked(dev);
15321
15322 if (netif_running(dev))
15323 bnxt_close_nic(bp, true, false);
15324
15325 WRITE_ONCE(dev->mtu, new_mtu);
15326
15327 /* MTU change may change the AGG ring settings if an XDP multi-buffer
15328 * program is attached. We need to set the AGG ring settings and
15329 * rx_skb_func accordingly.
15330 */
15331 if (READ_ONCE(bp->xdp_prog))
15332 bnxt_set_rx_skb_mode(bp, true);
15333
15334 bnxt_set_ring_params(bp);
15335
15336 if (netif_running(dev))
15337 return bnxt_open_nic(bp, true, false);
15338
15339 return 0;
15340 }
15341
15342 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15343 {
15344 struct bnxt *bp = netdev_priv(dev);
15345 bool sh = false;
15346 int rc, tx_cp;
15347
15348 if (tc > bp->max_tc) {
15349 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15350 tc, bp->max_tc);
15351 return -EINVAL;
15352 }
15353
15354 if (bp->num_tc == tc)
15355 return 0;
15356
15357 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15358 sh = true;
15359
15360 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15361 sh, tc, bp->tx_nr_rings_xdp);
15362 if (rc)
15363 return rc;
15364
15365 /* Needs to close the device and do hw resource re-allocations */
15366 if (netif_running(bp->dev))
15367 bnxt_close_nic(bp, true, false);
15368
15369 if (tc) {
15370 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15371 netdev_set_num_tc(dev, tc);
15372 bp->num_tc = tc;
15373 } else {
15374 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15375 netdev_reset_tc(dev);
15376 bp->num_tc = 0;
15377 }
15378 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15379 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15380 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15381 tx_cp + bp->rx_nr_rings;
15382
15383 if (netif_running(bp->dev))
15384 return bnxt_open_nic(bp, true, false);
15385
15386 return 0;
15387 }
15388
15389 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15390 void *cb_priv)
15391 {
15392 struct bnxt *bp = cb_priv;
15393
15394 if (!bnxt_tc_flower_enabled(bp) ||
15395 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15396 return -EOPNOTSUPP;
15397
15398 switch (type) {
15399 case TC_SETUP_CLSFLOWER:
15400 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15401 default:
15402 return -EOPNOTSUPP;
15403 }
15404 }
15405
15406 LIST_HEAD(bnxt_block_cb_list);
15407
15408 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15409 void *type_data)
15410 {
15411 struct bnxt *bp = netdev_priv(dev);
15412
15413 switch (type) {
15414 case TC_SETUP_BLOCK:
15415 return flow_block_cb_setup_simple(type_data,
15416 &bnxt_block_cb_list,
15417 bnxt_setup_tc_block_cb,
15418 bp, bp, true);
15419 case TC_SETUP_QDISC_MQPRIO: {
15420 struct tc_mqprio_qopt *mqprio = type_data;
15421
15422 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15423
15424 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15425 }
15426 default:
15427 return -EOPNOTSUPP;
15428 }
15429 }
15430
15431 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15432 const struct sk_buff *skb)
15433 {
15434 struct bnxt_vnic_info *vnic;
15435
15436 if (skb)
15437 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15438
15439 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15440 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15441 }
15442
15443 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15444 u32 idx)
15445 {
15446 struct hlist_head *head;
15447 int bit_id;
15448
15449 spin_lock_bh(&bp->ntp_fltr_lock);
15450 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15451 if (bit_id < 0) {
15452 spin_unlock_bh(&bp->ntp_fltr_lock);
15453 return -ENOMEM;
15454 }
15455
15456 fltr->base.sw_id = (u16)bit_id;
15457 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15458 fltr->base.flags |= BNXT_ACT_RING_DST;
15459 head = &bp->ntp_fltr_hash_tbl[idx];
15460 hlist_add_head_rcu(&fltr->base.hash, head);
15461 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15462 bnxt_insert_usr_fltr(bp, &fltr->base);
15463 bp->ntp_fltr_count++;
15464 spin_unlock_bh(&bp->ntp_fltr_lock);
15465 return 0;
15466 }
15467
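/* Return true if two ntuple filters match on protocol, source/destination
 * addresses and ports (including their masks), flow dissector control
 * flags, and the associated L2 filter.
 */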
15468 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15469 struct bnxt_ntuple_filter *f2)
15470 {
15471 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15472 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15473 struct flow_keys *keys1 = &f1->fkeys;
15474 struct flow_keys *keys2 = &f2->fkeys;
15475
15476 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15477 keys1->basic.ip_proto != keys2->basic.ip_proto)
15478 return false;
15479
15480 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15481 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15482 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15483 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15484 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15485 return false;
15486 } else {
15487 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15488 &keys2->addrs.v6addrs.src) ||
15489 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15490 &masks2->addrs.v6addrs.src) ||
15491 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15492 &keys2->addrs.v6addrs.dst) ||
15493 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15494 &masks2->addrs.v6addrs.dst))
15495 return false;
15496 }
15497
15498 return keys1->ports.src == keys2->ports.src &&
15499 masks1->ports.src == masks2->ports.src &&
15500 keys1->ports.dst == keys2->ports.dst &&
15501 masks1->ports.dst == masks2->ports.dst &&
15502 keys1->control.flags == keys2->control.flags &&
15503 f1->l2_fltr == f2->l2_fltr;
15504 }
15505
15506 struct bnxt_ntuple_filter *
15507 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15508 struct bnxt_ntuple_filter *fltr, u32 idx)
15509 {
15510 struct bnxt_ntuple_filter *f;
15511 struct hlist_head *head;
15512
15513 head = &bp->ntp_fltr_hash_tbl[idx];
15514 hlist_for_each_entry_rcu(f, head, base.hash) {
15515 if (bnxt_fltr_match(f, fltr))
15516 return f;
15517 }
15518 return NULL;
15519 }
15520
15521 #ifdef CONFIG_RFS_ACCEL
15522 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15523 u16 rxq_index, u32 flow_id)
15524 {
15525 struct bnxt *bp = netdev_priv(dev);
15526 struct bnxt_ntuple_filter *fltr, *new_fltr;
15527 struct flow_keys *fkeys;
15528 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15529 struct bnxt_l2_filter *l2_fltr;
15530 int rc = 0, idx;
15531 u32 flags;
15532
15533 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15534 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15535 atomic_inc(&l2_fltr->refcnt);
15536 } else {
15537 struct bnxt_l2_key key;
15538
15539 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15540 key.vlan = 0;
15541 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15542 if (!l2_fltr)
15543 return -EINVAL;
15544 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15545 bnxt_del_l2_filter(bp, l2_fltr);
15546 return -EINVAL;
15547 }
15548 }
15549 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15550 if (!new_fltr) {
15551 bnxt_del_l2_filter(bp, l2_fltr);
15552 return -ENOMEM;
15553 }
15554
15555 fkeys = &new_fltr->fkeys;
15556 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15557 rc = -EPROTONOSUPPORT;
15558 goto err_free;
15559 }
15560
15561 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15562 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15563 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15564 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15565 rc = -EPROTONOSUPPORT;
15566 goto err_free;
15567 }
15568 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15569 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15570 if (bp->hwrm_spec_code < 0x10601) {
15571 rc = -EPROTONOSUPPORT;
15572 goto err_free;
15573 }
15574 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15575 }
15576 flags = fkeys->control.flags;
15577 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15578 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15579 rc = -EPROTONOSUPPORT;
15580 goto err_free;
15581 }
15582 new_fltr->l2_fltr = l2_fltr;
15583
15584 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15585 rcu_read_lock();
15586 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15587 if (fltr) {
15588 rc = fltr->base.sw_id;
15589 rcu_read_unlock();
15590 goto err_free;
15591 }
15592 rcu_read_unlock();
15593
15594 new_fltr->flow_id = flow_id;
15595 new_fltr->base.rxq = rxq_index;
15596 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15597 if (!rc) {
15598 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15599 return new_fltr->base.sw_id;
15600 }
15601
15602 err_free:
15603 bnxt_del_l2_filter(bp, l2_fltr);
15604 kfree(new_fltr);
15605 return rc;
15606 }
15607 #endif
15608
15609 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15610 {
15611 spin_lock_bh(&bp->ntp_fltr_lock);
15612 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15613 spin_unlock_bh(&bp->ntp_fltr_lock);
15614 return;
15615 }
15616 hlist_del_rcu(&fltr->base.hash);
15617 bnxt_del_one_usr_fltr(bp, &fltr->base);
15618 bp->ntp_fltr_count--;
15619 spin_unlock_bh(&bp->ntp_fltr_lock);
15620 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15621 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15622 kfree_rcu(fltr, base.rcu);
15623 }
15624
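/* Walk the aRFS filter hash table: free filters that RPS reports may have
 * expired and install pending filters in hardware via HWRM.
 */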
15625 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15626 {
15627 #ifdef CONFIG_RFS_ACCEL
15628 int i;
15629
15630 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15631 struct hlist_head *head;
15632 struct hlist_node *tmp;
15633 struct bnxt_ntuple_filter *fltr;
15634 int rc;
15635
15636 head = &bp->ntp_fltr_hash_tbl[i];
15637 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15638 bool del = false;
15639
15640 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15641 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15642 continue;
15643 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15644 fltr->flow_id,
15645 fltr->base.sw_id)) {
15646 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15647 fltr);
15648 del = true;
15649 }
15650 } else {
15651 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15652 fltr);
15653 if (rc)
15654 del = true;
15655 else
15656 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15657 }
15658
15659 if (del)
15660 bnxt_del_ntp_filter(bp, fltr);
15661 }
15662 }
15663 #endif
15664 }
15665
15666 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15667 unsigned int entry, struct udp_tunnel_info *ti)
15668 {
15669 struct bnxt *bp = netdev_priv(netdev);
15670 unsigned int cmd;
15671
15672 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15673 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15674 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15675 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15676 else
15677 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15678
15679 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15680 }
15681
15682 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15683 unsigned int entry, struct udp_tunnel_info *ti)
15684 {
15685 struct bnxt *bp = netdev_priv(netdev);
15686 unsigned int cmd;
15687
15688 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15689 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15690 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15691 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15692 else
15693 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15694
15695 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15696 }
15697
15698 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15699 .set_port = bnxt_udp_tunnel_set_port,
15700 .unset_port = bnxt_udp_tunnel_unset_port,
15701 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15702 .tables = {
15703 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15704 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15705 },
15706 }, bnxt_udp_tunnels_p7 = {
15707 .set_port = bnxt_udp_tunnel_set_port,
15708 .unset_port = bnxt_udp_tunnel_unset_port,
15709 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15710 .tables = {
15711 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15712 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15713 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15714 },
15715 };
15716
15717 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15718 struct net_device *dev, u32 filter_mask,
15719 int nlflags)
15720 {
15721 struct bnxt *bp = netdev_priv(dev);
15722
15723 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15724 nlflags, filter_mask, NULL);
15725 }
15726
15727 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15728 u16 flags, struct netlink_ext_ack *extack)
15729 {
15730 struct bnxt *bp = netdev_priv(dev);
15731 struct nlattr *attr, *br_spec;
15732 int rem, rc = 0;
15733
15734 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15735 return -EOPNOTSUPP;
15736
15737 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15738 if (!br_spec)
15739 return -EINVAL;
15740
15741 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15742 u16 mode;
15743
15744 mode = nla_get_u16(attr);
15745 if (mode == bp->br_mode)
15746 break;
15747
15748 rc = bnxt_hwrm_set_br_mode(bp, mode);
15749 if (!rc)
15750 bp->br_mode = mode;
15751 break;
15752 }
15753 return rc;
15754 }
15755
15756 int bnxt_get_port_parent_id(struct net_device *dev,
15757 struct netdev_phys_item_id *ppid)
15758 {
15759 struct bnxt *bp = netdev_priv(dev);
15760
15761 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15762 return -EOPNOTSUPP;
15763
15764 /* The PF and its VF-reps only support the switchdev framework */
15765 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15766 return -EOPNOTSUPP;
15767
15768 ppid->id_len = sizeof(bp->dsn);
15769 memcpy(ppid->id, bp->dsn, ppid->id_len);
15770
15771 return 0;
15772 }
15773
15774 static const struct net_device_ops bnxt_netdev_ops = {
15775 .ndo_open = bnxt_open,
15776 .ndo_start_xmit = bnxt_start_xmit,
15777 .ndo_stop = bnxt_close,
15778 .ndo_get_stats64 = bnxt_get_stats64,
15779 .ndo_set_rx_mode = bnxt_set_rx_mode,
15780 .ndo_eth_ioctl = bnxt_ioctl,
15781 .ndo_validate_addr = eth_validate_addr,
15782 .ndo_set_mac_address = bnxt_change_mac_addr,
15783 .ndo_change_mtu = bnxt_change_mtu,
15784 .ndo_fix_features = bnxt_fix_features,
15785 .ndo_set_features = bnxt_set_features,
15786 .ndo_features_check = bnxt_features_check,
15787 .ndo_tx_timeout = bnxt_tx_timeout,
15788 #ifdef CONFIG_BNXT_SRIOV
15789 .ndo_get_vf_config = bnxt_get_vf_config,
15790 .ndo_set_vf_mac = bnxt_set_vf_mac,
15791 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
15792 .ndo_set_vf_rate = bnxt_set_vf_bw,
15793 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
15794 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
15795 .ndo_set_vf_trust = bnxt_set_vf_trust,
15796 #endif
15797 .ndo_setup_tc = bnxt_setup_tc,
15798 #ifdef CONFIG_RFS_ACCEL
15799 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
15800 #endif
15801 .ndo_bpf = bnxt_xdp,
15802 .ndo_xdp_xmit = bnxt_xdp_xmit,
15803 .ndo_bridge_getlink = bnxt_bridge_getlink,
15804 .ndo_bridge_setlink = bnxt_bridge_setlink,
15805 .ndo_hwtstamp_get = bnxt_hwtstamp_get,
15806 .ndo_hwtstamp_set = bnxt_hwtstamp_set,
15807 };
15808
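/* Per-queue RX statistics for netdev_stat_ops: sum the unicast, multicast
 * and broadcast counters from the ring's software stats block and report
 * OOM discards as alloc_fail.
 */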
15809 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15810 struct netdev_queue_stats_rx *stats)
15811 {
15812 struct bnxt *bp = netdev_priv(dev);
15813 struct bnxt_cp_ring_info *cpr;
15814 u64 *sw;
15815
15816 if (!bp->bnapi)
15817 return;
15818
15819 cpr = &bp->bnapi[i]->cp_ring;
15820 sw = cpr->stats.sw_stats;
15821
15822 stats->packets = 0;
15823 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15824 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15825 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15826
15827 stats->bytes = 0;
15828 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15829 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15830 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15831
15832 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15833 }
15834
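/* Per-queue TX statistics for netdev_stat_ops, taken from the software
 * stats of the completion ring that services this TX ring.
 */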
15835 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15836 struct netdev_queue_stats_tx *stats)
15837 {
15838 struct bnxt *bp = netdev_priv(dev);
15839 struct bnxt_napi *bnapi;
15840 u64 *sw;
15841
15842 if (!bp->tx_ring)
15843 return;
15844
15845 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15846 sw = bnapi->cp_ring.stats.sw_stats;
15847
15848 stats->packets = 0;
15849 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15850 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15851 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15852
15853 stats->bytes = 0;
15854 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15855 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15856 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15857 }
15858
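/* Base device counters for netdev_stat_ops: totals saved from before the
 * last ring reconfiguration (net_stats_prev / ring_err_stats_prev).
 */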
15859 static void bnxt_get_base_stats(struct net_device *dev,
15860 struct netdev_queue_stats_rx *rx,
15861 struct netdev_queue_stats_tx *tx)
15862 {
15863 struct bnxt *bp = netdev_priv(dev);
15864
15865 rx->packets = bp->net_stats_prev.rx_packets;
15866 rx->bytes = bp->net_stats_prev.rx_bytes;
15867 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15868
15869 tx->packets = bp->net_stats_prev.tx_packets;
15870 tx->bytes = bp->net_stats_prev.tx_bytes;
15871 }
15872
15873 static const struct netdev_stat_ops bnxt_stat_ops = {
15874 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
15875 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
15876 .get_base_stats = bnxt_get_base_stats,
15877 };
15878
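/* Queue management (ndo_queue_mem_alloc): build a copy of RX ring @idx in
 * @qmem with freshly allocated page pools, XDP rxq info, descriptor
 * memory, aggregation bitmap and TPA state, prefilled with buffers so it
 * can later be swapped in by bnxt_queue_start().
 */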
15879 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
15880 {
15881 struct bnxt_rx_ring_info *rxr, *clone;
15882 struct bnxt *bp = netdev_priv(dev);
15883 struct bnxt_ring_struct *ring;
15884 int rc;
15885
15886 if (!bp->rx_ring)
15887 return -ENETDOWN;
15888
15889 rxr = &bp->rx_ring[idx];
15890 clone = qmem;
15891 memcpy(clone, rxr, sizeof(*rxr));
15892 bnxt_init_rx_ring_struct(bp, clone);
15893 bnxt_reset_rx_ring_struct(bp, clone);
15894
15895 clone->rx_prod = 0;
15896 clone->rx_agg_prod = 0;
15897 clone->rx_sw_agg_prod = 0;
15898 clone->rx_next_cons = 0;
15899 clone->need_head_pool = false;
15900
15901 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
15902 if (rc)
15903 return rc;
15904
15905 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
15906 if (rc < 0)
15907 goto err_page_pool_destroy;
15908
15909 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
15910 MEM_TYPE_PAGE_POOL,
15911 clone->page_pool);
15912 if (rc)
15913 goto err_rxq_info_unreg;
15914
15915 ring = &clone->rx_ring_struct;
15916 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15917 if (rc)
15918 goto err_free_rx_ring;
15919
15920 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
15921 ring = &clone->rx_agg_ring_struct;
15922 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15923 if (rc)
15924 goto err_free_rx_agg_ring;
15925
15926 rc = bnxt_alloc_rx_agg_bmap(bp, clone);
15927 if (rc)
15928 goto err_free_rx_agg_ring;
15929 }
15930
15931 if (bp->flags & BNXT_FLAG_TPA) {
15932 rc = bnxt_alloc_one_tpa_info(bp, clone);
15933 if (rc)
15934 goto err_free_tpa_info;
15935 }
15936
15937 bnxt_init_one_rx_ring_rxbd(bp, clone);
15938 bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
15939
15940 bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
15941 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15942 bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
15943 if (bp->flags & BNXT_FLAG_TPA)
15944 bnxt_alloc_one_tpa_info_data(bp, clone);
15945
15946 return 0;
15947
15948 err_free_tpa_info:
15949 bnxt_free_one_tpa_info(bp, clone);
15950 err_free_rx_agg_ring:
15951 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
15952 err_free_rx_ring:
15953 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
15954 err_rxq_info_unreg:
15955 xdp_rxq_info_unreg(&clone->xdp_rxq);
15956 err_page_pool_destroy:
15957 page_pool_destroy(clone->page_pool);
15958 page_pool_destroy(clone->head_pool);
15959 clone->page_pool = NULL;
15960 clone->head_pool = NULL;
15961 return rc;
15962 }
15963
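/* Queue management (ndo_queue_mem_free): free the buffers, TPA info, XDP
 * rxq registration, page pools, ring memory and aggregation bitmap held
 * by the RX ring copy in @qmem.
 */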
15964 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
15965 {
15966 struct bnxt_rx_ring_info *rxr = qmem;
15967 struct bnxt *bp = netdev_priv(dev);
15968 struct bnxt_ring_struct *ring;
15969
15970 bnxt_free_one_rx_ring_skbs(bp, rxr);
15971 bnxt_free_one_tpa_info(bp, rxr);
15972
15973 xdp_rxq_info_unreg(&rxr->xdp_rxq);
15974
15975 page_pool_destroy(rxr->page_pool);
15976 page_pool_destroy(rxr->head_pool);
15977 rxr->page_pool = NULL;
15978 rxr->head_pool = NULL;
15979
15980 ring = &rxr->rx_ring_struct;
15981 bnxt_free_ring(bp, &ring->ring_mem);
15982
15983 ring = &rxr->rx_agg_ring_struct;
15984 bnxt_free_ring(bp, &ring->ring_mem);
15985
15986 kfree(rxr->rx_agg_bmap);
15987 rxr->rx_agg_bmap = NULL;
15988 }
15989
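/* Copy the RX (and, if enabled, aggregation) ring descriptor memory
 * pointers from @src to @dst.  The WARN_ONs check that both rings were
 * sized with identical parameters.
 */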
15990 static void bnxt_copy_rx_ring(struct bnxt *bp,
15991 struct bnxt_rx_ring_info *dst,
15992 struct bnxt_rx_ring_info *src)
15993 {
15994 struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
15995 struct bnxt_ring_struct *dst_ring, *src_ring;
15996 int i;
15997
15998 dst_ring = &dst->rx_ring_struct;
15999 dst_rmem = &dst_ring->ring_mem;
16000 src_ring = &src->rx_ring_struct;
16001 src_rmem = &src_ring->ring_mem;
16002
16003 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16004 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16005 WARN_ON(dst_rmem->flags != src_rmem->flags);
16006 WARN_ON(dst_rmem->depth != src_rmem->depth);
16007 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16008 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16009
16010 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16011 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16012 *dst_rmem->vmem = *src_rmem->vmem;
16013 for (i = 0; i < dst_rmem->nr_pages; i++) {
16014 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16015 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16016 }
16017
16018 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
16019 return;
16020
16021 dst_ring = &dst->rx_agg_ring_struct;
16022 dst_rmem = &dst_ring->ring_mem;
16023 src_ring = &src->rx_agg_ring_struct;
16024 src_rmem = &src_ring->ring_mem;
16025
16026 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16027 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16028 WARN_ON(dst_rmem->flags != src_rmem->flags);
16029 WARN_ON(dst_rmem->depth != src_rmem->depth);
16030 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16031 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16032 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16033
16034 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16035 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16036 *dst_rmem->vmem = *src_rmem->vmem;
16037 for (i = 0; i < dst_rmem->nr_pages; i++) {
16038 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16039 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16040 }
16041
16042 dst->rx_agg_bmap = src->rx_agg_bmap;
16043 }
16044
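/* Queue management (ndo_queue_start): adopt the prefilled ring state from
 * @qmem into the live RX ring, re-create the firmware rings, ring the
 * doorbells, re-enable NAPI and restore the VNIC MRU settings.
 */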
16045 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
16046 {
16047 struct bnxt *bp = netdev_priv(dev);
16048 struct bnxt_rx_ring_info *rxr, *clone;
16049 struct bnxt_cp_ring_info *cpr;
16050 struct bnxt_vnic_info *vnic;
16051 struct bnxt_napi *bnapi;
16052 int i, rc;
16053 u16 mru;
16054
16055 rxr = &bp->rx_ring[idx];
16056 clone = qmem;
16057
16058 rxr->rx_prod = clone->rx_prod;
16059 rxr->rx_agg_prod = clone->rx_agg_prod;
16060 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16061 rxr->rx_next_cons = clone->rx_next_cons;
16062 rxr->rx_tpa = clone->rx_tpa;
16063 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16064 rxr->page_pool = clone->page_pool;
16065 rxr->head_pool = clone->head_pool;
16066 rxr->xdp_rxq = clone->xdp_rxq;
16067 rxr->need_head_pool = clone->need_head_pool;
16068
16069 bnxt_copy_rx_ring(bp, rxr, clone);
16070
16071 bnapi = rxr->bnapi;
16072 cpr = &bnapi->cp_ring;
16073
16074 /* All rings have been reserved and previously allocated.
16075 * Reallocating with the same parameters should never fail.
16076 */
16077 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16078 if (rc)
16079 goto err_reset;
16080
16081 if (bp->tph_mode) {
16082 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16083 if (rc)
16084 goto err_reset;
16085 }
16086
16087 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16088 if (rc)
16089 goto err_reset;
16090
16091 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16092 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16093 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16094
16095 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16096 rc = bnxt_tx_queue_start(bp, idx);
16097 if (rc)
16098 goto err_reset;
16099 }
16100
16101 bnxt_enable_rx_page_pool(rxr);
16102 napi_enable_locked(&bnapi->napi);
16103 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16104
16105 mru = bp->dev->mtu + VLAN_ETH_HLEN;
16106 for (i = 0; i < bp->nr_vnics; i++) {
16107 vnic = &bp->vnic_info[i];
16108
16109 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16110 if (rc)
16111 return rc;
16112 }
16113 return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16114
16115 err_reset:
16116 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16117 rc);
16118 napi_enable_locked(&bnapi->napi);
16119 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16120 bnxt_reset_task(bp, true);
16121 return rc;
16122 }
16123
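/* Queue management (ndo_queue_stop): quiesce RX ring @idx by clearing the
 * VNIC MRU, freeing the firmware rings, stopping the paired TX queue when
 * rings are shared, and finally disabling NAPI.  A copy of the ring state
 * is saved in @qmem for a later restart.
 */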
16124 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16125 {
16126 struct bnxt *bp = netdev_priv(dev);
16127 struct bnxt_rx_ring_info *rxr;
16128 struct bnxt_cp_ring_info *cpr;
16129 struct bnxt_vnic_info *vnic;
16130 struct bnxt_napi *bnapi;
16131 int i;
16132
16133 for (i = 0; i < bp->nr_vnics; i++) {
16134 vnic = &bp->vnic_info[i];
16135
16136 bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16137 }
16138 bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16139 /* Make sure NAPI sees that the VNIC is disabled */
16140 synchronize_net();
16141 rxr = &bp->rx_ring[idx];
16142 bnapi = rxr->bnapi;
16143 cpr = &bnapi->cp_ring;
16144 cancel_work_sync(&cpr->dim.work);
16145 bnxt_hwrm_rx_ring_free(bp, rxr, false);
16146 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16147 page_pool_disable_direct_recycling(rxr->page_pool);
16148 if (bnxt_separate_head_pool(rxr))
16149 page_pool_disable_direct_recycling(rxr->head_pool);
16150
16151 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16152 bnxt_tx_queue_stop(bp, idx);
16153
16154 /* Disable NAPI only after freeing the rings because the HWRM_RING_FREE
16155 * completion is handled in NAPI, which guarantees no more DMA on that
16156 * ring after the completion has been seen.
16157 */
16158 napi_disable_locked(&bnapi->napi);
16159
16160 if (bp->tph_mode) {
16161 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16162 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16163 }
16164 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16165
16166 memcpy(qmem, rxr, sizeof(*rxr));
16167 bnxt_init_rx_ring_struct(bp, qmem);
16168
16169 return 0;
16170 }
16171
16172 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16173 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
16174 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
16175 .ndo_queue_mem_free = bnxt_queue_mem_free,
16176 .ndo_queue_start = bnxt_queue_start,
16177 .ndo_queue_stop = bnxt_queue_stop,
16178 };
16179
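/* PCI remove: disable SR-IOV, detach the RDMA auxiliary device,
 * unregister the netdev and free all remaining driver state.
 */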
16180 static void bnxt_remove_one(struct pci_dev *pdev)
16181 {
16182 struct net_device *dev = pci_get_drvdata(pdev);
16183 struct bnxt *bp = netdev_priv(dev);
16184
16185 if (BNXT_PF(bp))
16186 __bnxt_sriov_disable(bp);
16187
16188 bnxt_rdma_aux_device_del(bp);
16189
16190 unregister_netdev(dev);
16191 bnxt_ptp_clear(bp);
16192
16193 bnxt_rdma_aux_device_uninit(bp);
16194
16195 bnxt_free_l2_filters(bp, true);
16196 bnxt_free_ntp_fltrs(bp, true);
16197 WARN_ON(bp->num_rss_ctx);
16198 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16199 /* Flush any pending tasks */
16200 cancel_work_sync(&bp->sp_task);
16201 cancel_delayed_work_sync(&bp->fw_reset_task);
16202 bp->sp_event = 0;
16203
16204 bnxt_dl_fw_reporters_destroy(bp);
16205 bnxt_dl_unregister(bp);
16206 bnxt_shutdown_tc(bp);
16207
16208 bnxt_clear_int_mode(bp);
16209 bnxt_hwrm_func_drv_unrgtr(bp);
16210 bnxt_free_hwrm_resources(bp);
16211 bnxt_hwmon_uninit(bp);
16212 bnxt_ethtool_free(bp);
16213 bnxt_dcb_free(bp);
16214 kfree(bp->ptp_cfg);
16215 bp->ptp_cfg = NULL;
16216 kfree(bp->fw_health);
16217 bp->fw_health = NULL;
16218 bnxt_cleanup_pci(bp);
16219 bnxt_free_ctx_mem(bp, true);
16220 bnxt_free_crash_dump_mem(bp);
16221 kfree(bp->rss_indir_tbl);
16222 bp->rss_indir_tbl = NULL;
16223 bnxt_free_port_stats(bp);
16224 free_netdev(dev);
16225 }
16226
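/* Query PHY and MAC capabilities from firmware; when @fw_dflt is set,
 * also read the current link state and seed the initial ethtool link
 * settings.
 */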
16227 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16228 {
16229 int rc = 0;
16230 struct bnxt_link_info *link_info = &bp->link_info;
16231
16232 bp->phy_flags = 0;
16233 rc = bnxt_hwrm_phy_qcaps(bp);
16234 if (rc) {
16235 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16236 rc);
16237 return rc;
16238 }
16239 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16240 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16241 else
16242 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16243
16244 bp->mac_flags = 0;
16245 bnxt_hwrm_mac_qcaps(bp);
16246
16247 if (!fw_dflt)
16248 return 0;
16249
16250 mutex_lock(&bp->link_lock);
16251 rc = bnxt_update_link(bp, false);
16252 if (rc) {
16253 mutex_unlock(&bp->link_lock);
16254 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16255 rc);
16256 return rc;
16257 }
16258
16259 /* Older firmware does not have supported_auto_speeds, so assume
16260 * that all supported speeds can be autonegotiated.
16261 */
16262 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16263 link_info->support_auto_speeds = link_info->support_speeds;
16264
16265 bnxt_init_ethtool_link_settings(bp);
16266 mutex_unlock(&bp->link_lock);
16267 return 0;
16268 }
16269
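/* Maximum number of MSI-X vectors advertised by the PCI MSI-X capability,
 * or 1 if the device has no MSI-X capability.
 */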
16270 static int bnxt_get_max_irq(struct pci_dev *pdev)
16271 {
16272 u16 ctrl;
16273
16274 if (!pdev->msix_cap)
16275 return 1;
16276
16277 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16278 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16279 }
16280
16281 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16282 int *max_cp)
16283 {
16284 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16285 int max_ring_grps = 0, max_irq;
16286
16287 *max_tx = hw_resc->max_tx_rings;
16288 *max_rx = hw_resc->max_rx_rings;
16289 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16290 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16291 bnxt_get_ulp_msix_num_in_use(bp),
16292 hw_resc->max_stat_ctxs -
16293 bnxt_get_ulp_stat_ctxs_in_use(bp));
16294 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16295 *max_cp = min_t(int, *max_cp, max_irq);
16296 max_ring_grps = hw_resc->max_hw_ring_grps;
16297 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16298 *max_cp -= 1;
16299 *max_rx -= 2;
16300 }
16301 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16302 *max_rx >>= 1;
16303 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16304 int rc;
16305
16306 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16307 if (rc) {
16308 *max_rx = 0;
16309 *max_tx = 0;
16310 }
16311 /* On P5_PLUS chips, the max_cp output param should be the number of available NQs */
16312 *max_cp = max_irq;
16313 }
16314 *max_rx = min_t(int, *max_rx, max_ring_grps);
16315 }
16316
16317 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16318 {
16319 int rx, tx, cp;
16320
16321 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
16322 *max_rx = rx;
16323 *max_tx = tx;
16324 if (!rx || !tx || !cp)
16325 return -ENOMEM;
16326
16327 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16328 }
16329
16330 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16331 bool shared)
16332 {
16333 int rc;
16334
16335 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16336 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16337 /* Not enough rings, try disabling agg rings. */
16338 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16339 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16340 if (rc) {
16341 /* set BNXT_FLAG_AGG_RINGS back for consistency */
16342 bp->flags |= BNXT_FLAG_AGG_RINGS;
16343 return rc;
16344 }
16345 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16346 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16347 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16348 bnxt_set_ring_params(bp);
16349 }
16350
16351 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16352 int max_cp, max_stat, max_irq;
16353
16354 /* Reserve minimum resources for RoCE */
16355 max_cp = bnxt_get_max_func_cp_rings(bp);
16356 max_stat = bnxt_get_max_func_stat_ctxs(bp);
16357 max_irq = bnxt_get_max_func_irqs(bp);
16358 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16359 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16360 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16361 return 0;
16362
16363 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16364 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16365 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16366 max_cp = min_t(int, max_cp, max_irq);
16367 max_cp = min_t(int, max_cp, max_stat);
16368 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16369 if (rc)
16370 rc = 0;
16371 }
16372 return rc;
16373 }
16374
16375 /* In the initial default shared ring setting, each shared ring must have
16376 * an RX/TX ring pair.
16377 */
16378 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16379 {
16380 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16381 bp->rx_nr_rings = bp->cp_nr_rings;
16382 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16383 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16384 }
16385
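/* Choose the default number of RX/TX/completion rings based on CPU count,
 * port count and available hardware resources, then reserve them with
 * firmware.  Leftover MSI-X vectors may be handed to the ULP (RoCE)
 * driver.
 */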
16386 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16387 {
16388 int dflt_rings, max_rx_rings, max_tx_rings, rc;
16389 int avail_msix;
16390
16391 if (!bnxt_can_reserve_rings(bp))
16392 return 0;
16393
16394 if (sh)
16395 bp->flags |= BNXT_FLAG_SHARED_RINGS;
16396 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16397 /* Reduce default rings on multi-port cards so that total default
16398 * rings do not exceed CPU count.
16399 */
16400 if (bp->port_count > 1) {
16401 int max_rings =
16402 max_t(int, num_online_cpus() / bp->port_count, 1);
16403
16404 dflt_rings = min_t(int, dflt_rings, max_rings);
16405 }
16406 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16407 if (rc)
16408 return rc;
16409 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16410 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16411 if (sh)
16412 bnxt_trim_dflt_sh_rings(bp);
16413 else
16414 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16415 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16416
16417 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16418 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16419 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16420
16421 bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16422 bnxt_set_dflt_ulp_stat_ctxs(bp);
16423 }
16424
16425 rc = __bnxt_reserve_rings(bp);
16426 if (rc && rc != -ENODEV)
16427 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16428 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16429 if (sh)
16430 bnxt_trim_dflt_sh_rings(bp);
16431
16432 /* Rings may have been trimmed, re-reserve the trimmed rings. */
16433 if (bnxt_need_reserve_rings(bp)) {
16434 rc = __bnxt_reserve_rings(bp);
16435 if (rc && rc != -ENODEV)
16436 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16437 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16438 }
16439 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16440 bp->rx_nr_rings++;
16441 bp->cp_nr_rings++;
16442 }
16443 if (rc) {
16444 bp->tx_nr_rings = 0;
16445 bp->rx_nr_rings = 0;
16446 }
16447 return rc;
16448 }
16449
16450 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16451 {
16452 int rc;
16453
16454 if (bp->tx_nr_rings)
16455 return 0;
16456
16457 bnxt_ulp_irq_stop(bp);
16458 bnxt_clear_int_mode(bp);
16459 rc = bnxt_set_dflt_rings(bp, true);
16460 if (rc) {
16461 if (BNXT_VF(bp) && rc == -ENODEV)
16462 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16463 else
16464 netdev_err(bp->dev, "Not enough rings available.\n");
16465 goto init_dflt_ring_err;
16466 }
16467 rc = bnxt_init_int_mode(bp);
16468 if (rc)
16469 goto init_dflt_ring_err;
16470
16471 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16472
16473 bnxt_set_dflt_rfs(bp);
16474
16475 init_dflt_ring_err:
16476 bnxt_ulp_irq_restart(bp, rc);
16477 return rc;
16478 }
16479
16480 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16481 {
16482 int rc;
16483
16484 netdev_ops_assert_locked(bp->dev);
16485 bnxt_hwrm_func_qcaps(bp);
16486
16487 if (netif_running(bp->dev))
16488 __bnxt_close_nic(bp, true, false);
16489
16490 bnxt_ulp_irq_stop(bp);
16491 bnxt_clear_int_mode(bp);
16492 rc = bnxt_init_int_mode(bp);
16493 bnxt_ulp_irq_restart(bp, rc);
16494
16495 if (netif_running(bp->dev)) {
16496 if (rc)
16497 netif_close(bp->dev);
16498 else
16499 rc = bnxt_open_nic(bp, true, false);
16500 }
16501
16502 return rc;
16503 }
16504
16505 static int bnxt_init_mac_addr(struct bnxt *bp)
16506 {
16507 int rc = 0;
16508
16509 if (BNXT_PF(bp)) {
16510 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16511 } else {
16512 #ifdef CONFIG_BNXT_SRIOV
16513 struct bnxt_vf_info *vf = &bp->vf;
16514 bool strict_approval = true;
16515
16516 if (is_valid_ether_addr(vf->mac_addr)) {
16517 /* overwrite netdev dev_addr with admin VF MAC */
16518 eth_hw_addr_set(bp->dev, vf->mac_addr);
16519 /* Older PF driver or firmware may not approve this
16520 * correctly.
16521 */
16522 strict_approval = false;
16523 } else {
16524 eth_hw_addr_random(bp->dev);
16525 }
16526 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16527 #endif
16528 }
16529 return rc;
16530 }
16531
16532 static void bnxt_vpd_read_info(struct bnxt *bp)
16533 {
16534 struct pci_dev *pdev = bp->pdev;
16535 unsigned int vpd_size, kw_len;
16536 int pos, size;
16537 u8 *vpd_data;
16538
16539 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16540 if (IS_ERR(vpd_data)) {
16541 pci_warn(pdev, "Unable to read VPD\n");
16542 return;
16543 }
16544
16545 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16546 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16547 if (pos < 0)
16548 goto read_sn;
16549
16550 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16551 memcpy(bp->board_partno, &vpd_data[pos], size);
16552
16553 read_sn:
16554 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16555 PCI_VPD_RO_KEYWORD_SERIALNO,
16556 &kw_len);
16557 if (pos < 0)
16558 goto exit;
16559
16560 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16561 memcpy(bp->board_serialno, &vpd_data[pos], size);
16562 exit:
16563 kfree(vpd_data);
16564 }
16565
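/* Read the PCIe Device Serial Number into @dsn (little-endian) and mark
 * it valid; the DSN is later used as the eswitch switch ID.
 */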
16566 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16567 {
16568 struct pci_dev *pdev = bp->pdev;
16569 u64 qword;
16570
16571 qword = pci_get_dsn(pdev);
16572 if (!qword) {
16573 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16574 return -EOPNOTSUPP;
16575 }
16576
16577 put_unaligned_le64(qword, dsn);
16578
16579 bp->flags |= BNXT_FLAG_DSN_VALID;
16580 return 0;
16581 }
16582
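/* Map the doorbell BAR (BAR 2) for bp->db_size bytes; fails with -ENODEV
 * if the doorbell size has not been set up yet.
 */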
16583 static int bnxt_map_db_bar(struct bnxt *bp)
16584 {
16585 if (!bp->db_size)
16586 return -ENODEV;
16587 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16588 if (!bp->bar1)
16589 return -ENOMEM;
16590 return 0;
16591 }
16592
16593 void bnxt_print_device_info(struct bnxt *bp)
16594 {
16595 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16596 board_info[bp->board_idx].name,
16597 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16598
16599 pcie_print_link_status(bp->pdev);
16600 }
16601
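/* PCI probe entry point: allocate and initialize the net_device, query
 * firmware capabilities, set up features, rings, interrupts and devlink,
 * and finally register the netdev and the RDMA auxiliary device.
 */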
16602 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16603 {
16604 struct bnxt_hw_resc *hw_resc;
16605 struct net_device *dev;
16606 struct bnxt *bp;
16607 int rc, max_irqs;
16608
16609 if (pci_is_bridge(pdev))
16610 return -ENODEV;
16611
16612 if (!pdev->msix_cap) {
16613 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16614 return -ENODEV;
16615 }
16616
16617 /* Clear any pending DMA transactions from the crashed kernel
16618 * while loading the driver in the capture (kdump) kernel.
16619 */
16620 if (is_kdump_kernel()) {
16621 pci_clear_master(pdev);
16622 pcie_flr(pdev);
16623 }
16624
16625 max_irqs = bnxt_get_max_irq(pdev);
16626 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16627 max_irqs);
16628 if (!dev)
16629 return -ENOMEM;
16630
16631 bp = netdev_priv(dev);
16632 bp->board_idx = ent->driver_data;
16633 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16634 bnxt_set_max_func_irqs(bp, max_irqs);
16635
16636 if (bnxt_vf_pciid(bp->board_idx))
16637 bp->flags |= BNXT_FLAG_VF;
16638
16639 /* No devlink port registration in case of a VF */
16640 if (BNXT_PF(bp))
16641 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16642
16643 rc = bnxt_init_board(pdev, dev);
16644 if (rc < 0)
16645 goto init_err_free;
16646
16647 dev->netdev_ops = &bnxt_netdev_ops;
16648 dev->stat_ops = &bnxt_stat_ops;
16649 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16650 dev->ethtool_ops = &bnxt_ethtool_ops;
16651 pci_set_drvdata(pdev, dev);
16652
16653 rc = bnxt_alloc_hwrm_resources(bp);
16654 if (rc)
16655 goto init_err_pci_clean;
16656
16657 mutex_init(&bp->hwrm_cmd_lock);
16658 mutex_init(&bp->link_lock);
16659
16660 rc = bnxt_fw_init_one_p1(bp);
16661 if (rc)
16662 goto init_err_pci_clean;
16663
16664 if (BNXT_PF(bp))
16665 bnxt_vpd_read_info(bp);
16666
16667 if (BNXT_CHIP_P5_PLUS(bp)) {
16668 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16669 if (BNXT_CHIP_P7(bp))
16670 bp->flags |= BNXT_FLAG_CHIP_P7;
16671 }
16672
16673 rc = bnxt_alloc_rss_indir_tbl(bp);
16674 if (rc)
16675 goto init_err_pci_clean;
16676
16677 rc = bnxt_fw_init_one_p2(bp);
16678 if (rc)
16679 goto init_err_pci_clean;
16680
16681 rc = bnxt_map_db_bar(bp);
16682 if (rc) {
16683 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16684 rc);
16685 goto init_err_pci_clean;
16686 }
16687
16688 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16689 NETIF_F_TSO | NETIF_F_TSO6 |
16690 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16691 NETIF_F_GSO_IPXIP4 |
16692 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16693 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16694 NETIF_F_RXCSUM | NETIF_F_GRO;
16695 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16696 dev->hw_features |= NETIF_F_GSO_UDP_L4;
16697
16698 if (BNXT_SUPPORTS_TPA(bp))
16699 dev->hw_features |= NETIF_F_LRO;
16700
16701 dev->hw_enc_features =
16702 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16703 NETIF_F_TSO | NETIF_F_TSO6 |
16704 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16705 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16706 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16707 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16708 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16709 if (bp->flags & BNXT_FLAG_CHIP_P7)
16710 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16711 else
16712 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16713
16714 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16715 NETIF_F_GSO_GRE_CSUM;
16716 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16717 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16718 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16719 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16720 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16721 if (BNXT_SUPPORTS_TPA(bp))
16722 dev->hw_features |= NETIF_F_GRO_HW;
16723 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16724 if (dev->features & NETIF_F_GRO_HW)
16725 dev->features &= ~NETIF_F_LRO;
16726 dev->priv_flags |= IFF_UNICAST_FLT;
16727
16728 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16729 if (bp->tso_max_segs)
16730 netif_set_tso_max_segs(dev, bp->tso_max_segs);
16731
16732 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16733 NETDEV_XDP_ACT_RX_SG;
16734
16735 #ifdef CONFIG_BNXT_SRIOV
16736 init_waitqueue_head(&bp->sriov_cfg_wait);
16737 #endif
16738 if (BNXT_SUPPORTS_TPA(bp)) {
16739 bp->gro_func = bnxt_gro_func_5730x;
16740 if (BNXT_CHIP_P4(bp))
16741 bp->gro_func = bnxt_gro_func_5731x;
16742 else if (BNXT_CHIP_P5_PLUS(bp))
16743 bp->gro_func = bnxt_gro_func_5750x;
16744 }
16745 if (!BNXT_CHIP_P4_PLUS(bp))
16746 bp->flags |= BNXT_FLAG_DOUBLE_DB;
16747
16748 rc = bnxt_init_mac_addr(bp);
16749 if (rc) {
16750 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16751 rc = -EADDRNOTAVAIL;
16752 goto init_err_pci_clean;
16753 }
16754
16755 if (BNXT_PF(bp)) {
16756 /* Read the adapter's DSN to use as the eswitch switch_id */
16757 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16758 }
16759
16760 /* MTU range: 60 - FW defined max */
16761 dev->min_mtu = ETH_ZLEN;
16762 dev->max_mtu = bp->max_mtu;
16763
16764 rc = bnxt_probe_phy(bp, true);
16765 if (rc)
16766 goto init_err_pci_clean;
16767
16768 hw_resc = &bp->hw_resc;
16769 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16770 BNXT_L2_FLTR_MAX_FLTR;
16771 /* Older firmware may not report these filters properly */
16772 if (bp->max_fltr < BNXT_MAX_FLTR)
16773 bp->max_fltr = BNXT_MAX_FLTR;
16774 bnxt_init_l2_fltr_tbl(bp);
16775 __bnxt_set_rx_skb_mode(bp, false);
16776 bnxt_set_tpa_flags(bp);
16777 bnxt_init_ring_params(bp);
16778 bnxt_set_ring_params(bp);
16779 bnxt_rdma_aux_device_init(bp);
16780 rc = bnxt_set_dflt_rings(bp, true);
16781 if (rc) {
16782 if (BNXT_VF(bp) && rc == -ENODEV) {
16783 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16784 } else {
16785 netdev_err(bp->dev, "Not enough rings available.\n");
16786 rc = -ENOMEM;
16787 }
16788 goto init_err_pci_clean;
16789 }
16790
16791 bnxt_fw_init_one_p3(bp);
16792
16793 bnxt_init_dflt_coal(bp);
16794
16795 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16796 bp->flags |= BNXT_FLAG_STRIP_VLAN;
16797
16798 rc = bnxt_init_int_mode(bp);
16799 if (rc)
16800 goto init_err_pci_clean;
16801
16802 /* No TC has been set yet and rings may have been trimmed due to
16803 * limited MSIX, so we re-initialize the TX rings per TC.
16804 */
16805 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16806
16807 if (BNXT_PF(bp)) {
16808 if (!bnxt_pf_wq) {
16809 bnxt_pf_wq =
16810 create_singlethread_workqueue("bnxt_pf_wq");
16811 if (!bnxt_pf_wq) {
16812 dev_err(&pdev->dev, "Unable to create workqueue.\n");
16813 rc = -ENOMEM;
16814 goto init_err_pci_clean;
16815 }
16816 }
16817 rc = bnxt_init_tc(bp);
16818 if (rc)
16819 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16820 rc);
16821 }
16822
16823 bnxt_inv_fw_health_reg(bp);
16824 rc = bnxt_dl_register(bp);
16825 if (rc)
16826 goto init_err_dl;
16827
16828 INIT_LIST_HEAD(&bp->usr_fltr_list);
16829
16830 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16831 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16832 if (BNXT_SUPPORTS_QUEUE_API(bp))
16833 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16834 dev->request_ops_lock = true;
16835 dev->netmem_tx = true;
16836
16837 rc = register_netdev(dev);
16838 if (rc)
16839 goto init_err_cleanup;
16840
16841 bnxt_dl_fw_reporters_create(bp);
16842
16843 bnxt_rdma_aux_device_add(bp);
16844
16845 bnxt_print_device_info(bp);
16846
16847 pci_save_state(pdev);
16848
16849 return 0;
16850 init_err_cleanup:
16851 bnxt_rdma_aux_device_uninit(bp);
16852 bnxt_dl_unregister(bp);
16853 init_err_dl:
16854 bnxt_shutdown_tc(bp);
16855 bnxt_clear_int_mode(bp);
16856
16857 init_err_pci_clean:
16858 bnxt_hwrm_func_drv_unrgtr(bp);
16859 bnxt_free_hwrm_resources(bp);
16860 bnxt_hwmon_uninit(bp);
16861 bnxt_ethtool_free(bp);
16862 bnxt_ptp_clear(bp);
16863 kfree(bp->ptp_cfg);
16864 bp->ptp_cfg = NULL;
16865 kfree(bp->fw_health);
16866 bp->fw_health = NULL;
16867 bnxt_cleanup_pci(bp);
16868 bnxt_free_ctx_mem(bp, true);
16869 bnxt_free_crash_dump_mem(bp);
16870 kfree(bp->rss_indir_tbl);
16871 bp->rss_indir_tbl = NULL;
16872
16873 init_err_free:
16874 free_netdev(dev);
16875 return rc;
16876 }
16877
16878 static void bnxt_shutdown(struct pci_dev *pdev)
16879 {
16880 struct net_device *dev = pci_get_drvdata(pdev);
16881 struct bnxt *bp;
16882
16883 if (!dev)
16884 return;
16885
16886 rtnl_lock();
16887 netdev_lock(dev);
16888 bp = netdev_priv(dev);
16889 if (!bp)
16890 goto shutdown_exit;
16891
16892 if (netif_running(dev))
16893 netif_close(dev);
16894
16895 bnxt_ptp_clear(bp);
16896 bnxt_clear_int_mode(bp);
16897 pci_disable_device(pdev);
16898
16899 if (system_state == SYSTEM_POWER_OFF) {
16900 pci_wake_from_d3(pdev, bp->wol);
16901 pci_set_power_state(pdev, PCI_D3hot);
16902 }
16903
16904 shutdown_exit:
16905 netdev_unlock(dev);
16906 rtnl_unlock();
16907 }
16908
16909 #ifdef CONFIG_PM_SLEEP
16910 static int bnxt_suspend(struct device *device)
16911 {
16912 struct net_device *dev = dev_get_drvdata(device);
16913 struct bnxt *bp = netdev_priv(dev);
16914 int rc = 0;
16915
16916 bnxt_ulp_stop(bp);
16917
16918 netdev_lock(dev);
16919 if (netif_running(dev)) {
16920 netif_device_detach(dev);
16921 rc = bnxt_close(dev);
16922 }
16923 bnxt_hwrm_func_drv_unrgtr(bp);
16924 bnxt_ptp_clear(bp);
16925 pci_disable_device(bp->pdev);
16926 bnxt_free_ctx_mem(bp, false);
16927 netdev_unlock(dev);
16928 return rc;
16929 }
16930
16931 static int bnxt_resume(struct device *device)
16932 {
16933 struct net_device *dev = dev_get_drvdata(device);
16934 struct bnxt *bp = netdev_priv(dev);
16935 int rc = 0;
16936
16937 netdev_lock(dev);
16938 rc = pci_enable_device(bp->pdev);
16939 if (rc) {
16940 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
16941 rc);
16942 goto resume_exit;
16943 }
16944 pci_set_master(bp->pdev);
16945 if (bnxt_hwrm_ver_get(bp)) {
16946 rc = -ENODEV;
16947 goto resume_exit;
16948 }
16949 rc = bnxt_hwrm_func_reset(bp);
16950 if (rc) {
16951 rc = -EBUSY;
16952 goto resume_exit;
16953 }
16954
16955 rc = bnxt_hwrm_func_qcaps(bp);
16956 if (rc)
16957 goto resume_exit;
16958
16959 bnxt_clear_reservations(bp, true);
16960
16961 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
16962 rc = -ENODEV;
16963 goto resume_exit;
16964 }
16965 if (bp->fw_crash_mem)
16966 bnxt_hwrm_crash_dump_mem_cfg(bp);
16967
16968 if (bnxt_ptp_init(bp)) {
16969 kfree(bp->ptp_cfg);
16970 bp->ptp_cfg = NULL;
16971 }
16972 bnxt_get_wol_settings(bp);
16973 if (netif_running(dev)) {
16974 rc = bnxt_open(dev);
16975 if (!rc)
16976 netif_device_attach(dev);
16977 }
16978
16979 resume_exit:
16980 netdev_unlock(bp->dev);
16981 bnxt_ulp_start(bp, rc);
16982 if (!rc)
16983 bnxt_reenable_sriov(bp);
16984 return rc;
16985 }
16986
16987 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
16988 #define BNXT_PM_OPS (&bnxt_pm_ops)
16989
16990 #else
16991
16992 #define BNXT_PM_OPS NULL
16993
16994 #endif /* CONFIG_PM_SLEEP */
16995
16996 /**
16997 * bnxt_io_error_detected - called when PCI error is detected
16998 * @pdev: Pointer to PCI device
16999 * @state: The current pci connection state
17000 *
17001 * This function is called after a PCI bus error affecting
17002 * this device has been detected.
17003 */
17004 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
17005 pci_channel_state_t state)
17006 {
17007 struct net_device *netdev = pci_get_drvdata(pdev);
17008 struct bnxt *bp = netdev_priv(netdev);
17009 bool abort = false;
17010
17011 netdev_info(netdev, "PCI I/O error detected\n");
17012
17013 bnxt_ulp_stop(bp);
17014
17015 netdev_lock(netdev);
17016 netif_device_detach(netdev);
17017
17018 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
17019 netdev_err(bp->dev, "Firmware reset already in progress\n");
17020 abort = true;
17021 }
17022
17023 if (abort || state == pci_channel_io_perm_failure) {
17024 netdev_unlock(netdev);
17025 return PCI_ERS_RESULT_DISCONNECT;
17026 }
17027
17028 /* The link is no longer reliable if the state is pci_channel_io_frozen,
17029 * so disable bus mastering to prevent any potentially bad DMA before
17030 * freeing kernel memory.
17031 */
17032 if (state == pci_channel_io_frozen) {
17033 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
17034 bnxt_fw_fatal_close(bp);
17035 }
17036
17037 if (netif_running(netdev))
17038 __bnxt_close_nic(bp, true, true);
17039
17040 if (pci_is_enabled(pdev))
17041 pci_disable_device(pdev);
17042 bnxt_free_ctx_mem(bp, false);
17043 netdev_unlock(netdev);
17044
17045 /* Request a slot reset. */
17046 return PCI_ERS_RESULT_NEED_RESET;
17047 }
17048
17049 /**
17050 * bnxt_io_slot_reset - called after the pci bus has been reset.
17051 * @pdev: Pointer to PCI device
17052 *
17053 * Restart the card from scratch, as if from a cold-boot.
17054 * At this point, the card has experienced a hard reset,
17055 * followed by fixups by BIOS, and has its config space
17056 * set up identically to what it was at cold boot.
17057 */
17058 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
17059 {
17060 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17061 struct net_device *netdev = pci_get_drvdata(pdev);
17062 struct bnxt *bp = netdev_priv(netdev);
17063 int retry = 0;
17064 int err = 0;
17065 int off;
17066
17067 netdev_info(bp->dev, "PCI Slot Reset\n");
17068
17069 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17070 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17071 msleep(900);
17072
17073 netdev_lock(netdev);
17074
17075 if (pci_enable_device(pdev)) {
17076 dev_err(&pdev->dev,
17077 "Cannot re-enable PCI device after reset.\n");
17078 } else {
17079 pci_set_master(pdev);
17080 /* Upon a fatal error, the device's internal logic that latches
17081 * the BAR values is reset and is restored only by rewriting
17082 * the BARs.
17083 *
17084 * Since pci_restore_state() does not rewrite a BAR whose value
17085 * matches the previously saved value, the driver must write the
17086 * BARs to 0 to force a restore after a fatal error.
17087 */
17088 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17089 &bp->state)) {
17090 for (off = PCI_BASE_ADDRESS_0;
17091 off <= PCI_BASE_ADDRESS_5; off += 4)
17092 pci_write_config_dword(bp->pdev, off, 0);
17093 }
17094 pci_restore_state(pdev);
17095 pci_save_state(pdev);
17096
17097 bnxt_inv_fw_health_reg(bp);
17098 bnxt_try_map_fw_health_reg(bp);
17099
17100 /* In some PCIe AER scenarios, firmware may take up to 10 seconds
17101 * to become ready.
17102 */
17103 do {
17104 err = bnxt_try_recover_fw(bp);
17105 if (!err)
17106 break;
17107 retry++;
17108 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
17109
17110 if (err) {
17111 dev_err(&pdev->dev, "Firmware not ready\n");
17112 goto reset_exit;
17113 }
17114
17115 err = bnxt_hwrm_func_reset(bp);
17116 if (!err)
17117 result = PCI_ERS_RESULT_RECOVERED;
17118
17119 /* IRQ will be initialized later in bnxt_io_resume */
17120 bnxt_ulp_irq_stop(bp);
17121 bnxt_clear_int_mode(bp);
17122 }
17123
17124 reset_exit:
17125 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17126 bnxt_clear_reservations(bp, true);
17127 netdev_unlock(netdev);
17128
17129 return result;
17130 }
17131
17132 /**
17133 * bnxt_io_resume - called when traffic can start flowing again.
17134 * @pdev: Pointer to PCI device
17135 *
17136 * This callback is called when the error recovery driver tells
17137 * us that it's OK to resume normal operation.
17138 */
17139 static void bnxt_io_resume(struct pci_dev *pdev)
17140 {
17141 struct net_device *netdev = pci_get_drvdata(pdev);
17142 struct bnxt *bp = netdev_priv(netdev);
17143 int err;
17144
17145 netdev_info(bp->dev, "PCI Slot Resume\n");
17146 netdev_lock(netdev);
17147
17148 err = bnxt_hwrm_func_qcaps(bp);
17149 if (!err) {
17150 if (netif_running(netdev)) {
17151 err = bnxt_open(netdev);
17152 } else {
17153 err = bnxt_reserve_rings(bp, true);
17154 if (!err)
17155 err = bnxt_init_int_mode(bp);
17156 }
17157 }
17158
17159 if (!err)
17160 netif_device_attach(netdev);
17161
17162 netdev_unlock(netdev);
17163 bnxt_ulp_start(bp, err);
17164 if (!err)
17165 bnxt_reenable_sriov(bp);
17166 }
17167
17168 static const struct pci_error_handlers bnxt_err_handler = {
17169 .error_detected = bnxt_io_error_detected,
17170 .slot_reset = bnxt_io_slot_reset,
17171 .resume = bnxt_io_resume
17172 };
17173
17174 static struct pci_driver bnxt_pci_driver = {
17175 .name = DRV_MODULE_NAME,
17176 .id_table = bnxt_pci_tbl,
17177 .probe = bnxt_init_one,
17178 .remove = bnxt_remove_one,
17179 .shutdown = bnxt_shutdown,
17180 .driver.pm = BNXT_PM_OPS,
17181 .err_handler = &bnxt_err_handler,
17182 #if defined(CONFIG_BNXT_SRIOV)
17183 .sriov_configure = bnxt_sriov_configure,
17184 #endif
17185 };
17186
17187 static int __init bnxt_init(void)
17188 {
17189 int err;
17190
17191 bnxt_debug_init();
17192 err = pci_register_driver(&bnxt_pci_driver);
17193 if (err) {
17194 bnxt_debug_exit();
17195 return err;
17196 }
17197
17198 return 0;
17199 }
17200
17201 static void __exit bnxt_exit(void)
17202 {
17203 pci_unregister_driver(&bnxt_pci_driver);
17204 if (bnxt_pf_wq)
17205 destroy_workqueue(bnxt_pf_wq);
17206 bnxt_debug_exit();
17207 }
17208
17209 module_init(bnxt_init);
17210 module_exit(bnxt_exit);
17211