1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_lock.h>
58 #include <net/netdev_queues.h>
59 #include <net/netdev_rx_queue.h>
60 #include <linux/pci-tph.h>
61 #include <linux/bnxt/hsi.h>
62
63 #include "bnxt.h"
64 #include "bnxt_hwrm.h"
65 #include "bnxt_ulp.h"
66 #include "bnxt_sriov.h"
67 #include "bnxt_ethtool.h"
68 #include "bnxt_dcb.h"
69 #include "bnxt_xdp.h"
70 #include "bnxt_ptp.h"
71 #include "bnxt_vfr.h"
72 #include "bnxt_tc.h"
73 #include "bnxt_devlink.h"
74 #include "bnxt_debugfs.h"
75 #include "bnxt_coredump.h"
76 #include "bnxt_hwmon.h"
77
78 #define BNXT_TX_TIMEOUT (5 * HZ)
79 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
80 NETIF_MSG_TX_ERR)
81
82 MODULE_IMPORT_NS("NETDEV_INTERNAL");
83 MODULE_LICENSE("GPL");
84 MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
85
86 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
87 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
88
89 #define BNXT_TX_PUSH_THRESH 164
90
91 /* indexed by enum board_idx */
92 static const struct {
93 char *name;
94 } board_info[] = {
95 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
96 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
97 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
98 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
99 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
100 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
101 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
102 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
103 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
104 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
105 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
106 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
108 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
109 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
110 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
111 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
112 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
113 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
114 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
115 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
116 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
117 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
118 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
119 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
120 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
121 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
122 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
123 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
124 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
126 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
127 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
128 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
129 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
130 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
131 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
132 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
133 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
134 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
135 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
136 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
137 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
138 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
139 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
140 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
141 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
142 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
143 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
144 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
145 [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
146 };
147
148 static const struct pci_device_id bnxt_pci_tbl[] = {
149 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
150 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
151 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
152 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
153 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
154 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
155 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
156 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
157 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
158 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
159 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
160 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
161 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
162 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
163 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
164 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
165 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
166 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
167 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
168 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
169 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
171 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
172 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
173 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
175 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
176 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
178 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
179 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
183 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
184 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
185 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
186 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
187 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
188 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
189 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
190 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
191 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
193 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
194 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
196 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
197 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
198 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
199 #ifdef CONFIG_BNXT_SRIOV
200 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
201 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
204 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
205 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
207 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
208 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
209 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
210 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
216 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
217 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
218 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
219 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
220 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
221 { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
222 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
223 #endif
224 { 0 }
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
228
229 static const u16 bnxt_vf_req_snif[] = {
230 HWRM_FUNC_CFG,
231 HWRM_FUNC_VF_CFG,
232 HWRM_PORT_PHY_QCFG,
233 HWRM_CFA_L2_FILTER_ALLOC,
234 };
235
236 static const u16 bnxt_async_events_arr[] = {
237 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
238 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
239 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
240 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
241 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
242 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
243 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
244 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
245 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
246 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
247 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
248 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
249 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
250 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
251 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
252 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
253 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
254 };
255
256 const u16 bnxt_bstore_to_trace[] = {
257 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
258 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
259 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
260 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
261 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
262 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
263 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
264 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
265 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
266 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
267 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
268 [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
269 [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
270 };
271
272 static struct workqueue_struct *bnxt_pf_wq;
273
274 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
275 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
276 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
277
278 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
279 .ports = {
280 .src = 0,
281 .dst = 0,
282 },
283 .addrs = {
284 .v6addrs = {
285 .src = BNXT_IPV6_MASK_NONE,
286 .dst = BNXT_IPV6_MASK_NONE,
287 },
288 },
289 };
290
291 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
292 .ports = {
293 .src = cpu_to_be16(0xffff),
294 .dst = cpu_to_be16(0xffff),
295 },
296 .addrs = {
297 .v6addrs = {
298 .src = BNXT_IPV6_MASK_ALL,
299 .dst = BNXT_IPV6_MASK_ALL,
300 },
301 },
302 };
303
304 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
305 .ports = {
306 .src = cpu_to_be16(0xffff),
307 .dst = cpu_to_be16(0xffff),
308 },
309 .addrs = {
310 .v4addrs = {
311 .src = cpu_to_be32(0xffffffff),
312 .dst = cpu_to_be32(0xffffffff),
313 },
314 },
315 };
316
317 static bool bnxt_vf_pciid(enum board_idx idx)
318 {
319 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
320 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
321 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
322 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
323 idx == NETXTREME_E_P7_VF_HV);
324 }
325
326 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
327 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
328
329 #define BNXT_DB_CQ(db, idx) \
330 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
331
332 #define BNXT_DB_NQ_P5(db, idx) \
333 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
334 (db)->doorbell)
335
336 #define BNXT_DB_NQ_P7(db, idx) \
337 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
338 DB_RING_IDX(db, idx), (db)->doorbell)
339
340 #define BNXT_DB_CQ_ARM(db, idx) \
341 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
342
343 #define BNXT_DB_NQ_ARM_P5(db, idx) \
344 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
345 DB_RING_IDX(db, idx), (db)->doorbell)
346
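/* Ring the NQ doorbell on P5+ chips (masked form on P7), or the legacy CQ doorbell otherwise. */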
347 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
348 {
349 if (bp->flags & BNXT_FLAG_CHIP_P7)
350 BNXT_DB_NQ_P7(db, idx);
351 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
352 BNXT_DB_NQ_P5(db, idx);
353 else
354 BNXT_DB_CQ(db, idx);
355 }
356
357 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
358 {
359 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
360 BNXT_DB_NQ_ARM_P5(db, idx);
361 else
362 BNXT_DB_CQ_ARM(db, idx);
363 }
364
365 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
366 {
367 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
368 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
369 DB_RING_IDX(db, idx), db->doorbell);
370 else
371 BNXT_DB_CQ(db, idx);
372 }
373
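/* Schedule the FW reset task if a reset is in progress; PFs use the dedicated bnxt workqueue. */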
374 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
375 {
376 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
377 return;
378
379 if (BNXT_PF(bp))
380 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
381 else
382 schedule_delayed_work(&bp->fw_reset_task, delay);
383 }
384
385 static void __bnxt_queue_sp_work(struct bnxt *bp)
386 {
387 if (BNXT_PF(bp))
388 queue_work(bnxt_pf_wq, &bp->sp_task);
389 else
390 schedule_work(&bp->sp_task);
391 }
392
393 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
394 {
395 set_bit(event, &bp->sp_event);
396 __bnxt_queue_sp_work(bp);
397 }
398
399 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
400 {
401 if (!rxr->bnapi->in_reset) {
402 rxr->bnapi->in_reset = true;
403 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
404 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
405 else
406 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
407 __bnxt_queue_sp_work(bp);
408 }
409 rxr->rx_next_cons = 0xffff;
410 }
411
412 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
413 u16 curr)
414 {
415 struct bnxt_napi *bnapi = txr->bnapi;
416
417 if (bnapi->tx_fault)
418 return;
419
420 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
421 txr->txq_index, txr->tx_hw_cons,
422 txr->tx_cons, txr->tx_prod, curr);
423 WARN_ON_ONCE(1);
424 bnapi->tx_fault = 1;
425 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
426 }
427
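/* TX BD length hint flags, indexed by packet length in 512-byte units. */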
428 const u16 bnxt_lhint_arr[] = {
429 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
430 TX_BD_FLAGS_LHINT_512_TO_1023,
431 TX_BD_FLAGS_LHINT_1024_TO_2047,
432 TX_BD_FLAGS_LHINT_1024_TO_2047,
433 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
434 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
435 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
436 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
437 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
438 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
439 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
440 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
441 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
442 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
443 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
444 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
445 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
446 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
447 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
448 };
449
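/* Return the CFA action (switch port ID) from the skb's HW port mux metadata dst, or 0 if none. */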
450 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
451 {
452 struct metadata_dst *md_dst = skb_metadata_dst(skb);
453
454 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
455 return 0;
456
457 return md_dst->u.port_info.port_id;
458 }
459
460 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
461 u16 prod)
462 {
463 /* Sync BD data before updating doorbell */
464 wmb();
465 bnxt_db_write(bp, &txr->tx_db, prod);
466 txr->kick_pending = 0;
467 }
468
469 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
470 {
471 struct bnxt *bp = netdev_priv(dev);
472 struct tx_bd *txbd, *txbd0;
473 struct tx_bd_ext *txbd1;
474 struct netdev_queue *txq;
475 int i;
476 dma_addr_t mapping;
477 unsigned int length, pad = 0;
478 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
479 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
480 struct pci_dev *pdev = bp->pdev;
481 u16 prod, last_frag, txts_prod;
482 struct bnxt_tx_ring_info *txr;
483 struct bnxt_sw_tx_bd *tx_buf;
484 __le32 lflags = 0;
485 skb_frag_t *frag;
486
487 i = skb_get_queue_mapping(skb);
488 if (unlikely(i >= bp->tx_nr_rings)) {
489 dev_kfree_skb_any(skb);
490 dev_core_stats_tx_dropped_inc(dev);
491 return NETDEV_TX_OK;
492 }
493
494 txq = netdev_get_tx_queue(dev, i);
495 txr = &bp->tx_ring[bp->tx_ring_map[i]];
496 prod = txr->tx_prod;
497
498 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
499 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
500 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
501 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
502 if (skb_linearize(skb)) {
503 dev_kfree_skb_any(skb);
504 dev_core_stats_tx_dropped_inc(dev);
505 return NETDEV_TX_OK;
506 }
507 }
508 #endif
509 free_size = bnxt_tx_avail(bp, txr);
510 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
511 /* We must have raced with NAPI cleanup */
512 if (net_ratelimit() && txr->kick_pending)
513 netif_warn(bp, tx_err, dev,
514 "bnxt: ring busy w/ flush pending!\n");
515 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
516 bp->tx_wake_thresh))
517 return NETDEV_TX_BUSY;
518 }
519
520 if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
521 goto tx_free;
522
523 length = skb->len;
524 len = skb_headlen(skb);
525 last_frag = skb_shinfo(skb)->nr_frags;
526
527 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
528
529 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
530 tx_buf->skb = skb;
531 tx_buf->nr_frags = last_frag;
532
533 vlan_tag_flags = 0;
534 cfa_action = bnxt_xmit_get_cfa_action(skb);
535 if (skb_vlan_tag_present(skb)) {
536 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
537 skb_vlan_tag_get(skb);
538 /* Currently supports 8021Q, 8021AD vlan offloads
539 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
540 */
541 if (skb->vlan_proto == htons(ETH_P_8021Q))
542 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
543 }
544
545 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
546 ptp->tx_tstamp_en) {
547 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
548 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
549 tx_buf->is_ts_pkt = 1;
550 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
551 } else if (!skb_is_gso(skb)) {
552 u16 seq_id, hdr_off;
553
554 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
555 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
556 if (vlan_tag_flags)
557 hdr_off += VLAN_HLEN;
558 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
559 tx_buf->is_ts_pkt = 1;
560 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
561
562 ptp->txts_req[txts_prod].tx_seqid = seq_id;
563 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
564 tx_buf->txts_prod = txts_prod;
565 }
566 }
567 }
568 if (unlikely(skb->no_fcs))
569 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
570
571 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
572 skb_frags_readable(skb) && !lflags) {
573 struct tx_push_buffer *tx_push_buf = txr->tx_push;
574 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
575 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
576 void __iomem *db = txr->tx_db.doorbell;
577 void *pdata = tx_push_buf->data;
578 u64 *end;
579 int j, push_len;
580
581 /* Set COAL_NOW to be ready quickly for the next push */
582 tx_push->tx_bd_len_flags_type =
583 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
584 TX_BD_TYPE_LONG_TX_BD |
585 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
586 TX_BD_FLAGS_COAL_NOW |
587 TX_BD_FLAGS_PACKET_END |
588 TX_BD_CNT(2));
589
590 if (skb->ip_summed == CHECKSUM_PARTIAL)
591 tx_push1->tx_bd_hsize_lflags =
592 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
593 else
594 tx_push1->tx_bd_hsize_lflags = 0;
595
596 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
597 tx_push1->tx_bd_cfa_action =
598 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
599
600 end = pdata + length;
601 end = PTR_ALIGN(end, 8) - 1;
602 *end = 0;
603
604 skb_copy_from_linear_data(skb, pdata, len);
605 pdata += len;
606 for (j = 0; j < last_frag; j++) {
607 void *fptr;
608
609 frag = &skb_shinfo(skb)->frags[j];
610 fptr = skb_frag_address_safe(frag);
611 if (!fptr)
612 goto normal_tx;
613
614 memcpy(pdata, fptr, skb_frag_size(frag));
615 pdata += skb_frag_size(frag);
616 }
617
618 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
619 txbd->tx_bd_haddr = txr->data_mapping;
620 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
621 prod = NEXT_TX(prod);
622 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
623 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
624 memcpy(txbd, tx_push1, sizeof(*txbd));
625 prod = NEXT_TX(prod);
626 tx_push->doorbell =
627 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
628 DB_RING_IDX(&txr->tx_db, prod));
629 WRITE_ONCE(txr->tx_prod, prod);
630
631 tx_buf->is_push = 1;
632 netdev_tx_sent_queue(txq, skb->len);
633 wmb(); /* Sync is_push and byte queue before pushing data */
634
635 push_len = (length + sizeof(*tx_push) + 7) / 8;
636 if (push_len > 16) {
637 __iowrite64_copy(db, tx_push_buf, 16);
638 __iowrite32_copy(db + 4, tx_push_buf + 1,
639 (push_len - 16) << 1);
640 } else {
641 __iowrite64_copy(db, tx_push_buf, push_len);
642 }
643
644 goto tx_done;
645 }
646
647 normal_tx:
648 if (length < BNXT_MIN_PKT_SIZE) {
649 pad = BNXT_MIN_PKT_SIZE - length;
650 if (skb_pad(skb, pad))
651 /* SKB already freed. */
652 goto tx_kick_pending;
653 length = BNXT_MIN_PKT_SIZE;
654 }
655
656 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
657
658 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
659 goto tx_free;
660
661 dma_unmap_addr_set(tx_buf, mapping, mapping);
662 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
663 TX_BD_CNT(last_frag + 2);
664
665 txbd->tx_bd_haddr = cpu_to_le64(mapping);
666 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
667
668 prod = NEXT_TX(prod);
669 txbd1 = (struct tx_bd_ext *)
670 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
671
672 txbd1->tx_bd_hsize_lflags = lflags;
673 if (skb_is_gso(skb)) {
674 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
675 u32 hdr_len;
676
677 if (skb->encapsulation) {
678 if (udp_gso)
679 hdr_len = skb_inner_transport_offset(skb) +
680 sizeof(struct udphdr);
681 else
682 hdr_len = skb_inner_tcp_all_headers(skb);
683 } else if (udp_gso) {
684 hdr_len = skb_transport_offset(skb) +
685 sizeof(struct udphdr);
686 } else {
687 hdr_len = skb_tcp_all_headers(skb);
688 }
689
690 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
691 TX_BD_FLAGS_T_IPID |
692 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
693 length = skb_shinfo(skb)->gso_size;
694 txbd1->tx_bd_mss = cpu_to_le32(length);
695 length += hdr_len;
696 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
697 txbd1->tx_bd_hsize_lflags |=
698 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
699 txbd1->tx_bd_mss = 0;
700 }
701
702 length >>= 9;
703 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
704 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
705 skb->len);
706 i = 0;
707 goto tx_dma_error;
708 }
709 flags |= bnxt_lhint_arr[length];
710 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
711
712 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
713 txbd1->tx_bd_cfa_action =
714 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
715 txbd0 = txbd;
716 for (i = 0; i < last_frag; i++) {
717 frag = &skb_shinfo(skb)->frags[i];
718 prod = NEXT_TX(prod);
719 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
720
721 len = skb_frag_size(frag);
722 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
723 DMA_TO_DEVICE);
724
725 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
726 goto tx_dma_error;
727
728 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
729 netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
730 mapping, mapping);
731
732 txbd->tx_bd_haddr = cpu_to_le64(mapping);
733
734 flags = len << TX_BD_LEN_SHIFT;
735 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
736 }
737
738 flags &= ~TX_BD_LEN;
739 txbd->tx_bd_len_flags_type =
740 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
741 TX_BD_FLAGS_PACKET_END);
742
743 netdev_tx_sent_queue(txq, skb->len);
744
745 skb_tx_timestamp(skb);
746
747 prod = NEXT_TX(prod);
748 WRITE_ONCE(txr->tx_prod, prod);
749
750 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
751 bnxt_txr_db_kick(bp, txr, prod);
752 } else {
753 if (free_size >= bp->tx_wake_thresh)
754 txbd0->tx_bd_len_flags_type |=
755 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
756 txr->kick_pending = 1;
757 }
758
759 tx_done:
760
761 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
762 if (netdev_xmit_more() && !tx_buf->is_push) {
763 txbd0->tx_bd_len_flags_type &=
764 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
765 bnxt_txr_db_kick(bp, txr, prod);
766 }
767
768 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
769 bp->tx_wake_thresh);
770 }
771 return NETDEV_TX_OK;
772
773 tx_dma_error:
774 last_frag = i;
775
776 /* start back at beginning and unmap skb */
777 prod = txr->tx_prod;
778 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
779 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
780 skb_headlen(skb), DMA_TO_DEVICE);
781 prod = NEXT_TX(prod);
782
783 /* unmap remaining mapped pages */
784 for (i = 0; i < last_frag; i++) {
785 prod = NEXT_TX(prod);
786 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
787 frag = &skb_shinfo(skb)->frags[i];
788 netmem_dma_unmap_page_attrs(&pdev->dev,
789 dma_unmap_addr(tx_buf, mapping),
790 skb_frag_size(frag),
791 DMA_TO_DEVICE, 0);
792 }
793
794 tx_free:
795 dev_kfree_skb_any(skb);
796 tx_kick_pending:
797 if (BNXT_TX_PTP_IS_SET(lflags)) {
798 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
799 atomic64_inc(&bp->ptp_cfg->stats.ts_err);
800 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
801 /* set SKB to err so PTP worker will clean up */
802 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
803 }
804 if (txr->kick_pending)
805 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
806 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
807 dev_core_stats_tx_dropped_inc(dev);
808 return NETDEV_TX_OK;
809 }
810
811 /* Returns true if some remaining TX packets are not processed. */
812 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
813 int budget)
814 {
815 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
816 struct pci_dev *pdev = bp->pdev;
817 u16 hw_cons = txr->tx_hw_cons;
818 unsigned int tx_bytes = 0;
819 u16 cons = txr->tx_cons;
820 skb_frag_t *frag;
821 int tx_pkts = 0;
822 bool rc = false;
823
824 while (RING_TX(bp, cons) != hw_cons) {
825 struct bnxt_sw_tx_bd *tx_buf;
826 struct sk_buff *skb;
827 bool is_ts_pkt;
828 int j, last;
829
830 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
831 skb = tx_buf->skb;
832
833 if (unlikely(!skb)) {
834 bnxt_sched_reset_txr(bp, txr, cons);
835 return rc;
836 }
837
838 is_ts_pkt = tx_buf->is_ts_pkt;
839 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
840 rc = true;
841 break;
842 }
843
844 cons = NEXT_TX(cons);
845 tx_pkts++;
846 tx_bytes += skb->len;
847 tx_buf->skb = NULL;
848 tx_buf->is_ts_pkt = 0;
849
850 if (tx_buf->is_push) {
851 tx_buf->is_push = 0;
852 goto next_tx_int;
853 }
854
855 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
856 skb_headlen(skb), DMA_TO_DEVICE);
857 last = tx_buf->nr_frags;
858
859 for (j = 0; j < last; j++) {
860 frag = &skb_shinfo(skb)->frags[j];
861 cons = NEXT_TX(cons);
862 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
863 netmem_dma_unmap_page_attrs(&pdev->dev,
864 dma_unmap_addr(tx_buf,
865 mapping),
866 skb_frag_size(frag),
867 DMA_TO_DEVICE, 0);
868 }
869 if (unlikely(is_ts_pkt)) {
870 if (BNXT_CHIP_P5(bp)) {
871 /* PTP worker takes ownership of the skb */
872 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
873 skb = NULL;
874 }
875 }
876
877 next_tx_int:
878 cons = NEXT_TX(cons);
879
880 napi_consume_skb(skb, budget);
881 }
882
883 WRITE_ONCE(txr->tx_cons, cons);
884
885 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
886 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
887 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
888
889 return rc;
890 }
891
892 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
893 {
894 struct bnxt_tx_ring_info *txr;
895 bool more = false;
896 int i;
897
898 bnxt_for_each_napi_tx(i, bnapi, txr) {
899 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
900 more |= __bnxt_tx_int(bp, txr, budget);
901 }
902 if (!more)
903 bnapi->events &= ~BNXT_TX_CMP_EVENT;
904 }
905
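/* True if header buffers come from a separate page pool. */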
906 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
907 {
908 return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
909 }
910
911 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
912 struct bnxt_rx_ring_info *rxr,
913 unsigned int *offset,
914 gfp_t gfp)
915 {
916 struct page *page;
917
918 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
919 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
920 BNXT_RX_PAGE_SIZE);
921 } else {
922 page = page_pool_dev_alloc_pages(rxr->page_pool);
923 *offset = 0;
924 }
925 if (!page)
926 return NULL;
927
928 *mapping = page_pool_get_dma_addr(page) + *offset;
929 return page;
930 }
931
932 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
933 struct bnxt_rx_ring_info *rxr,
934 unsigned int *offset,
935 gfp_t gfp)
936 {
937 netmem_ref netmem;
938
939 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
940 netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
941 } else {
942 netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
943 *offset = 0;
944 }
945 if (!netmem)
946 return 0;
947
948 *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
949 return netmem;
950 }
951
952 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
953 struct bnxt_rx_ring_info *rxr,
954 gfp_t gfp)
955 {
956 unsigned int offset;
957 struct page *page;
958
959 page = page_pool_alloc_frag(rxr->head_pool, &offset,
960 bp->rx_buf_size, gfp);
961 if (!page)
962 return NULL;
963
964 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
965 return page_address(page) + offset;
966 }
967
968 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
969 u16 prod, gfp_t gfp)
970 {
971 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
972 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
973 dma_addr_t mapping;
974
975 if (BNXT_RX_PAGE_MODE(bp)) {
976 unsigned int offset;
977 struct page *page =
978 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
979
980 if (!page)
981 return -ENOMEM;
982
983 mapping += bp->rx_dma_offset;
984 rx_buf->data = page;
985 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
986 } else {
987 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
988
989 if (!data)
990 return -ENOMEM;
991
992 rx_buf->data = data;
993 rx_buf->data_ptr = data + bp->rx_offset;
994 }
995 rx_buf->mapping = mapping;
996
997 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
998 return 0;
999 }
1000
1001 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
1002 {
1003 u16 prod = rxr->rx_prod;
1004 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1005 struct bnxt *bp = rxr->bnapi->bp;
1006 struct rx_bd *cons_bd, *prod_bd;
1007
1008 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1009 cons_rx_buf = &rxr->rx_buf_ring[cons];
1010
1011 prod_rx_buf->data = data;
1012 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
1013
1014 prod_rx_buf->mapping = cons_rx_buf->mapping;
1015
1016 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1017 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
1018
1019 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
1020 }
1021
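/* Find the next free slot in the agg buffer bitmap, wrapping to the start if needed. */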
1022 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1023 {
1024 u16 next, max = rxr->rx_agg_bmap_size;
1025
1026 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
1027 if (next >= max)
1028 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
1029 return next;
1030 }
1031
1032 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1033 u16 prod, gfp_t gfp)
1034 {
1035 struct rx_bd *rxbd =
1036 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1037 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
1038 u16 sw_prod = rxr->rx_sw_agg_prod;
1039 unsigned int offset = 0;
1040 dma_addr_t mapping;
1041 netmem_ref netmem;
1042
1043 netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
1044 if (!netmem)
1045 return -ENOMEM;
1046
1047 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1048 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1049
1050 __set_bit(sw_prod, rxr->rx_agg_bmap);
1051 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
1052 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1053
1054 rx_agg_buf->netmem = netmem;
1055 rx_agg_buf->offset = offset;
1056 rx_agg_buf->mapping = mapping;
1057 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
1058 rxbd->rx_bd_opaque = sw_prod;
1059 return 0;
1060 }
1061
1062 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
1063 struct bnxt_cp_ring_info *cpr,
1064 u16 cp_cons, u16 curr)
1065 {
1066 struct rx_agg_cmp *agg;
1067
1068 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
1069 agg = (struct rx_agg_cmp *)
1070 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1071 return agg;
1072 }
1073
1074 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1075 struct bnxt_rx_ring_info *rxr,
1076 u16 agg_id, u16 curr)
1077 {
1078 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1079
1080 return &tpa_info->agg_arr[curr];
1081 }
1082
1083 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1084 u16 start, u32 agg_bufs, bool tpa)
1085 {
1086 struct bnxt_napi *bnapi = cpr->bnapi;
1087 struct bnxt *bp = bnapi->bp;
1088 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1089 u16 prod = rxr->rx_agg_prod;
1090 u16 sw_prod = rxr->rx_sw_agg_prod;
1091 bool p5_tpa = false;
1092 u32 i;
1093
1094 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1095 p5_tpa = true;
1096
1097 for (i = 0; i < agg_bufs; i++) {
1098 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1099 struct rx_agg_cmp *agg;
1100 struct rx_bd *prod_bd;
1101 netmem_ref netmem;
1102 u16 cons;
1103
1104 if (p5_tpa)
1105 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1106 else
1107 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1108 cons = agg->rx_agg_cmp_opaque;
1109 __clear_bit(cons, rxr->rx_agg_bmap);
1110
1111 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1112 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1113
1114 __set_bit(sw_prod, rxr->rx_agg_bmap);
1115 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1116 cons_rx_buf = &rxr->rx_agg_ring[cons];
1117
1118 /* It is possible for sw_prod to be equal to cons, so
1119 * set cons_rx_buf->netmem to 0 first.
1120 */
1121 netmem = cons_rx_buf->netmem;
1122 cons_rx_buf->netmem = 0;
1123 prod_rx_buf->netmem = netmem;
1124 prod_rx_buf->offset = cons_rx_buf->offset;
1125
1126 prod_rx_buf->mapping = cons_rx_buf->mapping;
1127
1128 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1129
1130 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1131 prod_bd->rx_bd_opaque = sw_prod;
1132
1133 prod = NEXT_RX_AGG(prod);
1134 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1135 }
1136 rxr->rx_agg_prod = prod;
1137 rxr->rx_sw_agg_prod = sw_prod;
1138 }
1139
1140 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1141 struct bnxt_rx_ring_info *rxr,
1142 u16 cons, void *data, u8 *data_ptr,
1143 dma_addr_t dma_addr,
1144 unsigned int offset_and_len)
1145 {
1146 unsigned int len = offset_and_len & 0xffff;
1147 struct page *page = data;
1148 u16 prod = rxr->rx_prod;
1149 struct sk_buff *skb;
1150 int err;
1151
1152 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1153 if (unlikely(err)) {
1154 bnxt_reuse_rx_data(rxr, cons, data);
1155 return NULL;
1156 }
1157 dma_addr -= bp->rx_dma_offset;
1158 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1159 bp->rx_dir);
1160 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1161 if (!skb) {
1162 page_pool_recycle_direct(rxr->page_pool, page);
1163 return NULL;
1164 }
1165 skb_mark_for_recycle(skb);
1166 skb_reserve(skb, bp->rx_offset);
1167 __skb_put(skb, len);
1168
1169 return skb;
1170 }
1171
1172 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1173 struct bnxt_rx_ring_info *rxr,
1174 u16 cons, void *data, u8 *data_ptr,
1175 dma_addr_t dma_addr,
1176 unsigned int offset_and_len)
1177 {
1178 unsigned int payload = offset_and_len >> 16;
1179 unsigned int len = offset_and_len & 0xffff;
1180 skb_frag_t *frag;
1181 struct page *page = data;
1182 u16 prod = rxr->rx_prod;
1183 struct sk_buff *skb;
1184 int off, err;
1185
1186 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1187 if (unlikely(err)) {
1188 bnxt_reuse_rx_data(rxr, cons, data);
1189 return NULL;
1190 }
1191 dma_addr -= bp->rx_dma_offset;
1192 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1193 bp->rx_dir);
1194
1195 if (unlikely(!payload))
1196 payload = eth_get_headlen(bp->dev, data_ptr, len);
1197
1198 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1199 if (!skb) {
1200 page_pool_recycle_direct(rxr->page_pool, page);
1201 return NULL;
1202 }
1203
1204 skb_mark_for_recycle(skb);
1205 off = (void *)data_ptr - page_address(page);
1206 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1207 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1208 payload + NET_IP_ALIGN);
1209
1210 frag = &skb_shinfo(skb)->frags[0];
1211 skb_frag_size_sub(frag, payload);
1212 skb_frag_off_add(frag, payload);
1213 skb->data_len -= payload;
1214 skb->tail += payload;
1215
1216 return skb;
1217 }
1218
1219 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1220 struct bnxt_rx_ring_info *rxr, u16 cons,
1221 void *data, u8 *data_ptr,
1222 dma_addr_t dma_addr,
1223 unsigned int offset_and_len)
1224 {
1225 u16 prod = rxr->rx_prod;
1226 struct sk_buff *skb;
1227 int err;
1228
1229 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1230 if (unlikely(err)) {
1231 bnxt_reuse_rx_data(rxr, cons, data);
1232 return NULL;
1233 }
1234
1235 skb = napi_build_skb(data, bp->rx_buf_size);
1236 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1237 bp->rx_dir);
1238 if (!skb) {
1239 page_pool_free_va(rxr->head_pool, data, true);
1240 return NULL;
1241 }
1242
1243 skb_mark_for_recycle(skb);
1244 skb_reserve(skb, bp->rx_offset);
1245 skb_put(skb, offset_and_len & 0xffff);
1246 return skb;
1247 }
1248
1249 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
1250 struct bnxt_cp_ring_info *cpr,
1251 u16 idx, u32 agg_bufs, bool tpa,
1252 struct sk_buff *skb,
1253 struct xdp_buff *xdp)
1254 {
1255 struct bnxt_napi *bnapi = cpr->bnapi;
1256 struct skb_shared_info *shinfo;
1257 struct bnxt_rx_ring_info *rxr;
1258 u32 i, total_frag_len = 0;
1259 bool p5_tpa = false;
1260 u16 prod;
1261
1262 rxr = bnapi->rx_ring;
1263 prod = rxr->rx_agg_prod;
1264
1265 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1266 p5_tpa = true;
1267
1268 if (skb)
1269 shinfo = skb_shinfo(skb);
1270 else
1271 shinfo = xdp_get_shared_info_from_buff(xdp);
1272
1273 for (i = 0; i < agg_bufs; i++) {
1274 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1275 struct rx_agg_cmp *agg;
1276 u16 cons, frag_len;
1277 netmem_ref netmem;
1278
1279 if (p5_tpa)
1280 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1281 else
1282 agg = bnxt_get_agg(bp, cpr, idx, i);
1283 cons = agg->rx_agg_cmp_opaque;
1284 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1285 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1286
1287 cons_rx_buf = &rxr->rx_agg_ring[cons];
1288 if (skb) {
1289 skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
1290 cons_rx_buf->offset,
1291 frag_len, BNXT_RX_PAGE_SIZE);
1292 } else {
1293 skb_frag_t *frag = &shinfo->frags[i];
1294
1295 skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
1296 cons_rx_buf->offset,
1297 frag_len);
1298 shinfo->nr_frags = i + 1;
1299 }
1300 __clear_bit(cons, rxr->rx_agg_bmap);
1301
1302 /* It is possible for bnxt_alloc_rx_netmem() to allocate
1303 * a sw_prod index that equals the cons index, so we
1304 * need to clear the cons entry now.
1305 */
1306 netmem = cons_rx_buf->netmem;
1307 cons_rx_buf->netmem = 0;
1308
1309 if (xdp && netmem_is_pfmemalloc(netmem))
1310 xdp_buff_set_frag_pfmemalloc(xdp);
1311
1312 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
1313 if (skb) {
1314 skb->len -= frag_len;
1315 skb->data_len -= frag_len;
1316 skb->truesize -= BNXT_RX_PAGE_SIZE;
1317 }
1318
1319 --shinfo->nr_frags;
1320 cons_rx_buf->netmem = netmem;
1321
1322 /* Update prod since possibly some netmems have been
1323 * allocated already.
1324 */
1325 rxr->rx_agg_prod = prod;
1326 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1327 return 0;
1328 }
1329
1330 page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
1331 BNXT_RX_PAGE_SIZE);
1332
1333 total_frag_len += frag_len;
1334 prod = NEXT_RX_AGG(prod);
1335 }
1336 rxr->rx_agg_prod = prod;
1337 return total_frag_len;
1338 }
1339
1340 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
1341 struct bnxt_cp_ring_info *cpr,
1342 struct sk_buff *skb, u16 idx,
1343 u32 agg_bufs, bool tpa)
1344 {
1345 u32 total_frag_len = 0;
1346
1347 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1348 skb, NULL);
1349 if (!total_frag_len) {
1350 skb_mark_for_recycle(skb);
1351 dev_kfree_skb(skb);
1352 return NULL;
1353 }
1354
1355 return skb;
1356 }
1357
1358 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
1359 struct bnxt_cp_ring_info *cpr,
1360 struct xdp_buff *xdp, u16 idx,
1361 u32 agg_bufs, bool tpa)
1362 {
1363 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1364 u32 total_frag_len = 0;
1365
1366 if (!xdp_buff_has_frags(xdp))
1367 shinfo->nr_frags = 0;
1368
1369 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
1370 NULL, xdp);
1371 if (total_frag_len) {
1372 xdp_buff_set_frags_flag(xdp);
1373 shinfo->nr_frags = agg_bufs;
1374 shinfo->xdp_frags_size = total_frag_len;
1375 }
1376 return total_frag_len;
1377 }
1378
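/* Return true if the completion entry for the last of agg_bufs agg buffers is valid. */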
1379 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1380 u8 agg_bufs, u32 *raw_cons)
1381 {
1382 u16 last;
1383 struct rx_agg_cmp *agg;
1384
1385 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1386 last = RING_CMP(*raw_cons);
1387 agg = (struct rx_agg_cmp *)
1388 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1389 return RX_AGG_CMP_VALID(agg, *raw_cons);
1390 }
1391
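/* Copy a small RX packet into a new skb, syncing the source buffer for the
 * CPU before the copy and back to the device afterwards.
 */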
1392 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
1393 unsigned int len,
1394 dma_addr_t mapping)
1395 {
1396 struct bnxt *bp = bnapi->bp;
1397 struct pci_dev *pdev = bp->pdev;
1398 struct sk_buff *skb;
1399
1400 skb = napi_alloc_skb(&bnapi->napi, len);
1401 if (!skb)
1402 return NULL;
1403
1404 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
1405 bp->rx_dir);
1406
1407 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1408 len + NET_IP_ALIGN);
1409
1410 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
1411 bp->rx_dir);
1412
1413 skb_put(skb, len);
1414
1415 return skb;
1416 }
1417
1418 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1419 unsigned int len,
1420 dma_addr_t mapping)
1421 {
1422 return bnxt_copy_data(bnapi, data, len, mapping);
1423 }
1424
1425 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
1426 struct xdp_buff *xdp,
1427 unsigned int len,
1428 dma_addr_t mapping)
1429 {
1430 unsigned int metasize = 0;
1431 u8 *data = xdp->data;
1432 struct sk_buff *skb;
1433
1434 len = xdp->data_end - xdp->data_meta;
1435 metasize = xdp->data - xdp->data_meta;
1436 data = xdp->data_meta;
1437
1438 skb = bnxt_copy_data(bnapi, data, len, mapping);
1439 if (!skb)
1440 return skb;
1441
1442 if (metasize) {
1443 skb_metadata_set(skb, metasize);
1444 __skb_pull(skb, metasize);
1445 }
1446
1447 return skb;
1448 }
1449
1450 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1451 u32 *raw_cons, void *cmp)
1452 {
1453 struct rx_cmp *rxcmp = cmp;
1454 u32 tmp_raw_cons = *raw_cons;
1455 u8 cmp_type, agg_bufs = 0;
1456
1457 cmp_type = RX_CMP_TYPE(rxcmp);
1458
1459 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1460 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1461 RX_CMP_AGG_BUFS) >>
1462 RX_CMP_AGG_BUFS_SHIFT;
1463 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1464 struct rx_tpa_end_cmp *tpa_end = cmp;
1465
1466 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1467 return 0;
1468
1469 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1470 }
1471
1472 if (agg_bufs) {
1473 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1474 return -EBUSY;
1475 }
1476 *raw_cons = tmp_raw_cons;
1477 return 0;
1478 }
1479
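/* Map a hardware TPA agg_id to a free software index and record the mapping. */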
1480 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1481 {
1482 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1483 u16 idx = agg_id & MAX_TPA_P5_MASK;
1484
1485 if (test_bit(idx, map->agg_idx_bmap))
1486 idx = find_first_zero_bit(map->agg_idx_bmap,
1487 BNXT_AGG_IDX_BMAP_SIZE);
1488 __set_bit(idx, map->agg_idx_bmap);
1489 map->agg_id_tbl[agg_id] = idx;
1490 return idx;
1491 }
1492
1493 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1494 {
1495 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1496
1497 __clear_bit(idx, map->agg_idx_bmap);
1498 }
1499
1500 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1501 {
1502 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1503
1504 return map->agg_id_tbl[agg_id];
1505 }
1506
1507 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1508 struct rx_tpa_start_cmp *tpa_start,
1509 struct rx_tpa_start_cmp_ext *tpa_start1)
1510 {
1511 tpa_info->cfa_code_valid = 1;
1512 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1513 tpa_info->vlan_valid = 0;
1514 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1515 tpa_info->vlan_valid = 1;
1516 tpa_info->metadata =
1517 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1518 }
1519 }
1520
1521 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1522 struct rx_tpa_start_cmp *tpa_start,
1523 struct rx_tpa_start_cmp_ext *tpa_start1)
1524 {
1525 tpa_info->vlan_valid = 0;
1526 if (TPA_START_VLAN_VALID(tpa_start)) {
1527 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1528 u32 vlan_proto = ETH_P_8021Q;
1529
1530 tpa_info->vlan_valid = 1;
1531 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1532 vlan_proto = ETH_P_8021AD;
1533 tpa_info->metadata = vlan_proto << 16 |
1534 TPA_START_METADATA0_TCI(tpa_start1);
1535 }
1536 }
1537
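/* Handle a TPA_START completion: stash the packet buffer in the per-agg
 * TPA info and put the TPA info's spare buffer back on the RX ring.
 */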
1538 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1539 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1540 struct rx_tpa_start_cmp_ext *tpa_start1)
1541 {
1542 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1543 struct bnxt_tpa_info *tpa_info;
1544 u16 cons, prod, agg_id;
1545 struct rx_bd *prod_bd;
1546 dma_addr_t mapping;
1547
1548 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1549 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1550 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1551 } else {
1552 agg_id = TPA_START_AGG_ID(tpa_start);
1553 }
1554 cons = tpa_start->rx_tpa_start_cmp_opaque;
1555 prod = rxr->rx_prod;
1556 cons_rx_buf = &rxr->rx_buf_ring[cons];
1557 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1558 tpa_info = &rxr->rx_tpa[agg_id];
1559
1560 if (unlikely(cons != rxr->rx_next_cons ||
1561 TPA_START_ERROR(tpa_start))) {
1562 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1563 cons, rxr->rx_next_cons,
1564 TPA_START_ERROR_CODE(tpa_start1));
1565 bnxt_sched_reset_rxr(bp, rxr);
1566 return;
1567 }
1568 prod_rx_buf->data = tpa_info->data;
1569 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1570
1571 mapping = tpa_info->mapping;
1572 prod_rx_buf->mapping = mapping;
1573
1574 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1575
1576 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1577
1578 tpa_info->data = cons_rx_buf->data;
1579 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1580 cons_rx_buf->data = NULL;
1581 tpa_info->mapping = cons_rx_buf->mapping;
1582
1583 tpa_info->len =
1584 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1585 RX_TPA_START_CMP_LEN_SHIFT;
1586 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1587 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1588 tpa_info->gso_type = SKB_GSO_TCPV4;
1589 if (TPA_START_IS_IPV6(tpa_start1))
1590 tpa_info->gso_type = SKB_GSO_TCPV6;
1591 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1592 else if (!BNXT_CHIP_P4_PLUS(bp) &&
1593 TPA_START_HASH_TYPE(tpa_start) == 3)
1594 tpa_info->gso_type = SKB_GSO_TCPV6;
1595 tpa_info->rss_hash =
1596 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1597 } else {
1598 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1599 tpa_info->gso_type = 0;
1600 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1601 }
1602 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1603 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1604 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1605 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1606 else
1607 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1608 tpa_info->agg_count = 0;
1609
1610 rxr->rx_prod = NEXT_RX(prod);
1611 cons = RING_RX(bp, NEXT_RX(cons));
1612 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1613 cons_rx_buf = &rxr->rx_buf_ring[cons];
1614
1615 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1616 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1617 cons_rx_buf->data = NULL;
1618 }
1619
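/* Abort an in-progress TPA completion: return any aggregation buffers
 * already posted on the completion ring back to the aggregation ring.
 */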
1620 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1621 {
1622 if (agg_bufs)
1623 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1624 }
1625
1626 #ifdef CONFIG_INET
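/* Mark a GRO'ed tunnel packet with the appropriate UDP tunnel GSO type,
 * depending on whether the outer UDP header carries a checksum.
 */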
1627 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1628 {
1629 struct udphdr *uh = NULL;
1630
1631 if (ip_proto == htons(ETH_P_IP)) {
1632 struct iphdr *iph = (struct iphdr *)skb->data;
1633
1634 if (iph->protocol == IPPROTO_UDP)
1635 uh = (struct udphdr *)(iph + 1);
1636 } else {
1637 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1638
1639 if (iph->nexthdr == IPPROTO_UDP)
1640 uh = (struct udphdr *)(iph + 1);
1641 }
1642 if (uh) {
1643 if (uh->check)
1644 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1645 else
1646 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1647 }
1648 }
1649 #endif
1650
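/* The bnxt_gro_func_*() variants below prepare an aggregated skb for
 * tcp_gro_complete().  Each variant locates the inner network and
 * transport headers from the offsets reported by its chip family; the
 * 5730x and 5731x variants also seed th->check with the TCP pseudo-header
 * checksum.
 */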
1651 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1652 int payload_off, int tcp_ts,
1653 struct sk_buff *skb)
1654 {
1655 #ifdef CONFIG_INET
1656 struct tcphdr *th;
1657 int len, nw_off;
1658 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1659 u32 hdr_info = tpa_info->hdr_info;
1660 bool loopback = false;
1661
1662 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1663 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1664 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1665
1666 /* If the packet is an internal loopback packet, the offsets will
1667 * have an extra 4 bytes.
1668 */
1669 if (inner_mac_off == 4) {
1670 loopback = true;
1671 } else if (inner_mac_off > 4) {
1672 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1673 ETH_HLEN - 2));
1674
1675 		/* We only support inner IPv4/IPv6. If we don't see the
1676 * correct protocol ID, it must be a loopback packet where
1677 * the offsets are off by 4.
1678 */
1679 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1680 loopback = true;
1681 }
1682 if (loopback) {
1683 /* internal loopback packet, subtract all offsets by 4 */
1684 inner_ip_off -= 4;
1685 inner_mac_off -= 4;
1686 outer_ip_off -= 4;
1687 }
1688
1689 nw_off = inner_ip_off - ETH_HLEN;
1690 skb_set_network_header(skb, nw_off);
1691 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1692 struct ipv6hdr *iph = ipv6_hdr(skb);
1693
1694 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1695 len = skb->len - skb_transport_offset(skb);
1696 th = tcp_hdr(skb);
1697 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1698 } else {
1699 struct iphdr *iph = ip_hdr(skb);
1700
1701 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1702 len = skb->len - skb_transport_offset(skb);
1703 th = tcp_hdr(skb);
1704 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1705 }
1706
1707 if (inner_mac_off) { /* tunnel */
1708 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1709 ETH_HLEN - 2));
1710
1711 bnxt_gro_tunnel(skb, proto);
1712 }
1713 #endif
1714 return skb;
1715 }
1716
1717 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1718 int payload_off, int tcp_ts,
1719 struct sk_buff *skb)
1720 {
1721 #ifdef CONFIG_INET
1722 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1723 u32 hdr_info = tpa_info->hdr_info;
1724 int iphdr_len, nw_off;
1725
1726 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1727 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1728 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1729
1730 nw_off = inner_ip_off - ETH_HLEN;
1731 skb_set_network_header(skb, nw_off);
1732 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1733 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1734 skb_set_transport_header(skb, nw_off + iphdr_len);
1735
1736 if (inner_mac_off) { /* tunnel */
1737 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1738 ETH_HLEN - 2));
1739
1740 bnxt_gro_tunnel(skb, proto);
1741 }
1742 #endif
1743 return skb;
1744 }
1745
1746 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1747 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1748
1749 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1750 int payload_off, int tcp_ts,
1751 struct sk_buff *skb)
1752 {
1753 #ifdef CONFIG_INET
1754 struct tcphdr *th;
1755 int len, nw_off, tcp_opt_len = 0;
1756
1757 if (tcp_ts)
1758 tcp_opt_len = 12;
1759
1760 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1761 struct iphdr *iph;
1762
1763 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1764 ETH_HLEN;
1765 skb_set_network_header(skb, nw_off);
1766 iph = ip_hdr(skb);
1767 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1768 len = skb->len - skb_transport_offset(skb);
1769 th = tcp_hdr(skb);
1770 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1771 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1772 struct ipv6hdr *iph;
1773
1774 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1775 ETH_HLEN;
1776 skb_set_network_header(skb, nw_off);
1777 iph = ipv6_hdr(skb);
1778 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1779 len = skb->len - skb_transport_offset(skb);
1780 th = tcp_hdr(skb);
1781 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1782 } else {
1783 dev_kfree_skb_any(skb);
1784 return NULL;
1785 }
1786
1787 if (nw_off) /* tunnel */
1788 bnxt_gro_tunnel(skb, skb->protocol);
1789 #endif
1790 return skb;
1791 }
1792
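/* Finish hardware GRO for an aggregated packet: set the segment count and
 * gso_size from the TPA_END completion, run the chip-specific gro_func to
 * fix up the headers, then let tcp_gro_complete() finalize the skb.
 */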
1793 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1794 struct bnxt_tpa_info *tpa_info,
1795 struct rx_tpa_end_cmp *tpa_end,
1796 struct rx_tpa_end_cmp_ext *tpa_end1,
1797 struct sk_buff *skb)
1798 {
1799 #ifdef CONFIG_INET
1800 int payload_off;
1801 u16 segs;
1802
1803 segs = TPA_END_TPA_SEGS(tpa_end);
1804 if (segs == 1)
1805 return skb;
1806
1807 NAPI_GRO_CB(skb)->count = segs;
1808 skb_shinfo(skb)->gso_size =
1809 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1810 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1811 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1812 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1813 else
1814 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1815 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1816 if (likely(skb))
1817 tcp_gro_complete(skb);
1818 #endif
1819 return skb;
1820 }
1821
1822 /* Given the cfa_code of a received packet, determine which
1823 * netdev (vf-rep or PF) the packet is destined to.
1824 */
1825 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1826 {
1827 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1828
1829 /* if vf-rep dev is NULL, it must belong to the PF */
1830 return dev ? dev : bp->dev;
1831 }
1832
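/* Handle a TPA_END completion: the aggregation started by bnxt_tpa_start()
 * is complete.  Build an skb from the parked head buffer (copying small
 * packets, otherwise allocating a replacement fragment), attach any
 * aggregation buffers, apply VLAN, RX hash and checksum offload results,
 * and optionally hand the skb to the GRO path.  Returns NULL if the packet
 * was dropped, or ERR_PTR(-EBUSY) if the completion ring does not yet hold
 * all of the aggregation entries.
 */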
1833 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1834 struct bnxt_cp_ring_info *cpr,
1835 u32 *raw_cons,
1836 struct rx_tpa_end_cmp *tpa_end,
1837 struct rx_tpa_end_cmp_ext *tpa_end1,
1838 u8 *event)
1839 {
1840 struct bnxt_napi *bnapi = cpr->bnapi;
1841 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1842 struct net_device *dev = bp->dev;
1843 u8 *data_ptr, agg_bufs;
1844 unsigned int len;
1845 struct bnxt_tpa_info *tpa_info;
1846 dma_addr_t mapping;
1847 struct sk_buff *skb;
1848 u16 idx = 0, agg_id;
1849 void *data;
1850 bool gro;
1851
1852 if (unlikely(bnapi->in_reset)) {
1853 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1854
1855 if (rc < 0)
1856 return ERR_PTR(-EBUSY);
1857 return NULL;
1858 }
1859
1860 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1861 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1862 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1863 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1864 tpa_info = &rxr->rx_tpa[agg_id];
1865 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1866 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1867 agg_bufs, tpa_info->agg_count);
1868 agg_bufs = tpa_info->agg_count;
1869 }
1870 tpa_info->agg_count = 0;
1871 *event |= BNXT_AGG_EVENT;
1872 bnxt_free_agg_idx(rxr, agg_id);
1873 idx = agg_id;
1874 gro = !!(bp->flags & BNXT_FLAG_GRO);
1875 } else {
1876 agg_id = TPA_END_AGG_ID(tpa_end);
1877 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1878 tpa_info = &rxr->rx_tpa[agg_id];
1879 idx = RING_CMP(*raw_cons);
1880 if (agg_bufs) {
1881 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1882 return ERR_PTR(-EBUSY);
1883
1884 *event |= BNXT_AGG_EVENT;
1885 idx = NEXT_CMP(idx);
1886 }
1887 gro = !!TPA_END_GRO(tpa_end);
1888 }
1889 data = tpa_info->data;
1890 data_ptr = tpa_info->data_ptr;
1891 prefetch(data_ptr);
1892 len = tpa_info->len;
1893 mapping = tpa_info->mapping;
1894
1895 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1896 bnxt_abort_tpa(cpr, idx, agg_bufs);
1897 if (agg_bufs > MAX_SKB_FRAGS)
1898 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1899 agg_bufs, (int)MAX_SKB_FRAGS);
1900 return NULL;
1901 }
1902
1903 if (len <= bp->rx_copybreak) {
1904 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1905 if (!skb) {
1906 bnxt_abort_tpa(cpr, idx, agg_bufs);
1907 cpr->sw_stats->rx.rx_oom_discards += 1;
1908 return NULL;
1909 }
1910 } else {
1911 u8 *new_data;
1912 dma_addr_t new_mapping;
1913
1914 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
1915 GFP_ATOMIC);
1916 if (!new_data) {
1917 bnxt_abort_tpa(cpr, idx, agg_bufs);
1918 cpr->sw_stats->rx.rx_oom_discards += 1;
1919 return NULL;
1920 }
1921
1922 tpa_info->data = new_data;
1923 tpa_info->data_ptr = new_data + bp->rx_offset;
1924 tpa_info->mapping = new_mapping;
1925
1926 skb = napi_build_skb(data, bp->rx_buf_size);
1927 dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
1928 bp->rx_buf_use_size, bp->rx_dir);
1929
1930 if (!skb) {
1931 page_pool_free_va(rxr->head_pool, data, true);
1932 bnxt_abort_tpa(cpr, idx, agg_bufs);
1933 cpr->sw_stats->rx.rx_oom_discards += 1;
1934 return NULL;
1935 }
1936 skb_mark_for_recycle(skb);
1937 skb_reserve(skb, bp->rx_offset);
1938 skb_put(skb, len);
1939 }
1940
1941 if (agg_bufs) {
1942 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
1943 true);
1944 if (!skb) {
1945 			/* Buffer reuse is already handled inside bnxt_rx_agg_netmems_skb(). */
1946 cpr->sw_stats->rx.rx_oom_discards += 1;
1947 return NULL;
1948 }
1949 }
1950
1951 if (tpa_info->cfa_code_valid)
1952 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1953 skb->protocol = eth_type_trans(skb, dev);
1954
1955 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1956 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1957
1958 if (tpa_info->vlan_valid &&
1959 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1960 __be16 vlan_proto = htons(tpa_info->metadata >>
1961 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1962 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1963
1964 if (eth_type_vlan(vlan_proto)) {
1965 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1966 } else {
1967 dev_kfree_skb(skb);
1968 return NULL;
1969 }
1970 }
1971
1972 skb_checksum_none_assert(skb);
1973 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1974 skb->ip_summed = CHECKSUM_UNNECESSARY;
1975 skb->csum_level =
1976 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1977 }
1978
1979 if (gro)
1980 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1981
1982 return skb;
1983 }
1984
1985 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1986 struct rx_agg_cmp *rx_agg)
1987 {
1988 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1989 struct bnxt_tpa_info *tpa_info;
1990
1991 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1992 tpa_info = &rxr->rx_tpa[agg_id];
1993 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1994 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1995 }
1996
1997 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1998 struct sk_buff *skb)
1999 {
2000 skb_mark_for_recycle(skb);
2001
2002 if (skb->dev != bp->dev) {
2003 /* this packet belongs to a vf-rep */
2004 bnxt_vf_rep_rx(bp, skb);
2005 return;
2006 }
2007 skb_record_rx_queue(skb, bnapi->index);
2008 napi_gro_receive(&bnapi->napi, skb);
2009 }
2010
2011 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
2012 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
2013 {
2014 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2015
2016 if (BNXT_PTP_RX_TS_VALID(flags))
2017 goto ts_valid;
2018 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
2019 return false;
2020
2021 ts_valid:
2022 *cmpl_ts = ts;
2023 return true;
2024 }
2025
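/* Extract the VLAN tag from an RX completion and attach it to the skb.
 * The legacy L2 completion carries the TCI and TPID in the flags2 and
 * metadata words, while the V3 completion encodes them in the metadata1
 * and metadata0 fields.  The skb is dropped if the TPID is not a
 * recognized VLAN ethertype.
 */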
2026 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
2027 struct rx_cmp *rxcmp,
2028 struct rx_cmp_ext *rxcmp1)
2029 {
2030 __be16 vlan_proto;
2031 u16 vtag;
2032
2033 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2034 __le32 flags2 = rxcmp1->rx_cmp_flags2;
2035 u32 meta_data;
2036
2037 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
2038 return skb;
2039
2040 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2041 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2042 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
2043 if (eth_type_vlan(vlan_proto))
2044 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2045 else
2046 goto vlan_err;
2047 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2048 if (RX_CMP_VLAN_VALID(rxcmp)) {
2049 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
2050
2051 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
2052 vlan_proto = htons(ETH_P_8021Q);
2053 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
2054 vlan_proto = htons(ETH_P_8021AD);
2055 else
2056 goto vlan_err;
2057 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
2058 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2059 }
2060 }
2061 return skb;
2062 vlan_err:
2063 skb_mark_for_recycle(skb);
2064 dev_kfree_skb(skb);
2065 return NULL;
2066 }
2067
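/* Map the RSS extended hash operation reported in a V3 RX completion to an
 * skb hash type (PKT_HASH_TYPE_L4 or PKT_HASH_TYPE_L3).
 */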
2068 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
2069 struct rx_cmp *rxcmp)
2070 {
2071 u8 ext_op;
2072
2073 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
2074 switch (ext_op) {
2075 case EXT_OP_INNER_4:
2076 case EXT_OP_OUTER_4:
2077 case EXT_OP_INNFL_3:
2078 case EXT_OP_OUTFL_3:
2079 return PKT_HASH_TYPE_L4;
2080 default:
2081 return PKT_HASH_TYPE_L3;
2082 }
2083 }
2084
2085 /* returns the following:
2086 * 1 - 1 packet successfully received
2087 * 0 - successful TPA_START, packet not completed yet
2088 * -EBUSY - completion ring does not have all the agg buffers yet
2089 * -ENOMEM - packet aborted due to out of memory
2090 * -EIO - packet aborted due to hw error indicated in BD
2091 */
2092 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2093 u32 *raw_cons, u8 *event)
2094 {
2095 struct bnxt_napi *bnapi = cpr->bnapi;
2096 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2097 struct net_device *dev = bp->dev;
2098 struct rx_cmp *rxcmp;
2099 struct rx_cmp_ext *rxcmp1;
2100 u32 tmp_raw_cons = *raw_cons;
2101 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
2102 struct skb_shared_info *sinfo;
2103 struct bnxt_sw_rx_bd *rx_buf;
2104 unsigned int len;
2105 u8 *data_ptr, agg_bufs, cmp_type;
2106 bool xdp_active = false;
2107 dma_addr_t dma_addr;
2108 struct sk_buff *skb;
2109 struct xdp_buff xdp;
2110 u32 flags, misc;
2111 u32 cmpl_ts;
2112 void *data;
2113 int rc = 0;
2114
2115 rxcmp = (struct rx_cmp *)
2116 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2117
2118 cmp_type = RX_CMP_TYPE(rxcmp);
2119
2120 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
2121 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2122 goto next_rx_no_prod_no_len;
2123 }
2124
2125 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2126 cp_cons = RING_CMP(tmp_raw_cons);
2127 rxcmp1 = (struct rx_cmp_ext *)
2128 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2129
2130 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2131 return -EBUSY;
2132
2133 /* The valid test of the entry must be done first before
2134 * reading any further.
2135 */
2136 dma_rmb();
2137 prod = rxr->rx_prod;
2138
2139 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2140 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2141 bnxt_tpa_start(bp, rxr, cmp_type,
2142 (struct rx_tpa_start_cmp *)rxcmp,
2143 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2144
2145 *event |= BNXT_RX_EVENT;
2146 goto next_rx_no_prod_no_len;
2147
2148 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2149 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2150 (struct rx_tpa_end_cmp *)rxcmp,
2151 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2152
2153 if (IS_ERR(skb))
2154 return -EBUSY;
2155
2156 rc = -ENOMEM;
2157 if (likely(skb)) {
2158 bnxt_deliver_skb(bp, bnapi, skb);
2159 rc = 1;
2160 }
2161 *event |= BNXT_RX_EVENT;
2162 goto next_rx_no_prod_no_len;
2163 }
2164
2165 cons = rxcmp->rx_cmp_opaque;
2166 if (unlikely(cons != rxr->rx_next_cons)) {
2167 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2168
2169 /* 0xffff is forced error, don't print it */
2170 if (rxr->rx_next_cons != 0xffff)
2171 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2172 cons, rxr->rx_next_cons);
2173 bnxt_sched_reset_rxr(bp, rxr);
2174 if (rc1)
2175 return rc1;
2176 goto next_rx_no_prod_no_len;
2177 }
2178 rx_buf = &rxr->rx_buf_ring[cons];
2179 data = rx_buf->data;
2180 data_ptr = rx_buf->data_ptr;
2181 prefetch(data_ptr);
2182
2183 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2184 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2185
2186 if (agg_bufs) {
2187 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2188 return -EBUSY;
2189
2190 cp_cons = NEXT_CMP(cp_cons);
2191 *event |= BNXT_AGG_EVENT;
2192 }
2193 *event |= BNXT_RX_EVENT;
2194
2195 rx_buf->data = NULL;
2196 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2197 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2198
2199 bnxt_reuse_rx_data(rxr, cons, data);
2200 if (agg_bufs)
2201 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2202 false);
2203
2204 rc = -EIO;
2205 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2206 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
2207 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2208 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2209 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2210 rx_err);
2211 bnxt_sched_reset_rxr(bp, rxr);
2212 }
2213 }
2214 goto next_rx_no_len;
2215 }
2216
2217 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2218 len = flags >> RX_CMP_LEN_SHIFT;
2219 dma_addr = rx_buf->mapping;
2220
2221 if (bnxt_xdp_attached(bp, rxr)) {
2222 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2223 if (agg_bufs) {
2224 u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
2225 cp_cons,
2226 agg_bufs,
2227 false);
2228 if (!frag_len)
2229 goto oom_next_rx;
2230
2231 }
2232 xdp_active = true;
2233 }
2234
2235 if (xdp_active) {
2236 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
2237 rc = 1;
2238 goto next_rx;
2239 }
2240 if (xdp_buff_has_frags(&xdp)) {
2241 sinfo = xdp_get_shared_info_from_buff(&xdp);
2242 agg_bufs = sinfo->nr_frags;
2243 } else {
2244 agg_bufs = 0;
2245 }
2246 }
2247
2248 if (len <= bp->rx_copybreak) {
2249 if (!xdp_active)
2250 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2251 else
2252 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
2253 bnxt_reuse_rx_data(rxr, cons, data);
2254 if (!skb) {
2255 if (agg_bufs) {
2256 if (!xdp_active)
2257 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2258 agg_bufs, false);
2259 else
2260 bnxt_xdp_buff_frags_free(rxr, &xdp);
2261 }
2262 goto oom_next_rx;
2263 }
2264 } else {
2265 u32 payload;
2266
2267 if (rx_buf->data_ptr == data_ptr)
2268 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2269 else
2270 payload = 0;
2271 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2272 payload | len);
2273 if (!skb)
2274 goto oom_next_rx;
2275 }
2276
2277 if (agg_bufs) {
2278 if (!xdp_active) {
2279 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
2280 agg_bufs, false);
2281 if (!skb)
2282 goto oom_next_rx;
2283 } else {
2284 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
2285 rxr->page_pool, &xdp);
2286 if (!skb) {
2287 /* we should be able to free the old skb here */
2288 bnxt_xdp_buff_frags_free(rxr, &xdp);
2289 goto oom_next_rx;
2290 }
2291 }
2292 }
2293
2294 if (RX_CMP_HASH_VALID(rxcmp)) {
2295 enum pkt_hash_types type;
2296
2297 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2298 type = bnxt_rss_ext_op(bp, rxcmp);
2299 } else {
2300 u32 itypes = RX_CMP_ITYPES(rxcmp);
2301
2302 if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
2303 itypes == RX_CMP_FLAGS_ITYPE_UDP)
2304 type = PKT_HASH_TYPE_L4;
2305 else
2306 type = PKT_HASH_TYPE_L3;
2307 }
2308 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2309 }
2310
2311 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2312 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2313 skb->protocol = eth_type_trans(skb, dev);
2314
2315 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2316 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2317 if (!skb)
2318 goto next_rx;
2319 }
2320
2321 skb_checksum_none_assert(skb);
2322 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2323 if (dev->features & NETIF_F_RXCSUM) {
2324 skb->ip_summed = CHECKSUM_UNNECESSARY;
2325 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2326 }
2327 } else {
2328 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2329 if (dev->features & NETIF_F_RXCSUM)
2330 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
2331 }
2332 }
2333
2334 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2335 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2336 u64 ns, ts;
2337
2338 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2339 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2340
2341 ns = bnxt_timecounter_cyc2time(ptp, ts);
2342 memset(skb_hwtstamps(skb), 0,
2343 sizeof(*skb_hwtstamps(skb)));
2344 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2345 }
2346 }
2347 }
2348 bnxt_deliver_skb(bp, bnapi, skb);
2349 rc = 1;
2350
2351 next_rx:
2352 cpr->rx_packets += 1;
2353 cpr->rx_bytes += len;
2354
2355 next_rx_no_len:
2356 rxr->rx_prod = NEXT_RX(prod);
2357 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2358
2359 next_rx_no_prod_no_len:
2360 *raw_cons = tmp_raw_cons;
2361
2362 return rc;
2363
2364 oom_next_rx:
2365 cpr->sw_stats->rx.rx_oom_discards += 1;
2366 rc = -ENOMEM;
2367 goto next_rx;
2368 }
2369
2370 /* In netpoll mode, if we are using a combined completion ring, we need to
2371 * discard the rx packets and recycle the buffers.
2372 */
2373 static int bnxt_force_rx_discard(struct bnxt *bp,
2374 struct bnxt_cp_ring_info *cpr,
2375 u32 *raw_cons, u8 *event)
2376 {
2377 u32 tmp_raw_cons = *raw_cons;
2378 struct rx_cmp_ext *rxcmp1;
2379 struct rx_cmp *rxcmp;
2380 u16 cp_cons;
2381 u8 cmp_type;
2382 int rc;
2383
2384 cp_cons = RING_CMP(tmp_raw_cons);
2385 rxcmp = (struct rx_cmp *)
2386 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2387
2388 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2389 cp_cons = RING_CMP(tmp_raw_cons);
2390 rxcmp1 = (struct rx_cmp_ext *)
2391 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2392
2393 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2394 return -EBUSY;
2395
2396 /* The valid test of the entry must be done first before
2397 * reading any further.
2398 */
2399 dma_rmb();
2400 cmp_type = RX_CMP_TYPE(rxcmp);
2401 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2402 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2403 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2404 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2405 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2406 struct rx_tpa_end_cmp_ext *tpa_end1;
2407
2408 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2409 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2410 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2411 }
2412 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2413 if (rc && rc != -EBUSY)
2414 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2415 return rc;
2416 }
2417
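/* Read one of the firmware health status registers.  Depending on the
 * register type, the value comes from PCI config space, a mapped GRC
 * window, or directly from BAR0/BAR1.
 */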
2418 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2419 {
2420 struct bnxt_fw_health *fw_health = bp->fw_health;
2421 u32 reg = fw_health->regs[reg_idx];
2422 u32 reg_type, reg_off, val = 0;
2423
2424 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2425 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2426 switch (reg_type) {
2427 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2428 pci_read_config_dword(bp->pdev, reg_off, &val);
2429 break;
2430 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2431 reg_off = fw_health->mapped_regs[reg_idx];
2432 fallthrough;
2433 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2434 val = readl(bp->bar0 + reg_off);
2435 break;
2436 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2437 val = readl(bp->bar1 + reg_off);
2438 break;
2439 }
2440 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2441 val &= fw_health->fw_reset_inprog_reg_mask;
2442 return val;
2443 }
2444
2445 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2446 {
2447 int i;
2448
2449 for (i = 0; i < bp->rx_nr_rings; i++) {
2450 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2451 struct bnxt_ring_grp_info *grp_info;
2452
2453 grp_info = &bp->grp_info[grp_idx];
2454 if (grp_info->agg_fw_ring_id == ring_id)
2455 return grp_idx;
2456 }
2457 return INVALID_HW_RING_ID;
2458 }
2459
2460 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2461 {
2462 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2463
2464 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2465 return link_info->force_link_speed2;
2466 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2467 return link_info->force_pam4_link_speed;
2468 return link_info->force_link_speed;
2469 }
2470
2471 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2472 {
2473 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2474
2475 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2476 link_info->req_link_speed = link_info->force_link_speed2;
2477 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2478 switch (link_info->req_link_speed) {
2479 case BNXT_LINK_SPEED_50GB_PAM4:
2480 case BNXT_LINK_SPEED_100GB_PAM4:
2481 case BNXT_LINK_SPEED_200GB_PAM4:
2482 case BNXT_LINK_SPEED_400GB_PAM4:
2483 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2484 break;
2485 case BNXT_LINK_SPEED_100GB_PAM4_112:
2486 case BNXT_LINK_SPEED_200GB_PAM4_112:
2487 case BNXT_LINK_SPEED_400GB_PAM4_112:
2488 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2489 break;
2490 default:
2491 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2492 }
2493 return;
2494 }
2495 link_info->req_link_speed = link_info->force_link_speed;
2496 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2497 if (link_info->force_pam4_link_speed) {
2498 link_info->req_link_speed = link_info->force_pam4_link_speed;
2499 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2500 }
2501 }
2502
2503 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2504 {
2505 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2506
2507 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2508 link_info->advertising = link_info->auto_link_speeds2;
2509 return;
2510 }
2511 link_info->advertising = link_info->auto_link_speeds;
2512 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2513 }
2514
2515 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2516 {
2517 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2518
2519 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2520 if (link_info->req_link_speed != link_info->force_link_speed2)
2521 return true;
2522 return false;
2523 }
2524 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2525 link_info->req_link_speed != link_info->force_link_speed)
2526 return true;
2527 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2528 link_info->req_link_speed != link_info->force_pam4_link_speed)
2529 return true;
2530 return false;
2531 }
2532
2533 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2534 {
2535 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2536
2537 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2538 if (link_info->advertising != link_info->auto_link_speeds2)
2539 return true;
2540 return false;
2541 }
2542 if (link_info->advertising != link_info->auto_link_speeds ||
2543 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2544 return true;
2545 return false;
2546 }
2547
2548 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
2549 {
2550 u32 flags = bp->ctx->ctx_arr[type].flags;
2551
2552 return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
2553 ((flags & BNXT_CTX_MEM_FW_TRACE) ||
2554 (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
2555 }
2556
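/* Set up backing-store trace tracking for one context memory type: locate
 * the last byte of the last page backing the trace buffer and write the
 * magic byte there; if that byte is later found overwritten, the firmware
 * producer has wrapped the buffer.
 */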
2557 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
2558 {
2559 u32 mem_size, pages, rem_bytes, magic_byte_offset;
2560 u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
2561 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
2562 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
2563 struct bnxt_bs_trace_info *bs_trace;
2564 int last_pg;
2565
2566 if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
2567 return;
2568
2569 mem_size = ctxm->max_entries * ctxm->entry_size;
2570 rem_bytes = mem_size % BNXT_PAGE_SIZE;
2571 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
2572
2573 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
2574 magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
2575
2576 rmem = &ctx_pg[0].ring_mem;
2577 bs_trace = &bp->bs_trace[trace_type];
2578 bs_trace->ctx_type = ctxm->type;
2579 bs_trace->trace_type = trace_type;
2580 if (pages > MAX_CTX_PAGES) {
2581 int last_pg_dir = rmem->nr_pages - 1;
2582
2583 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
2584 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
2585 } else {
2586 bs_trace->magic_byte = rmem->pg_arr[last_pg];
2587 }
2588 bs_trace->magic_byte += magic_byte_offset;
2589 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
2590 }
2591
2592 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
2593 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
2594 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
2595
2596 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
2597 (((data2) & \
2598 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
2599 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
2600
2601 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2602 ((data2) & \
2603 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2604
2605 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2606 (((data2) & \
2607 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2608 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2609
2610 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2611 ((data1) & \
2612 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2613
2614 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2615 (((data1) & \
2616 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2617 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
2618
2619 /* Return true if the workqueue has to be scheduled */
2620 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2621 {
2622 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2623
2624 switch (err_type) {
2625 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2626 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2627 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2628 break;
2629 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2630 netdev_warn(bp->dev, "Pause Storm detected!\n");
2631 break;
2632 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2633 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2634 break;
2635 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2636 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2637 char *threshold_type;
2638 bool notify = false;
2639 char *dir_str;
2640
2641 switch (type) {
2642 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2643 threshold_type = "warning";
2644 break;
2645 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2646 threshold_type = "critical";
2647 break;
2648 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2649 threshold_type = "fatal";
2650 break;
2651 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2652 threshold_type = "shutdown";
2653 break;
2654 default:
2655 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2656 return false;
2657 }
2658 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2659 dir_str = "above";
2660 notify = true;
2661 } else {
2662 dir_str = "below";
2663 }
2664 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2665 dir_str, threshold_type);
2666 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2667 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2668 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2669 if (notify) {
2670 bp->thermal_threshold_type = type;
2671 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2672 return true;
2673 }
2674 return false;
2675 }
2676 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
2677 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
2678 break;
2679 default:
2680 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2681 err_type);
2682 break;
2683 }
2684 return false;
2685 }
2686
2687 #define BNXT_GET_EVENT_PORT(data) \
2688 ((data) & \
2689 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2690
2691 #define BNXT_EVENT_RING_TYPE(data2) \
2692 ((data2) & \
2693 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2694
2695 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2696 (BNXT_EVENT_RING_TYPE(data2) == \
2697 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2698
2699 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2700 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2701 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2702
2703 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2704 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2705 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2706
2707 #define BNXT_PHC_BITS 48
2708
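/* Process one firmware async event completion.  Most events set a bit in
 * bp->sp_event and schedule the slow-path workqueue; events that need no
 * deferred work jump straight to the exit label, which forwards the event
 * to any registered ULPs.
 */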
2709 static int bnxt_async_event_process(struct bnxt *bp,
2710 struct hwrm_async_event_cmpl *cmpl)
2711 {
2712 u16 event_id = le16_to_cpu(cmpl->event_id);
2713 u32 data1 = le32_to_cpu(cmpl->event_data1);
2714 u32 data2 = le32_to_cpu(cmpl->event_data2);
2715
2716 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2717 event_id, data1, data2);
2718
2719 	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2720 switch (event_id) {
2721 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2722 struct bnxt_link_info *link_info = &bp->link_info;
2723
2724 if (BNXT_VF(bp))
2725 goto async_event_process_exit;
2726
2727 /* print unsupported speed warning in forced speed mode only */
2728 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2729 (data1 & 0x20000)) {
2730 u16 fw_speed = bnxt_get_force_speed(link_info);
2731 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2732
2733 if (speed != SPEED_UNKNOWN)
2734 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2735 speed);
2736 }
2737 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2738 }
2739 fallthrough;
2740 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2741 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2742 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2743 fallthrough;
2744 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2745 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2746 break;
2747 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2748 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2749 break;
2750 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2751 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2752
2753 if (BNXT_VF(bp))
2754 break;
2755
2756 if (bp->pf.port_id != port_id)
2757 break;
2758
2759 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2760 break;
2761 }
2762 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2763 if (BNXT_PF(bp))
2764 goto async_event_process_exit;
2765 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2766 break;
2767 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2768 char *type_str = "Solicited";
2769
2770 if (!bp->fw_health)
2771 goto async_event_process_exit;
2772
2773 bp->fw_reset_timestamp = jiffies;
2774 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2775 if (!bp->fw_reset_min_dsecs)
2776 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2777 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2778 if (!bp->fw_reset_max_dsecs)
2779 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2780 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2781 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2782 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2783 type_str = "Fatal";
2784 bp->fw_health->fatalities++;
2785 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2786 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2787 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2788 type_str = "Non-fatal";
2789 bp->fw_health->survivals++;
2790 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2791 }
2792 netif_warn(bp, hw, bp->dev,
2793 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2794 type_str, data1, data2,
2795 bp->fw_reset_min_dsecs * 100,
2796 bp->fw_reset_max_dsecs * 100);
2797 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2798 break;
2799 }
2800 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2801 struct bnxt_fw_health *fw_health = bp->fw_health;
2802 char *status_desc = "healthy";
2803 u32 status;
2804
2805 if (!fw_health)
2806 goto async_event_process_exit;
2807
2808 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2809 fw_health->enabled = false;
2810 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2811 break;
2812 }
2813 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2814 fw_health->tmr_multiplier =
2815 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2816 bp->current_interval * 10);
2817 fw_health->tmr_counter = fw_health->tmr_multiplier;
2818 if (!fw_health->enabled)
2819 fw_health->last_fw_heartbeat =
2820 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2821 fw_health->last_fw_reset_cnt =
2822 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2823 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2824 if (status != BNXT_FW_STATUS_HEALTHY)
2825 status_desc = "unhealthy";
2826 netif_info(bp, drv, bp->dev,
2827 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2828 fw_health->primary ? "primary" : "backup", status,
2829 status_desc, fw_health->last_fw_reset_cnt);
2830 if (!fw_health->enabled) {
2831 /* Make sure tmr_counter is set and visible to
2832 * bnxt_health_check() before setting enabled to true.
2833 */
2834 smp_wmb();
2835 fw_health->enabled = true;
2836 }
2837 goto async_event_process_exit;
2838 }
2839 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2840 netif_notice(bp, hw, bp->dev,
2841 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2842 data1, data2);
2843 goto async_event_process_exit;
2844 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2845 struct bnxt_rx_ring_info *rxr;
2846 u16 grp_idx;
2847
2848 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2849 goto async_event_process_exit;
2850
2851 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2852 BNXT_EVENT_RING_TYPE(data2), data1);
2853 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2854 goto async_event_process_exit;
2855
2856 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2857 if (grp_idx == INVALID_HW_RING_ID) {
2858 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2859 data1);
2860 goto async_event_process_exit;
2861 }
2862 rxr = bp->bnapi[grp_idx]->rx_ring;
2863 bnxt_sched_reset_rxr(bp, rxr);
2864 goto async_event_process_exit;
2865 }
2866 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2867 struct bnxt_fw_health *fw_health = bp->fw_health;
2868
2869 netif_notice(bp, hw, bp->dev,
2870 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2871 data1, data2);
2872 if (fw_health) {
2873 fw_health->echo_req_data1 = data1;
2874 fw_health->echo_req_data2 = data2;
2875 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2876 break;
2877 }
2878 goto async_event_process_exit;
2879 }
2880 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2881 bnxt_ptp_pps_event(bp, data1, data2);
2882 goto async_event_process_exit;
2883 }
2884 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2885 if (bnxt_event_error_report(bp, data1, data2))
2886 break;
2887 goto async_event_process_exit;
2888 }
2889 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2890 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2891 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2892 if (BNXT_PTP_USE_RTC(bp)) {
2893 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2894 unsigned long flags;
2895 u64 ns;
2896
2897 if (!ptp)
2898 goto async_event_process_exit;
2899
2900 bnxt_ptp_update_current_time(bp);
2901 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2902 BNXT_PHC_BITS) | ptp->current_time);
2903 write_seqlock_irqsave(&ptp->ptp_lock, flags);
2904 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2905 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
2906 }
2907 break;
2908 }
2909 goto async_event_process_exit;
2910 }
2911 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2912 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2913
2914 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2915 goto async_event_process_exit;
2916 }
2917 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
2918 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
2919 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
2920
2921 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
2922 goto async_event_process_exit;
2923 }
2924 default:
2925 goto async_event_process_exit;
2926 }
2927 __bnxt_queue_sp_work(bp);
2928 async_event_process_exit:
2929 bnxt_ulp_async_events(bp, cmpl);
2930 return 0;
2931 }
2932
2933 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2934 {
2935 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2936 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2937 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2938 (struct hwrm_fwd_req_cmpl *)txcmp;
2939
2940 switch (cmpl_type) {
2941 case CMPL_BASE_TYPE_HWRM_DONE:
2942 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2943 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2944 break;
2945
2946 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2947 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2948
2949 if ((vf_id < bp->pf.first_vf_id) ||
2950 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2951 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2952 vf_id);
2953 return -EINVAL;
2954 }
2955
2956 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2957 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2958 break;
2959
2960 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2961 bnxt_async_event_process(bp,
2962 (struct hwrm_async_event_cmpl *)txcmp);
2963 break;
2964
2965 default:
2966 break;
2967 }
2968
2969 return 0;
2970 }
2971
2972 static bool bnxt_vnic_is_active(struct bnxt *bp)
2973 {
2974 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2975
2976 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
2977 }
2978
2979 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2980 {
2981 struct bnxt_napi *bnapi = dev_instance;
2982 struct bnxt *bp = bnapi->bp;
2983 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2984 u32 cons = RING_CMP(cpr->cp_raw_cons);
2985
2986 cpr->event_ctr++;
2987 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2988 napi_schedule(&bnapi->napi);
2989 return IRQ_HANDLED;
2990 }
2991
2992 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2993 {
2994 u32 raw_cons = cpr->cp_raw_cons;
2995 u16 cons = RING_CMP(raw_cons);
2996 struct tx_cmp *txcmp;
2997
2998 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2999
3000 return TX_CMP_VALID(txcmp, raw_cons);
3001 }
3002
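/* Core completion ring processing loop shared by the NAPI poll variants.
 * Walks the completion ring, dispatching TX, RX/TPA and HWRM completions
 * until the ring is empty or the budget is consumed, then records any
 * pending doorbell events in bnapi->events for __bnxt_poll_work_done().
 */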
3003 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3004 int budget)
3005 {
3006 struct bnxt_napi *bnapi = cpr->bnapi;
3007 u32 raw_cons = cpr->cp_raw_cons;
3008 bool flush_xdp = false;
3009 u32 cons;
3010 int rx_pkts = 0;
3011 u8 event = 0;
3012 struct tx_cmp *txcmp;
3013
3014 cpr->has_more_work = 0;
3015 cpr->had_work_done = 1;
3016 while (1) {
3017 u8 cmp_type;
3018 int rc;
3019
3020 cons = RING_CMP(raw_cons);
3021 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3022
3023 if (!TX_CMP_VALID(txcmp, raw_cons))
3024 break;
3025
3026 /* The valid test of the entry must be done first before
3027 * reading any further.
3028 */
3029 dma_rmb();
3030 cmp_type = TX_CMP_TYPE(txcmp);
3031 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
3032 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
3033 u32 opaque = txcmp->tx_cmp_opaque;
3034 struct bnxt_tx_ring_info *txr;
3035 u16 tx_freed;
3036
3037 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
3038 event |= BNXT_TX_CMP_EVENT;
3039 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
3040 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
3041 else
3042 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
3043 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
3044 bp->tx_ring_mask;
3045 /* return full budget so NAPI will complete. */
3046 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
3047 rx_pkts = budget;
3048 raw_cons = NEXT_RAW_CMP(raw_cons);
3049 if (budget)
3050 cpr->has_more_work = 1;
3051 break;
3052 }
3053 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
3054 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
3055 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
3056 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
3057 if (likely(budget))
3058 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3059 else
3060 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
3061 &event);
3062 if (event & BNXT_REDIRECT_EVENT)
3063 flush_xdp = true;
3064 if (likely(rc >= 0))
3065 rx_pkts += rc;
3066 /* Increment rx_pkts when rc is -ENOMEM to count towards
3067 * the NAPI budget. Otherwise, we may potentially loop
3068 * here forever if we consistently cannot allocate
3069 * buffers.
3070 */
3071 else if (rc == -ENOMEM && budget)
3072 rx_pkts++;
3073 else if (rc == -EBUSY) /* partial completion */
3074 break;
3075 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
3076 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
3077 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
3078 bnxt_hwrm_handler(bp, txcmp);
3079 }
3080 raw_cons = NEXT_RAW_CMP(raw_cons);
3081
3082 if (rx_pkts && rx_pkts == budget) {
3083 cpr->has_more_work = 1;
3084 break;
3085 }
3086 }
3087
3088 if (flush_xdp) {
3089 xdp_do_flush();
3090 event &= ~BNXT_REDIRECT_EVENT;
3091 }
3092
3093 if (event & BNXT_TX_EVENT) {
3094 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
3095 u16 prod = txr->tx_prod;
3096
3097 /* Sync BD data before updating doorbell */
3098 wmb();
3099
3100 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
3101 event &= ~BNXT_TX_EVENT;
3102 }
3103
3104 cpr->cp_raw_cons = raw_cons;
3105 bnapi->events |= event;
3106 return rx_pkts;
3107 }
3108
3109 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3110 int budget)
3111 {
3112 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
3113 bnapi->tx_int(bp, bnapi, budget);
3114
3115 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
3116 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3117
3118 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3119 bnapi->events &= ~BNXT_RX_EVENT;
3120 }
3121 if (bnapi->events & BNXT_AGG_EVENT) {
3122 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3123
3124 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3125 bnapi->events &= ~BNXT_AGG_EVENT;
3126 }
3127 }
3128
3129 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3130 int budget)
3131 {
3132 struct bnxt_napi *bnapi = cpr->bnapi;
3133 int rx_pkts;
3134
3135 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
3136
3137 /* ACK completion ring before freeing tx ring and producing new
3138 * buffers in rx/agg rings to prevent overflowing the completion
3139 * ring.
3140 */
3141 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3142
3143 __bnxt_poll_work_done(bp, bnapi, budget);
3144 return rx_pkts;
3145 }
3146
3147 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
3148 {
3149 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3150 struct bnxt *bp = bnapi->bp;
3151 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3152 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
3153 struct tx_cmp *txcmp;
3154 struct rx_cmp_ext *rxcmp1;
3155 u32 cp_cons, tmp_raw_cons;
3156 u32 raw_cons = cpr->cp_raw_cons;
3157 bool flush_xdp = false;
3158 u32 rx_pkts = 0;
3159 u8 event = 0;
3160
3161 while (1) {
3162 int rc;
3163
3164 cp_cons = RING_CMP(raw_cons);
3165 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3166
3167 if (!TX_CMP_VALID(txcmp, raw_cons))
3168 break;
3169
3170 /* The valid test of the entry must be done first before
3171 * reading any further.
3172 */
3173 dma_rmb();
3174 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
3175 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
3176 cp_cons = RING_CMP(tmp_raw_cons);
3177 rxcmp1 = (struct rx_cmp_ext *)
3178 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3179
3180 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
3181 break;
3182
3183 /* force an error to recycle the buffer */
3184 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3185 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3186
3187 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3188 if (likely(rc == -EIO) && budget)
3189 rx_pkts++;
3190 else if (rc == -EBUSY) /* partial completion */
3191 break;
3192 if (event & BNXT_REDIRECT_EVENT)
3193 flush_xdp = true;
3194 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3195 CMPL_BASE_TYPE_HWRM_DONE)) {
3196 bnxt_hwrm_handler(bp, txcmp);
3197 } else {
3198 netdev_err(bp->dev,
3199 "Invalid completion received on special ring\n");
3200 }
3201 raw_cons = NEXT_RAW_CMP(raw_cons);
3202
3203 if (rx_pkts == budget)
3204 break;
3205 }
3206
3207 cpr->cp_raw_cons = raw_cons;
3208 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3209 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3210
3211 if (event & BNXT_AGG_EVENT)
3212 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3213 if (flush_xdp)
3214 xdp_do_flush();
3215
3216 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3217 napi_complete_done(napi, rx_pkts);
3218 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3219 }
3220 return rx_pkts;
3221 }
3222
3223 static int bnxt_poll(struct napi_struct *napi, int budget)
3224 {
3225 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3226 struct bnxt *bp = bnapi->bp;
3227 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3228 int work_done = 0;
3229
3230 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3231 napi_complete(napi);
3232 return 0;
3233 }
3234 while (1) {
3235 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3236
3237 if (work_done >= budget) {
3238 if (!budget)
3239 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3240 break;
3241 }
3242
3243 if (!bnxt_has_work(bp, cpr)) {
3244 if (napi_complete_done(napi, work_done))
3245 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3246 break;
3247 }
3248 }
3249 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3250 struct dim_sample dim_sample = {};
3251
3252 dim_update_sample(cpr->event_ctr,
3253 cpr->rx_packets,
3254 cpr->rx_bytes,
3255 &dim_sample);
3256 net_dim(&cpr->dim, &dim_sample);
3257 }
3258 return work_done;
3259 }
3260
3261 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3262 {
3263 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3264 int i, work_done = 0;
3265
3266 for (i = 0; i < cpr->cp_ring_count; i++) {
3267 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3268
3269 if (cpr2->had_nqe_notify) {
3270 work_done += __bnxt_poll_work(bp, cpr2,
3271 budget - work_done);
3272 cpr->has_more_work |= cpr2->has_more_work;
3273 }
3274 }
3275 return work_done;
3276 }
3277
3278 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3279 u64 dbr_type, int budget)
3280 {
3281 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3282 int i;
3283
3284 for (i = 0; i < cpr->cp_ring_count; i++) {
3285 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3286 struct bnxt_db_info *db;
3287
3288 if (cpr2->had_work_done) {
3289 u32 tgl = 0;
3290
3291 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3292 cpr2->had_nqe_notify = 0;
3293 tgl = cpr2->toggle;
3294 }
3295 db = &cpr2->cp_db;
3296 bnxt_writeq(bp,
3297 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3298 DB_RING_IDX(db, cpr2->cp_raw_cons),
3299 db->doorbell);
3300 cpr2->had_work_done = 0;
3301 }
3302 }
3303 __bnxt_poll_work_done(bp, bnapi, budget);
3304 }
3305
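/* NAPI poll handler for P5 and newer chips, which use a two-level scheme:
 * the notification queue (NQ) indicates which completion rings have work,
 * and __bnxt_poll_work() then drains those rings within the budget.
 */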
3306 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3307 {
3308 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3309 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3310 struct bnxt_cp_ring_info *cpr_rx;
3311 u32 raw_cons = cpr->cp_raw_cons;
3312 struct bnxt *bp = bnapi->bp;
3313 struct nqe_cn *nqcmp;
3314 int work_done = 0;
3315 u32 cons;
3316
3317 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3318 napi_complete(napi);
3319 return 0;
3320 }
3321 if (cpr->has_more_work) {
3322 cpr->has_more_work = 0;
3323 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3324 }
3325 while (1) {
3326 u16 type;
3327
3328 cons = RING_CMP(raw_cons);
3329 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3330
3331 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3332 if (cpr->has_more_work)
3333 break;
3334
3335 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3336 budget);
3337 cpr->cp_raw_cons = raw_cons;
3338 if (napi_complete_done(napi, work_done))
3339 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3340 cpr->cp_raw_cons);
3341 goto poll_done;
3342 }
3343
3344 /* The valid test of the entry must be done first before
3345 * reading any further.
3346 */
3347 dma_rmb();
3348
3349 type = le16_to_cpu(nqcmp->type);
3350 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3351 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3352 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3353 struct bnxt_cp_ring_info *cpr2;
3354
3355 /* No more budget for RX work */
3356 if (budget && work_done >= budget &&
3357 cq_type == BNXT_NQ_HDL_TYPE_RX)
3358 break;
3359
3360 idx = BNXT_NQ_HDL_IDX(idx);
3361 cpr2 = &cpr->cp_ring_arr[idx];
3362 cpr2->had_nqe_notify = 1;
3363 cpr2->toggle = NQE_CN_TOGGLE(type);
3364 work_done += __bnxt_poll_work(bp, cpr2,
3365 budget - work_done);
3366 cpr->has_more_work |= cpr2->has_more_work;
3367 } else {
3368 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3369 }
3370 raw_cons = NEXT_RAW_CMP(raw_cons);
3371 }
3372 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3373 if (raw_cons != cpr->cp_raw_cons) {
3374 cpr->cp_raw_cons = raw_cons;
3375 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3376 }
3377 poll_done:
3378 cpr_rx = &cpr->cp_ring_arr[0];
3379 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3380 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
3381 struct dim_sample dim_sample = {};
3382
3383 dim_update_sample(cpr->event_ctr,
3384 cpr_rx->rx_packets,
3385 cpr_rx->rx_bytes,
3386 &dim_sample);
3387 net_dim(&cpr->dim, &dim_sample);
3388 }
3389 return work_done;
3390 }
3391
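/* Release all SKBs and XDP frames still attached to one TX ring,
 * unmapping their DMA buffers, and reset the BQL state of the queue.
 */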
3392 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
3393 struct bnxt_tx_ring_info *txr, int idx)
3394 {
3395 int i, max_idx;
3396 struct pci_dev *pdev = bp->pdev;
3397
3398 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3399
3400 for (i = 0; i < max_idx;) {
3401 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
3402 struct sk_buff *skb;
3403 int j, last;
3404
3405 if (idx < bp->tx_nr_rings_xdp &&
3406 tx_buf->action == XDP_REDIRECT) {
3407 dma_unmap_single(&pdev->dev,
3408 dma_unmap_addr(tx_buf, mapping),
3409 dma_unmap_len(tx_buf, len),
3410 DMA_TO_DEVICE);
3411 xdp_return_frame(tx_buf->xdpf);
3412 tx_buf->action = 0;
3413 tx_buf->xdpf = NULL;
3414 i++;
3415 continue;
3416 }
3417
3418 skb = tx_buf->skb;
3419 if (!skb) {
3420 i++;
3421 continue;
3422 }
3423
3424 tx_buf->skb = NULL;
3425
3426 if (tx_buf->is_push) {
3427 dev_kfree_skb(skb);
3428 i += 2;
3429 continue;
3430 }
3431
3432 dma_unmap_single(&pdev->dev,
3433 dma_unmap_addr(tx_buf, mapping),
3434 skb_headlen(skb),
3435 DMA_TO_DEVICE);
3436
3437 last = tx_buf->nr_frags;
3438 i += 2;
3439 for (j = 0; j < last; j++, i++) {
3440 int ring_idx = i & bp->tx_ring_mask;
3441 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
3442
3443 tx_buf = &txr->tx_buf_ring[ring_idx];
3444 netmem_dma_unmap_page_attrs(&pdev->dev,
3445 dma_unmap_addr(tx_buf,
3446 mapping),
3447 skb_frag_size(frag),
3448 DMA_TO_DEVICE, 0);
3449 }
3450 dev_kfree_skb(skb);
3451 }
3452 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
3453 }
3454
3455 static void bnxt_free_tx_skbs(struct bnxt *bp)
3456 {
3457 int i;
3458
3459 if (!bp->tx_ring)
3460 return;
3461
3462 for (i = 0; i < bp->tx_nr_rings; i++) {
3463 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3464
3465 if (!txr->tx_buf_ring)
3466 continue;
3467
3468 bnxt_free_one_tx_ring_skbs(bp, txr, i);
3469 }
3470
3471 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
3472 bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
3473 }
3474
3475 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3476 {
3477 int i, max_idx;
3478
3479 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3480
3481 for (i = 0; i < max_idx; i++) {
3482 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3483 void *data = rx_buf->data;
3484
3485 if (!data)
3486 continue;
3487
3488 rx_buf->data = NULL;
3489 if (BNXT_RX_PAGE_MODE(bp))
3490 page_pool_recycle_direct(rxr->page_pool, data);
3491 else
3492 page_pool_free_va(rxr->head_pool, data, true);
3493 }
3494 }
3495
3496 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3497 {
3498 int i, max_idx;
3499
3500 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3501
3502 for (i = 0; i < max_idx; i++) {
3503 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3504 netmem_ref netmem = rx_agg_buf->netmem;
3505
3506 if (!netmem)
3507 continue;
3508
3509 rx_agg_buf->netmem = 0;
3510 __clear_bit(i, rxr->rx_agg_bmap);
3511
3512 page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
3513 }
3514 }
3515
3516 static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
3517 struct bnxt_rx_ring_info *rxr)
3518 {
3519 int i;
3520
3521 for (i = 0; i < bp->max_tpa; i++) {
3522 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3523 u8 *data = tpa_info->data;
3524
3525 if (!data)
3526 continue;
3527
3528 tpa_info->data = NULL;
3529 page_pool_free_va(rxr->head_pool, data, false);
3530 }
3531 }
3532
3533 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
3534 struct bnxt_rx_ring_info *rxr)
3535 {
3536 struct bnxt_tpa_idx_map *map;
3537
3538 if (!rxr->rx_tpa)
3539 goto skip_rx_tpa_free;
3540
3541 bnxt_free_one_tpa_info_data(bp, rxr);
3542
3543 skip_rx_tpa_free:
3544 if (!rxr->rx_buf_ring)
3545 goto skip_rx_buf_free;
3546
3547 bnxt_free_one_rx_ring(bp, rxr);
3548
3549 skip_rx_buf_free:
3550 if (!rxr->rx_agg_ring)
3551 goto skip_rx_agg_free;
3552
3553 bnxt_free_one_rx_agg_ring(bp, rxr);
3554
3555 skip_rx_agg_free:
3556 map = rxr->rx_tpa_idx_map;
3557 if (map)
3558 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3559 }
3560
3561 static void bnxt_free_rx_skbs(struct bnxt *bp)
3562 {
3563 int i;
3564
3565 if (!bp->rx_ring)
3566 return;
3567
3568 for (i = 0; i < bp->rx_nr_rings; i++)
3569 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
3570 }
3571
3572 static void bnxt_free_skbs(struct bnxt *bp)
3573 {
3574 bnxt_free_tx_skbs(bp);
3575 bnxt_free_rx_skbs(bp);
3576 }
3577
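/* Pre-initialize firmware context memory with the required init value,
 * either the whole block or only one byte per entry at init_offset.
 */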
3578 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3579 {
3580 u8 init_val = ctxm->init_value;
3581 u16 offset = ctxm->init_offset;
3582 u8 *p2 = p;
3583 int i;
3584
3585 if (!init_val)
3586 return;
3587 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3588 memset(p, init_val, len);
3589 return;
3590 }
3591 for (i = 0; i < len; i += ctxm->entry_size)
3592 *(p2 + i + offset) = init_val;
3593 }
3594
3595 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
3596 void *buf, size_t offset, size_t head,
3597 size_t tail)
3598 {
3599 int i, head_page, start_idx, source_offset;
3600 size_t len, rem_len, total_len, max_bytes;
3601
3602 head_page = head / rmem->page_size;
3603 source_offset = head % rmem->page_size;
3604 total_len = (tail - head) & MAX_CTX_BYTES_MASK;
3605 if (!total_len)
3606 total_len = MAX_CTX_BYTES;
3607 start_idx = head_page % MAX_CTX_PAGES;
3608 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
3609 source_offset;
3610 total_len = min(total_len, max_bytes);
3611 rem_len = total_len;
3612
3613 for (i = start_idx; rem_len; i++, source_offset = 0) {
3614 len = min((size_t)(rmem->page_size - source_offset), rem_len);
3615 if (buf)
3616 memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
3617 len);
3618 offset += len;
3619 rem_len -= len;
3620 }
3621 return total_len;
3622 }
3623
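/* Free the DMA pages, the optional page table and the vmalloc'ed
 * software ring described by a ring memory descriptor.
 */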
3624 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3625 {
3626 struct pci_dev *pdev = bp->pdev;
3627 int i;
3628
3629 if (!rmem->pg_arr)
3630 goto skip_pages;
3631
3632 for (i = 0; i < rmem->nr_pages; i++) {
3633 if (!rmem->pg_arr[i])
3634 continue;
3635
3636 dma_free_coherent(&pdev->dev, rmem->page_size,
3637 rmem->pg_arr[i], rmem->dma_arr[i]);
3638
3639 rmem->pg_arr[i] = NULL;
3640 }
3641 skip_pages:
3642 if (rmem->pg_tbl) {
3643 size_t pg_tbl_size = rmem->nr_pages * 8;
3644
3645 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3646 pg_tbl_size = rmem->page_size;
3647 dma_free_coherent(&pdev->dev, pg_tbl_size,
3648 rmem->pg_tbl, rmem->pg_tbl_map);
3649 rmem->pg_tbl = NULL;
3650 }
3651 if (rmem->vmem_size && *rmem->vmem) {
3652 vfree(*rmem->vmem);
3653 *rmem->vmem = NULL;
3654 }
3655 }
3656
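/* Allocate the DMA pages for a ring, plus a page table when the ring
 * spans multiple pages or uses indirection, setting the PTE valid,
 * next-to-last and last bits as required, and allocate the software
 * ring (vmem) if one is requested.
 */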
3657 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3658 {
3659 struct pci_dev *pdev = bp->pdev;
3660 u64 valid_bit = 0;
3661 int i;
3662
3663 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3664 valid_bit = PTU_PTE_VALID;
3665 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3666 size_t pg_tbl_size = rmem->nr_pages * 8;
3667
3668 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3669 pg_tbl_size = rmem->page_size;
3670 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3671 &rmem->pg_tbl_map,
3672 GFP_KERNEL);
3673 if (!rmem->pg_tbl)
3674 return -ENOMEM;
3675 }
3676
3677 for (i = 0; i < rmem->nr_pages; i++) {
3678 u64 extra_bits = valid_bit;
3679
3680 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3681 rmem->page_size,
3682 &rmem->dma_arr[i],
3683 GFP_KERNEL);
3684 if (!rmem->pg_arr[i])
3685 return -ENOMEM;
3686
3687 if (rmem->ctx_mem)
3688 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3689 rmem->page_size);
3690 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3691 if (i == rmem->nr_pages - 2 &&
3692 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3693 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3694 else if (i == rmem->nr_pages - 1 &&
3695 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3696 extra_bits |= PTU_PTE_LAST;
3697 rmem->pg_tbl[i] =
3698 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3699 }
3700 }
3701
3702 if (rmem->vmem_size) {
3703 *rmem->vmem = vzalloc(rmem->vmem_size);
3704 if (!(*rmem->vmem))
3705 return -ENOMEM;
3706 }
3707 return 0;
3708 }
3709
3710 static void bnxt_free_one_tpa_info(struct bnxt *bp,
3711 struct bnxt_rx_ring_info *rxr)
3712 {
3713 int i;
3714
3715 kfree(rxr->rx_tpa_idx_map);
3716 rxr->rx_tpa_idx_map = NULL;
3717 if (rxr->rx_tpa) {
3718 for (i = 0; i < bp->max_tpa; i++) {
3719 kfree(rxr->rx_tpa[i].agg_arr);
3720 rxr->rx_tpa[i].agg_arr = NULL;
3721 }
3722 }
3723 kfree(rxr->rx_tpa);
3724 rxr->rx_tpa = NULL;
3725 }
3726
3727 static void bnxt_free_tpa_info(struct bnxt *bp)
3728 {
3729 int i;
3730
3731 for (i = 0; i < bp->rx_nr_rings; i++) {
3732 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3733
3734 bnxt_free_one_tpa_info(bp, rxr);
3735 }
3736 }
3737
3738 static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
3739 struct bnxt_rx_ring_info *rxr)
3740 {
3741 struct rx_agg_cmp *agg;
3742 int i;
3743
3744 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3745 GFP_KERNEL);
3746 if (!rxr->rx_tpa)
3747 return -ENOMEM;
3748
3749 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3750 return 0;
3751 for (i = 0; i < bp->max_tpa; i++) {
3752 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3753 if (!agg)
3754 return -ENOMEM;
3755 rxr->rx_tpa[i].agg_arr = agg;
3756 }
3757 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3758 GFP_KERNEL);
3759 if (!rxr->rx_tpa_idx_map)
3760 return -ENOMEM;
3761
3762 return 0;
3763 }
3764
3765 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3766 {
3767 int i, rc;
3768
3769 bp->max_tpa = MAX_TPA;
3770 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3771 if (!bp->max_tpa_v2)
3772 return 0;
3773 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3774 }
3775
3776 for (i = 0; i < bp->rx_nr_rings; i++) {
3777 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3778
3779 rc = bnxt_alloc_one_tpa_info(bp, rxr);
3780 if (rc)
3781 return rc;
3782 }
3783 return 0;
3784 }
3785
3786 static void bnxt_free_rx_rings(struct bnxt *bp)
3787 {
3788 int i;
3789
3790 if (!bp->rx_ring)
3791 return;
3792
3793 bnxt_free_tpa_info(bp);
3794 for (i = 0; i < bp->rx_nr_rings; i++) {
3795 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3796 struct bnxt_ring_struct *ring;
3797
3798 if (rxr->xdp_prog)
3799 bpf_prog_put(rxr->xdp_prog);
3800
3801 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3802 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3803
3804 page_pool_destroy(rxr->page_pool);
3805 page_pool_destroy(rxr->head_pool);
3806 rxr->page_pool = rxr->head_pool = NULL;
3807
3808 kfree(rxr->rx_agg_bmap);
3809 rxr->rx_agg_bmap = NULL;
3810
3811 ring = &rxr->rx_ring_struct;
3812 bnxt_free_ring(bp, &ring->ring_mem);
3813
3814 ring = &rxr->rx_agg_ring_struct;
3815 bnxt_free_ring(bp, &ring->ring_mem);
3816 }
3817 }
3818
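/* Create the page_pool(s) backing one RX ring.  A separate head pool
 * is created only when one is needed (e.g. the main pool may hold
 * unreadable device memory); otherwise both pointers reference the
 * same pool.
 */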
3819 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3820 struct bnxt_rx_ring_info *rxr,
3821 int numa_node)
3822 {
3823 const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
3824 const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
3825 struct page_pool_params pp = { 0 };
3826 struct page_pool *pool;
3827
3828 pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
3829 if (BNXT_RX_PAGE_MODE(bp))
3830 pp.pool_size += bp->rx_ring_size / rx_size_fac;
3831 pp.nid = numa_node;
3832 pp.netdev = bp->dev;
3833 pp.dev = &bp->pdev->dev;
3834 pp.dma_dir = bp->rx_dir;
3835 pp.max_len = PAGE_SIZE;
3836 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
3837 PP_FLAG_ALLOW_UNREADABLE_NETMEM;
3838 pp.queue_idx = rxr->bnapi->index;
3839
3840 pool = page_pool_create(&pp);
3841 if (IS_ERR(pool))
3842 return PTR_ERR(pool);
3843 rxr->page_pool = pool;
3844
3845 rxr->need_head_pool = page_pool_is_unreadable(pool);
3846 if (bnxt_separate_head_pool(rxr)) {
3847 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
3848 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3849 pool = page_pool_create(&pp);
3850 if (IS_ERR(pool))
3851 goto err_destroy_pp;
3852 } else {
3853 page_pool_get(pool);
3854 }
3855 rxr->head_pool = pool;
3856
3857 return 0;
3858
3859 err_destroy_pp:
3860 page_pool_destroy(rxr->page_pool);
3861 rxr->page_pool = NULL;
3862 return PTR_ERR(pool);
3863 }
3864
3865 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
3866 {
3867 page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
3868 page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
3869 }
3870
3871 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
3872 {
3873 u16 mem_size;
3874
3875 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3876 mem_size = rxr->rx_agg_bmap_size / 8;
3877 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3878 if (!rxr->rx_agg_bmap)
3879 return -ENOMEM;
3880
3881 return 0;
3882 }
3883
3884 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3885 {
3886 int numa_node = dev_to_node(&bp->pdev->dev);
3887 int i, rc = 0, agg_rings = 0, cpu;
3888
3889 if (!bp->rx_ring)
3890 return -ENOMEM;
3891
3892 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3893 agg_rings = 1;
3894
3895 for (i = 0; i < bp->rx_nr_rings; i++) {
3896 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3897 struct bnxt_ring_struct *ring;
3898 int cpu_node;
3899
3900 ring = &rxr->rx_ring_struct;
3901
3902 cpu = cpumask_local_spread(i, numa_node);
3903 cpu_node = cpu_to_node(cpu);
3904 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
3905 i, cpu_node);
3906 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
3907 if (rc)
3908 return rc;
3909 bnxt_enable_rx_page_pool(rxr);
3910
3911 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3912 if (rc < 0)
3913 return rc;
3914
3915 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3916 MEM_TYPE_PAGE_POOL,
3917 rxr->page_pool);
3918 if (rc) {
3919 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3920 return rc;
3921 }
3922
3923 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3924 if (rc)
3925 return rc;
3926
3927 ring->grp_idx = i;
3928 if (agg_rings) {
3929 ring = &rxr->rx_agg_ring_struct;
3930 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3931 if (rc)
3932 return rc;
3933
3934 ring->grp_idx = i;
3935 rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
3936 if (rc)
3937 return rc;
3938 }
3939 }
3940 if (bp->flags & BNXT_FLAG_TPA)
3941 rc = bnxt_alloc_tpa_info(bp);
3942 return rc;
3943 }
3944
3945 static void bnxt_free_tx_rings(struct bnxt *bp)
3946 {
3947 int i;
3948 struct pci_dev *pdev = bp->pdev;
3949
3950 if (!bp->tx_ring)
3951 return;
3952
3953 for (i = 0; i < bp->tx_nr_rings; i++) {
3954 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3955 struct bnxt_ring_struct *ring;
3956
3957 if (txr->tx_push) {
3958 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3959 txr->tx_push, txr->tx_push_mapping);
3960 txr->tx_push = NULL;
3961 }
3962
3963 ring = &txr->tx_ring_struct;
3964
3965 bnxt_free_ring(bp, &ring->ring_mem);
3966 }
3967 }
3968
3969 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3970 ((tc) * (bp)->tx_nr_rings_per_tc)
3971
3972 #define BNXT_RING_TO_TC_OFF(bp, tx) \
3973 ((tx) % (bp)->tx_nr_rings_per_tc)
3974
3975 #define BNXT_RING_TO_TC(bp, tx) \
3976 ((tx) / (bp)->tx_nr_rings_per_tc)
3977
3978 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3979 {
3980 int i, j, rc;
3981 struct pci_dev *pdev = bp->pdev;
3982
3983 bp->tx_push_size = 0;
3984 if (bp->tx_push_thresh) {
3985 int push_size;
3986
3987 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3988 bp->tx_push_thresh);
3989
3990 if (push_size > 256) {
3991 push_size = 0;
3992 bp->tx_push_thresh = 0;
3993 }
3994
3995 bp->tx_push_size = push_size;
3996 }
3997
3998 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3999 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4000 struct bnxt_ring_struct *ring;
4001 u8 qidx;
4002
4003 ring = &txr->tx_ring_struct;
4004
4005 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4006 if (rc)
4007 return rc;
4008
4009 ring->grp_idx = txr->bnapi->index;
4010 if (bp->tx_push_size) {
4011 dma_addr_t mapping;
4012
4013 /* One pre-allocated DMA buffer to back up the
4014 * TX push operation
4015 */
4016 txr->tx_push = dma_alloc_coherent(&pdev->dev,
4017 bp->tx_push_size,
4018 &txr->tx_push_mapping,
4019 GFP_KERNEL);
4020
4021 if (!txr->tx_push)
4022 return -ENOMEM;
4023
4024 mapping = txr->tx_push_mapping +
4025 sizeof(struct tx_push_bd);
4026 txr->data_mapping = cpu_to_le64(mapping);
4027 }
4028 qidx = bp->tc_to_qidx[j];
4029 ring->queue_id = bp->q_info[qidx].queue_id;
4030 spin_lock_init(&txr->xdp_tx_lock);
4031 if (i < bp->tx_nr_rings_xdp)
4032 continue;
4033 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
4034 j++;
4035 }
4036 return 0;
4037 }
4038
4039 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
4040 {
4041 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4042
4043 kfree(cpr->cp_desc_ring);
4044 cpr->cp_desc_ring = NULL;
4045 ring->ring_mem.pg_arr = NULL;
4046 kfree(cpr->cp_desc_mapping);
4047 cpr->cp_desc_mapping = NULL;
4048 ring->ring_mem.dma_arr = NULL;
4049 }
4050
4051 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
4052 {
4053 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
4054 if (!cpr->cp_desc_ring)
4055 return -ENOMEM;
4056 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
4057 GFP_KERNEL);
4058 if (!cpr->cp_desc_mapping)
4059 return -ENOMEM;
4060 return 0;
4061 }
4062
4063 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
4064 {
4065 int i;
4066
4067 if (!bp->bnapi)
4068 return;
4069 for (i = 0; i < bp->cp_nr_rings; i++) {
4070 struct bnxt_napi *bnapi = bp->bnapi[i];
4071
4072 if (!bnapi)
4073 continue;
4074 bnxt_free_cp_arrays(&bnapi->cp_ring);
4075 }
4076 }
4077
4078 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
4079 {
4080 int i, n = bp->cp_nr_pages;
4081
4082 for (i = 0; i < bp->cp_nr_rings; i++) {
4083 struct bnxt_napi *bnapi = bp->bnapi[i];
4084 int rc;
4085
4086 if (!bnapi)
4087 continue;
4088 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
4089 if (rc)
4090 return rc;
4091 }
4092 return 0;
4093 }
4094
4095 static void bnxt_free_cp_rings(struct bnxt *bp)
4096 {
4097 int i;
4098
4099 if (!bp->bnapi)
4100 return;
4101
4102 for (i = 0; i < bp->cp_nr_rings; i++) {
4103 struct bnxt_napi *bnapi = bp->bnapi[i];
4104 struct bnxt_cp_ring_info *cpr;
4105 struct bnxt_ring_struct *ring;
4106 int j;
4107
4108 if (!bnapi)
4109 continue;
4110
4111 cpr = &bnapi->cp_ring;
4112 ring = &cpr->cp_ring_struct;
4113
4114 bnxt_free_ring(bp, &ring->ring_mem);
4115
4116 if (!cpr->cp_ring_arr)
4117 continue;
4118
4119 for (j = 0; j < cpr->cp_ring_count; j++) {
4120 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4121
4122 ring = &cpr2->cp_ring_struct;
4123 bnxt_free_ring(bp, &ring->ring_mem);
4124 bnxt_free_cp_arrays(cpr2);
4125 }
4126 kfree(cpr->cp_ring_arr);
4127 cpr->cp_ring_arr = NULL;
4128 cpr->cp_ring_count = 0;
4129 }
4130 }
4131
4132 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
4133 struct bnxt_cp_ring_info *cpr)
4134 {
4135 struct bnxt_ring_mem_info *rmem;
4136 struct bnxt_ring_struct *ring;
4137 int rc;
4138
4139 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
4140 if (rc) {
4141 bnxt_free_cp_arrays(cpr);
4142 return -ENOMEM;
4143 }
4144 ring = &cpr->cp_ring_struct;
4145 rmem = &ring->ring_mem;
4146 rmem->nr_pages = bp->cp_nr_pages;
4147 rmem->page_size = HW_CMPD_RING_SIZE;
4148 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4149 rmem->dma_arr = cpr->cp_desc_mapping;
4150 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
4151 rc = bnxt_alloc_ring(bp, rmem);
4152 if (rc) {
4153 bnxt_free_ring(bp, rmem);
4154 bnxt_free_cp_arrays(cpr);
4155 }
4156 return rc;
4157 }
4158
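/* Allocate the per-NAPI completion rings.  On P5+ chips each NAPI gets
 * a notification queue plus an array of completion sub-rings, one for
 * RX and one per traffic class for TX, as applicable.
 */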
4159 static int bnxt_alloc_cp_rings(struct bnxt *bp)
4160 {
4161 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
4162 int i, j, rc, ulp_msix;
4163 int tcs = bp->num_tc;
4164
4165 if (!tcs)
4166 tcs = 1;
4167 ulp_msix = bnxt_get_ulp_msix_num(bp);
4168 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4169 struct bnxt_napi *bnapi = bp->bnapi[i];
4170 struct bnxt_cp_ring_info *cpr, *cpr2;
4171 struct bnxt_ring_struct *ring;
4172 int cp_count = 0, k;
4173 int rx = 0, tx = 0;
4174
4175 if (!bnapi)
4176 continue;
4177
4178 cpr = &bnapi->cp_ring;
4179 cpr->bnapi = bnapi;
4180 ring = &cpr->cp_ring_struct;
4181
4182 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
4183 if (rc)
4184 return rc;
4185
4186 ring->map_idx = ulp_msix + i;
4187
4188 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4189 continue;
4190
4191 if (i < bp->rx_nr_rings) {
4192 cp_count++;
4193 rx = 1;
4194 }
4195 if (i < bp->tx_nr_rings_xdp) {
4196 cp_count++;
4197 tx = 1;
4198 } else if ((sh && i < bp->tx_nr_rings) ||
4199 (!sh && i >= bp->rx_nr_rings)) {
4200 cp_count += tcs;
4201 tx = 1;
4202 }
4203
4204 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
4205 GFP_KERNEL);
4206 if (!cpr->cp_ring_arr)
4207 return -ENOMEM;
4208 cpr->cp_ring_count = cp_count;
4209
4210 for (k = 0; k < cp_count; k++) {
4211 cpr2 = &cpr->cp_ring_arr[k];
4212 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
4213 if (rc)
4214 return rc;
4215 cpr2->bnapi = bnapi;
4216 cpr2->sw_stats = cpr->sw_stats;
4217 cpr2->cp_idx = k;
4218 if (!k && rx) {
4219 bp->rx_ring[i].rx_cpr = cpr2;
4220 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
4221 } else {
4222 int n, tc = k - rx;
4223
4224 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
4225 bp->tx_ring[n].tx_cpr = cpr2;
4226 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
4227 }
4228 }
4229 if (tx)
4230 j++;
4231 }
4232 return 0;
4233 }
4234
4235 static void bnxt_init_rx_ring_struct(struct bnxt *bp,
4236 struct bnxt_rx_ring_info *rxr)
4237 {
4238 struct bnxt_ring_mem_info *rmem;
4239 struct bnxt_ring_struct *ring;
4240
4241 ring = &rxr->rx_ring_struct;
4242 rmem = &ring->ring_mem;
4243 rmem->nr_pages = bp->rx_nr_pages;
4244 rmem->page_size = HW_RXBD_RING_SIZE;
4245 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4246 rmem->dma_arr = rxr->rx_desc_mapping;
4247 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4248 rmem->vmem = (void **)&rxr->rx_buf_ring;
4249
4250 ring = &rxr->rx_agg_ring_struct;
4251 rmem = &ring->ring_mem;
4252 rmem->nr_pages = bp->rx_agg_nr_pages;
4253 rmem->page_size = HW_RXBD_RING_SIZE;
4254 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4255 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4256 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4257 rmem->vmem = (void **)&rxr->rx_agg_ring;
4258 }
4259
4260 static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
4261 struct bnxt_rx_ring_info *rxr)
4262 {
4263 struct bnxt_ring_mem_info *rmem;
4264 struct bnxt_ring_struct *ring;
4265 int i;
4266
4267 rxr->page_pool->p.napi = NULL;
4268 rxr->page_pool = NULL;
4269 rxr->head_pool->p.napi = NULL;
4270 rxr->head_pool = NULL;
4271 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
4272
4273 ring = &rxr->rx_ring_struct;
4274 rmem = &ring->ring_mem;
4275 rmem->pg_tbl = NULL;
4276 rmem->pg_tbl_map = 0;
4277 for (i = 0; i < rmem->nr_pages; i++) {
4278 rmem->pg_arr[i] = NULL;
4279 rmem->dma_arr[i] = 0;
4280 }
4281 *rmem->vmem = NULL;
4282
4283 ring = &rxr->rx_agg_ring_struct;
4284 rmem = &ring->ring_mem;
4285 rmem->pg_tbl = NULL;
4286 rmem->pg_tbl_map = 0;
4287 for (i = 0; i < rmem->nr_pages; i++) {
4288 rmem->pg_arr[i] = NULL;
4289 rmem->dma_arr[i] = 0;
4290 }
4291 *rmem->vmem = NULL;
4292 }
4293
4294 static void bnxt_init_ring_struct(struct bnxt *bp)
4295 {
4296 int i, j;
4297
4298 for (i = 0; i < bp->cp_nr_rings; i++) {
4299 struct bnxt_napi *bnapi = bp->bnapi[i];
4300 struct bnxt_ring_mem_info *rmem;
4301 struct bnxt_cp_ring_info *cpr;
4302 struct bnxt_rx_ring_info *rxr;
4303 struct bnxt_tx_ring_info *txr;
4304 struct bnxt_ring_struct *ring;
4305
4306 if (!bnapi)
4307 continue;
4308
4309 cpr = &bnapi->cp_ring;
4310 ring = &cpr->cp_ring_struct;
4311 rmem = &ring->ring_mem;
4312 rmem->nr_pages = bp->cp_nr_pages;
4313 rmem->page_size = HW_CMPD_RING_SIZE;
4314 rmem->pg_arr = (void **)cpr->cp_desc_ring;
4315 rmem->dma_arr = cpr->cp_desc_mapping;
4316 rmem->vmem_size = 0;
4317
4318 rxr = bnapi->rx_ring;
4319 if (!rxr)
4320 goto skip_rx;
4321
4322 ring = &rxr->rx_ring_struct;
4323 rmem = &ring->ring_mem;
4324 rmem->nr_pages = bp->rx_nr_pages;
4325 rmem->page_size = HW_RXBD_RING_SIZE;
4326 rmem->pg_arr = (void **)rxr->rx_desc_ring;
4327 rmem->dma_arr = rxr->rx_desc_mapping;
4328 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
4329 rmem->vmem = (void **)&rxr->rx_buf_ring;
4330
4331 ring = &rxr->rx_agg_ring_struct;
4332 rmem = &ring->ring_mem;
4333 rmem->nr_pages = bp->rx_agg_nr_pages;
4334 rmem->page_size = HW_RXBD_RING_SIZE;
4335 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
4336 rmem->dma_arr = rxr->rx_agg_desc_mapping;
4337 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
4338 rmem->vmem = (void **)&rxr->rx_agg_ring;
4339
4340 skip_rx:
4341 bnxt_for_each_napi_tx(j, bnapi, txr) {
4342 ring = &txr->tx_ring_struct;
4343 rmem = &ring->ring_mem;
4344 rmem->nr_pages = bp->tx_nr_pages;
4345 rmem->page_size = HW_TXBD_RING_SIZE;
4346 rmem->pg_arr = (void **)txr->tx_desc_ring;
4347 rmem->dma_arr = txr->tx_desc_mapping;
4348 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4349 rmem->vmem = (void **)&txr->tx_buf_ring;
4350 }
4351 }
4352 }
4353
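/* Initialize every RX buffer descriptor in the ring with the given
 * type/flags and its opaque producer index.
 */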
4354 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4355 {
4356 int i;
4357 u32 prod;
4358 struct rx_bd **rx_buf_ring;
4359
4360 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4361 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4362 int j;
4363 struct rx_bd *rxbd;
4364
4365 rxbd = rx_buf_ring[i];
4366 if (!rxbd)
4367 continue;
4368
4369 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4370 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4371 rxbd->rx_bd_opaque = prod;
4372 }
4373 }
4374 }
4375
4376 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
4377 struct bnxt_rx_ring_info *rxr,
4378 int ring_nr)
4379 {
4380 u32 prod;
4381 int i;
4382
4383 prod = rxr->rx_prod;
4384 for (i = 0; i < bp->rx_ring_size; i++) {
4385 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4386 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
4387 ring_nr, i, bp->rx_ring_size);
4388 break;
4389 }
4390 prod = NEXT_RX(prod);
4391 }
4392 rxr->rx_prod = prod;
4393 }
4394
4395 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
4396 struct bnxt_rx_ring_info *rxr,
4397 int ring_nr)
4398 {
4399 u32 prod;
4400 int i;
4401
4402 prod = rxr->rx_agg_prod;
4403 for (i = 0; i < bp->rx_agg_ring_size; i++) {
4404 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
4405 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
4406 ring_nr, i, bp->rx_agg_ring_size);
4407 break;
4408 }
4409 prod = NEXT_RX_AGG(prod);
4410 }
4411 rxr->rx_agg_prod = prod;
4412 }
4413
4414 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
4415 struct bnxt_rx_ring_info *rxr)
4416 {
4417 dma_addr_t mapping;
4418 u8 *data;
4419 int i;
4420
4421 for (i = 0; i < bp->max_tpa; i++) {
4422 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
4423 GFP_KERNEL);
4424 if (!data)
4425 return -ENOMEM;
4426
4427 rxr->rx_tpa[i].data = data;
4428 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4429 rxr->rx_tpa[i].mapping = mapping;
4430 }
4431
4432 return 0;
4433 }
4434
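/* Fill one RX ring with buffers: data buffers, aggregation pages when
 * aggregation rings are enabled, and TPA buffers when TPA is configured.
 */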
4435 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4436 {
4437 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4438 int rc;
4439
4440 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
4441
4442 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4443 return 0;
4444
4445 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
4446
4447 if (rxr->rx_tpa) {
4448 rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
4449 if (rc)
4450 return rc;
4451 }
4452 return 0;
4453 }
4454
4455 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
4456 struct bnxt_rx_ring_info *rxr)
4457 {
4458 struct bnxt_ring_struct *ring;
4459 u32 type;
4460
4461 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4462 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4463
4464 if (NET_IP_ALIGN == 2)
4465 type |= RX_BD_FLAGS_SOP;
4466
4467 ring = &rxr->rx_ring_struct;
4468 bnxt_init_rxbd_pages(ring, type);
4469 ring->fw_ring_id = INVALID_HW_RING_ID;
4470 }
4471
4472 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
4473 struct bnxt_rx_ring_info *rxr)
4474 {
4475 struct bnxt_ring_struct *ring;
4476 u32 type;
4477
4478 ring = &rxr->rx_agg_ring_struct;
4479 ring->fw_ring_id = INVALID_HW_RING_ID;
4480 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4481 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4482 RX_BD_TYPE_RX_AGG_BD;
4483
4484 /* On P7, setting EOP will cause the chip to disable
4485 * Relaxed Ordering (RO) for TPA data. Disable EOP for
4486 * potentially higher performance with RO.
4487 */
4488 if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
4489 type |= RX_BD_FLAGS_AGG_EOP;
4490
4491 bnxt_init_rxbd_pages(ring, type);
4492 }
4493 }
4494
4495 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4496 {
4497 struct bnxt_rx_ring_info *rxr;
4498
4499 rxr = &bp->rx_ring[ring_nr];
4500 bnxt_init_one_rx_ring_rxbd(bp, rxr);
4501
4502 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4503 &rxr->bnapi->napi);
4504
4505 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4506 bpf_prog_add(bp->xdp_prog, 1);
4507 rxr->xdp_prog = bp->xdp_prog;
4508 }
4509
4510 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
4511
4512 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4513 }
4514
4515 static void bnxt_init_cp_rings(struct bnxt *bp)
4516 {
4517 int i, j;
4518
4519 for (i = 0; i < bp->cp_nr_rings; i++) {
4520 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4521 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4522
4523 ring->fw_ring_id = INVALID_HW_RING_ID;
4524 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4525 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4526 if (!cpr->cp_ring_arr)
4527 continue;
4528 for (j = 0; j < cpr->cp_ring_count; j++) {
4529 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4530
4531 ring = &cpr2->cp_ring_struct;
4532 ring->fw_ring_id = INVALID_HW_RING_ID;
4533 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4534 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4535 }
4536 }
4537 }
4538
4539 static int bnxt_init_rx_rings(struct bnxt *bp)
4540 {
4541 int i, rc = 0;
4542
4543 if (BNXT_RX_PAGE_MODE(bp)) {
4544 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4545 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4546 } else {
4547 bp->rx_offset = BNXT_RX_OFFSET;
4548 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4549 }
4550
4551 for (i = 0; i < bp->rx_nr_rings; i++) {
4552 rc = bnxt_init_one_rx_ring(bp, i);
4553 if (rc)
4554 break;
4555 }
4556
4557 return rc;
4558 }
4559
4560 static int bnxt_init_tx_rings(struct bnxt *bp)
4561 {
4562 u16 i;
4563
4564 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4565 BNXT_MIN_TX_DESC_CNT);
4566
4567 for (i = 0; i < bp->tx_nr_rings; i++) {
4568 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4569 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4570
4571 ring->fw_ring_id = INVALID_HW_RING_ID;
4572
4573 if (i >= bp->tx_nr_rings_xdp)
4574 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4575 NETDEV_QUEUE_TYPE_TX,
4576 &txr->bnapi->napi);
4577 }
4578
4579 return 0;
4580 }
4581
4582 static void bnxt_free_ring_grps(struct bnxt *bp)
4583 {
4584 kfree(bp->grp_info);
4585 bp->grp_info = NULL;
4586 }
4587
4588 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4589 {
4590 int i;
4591
4592 if (irq_re_init) {
4593 bp->grp_info = kcalloc(bp->cp_nr_rings,
4594 sizeof(struct bnxt_ring_grp_info),
4595 GFP_KERNEL);
4596 if (!bp->grp_info)
4597 return -ENOMEM;
4598 }
4599 for (i = 0; i < bp->cp_nr_rings; i++) {
4600 if (irq_re_init)
4601 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4602 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4603 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4604 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4605 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4606 }
4607 return 0;
4608 }
4609
4610 static void bnxt_free_vnics(struct bnxt *bp)
4611 {
4612 kfree(bp->vnic_info);
4613 bp->vnic_info = NULL;
4614 bp->nr_vnics = 0;
4615 }
4616
4617 static int bnxt_alloc_vnics(struct bnxt *bp)
4618 {
4619 int num_vnics = 1;
4620
4621 #ifdef CONFIG_RFS_ACCEL
4622 if (bp->flags & BNXT_FLAG_RFS) {
4623 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4624 num_vnics++;
4625 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4626 num_vnics += bp->rx_nr_rings;
4627 }
4628 #endif
4629
4630 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4631 num_vnics++;
4632
4633 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4634 GFP_KERNEL);
4635 if (!bp->vnic_info)
4636 return -ENOMEM;
4637
4638 bp->nr_vnics = num_vnics;
4639 return 0;
4640 }
4641
4642 static void bnxt_init_vnics(struct bnxt *bp)
4643 {
4644 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4645 int i;
4646
4647 for (i = 0; i < bp->nr_vnics; i++) {
4648 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4649 int j;
4650
4651 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4652 vnic->vnic_id = i;
4653 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4654 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4655
4656 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4657
4658 if (bp->vnic_info[i].rss_hash_key) {
4659 if (i == BNXT_VNIC_DEFAULT) {
4660 u8 *key = (void *)vnic->rss_hash_key;
4661 int k;
4662
4663 if (!bp->rss_hash_key_valid &&
4664 !bp->rss_hash_key_updated) {
4665 get_random_bytes(bp->rss_hash_key,
4666 HW_HASH_KEY_SIZE);
4667 bp->rss_hash_key_updated = true;
4668 }
4669
4670 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4671 HW_HASH_KEY_SIZE);
4672
4673 if (!bp->rss_hash_key_updated)
4674 continue;
4675
4676 bp->rss_hash_key_updated = false;
4677 bp->rss_hash_key_valid = true;
4678
4679 bp->toeplitz_prefix = 0;
4680 for (k = 0; k < 8; k++) {
4681 bp->toeplitz_prefix <<= 8;
4682 bp->toeplitz_prefix |= key[k];
4683 }
4684 } else {
4685 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4686 HW_HASH_KEY_SIZE);
4687 }
4688 }
4689 }
4690 }
4691
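/* Return the number of ring pages needed to hold ring_size descriptors,
 * rounded up to the next power of two.
 */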
4692 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4693 {
4694 int pages;
4695
4696 pages = ring_size / desc_per_pg;
4697
4698 if (!pages)
4699 return 1;
4700
4701 pages++;
4702
4703 while (pages & (pages - 1))
4704 pages++;
4705
4706 return pages;
4707 }
4708
4709 void bnxt_set_tpa_flags(struct bnxt *bp)
4710 {
4711 bp->flags &= ~BNXT_FLAG_TPA;
4712 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4713 return;
4714 if (bp->dev->features & NETIF_F_LRO)
4715 bp->flags |= BNXT_FLAG_LRO;
4716 else if (bp->dev->features & NETIF_F_GRO_HW)
4717 bp->flags |= BNXT_FLAG_GRO;
4718 }
4719
4720 static void bnxt_init_ring_params(struct bnxt *bp)
4721 {
4722 unsigned int rx_size;
4723
4724 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
4725 /* Try to fit 4 chunks into a 4k page */
4726 rx_size = SZ_1K -
4727 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4728 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
4729 }
4730
4731 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4732 * be set on entry.
4733 */
4734 void bnxt_set_ring_params(struct bnxt *bp)
4735 {
4736 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4737 u32 agg_factor = 0, agg_ring_size = 0;
4738
4739 /* 8 for CRC and VLAN */
4740 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4741
4742 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4743 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4744
4745 ring_size = bp->rx_ring_size;
4746 bp->rx_agg_ring_size = 0;
4747 bp->rx_agg_nr_pages = 0;
4748
4749 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
4750 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4751
4752 bp->flags &= ~BNXT_FLAG_JUMBO;
4753 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4754 u32 jumbo_factor;
4755
4756 bp->flags |= BNXT_FLAG_JUMBO;
4757 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4758 if (jumbo_factor > agg_factor)
4759 agg_factor = jumbo_factor;
4760 }
4761 if (agg_factor) {
4762 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4763 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4764 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4765 bp->rx_ring_size, ring_size);
4766 bp->rx_ring_size = ring_size;
4767 }
4768 agg_ring_size = ring_size * agg_factor;
4769
4770 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4771 RX_DESC_CNT);
4772 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4773 u32 tmp = agg_ring_size;
4774
4775 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4776 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4777 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4778 tmp, agg_ring_size);
4779 }
4780 bp->rx_agg_ring_size = agg_ring_size;
4781 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4782
4783 if (BNXT_RX_PAGE_MODE(bp)) {
4784 rx_space = PAGE_SIZE;
4785 rx_size = PAGE_SIZE -
4786 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4787 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4788 } else {
4789 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
4790 bp->rx_copybreak,
4791 bp->dev->cfg_pending->hds_thresh);
4792 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
4793 rx_space = rx_size + NET_SKB_PAD +
4794 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4795 }
4796 }
4797
4798 bp->rx_buf_use_size = rx_size;
4799 bp->rx_buf_size = rx_space;
4800
4801 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4802 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4803
4804 ring_size = bp->tx_ring_size;
4805 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4806 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4807
4808 max_rx_cmpl = bp->rx_ring_size;
4809 /* MAX TPA needs to be added because TPA_START completions are
4810 * immediately recycled, so the TPA completions are not bound by
4811 * the RX ring size.
4812 */
4813 if (bp->flags & BNXT_FLAG_TPA)
4814 max_rx_cmpl += bp->max_tpa;
4815 /* RX and TPA completions are 32-byte, all others are 16-byte */
4816 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4817 bp->cp_ring_size = ring_size;
4818
4819 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4820 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4821 bp->cp_nr_pages = MAX_CP_PAGES;
4822 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4823 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4824 ring_size, bp->cp_ring_size);
4825 }
4826 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4827 bp->cp_ring_mask = bp->cp_bit - 1;
4828 }
4829
4830 /* Changing allocation mode of RX rings.
4831 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4832 */
4833 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4834 {
4835 struct net_device *dev = bp->dev;
4836
4837 if (page_mode) {
4838 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
4839 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4840
4841 if (bp->xdp_prog->aux->xdp_has_frags)
4842 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4843 else
4844 dev->max_mtu =
4845 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4846 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4847 bp->flags |= BNXT_FLAG_JUMBO;
4848 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4849 } else {
4850 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4851 bp->rx_skb_func = bnxt_rx_page_skb;
4852 }
4853 bp->rx_dir = DMA_BIDIRECTIONAL;
4854 } else {
4855 dev->max_mtu = bp->max_mtu;
4856 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4857 bp->rx_dir = DMA_FROM_DEVICE;
4858 bp->rx_skb_func = bnxt_rx_skb;
4859 }
4860 }
4861
4862 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4863 {
4864 __bnxt_set_rx_skb_mode(bp, page_mode);
4865
4866 if (!page_mode) {
4867 int rx, tx;
4868
4869 bnxt_get_max_rings(bp, &rx, &tx, true);
4870 if (rx > 1) {
4871 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
4872 bp->dev->hw_features |= NETIF_F_LRO;
4873 }
4874 }
4875
4876 /* Update LRO and GRO_HW availability */
4877 netdev_update_features(bp->dev);
4878 }
4879
4880 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4881 {
4882 int i;
4883 struct bnxt_vnic_info *vnic;
4884 struct pci_dev *pdev = bp->pdev;
4885
4886 if (!bp->vnic_info)
4887 return;
4888
4889 for (i = 0; i < bp->nr_vnics; i++) {
4890 vnic = &bp->vnic_info[i];
4891
4892 kfree(vnic->fw_grp_ids);
4893 vnic->fw_grp_ids = NULL;
4894
4895 kfree(vnic->uc_list);
4896 vnic->uc_list = NULL;
4897
4898 if (vnic->mc_list) {
4899 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4900 vnic->mc_list, vnic->mc_list_mapping);
4901 vnic->mc_list = NULL;
4902 }
4903
4904 if (vnic->rss_table) {
4905 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4906 vnic->rss_table,
4907 vnic->rss_table_dma_addr);
4908 vnic->rss_table = NULL;
4909 }
4910
4911 vnic->rss_hash_key = NULL;
4912 vnic->flags = 0;
4913 }
4914 }
4915
4916 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4917 {
4918 int i, rc = 0, size;
4919 struct bnxt_vnic_info *vnic;
4920 struct pci_dev *pdev = bp->pdev;
4921 int max_rings;
4922
4923 for (i = 0; i < bp->nr_vnics; i++) {
4924 vnic = &bp->vnic_info[i];
4925
4926 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4927 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4928
4929 if (mem_size > 0) {
4930 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4931 if (!vnic->uc_list) {
4932 rc = -ENOMEM;
4933 goto out;
4934 }
4935 }
4936 }
4937
4938 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4939 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4940 vnic->mc_list =
4941 dma_alloc_coherent(&pdev->dev,
4942 vnic->mc_list_size,
4943 &vnic->mc_list_mapping,
4944 GFP_KERNEL);
4945 if (!vnic->mc_list) {
4946 rc = -ENOMEM;
4947 goto out;
4948 }
4949 }
4950
4951 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4952 goto vnic_skip_grps;
4953
4954 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4955 max_rings = bp->rx_nr_rings;
4956 else
4957 max_rings = 1;
4958
4959 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4960 if (!vnic->fw_grp_ids) {
4961 rc = -ENOMEM;
4962 goto out;
4963 }
4964 vnic_skip_grps:
4965 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4966 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4967 continue;
4968
4969 /* Allocate rss table and hash key */
4970 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4971 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4972 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4973
4974 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4975 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4976 vnic->rss_table_size,
4977 &vnic->rss_table_dma_addr,
4978 GFP_KERNEL);
4979 if (!vnic->rss_table) {
4980 rc = -ENOMEM;
4981 goto out;
4982 }
4983
4984 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4985 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4986 }
4987 return 0;
4988
4989 out:
4990 return rc;
4991 }
4992
4993 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4994 {
4995 struct bnxt_hwrm_wait_token *token;
4996
4997 dma_pool_destroy(bp->hwrm_dma_pool);
4998 bp->hwrm_dma_pool = NULL;
4999
5000 rcu_read_lock();
5001 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
5002 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
5003 rcu_read_unlock();
5004 }
5005
5006 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
5007 {
5008 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
5009 BNXT_HWRM_DMA_SIZE,
5010 BNXT_HWRM_DMA_ALIGN, 0);
5011 if (!bp->hwrm_dma_pool)
5012 return -ENOMEM;
5013
5014 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
5015
5016 return 0;
5017 }
5018
5019 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
5020 {
5021 kfree(stats->hw_masks);
5022 stats->hw_masks = NULL;
5023 kfree(stats->sw_stats);
5024 stats->sw_stats = NULL;
5025 if (stats->hw_stats) {
5026 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
5027 stats->hw_stats_map);
5028 stats->hw_stats = NULL;
5029 }
5030 }
5031
5032 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
5033 bool alloc_masks)
5034 {
5035 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
5036 &stats->hw_stats_map, GFP_KERNEL);
5037 if (!stats->hw_stats)
5038 return -ENOMEM;
5039
5040 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
5041 if (!stats->sw_stats)
5042 goto stats_mem_err;
5043
5044 if (alloc_masks) {
5045 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
5046 if (!stats->hw_masks)
5047 goto stats_mem_err;
5048 }
5049 return 0;
5050
5051 stats_mem_err:
5052 bnxt_free_stats_mem(bp, stats);
5053 return -ENOMEM;
5054 }
5055
5056 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
5057 {
5058 int i;
5059
5060 for (i = 0; i < count; i++)
5061 mask_arr[i] = mask;
5062 }
5063
5064 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
5065 {
5066 int i;
5067
5068 for (i = 0; i < count; i++)
5069 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
5070 }
5071
5072 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
5073 struct bnxt_stats_mem *stats)
5074 {
5075 struct hwrm_func_qstats_ext_output *resp;
5076 struct hwrm_func_qstats_ext_input *req;
5077 __le64 *hw_masks;
5078 int rc;
5079
5080 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
5081 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5082 return -EOPNOTSUPP;
5083
5084 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
5085 if (rc)
5086 return rc;
5087
5088 req->fid = cpu_to_le16(0xffff);
5089 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5090
5091 resp = hwrm_req_hold(bp, req);
5092 rc = hwrm_req_send(bp, req);
5093 if (!rc) {
5094 hw_masks = &resp->rx_ucast_pkts;
5095 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
5096 }
5097 hwrm_req_drop(bp, req);
5098 return rc;
5099 }
5100
5101 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
5102 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
5103
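/* Query the counter rollover masks from firmware where supported;
 * otherwise fall back to fixed-width masks (48-bit ring counters on
 * P5+ chips, full 64-bit otherwise, and 40-bit port counters).
 */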
5104 static void bnxt_init_stats(struct bnxt *bp)
5105 {
5106 struct bnxt_napi *bnapi = bp->bnapi[0];
5107 struct bnxt_cp_ring_info *cpr;
5108 struct bnxt_stats_mem *stats;
5109 __le64 *rx_stats, *tx_stats;
5110 int rc, rx_count, tx_count;
5111 u64 *rx_masks, *tx_masks;
5112 u64 mask;
5113 u8 flags;
5114
5115 cpr = &bnapi->cp_ring;
5116 stats = &cpr->stats;
5117 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
5118 if (rc) {
5119 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5120 mask = (1ULL << 48) - 1;
5121 else
5122 mask = -1ULL;
5123 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
5124 }
5125 if (bp->flags & BNXT_FLAG_PORT_STATS) {
5126 stats = &bp->port_stats;
5127 rx_stats = stats->hw_stats;
5128 rx_masks = stats->hw_masks;
5129 rx_count = sizeof(struct rx_port_stats) / 8;
5130 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5131 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5132 tx_count = sizeof(struct tx_port_stats) / 8;
5133
5134 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
5135 rc = bnxt_hwrm_port_qstats(bp, flags);
5136 if (rc) {
5137 mask = (1ULL << 40) - 1;
5138
5139 bnxt_fill_masks(rx_masks, mask, rx_count);
5140 bnxt_fill_masks(tx_masks, mask, tx_count);
5141 } else {
5142 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5143 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
5144 bnxt_hwrm_port_qstats(bp, 0);
5145 }
5146 }
5147 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
5148 stats = &bp->rx_port_stats_ext;
5149 rx_stats = stats->hw_stats;
5150 rx_masks = stats->hw_masks;
5151 rx_count = sizeof(struct rx_port_stats_ext) / 8;
5152 stats = &bp->tx_port_stats_ext;
5153 tx_stats = stats->hw_stats;
5154 tx_masks = stats->hw_masks;
5155 tx_count = sizeof(struct tx_port_stats_ext) / 8;
5156
5157 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
5158 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
5159 if (rc) {
5160 mask = (1ULL << 40) - 1;
5161
5162 bnxt_fill_masks(rx_masks, mask, rx_count);
5163 if (tx_stats)
5164 bnxt_fill_masks(tx_masks, mask, tx_count);
5165 } else {
5166 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
5167 if (tx_stats)
5168 bnxt_copy_hw_masks(tx_masks, tx_stats,
5169 tx_count);
5170 bnxt_hwrm_port_qstats_ext(bp, 0);
5171 }
5172 }
5173 }
5174
5175 static void bnxt_free_port_stats(struct bnxt *bp)
5176 {
5177 bp->flags &= ~BNXT_FLAG_PORT_STATS;
5178 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
5179
5180 bnxt_free_stats_mem(bp, &bp->port_stats);
5181 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
5182 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
5183 }
5184
5185 static void bnxt_free_ring_stats(struct bnxt *bp)
5186 {
5187 int i;
5188
5189 if (!bp->bnapi)
5190 return;
5191
5192 for (i = 0; i < bp->cp_nr_rings; i++) {
5193 struct bnxt_napi *bnapi = bp->bnapi[i];
5194 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5195
5196 bnxt_free_stats_mem(bp, &cpr->stats);
5197
5198 kfree(cpr->sw_stats);
5199 cpr->sw_stats = NULL;
5200 }
5201 }
5202
5203 static int bnxt_alloc_stats(struct bnxt *bp)
5204 {
5205 u32 size, i;
5206 int rc;
5207
5208 size = bp->hw_ring_stats_size;
5209
5210 for (i = 0; i < bp->cp_nr_rings; i++) {
5211 struct bnxt_napi *bnapi = bp->bnapi[i];
5212 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5213
5214 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
5215 if (!cpr->sw_stats)
5216 return -ENOMEM;
5217
5218 cpr->stats.len = size;
5219 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
5220 if (rc)
5221 return rc;
5222
5223 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5224 }
5225
5226 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
5227 return 0;
5228
5229 if (bp->port_stats.hw_stats)
5230 goto alloc_ext_stats;
5231
5232 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
5233 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
5234 if (rc)
5235 return rc;
5236
5237 bp->flags |= BNXT_FLAG_PORT_STATS;
5238
5239 alloc_ext_stats:
5240 /* Display extended statistics only if FW supports it */
5241 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
5242 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
5243 return 0;
5244
5245 if (bp->rx_port_stats_ext.hw_stats)
5246 goto alloc_tx_ext_stats;
5247
5248 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
5249 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
5250 /* Extended stats are optional */
5251 if (rc)
5252 return 0;
5253
5254 alloc_tx_ext_stats:
5255 if (bp->tx_port_stats_ext.hw_stats)
5256 return 0;
5257
5258 if (bp->hwrm_spec_code >= 0x10902 ||
5259 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
5260 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
5261 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
5262 /* Extended stats are optional */
5263 if (rc)
5264 return 0;
5265 }
5266 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
5267 return 0;
5268 }
5269
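/* Reset the software producer/consumer indices and pending event state
 * of all completion, TX and RX rings.
 */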
5270 static void bnxt_clear_ring_indices(struct bnxt *bp)
5271 {
5272 int i, j;
5273
5274 if (!bp->bnapi)
5275 return;
5276
5277 for (i = 0; i < bp->cp_nr_rings; i++) {
5278 struct bnxt_napi *bnapi = bp->bnapi[i];
5279 struct bnxt_cp_ring_info *cpr;
5280 struct bnxt_rx_ring_info *rxr;
5281 struct bnxt_tx_ring_info *txr;
5282
5283 if (!bnapi)
5284 continue;
5285
5286 cpr = &bnapi->cp_ring;
5287 cpr->cp_raw_cons = 0;
5288
5289 bnxt_for_each_napi_tx(j, bnapi, txr) {
5290 txr->tx_prod = 0;
5291 txr->tx_cons = 0;
5292 txr->tx_hw_cons = 0;
5293 }
5294
5295 rxr = bnapi->rx_ring;
5296 if (rxr) {
5297 rxr->rx_prod = 0;
5298 rxr->rx_agg_prod = 0;
5299 rxr->rx_sw_agg_prod = 0;
5300 rxr->rx_next_cons = 0;
5301 }
5302 bnapi->events = 0;
5303 }
5304 }
5305
5306 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5307 {
5308 u8 type = fltr->type, flags = fltr->flags;
5309
5310 INIT_LIST_HEAD(&fltr->list);
5311 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
5312 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
5313 list_add_tail(&fltr->list, &bp->usr_fltr_list);
5314 }
5315
5316 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5317 {
5318 if (!list_empty(&fltr->list))
5319 list_del_init(&fltr->list);
5320 }
5321
5322 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
5323 {
5324 struct bnxt_filter_base *usr_fltr, *tmp;
5325
5326 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
5327 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
5328 continue;
5329 bnxt_del_one_usr_fltr(bp, usr_fltr);
5330 }
5331 }
5332
5333 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
5334 {
5335 hlist_del(&fltr->hash);
5336 bnxt_del_one_usr_fltr(bp, fltr);
5337 if (fltr->flags) {
5338 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5339 bp->ntp_fltr_count--;
5340 }
5341 kfree(fltr);
5342 }
5343
5344 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
5345 {
5346 int i;
5347
5348 netdev_assert_locked_or_invisible(bp->dev);
5349
5350 /* We hold the netdev instance lock and all our NAPIs have been
5351 * disabled. It is safe to delete the hash table.
5352 */
5353 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5354 struct hlist_head *head;
5355 struct hlist_node *tmp;
5356 struct bnxt_ntuple_filter *fltr;
5357
5358 head = &bp->ntp_fltr_hash_tbl[i];
5359 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5360 bnxt_del_l2_filter(bp, fltr->l2_fltr);
5361 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5362 !list_empty(&fltr->base.list)))
5363 continue;
5364 bnxt_del_fltr(bp, &fltr->base);
5365 }
5366 }
5367 if (!all)
5368 return;
5369
5370 bitmap_free(bp->ntp_fltr_bmap);
5371 bp->ntp_fltr_bmap = NULL;
5372 bp->ntp_fltr_count = 0;
5373 }
5374
5375 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
5376 {
5377 int i, rc = 0;
5378
5379 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
5380 return 0;
5381
5382 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
5383 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
5384
5385 bp->ntp_fltr_count = 0;
5386 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
5387
5388 if (!bp->ntp_fltr_bmap)
5389 rc = -ENOMEM;
5390
5391 return rc;
5392 }
5393
5394 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
5395 {
5396 int i;
5397
5398 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
5399 struct hlist_head *head;
5400 struct hlist_node *tmp;
5401 struct bnxt_l2_filter *fltr;
5402
5403 head = &bp->l2_fltr_hash_tbl[i];
5404 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
5405 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
5406 !list_empty(&fltr->base.list)))
5407 continue;
5408 bnxt_del_fltr(bp, &fltr->base);
5409 }
5410 }
5411 }
5412
5413 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
5414 {
5415 int i;
5416
5417 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
5418 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
5419 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
5420 }
5421
5422 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
5423 {
5424 bnxt_free_vnic_attributes(bp);
5425 bnxt_free_tx_rings(bp);
5426 bnxt_free_rx_rings(bp);
5427 bnxt_free_cp_rings(bp);
5428 bnxt_free_all_cp_arrays(bp);
5429 bnxt_free_ntp_fltrs(bp, false);
5430 bnxt_free_l2_filters(bp, false);
5431 if (irq_re_init) {
5432 bnxt_free_ring_stats(bp);
5433 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
5434 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
5435 bnxt_free_port_stats(bp);
5436 bnxt_free_ring_grps(bp);
5437 bnxt_free_vnics(bp);
5438 kfree(bp->tx_ring_map);
5439 bp->tx_ring_map = NULL;
5440 kfree(bp->tx_ring);
5441 bp->tx_ring = NULL;
5442 kfree(bp->rx_ring);
5443 bp->rx_ring = NULL;
5444 kfree(bp->bnapi);
5445 bp->bnapi = NULL;
5446 } else {
5447 bnxt_clear_ring_indices(bp);
5448 }
5449 }
5450
5451 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5452 {
5453 int i, j, rc, size, arr_size;
5454 void *bnapi;
5455
5456 if (irq_re_init) {
5457 /* Allocate bnapi mem pointer array and mem block for
5458 * all queues
5459 */
5460 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5461 bp->cp_nr_rings);
5462 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5463 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5464 if (!bnapi)
5465 return -ENOMEM;
5466
5467 bp->bnapi = bnapi;
5468 bnapi += arr_size;
5469 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5470 bp->bnapi[i] = bnapi;
5471 bp->bnapi[i]->index = i;
5472 bp->bnapi[i]->bp = bp;
5473 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5474 struct bnxt_cp_ring_info *cpr =
5475 &bp->bnapi[i]->cp_ring;
5476
5477 cpr->cp_ring_struct.ring_mem.flags =
5478 BNXT_RMEM_RING_PTE_FLAG;
5479 }
5480 }
5481
5482 bp->rx_ring = kcalloc(bp->rx_nr_rings,
5483 sizeof(struct bnxt_rx_ring_info),
5484 GFP_KERNEL);
5485 if (!bp->rx_ring)
5486 return -ENOMEM;
5487
5488 for (i = 0; i < bp->rx_nr_rings; i++) {
5489 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5490
5491 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5492 rxr->rx_ring_struct.ring_mem.flags =
5493 BNXT_RMEM_RING_PTE_FLAG;
5494 rxr->rx_agg_ring_struct.ring_mem.flags =
5495 BNXT_RMEM_RING_PTE_FLAG;
5496 } else {
5497 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5498 }
5499 rxr->bnapi = bp->bnapi[i];
5500 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5501 }
5502
5503 bp->tx_ring = kcalloc(bp->tx_nr_rings,
5504 sizeof(struct bnxt_tx_ring_info),
5505 GFP_KERNEL);
5506 if (!bp->tx_ring)
5507 return -ENOMEM;
5508
5509 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5510 GFP_KERNEL);
5511
5512 if (!bp->tx_ring_map)
5513 return -ENOMEM;
5514
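/* With shared rings, TX rings start sharing NAPIs with the RX rings at
 * index 0; otherwise the TX NAPIs are laid out after the RX NAPIs.
 */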
5515 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5516 j = 0;
5517 else
5518 j = bp->rx_nr_rings;
5519
5520 for (i = 0; i < bp->tx_nr_rings; i++) {
5521 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5522 struct bnxt_napi *bnapi2;
5523
5524 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5525 txr->tx_ring_struct.ring_mem.flags =
5526 BNXT_RMEM_RING_PTE_FLAG;
5527 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5528 if (i >= bp->tx_nr_rings_xdp) {
5529 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5530
5531 bnapi2 = bp->bnapi[k];
5532 txr->txq_index = i - bp->tx_nr_rings_xdp;
5533 txr->tx_napi_idx =
5534 BNXT_RING_TO_TC(bp, txr->txq_index);
5535 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5536 bnapi2->tx_int = bnxt_tx_int;
5537 } else {
5538 bnapi2 = bp->bnapi[j];
5539 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5540 bnapi2->tx_ring[0] = txr;
5541 bnapi2->tx_int = bnxt_tx_int_xdp;
5542 j++;
5543 }
5544 txr->bnapi = bnapi2;
5545 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5546 txr->tx_cpr = &bnapi2->cp_ring;
5547 }
5548
5549 rc = bnxt_alloc_stats(bp);
5550 if (rc)
5551 goto alloc_mem_err;
5552 bnxt_init_stats(bp);
5553
5554 rc = bnxt_alloc_ntp_fltrs(bp);
5555 if (rc)
5556 goto alloc_mem_err;
5557
5558 rc = bnxt_alloc_vnics(bp);
5559 if (rc)
5560 goto alloc_mem_err;
5561 }
5562
5563 rc = bnxt_alloc_all_cp_arrays(bp);
5564 if (rc)
5565 goto alloc_mem_err;
5566
5567 bnxt_init_ring_struct(bp);
5568
5569 rc = bnxt_alloc_rx_rings(bp);
5570 if (rc)
5571 goto alloc_mem_err;
5572
5573 rc = bnxt_alloc_tx_rings(bp);
5574 if (rc)
5575 goto alloc_mem_err;
5576
5577 rc = bnxt_alloc_cp_rings(bp);
5578 if (rc)
5579 goto alloc_mem_err;
5580
5581 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5582 BNXT_VNIC_MCAST_FLAG |
5583 BNXT_VNIC_UCAST_FLAG;
5584 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5585 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5586 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5587
5588 rc = bnxt_alloc_vnic_attributes(bp);
5589 if (rc)
5590 goto alloc_mem_err;
5591 return 0;
5592
5593 alloc_mem_err:
5594 bnxt_free_mem(bp, true);
5595 return rc;
5596 }
5597
5598 static void bnxt_disable_int(struct bnxt *bp)
5599 {
5600 int i;
5601
5602 if (!bp->bnapi)
5603 return;
5604
5605 for (i = 0; i < bp->cp_nr_rings; i++) {
5606 struct bnxt_napi *bnapi = bp->bnapi[i];
5607 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5608 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5609
5610 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5611 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5612 }
5613 }
5614
5615 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5616 {
5617 struct bnxt_napi *bnapi = bp->bnapi[n];
5618 struct bnxt_cp_ring_info *cpr;
5619
5620 cpr = &bnapi->cp_ring;
5621 return cpr->cp_ring_struct.map_idx;
5622 }
5623
5624 static void bnxt_disable_int_sync(struct bnxt *bp)
5625 {
5626 int i;
5627
5628 if (!bp->irq_tbl)
5629 return;
5630
5631 atomic_inc(&bp->intr_sem);
5632
5633 bnxt_disable_int(bp);
5634 for (i = 0; i < bp->cp_nr_rings; i++) {
5635 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5636
5637 synchronize_irq(bp->irq_tbl[map_idx].vector);
5638 }
5639 }
5640
5641 static void bnxt_enable_int(struct bnxt *bp)
5642 {
5643 int i;
5644
5645 atomic_set(&bp->intr_sem, 0);
5646 for (i = 0; i < bp->cp_nr_rings; i++) {
5647 struct bnxt_napi *bnapi = bp->bnapi[i];
5648 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5649
5650 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5651 }
5652 }
5653
5654 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5655 bool async_only)
5656 {
5657 DECLARE_BITMAP(async_events_bmap, 256);
5658 u32 *events = (u32 *)async_events_bmap;
5659 struct hwrm_func_drv_rgtr_output *resp;
5660 struct hwrm_func_drv_rgtr_input *req;
5661 u32 flags;
5662 int rc, i;
5663
5664 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5665 if (rc)
5666 return rc;
5667
5668 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5669 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5670 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5671
5672 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5673 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5674 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5675 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5676 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5677 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5678 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5679 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
5680 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
5681 req->flags = cpu_to_le32(flags);
5682 req->ver_maj_8b = DRV_VER_MAJ;
5683 req->ver_min_8b = DRV_VER_MIN;
5684 req->ver_upd_8b = DRV_VER_UPD;
5685 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5686 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5687 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5688
5689 if (BNXT_PF(bp)) {
5690 u32 data[8];
5691 int i;
5692
5693 memset(data, 0, sizeof(data));
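/* Build a 256-bit bitmap (8 x u32), one bit per HWRM command ID, of
 * the VF requests that should be forwarded to the PF driver.
 */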
5694 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5695 u16 cmd = bnxt_vf_req_snif[i];
5696 unsigned int bit, idx;
5697
5698 if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
5699 cmd == HWRM_PORT_PHY_QCFG)
5700 continue;
5701
5702 idx = cmd / 32;
5703 bit = cmd % 32;
5704 data[idx] |= 1 << bit;
5705 }
5706
5707 for (i = 0; i < 8; i++)
5708 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5709
5710 req->enables |=
5711 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5712 }
5713
5714 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5715 req->flags |= cpu_to_le32(
5716 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5717
5718 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5719 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5720 u16 event_id = bnxt_async_events_arr[i];
5721
5722 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5723 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5724 continue;
5725 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5726 !bp->ptp_cfg)
5727 continue;
5728 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5729 }
5730 if (bmap && bmap_size) {
5731 for (i = 0; i < bmap_size; i++) {
5732 if (test_bit(i, bmap))
5733 __set_bit(i, async_events_bmap);
5734 }
5735 }
5736 for (i = 0; i < 8; i++)
5737 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5738
5739 if (async_only)
5740 req->enables =
5741 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5742
5743 resp = hwrm_req_hold(bp, req);
5744 rc = hwrm_req_send(bp, req);
5745 if (!rc) {
5746 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5747 if (resp->flags &
5748 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5749 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5750 }
5751 hwrm_req_drop(bp, req);
5752 return rc;
5753 }
5754
5755 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5756 {
5757 struct hwrm_func_drv_unrgtr_input *req;
5758 int rc;
5759
5760 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5761 return 0;
5762
5763 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5764 if (rc)
5765 return rc;
5766 return hwrm_req_send(bp, req);
5767 }
5768
5769 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5770
5771 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5772 {
5773 struct hwrm_tunnel_dst_port_free_input *req;
5774 int rc;
5775
5776 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5777 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5778 return 0;
5779 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5780 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5781 return 0;
5782
5783 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5784 if (rc)
5785 return rc;
5786
5787 req->tunnel_type = tunnel_type;
5788
5789 switch (tunnel_type) {
5790 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5791 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5792 bp->vxlan_port = 0;
5793 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5794 break;
5795 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5796 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5797 bp->nge_port = 0;
5798 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5799 break;
5800 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5801 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5802 bp->vxlan_gpe_port = 0;
5803 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5804 break;
5805 default:
5806 break;
5807 }
5808
5809 rc = hwrm_req_send(bp, req);
5810 if (rc)
5811 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5812 rc);
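/* The tunnel TPA enable bitmap depends on the configured tunnel ports,
 * so refresh the TPA configuration after the port change.
 */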
5813 if (bp->flags & BNXT_FLAG_TPA)
5814 bnxt_set_tpa(bp, true);
5815 return rc;
5816 }
5817
5818 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5819 u8 tunnel_type)
5820 {
5821 struct hwrm_tunnel_dst_port_alloc_output *resp;
5822 struct hwrm_tunnel_dst_port_alloc_input *req;
5823 int rc;
5824
5825 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5826 if (rc)
5827 return rc;
5828
5829 req->tunnel_type = tunnel_type;
5830 req->tunnel_dst_port_val = port;
5831
5832 resp = hwrm_req_hold(bp, req);
5833 rc = hwrm_req_send(bp, req);
5834 if (rc) {
5835 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5836 rc);
5837 goto err_out;
5838 }
5839
5840 switch (tunnel_type) {
5841 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5842 bp->vxlan_port = port;
5843 bp->vxlan_fw_dst_port_id =
5844 le16_to_cpu(resp->tunnel_dst_port_id);
5845 break;
5846 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5847 bp->nge_port = port;
5848 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5849 break;
5850 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5851 bp->vxlan_gpe_port = port;
5852 bp->vxlan_gpe_fw_dst_port_id =
5853 le16_to_cpu(resp->tunnel_dst_port_id);
5854 break;
5855 default:
5856 break;
5857 }
5858 if (bp->flags & BNXT_FLAG_TPA)
5859 bnxt_set_tpa(bp, true);
5860
5861 err_out:
5862 hwrm_req_drop(bp, req);
5863 return rc;
5864 }
5865
5866 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5867 {
5868 struct hwrm_cfa_l2_set_rx_mask_input *req;
5869 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5870 int rc;
5871
5872 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5873 if (rc)
5874 return rc;
5875
5876 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5877 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5878 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5879 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5880 }
5881 req->mask = cpu_to_le32(vnic->rx_mask);
5882 return hwrm_req_send_silent(bp, req);
5883 }
5884
5885 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5886 {
5887 if (!atomic_dec_and_test(&fltr->refcnt))
5888 return;
5889 spin_lock_bh(&bp->ntp_fltr_lock);
5890 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5891 spin_unlock_bh(&bp->ntp_fltr_lock);
5892 return;
5893 }
5894 hlist_del_rcu(&fltr->base.hash);
5895 bnxt_del_one_usr_fltr(bp, &fltr->base);
5896 if (fltr->base.flags) {
5897 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5898 bp->ntp_fltr_count--;
5899 }
5900 spin_unlock_bh(&bp->ntp_fltr_lock);
5901 kfree_rcu(fltr, base.rcu);
5902 }
5903
5904 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5905 struct bnxt_l2_key *key,
5906 u32 idx)
5907 {
5908 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5909 struct bnxt_l2_filter *fltr;
5910
5911 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5912 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5913
5914 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5915 l2_key->vlan == key->vlan)
5916 return fltr;
5917 }
5918 return NULL;
5919 }
5920
5921 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5922 struct bnxt_l2_key *key,
5923 u32 idx)
5924 {
5925 struct bnxt_l2_filter *fltr = NULL;
5926
5927 rcu_read_lock();
5928 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5929 if (fltr)
5930 atomic_inc(&fltr->refcnt);
5931 rcu_read_unlock();
5932 return fltr;
5933 }
5934
5935 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5936 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5937 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5938 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5939 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5940
5941 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5942 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5943 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5944 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5945 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5946
5947 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5948 {
5949 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5950 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5951 return sizeof(fkeys->addrs.v4addrs) +
5952 sizeof(fkeys->ports);
5953
5954 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5955 return sizeof(fkeys->addrs.v4addrs);
5956 }
5957
5958 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5959 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5960 return sizeof(fkeys->addrs.v6addrs) +
5961 sizeof(fkeys->ports);
5962
5963 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5964 return sizeof(fkeys->addrs.v6addrs);
5965 }
5966
5967 return 0;
5968 }
5969
5970 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5971 const unsigned char *key)
5972 {
5973 u64 prefix = bp->toeplitz_prefix, hash = 0;
5974 struct bnxt_ipv4_tuple tuple4;
5975 struct bnxt_ipv6_tuple tuple6;
5976 int i, j, len = 0;
5977 u8 *four_tuple;
5978
5979 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5980 if (!len)
5981 return 0;
5982
5983 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5984 tuple4.v4addrs = fkeys->addrs.v4addrs;
5985 tuple4.ports = fkeys->ports;
5986 four_tuple = (unsigned char *)&tuple4;
5987 } else {
5988 tuple6.v6addrs = fkeys->addrs.v6addrs;
5989 tuple6.ports = fkeys->ports;
5990 four_tuple = (unsigned char *)&tuple6;
5991 }
5992
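/* Bit-serial Toeplitz hash: scan each tuple byte MSB first, XOR the
 * current 64-bit key window into the hash for every set bit, slide the
 * window left one bit per input bit, and refill its low byte from the
 * next key byte after each input byte.
 */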
5993 for (i = 0, j = 8; i < len; i++, j++) {
5994 u8 byte = four_tuple[i];
5995 int bit;
5996
5997 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5998 if (byte & 0x80)
5999 hash ^= prefix;
6000 }
6001 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
6002 }
6003
6004 /* The valid part of the hash is in the upper 32 bits. */
6005 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
6006 }
6007
6008 #ifdef CONFIG_RFS_ACCEL
6009 static struct bnxt_l2_filter *
6010 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
6011 {
6012 struct bnxt_l2_filter *fltr;
6013 u32 idx;
6014
6015 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6016 BNXT_L2_FLTR_HASH_MASK;
6017 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6018 return fltr;
6019 }
6020 #endif
6021
6022 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
6023 struct bnxt_l2_key *key, u32 idx)
6024 {
6025 struct hlist_head *head;
6026
6027 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
6028 fltr->l2_key.vlan = key->vlan;
6029 fltr->base.type = BNXT_FLTR_TYPE_L2;
6030 if (fltr->base.flags) {
6031 int bit_id;
6032
6033 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6034 bp->max_fltr, 0);
6035 if (bit_id < 0)
6036 return -ENOMEM;
6037 fltr->base.sw_id = (u16)bit_id;
6038 bp->ntp_fltr_count++;
6039 }
6040 head = &bp->l2_fltr_hash_tbl[idx];
6041 hlist_add_head_rcu(&fltr->base.hash, head);
6042 bnxt_insert_usr_fltr(bp, &fltr->base);
6043 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
6044 atomic_set(&fltr->refcnt, 1);
6045 return 0;
6046 }
6047
6048 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
6049 struct bnxt_l2_key *key,
6050 gfp_t gfp)
6051 {
6052 struct bnxt_l2_filter *fltr;
6053 u32 idx;
6054 int rc;
6055
6056 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6057 BNXT_L2_FLTR_HASH_MASK;
6058 fltr = bnxt_lookup_l2_filter(bp, key, idx);
6059 if (fltr)
6060 return fltr;
6061
6062 fltr = kzalloc(sizeof(*fltr), gfp);
6063 if (!fltr)
6064 return ERR_PTR(-ENOMEM);
6065 spin_lock_bh(&bp->ntp_fltr_lock);
6066 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6067 spin_unlock_bh(&bp->ntp_fltr_lock);
6068 if (rc) {
6069 bnxt_del_l2_filter(bp, fltr);
6070 fltr = ERR_PTR(rc);
6071 }
6072 return fltr;
6073 }
6074
6075 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
6076 struct bnxt_l2_key *key,
6077 u16 flags)
6078 {
6079 struct bnxt_l2_filter *fltr;
6080 u32 idx;
6081 int rc;
6082
6083 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
6084 BNXT_L2_FLTR_HASH_MASK;
6085 spin_lock_bh(&bp->ntp_fltr_lock);
6086 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
6087 if (fltr) {
6088 fltr = ERR_PTR(-EEXIST);
6089 goto l2_filter_exit;
6090 }
6091 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
6092 if (!fltr) {
6093 fltr = ERR_PTR(-ENOMEM);
6094 goto l2_filter_exit;
6095 }
6096 fltr->base.flags = flags;
6097 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
6098 if (rc) {
6099 spin_unlock_bh(&bp->ntp_fltr_lock);
6100 bnxt_del_l2_filter(bp, fltr);
6101 return ERR_PTR(rc);
6102 }
6103
6104 l2_filter_exit:
6105 spin_unlock_bh(&bp->ntp_fltr_lock);
6106 return fltr;
6107 }
6108
6109 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
6110 {
6111 #ifdef CONFIG_BNXT_SRIOV
6112 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
6113
6114 return vf->fw_fid;
6115 #else
6116 return INVALID_HW_RING_ID;
6117 #endif
6118 }
6119
6120 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6121 {
6122 struct hwrm_cfa_l2_filter_free_input *req;
6123 u16 target_id = 0xffff;
6124 int rc;
6125
6126 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6127 struct bnxt_pf_info *pf = &bp->pf;
6128
6129 if (fltr->base.vf_idx >= pf->active_vfs)
6130 return -EINVAL;
6131
6132 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6133 if (target_id == INVALID_HW_RING_ID)
6134 return -EINVAL;
6135 }
6136
6137 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
6138 if (rc)
6139 return rc;
6140
6141 req->target_id = cpu_to_le16(target_id);
6142 req->l2_filter_id = fltr->base.filter_id;
6143 return hwrm_req_send(bp, req);
6144 }
6145
6146 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
6147 {
6148 struct hwrm_cfa_l2_filter_alloc_output *resp;
6149 struct hwrm_cfa_l2_filter_alloc_input *req;
6150 u16 target_id = 0xffff;
6151 int rc;
6152
6153 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
6154 struct bnxt_pf_info *pf = &bp->pf;
6155
6156 if (fltr->base.vf_idx >= pf->active_vfs)
6157 return -EINVAL;
6158
6159 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
6160 }
6161 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
6162 if (rc)
6163 return rc;
6164
6165 req->target_id = cpu_to_le16(target_id);
6166 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
6167
6168 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
6169 req->flags |=
6170 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
6171 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
6172 req->enables =
6173 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
6174 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
6175 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
6176 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
6177 eth_broadcast_addr(req->l2_addr_mask);
6178
6179 if (fltr->l2_key.vlan) {
6180 req->enables |=
6181 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
6182 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
6183 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
6184 req->num_vlans = 1;
6185 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
6186 req->l2_ivlan_mask = cpu_to_le16(0xfff);
6187 }
6188
6189 resp = hwrm_req_hold(bp, req);
6190 rc = hwrm_req_send(bp, req);
6191 if (!rc) {
6192 fltr->base.filter_id = resp->l2_filter_id;
6193 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
6194 }
6195 hwrm_req_drop(bp, req);
6196 return rc;
6197 }
6198
6199 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
6200 struct bnxt_ntuple_filter *fltr)
6201 {
6202 struct hwrm_cfa_ntuple_filter_free_input *req;
6203 int rc;
6204
6205 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
6206 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
6207 if (rc)
6208 return rc;
6209
6210 req->ntuple_filter_id = fltr->base.filter_id;
6211 return hwrm_req_send(bp, req);
6212 }
6213
6214 #define BNXT_NTP_FLTR_FLAGS \
6215 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
6216 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
6217 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
6218 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
6219 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
6220 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
6221 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
6222 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
6223 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
6224 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
6225 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
6226 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
6227 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
6228
6229 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
6230 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
6231
6232 void bnxt_fill_ipv6_mask(__be32 mask[4])
6233 {
6234 int i;
6235
6236 for (i = 0; i < 4; i++)
6237 mask[i] = cpu_to_be32(~0);
6238 }
6239
6240 static void
6241 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
6242 struct hwrm_cfa_ntuple_filter_alloc_input *req,
6243 struct bnxt_ntuple_filter *fltr)
6244 {
6245 u16 rxq = fltr->base.rxq;
6246
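/* Pick the flow destination: the VNIC of a custom RSS context, the
 * ntuple VNIC with the RX queue as the ring table index, or the RX
 * ring id directly when the ntuple VNIC is not supported.
 */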
6247 if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
6248 struct ethtool_rxfh_context *ctx;
6249 struct bnxt_rss_ctx *rss_ctx;
6250 struct bnxt_vnic_info *vnic;
6251
6252 ctx = xa_load(&bp->dev->ethtool->rss_ctx,
6253 fltr->base.fw_vnic_id);
6254 if (ctx) {
6255 rss_ctx = ethtool_rxfh_context_priv(ctx);
6256 vnic = &rss_ctx->vnic;
6257
6258 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6259 }
6260 return;
6261 }
6262 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
6263 struct bnxt_vnic_info *vnic;
6264 u32 enables;
6265
6266 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
6267 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6268 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
6269 req->enables |= cpu_to_le32(enables);
6270 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
6271 } else {
6272 u32 flags;
6273
6274 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
6275 req->flags |= cpu_to_le32(flags);
6276 req->dst_id = cpu_to_le16(rxq);
6277 }
6278 }
6279
6280 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
6281 struct bnxt_ntuple_filter *fltr)
6282 {
6283 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
6284 struct hwrm_cfa_ntuple_filter_alloc_input *req;
6285 struct bnxt_flow_masks *masks = &fltr->fmasks;
6286 struct flow_keys *keys = &fltr->fkeys;
6287 struct bnxt_l2_filter *l2_fltr;
6288 struct bnxt_vnic_info *vnic;
6289 int rc;
6290
6291 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
6292 if (rc)
6293 return rc;
6294
6295 l2_fltr = fltr->l2_fltr;
6296 req->l2_filter_id = l2_fltr->base.filter_id;
6297
6298 if (fltr->base.flags & BNXT_ACT_DROP) {
6299 req->flags =
6300 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
6301 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
6302 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
6303 } else {
6304 vnic = &bp->vnic_info[fltr->base.rxq + 1];
6305 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
6306 }
6307 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
6308
6309 req->ethertype = htons(ETH_P_IP);
6310 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
6311 req->ip_protocol = keys->basic.ip_proto;
6312
6313 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
6314 req->ethertype = htons(ETH_P_IPV6);
6315 req->ip_addr_type =
6316 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
6317 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
6318 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
6319 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
6320 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
6321 } else {
6322 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
6323 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
6324 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
6325 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
6326 }
6327 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
6328 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
6329 req->tunnel_type =
6330 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
6331 }
6332
6333 req->src_port = keys->ports.src;
6334 req->src_port_mask = masks->ports.src;
6335 req->dst_port = keys->ports.dst;
6336 req->dst_port_mask = masks->ports.dst;
6337
6338 resp = hwrm_req_hold(bp, req);
6339 rc = hwrm_req_send(bp, req);
6340 if (!rc)
6341 fltr->base.filter_id = resp->ntuple_filter_id;
6342 hwrm_req_drop(bp, req);
6343 return rc;
6344 }
6345
6346 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
6347 const u8 *mac_addr)
6348 {
6349 struct bnxt_l2_filter *fltr;
6350 struct bnxt_l2_key key;
6351 int rc;
6352
6353 ether_addr_copy(key.dst_mac_addr, mac_addr);
6354 key.vlan = 0;
6355 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
6356 if (IS_ERR(fltr))
6357 return PTR_ERR(fltr);
6358
6359 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
6360 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
6361 if (rc)
6362 bnxt_del_l2_filter(bp, fltr);
6363 else
6364 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
6365 return rc;
6366 }
6367
6368 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
6369 {
6370 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
6371
6372 /* Any associated ntuple filters will also be cleared by firmware. */
6373 for (i = 0; i < num_of_vnics; i++) {
6374 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6375
6376 for (j = 0; j < vnic->uc_filter_count; j++) {
6377 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
6378
6379 bnxt_hwrm_l2_filter_free(bp, fltr);
6380 bnxt_del_l2_filter(bp, fltr);
6381 }
6382 vnic->uc_filter_count = 0;
6383 }
6384 }
6385
6386 #define BNXT_DFLT_TUNL_TPA_BMAP \
6387 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
6388 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
6389 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
6390
6391 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
6392 struct hwrm_vnic_tpa_cfg_input *req)
6393 {
6394 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
6395
6396 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
6397 return;
6398
6399 if (bp->vxlan_port)
6400 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
6401 if (bp->vxlan_gpe_port)
6402 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
6403 if (bp->nge_port)
6404 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
6405
6406 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
6407 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
6408 }
6409
6410 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6411 u32 tpa_flags)
6412 {
6413 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
6414 struct hwrm_vnic_tpa_cfg_input *req;
6415 int rc;
6416
6417 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
6418 return 0;
6419
6420 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
6421 if (rc)
6422 return rc;
6423
6424 if (tpa_flags) {
6425 u16 mss = bp->dev->mtu - 40;
6426 u32 nsegs, n, segs = 0, flags;
6427
6428 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
6429 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
6430 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
6431 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
6432 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
6433 if (tpa_flags & BNXT_FLAG_GRO)
6434 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
6435
6436 req->flags = cpu_to_le32(flags);
6437
6438 req->enables =
6439 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
6440 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
6441 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
6442
6443 /* The number of aggregation segments is in log2 units, and the
6444 * first packet is not counted in these units.
6445 */
6446 if (mss <= BNXT_RX_PAGE_SIZE) {
6447 n = BNXT_RX_PAGE_SIZE / mss;
6448 nsegs = (MAX_SKB_FRAGS - 1) * n;
6449 } else {
6450 n = mss / BNXT_RX_PAGE_SIZE;
6451 if (mss & (BNXT_RX_PAGE_SIZE - 1))
6452 n++;
6453 nsegs = (MAX_SKB_FRAGS - n) / n;
6454 }
6455
6456 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6457 segs = MAX_TPA_SEGS_P5;
6458 max_aggs = bp->max_tpa;
6459 } else {
6460 segs = ilog2(nsegs);
6461 }
6462 req->max_agg_segs = cpu_to_le16(segs);
6463 req->max_aggs = cpu_to_le16(max_aggs);
6464
6465 req->min_agg_len = cpu_to_le32(512);
6466 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
6467 }
6468 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6469
6470 return hwrm_req_send(bp, req);
6471 }
6472
6473 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6474 {
6475 struct bnxt_ring_grp_info *grp_info;
6476
6477 grp_info = &bp->grp_info[ring->grp_idx];
6478 return grp_info->cp_fw_ring_id;
6479 }
6480
6481 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6482 {
6483 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6484 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6485 else
6486 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6487 }
6488
6489 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6490 {
6491 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6492 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6493 else
6494 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6495 }
6496
6497 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6498 {
6499 int entries;
6500
6501 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6502 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6503 else
6504 entries = HW_HASH_INDEX_SIZE;
6505
6506 bp->rss_indir_tbl_entries = entries;
6507 bp->rss_indir_tbl =
6508 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
6509 if (!bp->rss_indir_tbl)
6510 return -ENOMEM;
6511
6512 return 0;
6513 }
6514
6515 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
6516 struct ethtool_rxfh_context *rss_ctx)
6517 {
6518 u16 max_rings, max_entries, pad, i;
6519 u32 *rss_indir_tbl;
6520
6521 if (!bp->rx_nr_rings)
6522 return;
6523
6524 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6525 max_rings = bp->rx_nr_rings - 1;
6526 else
6527 max_rings = bp->rx_nr_rings;
6528
6529 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6530 if (rss_ctx)
6531 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
6532 else
6533 rss_indir_tbl = &bp->rss_indir_tbl[0];
6534
6535 for (i = 0; i < max_entries; i++)
6536 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6537
6538 pad = bp->rss_indir_tbl_entries - max_entries;
6539 if (pad)
6540 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
6541 }
6542
6543 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6544 {
6545 u32 i, tbl_size, max_ring = 0;
6546
6547 if (!bp->rss_indir_tbl)
6548 return 0;
6549
6550 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6551 for (i = 0; i < tbl_size; i++)
6552 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6553 return max_ring;
6554 }
6555
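/* Return the number of RSS contexts needed: P5+ chips need one context
 * per BNXT_RSS_TABLE_ENTRIES_P5 RX rings (rounded up), Nitro A0 needs
 * two, and all other chips need one.
 */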
6556 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6557 {
6558 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6559 if (!rx_rings)
6560 return 0;
6561 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6562 BNXT_RSS_TABLE_ENTRIES_P5);
6563 }
6564 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6565 return 2;
6566 return 1;
6567 }
6568
6569 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6570 {
6571 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6572 u16 i, j;
6573
6574 /* Fill the RSS indirection table with ring group ids */
6575 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6576 if (!no_rss)
6577 j = bp->rss_indir_tbl[i];
6578 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6579 }
6580 }
6581
6582 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6583 struct bnxt_vnic_info *vnic)
6584 {
6585 __le16 *ring_tbl = vnic->rss_table;
6586 struct bnxt_rx_ring_info *rxr;
6587 u16 tbl_size, i;
6588
6589 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6590
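/* Each indirection table slot is written as a pair: the RX ring's FW
 * ring id followed by its completion ring's FW ring id.
 */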
6591 for (i = 0; i < tbl_size; i++) {
6592 u16 ring_id, j;
6593
6594 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6595 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6596 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
6597 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
6598 else
6599 j = bp->rss_indir_tbl[i];
6600 rxr = &bp->rx_ring[j];
6601
6602 ring_id = rxr->rx_ring_struct.fw_ring_id;
6603 *ring_tbl++ = cpu_to_le16(ring_id);
6604 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6605 *ring_tbl++ = cpu_to_le16(ring_id);
6606 }
6607 }
6608
6609 static void
6610 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6611 struct bnxt_vnic_info *vnic)
6612 {
6613 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6614 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6615 if (bp->flags & BNXT_FLAG_CHIP_P7)
6616 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6617 } else {
6618 bnxt_fill_hw_rss_tbl(bp, vnic);
6619 }
6620
6621 if (bp->rss_hash_delta) {
6622 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6623 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6624 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6625 else
6626 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6627 } else {
6628 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6629 }
6630 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6631 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6632 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6633 }
6634
6635 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6636 bool set_rss)
6637 {
6638 struct hwrm_vnic_rss_cfg_input *req;
6639 int rc;
6640
6641 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6642 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6643 return 0;
6644
6645 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6646 if (rc)
6647 return rc;
6648
6649 if (set_rss)
6650 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6651 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6652 return hwrm_req_send(bp, req);
6653 }
6654
6655 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
6656 struct bnxt_vnic_info *vnic, bool set_rss)
6657 {
6658 struct hwrm_vnic_rss_cfg_input *req;
6659 dma_addr_t ring_tbl_map;
6660 u32 i, nr_ctxs;
6661 int rc;
6662
6663 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6664 if (rc)
6665 return rc;
6666
6667 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6668 if (!set_rss)
6669 return hwrm_req_send(bp, req);
6670
6671 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6672 ring_tbl_map = vnic->rss_table_dma_addr;
6673 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6674
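/* P5+ chips split the indirection table across multiple RSS contexts;
 * program each context with its BNXT_RSS_TABLE_SIZE_P5 slice of the
 * ring table.
 */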
6675 hwrm_req_hold(bp, req);
6676 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6677 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6678 req->ring_table_pair_index = i;
6679 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6680 rc = hwrm_req_send(bp, req);
6681 if (rc)
6682 goto exit;
6683 }
6684
6685 exit:
6686 hwrm_req_drop(bp, req);
6687 return rc;
6688 }
6689
6690 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6691 {
6692 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6693 struct hwrm_vnic_rss_qcfg_output *resp;
6694 struct hwrm_vnic_rss_qcfg_input *req;
6695
6696 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6697 return;
6698
6699 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6700 /* All contexts are configured with the same hash_type; context zero always exists. */
6701 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6702 resp = hwrm_req_hold(bp, req);
6703 if (!hwrm_req_send(bp, req)) {
6704 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6705 bp->rss_hash_delta = 0;
6706 }
6707 hwrm_req_drop(bp, req);
6708 }
6709
6710 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6711 {
6712 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
6713 struct hwrm_vnic_plcmodes_cfg_input *req;
6714 int rc;
6715
6716 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6717 if (rc)
6718 return rc;
6719
6720 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6721 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6722 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6723
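/* Enable IPv4/IPv6 header-data split only when aggregation rings are
 * in use and the device is not in RX page mode.
 */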
6724 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
6725 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6726 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6727 req->enables |=
6728 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6729 req->hds_threshold = cpu_to_le16(hds_thresh);
6730 }
6731 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6732 return hwrm_req_send(bp, req);
6733 }
6734
6735 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
6736 struct bnxt_vnic_info *vnic,
6737 u16 ctx_idx)
6738 {
6739 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6740
6741 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6742 return;
6743
6744 req->rss_cos_lb_ctx_id =
6745 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
6746
6747 hwrm_req_send(bp, req);
6748 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6749 }
6750
6751 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6752 {
6753 int i, j;
6754
6755 for (i = 0; i < bp->nr_vnics; i++) {
6756 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6757
6758 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6759 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6760 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
6761 }
6762 }
6763 bp->rsscos_nr_ctxs = 0;
6764 }
6765
6766 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
6767 struct bnxt_vnic_info *vnic, u16 ctx_idx)
6768 {
6769 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6770 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6771 int rc;
6772
6773 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6774 if (rc)
6775 return rc;
6776
6777 resp = hwrm_req_hold(bp, req);
6778 rc = hwrm_req_send(bp, req);
6779 if (!rc)
6780 vnic->fw_rss_cos_lb_ctx[ctx_idx] =
6781 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6782 hwrm_req_drop(bp, req);
6783
6784 return rc;
6785 }
6786
6787 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6788 {
6789 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6790 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6791 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6792 }
6793
6794 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6795 {
6796 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6797 struct hwrm_vnic_cfg_input *req;
6798 unsigned int ring = 0, grp_idx;
6799 u16 def_vlan = 0;
6800 int rc;
6801
6802 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6803 if (rc)
6804 return rc;
6805
6806 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6807 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6808
6809 req->default_rx_ring_id =
6810 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6811 req->default_cmpl_ring_id =
6812 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6813 req->enables =
6814 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6815 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6816 goto vnic_mru;
6817 }
6818 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6819 /* Only RSS is supported for now; COS and LB are TBD. */
6820 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6821 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6822 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6823 VNIC_CFG_REQ_ENABLES_MRU);
6824 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6825 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6826 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6827 VNIC_CFG_REQ_ENABLES_MRU);
6828 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6829 } else {
6830 req->rss_rule = cpu_to_le16(0xffff);
6831 }
6832
6833 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6834 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6835 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6836 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6837 } else {
6838 req->cos_rule = cpu_to_le16(0xffff);
6839 }
6840
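/* Select the default ring group: RX ring 0 for RSS VNICs, RX ring
 * (vnic_id - 1) for RFS VNICs, and the last RX ring for the special
 * Nitro A0 VNIC.
 */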
6841 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6842 ring = 0;
6843 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6844 ring = vnic->vnic_id - 1;
6845 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6846 ring = bp->rx_nr_rings - 1;
6847
6848 grp_idx = bp->rx_ring[ring].bnapi->index;
6849 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6850 req->lb_rule = cpu_to_le16(0xffff);
6851 vnic_mru:
6852 vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
6853 req->mru = cpu_to_le16(vnic->mru);
6854
6855 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6856 #ifdef CONFIG_BNXT_SRIOV
6857 if (BNXT_VF(bp))
6858 def_vlan = bp->vf.vlan;
6859 #endif
6860 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6861 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6862 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
6863 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6864
6865 return hwrm_req_send(bp, req);
6866 }
6867
6868 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
6869 struct bnxt_vnic_info *vnic)
6870 {
6871 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
6872 struct hwrm_vnic_free_input *req;
6873
6874 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6875 return;
6876
6877 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6878
6879 hwrm_req_send(bp, req);
6880 vnic->fw_vnic_id = INVALID_HW_RING_ID;
6881 }
6882 }
6883
6884 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6885 {
6886 u16 i;
6887
6888 for (i = 0; i < bp->nr_vnics; i++)
6889 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
6890 }
6891
6892 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
6893 unsigned int start_rx_ring_idx,
6894 unsigned int nr_rings)
6895 {
6896 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6897 struct hwrm_vnic_alloc_output *resp;
6898 struct hwrm_vnic_alloc_input *req;
6899 int rc;
6900
6901 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6902 if (rc)
6903 return rc;
6904
6905 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6906 goto vnic_no_ring_grps;
6907
6908 /* map ring groups to this vnic */
6909 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6910 grp_idx = bp->rx_ring[i].bnapi->index;
6911 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6912 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6913 j, nr_rings);
6914 break;
6915 }
6916 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6917 }
6918
6919 vnic_no_ring_grps:
6920 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6921 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6922 if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
6923 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6924
6925 resp = hwrm_req_hold(bp, req);
6926 rc = hwrm_req_send(bp, req);
6927 if (!rc)
6928 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6929 hwrm_req_drop(bp, req);
6930 return rc;
6931 }
6932
6933 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6934 {
6935 struct hwrm_vnic_qcaps_output *resp;
6936 struct hwrm_vnic_qcaps_input *req;
6937 int rc;
6938
6939 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6940 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6941 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6942 if (bp->hwrm_spec_code < 0x10600)
6943 return 0;
6944
6945 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6946 if (rc)
6947 return rc;
6948
6949 resp = hwrm_req_hold(bp, req);
6950 rc = hwrm_req_send(bp, req);
6951 if (!rc) {
6952 u32 flags = le32_to_cpu(resp->flags);
6953
6954 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6955 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6956 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6957 if (flags &
6958 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6959 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6960
6961 /* Older P5 fw before EXT_HW_STATS support did not set
6962 * VLAN_STRIP_CAP properly.
6963 */
6964 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6965 (BNXT_CHIP_P5(bp) &&
6966 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6967 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6968 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6969 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6970 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6971 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6972 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6973 if (bp->max_tpa_v2) {
6974 if (BNXT_CHIP_P5(bp))
6975 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6976 else
6977 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6978 }
6979 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6980 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6981 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6982 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6983 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6984 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6985 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6986 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6987 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6988 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6989 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
6990 bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
6991 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
6992 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
6993 }
6994 hwrm_req_drop(bp, req);
6995 return rc;
6996 }
6997
6998 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6999 {
7000 struct hwrm_ring_grp_alloc_output *resp;
7001 struct hwrm_ring_grp_alloc_input *req;
7002 int rc;
7003 u16 i;
7004
7005 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7006 return 0;
7007
7008 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
7009 if (rc)
7010 return rc;
7011
7012 resp = hwrm_req_hold(bp, req);
7013 for (i = 0; i < bp->rx_nr_rings; i++) {
7014 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
7015
7016 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
7017 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
7018 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
7019 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
7020
7021 rc = hwrm_req_send(bp, req);
7022
7023 if (rc)
7024 break;
7025
7026 bp->grp_info[grp_idx].fw_grp_id =
7027 le32_to_cpu(resp->ring_group_id);
7028 }
7029 hwrm_req_drop(bp, req);
7030 return rc;
7031 }
7032
7033 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
7034 {
7035 struct hwrm_ring_grp_free_input *req;
7036 u16 i;
7037
7038 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7039 return;
7040
7041 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
7042 return;
7043
7044 hwrm_req_hold(bp, req);
7045 for (i = 0; i < bp->cp_nr_rings; i++) {
7046 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
7047 continue;
7048 req->ring_group_id =
7049 cpu_to_le32(bp->grp_info[i].fw_grp_id);
7050
7051 hwrm_req_send(bp, req);
7052 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
7053 }
7054 hwrm_req_drop(bp, req);
7055 }
7056
7057 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
7058 struct hwrm_ring_alloc_input *req,
7059 struct bnxt_ring_struct *ring)
7060 {
7061 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
7062 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
7063 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
7064
7065 if (ring_type == HWRM_RING_ALLOC_AGG) {
7066 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
7067 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
7068 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
7069 enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
7070 } else {
7071 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
7072 if (NET_IP_ALIGN == 2)
7073 req->flags =
7074 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
7075 }
7076 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7077 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7078 req->enables |= cpu_to_le32(enables);
7079 }
7080
7081 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
7082 struct bnxt_ring_struct *ring,
7083 u32 ring_type, u32 map_index)
7084 {
7085 struct hwrm_ring_alloc_output *resp;
7086 struct hwrm_ring_alloc_input *req;
7087 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
7088 struct bnxt_ring_grp_info *grp_info;
7089 int rc, err = 0;
7090 u16 ring_id;
7091
7092 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
7093 if (rc)
7094 goto exit;
7095
7096 req->enables = 0;
7097 if (rmem->nr_pages > 1) {
7098 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
7099 /* Page size is in log2 units */
7100 req->page_size = BNXT_PAGE_SHIFT;
7101 req->page_tbl_depth = 1;
7102 } else {
7103 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
7104 }
7105 req->fbo = 0;
7106 /* Association of ring index with doorbell index and MSIX number */
7107 req->logical_id = cpu_to_le16(map_index);
7108
7109 switch (ring_type) {
7110 case HWRM_RING_ALLOC_TX: {
7111 struct bnxt_tx_ring_info *txr;
7112 u16 flags = 0;
7113
7114 txr = container_of(ring, struct bnxt_tx_ring_info,
7115 tx_ring_struct);
7116 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
7117 /* Association of transmit ring with completion ring */
7118 grp_info = &bp->grp_info[ring->grp_idx];
7119 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
7120 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
7121 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
7122 req->queue_id = cpu_to_le16(ring->queue_id);
7123 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
7124 req->cmpl_coal_cnt =
7125 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
7126 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
7127 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
7128 req->flags = cpu_to_le16(flags);
7129 break;
7130 }
7131 case HWRM_RING_ALLOC_RX:
7132 case HWRM_RING_ALLOC_AGG:
7133 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
7134 req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
7135 cpu_to_le32(bp->rx_ring_mask + 1) :
7136 cpu_to_le32(bp->rx_agg_ring_mask + 1);
7137 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7138 bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
7139 break;
7140 case HWRM_RING_ALLOC_CMPL:
7141 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
7142 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7143 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7144 /* Association of cp ring with nq */
7145 grp_info = &bp->grp_info[map_index];
7146 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
7147 req->cq_handle = cpu_to_le64(ring->handle);
7148 req->enables |= cpu_to_le32(
7149 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
7150 } else {
7151 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7152 }
7153 break;
7154 case HWRM_RING_ALLOC_NQ:
7155 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
7156 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
7157 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
7158 break;
7159 default:
7160 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
7161 ring_type);
7162 return -EINVAL;
7163 }
7164
7165 resp = hwrm_req_hold(bp, req);
7166 rc = hwrm_req_send(bp, req);
7167 err = le16_to_cpu(resp->error_code);
7168 ring_id = le16_to_cpu(resp->ring_id);
7169 hwrm_req_drop(bp, req);
7170
7171 exit:
7172 if (rc || err) {
7173 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
7174 ring_type, rc, err);
7175 return -EIO;
7176 }
7177 ring->fw_ring_id = ring_id;
7178 return rc;
7179 }
7180
7181 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
7182 {
7183 int rc;
7184
7185 if (BNXT_PF(bp)) {
7186 struct hwrm_func_cfg_input *req;
7187
7188 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
7189 if (rc)
7190 return rc;
7191
7192 req->fid = cpu_to_le16(0xffff);
7193 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7194 req->async_event_cr = cpu_to_le16(idx);
7195 return hwrm_req_send(bp, req);
7196 } else {
7197 struct hwrm_func_vf_cfg_input *req;
7198
7199 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
7200 if (rc)
7201 return rc;
7202
7203 req->enables =
7204 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
7205 req->async_event_cr = cpu_to_le16(idx);
7206 return hwrm_req_send(bp, req);
7207 }
7208 }
7209
7210 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
7211 u32 ring_type)
7212 {
7213 switch (ring_type) {
7214 case HWRM_RING_ALLOC_TX:
7215 db->db_ring_mask = bp->tx_ring_mask;
7216 break;
7217 case HWRM_RING_ALLOC_RX:
7218 db->db_ring_mask = bp->rx_ring_mask;
7219 break;
7220 case HWRM_RING_ALLOC_AGG:
7221 db->db_ring_mask = bp->rx_agg_ring_mask;
7222 break;
7223 case HWRM_RING_ALLOC_CMPL:
7224 case HWRM_RING_ALLOC_NQ:
7225 db->db_ring_mask = bp->cp_ring_mask;
7226 break;
7227 }
7228 if (bp->flags & BNXT_FLAG_CHIP_P7) {
7229 db->db_epoch_mask = db->db_ring_mask + 1;
7230 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
7231 }
7232 }
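/* On P7 chips the doorbell value also carries an epoch bit:
 * db_epoch_mask selects the bit just above the ring mask (i.e. the ring
 * size) and db_epoch_shift moves it into the DBR_EPOCH_SFT position, so
 * the bit toggles each time the producer index wraps around the ring.
 */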
7233
7234 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
7235 u32 map_idx, u32 xid)
7236 {
7237 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7238 switch (ring_type) {
7239 case HWRM_RING_ALLOC_TX:
7240 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
7241 break;
7242 case HWRM_RING_ALLOC_RX:
7243 case HWRM_RING_ALLOC_AGG:
7244 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
7245 break;
7246 case HWRM_RING_ALLOC_CMPL:
7247 db->db_key64 = DBR_PATH_L2;
7248 break;
7249 case HWRM_RING_ALLOC_NQ:
7250 db->db_key64 = DBR_PATH_L2;
7251 break;
7252 }
7253 db->db_key64 |= (u64)xid << DBR_XID_SFT;
7254
7255 if (bp->flags & BNXT_FLAG_CHIP_P7)
7256 db->db_key64 |= DBR_VALID;
7257
7258 db->doorbell = bp->bar1 + bp->db_offset;
7259 } else {
7260 db->doorbell = bp->bar1 + map_idx * 0x80;
7261 switch (ring_type) {
7262 case HWRM_RING_ALLOC_TX:
7263 db->db_key32 = DB_KEY_TX;
7264 break;
7265 case HWRM_RING_ALLOC_RX:
7266 case HWRM_RING_ALLOC_AGG:
7267 db->db_key32 = DB_KEY_RX;
7268 break;
7269 case HWRM_RING_ALLOC_CMPL:
7270 db->db_key32 = DB_KEY_CP;
7271 break;
7272 }
7273 }
7274 bnxt_set_db_mask(bp, db, ring_type);
7275 }
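/* P5+ chips use a single 64-bit doorbell at bp->bar1 + bp->db_offset.
 * The key encodes the doorbell path and type plus the firmware ring id
 * shifted into position, e.g. for a TX ring with fw_ring_id 5:
 *   db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ | ((u64)5 << DBR_XID_SFT);
 * P7 additionally sets DBR_VALID.  Older chips use per-ring 32-bit
 * doorbells spaced 0x80 apart in BAR 1, indexed by map_idx.
 */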
7276
7277 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
7278 struct bnxt_rx_ring_info *rxr)
7279 {
7280 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7281 struct bnxt_napi *bnapi = rxr->bnapi;
7282 u32 type = HWRM_RING_ALLOC_RX;
7283 u32 map_idx = bnapi->index;
7284 int rc;
7285
7286 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7287 if (rc)
7288 return rc;
7289
7290 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
7291 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
7292
7293 return 0;
7294 }
7295
7296 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
7297 struct bnxt_rx_ring_info *rxr)
7298 {
7299 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7300 u32 type = HWRM_RING_ALLOC_AGG;
7301 u32 grp_idx = ring->grp_idx;
7302 u32 map_idx;
7303 int rc;
7304
7305 map_idx = grp_idx + bp->rx_nr_rings;
7306 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7307 if (rc)
7308 return rc;
7309
7310 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
7311 ring->fw_ring_id);
7312 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
7313 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7314 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
7315
7316 return 0;
7317 }
7318
7319 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
7320 struct bnxt_cp_ring_info *cpr)
7321 {
7322 const u32 type = HWRM_RING_ALLOC_CMPL;
7323 struct bnxt_napi *bnapi = cpr->bnapi;
7324 struct bnxt_ring_struct *ring;
7325 u32 map_idx = bnapi->index;
7326 int rc;
7327
7328 ring = &cpr->cp_ring_struct;
7329 ring->handle = BNXT_SET_NQ_HDL(cpr);
7330 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7331 if (rc)
7332 return rc;
7333 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7334 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7335 return 0;
7336 }
7337
7338 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
7339 struct bnxt_tx_ring_info *txr, u32 tx_idx)
7340 {
7341 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7342 const u32 type = HWRM_RING_ALLOC_TX;
7343 int rc;
7344
7345 rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
7346 if (rc)
7347 return rc;
7348 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
7349 return 0;
7350 }
7351
7352 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
7353 {
7354 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
7355 int i, rc = 0;
7356 u32 type;
7357
7358 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7359 type = HWRM_RING_ALLOC_NQ;
7360 else
7361 type = HWRM_RING_ALLOC_CMPL;
7362 for (i = 0; i < bp->cp_nr_rings; i++) {
7363 struct bnxt_napi *bnapi = bp->bnapi[i];
7364 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7365 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7366 u32 map_idx = ring->map_idx;
7367 unsigned int vector;
7368
7369 vector = bp->irq_tbl[map_idx].vector;
7370 disable_irq_nosync(vector);
7371 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
7372 if (rc) {
7373 enable_irq(vector);
7374 goto err_out;
7375 }
7376 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
7377 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7378 enable_irq(vector);
7379 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
7380
7381 if (!i) {
7382 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
7383 if (rc)
7384 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
7385 }
7386 }
7387
7388 for (i = 0; i < bp->tx_nr_rings; i++) {
7389 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7390
7391 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7392 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
7393 if (rc)
7394 goto err_out;
7395 }
7396 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
7397 if (rc)
7398 goto err_out;
7399 }
7400
7401 for (i = 0; i < bp->rx_nr_rings; i++) {
7402 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7403
7404 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
7405 if (rc)
7406 goto err_out;
7407 /* If we have agg rings, post agg buffers first; the RX doorbell is
7407 * then written in bnxt_hwrm_rx_agg_ring_alloc().
7407 */
7408 if (!agg_rings)
7409 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
7410 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7411 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
7412 if (rc)
7413 goto err_out;
7414 }
7415 }
7416
7417 if (agg_rings) {
7418 for (i = 0; i < bp->rx_nr_rings; i++) {
7419 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
7420 if (rc)
7421 goto err_out;
7422 }
7423 }
7424 err_out:
7425 return rc;
7426 }
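/* Allocation order: NQs (or L2 completion rings on older chips) are
 * created first with the MSI-X vector masked around the firmware call;
 * then TX rings, each with its own completion ring on P5+; then RX
 * rings and their completion rings; aggregation rings come last, and
 * their allocation also writes the RX and aggregation doorbells to post
 * the initial buffers.
 */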
7427
7428 static void bnxt_cancel_dim(struct bnxt *bp)
7429 {
7430 int i;
7431
7432 /* DIM work is initialized in bnxt_enable_napi(). Proceed only
7433 * if NAPI is enabled.
7434 */
7435 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
7436 return;
7437
7438 /* Make sure NAPI sees that the VNIC is disabled */
7439 synchronize_net();
7440 for (i = 0; i < bp->rx_nr_rings; i++) {
7441 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
7442 struct bnxt_napi *bnapi = rxr->bnapi;
7443
7444 cancel_work_sync(&bnapi->cp_ring.dim.work);
7445 }
7446 }
7447
7448 static int hwrm_ring_free_send_msg(struct bnxt *bp,
7449 struct bnxt_ring_struct *ring,
7450 u32 ring_type, int cmpl_ring_id)
7451 {
7452 struct hwrm_ring_free_output *resp;
7453 struct hwrm_ring_free_input *req;
7454 u16 error_code = 0;
7455 int rc;
7456
7457 if (BNXT_NO_FW_ACCESS(bp))
7458 return 0;
7459
7460 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
7461 if (rc)
7462 goto exit;
7463
7464 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
7465 req->ring_type = ring_type;
7466 req->ring_id = cpu_to_le16(ring->fw_ring_id);
7467
7468 resp = hwrm_req_hold(bp, req);
7469 rc = hwrm_req_send(bp, req);
7470 error_code = le16_to_cpu(resp->error_code);
7471 hwrm_req_drop(bp, req);
7472 exit:
7473 if (rc || error_code) {
7474 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
7475 ring_type, rc, error_code);
7476 return -EIO;
7477 }
7478 return 0;
7479 }
7480
7481 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
7482 struct bnxt_tx_ring_info *txr,
7483 bool close_path)
7484 {
7485 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
7486 u32 cmpl_ring_id;
7487
7488 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7489 return;
7490
7491 cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
7492 INVALID_HW_RING_ID;
7493 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
7494 cmpl_ring_id);
7495 ring->fw_ring_id = INVALID_HW_RING_ID;
7496 }
7497
7498 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
7499 struct bnxt_rx_ring_info *rxr,
7500 bool close_path)
7501 {
7502 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
7503 u32 grp_idx = rxr->bnapi->index;
7504 u32 cmpl_ring_id;
7505
7506 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7507 return;
7508
7509 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7510 hwrm_ring_free_send_msg(bp, ring,
7511 RING_FREE_REQ_RING_TYPE_RX,
7512 close_path ? cmpl_ring_id :
7513 INVALID_HW_RING_ID);
7514 ring->fw_ring_id = INVALID_HW_RING_ID;
7515 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
7516 }
7517
7518 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
7519 struct bnxt_rx_ring_info *rxr,
7520 bool close_path)
7521 {
7522 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
7523 u32 grp_idx = rxr->bnapi->index;
7524 u32 type, cmpl_ring_id;
7525
7526 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7527 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
7528 else
7529 type = RING_FREE_REQ_RING_TYPE_RX;
7530
7531 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7532 return;
7533
7534 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
7535 hwrm_ring_free_send_msg(bp, ring, type,
7536 close_path ? cmpl_ring_id :
7537 INVALID_HW_RING_ID);
7538 ring->fw_ring_id = INVALID_HW_RING_ID;
7539 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
7540 }
7541
7542 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
7543 struct bnxt_cp_ring_info *cpr)
7544 {
7545 struct bnxt_ring_struct *ring;
7546
7547 ring = &cpr->cp_ring_struct;
7548 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7549 return;
7550
7551 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
7552 INVALID_HW_RING_ID);
7553 ring->fw_ring_id = INVALID_HW_RING_ID;
7554 }
7555
7556 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
7557 {
7558 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
7559 int i, size = ring->ring_mem.page_size;
7560
7561 cpr->cp_raw_cons = 0;
7562 cpr->toggle = 0;
7563
7564 for (i = 0; i < bp->cp_nr_pages; i++)
7565 if (cpr->cp_desc_ring[i])
7566 memset(cpr->cp_desc_ring[i], 0, size);
7567 }
7568
7569 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
7570 {
7571 u32 type;
7572 int i;
7573
7574 if (!bp->bnapi)
7575 return;
7576
7577 for (i = 0; i < bp->tx_nr_rings; i++)
7578 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
7579
7580 bnxt_cancel_dim(bp);
7581 for (i = 0; i < bp->rx_nr_rings; i++) {
7582 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
7583 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
7584 }
7585
7586 /* The completion rings are about to be freed. After that the
7587 * IRQ doorbell will not work anymore. So we need to disable
7588 * IRQ here.
7589 */
7590 bnxt_disable_int_sync(bp);
7591
7592 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7593 type = RING_FREE_REQ_RING_TYPE_NQ;
7594 else
7595 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7596 for (i = 0; i < bp->cp_nr_rings; i++) {
7597 struct bnxt_napi *bnapi = bp->bnapi[i];
7598 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7599 struct bnxt_ring_struct *ring;
7600 int j;
7601
7602 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
7603 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
7604
7605 ring = &cpr->cp_ring_struct;
7606 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7607 hwrm_ring_free_send_msg(bp, ring, type,
7608 INVALID_HW_RING_ID);
7609 ring->fw_ring_id = INVALID_HW_RING_ID;
7610 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7611 }
7612 }
7613 }
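/* Teardown mirrors allocation in reverse: TX rings are freed first,
 * then RX and aggregation rings (after cancelling any pending DIM
 * work), then interrupts are disabled before the per-ring completion
 * rings and finally the NQs (or L2 completion rings) are freed.
 */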
7614
7615 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7616 bool shared);
7617 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7618 bool shared);
7619
7620 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7621 {
7622 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7623 struct hwrm_func_qcfg_output *resp;
7624 struct hwrm_func_qcfg_input *req;
7625 int rc;
7626
7627 if (bp->hwrm_spec_code < 0x10601)
7628 return 0;
7629
7630 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7631 if (rc)
7632 return rc;
7633
7634 req->fid = cpu_to_le16(0xffff);
7635 resp = hwrm_req_hold(bp, req);
7636 rc = hwrm_req_send(bp, req);
7637 if (rc) {
7638 hwrm_req_drop(bp, req);
7639 return rc;
7640 }
7641
7642 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7643 if (BNXT_NEW_RM(bp)) {
7644 u16 cp, stats;
7645
7646 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7647 hw_resc->resv_hw_ring_grps =
7648 le32_to_cpu(resp->alloc_hw_ring_grps);
7649 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7650 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7651 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7652 stats = le16_to_cpu(resp->alloc_stat_ctx);
7653 hw_resc->resv_irqs = cp;
7654 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7655 int rx = hw_resc->resv_rx_rings;
7656 int tx = hw_resc->resv_tx_rings;
7657
7658 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7659 rx >>= 1;
7660 if (cp < (rx + tx)) {
7661 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7662 if (rc)
7663 goto get_rings_exit;
7664 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7665 rx <<= 1;
7666 hw_resc->resv_rx_rings = rx;
7667 hw_resc->resv_tx_rings = tx;
7668 }
7669 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7670 hw_resc->resv_hw_ring_grps = rx;
7671 }
7672 hw_resc->resv_cp_rings = cp;
7673 hw_resc->resv_stat_ctxs = stats;
7674 }
7675 get_rings_exit:
7676 hwrm_req_drop(bp, req);
7677 return rc;
7678 }
7679
7680 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7681 {
7682 struct hwrm_func_qcfg_output *resp;
7683 struct hwrm_func_qcfg_input *req;
7684 int rc;
7685
7686 if (bp->hwrm_spec_code < 0x10601)
7687 return 0;
7688
7689 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7690 if (rc)
7691 return rc;
7692
7693 req->fid = cpu_to_le16(fid);
7694 resp = hwrm_req_hold(bp, req);
7695 rc = hwrm_req_send(bp, req);
7696 if (!rc)
7697 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7698
7699 hwrm_req_drop(bp, req);
7700 return rc;
7701 }
7702
7703 static bool bnxt_rfs_supported(struct bnxt *bp);
7704
7705 static struct hwrm_func_cfg_input *
7706 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7707 {
7708 struct hwrm_func_cfg_input *req;
7709 u32 enables = 0;
7710
7711 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7712 return NULL;
7713
7714 req->fid = cpu_to_le16(0xffff);
7715 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7716 req->num_tx_rings = cpu_to_le16(hwr->tx);
7717 if (BNXT_NEW_RM(bp)) {
7718 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7719 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7720 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7721 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7722 enables |= hwr->cp_p5 ?
7723 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7724 } else {
7725 enables |= hwr->cp ?
7726 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7727 enables |= hwr->grp ?
7728 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7729 }
7730 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7731 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7732 0;
7733 req->num_rx_rings = cpu_to_le16(hwr->rx);
7734 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7735 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7736 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7737 req->num_msix = cpu_to_le16(hwr->cp);
7738 } else {
7739 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7740 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7741 }
7742 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7743 req->num_vnics = cpu_to_le16(hwr->vnic);
7744 }
7745 req->enables = cpu_to_le32(enables);
7746 return req;
7747 }
7748
7749 static struct hwrm_func_vf_cfg_input *
7750 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7751 {
7752 struct hwrm_func_vf_cfg_input *req;
7753 u32 enables = 0;
7754
7755 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7756 return NULL;
7757
7758 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7759 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7760 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7761 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7762 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7763 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7764 enables |= hwr->cp_p5 ?
7765 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7766 } else {
7767 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7768 enables |= hwr->grp ?
7769 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7770 }
7771 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7772 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7773
7774 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7775 req->num_tx_rings = cpu_to_le16(hwr->tx);
7776 req->num_rx_rings = cpu_to_le16(hwr->rx);
7777 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7778 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7779 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7780 } else {
7781 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7782 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7783 }
7784 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7785 req->num_vnics = cpu_to_le16(hwr->vnic);
7786
7787 req->enables = cpu_to_le32(enables);
7788 return req;
7789 }
7790
7791 static int
7792 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7793 {
7794 struct hwrm_func_cfg_input *req;
7795 int rc;
7796
7797 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7798 if (!req)
7799 return -ENOMEM;
7800
7801 if (!req->enables) {
7802 hwrm_req_drop(bp, req);
7803 return 0;
7804 }
7805
7806 rc = hwrm_req_send(bp, req);
7807 if (rc)
7808 return rc;
7809
7810 if (bp->hwrm_spec_code < 0x10601)
7811 bp->hw_resc.resv_tx_rings = hwr->tx;
7812
7813 return bnxt_hwrm_get_rings(bp);
7814 }
7815
7816 static int
7817 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7818 {
7819 struct hwrm_func_vf_cfg_input *req;
7820 int rc;
7821
7822 if (!BNXT_NEW_RM(bp)) {
7823 bp->hw_resc.resv_tx_rings = hwr->tx;
7824 return 0;
7825 }
7826
7827 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7828 if (!req)
7829 return -ENOMEM;
7830
7831 rc = hwrm_req_send(bp, req);
7832 if (rc)
7833 return rc;
7834
7835 return bnxt_hwrm_get_rings(bp);
7836 }
7837
7838 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7839 {
7840 if (BNXT_PF(bp))
7841 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7842 else
7843 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7844 }
7845
7846 int bnxt_nq_rings_in_use(struct bnxt *bp)
7847 {
7848 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
7849 }
7850
7851 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7852 {
7853 int cp;
7854
7855 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7856 return bnxt_nq_rings_in_use(bp);
7857
7858 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7859 return cp;
7860 }
7861
7862 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7863 {
7864 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
7865 }
7866
7867 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7868 {
7869 if (!hwr->grp)
7870 return 0;
7871 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7872 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7873
7874 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7875 rss_ctx *= hwr->vnic;
7876 return rss_ctx;
7877 }
7878 if (BNXT_VF(bp))
7879 return BNXT_VF_MAX_RSS_CTX;
7880 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7881 return hwr->grp + 1;
7882 return 1;
7883 }
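/* P5+ chips need bnxt_get_nr_rss_ctxs() contexts for the ring groups,
 * multiplied by the VNIC count on chips that support ntuple filtering
 * per VNIC.  On older chips a VF gets a fixed maximum, and an
 * RFS-capable PF without the new RSS capability needs one context per
 * ring group plus one.
 */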
7884
7885 /* Check if a default RSS map needs to be setup. This function is only
7886 * used on older firmware that does not require reserving RX rings.
7887 */
7888 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7889 {
7890 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7891
7892 /* The RSS map is valid for RX rings set to resv_rx_rings */
7893 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7894 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7895 if (!netif_is_rxfh_configured(bp->dev))
7896 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
7897 }
7898 }
7899
7900 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7901 {
7902 if (bp->flags & BNXT_FLAG_RFS) {
7903 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7904 return 2 + bp->num_rss_ctx;
7905 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7906 return rx_rings + 1;
7907 }
7908 return 1;
7909 }
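/* With RFS enabled, chips that support ntuple filtering per VNIC need
 * two base VNICs plus one per user-created RSS context; older chips
 * need the default VNIC plus one per RX ring.  Without RFS a single
 * VNIC is enough.
 */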
7910
7911 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7912 {
7913 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7914 int cp = bnxt_cp_rings_in_use(bp);
7915 int nq = bnxt_nq_rings_in_use(bp);
7916 int rx = bp->rx_nr_rings, stat;
7917 int vnic, grp = rx;
7918
7919 /* Old firmware does not need RX ring reservations but we still
7920 * need to setup a default RSS map when needed. With new firmware
7921 * we go through RX ring reservations first and then set up the
7922 * RSS map for the successfully reserved RX rings when needed.
7923 */
7924 if (!BNXT_NEW_RM(bp))
7925 bnxt_check_rss_tbl_no_rmgr(bp);
7926
7927 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7928 bp->hwrm_spec_code >= 0x10601)
7929 return true;
7930
7931 if (!BNXT_NEW_RM(bp))
7932 return false;
7933
7934 vnic = bnxt_get_total_vnics(bp, rx);
7935
7936 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7937 rx <<= 1;
7938 stat = bnxt_get_func_stat_ctxs(bp);
7939 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7940 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7941 (hw_resc->resv_hw_ring_grps != grp &&
7942 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7943 return true;
7944 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7945 hw_resc->resv_irqs != nq)
7946 return true;
7947 return false;
7948 }
7949
7950 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7951 {
7952 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7953
7954 hwr->tx = hw_resc->resv_tx_rings;
7955 if (BNXT_NEW_RM(bp)) {
7956 hwr->rx = hw_resc->resv_rx_rings;
7957 hwr->cp = hw_resc->resv_irqs;
7958 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7959 hwr->cp_p5 = hw_resc->resv_cp_rings;
7960 hwr->grp = hw_resc->resv_hw_ring_grps;
7961 hwr->vnic = hw_resc->resv_vnics;
7962 hwr->stat = hw_resc->resv_stat_ctxs;
7963 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7964 }
7965 }
7966
7967 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7968 {
7969 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7970 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7971 }
7972
7973 static int bnxt_get_avail_msix(struct bnxt *bp, int num);
7974
7975 static int __bnxt_reserve_rings(struct bnxt *bp)
7976 {
7977 struct bnxt_hw_rings hwr = {0};
7978 int rx_rings, old_rx_rings, rc;
7979 int cp = bp->cp_nr_rings;
7980 int ulp_msix = 0;
7981 bool sh = false;
7982 int tx_cp;
7983
7984 if (!bnxt_need_reserve_rings(bp))
7985 return 0;
7986
7987 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
7988 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
7989 if (!ulp_msix)
7990 bnxt_set_ulp_stat_ctxs(bp, 0);
7991
7992 if (ulp_msix > bp->ulp_num_msix_want)
7993 ulp_msix = bp->ulp_num_msix_want;
7994 hwr.cp = cp + ulp_msix;
7995 } else {
7996 hwr.cp = bnxt_nq_rings_in_use(bp);
7997 }
7998
7999 hwr.tx = bp->tx_nr_rings;
8000 hwr.rx = bp->rx_nr_rings;
8001 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8002 sh = true;
8003 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8004 hwr.cp_p5 = hwr.rx + hwr.tx;
8005
8006 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
8007
8008 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8009 hwr.rx <<= 1;
8010 hwr.grp = bp->rx_nr_rings;
8011 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
8012 hwr.stat = bnxt_get_func_stat_ctxs(bp);
8013 old_rx_rings = bp->hw_resc.resv_rx_rings;
8014
8015 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
8016 if (rc)
8017 return rc;
8018
8019 bnxt_copy_reserved_rings(bp, &hwr);
8020
8021 rx_rings = hwr.rx;
8022 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8023 if (hwr.rx >= 2) {
8024 rx_rings = hwr.rx >> 1;
8025 } else {
8026 if (netif_running(bp->dev))
8027 return -ENOMEM;
8028
8029 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8030 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8031 bp->dev->hw_features &= ~NETIF_F_LRO;
8032 bp->dev->features &= ~NETIF_F_LRO;
8033 bnxt_set_ring_params(bp);
8034 }
8035 }
8036 rx_rings = min_t(int, rx_rings, hwr.grp);
8037 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
8038 if (bnxt_ulp_registered(bp->edev) &&
8039 hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
8040 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
8041 hwr.cp = min_t(int, hwr.cp, hwr.stat);
8042 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
8043 if (bp->flags & BNXT_FLAG_AGG_RINGS)
8044 hwr.rx = rx_rings << 1;
8045 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
8046 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
8047 if (hwr.tx != bp->tx_nr_rings) {
8048 netdev_warn(bp->dev,
8049 "Able to reserve only %d out of %d requested TX rings\n",
8050 hwr.tx, bp->tx_nr_rings);
8051 }
8052 bp->tx_nr_rings = hwr.tx;
8053
8054 /* If we cannot reserve all the RX rings, reset the RSS map only
8055 * if absolutely necessary
8056 */
8057 if (rx_rings != bp->rx_nr_rings) {
8058 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
8059 rx_rings, bp->rx_nr_rings);
8060 if (netif_is_rxfh_configured(bp->dev) &&
8061 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
8062 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
8063 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
8064 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
8065 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
8066 }
8067 }
8068 bp->rx_nr_rings = rx_rings;
8069 bp->cp_nr_rings = hwr.cp;
8070
8071 if (!bnxt_rings_ok(bp, &hwr))
8072 return -ENOMEM;
8073
8074 if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
8075 !netif_is_rxfh_configured(bp->dev))
8076 bnxt_set_dflt_rss_indir_tbl(bp, NULL);
8077
8078 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
8079 int resv_msix, resv_ctx, ulp_ctxs;
8080 struct bnxt_hw_resc *hw_resc;
8081
8082 hw_resc = &bp->hw_resc;
8083 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
8084 ulp_msix = min_t(int, resv_msix, ulp_msix);
8085 bnxt_set_ulp_msix_num(bp, ulp_msix);
8086 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
8087 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
8088 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
8089 }
8090
8091 return rc;
8092 }
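/* After the firmware call the requested counts are trimmed to what was
 * actually granted: RX rings are limited by the reserved ring groups,
 * completion rings by the reserved stat contexts, and the default RSS
 * indirection table is rebuilt only if the reserved RX ring count
 * changed and the user has not configured the table.
 */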
8093
8094 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8095 {
8096 struct hwrm_func_vf_cfg_input *req;
8097 u32 flags;
8098
8099 if (!BNXT_NEW_RM(bp))
8100 return 0;
8101
8102 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
8103 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
8104 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8105 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8106 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8107 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
8108 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
8109 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8110 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8111
8112 req->flags = cpu_to_le32(flags);
8113 return hwrm_req_send_silent(bp, req);
8114 }
8115
8116 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8117 {
8118 struct hwrm_func_cfg_input *req;
8119 u32 flags;
8120
8121 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
8122 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
8123 if (BNXT_NEW_RM(bp)) {
8124 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
8125 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8126 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
8127 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
8128 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
8129 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
8130 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
8131 else
8132 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8133 }
8134
8135 req->flags = cpu_to_le32(flags);
8136 return hwrm_req_send_silent(bp, req);
8137 }
8138
8139 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
8140 {
8141 if (bp->hwrm_spec_code < 0x10801)
8142 return 0;
8143
8144 if (BNXT_PF(bp))
8145 return bnxt_hwrm_check_pf_rings(bp, hwr);
8146
8147 return bnxt_hwrm_check_vf_rings(bp, hwr);
8148 }
8149
8150 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
8151 {
8152 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8153 struct hwrm_ring_aggint_qcaps_output *resp;
8154 struct hwrm_ring_aggint_qcaps_input *req;
8155 int rc;
8156
8157 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
8158 coal_cap->num_cmpl_dma_aggr_max = 63;
8159 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
8160 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
8161 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
8162 coal_cap->int_lat_tmr_min_max = 65535;
8163 coal_cap->int_lat_tmr_max_max = 65535;
8164 coal_cap->num_cmpl_aggr_int_max = 65535;
8165 coal_cap->timer_units = 80;
8166
8167 if (bp->hwrm_spec_code < 0x10902)
8168 return;
8169
8170 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
8171 return;
8172
8173 resp = hwrm_req_hold(bp, req);
8174 rc = hwrm_req_send_silent(bp, req);
8175 if (!rc) {
8176 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
8177 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
8178 coal_cap->num_cmpl_dma_aggr_max =
8179 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
8180 coal_cap->num_cmpl_dma_aggr_during_int_max =
8181 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
8182 coal_cap->cmpl_aggr_dma_tmr_max =
8183 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
8184 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
8185 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
8186 coal_cap->int_lat_tmr_min_max =
8187 le16_to_cpu(resp->int_lat_tmr_min_max);
8188 coal_cap->int_lat_tmr_max_max =
8189 le16_to_cpu(resp->int_lat_tmr_max_max);
8190 coal_cap->num_cmpl_aggr_int_max =
8191 le16_to_cpu(resp->num_cmpl_aggr_int_max);
8192 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
8193 }
8194 hwrm_req_drop(bp, req);
8195 }
8196
8197 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
8198 {
8199 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8200
8201 return usec * 1000 / coal_cap->timer_units;
8202 }
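/* timer_units is the hardware tick in nanoseconds (80 unless
 * RING_AGGINT_QCAPS reports otherwise), so e.g. a 25 usec coalescing
 * tick becomes 25000 / 80 = 312 timer units.
 */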
8203
8204 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
8205 struct bnxt_coal *hw_coal,
8206 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8207 {
8208 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8209 u16 val, tmr, max, flags = hw_coal->flags;
8210 u32 cmpl_params = coal_cap->cmpl_params;
8211
8212 max = hw_coal->bufs_per_record * 128;
8213 if (hw_coal->budget)
8214 max = hw_coal->bufs_per_record * hw_coal->budget;
8215 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
8216
8217 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
8218 req->num_cmpl_aggr_int = cpu_to_le16(val);
8219
8220 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
8221 req->num_cmpl_dma_aggr = cpu_to_le16(val);
8222
8223 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
8224 coal_cap->num_cmpl_dma_aggr_during_int_max);
8225 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
8226
8227 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
8228 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
8229 req->int_lat_tmr_max = cpu_to_le16(tmr);
8230
8231 /* min timer set to 1/2 of interrupt timer */
8232 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
8233 val = tmr / 2;
8234 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
8235 req->int_lat_tmr_min = cpu_to_le16(val);
8236 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8237 }
8238
8239 /* buf timer set to 1/4 of interrupt timer */
8240 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
8241 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
8242
8243 if (cmpl_params &
8244 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
8245 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
8246 val = clamp_t(u16, tmr, 1,
8247 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
8248 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
8249 req->enables |=
8250 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
8251 }
8252
8253 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
8254 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
8255 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
8256 req->flags = cpu_to_le16(flags);
8257 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
8258 }
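/* Derived values: the minimum interrupt latency timer is programmed to
 * half of the maximum timer (when the firmware supports it) and the DMA
 * aggregation timer to a quarter of it, each clamped to the limits
 * reported by RING_AGGINT_QCAPS.
 */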
8259
8260 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
8261 struct bnxt_coal *hw_coal)
8262 {
8263 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
8264 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8265 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
8266 u32 nq_params = coal_cap->nq_params;
8267 u16 tmr;
8268 int rc;
8269
8270 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
8271 return 0;
8272
8273 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8274 if (rc)
8275 return rc;
8276
8277 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
8278 req->flags =
8279 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
8280
8281 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
8282 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
8283 req->int_lat_tmr_min = cpu_to_le16(tmr);
8284 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
8285 return hwrm_req_send(bp, req);
8286 }
8287
8288 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
8289 {
8290 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
8291 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8292 struct bnxt_coal coal;
8293 int rc;
8294
8295 /* Tick values in microseconds.
8296 * 1 coal_buf x bufs_per_record = 1 completion record.
8297 */
8298 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
8299
8300 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
8301 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
8302
8303 if (!bnapi->rx_ring)
8304 return -ENODEV;
8305
8306 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8307 if (rc)
8308 return rc;
8309
8310 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
8311
8312 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
8313
8314 return hwrm_req_send(bp, req_rx);
8315 }
8316
8317 static int
8318 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8319 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8320 {
8321 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
8322
8323 req->ring_id = cpu_to_le16(ring_id);
8324 return hwrm_req_send(bp, req);
8325 }
8326
8327 static int
8328 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
8329 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
8330 {
8331 struct bnxt_tx_ring_info *txr;
8332 int i, rc;
8333
8334 bnxt_for_each_napi_tx(i, bnapi, txr) {
8335 u16 ring_id;
8336
8337 ring_id = bnxt_cp_ring_for_tx(bp, txr);
8338 req->ring_id = cpu_to_le16(ring_id);
8339 rc = hwrm_req_send(bp, req);
8340 if (rc)
8341 return rc;
8342 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8343 return 0;
8344 }
8345 return 0;
8346 }
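/* On chips before P5 all TX rings of a NAPI map to the same completion
 * ring, so one HWRM call is enough; on P5+ each TX ring has its own
 * completion ring and is programmed individually.
 */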
8347
8348 int bnxt_hwrm_set_coal(struct bnxt *bp)
8349 {
8350 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
8351 int i, rc;
8352
8353 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8354 if (rc)
8355 return rc;
8356
8357 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
8358 if (rc) {
8359 hwrm_req_drop(bp, req_rx);
8360 return rc;
8361 }
8362
8363 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
8364 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
8365
8366 hwrm_req_hold(bp, req_rx);
8367 hwrm_req_hold(bp, req_tx);
8368 for (i = 0; i < bp->cp_nr_rings; i++) {
8369 struct bnxt_napi *bnapi = bp->bnapi[i];
8370 struct bnxt_coal *hw_coal;
8371
8372 if (!bnapi->rx_ring)
8373 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8374 else
8375 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
8376 if (rc)
8377 break;
8378
8379 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
8380 continue;
8381
8382 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
8383 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
8384 if (rc)
8385 break;
8386 }
8387 if (bnapi->rx_ring)
8388 hw_coal = &bp->rx_coal;
8389 else
8390 hw_coal = &bp->tx_coal;
8391 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
8392 }
8393 hwrm_req_drop(bp, req_rx);
8394 hwrm_req_drop(bp, req_tx);
8395 return rc;
8396 }
8397
8398 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
8399 {
8400 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
8401 struct hwrm_stat_ctx_free_input *req;
8402 int i;
8403
8404 if (!bp->bnapi)
8405 return;
8406
8407 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8408 return;
8409
8410 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
8411 return;
8412 if (BNXT_FW_MAJ(bp) <= 20) {
8413 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
8414 hwrm_req_drop(bp, req);
8415 return;
8416 }
8417 hwrm_req_hold(bp, req0);
8418 }
8419 hwrm_req_hold(bp, req);
8420 for (i = 0; i < bp->cp_nr_rings; i++) {
8421 struct bnxt_napi *bnapi = bp->bnapi[i];
8422 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8423
8424 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
8425 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
8426 if (req0) {
8427 req0->stat_ctx_id = req->stat_ctx_id;
8428 hwrm_req_send(bp, req0);
8429 }
8430 hwrm_req_send(bp, req);
8431
8432 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
8433 }
8434 }
8435 hwrm_req_drop(bp, req);
8436 if (req0)
8437 hwrm_req_drop(bp, req0);
8438 }
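/* For firmware with a major version of 20 or older, each statistics
 * context is explicitly cleared with STAT_CTX_CLR_STATS before it is
 * freed.
 */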
8439
8440 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
8441 {
8442 struct hwrm_stat_ctx_alloc_output *resp;
8443 struct hwrm_stat_ctx_alloc_input *req;
8444 int rc, i;
8445
8446 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8447 return 0;
8448
8449 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
8450 if (rc)
8451 return rc;
8452
8453 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
8454 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
8455
8456 resp = hwrm_req_hold(bp, req);
8457 for (i = 0; i < bp->cp_nr_rings; i++) {
8458 struct bnxt_napi *bnapi = bp->bnapi[i];
8459 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8460
8461 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
8462
8463 rc = hwrm_req_send(bp, req);
8464 if (rc)
8465 break;
8466
8467 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
8468
8469 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
8470 }
8471 hwrm_req_drop(bp, req);
8472 return rc;
8473 }
8474
8475 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
8476 {
8477 struct hwrm_func_qcfg_output *resp;
8478 struct hwrm_func_qcfg_input *req;
8479 u16 flags;
8480 int rc;
8481
8482 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
8483 if (rc)
8484 return rc;
8485
8486 req->fid = cpu_to_le16(0xffff);
8487 resp = hwrm_req_hold(bp, req);
8488 rc = hwrm_req_send(bp, req);
8489 if (rc)
8490 goto func_qcfg_exit;
8491
8492 flags = le16_to_cpu(resp->flags);
8493 #ifdef CONFIG_BNXT_SRIOV
8494 if (BNXT_VF(bp)) {
8495 struct bnxt_vf_info *vf = &bp->vf;
8496
8497 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
8498 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
8499 vf->flags |= BNXT_VF_TRUST;
8500 else
8501 vf->flags &= ~BNXT_VF_TRUST;
8502 } else {
8503 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
8504 }
8505 #endif
8506 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
8507 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
8508 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
8509 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
8510 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
8511 }
8512 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
8513 bp->flags |= BNXT_FLAG_MULTI_HOST;
8514
8515 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
8516 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
8517
8518 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
8519 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
8520 if (resp->roce_bidi_opt_mode &
8521 FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
8522 bp->cos0_cos1_shared = 1;
8523 else
8524 bp->cos0_cos1_shared = 0;
8525
8526 switch (resp->port_partition_type) {
8527 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
8528 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
8529 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
8530 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
8531 bp->port_partition_type = resp->port_partition_type;
8532 break;
8533 }
8534 if (bp->hwrm_spec_code < 0x10707 ||
8535 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
8536 bp->br_mode = BRIDGE_MODE_VEB;
8537 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
8538 bp->br_mode = BRIDGE_MODE_VEPA;
8539 else
8540 bp->br_mode = BRIDGE_MODE_UNDEF;
8541
8542 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
8543 if (!bp->max_mtu)
8544 bp->max_mtu = BNXT_MAX_MTU;
8545
8546 if (bp->db_size)
8547 goto func_qcfg_exit;
8548
8549 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
8550 if (BNXT_CHIP_P5(bp)) {
8551 if (BNXT_PF(bp))
8552 bp->db_offset = DB_PF_OFFSET_P5;
8553 else
8554 bp->db_offset = DB_VF_OFFSET_P5;
8555 }
8556 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
8557 1024);
8558 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
8559 bp->db_size <= bp->db_offset)
8560 bp->db_size = pci_resource_len(bp->pdev, 2);
8561
8562 func_qcfg_exit:
8563 hwrm_req_drop(bp, req);
8564 return rc;
8565 }
8566
8567 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
8568 u8 init_val, u8 init_offset,
8569 bool init_mask_set)
8570 {
8571 ctxm->init_value = init_val;
8572 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
8573 if (init_mask_set)
8574 ctxm->init_offset = init_offset * 4;
8575 else
8576 ctxm->init_value = 0;
8577 }
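/* The firmware reports the initializer offset in 4-byte units, hence
 * the multiplication by 4.  When the corresponding init mask bit is not
 * set, initialization is disabled by zeroing init_value and leaving the
 * offset marked invalid.
 */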
8578
8579 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
8580 {
8581 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8582 u16 type;
8583
8584 for (type = 0; type < ctx_max; type++) {
8585 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8586 int n = 1;
8587
8588 if (!ctxm->max_entries || ctxm->pg_info)
8589 continue;
8590
8591 if (ctxm->instance_bmap)
8592 n = hweight32(ctxm->instance_bmap);
8593 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
8594 if (!ctxm->pg_info)
8595 return -ENOMEM;
8596 }
8597 return 0;
8598 }
8599
8600 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
8601 struct bnxt_ctx_mem_type *ctxm, bool force);
8602
8603 #define BNXT_CTX_INIT_VALID(flags) \
8604 (!!((flags) & \
8605 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8606
8607 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8608 {
8609 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8610 struct hwrm_func_backing_store_qcaps_v2_input *req;
8611 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8612 u16 type;
8613 int rc;
8614
8615 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8616 if (rc)
8617 return rc;
8618
8619 if (!ctx) {
8620 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8621 if (!ctx)
8622 return -ENOMEM;
8623 bp->ctx = ctx;
8624 }
8625
8626 resp = hwrm_req_hold(bp, req);
8627
8628 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8629 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8630 u8 init_val, init_off, i;
8631 u32 max_entries;
8632 u16 entry_size;
8633 __le32 *p;
8634 u32 flags;
8635
8636 req->type = cpu_to_le16(type);
8637 rc = hwrm_req_send(bp, req);
8638 if (rc)
8639 goto ctx_done;
8640 flags = le32_to_cpu(resp->flags);
8641 type = le16_to_cpu(resp->next_valid_type);
8642 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
8643 bnxt_free_one_ctx_mem(bp, ctxm, true);
8644 continue;
8645 }
8646 entry_size = le16_to_cpu(resp->entry_size);
8647 max_entries = le32_to_cpu(resp->max_num_entries);
8648 if (ctxm->mem_valid) {
8649 if (!(flags & BNXT_CTX_MEM_PERSIST) ||
8650 ctxm->entry_size != entry_size ||
8651 ctxm->max_entries != max_entries)
8652 bnxt_free_one_ctx_mem(bp, ctxm, true);
8653 else
8654 continue;
8655 }
8656 ctxm->type = le16_to_cpu(resp->type);
8657 ctxm->entry_size = entry_size;
8658 ctxm->flags = flags;
8659 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8660 ctxm->entry_multiple = resp->entry_multiple;
8661 ctxm->max_entries = max_entries;
8662 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8663 init_val = resp->ctx_init_value;
8664 init_off = resp->ctx_init_offset;
8665 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8666 BNXT_CTX_INIT_VALID(flags));
8667 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8668 BNXT_MAX_SPLIT_ENTRY);
8669 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8670 i++, p++)
8671 ctxm->split[i] = le32_to_cpu(*p);
8672 }
8673 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8674
8675 ctx_done:
8676 hwrm_req_drop(bp, req);
8677 return rc;
8678 }
8679
8680 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8681 {
8682 struct hwrm_func_backing_store_qcaps_output *resp;
8683 struct hwrm_func_backing_store_qcaps_input *req;
8684 int rc;
8685
8686 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
8687 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
8688 return 0;
8689
8690 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8691 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8692
8693 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8694 if (rc)
8695 return rc;
8696
8697 resp = hwrm_req_hold(bp, req);
8698 rc = hwrm_req_send_silent(bp, req);
8699 if (!rc) {
8700 struct bnxt_ctx_mem_type *ctxm;
8701 struct bnxt_ctx_mem_info *ctx;
8702 u8 init_val, init_idx = 0;
8703 u16 init_mask;
8704
8705 ctx = bp->ctx;
8706 if (!ctx) {
8707 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8708 if (!ctx) {
8709 rc = -ENOMEM;
8710 goto ctx_err;
8711 }
8712 bp->ctx = ctx;
8713 }
8714 init_val = resp->ctx_kind_initializer;
8715 init_mask = le16_to_cpu(resp->ctx_init_mask);
8716
8717 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8718 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8719 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8720 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8721 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8722 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8723 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8724 (init_mask & (1 << init_idx++)) != 0);
8725
8726 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8727 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8728 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8729 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8730 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8731 (init_mask & (1 << init_idx++)) != 0);
8732
8733 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8734 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8735 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8736 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8737 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8738 (init_mask & (1 << init_idx++)) != 0);
8739
8740 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8741 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8742 ctxm->max_entries = ctxm->vnic_entries +
8743 le16_to_cpu(resp->vnic_max_ring_table_entries);
8744 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8745 bnxt_init_ctx_initializer(ctxm, init_val,
8746 resp->vnic_init_offset,
8747 (init_mask & (1 << init_idx++)) != 0);
8748
8749 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8750 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8751 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8752 bnxt_init_ctx_initializer(ctxm, init_val,
8753 resp->stat_init_offset,
8754 (init_mask & (1 << init_idx++)) != 0);
8755
8756 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8757 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8758 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8759 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8760 ctxm->entry_multiple = resp->tqm_entries_multiple;
8761 if (!ctxm->entry_multiple)
8762 ctxm->entry_multiple = 1;
8763
8764 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8765
8766 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8767 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8768 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8769 ctxm->mrav_num_entries_units =
8770 le16_to_cpu(resp->mrav_num_entries_units);
8771 bnxt_init_ctx_initializer(ctxm, init_val,
8772 resp->mrav_init_offset,
8773 (init_mask & (1 << init_idx++)) != 0);
8774
8775 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8776 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8777 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8778
8779 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8780 if (!ctx->tqm_fp_rings_count)
8781 ctx->tqm_fp_rings_count = bp->max_q;
8782 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8783 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8784
8785 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8786 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8787 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8788
8789 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8790 } else {
8791 rc = 0;
8792 }
8793 ctx_err:
8794 hwrm_req_drop(bp, req);
8795 return rc;
8796 }
8797
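/* Encode the page size and page-table level of a backing store ring into
 * *pg_attr, and point *pg_dir at either the page table (indirect) or the
 * first data page (direct), depending on the ring depth.
 */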
8798 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8799 __le64 *pg_dir)
8800 {
8801 if (!rmem->nr_pages)
8802 return;
8803
8804 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8805 if (rmem->depth >= 1) {
8806 if (rmem->depth == 2)
8807 *pg_attr |= 2;
8808 else
8809 *pg_attr |= 1;
8810 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8811 } else {
8812 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8813 }
8814 }
8815
8816 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8817 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8818 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8819 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8820 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8821 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8822
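/* Configure the legacy (pre-V2) backing store areas in a single HWRM
 * request.  Only the context types selected in @enables are filled in,
 * and the request is truncated to the legacy length if the firmware
 * cannot accept the full extended request.
 */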
8823 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8824 {
8825 struct hwrm_func_backing_store_cfg_input *req;
8826 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8827 struct bnxt_ctx_pg_info *ctx_pg;
8828 struct bnxt_ctx_mem_type *ctxm;
8829 void **__req = (void **)&req;
8830 u32 req_len = sizeof(*req);
8831 __le32 *num_entries;
8832 __le64 *pg_dir;
8833 u32 flags = 0;
8834 u8 *pg_attr;
8835 u32 ena;
8836 int rc;
8837 int i;
8838
8839 if (!ctx)
8840 return 0;
8841
8842 if (req_len > bp->hwrm_max_ext_req_len)
8843 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8844 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8845 if (rc)
8846 return rc;
8847
8848 req->enables = cpu_to_le32(enables);
8849 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8850 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8851 ctx_pg = ctxm->pg_info;
8852 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8853 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8854 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8855 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8856 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8857 &req->qpc_pg_size_qpc_lvl,
8858 &req->qpc_page_dir);
8859
8860 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8861 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8862 }
8863 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8864 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8865 ctx_pg = ctxm->pg_info;
8866 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8867 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8868 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8869 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8870 &req->srq_pg_size_srq_lvl,
8871 &req->srq_page_dir);
8872 }
8873 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8874 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8875 ctx_pg = ctxm->pg_info;
8876 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8877 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8878 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8879 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8880 &req->cq_pg_size_cq_lvl,
8881 &req->cq_page_dir);
8882 }
8883 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8884 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8885 ctx_pg = ctxm->pg_info;
8886 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8887 req->vnic_num_ring_table_entries =
8888 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8889 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8890 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8891 &req->vnic_pg_size_vnic_lvl,
8892 &req->vnic_page_dir);
8893 }
8894 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8895 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8896 ctx_pg = ctxm->pg_info;
8897 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8898 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8899 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8900 &req->stat_pg_size_stat_lvl,
8901 &req->stat_page_dir);
8902 }
8903 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8904 u32 units;
8905
8906 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8907 ctx_pg = ctxm->pg_info;
8908 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8909 units = ctxm->mrav_num_entries_units;
8910 if (units) {
8911 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8912 u32 entries;
8913
8914 num_mr = ctx_pg->entries - num_ah;
8915 entries = ((num_mr / units) << 16) | (num_ah / units);
8916 req->mrav_num_entries = cpu_to_le32(entries);
8917 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8918 }
8919 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8920 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8921 &req->mrav_pg_size_mrav_lvl,
8922 &req->mrav_page_dir);
8923 }
8924 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8925 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8926 ctx_pg = ctxm->pg_info;
8927 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8928 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8929 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8930 &req->tim_pg_size_tim_lvl,
8931 &req->tim_page_dir);
8932 }
8933 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8934 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8935 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8936 pg_dir = &req->tqm_sp_page_dir,
8937 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8938 ctx_pg = ctxm->pg_info;
8939 i < BNXT_MAX_TQM_RINGS;
8940 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8941 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8942 if (!(enables & ena))
8943 continue;
8944
8945 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8946 *num_entries = cpu_to_le32(ctx_pg->entries);
8947 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8948 }
8949 req->flags = cpu_to_le32(flags);
8950 return hwrm_req_send(bp, req);
8951 }
8952
8953 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8954 struct bnxt_ctx_pg_info *ctx_pg)
8955 {
8956 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8957
8958 rmem->page_size = BNXT_PAGE_SIZE;
8959 rmem->pg_arr = ctx_pg->ctx_pg_arr;
8960 rmem->dma_arr = ctx_pg->ctx_dma_arr;
8961 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8962 if (rmem->depth >= 1)
8963 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8964 return bnxt_alloc_ring(bp, rmem);
8965 }
8966
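/* Allocate the host pages backing one context memory area.  Small areas
 * use a single level of pages; areas larger than MAX_CTX_PAGES (or when a
 * deeper table is requested) use a two-level page table with one
 * bnxt_ctx_pg_info per second-level table.
 */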
8967 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8968 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8969 u8 depth, struct bnxt_ctx_mem_type *ctxm)
8970 {
8971 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8972 int rc;
8973
8974 if (!mem_size)
8975 return -EINVAL;
8976
8977 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8978 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8979 ctx_pg->nr_pages = 0;
8980 return -EINVAL;
8981 }
8982 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8983 int nr_tbls, i;
8984
8985 rmem->depth = 2;
8986 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8987 GFP_KERNEL);
8988 if (!ctx_pg->ctx_pg_tbl)
8989 return -ENOMEM;
8990 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8991 rmem->nr_pages = nr_tbls;
8992 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8993 if (rc)
8994 return rc;
8995 for (i = 0; i < nr_tbls; i++) {
8996 struct bnxt_ctx_pg_info *pg_tbl;
8997
8998 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8999 if (!pg_tbl)
9000 return -ENOMEM;
9001 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
9002 rmem = &pg_tbl->ring_mem;
9003 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
9004 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
9005 rmem->depth = 1;
9006 rmem->nr_pages = MAX_CTX_PAGES;
9007 rmem->ctx_mem = ctxm;
9008 if (i == (nr_tbls - 1)) {
9009 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
9010
9011 if (rem)
9012 rmem->nr_pages = rem;
9013 }
9014 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
9015 if (rc)
9016 break;
9017 }
9018 } else {
9019 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
9020 if (rmem->nr_pages > 1 || depth)
9021 rmem->depth = 1;
9022 rmem->ctx_mem = ctxm;
9023 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
9024 }
9025 return rc;
9026 }
9027
9028 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
9029 struct bnxt_ctx_pg_info *ctx_pg,
9030 void *buf, size_t offset, size_t head,
9031 size_t tail)
9032 {
9033 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9034 size_t nr_pages = ctx_pg->nr_pages;
9035 int page_size = rmem->page_size;
9036 size_t len = 0, total_len = 0;
9037 u16 depth = rmem->depth;
9038
9039 tail %= nr_pages * page_size;
9040 do {
9041 if (depth > 1) {
9042 int i = head / (page_size * MAX_CTX_PAGES);
9043 struct bnxt_ctx_pg_info *pg_tbl;
9044
9045 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9046 rmem = &pg_tbl->ring_mem;
9047 }
9048 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
9049 head += len;
9050 offset += len;
9051 total_len += len;
9052 if (head >= nr_pages * page_size)
9053 head = 0;
9054 } while (head != tail);
9055 return total_len;
9056 }
9057
9058 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
9059 struct bnxt_ctx_pg_info *ctx_pg)
9060 {
9061 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
9062
9063 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
9064 ctx_pg->ctx_pg_tbl) {
9065 int i, nr_tbls = rmem->nr_pages;
9066
9067 for (i = 0; i < nr_tbls; i++) {
9068 struct bnxt_ctx_pg_info *pg_tbl;
9069 struct bnxt_ring_mem_info *rmem2;
9070
9071 pg_tbl = ctx_pg->ctx_pg_tbl[i];
9072 if (!pg_tbl)
9073 continue;
9074 rmem2 = &pg_tbl->ring_mem;
9075 bnxt_free_ring(bp, rmem2);
9076 ctx_pg->ctx_pg_arr[i] = NULL;
9077 kfree(pg_tbl);
9078 ctx_pg->ctx_pg_tbl[i] = NULL;
9079 }
9080 kfree(ctx_pg->ctx_pg_tbl);
9081 ctx_pg->ctx_pg_tbl = NULL;
9082 }
9083 bnxt_free_ring(bp, rmem);
9084 ctx_pg->nr_pages = 0;
9085 }
9086
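/* Size and allocate the page tables for one context memory type.  The
 * requested entry count is rounded up to the firmware's multiple and
 * clamped to the advertised min/max before the backing pages are
 * allocated for each instance of the type.
 */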
9087 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
9088 struct bnxt_ctx_mem_type *ctxm, u32 entries,
9089 u8 pg_lvl)
9090 {
9091 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9092 int i, rc = 0, n = 1;
9093 u32 mem_size;
9094
9095 if (!ctxm->entry_size || !ctx_pg)
9096 return -EINVAL;
9097 if (ctxm->instance_bmap)
9098 n = hweight32(ctxm->instance_bmap);
9099 if (ctxm->entry_multiple)
9100 entries = roundup(entries, ctxm->entry_multiple);
9101 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
9102 mem_size = entries * ctxm->entry_size;
9103 for (i = 0; i < n && !rc; i++) {
9104 ctx_pg[i].entries = entries;
9105 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
9106 ctxm->init_value ? ctxm : NULL);
9107 }
9108 if (!rc)
9109 ctxm->mem_valid = 1;
9110 return rc;
9111 }
9112
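/* Send one HWRM_FUNC_BACKING_STORE_CFG_V2 request per instance of the
 * given context type.  The BS_CFG_ALL_DONE flag is set on the final
 * request when @last indicates this is the last configured type.
 */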
9113 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
9114 struct bnxt_ctx_mem_type *ctxm,
9115 bool last)
9116 {
9117 struct hwrm_func_backing_store_cfg_v2_input *req;
9118 u32 instance_bmap = ctxm->instance_bmap;
9119 int i, j, rc = 0, n = 1;
9120 __le32 *p;
9121
9122 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
9123 return 0;
9124
9125 if (instance_bmap)
9126 n = hweight32(ctxm->instance_bmap);
9127 else
9128 instance_bmap = 1;
9129
9130 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
9131 if (rc)
9132 return rc;
9133 hwrm_req_hold(bp, req);
9134 req->type = cpu_to_le16(ctxm->type);
9135 req->entry_size = cpu_to_le16(ctxm->entry_size);
9136 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
9137 bnxt_bs_trace_avail(bp, ctxm->type)) {
9138 struct bnxt_bs_trace_info *bs_trace;
9139 u32 enables;
9140
9141 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
9142 req->enables = cpu_to_le32(enables);
9143 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
9144 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
9145 }
9146 req->subtype_valid_cnt = ctxm->split_entry_cnt;
9147 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
9148 p[i] = cpu_to_le32(ctxm->split[i]);
9149 for (i = 0, j = 0; j < n && !rc; i++) {
9150 struct bnxt_ctx_pg_info *ctx_pg;
9151
9152 if (!(instance_bmap & (1 << i)))
9153 continue;
9154 req->instance = cpu_to_le16(i);
9155 ctx_pg = &ctxm->pg_info[j++];
9156 if (!ctx_pg->entries)
9157 continue;
9158 req->num_entries = cpu_to_le32(ctx_pg->entries);
9159 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
9160 &req->page_size_pbl_level,
9161 &req->page_dir);
9162 if (last && j == n)
9163 req->flags =
9164 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
9165 rc = hwrm_req_send(bp, req);
9166 }
9167 hwrm_req_drop(bp, req);
9168 return rc;
9169 }
9170
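/* Configure all valid context types through the V2 interface.  Trace
 * context types are set up first when the firmware supports them, and
 * the last valid type is tagged so the firmware knows configuration is
 * complete.
 */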
9171 static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
9172 {
9173 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9174 struct bnxt_ctx_mem_type *ctxm;
9175 u16 last_type = BNXT_CTX_INV;
9176 int rc = 0;
9177 u16 type;
9178
9179 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
9180 ctxm = &ctx->ctx_arr[type];
9181 if (!bnxt_bs_trace_avail(bp, type))
9182 continue;
9183 if (!ctxm->mem_valid) {
9184 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
9185 ctxm->max_entries, 1);
9186 if (rc) {
9187 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
9188 type);
9189 continue;
9190 }
9191 bnxt_bs_trace_init(bp, ctxm);
9192 }
9193 last_type = type;
9194 }
9195
9196 if (last_type == BNXT_CTX_INV) {
9197 for (type = 0; type < BNXT_CTX_MAX; type++) {
9198 ctxm = &ctx->ctx_arr[type];
9199 if (ctxm->mem_valid)
9200 last_type = type;
9201 }
9202 if (last_type == BNXT_CTX_INV)
9203 return 0;
9204 }
9205 ctx->ctx_arr[last_type].last = 1;
9206
9207 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
9208 ctxm = &ctx->ctx_arr[type];
9209
9210 if (!ctxm->mem_valid)
9211 continue;
9212 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
9213 if (rc)
9214 return rc;
9215 }
9216 return 0;
9217 }
9218
9219 /**
9220 * __bnxt_copy_ctx_mem - copy host context memory
9221 * @bp: The driver context
9222 * @ctxm: The pointer to the context memory type
9223 * @buf: The destination buffer or NULL to just obtain the length
9224 * @offset: The buffer offset to copy the data to
9225 * @head: The head offset of context memory to copy from
9226 * @tail: The tail offset (last byte + 1) of context memory to end the copy
9227 *
9228 * This function is called for debugging purposes to dump the host context
9229 * used by the chip.
9230 *
9231 * Return: Length of memory copied
9232 */
9233 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
9234 struct bnxt_ctx_mem_type *ctxm, void *buf,
9235 size_t offset, size_t head, size_t tail)
9236 {
9237 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
9238 size_t len = 0, total_len = 0;
9239 int i, n = 1;
9240
9241 if (!ctx_pg)
9242 return 0;
9243
9244 if (ctxm->instance_bmap)
9245 n = hweight32(ctxm->instance_bmap);
9246 for (i = 0; i < n; i++) {
9247 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
9248 tail);
9249 offset += len;
9250 total_len += len;
9251 }
9252 return total_len;
9253 }
9254
9255 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
9256 void *buf, size_t offset)
9257 {
9258 size_t tail = ctxm->max_entries * ctxm->entry_size;
9259
9260 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
9261 }
9262
9263 static void bnxt_free_one_ctx_mem(struct bnxt *bp,
9264 struct bnxt_ctx_mem_type *ctxm, bool force)
9265 {
9266 struct bnxt_ctx_pg_info *ctx_pg;
9267 int i, n = 1;
9268
9269 ctxm->last = 0;
9270
9271 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
9272 return;
9273
9274 ctx_pg = ctxm->pg_info;
9275 if (ctx_pg) {
9276 if (ctxm->instance_bmap)
9277 n = hweight32(ctxm->instance_bmap);
9278 for (i = 0; i < n; i++)
9279 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
9280
9281 kfree(ctx_pg);
9282 ctxm->pg_info = NULL;
9283 ctxm->mem_valid = 0;
9284 }
9285 memset(ctxm, 0, sizeof(*ctxm));
9286 }
9287
9288 void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
9289 {
9290 struct bnxt_ctx_mem_info *ctx = bp->ctx;
9291 u16 type;
9292
9293 if (!ctx)
9294 return;
9295
9296 for (type = 0; type < BNXT_CTX_V2_MAX; type++)
9297 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
9298
9299 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
9300 if (force) {
9301 kfree(ctx);
9302 bp->ctx = NULL;
9303 }
9304 }
9305
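/* Top-level context memory setup: query the backing store capabilities,
 * size the QP/SRQ/CQ/VNIC/STAT/TQM (and, with RoCE, MRAV/TIM) areas,
 * allocate their page tables, and then push the configuration to the
 * firmware through the legacy or V2 interface.
 */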
9306 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
9307 {
9308 struct bnxt_ctx_mem_type *ctxm;
9309 struct bnxt_ctx_mem_info *ctx;
9310 u32 l2_qps, qp1_qps, max_qps;
9311 u32 ena, entries_sp, entries;
9312 u32 srqs, max_srqs, min;
9313 u32 num_mr, num_ah;
9314 u32 extra_srqs = 0;
9315 u32 extra_qps = 0;
9316 u32 fast_qpmd_qps;
9317 u8 pg_lvl = 1;
9318 int i, rc;
9319
9320 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
9321 if (rc) {
9322 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
9323 rc);
9324 return rc;
9325 }
9326 ctx = bp->ctx;
9327 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
9328 return 0;
9329
9330 ena = 0;
9331 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
9332 goto skip_legacy;
9333
9334 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9335 l2_qps = ctxm->qp_l2_entries;
9336 qp1_qps = ctxm->qp_qp1_entries;
9337 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
9338 max_qps = ctxm->max_entries;
9339 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9340 srqs = ctxm->srq_l2_entries;
9341 max_srqs = ctxm->max_entries;
9342 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
9343 pg_lvl = 2;
9344 if (BNXT_SW_RES_LMT(bp)) {
9345 extra_qps = max_qps - l2_qps - qp1_qps;
9346 extra_srqs = max_srqs - srqs;
9347 } else {
9348 extra_qps = min_t(u32, 65536,
9349 max_qps - l2_qps - qp1_qps);
9350 /* allocate extra QPs if the fw supports the RoCE
9351 * fast QP destroy feature
9352 */
9353 extra_qps += fast_qpmd_qps;
9354 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
9355 }
9356 if (fast_qpmd_qps)
9357 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
9358 }
9359
9360 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
9361 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
9362 pg_lvl);
9363 if (rc)
9364 return rc;
9365
9366 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
9367 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
9368 if (rc)
9369 return rc;
9370
9371 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
9372 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
9373 extra_qps * 2, pg_lvl);
9374 if (rc)
9375 return rc;
9376
9377 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
9378 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9379 if (rc)
9380 return rc;
9381
9382 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
9383 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
9384 if (rc)
9385 return rc;
9386
9387 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
9388 goto skip_rdma;
9389
9390 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
9391 if (BNXT_SW_RES_LMT(bp) &&
9392 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
9393 num_ah = ctxm->mrav_av_entries;
9394 num_mr = ctxm->max_entries - num_ah;
9395 } else {
9396 /* 128K extra is needed to accommodate static AH context
9397 * allocation by f/w.
9398 */
9399 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
9400 num_ah = min_t(u32, num_mr, 1024 * 128);
9401 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
9402 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
9403 ctxm->mrav_av_entries = num_ah;
9404 }
9405
9406 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
9407 if (rc)
9408 return rc;
9409 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
9410
9411 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
9412 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
9413 if (rc)
9414 return rc;
9415 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
9416
9417 skip_rdma:
9418 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
9419 min = ctxm->min_entries;
9420 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
9421 2 * (extra_qps + qp1_qps) + min;
9422 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
9423 if (rc)
9424 return rc;
9425
9426 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
9427 entries = l2_qps + 2 * (extra_qps + qp1_qps);
9428 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
9429 if (rc)
9430 return rc;
9431 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
9432 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
9433 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
9434
9435 skip_legacy:
9436 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
9437 rc = bnxt_backing_store_cfg_v2(bp);
9438 else
9439 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
9440 if (rc) {
9441 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
9442 rc);
9443 return rc;
9444 }
9445 ctx->flags |= BNXT_CTX_FLAG_INITED;
9446 return 0;
9447 }
9448
9449 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
9450 {
9451 struct hwrm_dbg_crashdump_medium_cfg_input *req;
9452 u16 page_attr;
9453 int rc;
9454
9455 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9456 return 0;
9457
9458 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
9459 if (rc)
9460 return rc;
9461
9462 if (BNXT_PAGE_SIZE == 0x2000)
9463 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
9464 else if (BNXT_PAGE_SIZE == 0x10000)
9465 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
9466 else
9467 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
9468 req->pg_size_lvl = cpu_to_le16(page_attr |
9469 bp->fw_crash_mem->ring_mem.depth);
9470 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
9471 req->size = cpu_to_le32(bp->fw_crash_len);
9472 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
9473 return hwrm_req_send(bp, req);
9474 }
9475
9476 static void bnxt_free_crash_dump_mem(struct bnxt *bp)
9477 {
9478 if (bp->fw_crash_mem) {
9479 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9480 kfree(bp->fw_crash_mem);
9481 bp->fw_crash_mem = NULL;
9482 }
9483 }
9484
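/* Allocate (or reuse) host memory for the firmware crash dump when the
 * device supports dumping to host DDR.  Existing pages are kept if they
 * are already large enough for the length reported by the firmware.
 */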
9485 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
9486 {
9487 u32 mem_size = 0;
9488 int rc;
9489
9490 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
9491 return 0;
9492
9493 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
9494 if (rc)
9495 return rc;
9496
9497 mem_size = round_up(mem_size, 4);
9498
9499 /* keep and use the existing pages if they are already large enough */
9500 if (bp->fw_crash_mem &&
9501 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
9502 goto alloc_done;
9503
9504 if (bp->fw_crash_mem)
9505 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
9506 else
9507 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
9508 GFP_KERNEL);
9509 if (!bp->fw_crash_mem)
9510 return -ENOMEM;
9511
9512 rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
9513 if (rc) {
9514 bnxt_free_crash_dump_mem(bp);
9515 return rc;
9516 }
9517
9518 alloc_done:
9519 bp->fw_crash_len = mem_size;
9520 return 0;
9521 }
9522
9523 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
9524 {
9525 struct hwrm_func_resource_qcaps_output *resp;
9526 struct hwrm_func_resource_qcaps_input *req;
9527 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9528 int rc;
9529
9530 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
9531 if (rc)
9532 return rc;
9533
9534 req->fid = cpu_to_le16(0xffff);
9535 resp = hwrm_req_hold(bp, req);
9536 rc = hwrm_req_send_silent(bp, req);
9537 if (rc)
9538 goto hwrm_func_resc_qcaps_exit;
9539
9540 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
9541 if (!all)
9542 goto hwrm_func_resc_qcaps_exit;
9543
9544 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
9545 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9546 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
9547 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9548 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
9549 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9550 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
9551 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9552 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
9553 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
9554 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
9555 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9556 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
9557 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9558 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
9559 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9560
9561 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
9562 u16 max_msix = le16_to_cpu(resp->max_msix);
9563
9564 hw_resc->max_nqs = max_msix;
9565 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
9566 }
9567
9568 if (BNXT_PF(bp)) {
9569 struct bnxt_pf_info *pf = &bp->pf;
9570
9571 pf->vf_resv_strategy =
9572 le16_to_cpu(resp->vf_reservation_strategy);
9573 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
9574 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
9575 }
9576 hwrm_func_resc_qcaps_exit:
9577 hwrm_req_drop(bp, req);
9578 return rc;
9579 }
9580
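/* Query the PTP/PHC configuration of the port and allocate bp->ptp_cfg
 * on first use.  The reference clock registers are taken from the
 * response when direct access is supported; otherwise the fixed P5
 * TIMESYNC registers are used.
 */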
9581 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
9582 {
9583 struct hwrm_port_mac_ptp_qcfg_output *resp;
9584 struct hwrm_port_mac_ptp_qcfg_input *req;
9585 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
9586 u8 flags;
9587 int rc;
9588
9589 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
9590 rc = -ENODEV;
9591 goto no_ptp;
9592 }
9593
9594 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
9595 if (rc)
9596 goto no_ptp;
9597
9598 req->port_id = cpu_to_le16(bp->pf.port_id);
9599 resp = hwrm_req_hold(bp, req);
9600 rc = hwrm_req_send(bp, req);
9601 if (rc)
9602 goto exit;
9603
9604 flags = resp->flags;
9605 if (BNXT_CHIP_P5_AND_MINUS(bp) &&
9606 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
9607 rc = -ENODEV;
9608 goto exit;
9609 }
9610 if (!ptp) {
9611 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
9612 if (!ptp) {
9613 rc = -ENOMEM;
9614 goto exit;
9615 }
9616 ptp->bp = bp;
9617 bp->ptp_cfg = ptp;
9618 }
9619
9620 if (flags &
9621 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
9622 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
9623 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
9624 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
9625 } else if (BNXT_CHIP_P5(bp)) {
9626 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
9627 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
9628 } else {
9629 rc = -ENODEV;
9630 goto exit;
9631 }
9632 ptp->rtc_configured =
9633 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
9634 rc = bnxt_ptp_init(bp);
9635 if (rc)
9636 netdev_warn(bp->dev, "PTP initialization failed.\n");
9637 exit:
9638 hwrm_req_drop(bp, req);
9639 if (!rc)
9640 return 0;
9641
9642 no_ptp:
9643 bnxt_ptp_clear(bp);
9644 kfree(ptp);
9645 bp->ptp_cfg = NULL;
9646 return rc;
9647 }
9648
9649 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
9650 {
9651 u32 flags, flags_ext, flags_ext2, flags_ext3;
9652 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9653 struct hwrm_func_qcaps_output *resp;
9654 struct hwrm_func_qcaps_input *req;
9655 int rc;
9656
9657 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
9658 if (rc)
9659 return rc;
9660
9661 req->fid = cpu_to_le16(0xffff);
9662 resp = hwrm_req_hold(bp, req);
9663 rc = hwrm_req_send(bp, req);
9664 if (rc)
9665 goto hwrm_func_qcaps_exit;
9666
9667 flags = le32_to_cpu(resp->flags);
9668 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
9669 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
9670 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
9671 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
9672 if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
9673 bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
9674 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
9675 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
9676 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
9677 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
9678 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
9679 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
9680 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
9681 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
9682 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
9683 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
9684 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
9685 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
9686 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
9687 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
9688
9689 flags_ext = le32_to_cpu(resp->flags_ext);
9690 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
9691 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
9692 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
9693 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
9694 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
9695 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
9696 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
9697 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
9698 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
9699 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
9700 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
9701 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
9702 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
9703 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
9704 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
9705 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
9706 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
9707 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
9708
9709 flags_ext2 = le32_to_cpu(resp->flags_ext2);
9710 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
9711 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
9712 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
9713 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
9714 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
9715 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
9716 if (flags_ext2 &
9717 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
9718 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
9719 if (BNXT_PF(bp) &&
9720 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
9721 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
9722
9723 flags_ext3 = le32_to_cpu(resp->flags_ext3);
9724 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
9725 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
9726 if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
9727 bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
9728
9729 bp->tx_push_thresh = 0;
9730 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
9731 BNXT_FW_MAJ(bp) > 217)
9732 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
9733
9734 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
9735 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
9736 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
9737 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
9738 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
9739 if (!hw_resc->max_hw_ring_grps)
9740 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
9741 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
9742 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
9743 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
9744
9745 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
9746 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
9747 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
9748 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
9749 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
9750 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
9751
9752 if (BNXT_PF(bp)) {
9753 struct bnxt_pf_info *pf = &bp->pf;
9754
9755 pf->fw_fid = le16_to_cpu(resp->fid);
9756 pf->port_id = le16_to_cpu(resp->port_id);
9757 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
9758 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
9759 pf->max_vfs = le16_to_cpu(resp->max_vfs);
9760 bp->flags &= ~BNXT_FLAG_WOL_CAP;
9761 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
9762 bp->flags |= BNXT_FLAG_WOL_CAP;
9763 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
9764 bp->fw_cap |= BNXT_FW_CAP_PTP;
9765 } else {
9766 bnxt_ptp_clear(bp);
9767 kfree(bp->ptp_cfg);
9768 bp->ptp_cfg = NULL;
9769 }
9770 } else {
9771 #ifdef CONFIG_BNXT_SRIOV
9772 struct bnxt_vf_info *vf = &bp->vf;
9773
9774 vf->fw_fid = le16_to_cpu(resp->fid);
9775 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
9776 #endif
9777 }
9778 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
9779
9780 hwrm_func_qcaps_exit:
9781 hwrm_req_drop(bp, req);
9782 return rc;
9783 }
9784
9785 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
9786 {
9787 struct hwrm_dbg_qcaps_output *resp;
9788 struct hwrm_dbg_qcaps_input *req;
9789 int rc;
9790
9791 bp->fw_dbg_cap = 0;
9792 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
9793 return;
9794
9795 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
9796 if (rc)
9797 return;
9798
9799 req->fid = cpu_to_le16(0xffff);
9800 resp = hwrm_req_hold(bp, req);
9801 rc = hwrm_req_send(bp, req);
9802 if (rc)
9803 goto hwrm_dbg_qcaps_exit;
9804
9805 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
9806
9807 hwrm_dbg_qcaps_exit:
9808 hwrm_req_drop(bp, req);
9809 }
9810
9811 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
9812
9813 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
9814 {
9815 int rc;
9816
9817 rc = __bnxt_hwrm_func_qcaps(bp);
9818 if (rc)
9819 return rc;
9820
9821 bnxt_hwrm_dbg_qcaps(bp);
9822
9823 rc = bnxt_hwrm_queue_qportcfg(bp);
9824 if (rc) {
9825 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
9826 return rc;
9827 }
9828 if (bp->hwrm_spec_code >= 0x10803) {
9829 rc = bnxt_alloc_ctx_mem(bp);
9830 if (rc)
9831 return rc;
9832 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9833 if (!rc)
9834 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
9835 }
9836 return 0;
9837 }
9838
9839 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
9840 {
9841 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
9842 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
9843 u32 flags;
9844 int rc;
9845
9846 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
9847 return 0;
9848
9849 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
9850 if (rc)
9851 return rc;
9852
9853 resp = hwrm_req_hold(bp, req);
9854 rc = hwrm_req_send(bp, req);
9855 if (rc)
9856 goto hwrm_cfa_adv_qcaps_exit;
9857
9858 flags = le32_to_cpu(resp->flags);
9859 if (flags &
9860 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9861 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9862
9863 if (flags &
9864 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9865 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9866
9867 if (flags &
9868 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9869 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9870
9871 hwrm_cfa_adv_qcaps_exit:
9872 hwrm_req_drop(bp, req);
9873 return rc;
9874 }
9875
9876 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9877 {
9878 if (bp->fw_health)
9879 return 0;
9880
9881 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9882 if (!bp->fw_health)
9883 return -ENOMEM;
9884
9885 mutex_init(&bp->fw_health->lock);
9886 return 0;
9887 }
9888
9889 static int bnxt_alloc_fw_health(struct bnxt *bp)
9890 {
9891 int rc;
9892
9893 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9894 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9895 return 0;
9896
9897 rc = __bnxt_alloc_fw_health(bp);
9898 if (rc) {
9899 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9900 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9901 return rc;
9902 }
9903
9904 return 0;
9905 }
9906
9907 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9908 {
9909 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9910 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9911 BNXT_FW_HEALTH_WIN_MAP_OFF);
9912 }
9913
9914 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9915 {
9916 struct bnxt_fw_health *fw_health = bp->fw_health;
9917 u32 reg_type;
9918
9919 if (!fw_health)
9920 return;
9921
9922 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9923 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9924 fw_health->status_reliable = false;
9925
9926 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9927 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9928 fw_health->resets_reliable = false;
9929 }
9930
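/* Locate the firmware health status register.  The hcomm status
 * structure is probed through a GRC window first; if its signature is
 * missing, fall back to the fixed status location on P5+ chips.
 * GRC-based registers are remapped through the health window before use.
 */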
9931 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9932 {
9933 void __iomem *hs;
9934 u32 status_loc;
9935 u32 reg_type;
9936 u32 sig;
9937
9938 if (bp->fw_health)
9939 bp->fw_health->status_reliable = false;
9940
9941 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9942 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9943
9944 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9945 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9946 if (!bp->chip_num) {
9947 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9948 bp->chip_num = readl(bp->bar0 +
9949 BNXT_FW_HEALTH_WIN_BASE +
9950 BNXT_GRC_REG_CHIP_NUM);
9951 }
9952 if (!BNXT_CHIP_P5_PLUS(bp))
9953 return;
9954
9955 status_loc = BNXT_GRC_REG_STATUS_P5 |
9956 BNXT_FW_HEALTH_REG_TYPE_BAR0;
9957 } else {
9958 status_loc = readl(hs + offsetof(struct hcomm_status,
9959 fw_status_loc));
9960 }
9961
9962 if (__bnxt_alloc_fw_health(bp)) {
9963 netdev_warn(bp->dev, "no memory for firmware status checks\n");
9964 return;
9965 }
9966
9967 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9968 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9969 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9970 __bnxt_map_fw_health_reg(bp, status_loc);
9971 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9972 BNXT_FW_HEALTH_WIN_OFF(status_loc);
9973 }
9974
9975 bp->fw_health->status_reliable = true;
9976 }
9977
9978 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9979 {
9980 struct bnxt_fw_health *fw_health = bp->fw_health;
9981 u32 reg_base = 0xffffffff;
9982 int i;
9983
9984 bp->fw_health->status_reliable = false;
9985 bp->fw_health->resets_reliable = false;
9986 /* Only pre-map the monitoring GRC registers using window 3 */
9987 for (i = 0; i < 4; i++) {
9988 u32 reg = fw_health->regs[i];
9989
9990 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9991 continue;
9992 if (reg_base == 0xffffffff)
9993 reg_base = reg & BNXT_GRC_BASE_MASK;
9994 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9995 return -ERANGE;
9996 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9997 }
9998 bp->fw_health->status_reliable = true;
9999 bp->fw_health->resets_reliable = true;
10000 if (reg_base == 0xffffffff)
10001 return 0;
10002
10003 __bnxt_map_fw_health_reg(bp, reg_base);
10004 return 0;
10005 }
10006
10007 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
10008 {
10009 if (!bp->fw_health)
10010 return;
10011
10012 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
10013 bp->fw_health->status_reliable = true;
10014 bp->fw_health->resets_reliable = true;
10015 } else {
10016 bnxt_try_map_fw_health_reg(bp);
10017 }
10018 }
10019
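/* Retrieve the error recovery parameters (polling intervals, wait times
 * and the health/heartbeat/reset register locations) advertised by the
 * firmware, then map the GRC-based registers for fast polling.
 */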
10020 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
10021 {
10022 struct bnxt_fw_health *fw_health = bp->fw_health;
10023 struct hwrm_error_recovery_qcfg_output *resp;
10024 struct hwrm_error_recovery_qcfg_input *req;
10025 int rc, i;
10026
10027 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10028 return 0;
10029
10030 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
10031 if (rc)
10032 return rc;
10033
10034 resp = hwrm_req_hold(bp, req);
10035 rc = hwrm_req_send(bp, req);
10036 if (rc)
10037 goto err_recovery_out;
10038 fw_health->flags = le32_to_cpu(resp->flags);
10039 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
10040 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
10041 rc = -EINVAL;
10042 goto err_recovery_out;
10043 }
10044 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
10045 fw_health->master_func_wait_dsecs =
10046 le32_to_cpu(resp->master_func_wait_period);
10047 fw_health->normal_func_wait_dsecs =
10048 le32_to_cpu(resp->normal_func_wait_period);
10049 fw_health->post_reset_wait_dsecs =
10050 le32_to_cpu(resp->master_func_wait_period_after_reset);
10051 fw_health->post_reset_max_wait_dsecs =
10052 le32_to_cpu(resp->max_bailout_time_after_reset);
10053 fw_health->regs[BNXT_FW_HEALTH_REG] =
10054 le32_to_cpu(resp->fw_health_status_reg);
10055 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
10056 le32_to_cpu(resp->fw_heartbeat_reg);
10057 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
10058 le32_to_cpu(resp->fw_reset_cnt_reg);
10059 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
10060 le32_to_cpu(resp->reset_inprogress_reg);
10061 fw_health->fw_reset_inprog_reg_mask =
10062 le32_to_cpu(resp->reset_inprogress_reg_mask);
10063 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
10064 if (fw_health->fw_reset_seq_cnt >= 16) {
10065 rc = -EINVAL;
10066 goto err_recovery_out;
10067 }
10068 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
10069 fw_health->fw_reset_seq_regs[i] =
10070 le32_to_cpu(resp->reset_reg[i]);
10071 fw_health->fw_reset_seq_vals[i] =
10072 le32_to_cpu(resp->reset_reg_val[i]);
10073 fw_health->fw_reset_seq_delay_msec[i] =
10074 resp->delay_after_reset[i];
10075 }
10076 err_recovery_out:
10077 hwrm_req_drop(bp, req);
10078 if (!rc)
10079 rc = bnxt_map_fw_health_regs(bp);
10080 if (rc)
10081 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10082 return rc;
10083 }
10084
10085 static int bnxt_hwrm_func_reset(struct bnxt *bp)
10086 {
10087 struct hwrm_func_reset_input *req;
10088 int rc;
10089
10090 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
10091 if (rc)
10092 return rc;
10093
10094 req->enables = 0;
10095 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
10096 return hwrm_req_send(bp, req);
10097 }
10098
10099 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
10100 {
10101 struct hwrm_nvm_get_dev_info_output nvm_info;
10102
10103 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
10104 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
10105 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
10106 nvm_info.nvm_cfg_ver_upd);
10107 }
10108
10109 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
10110 {
10111 struct hwrm_queue_qportcfg_output *resp;
10112 struct hwrm_queue_qportcfg_input *req;
10113 u8 i, j, *qptr;
10114 bool no_rdma;
10115 int rc = 0;
10116
10117 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
10118 if (rc)
10119 return rc;
10120
10121 resp = hwrm_req_hold(bp, req);
10122 rc = hwrm_req_send(bp, req);
10123 if (rc)
10124 goto qportcfg_exit;
10125
10126 if (!resp->max_configurable_queues) {
10127 rc = -EINVAL;
10128 goto qportcfg_exit;
10129 }
10130 bp->max_tc = resp->max_configurable_queues;
10131 bp->max_lltc = resp->max_configurable_lossless_queues;
10132 if (bp->max_tc > BNXT_MAX_QUEUE)
10133 bp->max_tc = BNXT_MAX_QUEUE;
10134
10135 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
10136 qptr = &resp->queue_id0;
10137 for (i = 0, j = 0; i < bp->max_tc; i++) {
10138 bp->q_info[j].queue_id = *qptr;
10139 bp->q_ids[i] = *qptr++;
10140 bp->q_info[j].queue_profile = *qptr++;
10141 bp->tc_to_qidx[j] = j;
10142 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
10143 (no_rdma && BNXT_PF(bp)))
10144 j++;
10145 }
10146 bp->max_q = bp->max_tc;
10147 bp->max_tc = max_t(u8, j, 1);
10148
10149 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
10150 bp->max_tc = 1;
10151
10152 if (bp->max_lltc > bp->max_tc)
10153 bp->max_lltc = bp->max_tc;
10154
10155 qportcfg_exit:
10156 hwrm_req_drop(bp, req);
10157 return rc;
10158 }
10159
10160 static int bnxt_hwrm_poll(struct bnxt *bp)
10161 {
10162 struct hwrm_ver_get_input *req;
10163 int rc;
10164
10165 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10166 if (rc)
10167 return rc;
10168
10169 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10170 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10171 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10172
10173 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
10174 rc = hwrm_req_send(bp, req);
10175 return rc;
10176 }
10177
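/* Query the firmware and HWRM interface versions.  The response also
 * provides the command timeouts, maximum request lengths, chip number
 * and the device capability flags (short command, Kong channel, etc.)
 * that are cached in bp->fw_cap.
 */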
10178 static int bnxt_hwrm_ver_get(struct bnxt *bp)
10179 {
10180 struct hwrm_ver_get_output *resp;
10181 struct hwrm_ver_get_input *req;
10182 u16 fw_maj, fw_min, fw_bld, fw_rsv;
10183 u32 dev_caps_cfg, hwrm_ver;
10184 int rc, len, max_tmo_secs;
10185
10186 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
10187 if (rc)
10188 return rc;
10189
10190 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10191 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
10192 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
10193 req->hwrm_intf_min = HWRM_VERSION_MINOR;
10194 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
10195
10196 resp = hwrm_req_hold(bp, req);
10197 rc = hwrm_req_send(bp, req);
10198 if (rc)
10199 goto hwrm_ver_get_exit;
10200
10201 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
10202
10203 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
10204 resp->hwrm_intf_min_8b << 8 |
10205 resp->hwrm_intf_upd_8b;
10206 if (resp->hwrm_intf_maj_8b < 1) {
10207 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
10208 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10209 resp->hwrm_intf_upd_8b);
10210 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
10211 }
10212
10213 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
10214 HWRM_VERSION_UPDATE;
10215
10216 if (bp->hwrm_spec_code > hwrm_ver)
10217 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10218 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
10219 HWRM_VERSION_UPDATE);
10220 else
10221 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
10222 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
10223 resp->hwrm_intf_upd_8b);
10224
10225 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
10226 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
10227 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
10228 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
10229 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
10230 len = FW_VER_STR_LEN;
10231 } else {
10232 fw_maj = resp->hwrm_fw_maj_8b;
10233 fw_min = resp->hwrm_fw_min_8b;
10234 fw_bld = resp->hwrm_fw_bld_8b;
10235 fw_rsv = resp->hwrm_fw_rsvd_8b;
10236 len = BC_HWRM_STR_LEN;
10237 }
10238 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
10239 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
10240 fw_rsv);
10241
10242 if (strlen(resp->active_pkg_name)) {
10243 int fw_ver_len = strlen(bp->fw_ver_str);
10244
10245 snprintf(bp->fw_ver_str + fw_ver_len,
10246 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
10247 resp->active_pkg_name);
10248 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
10249 }
10250
10251 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
10252 if (!bp->hwrm_cmd_timeout)
10253 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10254 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
10255 if (!bp->hwrm_cmd_max_timeout)
10256 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
10257 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
10258 #ifdef CONFIG_DETECT_HUNG_TASK
10259 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
10260 max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
10261 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
10262 max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
10263 }
10264 #endif
10265
10266 if (resp->hwrm_intf_maj_8b >= 1) {
10267 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
10268 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
10269 }
10270 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
10271 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
10272
10273 bp->chip_num = le16_to_cpu(resp->chip_num);
10274 bp->chip_rev = resp->chip_rev;
10275 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
10276 !resp->chip_metal)
10277 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
10278
10279 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
10280 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
10281 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
10282 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
10283
10284 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
10285 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
10286
10287 if (dev_caps_cfg &
10288 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
10289 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
10290
10291 if (dev_caps_cfg &
10292 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
10293 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
10294
10295 if (dev_caps_cfg &
10296 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
10297 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
10298
10299 hwrm_ver_get_exit:
10300 hwrm_req_drop(bp, req);
10301 return rc;
10302 }
10303
10304 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
10305 {
10306 struct hwrm_fw_set_time_input *req;
10307 struct tm tm;
10308 time64_t now = ktime_get_real_seconds();
10309 int rc;
10310
10311 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
10312 bp->hwrm_spec_code < 0x10400)
10313 return -EOPNOTSUPP;
10314
10315 time64_to_tm(now, 0, &tm);
10316 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
10317 if (rc)
10318 return rc;
10319
10320 req->year = cpu_to_le16(1900 + tm.tm_year);
10321 req->month = 1 + tm.tm_mon;
10322 req->day = tm.tm_mday;
10323 req->hour = tm.tm_hour;
10324 req->minute = tm.tm_min;
10325 req->second = tm.tm_sec;
10326 return hwrm_req_send(bp, req);
10327 }
10328
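/* Fold a hardware counter that is narrower than 64 bits (its valid bits
 * given by @mask) into a running 64-bit software counter, handling
 * rollover.  For example, with a 48-bit mask, a hardware value that has
 * wrapped from 0xffffffffffff back to a small value adds mask + 1 to the
 * accumulated software total.
 */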
10329 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
10330 {
10331 u64 sw_tmp;
10332
10333 hw &= mask;
10334 sw_tmp = (*sw & ~mask) | hw;
10335 if (hw < (*sw & mask))
10336 sw_tmp += mask + 1;
10337 WRITE_ONCE(*sw, sw_tmp);
10338 }
10339
10340 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
10341 int count, bool ignore_zero)
10342 {
10343 int i;
10344
10345 for (i = 0; i < count; i++) {
10346 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
10347
10348 if (ignore_zero && !hw)
10349 continue;
10350
10351 if (masks[i] == -1ULL)
10352 sw_stats[i] = hw;
10353 else
10354 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
10355 }
10356 }
10357
10358 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
10359 {
10360 if (!stats->hw_stats)
10361 return;
10362
10363 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10364 stats->hw_masks, stats->len / 8, false);
10365 }
10366
10367 static void bnxt_accumulate_all_stats(struct bnxt *bp)
10368 {
10369 struct bnxt_stats_mem *ring0_stats;
10370 bool ignore_zero = false;
10371 int i;
10372
10373 /* Chip bug. Counter intermittently becomes 0. */
10374 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10375 ignore_zero = true;
10376
10377 for (i = 0; i < bp->cp_nr_rings; i++) {
10378 struct bnxt_napi *bnapi = bp->bnapi[i];
10379 struct bnxt_cp_ring_info *cpr;
10380 struct bnxt_stats_mem *stats;
10381
10382 cpr = &bnapi->cp_ring;
10383 stats = &cpr->stats;
10384 if (!i)
10385 ring0_stats = stats;
10386 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
10387 ring0_stats->hw_masks,
10388 ring0_stats->len / 8, ignore_zero);
10389 }
10390 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10391 struct bnxt_stats_mem *stats = &bp->port_stats;
10392 __le64 *hw_stats = stats->hw_stats;
10393 u64 *sw_stats = stats->sw_stats;
10394 u64 *masks = stats->hw_masks;
10395 int cnt;
10396
10397 cnt = sizeof(struct rx_port_stats) / 8;
10398 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10399
10400 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10401 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10402 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10403 cnt = sizeof(struct tx_port_stats) / 8;
10404 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
10405 }
10406 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
10407 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
10408 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
10409 }
10410 }
10411
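/* Ask the firmware to DMA the basic port statistics into the
 * pre-allocated host buffer.  A non-zero flags value requires the
 * extended HW stats capability.
 */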
10412 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
10413 {
10414 struct hwrm_port_qstats_input *req;
10415 struct bnxt_pf_info *pf = &bp->pf;
10416 int rc;
10417
10418 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
10419 return 0;
10420
10421 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10422 return -EOPNOTSUPP;
10423
10424 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
10425 if (rc)
10426 return rc;
10427
10428 req->flags = flags;
10429 req->port_id = cpu_to_le16(pf->port_id);
10430 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
10431 BNXT_TX_PORT_STATS_BYTE_OFFSET);
10432 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
10433 return hwrm_req_send(bp, req);
10434 }
10435
10436 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
10437 {
10438 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
10439 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
10440 struct hwrm_port_qstats_ext_output *resp_qs;
10441 struct hwrm_port_qstats_ext_input *req_qs;
10442 struct bnxt_pf_info *pf = &bp->pf;
10443 u32 tx_stat_size;
10444 int rc;
10445
10446 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
10447 return 0;
10448
10449 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
10450 return -EOPNOTSUPP;
10451
10452 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
10453 if (rc)
10454 return rc;
10455
10456 req_qs->flags = flags;
10457 req_qs->port_id = cpu_to_le16(pf->port_id);
10458 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
10459 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
10460 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
10461 sizeof(struct tx_port_stats_ext) : 0;
10462 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
10463 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
10464 resp_qs = hwrm_req_hold(bp, req_qs);
10465 rc = hwrm_req_send(bp, req_qs);
10466 if (!rc) {
10467 bp->fw_rx_stats_ext_size =
10468 le16_to_cpu(resp_qs->rx_stat_size) / 8;
10469 if (BNXT_FW_MAJ(bp) < 220 &&
10470 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
10471 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
10472
10473 bp->fw_tx_stats_ext_size = tx_stat_size ?
10474 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
10475 } else {
10476 bp->fw_rx_stats_ext_size = 0;
10477 bp->fw_tx_stats_ext_size = 0;
10478 }
10479 hwrm_req_drop(bp, req_qs);
10480
10481 if (flags)
10482 return rc;
10483
10484 if (bp->fw_tx_stats_ext_size <=
10485 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
10486 bp->pri2cos_valid = 0;
10487 return rc;
10488 }
10489
10490 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
10491 if (rc)
10492 return rc;
10493
10494 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
10495
10496 resp_qc = hwrm_req_hold(bp, req_qc);
10497 rc = hwrm_req_send(bp, req_qc);
10498 if (!rc) {
10499 u8 *pri2cos;
10500 int i, j;
10501
10502 pri2cos = &resp_qc->pri0_cos_queue_id;
10503 for (i = 0; i < 8; i++) {
10504 u8 queue_id = pri2cos[i];
10505 u8 queue_idx;
10506
10507 /* Per port queue IDs start from 0, 10, 20, etc */
10508 queue_idx = queue_id % 10;
10509 if (queue_idx > BNXT_MAX_QUEUE) {
10510 bp->pri2cos_valid = false;
10511 hwrm_req_drop(bp, req_qc);
10512 return rc;
10513 }
10514 for (j = 0; j < bp->max_q; j++) {
10515 if (bp->q_ids[j] == queue_id)
10516 bp->pri2cos_idx[i] = queue_idx;
10517 }
10518 }
10519 bp->pri2cos_valid = true;
10520 }
10521 hwrm_req_drop(bp, req_qc);
10522
10523 return rc;
10524 }
10525
10526 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
10527 {
10528 bnxt_hwrm_tunnel_dst_port_free(bp,
10529 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10530 bnxt_hwrm_tunnel_dst_port_free(bp,
10531 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10532 }
10533
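/* Enable or disable TPA (hardware RX aggregation) on every VNIC.  When
 * disabling while firmware access is not possible, skip the HWRM calls.
 */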
10534 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
10535 {
10536 int rc, i;
10537 u32 tpa_flags = 0;
10538
10539 if (set_tpa)
10540 tpa_flags = bp->flags & BNXT_FLAG_TPA;
10541 else if (BNXT_NO_FW_ACCESS(bp))
10542 return 0;
10543 for (i = 0; i < bp->nr_vnics; i++) {
10544 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
10545 if (rc) {
10546 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
10547 i, rc);
10548 return rc;
10549 }
10550 }
10551 return 0;
10552 }
10553
10554 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
10555 {
10556 int i;
10557
10558 for (i = 0; i < bp->nr_vnics; i++)
10559 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
10560 }
10561
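/* Tear down all VNIC state: L2 filters, RSS configuration and contexts,
 * TPA settings, and finally the VNICs themselves.  The point at which the
 * RSS contexts are freed differs between P5+ and older chips.
 */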
10562 static void bnxt_clear_vnic(struct bnxt *bp)
10563 {
10564 if (!bp->vnic_info)
10565 return;
10566
10567 bnxt_hwrm_clear_vnic_filter(bp);
10568 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
10569 /* clear all RSS settings before freeing the vnic ctx */
10570 bnxt_hwrm_clear_vnic_rss(bp);
10571 bnxt_hwrm_vnic_ctx_free(bp);
10572 }
10573 /* before freeing the vnic, undo the vnic TPA settings */
10574 if (bp->flags & BNXT_FLAG_TPA)
10575 bnxt_set_tpa(bp, false);
10576 bnxt_hwrm_vnic_free(bp);
10577 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10578 bnxt_hwrm_vnic_ctx_free(bp);
10579 }
10580
10581 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10582 bool irq_re_init)
10583 {
10584 bnxt_clear_vnic(bp);
10585 bnxt_hwrm_ring_free(bp, close_path);
10586 bnxt_hwrm_ring_grp_free(bp);
10587 if (irq_re_init) {
10588 bnxt_hwrm_stat_ctx_free(bp);
10589 bnxt_hwrm_free_tunnel_ports(bp);
10590 }
10591 }
10592
10593 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
10594 {
10595 struct hwrm_func_cfg_input *req;
10596 u8 evb_mode;
10597 int rc;
10598
10599 if (br_mode == BRIDGE_MODE_VEB)
10600 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
10601 else if (br_mode == BRIDGE_MODE_VEPA)
10602 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
10603 else
10604 return -EINVAL;
10605
10606 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10607 if (rc)
10608 return rc;
10609
10610 req->fid = cpu_to_le16(0xffff);
10611 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
10612 req->evb_mode = evb_mode;
10613 return hwrm_req_send(bp, req);
10614 }
10615
10616 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
10617 {
10618 struct hwrm_func_cfg_input *req;
10619 int rc;
10620
10621 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
10622 return 0;
10623
10624 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
10625 if (rc)
10626 return rc;
10627
10628 req->fid = cpu_to_le16(0xffff);
10629 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
10630 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
10631 if (size == 128)
10632 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
10633
10634 return hwrm_req_send(bp, req);
10635 }
10636
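/* Set up one VNIC on pre-P5 chips: allocate its RSS/CoS context(s),
 * configure the VNIC and its default ring group, program the RSS table,
 * and enable header-data split when aggregation rings are in use.
 */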
10637 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10638 {
10639 int rc;
10640
10641 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
10642 goto skip_rss_ctx;
10643
10644 /* allocate context for vnic */
10645 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
10646 if (rc) {
10647 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10648 vnic->vnic_id, rc);
10649 goto vnic_setup_err;
10650 }
10651 bp->rsscos_nr_ctxs++;
10652
10653 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10654 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
10655 if (rc) {
10656 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
10657 vnic->vnic_id, rc);
10658 goto vnic_setup_err;
10659 }
10660 bp->rsscos_nr_ctxs++;
10661 }
10662
10663 skip_rss_ctx:
10664 /* configure default vnic, ring grp */
10665 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10666 if (rc) {
10667 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10668 vnic->vnic_id, rc);
10669 goto vnic_setup_err;
10670 }
10671
10672 /* Enable RSS hashing on vnic */
10673 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
10674 if (rc) {
10675 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
10676 vnic->vnic_id, rc);
10677 goto vnic_setup_err;
10678 }
10679
10680 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10681 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10682 if (rc) {
10683 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10684 vnic->vnic_id, rc);
10685 }
10686 }
10687
10688 vnic_setup_err:
10689 return rc;
10690 }
10691
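/* Update run-time attributes of an already configured VNIC.  @valid is a
 * mask of VNIC_UPDATE_REQ_ENABLES_* bits naming the fields of @vnic to
 * apply; currently only the MRU is copied into the request.  Illustrative
 * use (mirroring bnxt_set_vnic_mru_p5() below):
 *
 *	vnic->mru = mru;
 *	bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
 */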
10692 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10693 u8 valid)
10694 {
10695 struct hwrm_vnic_update_input *req;
10696 int rc;
10697
10698 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
10699 if (rc)
10700 return rc;
10701
10702 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
10703
10704 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
10705 req->mru = cpu_to_le16(vnic->mru);
10706
10707 req->enables = cpu_to_le32(valid);
10708
10709 return hwrm_req_send(bp, req);
10710 }
10711
10712 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10713 {
10714 int rc;
10715
10716 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10717 if (rc) {
10718 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10719 vnic->vnic_id, rc);
10720 return rc;
10721 }
10722 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10723 if (rc)
10724 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
10725 vnic->vnic_id, rc);
10726 return rc;
10727 }
10728
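/* P5+ variant of VNIC setup: these chips may need more than one RSS
 * context per VNIC depending on the RX ring count, so allocate
 * bnxt_get_nr_rss_ctxs() contexts before configuring RSS, the VNIC
 * itself, and (with aggregation rings) header-data split.
 */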
10729 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10730 {
10731 int rc, i, nr_ctxs;
10732
10733 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
10734 for (i = 0; i < nr_ctxs; i++) {
10735 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
10736 if (rc) {
10737 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
10738 vnic->vnic_id, i, rc);
10739 break;
10740 }
10741 bp->rsscos_nr_ctxs++;
10742 }
10743 if (i < nr_ctxs)
10744 return -ENOMEM;
10745
10746 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
10747 if (rc)
10748 return rc;
10749
10750 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
10751 rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
10752 if (rc) {
10753 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
10754 vnic->vnic_id, rc);
10755 }
10756 }
10757 return rc;
10758 }
10759
10760 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
10761 {
10762 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10763 return __bnxt_setup_vnic_p5(bp, vnic);
10764 else
10765 return __bnxt_setup_vnic(bp, vnic);
10766 }
10767
10768 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
10769 struct bnxt_vnic_info *vnic,
10770 u16 start_rx_ring_idx, int rx_rings)
10771 {
10772 int rc;
10773
10774 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
10775 if (rc) {
10776 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
10777 vnic->vnic_id, rc);
10778 return rc;
10779 }
10780 return bnxt_setup_vnic(bp, vnic);
10781 }
10782
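/* Allocate the VNIC(s) used by aRFS/ntuple filtering.  Chips that support
 * a dedicated ntuple VNIC use one VNIC spanning all RX rings; legacy
 * (pre-P5) chips create one single-ring VNIC per RX ring instead.
 */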
10783 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
10784 {
10785 struct bnxt_vnic_info *vnic;
10786 int i, rc = 0;
10787
10788 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
10789 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
10790 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
10791 }
10792
10793 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10794 return 0;
10795
10796 for (i = 0; i < bp->rx_nr_rings; i++) {
10797 u16 vnic_id = i + 1;
10798 u16 ring_id = i;
10799
10800 if (vnic_id >= bp->nr_vnics)
10801 break;
10802
10803 vnic = &bp->vnic_info[vnic_id];
10804 vnic->flags |= BNXT_VNIC_RFS_FLAG;
10805 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
10806 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
10807 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
10808 break;
10809 }
10810 return rc;
10811 }
10812
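/* Release one user-created RSS context: free its VNIC and RSS contexts in
 * firmware (if the device is running) and, when @all is set, also remove
 * any ntuple filters that target this context and free its DMA'ed RSS
 * table.
 */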
10813 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
10814 bool all)
10815 {
10816 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10817 struct bnxt_filter_base *usr_fltr, *tmp;
10818 struct bnxt_ntuple_filter *ntp_fltr;
10819 int i;
10820
10821 if (netif_running(bp->dev)) {
10822 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
10823 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
10824 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
10825 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
10826 }
10827 }
10828 if (!all)
10829 return;
10830
10831 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
10832 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
10833 usr_fltr->fw_vnic_id == rss_ctx->index) {
10834 ntp_fltr = container_of(usr_fltr,
10835 struct bnxt_ntuple_filter,
10836 base);
10837 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
10838 bnxt_del_ntp_filter(bp, ntp_fltr);
10839 bnxt_del_one_usr_fltr(bp, usr_fltr);
10840 }
10841 }
10842
10843 if (vnic->rss_table)
10844 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
10845 vnic->rss_table,
10846 vnic->rss_table_dma_addr);
10847 bp->num_rss_ctx--;
10848 }
10849
10850 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10851 int rxr_id)
10852 {
10853 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
10854 int i, vnic_rx;
10855
10856 /* The ntuple VNIC always includes all the RX rings, so any RX ring
10857 * change is relevant to it because a future filter may use that ring.
10858 */
10859 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
10860 return true;
10861
10862 for (i = 0; i < tbl_size; i++) {
10863 if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
10864 vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
10865 else
10866 vnic_rx = bp->rss_indir_tbl[i];
10867
10868 if (rxr_id == vnic_rx)
10869 return true;
10870 }
10871
10872 return false;
10873 }
10874
10875 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
10876 u16 mru, int rxr_id)
10877 {
10878 int rc;
10879
10880 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
10881 return 0;
10882
10883 if (mru) {
10884 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
10885 if (rc) {
10886 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
10887 vnic->vnic_id, rc);
10888 return rc;
10889 }
10890 }
10891 vnic->mru = mru;
10892 bnxt_hwrm_vnic_update(bp, vnic,
10893 VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
10894
10895 return 0;
10896 }
10897
10898 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
10899 {
10900 struct ethtool_rxfh_context *ctx;
10901 unsigned long context;
10902 int rc;
10903
10904 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10905 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10906 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10907
10908 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
10909 if (rc)
10910 return rc;
10911 }
10912
10913 return 0;
10914 }
10915
10916 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
10917 {
10918 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
10919 struct ethtool_rxfh_context *ctx;
10920 unsigned long context;
10921
10922 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10923 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10924 struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
10925
10926 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
10927 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
10928 __bnxt_setup_vnic_p5(bp, vnic)) {
10929 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
10930 rss_ctx->index);
10931 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
10932 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
10933 }
10934 }
10935 }
10936
10937 static void bnxt_clear_rss_ctxs(struct bnxt *bp)
10938 {
10939 struct ethtool_rxfh_context *ctx;
10940 unsigned long context;
10941
10942 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
10943 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
10944
10945 bnxt_del_one_rss_ctx(bp, rss_ctx, false);
10946 }
10947 }
10948
10949 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
10950 static bool bnxt_promisc_ok(struct bnxt *bp)
10951 {
10952 #ifdef CONFIG_BNXT_SRIOV
10953 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
10954 return false;
10955 #endif
10956 return true;
10957 }
10958
10959 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
10960 {
10961 struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
10962 unsigned int rc = 0;
10963
10964 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
10965 if (rc) {
10966 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10967 rc);
10968 return rc;
10969 }
10970
10971 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
10972 if (rc) {
10973 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
10974 rc);
10975 return rc;
10976 }
10977 return rc;
10978 }
10979
10980 static int bnxt_cfg_rx_mode(struct bnxt *);
10981 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
10982
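/* Bring up the data path in firmware: allocate stat contexts, HW rings
 * and ring groups, create and configure the default VNIC (plus RFS/TPA
 * state), program the unicast MAC filter and RX mode, and set interrupt
 * coalescing.  On failure, all firmware resources are freed again.
 */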
10983 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
10984 {
10985 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
10986 int rc = 0;
10987 unsigned int rx_nr_rings = bp->rx_nr_rings;
10988
10989 if (irq_re_init) {
10990 rc = bnxt_hwrm_stat_ctx_alloc(bp);
10991 if (rc) {
10992 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
10993 rc);
10994 goto err_out;
10995 }
10996 }
10997
10998 rc = bnxt_hwrm_ring_alloc(bp);
10999 if (rc) {
11000 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
11001 goto err_out;
11002 }
11003
11004 rc = bnxt_hwrm_ring_grp_alloc(bp);
11005 if (rc) {
11006 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
11007 goto err_out;
11008 }
11009
11010 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11011 rx_nr_rings--;
11012
11013 /* default vnic 0 */
11014 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
11015 if (rc) {
11016 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
11017 goto err_out;
11018 }
11019
11020 if (BNXT_VF(bp))
11021 bnxt_hwrm_func_qcfg(bp);
11022
11023 rc = bnxt_setup_vnic(bp, vnic);
11024 if (rc)
11025 goto err_out;
11026 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
11027 bnxt_hwrm_update_rss_hash_cfg(bp);
11028
11029 if (bp->flags & BNXT_FLAG_RFS) {
11030 rc = bnxt_alloc_rfs_vnics(bp);
11031 if (rc)
11032 goto err_out;
11033 }
11034
11035 if (bp->flags & BNXT_FLAG_TPA) {
11036 rc = bnxt_set_tpa(bp, true);
11037 if (rc)
11038 goto err_out;
11039 }
11040
11041 if (BNXT_VF(bp))
11042 bnxt_update_vf_mac(bp);
11043
11044 /* Filter for default vnic 0 */
11045 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
11046 if (rc) {
11047 if (BNXT_VF(bp) && rc == -ENODEV)
11048 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
11049 else
11050 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11051 goto err_out;
11052 }
11053 vnic->uc_filter_count = 1;
11054
11055 vnic->rx_mask = 0;
11056 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
11057 goto skip_rx_mask;
11058
11059 if (bp->dev->flags & IFF_BROADCAST)
11060 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11061
11062 if (bp->dev->flags & IFF_PROMISC)
11063 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11064
11065 if (bp->dev->flags & IFF_ALLMULTI) {
11066 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11067 vnic->mc_list_count = 0;
11068 } else if (bp->dev->flags & IFF_MULTICAST) {
11069 u32 mask = 0;
11070
11071 bnxt_mc_list_updated(bp, &mask);
11072 vnic->rx_mask |= mask;
11073 }
11074
11075 rc = bnxt_cfg_rx_mode(bp);
11076 if (rc)
11077 goto err_out;
11078
11079 skip_rx_mask:
11080 rc = bnxt_hwrm_set_coal(bp);
11081 if (rc)
11082 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
11083 rc);
11084
11085 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11086 rc = bnxt_setup_nitroa0_vnic(bp);
11087 if (rc)
11088 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
11089 rc);
11090 }
11091
11092 if (BNXT_VF(bp)) {
11093 bnxt_hwrm_func_qcfg(bp);
11094 netdev_update_features(bp->dev);
11095 }
11096
11097 return 0;
11098
11099 err_out:
11100 bnxt_hwrm_resource_free(bp, 0, true);
11101
11102 return rc;
11103 }
11104
11105 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
11106 {
11107 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
11108 return 0;
11109 }
11110
11111 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
11112 {
11113 bnxt_init_cp_rings(bp);
11114 bnxt_init_rx_rings(bp);
11115 bnxt_init_tx_rings(bp);
11116 bnxt_init_ring_grps(bp, irq_re_init);
11117 bnxt_init_vnics(bp);
11118
11119 return bnxt_init_chip(bp, irq_re_init);
11120 }
11121
11122 static int bnxt_set_real_num_queues(struct bnxt *bp)
11123 {
11124 int rc;
11125 struct net_device *dev = bp->dev;
11126
11127 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
11128 bp->tx_nr_rings_xdp);
11129 if (rc)
11130 return rc;
11131
11132 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
11133 if (rc)
11134 return rc;
11135
11136 #ifdef CONFIG_RFS_ACCEL
11137 if (bp->flags & BNXT_FLAG_RFS)
11138 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
11139 #endif
11140
11141 return rc;
11142 }
11143
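/* Fit the requested RX and TX ring counts into @max completion rings.
 * With shared rings each count is simply capped at @max; otherwise the
 * larger of the two is decremented until rx + tx fits.
 */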
11144 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11145 bool shared)
11146 {
11147 int _rx = *rx, _tx = *tx;
11148
11149 if (shared) {
11150 *rx = min_t(int, _rx, max);
11151 *tx = min_t(int, _tx, max);
11152 } else {
11153 if (max < 2)
11154 return -ENOMEM;
11155
11156 while (_rx + _tx > max) {
11157 if (_rx > _tx && _rx > 1)
11158 _rx--;
11159 else if (_tx > 1)
11160 _tx--;
11161 }
11162 *rx = _rx;
11163 *tx = _tx;
11164 }
11165 return 0;
11166 }
11167
11168 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
11169 {
11170 return (tx - tx_xdp) / tx_sets + tx_xdp;
11171 }
11172
11173 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
11174 {
11175 int tcs = bp->num_tc;
11176
11177 if (!tcs)
11178 tcs = 1;
11179 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
11180 }
11181
11182 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
11183 {
11184 int tcs = bp->num_tc;
11185
11186 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
11187 bp->tx_nr_rings_xdp;
11188 }
11189
11190 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
11191 bool sh)
11192 {
11193 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
11194
11195 if (tx_cp != *tx) {
11196 int tx_saved = tx_cp, rc;
11197
11198 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
11199 if (rc)
11200 return rc;
11201 if (tx_cp != tx_saved)
11202 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
11203 return 0;
11204 }
11205 return __bnxt_trim_rings(bp, rx, tx, max, sh);
11206 }
11207
11208 static void bnxt_setup_msix(struct bnxt *bp)
11209 {
11210 const int len = sizeof(bp->irq_tbl[0].name);
11211 struct net_device *dev = bp->dev;
11212 int tcs, i;
11213
11214 tcs = bp->num_tc;
11215 if (tcs) {
11216 int i, off, count;
11217
11218 for (i = 0; i < tcs; i++) {
11219 count = bp->tx_nr_rings_per_tc;
11220 off = BNXT_TC_TO_RING_BASE(bp, i);
11221 netdev_set_tc_queue(dev, i, count, off);
11222 }
11223 }
11224
11225 for (i = 0; i < bp->cp_nr_rings; i++) {
11226 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11227 char *attr;
11228
11229 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11230 attr = "TxRx";
11231 else if (i < bp->rx_nr_rings)
11232 attr = "rx";
11233 else
11234 attr = "tx";
11235
11236 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
11237 attr, i);
11238 bp->irq_tbl[map_idx].handler = bnxt_msix;
11239 }
11240 }
11241
11242 static int bnxt_init_int_mode(struct bnxt *bp);
11243
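/* Dynamically grow or shrink the MSI-X allocation to @total vectors by
 * allocating or freeing individual vectors at the end of the IRQ table.
 * Returns the resulting number of IRQs, which may be less than @total if
 * allocation fails.
 */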
11244 static int bnxt_change_msix(struct bnxt *bp, int total)
11245 {
11246 struct msi_map map;
11247 int i;
11248
11249 /* add MSIX to the end if needed */
11250 for (i = bp->total_irqs; i < total; i++) {
11251 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
11252 if (map.index < 0)
11253 return bp->total_irqs;
11254 bp->irq_tbl[i].vector = map.virq;
11255 bp->total_irqs++;
11256 }
11257
11258 /* trim MSIX from the end if needed */
11259 for (i = bp->total_irqs; i > total; i--) {
11260 map.index = i - 1;
11261 map.virq = bp->irq_tbl[i - 1].vector;
11262 pci_msix_free_irq(bp->pdev, map);
11263 bp->total_irqs--;
11264 }
11265 return bp->total_irqs;
11266 }
11267
11268 static int bnxt_setup_int_mode(struct bnxt *bp)
11269 {
11270 int rc;
11271
11272 if (!bp->irq_tbl) {
11273 rc = bnxt_init_int_mode(bp);
11274 if (rc || !bp->irq_tbl)
11275 return rc ?: -ENODEV;
11276 }
11277
11278 bnxt_setup_msix(bp);
11279
11280 rc = bnxt_set_real_num_queues(bp);
11281 return rc;
11282 }
11283
11284 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
11285 {
11286 return bp->hw_resc.max_rsscos_ctxs;
11287 }
11288
11289 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
11290 {
11291 return bp->hw_resc.max_vnics;
11292 }
11293
11294 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
11295 {
11296 return bp->hw_resc.max_stat_ctxs;
11297 }
11298
11299 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
11300 {
11301 return bp->hw_resc.max_cp_rings;
11302 }
11303
11304 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
11305 {
11306 unsigned int cp = bp->hw_resc.max_cp_rings;
11307
11308 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
11309 cp -= bnxt_get_ulp_msix_num(bp);
11310
11311 return cp;
11312 }
11313
11314 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
11315 {
11316 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11317
11318 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11319 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
11320
11321 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
11322 }
11323
11324 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
11325 {
11326 bp->hw_resc.max_irqs = max_irqs;
11327 }
11328
11329 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
11330 {
11331 unsigned int cp;
11332
11333 cp = bnxt_get_max_func_cp_rings_for_en(bp);
11334 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11335 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
11336 else
11337 return cp - bp->cp_nr_rings;
11338 }
11339
11340 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
11341 {
11342 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
11343 }
11344
11345 static int bnxt_get_avail_msix(struct bnxt *bp, int num)
11346 {
11347 int max_irq = bnxt_get_max_func_irqs(bp);
11348 int total_req = bp->cp_nr_rings + num;
11349
11350 if (max_irq < total_req) {
11351 num = max_irq - bp->cp_nr_rings;
11352 if (num <= 0)
11353 return 0;
11354 }
11355 return num;
11356 }
11357
11358 static int bnxt_get_num_msix(struct bnxt *bp)
11359 {
11360 if (!BNXT_NEW_RM(bp))
11361 return bnxt_get_max_func_irqs(bp);
11362
11363 return bnxt_nq_rings_in_use(bp);
11364 }
11365
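/* Allocate MSI-X vectors, build the IRQ table, and trim the RX/TX ring
 * counts to match the number of vectors actually granted (after setting
 * aside any vectors claimed by the ULP).
 */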
11366 static int bnxt_init_int_mode(struct bnxt *bp)
11367 {
11368 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
11369
11370 total_vecs = bnxt_get_num_msix(bp);
11371 max = bnxt_get_max_func_irqs(bp);
11372 if (total_vecs > max)
11373 total_vecs = max;
11374
11375 if (!total_vecs)
11376 return 0;
11377
11378 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
11379 min = 2;
11380
11381 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
11382 PCI_IRQ_MSIX);
11383 ulp_msix = bnxt_get_ulp_msix_num(bp);
11384 if (total_vecs < 0 || total_vecs < ulp_msix) {
11385 rc = -ENODEV;
11386 goto msix_setup_exit;
11387 }
11388
11389 tbl_size = total_vecs;
11390 if (pci_msix_can_alloc_dyn(bp->pdev))
11391 tbl_size = max;
11392 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
11393 if (bp->irq_tbl) {
11394 for (i = 0; i < total_vecs; i++)
11395 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
11396
11397 bp->total_irqs = total_vecs;
11398 /* Trim rings based upon num of vectors allocated */
11399 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
11400 total_vecs - ulp_msix, min == 1);
11401 if (rc)
11402 goto msix_setup_exit;
11403
11404 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
11405 bp->cp_nr_rings = (min == 1) ?
11406 max_t(int, tx_cp, bp->rx_nr_rings) :
11407 tx_cp + bp->rx_nr_rings;
11408
11409 } else {
11410 rc = -ENOMEM;
11411 goto msix_setup_exit;
11412 }
11413 return 0;
11414
11415 msix_setup_exit:
11416 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
11417 kfree(bp->irq_tbl);
11418 bp->irq_tbl = NULL;
11419 pci_free_irq_vectors(bp->pdev);
11420 return rc;
11421 }
11422
11423 static void bnxt_clear_int_mode(struct bnxt *bp)
11424 {
11425 pci_free_irq_vectors(bp->pdev);
11426
11427 kfree(bp->irq_tbl);
11428 bp->irq_tbl = NULL;
11429 }
11430
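/* Reserve rings (and, with the new resource manager, MSI-X vectors) with
 * the firmware.  If the required IRQ count changes, either re-initialize
 * the MSI-X allocation or adjust it dynamically, then verify that the TX
 * rings still divide evenly among the configured traffic classes.
 */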
11431 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
11432 {
11433 bool irq_cleared = false;
11434 bool irq_change = false;
11435 int tcs = bp->num_tc;
11436 int irqs_required;
11437 int rc;
11438
11439 if (!bnxt_need_reserve_rings(bp))
11440 return 0;
11441
11442 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
11443 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
11444
11445 if (ulp_msix > bp->ulp_num_msix_want)
11446 ulp_msix = bp->ulp_num_msix_want;
11447 irqs_required = ulp_msix + bp->cp_nr_rings;
11448 } else {
11449 irqs_required = bnxt_get_num_msix(bp);
11450 }
11451
11452 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
11453 irq_change = true;
11454 if (!pci_msix_can_alloc_dyn(bp->pdev)) {
11455 bnxt_ulp_irq_stop(bp);
11456 bnxt_clear_int_mode(bp);
11457 irq_cleared = true;
11458 }
11459 }
11460 rc = __bnxt_reserve_rings(bp);
11461 if (irq_cleared) {
11462 if (!rc)
11463 rc = bnxt_init_int_mode(bp);
11464 bnxt_ulp_irq_restart(bp, rc);
11465 } else if (irq_change && !rc) {
11466 if (bnxt_change_msix(bp, irqs_required) != irqs_required)
11467 rc = -ENOSPC;
11468 }
11469 if (rc) {
11470 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
11471 return rc;
11472 }
11473 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
11474 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
11475 netdev_err(bp->dev, "tx ring reservation failure\n");
11476 netdev_reset_tc(bp->dev);
11477 bp->num_tc = 0;
11478 if (bp->tx_nr_rings_xdp)
11479 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
11480 else
11481 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11482 return -ENOMEM;
11483 }
11484 return 0;
11485 }
11486
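/* Quiesce the TX rings of one NAPI instance for a queue restart: mark
 * them closing, stop the corresponding netdev TX queues, and, in TPH
 * mode, free the HW TX and completion rings along with any pending SKBs.
 */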
11487 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
11488 {
11489 struct bnxt_tx_ring_info *txr;
11490 struct netdev_queue *txq;
11491 struct bnxt_napi *bnapi;
11492 int i;
11493
11494 bnapi = bp->bnapi[idx];
11495 bnxt_for_each_napi_tx(i, bnapi, txr) {
11496 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11497 synchronize_net();
11498
11499 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
11500 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11501 if (txq) {
11502 __netif_tx_lock_bh(txq);
11503 netif_tx_stop_queue(txq);
11504 __netif_tx_unlock_bh(txq);
11505 }
11506 }
11507
11508 if (!bp->tph_mode)
11509 continue;
11510
11511 bnxt_hwrm_tx_ring_free(bp, txr, true);
11512 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
11513 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
11514 bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
11515 }
11516 }
11517
11518 static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
11519 {
11520 struct bnxt_tx_ring_info *txr;
11521 struct netdev_queue *txq;
11522 struct bnxt_napi *bnapi;
11523 int rc, i;
11524
11525 bnapi = bp->bnapi[idx];
11526 /* All rings have been reserved and previously allocated.
11527 * Reallocating with the same parameters should never fail.
11528 */
11529 bnxt_for_each_napi_tx(i, bnapi, txr) {
11530 if (!bp->tph_mode)
11531 goto start_tx;
11532
11533 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
11534 if (rc)
11535 return rc;
11536
11537 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
11538 if (rc)
11539 return rc;
11540
11541 txr->tx_prod = 0;
11542 txr->tx_cons = 0;
11543 txr->tx_hw_cons = 0;
11544 start_tx:
11545 WRITE_ONCE(txr->dev_state, 0);
11546 synchronize_net();
11547
11548 if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
11549 continue;
11550
11551 txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
11552 if (txq)
11553 netif_tx_start_queue(txq);
11554 }
11555
11556 return 0;
11557 }
11558
11559 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
11560 const cpumask_t *mask)
11561 {
11562 struct bnxt_irq *irq;
11563 u16 tag;
11564 int err;
11565
11566 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11567
11568 if (!irq->bp->tph_mode)
11569 return;
11570
11571 cpumask_copy(irq->cpu_mask, mask);
11572
11573 if (irq->ring_nr >= irq->bp->rx_nr_rings)
11574 return;
11575
11576 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11577 cpumask_first(irq->cpu_mask), &tag))
11578 return;
11579
11580 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
11581 return;
11582
11583 netdev_lock(irq->bp->dev);
11584 if (netif_running(irq->bp->dev)) {
11585 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
11586 if (err)
11587 netdev_err(irq->bp->dev,
11588 "RX queue restart failed: err=%d\n", err);
11589 }
11590 netdev_unlock(irq->bp->dev);
11591 }
11592
11593 static void bnxt_irq_affinity_release(struct kref *ref)
11594 {
11595 struct irq_affinity_notify *notify =
11596 container_of(ref, struct irq_affinity_notify, kref);
11597 struct bnxt_irq *irq;
11598
11599 irq = container_of(notify, struct bnxt_irq, affinity_notify);
11600
11601 if (!irq->bp->tph_mode)
11602 return;
11603
11604 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
11605 netdev_err(irq->bp->dev,
11606 "Setting ST=0 for MSIX entry %d failed\n",
11607 irq->msix_nr);
11608 return;
11609 }
11610 }
11611
11612 static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
11613 {
11614 irq_set_affinity_notifier(irq->vector, NULL);
11615 }
11616
11617 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
11618 {
11619 struct irq_affinity_notify *notify;
11620
11621 irq->bp = bp;
11622
11623 /* Nothing to do if TPH is not enabled */
11624 if (!bp->tph_mode)
11625 return;
11626
11627 /* Register IRQ affinity notifier */
11628 notify = &irq->affinity_notify;
11629 notify->irq = irq->vector;
11630 notify->notify = bnxt_irq_affinity_notify;
11631 notify->release = bnxt_irq_affinity_release;
11632
11633 irq_set_affinity_notifier(irq->vector, notify);
11634 }
11635
11636 static void bnxt_free_irq(struct bnxt *bp)
11637 {
11638 struct bnxt_irq *irq;
11639 int i;
11640
11641 #ifdef CONFIG_RFS_ACCEL
11642 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
11643 bp->dev->rx_cpu_rmap = NULL;
11644 #endif
11645 if (!bp->irq_tbl || !bp->bnapi)
11646 return;
11647
11648 for (i = 0; i < bp->cp_nr_rings; i++) {
11649 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11650
11651 irq = &bp->irq_tbl[map_idx];
11652 if (irq->requested) {
11653 if (irq->have_cpumask) {
11654 irq_update_affinity_hint(irq->vector, NULL);
11655 free_cpumask_var(irq->cpu_mask);
11656 irq->have_cpumask = 0;
11657 }
11658
11659 bnxt_release_irq_notifier(irq);
11660
11661 free_irq(irq->vector, bp->bnapi[i]);
11662 }
11663
11664 irq->requested = 0;
11665 }
11666
11667 /* Disable TPH support */
11668 pcie_disable_tph(bp->pdev);
11669 bp->tph_mode = 0;
11670 }
11671
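/* Request one IRQ per completion ring, attach it to the corresponding
 * NAPI instance, spread affinity hints across the local NUMA node, and
 * (when supported) program PCIe TPH steering tags so device writes are
 * directed toward the CPU servicing each ring.
 */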
11672 static int bnxt_request_irq(struct bnxt *bp)
11673 {
11674 struct cpu_rmap *rmap = NULL;
11675 int i, j, rc = 0;
11676 unsigned long flags = 0;
11677
11678 rc = bnxt_setup_int_mode(bp);
11679 if (rc) {
11680 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
11681 rc);
11682 return rc;
11683 }
11684 #ifdef CONFIG_RFS_ACCEL
11685 rmap = bp->dev->rx_cpu_rmap;
11686 #endif
11687
11688 /* Enable TPH support as part of IRQ request */
11689 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
11690 if (!rc)
11691 bp->tph_mode = PCI_TPH_ST_IV_MODE;
11692
11693 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
11694 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
11695 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
11696
11697 if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
11698 rmap && bp->bnapi[i]->rx_ring) {
11699 rc = irq_cpu_rmap_add(rmap, irq->vector);
11700 if (rc)
11701 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
11702 j);
11703 j++;
11704 }
11705
11706 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
11707 bp->bnapi[i]);
11708 if (rc)
11709 break;
11710
11711 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
11712 irq->requested = 1;
11713
11714 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
11715 int numa_node = dev_to_node(&bp->pdev->dev);
11716 u16 tag;
11717
11718 irq->have_cpumask = 1;
11719 irq->msix_nr = map_idx;
11720 irq->ring_nr = i;
11721 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
11722 irq->cpu_mask);
11723 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
11724 if (rc) {
11725 netdev_warn(bp->dev,
11726 "Update affinity hint failed, IRQ = %d\n",
11727 irq->vector);
11728 break;
11729 }
11730
11731 bnxt_register_irq_notifier(bp, irq);
11732
11733 /* Init ST table entry */
11734 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
11735 cpumask_first(irq->cpu_mask),
11736 &tag))
11737 continue;
11738
11739 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
11740 }
11741 }
11742 return rc;
11743 }
11744
11745 static void bnxt_del_napi(struct bnxt *bp)
11746 {
11747 int i;
11748
11749 if (!bp->bnapi)
11750 return;
11751
11752 for (i = 0; i < bp->rx_nr_rings; i++)
11753 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
11754 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
11755 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
11756
11757 for (i = 0; i < bp->cp_nr_rings; i++) {
11758 struct bnxt_napi *bnapi = bp->bnapi[i];
11759
11760 __netif_napi_del_locked(&bnapi->napi);
11761 }
11762 /* Since we called __netif_napi_del_locked(), we must wait for an RCU
11763 * grace period before freeing the napi structures.
11764 */
11765 synchronize_net();
11766 }
11767
11768 static void bnxt_init_napi(struct bnxt *bp)
11769 {
11770 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
11771 unsigned int cp_nr_rings = bp->cp_nr_rings;
11772 struct bnxt_napi *bnapi;
11773 int i;
11774
11775 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
11776 poll_fn = bnxt_poll_p5;
11777 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
11778 cp_nr_rings--;
11779
11780 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11781
11782 for (i = 0; i < cp_nr_rings; i++) {
11783 bnapi = bp->bnapi[i];
11784 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
11785 bnapi->index);
11786 }
11787 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11788 bnapi = bp->bnapi[cp_nr_rings];
11789 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
11790 }
11791 }
11792
11793 static void bnxt_disable_napi(struct bnxt *bp)
11794 {
11795 int i;
11796
11797 if (!bp->bnapi ||
11798 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
11799 return;
11800
11801 for (i = 0; i < bp->cp_nr_rings; i++) {
11802 struct bnxt_napi *bnapi = bp->bnapi[i];
11803 struct bnxt_cp_ring_info *cpr;
11804
11805 cpr = &bnapi->cp_ring;
11806 if (bnapi->tx_fault)
11807 cpr->sw_stats->tx.tx_resets++;
11808 if (bnapi->in_reset)
11809 cpr->sw_stats->rx.rx_resets++;
11810 napi_disable_locked(&bnapi->napi);
11811 }
11812 }
11813
11814 static void bnxt_enable_napi(struct bnxt *bp)
11815 {
11816 int i;
11817
11818 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
11819 for (i = 0; i < bp->cp_nr_rings; i++) {
11820 struct bnxt_napi *bnapi = bp->bnapi[i];
11821 struct bnxt_cp_ring_info *cpr;
11822
11823 bnapi->tx_fault = 0;
11824
11825 cpr = &bnapi->cp_ring;
11826 bnapi->in_reset = false;
11827
11828 if (bnapi->rx_ring) {
11829 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
11830 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
11831 }
11832 napi_enable_locked(&bnapi->napi);
11833 }
11834 }
11835
11836 void bnxt_tx_disable(struct bnxt *bp)
11837 {
11838 int i;
11839 struct bnxt_tx_ring_info *txr;
11840
11841 if (bp->tx_ring) {
11842 for (i = 0; i < bp->tx_nr_rings; i++) {
11843 txr = &bp->tx_ring[i];
11844 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
11845 }
11846 }
11847 /* Make sure napi polls see @dev_state change */
11848 synchronize_net();
11849 /* Drop carrier first to prevent TX timeout */
11850 netif_carrier_off(bp->dev);
11851 /* Stop all TX queues */
11852 netif_tx_disable(bp->dev);
11853 }
11854
11855 void bnxt_tx_enable(struct bnxt *bp)
11856 {
11857 int i;
11858 struct bnxt_tx_ring_info *txr;
11859
11860 for (i = 0; i < bp->tx_nr_rings; i++) {
11861 txr = &bp->tx_ring[i];
11862 WRITE_ONCE(txr->dev_state, 0);
11863 }
11864 /* Make sure napi polls see @dev_state change */
11865 synchronize_net();
11866 netif_tx_wake_all_queues(bp->dev);
11867 if (BNXT_LINK_IS_UP(bp))
11868 netif_carrier_on(bp->dev);
11869 }
11870
11871 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
11872 {
11873 u8 active_fec = link_info->active_fec_sig_mode &
11874 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
11875
11876 switch (active_fec) {
11877 default:
11878 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
11879 return "None";
11880 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
11881 return "Clause 74 BaseR";
11882 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
11883 return "Clause 91 RS(528,514)";
11884 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
11885 return "Clause 91 RS544_1XN";
11886 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
11887 return "Clause 91 RS(544,514)";
11888 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
11889 return "Clause 91 RS272_1XN";
11890 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
11891 return "Clause 91 RS(272,257)";
11892 }
11893 }
11894
11895 void bnxt_report_link(struct bnxt *bp)
11896 {
11897 if (BNXT_LINK_IS_UP(bp)) {
11898 const char *signal = "";
11899 const char *flow_ctrl;
11900 const char *duplex;
11901 u32 speed;
11902 u16 fec;
11903
11904 netif_carrier_on(bp->dev);
11905 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
11906 if (speed == SPEED_UNKNOWN) {
11907 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
11908 return;
11909 }
11910 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
11911 duplex = "full";
11912 else
11913 duplex = "half";
11914 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
11915 flow_ctrl = "ON - receive & transmit";
11916 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
11917 flow_ctrl = "ON - transmit";
11918 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
11919 flow_ctrl = "ON - receive";
11920 else
11921 flow_ctrl = "none";
11922 if (bp->link_info.phy_qcfg_resp.option_flags &
11923 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
11924 u8 sig_mode = bp->link_info.active_fec_sig_mode &
11925 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
11926 switch (sig_mode) {
11927 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
11928 signal = "(NRZ) ";
11929 break;
11930 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
11931 signal = "(PAM4 56Gbps) ";
11932 break;
11933 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
11934 signal = "(PAM4 112Gbps) ";
11935 break;
11936 default:
11937 break;
11938 }
11939 }
11940 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
11941 speed, signal, duplex, flow_ctrl);
11942 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
11943 netdev_info(bp->dev, "EEE is %s\n",
11944 bp->eee.eee_active ? "active" :
11945 "not active");
11946 fec = bp->link_info.fec_cfg;
11947 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
11948 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
11949 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
11950 bnxt_report_fec(&bp->link_info));
11951 } else {
11952 netif_carrier_off(bp->dev);
11953 netdev_err(bp->dev, "NIC Link is Down\n");
11954 }
11955 }
11956
11957 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
11958 {
11959 if (!resp->supported_speeds_auto_mode &&
11960 !resp->supported_speeds_force_mode &&
11961 !resp->supported_pam4_speeds_auto_mode &&
11962 !resp->supported_pam4_speeds_force_mode &&
11963 !resp->supported_speeds2_auto_mode &&
11964 !resp->supported_speeds2_force_mode)
11965 return true;
11966 return false;
11967 }
11968
11969 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
11970 {
11971 struct bnxt_link_info *link_info = &bp->link_info;
11972 struct hwrm_port_phy_qcaps_output *resp;
11973 struct hwrm_port_phy_qcaps_input *req;
11974 int rc = 0;
11975
11976 if (bp->hwrm_spec_code < 0x10201)
11977 return 0;
11978
11979 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
11980 if (rc)
11981 return rc;
11982
11983 resp = hwrm_req_hold(bp, req);
11984 rc = hwrm_req_send(bp, req);
11985 if (rc)
11986 goto hwrm_phy_qcaps_exit;
11987
11988 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
11989 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
11990 struct ethtool_keee *eee = &bp->eee;
11991 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
11992
11993 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
11994 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
11995 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
11996 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
11997 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
11998 }
11999
12000 if (bp->hwrm_spec_code >= 0x10a01) {
12001 if (bnxt_phy_qcaps_no_speed(resp)) {
12002 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
12003 netdev_warn(bp->dev, "Ethernet link disabled\n");
12004 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
12005 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
12006 netdev_info(bp->dev, "Ethernet link enabled\n");
12007 /* Phy re-enabled, reprobe the speeds */
12008 link_info->support_auto_speeds = 0;
12009 link_info->support_pam4_auto_speeds = 0;
12010 link_info->support_auto_speeds2 = 0;
12011 }
12012 }
12013 if (resp->supported_speeds_auto_mode)
12014 link_info->support_auto_speeds =
12015 le16_to_cpu(resp->supported_speeds_auto_mode);
12016 if (resp->supported_pam4_speeds_auto_mode)
12017 link_info->support_pam4_auto_speeds =
12018 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
12019 if (resp->supported_speeds2_auto_mode)
12020 link_info->support_auto_speeds2 =
12021 le16_to_cpu(resp->supported_speeds2_auto_mode);
12022
12023 bp->port_count = resp->port_cnt;
12024
12025 hwrm_phy_qcaps_exit:
12026 hwrm_req_drop(bp, req);
12027 return rc;
12028 }
12029
12030 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
12031 {
12032 struct hwrm_port_mac_qcaps_output *resp;
12033 struct hwrm_port_mac_qcaps_input *req;
12034 int rc;
12035
12036 if (bp->hwrm_spec_code < 0x10a03)
12037 return;
12038
12039 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
12040 if (rc)
12041 return;
12042
12043 resp = hwrm_req_hold(bp, req);
12044 rc = hwrm_req_send_silent(bp, req);
12045 if (!rc)
12046 bp->mac_flags = resp->flags;
12047 hwrm_req_drop(bp, req);
12048 }
12049
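/* Return true if @advertising contains any bit that is not present in
 * @supported, i.e. a previously advertised speed has been dropped.
 */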
12050 static bool bnxt_support_dropped(u16 advertising, u16 supported)
12051 {
12052 u16 diff = advertising ^ supported;
12053
12054 return ((supported | diff) != supported);
12055 }
12056
12057 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
12058 {
12059 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
12060
12061 /* Check if any advertised speeds are no longer supported. The caller
12062 * holds the link_lock mutex, so we can modify link_info settings.
12063 */
12064 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12065 if (bnxt_support_dropped(link_info->advertising,
12066 link_info->support_auto_speeds2)) {
12067 link_info->advertising = link_info->support_auto_speeds2;
12068 return true;
12069 }
12070 return false;
12071 }
12072 if (bnxt_support_dropped(link_info->advertising,
12073 link_info->support_auto_speeds)) {
12074 link_info->advertising = link_info->support_auto_speeds;
12075 return true;
12076 }
12077 if (bnxt_support_dropped(link_info->advertising_pam4,
12078 link_info->support_pam4_auto_speeds)) {
12079 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
12080 return true;
12081 }
12082 return false;
12083 }
12084
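/* Query the PHY via HWRM_PORT_PHY_QCFG and refresh the cached link_info
 * (speed, duplex, pause, supported/advertised speeds, EEE and FEC state).
 * When @chng_link_state is set, report carrier changes to the stack; if
 * previously advertised speeds are no longer supported, re-apply the link
 * settings.
 */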
12085 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
12086 {
12087 struct bnxt_link_info *link_info = &bp->link_info;
12088 struct hwrm_port_phy_qcfg_output *resp;
12089 struct hwrm_port_phy_qcfg_input *req;
12090 u8 link_state = link_info->link_state;
12091 bool support_changed;
12092 int rc;
12093
12094 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
12095 if (rc)
12096 return rc;
12097
12098 resp = hwrm_req_hold(bp, req);
12099 rc = hwrm_req_send(bp, req);
12100 if (rc) {
12101 hwrm_req_drop(bp, req);
12102 if (BNXT_VF(bp) && rc == -ENODEV) {
12103 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
12104 rc = 0;
12105 }
12106 return rc;
12107 }
12108
12109 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
12110 link_info->phy_link_status = resp->link;
12111 link_info->duplex = resp->duplex_cfg;
12112 if (bp->hwrm_spec_code >= 0x10800)
12113 link_info->duplex = resp->duplex_state;
12114 link_info->pause = resp->pause;
12115 link_info->auto_mode = resp->auto_mode;
12116 link_info->auto_pause_setting = resp->auto_pause;
12117 link_info->lp_pause = resp->link_partner_adv_pause;
12118 link_info->force_pause_setting = resp->force_pause;
12119 link_info->duplex_setting = resp->duplex_cfg;
12120 if (link_info->phy_link_status == BNXT_LINK_LINK) {
12121 link_info->link_speed = le16_to_cpu(resp->link_speed);
12122 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
12123 link_info->active_lanes = resp->active_lanes;
12124 } else {
12125 link_info->link_speed = 0;
12126 link_info->active_lanes = 0;
12127 }
12128 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
12129 link_info->force_pam4_link_speed =
12130 le16_to_cpu(resp->force_pam4_link_speed);
12131 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
12132 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
12133 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
12134 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
12135 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
12136 link_info->auto_pam4_link_speeds =
12137 le16_to_cpu(resp->auto_pam4_link_speed_mask);
12138 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
12139 link_info->lp_auto_link_speeds =
12140 le16_to_cpu(resp->link_partner_adv_speeds);
12141 link_info->lp_auto_pam4_link_speeds =
12142 resp->link_partner_pam4_adv_speeds;
12143 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
12144 link_info->phy_ver[0] = resp->phy_maj;
12145 link_info->phy_ver[1] = resp->phy_min;
12146 link_info->phy_ver[2] = resp->phy_bld;
12147 link_info->media_type = resp->media_type;
12148 link_info->phy_type = resp->phy_type;
12149 link_info->transceiver = resp->xcvr_pkg_type;
12150 link_info->phy_addr = resp->eee_config_phy_addr &
12151 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
12152 link_info->module_status = resp->module_status;
12153
12154 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
12155 struct ethtool_keee *eee = &bp->eee;
12156 u16 fw_speeds;
12157
12158 eee->eee_active = 0;
12159 if (resp->eee_config_phy_addr &
12160 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
12161 eee->eee_active = 1;
12162 fw_speeds = le16_to_cpu(
12163 resp->link_partner_adv_eee_link_speed_mask);
12164 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
12165 }
12166
12167 /* Pull initial EEE config */
12168 if (!chng_link_state) {
12169 if (resp->eee_config_phy_addr &
12170 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
12171 eee->eee_enabled = 1;
12172
12173 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
12174 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
12175
12176 if (resp->eee_config_phy_addr &
12177 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
12178 __le32 tmr;
12179
12180 eee->tx_lpi_enabled = 1;
12181 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
12182 eee->tx_lpi_timer = le32_to_cpu(tmr) &
12183 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
12184 }
12185 }
12186 }
12187
12188 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
12189 if (bp->hwrm_spec_code >= 0x10504) {
12190 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
12191 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
12192 }
12193 /* TODO: need to add more logic to report VF link */
12194 if (chng_link_state) {
12195 if (link_info->phy_link_status == BNXT_LINK_LINK)
12196 link_info->link_state = BNXT_LINK_STATE_UP;
12197 else
12198 link_info->link_state = BNXT_LINK_STATE_DOWN;
12199 if (link_state != link_info->link_state)
12200 bnxt_report_link(bp);
12201 } else {
12202 /* always report link down if not required to update the link state */
12203 link_info->link_state = BNXT_LINK_STATE_DOWN;
12204 }
12205 hwrm_req_drop(bp, req);
12206
12207 if (!BNXT_PHY_CFG_ABLE(bp))
12208 return 0;
12209
12210 support_changed = bnxt_support_speed_dropped(link_info);
12211 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
12212 bnxt_hwrm_set_link_setting(bp, true, false);
12213 return 0;
12214 }
12215
12216 static void bnxt_get_port_module_status(struct bnxt *bp)
12217 {
12218 struct bnxt_link_info *link_info = &bp->link_info;
12219 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
12220 u8 module_status;
12221
12222 if (bnxt_update_link(bp, true))
12223 return;
12224
12225 module_status = link_info->module_status;
12226 switch (module_status) {
12227 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
12228 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
12229 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
12230 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
12231 bp->pf.port_id);
12232 if (bp->hwrm_spec_code >= 0x10201) {
12233 netdev_warn(bp->dev, "Module part number %s\n",
12234 resp->phy_vendor_partnumber);
12235 }
12236 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
12237 netdev_warn(bp->dev, "TX is disabled\n");
12238 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
12239 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
12240 }
12241 }
12242
12243 static void
12244 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12245 {
12246 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
12247 if (bp->hwrm_spec_code >= 0x10201)
12248 req->auto_pause =
12249 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
12250 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12251 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
12252 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12253 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
12254 req->enables |=
12255 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12256 } else {
12257 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
12258 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
12259 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
12260 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
12261 req->enables |=
12262 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
12263 if (bp->hwrm_spec_code >= 0x10201) {
12264 req->auto_pause = req->force_pause;
12265 req->enables |= cpu_to_le32(
12266 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
12267 }
12268 }
12269 }
12270
12271 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12272 {
12273 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
12274 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
12275 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12276 req->enables |=
12277 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
12278 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
12279 } else if (bp->link_info.advertising) {
12280 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
12281 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
12282 }
12283 if (bp->link_info.advertising_pam4) {
12284 req->enables |=
12285 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
12286 req->auto_link_pam4_speed_mask =
12287 cpu_to_le16(bp->link_info.advertising_pam4);
12288 }
12289 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
12290 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
12291 } else {
12292 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
12293 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
12294 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
12295 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
12296 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
12297 (u32)bp->link_info.req_link_speed);
12298 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
12299 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12300 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
12301 } else {
12302 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
12303 }
12304 }
12305
12306 /* tell the firmware (ChiMP) that the setting takes effect immediately */
12307 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
12308 }
12309
12310 int bnxt_hwrm_set_pause(struct bnxt *bp)
12311 {
12312 struct hwrm_port_phy_cfg_input *req;
12313 int rc;
12314
12315 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12316 if (rc)
12317 return rc;
12318
12319 bnxt_hwrm_set_pause_common(bp, req);
12320
12321 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
12322 bp->link_info.force_link_chng)
12323 bnxt_hwrm_set_link_common(bp, req);
12324
12325 rc = hwrm_req_send(bp, req);
12326 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
12327 /* Since changing the pause setting doesn't trigger any link
12328 * change event, the driver needs to update the current pause
12329 * result upon successful return of the phy_cfg command.
12330 */
12331 bp->link_info.pause =
12332 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
12333 bp->link_info.auto_pause_setting = 0;
12334 if (!bp->link_info.force_link_chng)
12335 bnxt_report_link(bp);
12336 }
12337 bp->link_info.force_link_chng = false;
12338 return rc;
12339 }
12340
12341 static void bnxt_hwrm_set_eee(struct bnxt *bp,
12342 struct hwrm_port_phy_cfg_input *req)
12343 {
12344 struct ethtool_keee *eee = &bp->eee;
12345
12346 if (eee->eee_enabled) {
12347 u16 eee_speeds;
12348 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
12349
12350 if (eee->tx_lpi_enabled)
12351 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
12352 else
12353 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
12354
12355 req->flags |= cpu_to_le32(flags);
12356 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
12357 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
12358 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
12359 } else {
12360 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
12361 }
12362 }
12363
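/* Build and send a single PORT_PHY_CFG request covering the speed and
 * autoneg settings, plus the pause and EEE settings when requested.
 */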
12364 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
12365 {
12366 struct hwrm_port_phy_cfg_input *req;
12367 int rc;
12368
12369 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12370 if (rc)
12371 return rc;
12372
12373 if (set_pause)
12374 bnxt_hwrm_set_pause_common(bp, req);
12375
12376 bnxt_hwrm_set_link_common(bp, req);
12377
12378 if (set_eee)
12379 bnxt_hwrm_set_eee(bp, req);
12380 return hwrm_req_send(bp, req);
12381 }
12382
12383 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
12384 {
12385 struct hwrm_port_phy_cfg_input *req;
12386 int rc;
12387
12388 if (!BNXT_SINGLE_PF(bp))
12389 return 0;
12390
12391 if (pci_num_vf(bp->pdev) &&
12392 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
12393 return 0;
12394
12395 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
12396 if (rc)
12397 return rc;
12398
12399 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
12400 rc = hwrm_req_send(bp, req);
12401 if (!rc) {
12402 mutex_lock(&bp->link_lock);
12403 /* The device is not obliged to bring the link down in certain
12404 * scenarios, even when forced. Setting the state to unknown is
12405 * consistent with driver startup and will force the link state to
12406 * be reported during the subsequent open based on PORT_PHY_QCFG.
12407 */
12408 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
12409 mutex_unlock(&bp->link_lock);
12410 }
12411 return rc;
12412 }
12413
12414 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
12415 {
12416 #ifdef CONFIG_TEE_BNXT_FW
12417 int rc = tee_bnxt_fw_load();
12418
12419 if (rc)
12420 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
12421
12422 return rc;
12423 #else
12424 netdev_err(bp->dev, "OP-TEE not supported\n");
12425 return -ENODEV;
12426 #endif
12427 }
12428
12429 static int bnxt_try_recover_fw(struct bnxt *bp)
12430 {
12431 if (bp->fw_health && bp->fw_health->status_reliable) {
12432 int retry = 0, rc;
12433 u32 sts;
12434
12435 do {
12436 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12437 rc = bnxt_hwrm_poll(bp);
12438 if (!BNXT_FW_IS_BOOTING(sts) &&
12439 !BNXT_FW_IS_RECOVERING(sts))
12440 break;
12441 retry++;
12442 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
12443
12444 if (!BNXT_FW_IS_HEALTHY(sts)) {
12445 netdev_err(bp->dev,
12446 "Firmware not responding, status: 0x%x\n",
12447 sts);
12448 rc = -ENODEV;
12449 }
12450 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
12451 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
12452 return bnxt_fw_reset_via_optee(bp);
12453 }
12454 return rc;
12455 }
12456
12457 return -ENODEV;
12458 }
12459
12460 void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
12461 {
12462 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12463
12464 if (!BNXT_NEW_RM(bp))
12465 return; /* no resource reservations required */
12466
12467 hw_resc->resv_cp_rings = 0;
12468 hw_resc->resv_stat_ctxs = 0;
12469 hw_resc->resv_irqs = 0;
12470 hw_resc->resv_tx_rings = 0;
12471 hw_resc->resv_rx_rings = 0;
12472 hw_resc->resv_hw_ring_grps = 0;
12473 hw_resc->resv_vnics = 0;
12474 hw_resc->resv_rsscos_ctxs = 0;
12475 if (!fw_reset) {
12476 bp->tx_nr_rings = 0;
12477 bp->rx_nr_rings = 0;
12478 }
12479 }
12480
12481 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
12482 {
12483 int rc;
12484
12485 if (!BNXT_NEW_RM(bp))
12486 return 0; /* no resource reservations required */
12487
12488 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
12489 if (rc)
12490 netdev_err(bp->dev, "resc_qcaps failed\n");
12491
12492 bnxt_clear_reservations(bp, fw_reset);
12493
12494 return rc;
12495 }
12496
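/* Notify firmware of an interface up/down transition via
 * HWRM_FUNC_DRV_IF_CHANGE.  On the "up" path, the response flags are
 * checked for resource changes, a completed hot firmware reset and
 * capability changes; firmware state is re-initialized and stale
 * resource reservations are cancelled as needed.
 */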
12497 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
12498 {
12499 struct hwrm_func_drv_if_change_output *resp;
12500 struct hwrm_func_drv_if_change_input *req;
12501 bool resc_reinit = false;
12502 bool caps_change = false;
12503 int rc, retry = 0;
12504 bool fw_reset;
12505 u32 flags = 0;
12506
12507 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
12508 bp->fw_reset_state = 0;
12509
12510 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
12511 return 0;
12512
12513 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
12514 if (rc)
12515 return rc;
12516
12517 if (up)
12518 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
12519 resp = hwrm_req_hold(bp, req);
12520
12521 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
12522 while (retry < BNXT_FW_IF_RETRY) {
12523 rc = hwrm_req_send(bp, req);
12524 if (rc != -EAGAIN)
12525 break;
12526
12527 msleep(50);
12528 retry++;
12529 }
12530
12531 if (rc == -EAGAIN) {
12532 hwrm_req_drop(bp, req);
12533 return rc;
12534 } else if (!rc) {
12535 flags = le32_to_cpu(resp->flags);
12536 } else if (up) {
12537 rc = bnxt_try_recover_fw(bp);
12538 fw_reset = true;
12539 }
12540 hwrm_req_drop(bp, req);
12541 if (rc)
12542 return rc;
12543
12544 if (!up) {
12545 bnxt_inv_fw_health_reg(bp);
12546 return 0;
12547 }
12548
12549 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
12550 resc_reinit = true;
12551 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
12552 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
12553 fw_reset = true;
12554 else
12555 bnxt_remap_fw_health_regs(bp);
12556
12557 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
12558 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
12559 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12560 return -ENODEV;
12561 }
12562 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
12563 caps_change = true;
12564
12565 if (resc_reinit || fw_reset || caps_change) {
12566 if (fw_reset || caps_change) {
12567 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12568 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12569 bnxt_ulp_irq_stop(bp);
12570 bnxt_free_ctx_mem(bp, false);
12571 bnxt_dcb_free(bp);
12572 rc = bnxt_fw_init_one(bp);
12573 if (rc) {
12574 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12575 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12576 return rc;
12577 }
12578 /* IRQs will be initialized later in bnxt_request_irq() */
12579 bnxt_clear_int_mode(bp);
12580 }
12581 rc = bnxt_cancel_reservations(bp, fw_reset);
12582 }
12583 return rc;
12584 }
12585
12586 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
12587 {
12588 struct hwrm_port_led_qcaps_output *resp;
12589 struct hwrm_port_led_qcaps_input *req;
12590 struct bnxt_pf_info *pf = &bp->pf;
12591 int rc;
12592
12593 bp->num_leds = 0;
12594 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
12595 return 0;
12596
12597 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
12598 if (rc)
12599 return rc;
12600
12601 req->port_id = cpu_to_le16(pf->port_id);
12602 resp = hwrm_req_hold(bp, req);
12603 rc = hwrm_req_send(bp, req);
12604 if (rc) {
12605 hwrm_req_drop(bp, req);
12606 return rc;
12607 }
12608 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
12609 int i;
12610
12611 bp->num_leds = resp->num_leds;
12612 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
12613 bp->num_leds);
12614 for (i = 0; i < bp->num_leds; i++) {
12615 struct bnxt_led_info *led = &bp->leds[i];
12616 __le16 caps = led->led_state_caps;
12617
12618 if (!led->led_group_id ||
12619 !BNXT_LED_ALT_BLINK_CAP(caps)) {
12620 bp->num_leds = 0;
12621 break;
12622 }
12623 }
12624 }
12625 hwrm_req_drop(bp, req);
12626 return 0;
12627 }
12628
12629 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
12630 {
12631 struct hwrm_wol_filter_alloc_output *resp;
12632 struct hwrm_wol_filter_alloc_input *req;
12633 int rc;
12634
12635 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
12636 if (rc)
12637 return rc;
12638
12639 req->port_id = cpu_to_le16(bp->pf.port_id);
12640 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
12641 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
12642 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
12643
12644 resp = hwrm_req_hold(bp, req);
12645 rc = hwrm_req_send(bp, req);
12646 if (!rc)
12647 bp->wol_filter_id = resp->wol_filter_id;
12648 hwrm_req_drop(bp, req);
12649 return rc;
12650 }
12651
12652 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
12653 {
12654 struct hwrm_wol_filter_free_input *req;
12655 int rc;
12656
12657 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
12658 if (rc)
12659 return rc;
12660
12661 req->port_id = cpu_to_le16(bp->pf.port_id);
12662 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
12663 req->wol_filter_id = bp->wol_filter_id;
12664
12665 return hwrm_req_send(bp, req);
12666 }
12667
12668 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
12669 {
12670 struct hwrm_wol_filter_qcfg_output *resp;
12671 struct hwrm_wol_filter_qcfg_input *req;
12672 u16 next_handle = 0;
12673 int rc;
12674
12675 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
12676 if (rc)
12677 return rc;
12678
12679 req->port_id = cpu_to_le16(bp->pf.port_id);
12680 req->handle = cpu_to_le16(handle);
12681 resp = hwrm_req_hold(bp, req);
12682 rc = hwrm_req_send(bp, req);
12683 if (!rc) {
12684 next_handle = le16_to_cpu(resp->next_handle);
12685 if (next_handle != 0) {
12686 if (resp->wol_type ==
12687 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
12688 bp->wol = 1;
12689 bp->wol_filter_id = resp->wol_filter_id;
12690 }
12691 }
12692 }
12693 hwrm_req_drop(bp, req);
12694 return next_handle;
12695 }
12696
12697 static void bnxt_get_wol_settings(struct bnxt *bp)
12698 {
12699 u16 handle = 0;
12700
12701 bp->wol = 0;
12702 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
12703 return;
12704
12705 do {
12706 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
12707 } while (handle && handle != 0xffff);
12708 }
12709
12710 static bool bnxt_eee_config_ok(struct bnxt *bp)
12711 {
12712 struct ethtool_keee *eee = &bp->eee;
12713 struct bnxt_link_info *link_info = &bp->link_info;
12714
12715 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
12716 return true;
12717
12718 if (eee->eee_enabled) {
12719 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
12720 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
12721
12722 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
12723
12724 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12725 eee->eee_enabled = 0;
12726 return false;
12727 }
12728 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
12729 linkmode_and(eee->advertised, advertising,
12730 eee->supported);
12731 return false;
12732 }
12733 }
12734 return true;
12735 }
12736
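/* Re-read the link state from firmware and, on a single-function PF,
 * reapply pause, speed/autoneg and EEE settings via PORT_PHY_CFG when the
 * current configuration no longer matches what was requested.
 */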
12737 static int bnxt_update_phy_setting(struct bnxt *bp)
12738 {
12739 int rc;
12740 bool update_link = false;
12741 bool update_pause = false;
12742 bool update_eee = false;
12743 struct bnxt_link_info *link_info = &bp->link_info;
12744
12745 rc = bnxt_update_link(bp, true);
12746 if (rc) {
12747 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
12748 rc);
12749 return rc;
12750 }
12751 if (!BNXT_SINGLE_PF(bp))
12752 return 0;
12753
12754 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12755 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
12756 link_info->req_flow_ctrl)
12757 update_pause = true;
12758 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
12759 link_info->force_pause_setting != link_info->req_flow_ctrl)
12760 update_pause = true;
12761 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
12762 if (BNXT_AUTO_MODE(link_info->auto_mode))
12763 update_link = true;
12764 if (bnxt_force_speed_updated(link_info))
12765 update_link = true;
12766 if (link_info->req_duplex != link_info->duplex_setting)
12767 update_link = true;
12768 } else {
12769 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
12770 update_link = true;
12771 if (bnxt_auto_speed_updated(link_info))
12772 update_link = true;
12773 }
12774
12775 /* The last close may have shut down the link, so we need to call
12776 * PHY_CFG to bring it back up.
12777 */
12778 if (!BNXT_LINK_IS_UP(bp))
12779 update_link = true;
12780
12781 if (!bnxt_eee_config_ok(bp))
12782 update_eee = true;
12783
12784 if (update_link)
12785 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
12786 else if (update_pause)
12787 rc = bnxt_hwrm_set_pause(bp);
12788 if (rc) {
12789 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
12790 rc);
12791 return rc;
12792 }
12793
12794 return rc;
12795 }
12796
12797 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
12798
12799 static int bnxt_reinit_after_abort(struct bnxt *bp)
12800 {
12801 int rc;
12802
12803 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12804 return -EBUSY;
12805
12806 if (bp->dev->reg_state == NETREG_UNREGISTERED)
12807 return -ENODEV;
12808
12809 rc = bnxt_fw_init_one(bp);
12810 if (!rc) {
12811 bnxt_clear_int_mode(bp);
12812 rc = bnxt_init_int_mode(bp);
12813 if (!rc) {
12814 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
12815 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
12816 }
12817 }
12818 return rc;
12819 }
12820
12821 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
12822 {
12823 struct bnxt_ntuple_filter *ntp_fltr;
12824 struct bnxt_l2_filter *l2_fltr;
12825
12826 if (list_empty(&fltr->list))
12827 return;
12828
12829 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
12830 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
12831 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
12832 atomic_inc(&l2_fltr->refcnt);
12833 ntp_fltr->l2_fltr = l2_fltr;
12834 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
12835 bnxt_del_ntp_filter(bp, ntp_fltr);
12836 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
12837 fltr->sw_id);
12838 }
12839 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
12840 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
12841 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
12842 bnxt_del_l2_filter(bp, l2_fltr);
12843 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
12844 fltr->sw_id);
12845 }
12846 }
12847 }
12848
12849 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
12850 {
12851 struct bnxt_filter_base *usr_fltr, *tmp;
12852
12853 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
12854 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
12855 }
12856
12857 static int bnxt_set_xps_mapping(struct bnxt *bp)
12858 {
12859 int numa_node = dev_to_node(&bp->pdev->dev);
12860 unsigned int q_idx, map_idx, cpu, i;
12861 const struct cpumask *cpu_mask_ptr;
12862 int nr_cpus = num_online_cpus();
12863 cpumask_t *q_map;
12864 int rc = 0;
12865
12866 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
12867 if (!q_map)
12868 return -ENOMEM;
12869
12870 /* Create CPU mask for all TX queues across MQPRIO traffic classes.
12871 * Each TC has the same number of TX queues. The nth TX queue for each
12872 * TC will have the same CPU mask.
12873 */
12874 for (i = 0; i < nr_cpus; i++) {
12875 map_idx = i % bp->tx_nr_rings_per_tc;
12876 cpu = cpumask_local_spread(i, numa_node);
12877 cpu_mask_ptr = get_cpu_mask(cpu);
12878 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
12879 }
12880
12881 /* Register CPU mask for each TX queue except the ones marked for XDP */
12882 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
12883 map_idx = q_idx % bp->tx_nr_rings_per_tc;
12884 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
12885 if (rc) {
12886 netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
12887 q_idx);
12888 break;
12889 }
12890 }
12891
12892 kfree(q_map);
12893
12894 return rc;
12895 }
12896
12897 static int bnxt_tx_nr_rings(struct bnxt *bp)
12898 {
12899 return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
12900 bp->tx_nr_rings_per_tc;
12901 }
12902
12903 static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
12904 {
12905 return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
12906 }
12907
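/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the NIC, then restore link settings, XPS mappings,
 * timestamp filters and user-configured filters.  Failures unwind
 * through the open_err_* labels.
 */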
12908 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
12909 {
12910 int rc = 0;
12911
12912 netif_carrier_off(bp->dev);
12913 if (irq_re_init) {
12914 /* Reserve rings now if none were reserved at driver probe. */
12915 rc = bnxt_init_dflt_ring_mode(bp);
12916 if (rc) {
12917 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
12918 return rc;
12919 }
12920 }
12921 rc = bnxt_reserve_rings(bp, irq_re_init);
12922 if (rc)
12923 return rc;
12924
12925 /* Make adjustments if reserved TX rings are less than requested */
12926 bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
12927 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
12928 if (bp->tx_nr_rings_xdp) {
12929 bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
12930 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12931 }
12932 rc = bnxt_alloc_mem(bp, irq_re_init);
12933 if (rc) {
12934 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
12935 goto open_err_free_mem;
12936 }
12937
12938 if (irq_re_init) {
12939 bnxt_init_napi(bp);
12940 rc = bnxt_request_irq(bp);
12941 if (rc) {
12942 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
12943 goto open_err_irq;
12944 }
12945 }
12946
12947 rc = bnxt_init_nic(bp, irq_re_init);
12948 if (rc) {
12949 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
12950 goto open_err_irq;
12951 }
12952
12953 bnxt_enable_napi(bp);
12954 bnxt_debug_dev_init(bp);
12955
12956 if (link_re_init) {
12957 mutex_lock(&bp->link_lock);
12958 rc = bnxt_update_phy_setting(bp);
12959 mutex_unlock(&bp->link_lock);
12960 if (rc) {
12961 netdev_warn(bp->dev, "failed to update phy settings\n");
12962 if (BNXT_SINGLE_PF(bp)) {
12963 bp->link_info.phy_retry = true;
12964 bp->link_info.phy_retry_expires =
12965 jiffies + 5 * HZ;
12966 }
12967 }
12968 }
12969
12970 if (irq_re_init) {
12971 udp_tunnel_nic_reset_ntf(bp->dev);
12972 rc = bnxt_set_xps_mapping(bp);
12973 if (rc)
12974 netdev_warn(bp->dev, "failed to set xps mapping\n");
12975 }
12976
12977 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
12978 if (!static_key_enabled(&bnxt_xdp_locking_key))
12979 static_branch_enable(&bnxt_xdp_locking_key);
12980 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
12981 static_branch_disable(&bnxt_xdp_locking_key);
12982 }
12983 set_bit(BNXT_STATE_OPEN, &bp->state);
12984 bnxt_enable_int(bp);
12985 /* Enable TX queues */
12986 bnxt_tx_enable(bp);
12987 mod_timer(&bp->timer, jiffies + bp->current_interval);
12988 /* Poll the link status and check the SFP+ module status */
12989 mutex_lock(&bp->link_lock);
12990 bnxt_get_port_module_status(bp);
12991 mutex_unlock(&bp->link_lock);
12992
12993 /* VF-reps may need to be re-opened after the PF is re-opened */
12994 if (BNXT_PF(bp))
12995 bnxt_vf_reps_open(bp);
12996 bnxt_ptp_init_rtc(bp, true);
12997 bnxt_ptp_cfg_tstamp_filters(bp);
12998 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
12999 bnxt_hwrm_realloc_rss_ctx_vnic(bp);
13000 bnxt_cfg_usr_fltrs(bp);
13001 return 0;
13002
13003 open_err_irq:
13004 bnxt_del_napi(bp);
13005
13006 open_err_free_mem:
13007 bnxt_free_skbs(bp);
13008 bnxt_free_irq(bp);
13009 bnxt_free_mem(bp, true);
13010 return rc;
13011 }
13012
13013 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13014 {
13015 int rc = 0;
13016
13017 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
13018 rc = -EIO;
13019 if (!rc)
13020 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
13021 if (rc) {
13022 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
13023 netif_close(bp->dev);
13024 }
13025 return rc;
13026 }
13027
13028 /* netdev instance lock held, open the NIC half way by allocating all
13029 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
13030 * for offline self tests.
13031 */
13032 int bnxt_half_open_nic(struct bnxt *bp)
13033 {
13034 int rc = 0;
13035
13036 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13037 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
13038 rc = -ENODEV;
13039 goto half_open_err;
13040 }
13041
13042 rc = bnxt_alloc_mem(bp, true);
13043 if (rc) {
13044 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
13045 goto half_open_err;
13046 }
13047 bnxt_init_napi(bp);
13048 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13049 rc = bnxt_init_nic(bp, true);
13050 if (rc) {
13051 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13052 bnxt_del_napi(bp);
13053 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
13054 goto half_open_err;
13055 }
13056 return 0;
13057
13058 half_open_err:
13059 bnxt_free_skbs(bp);
13060 bnxt_free_mem(bp, true);
13061 netif_close(bp->dev);
13062 return rc;
13063 }
13064
13065 /* netdev instance lock held, this call can only be made after a previous
13066 * successful call to bnxt_half_open_nic().
13067 */
13068 void bnxt_half_close_nic(struct bnxt *bp)
13069 {
13070 bnxt_hwrm_resource_free(bp, false, true);
13071 bnxt_del_napi(bp);
13072 bnxt_free_skbs(bp);
13073 bnxt_free_mem(bp, true);
13074 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
13075 }
13076
13077 void bnxt_reenable_sriov(struct bnxt *bp)
13078 {
13079 if (BNXT_PF(bp)) {
13080 struct bnxt_pf_info *pf = &bp->pf;
13081 int n = pf->active_vfs;
13082
13083 if (n)
13084 bnxt_cfg_hw_sriov(bp, &n, true);
13085 }
13086 }
13087
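/* .ndo_open handler: recover from an aborted firmware reset if needed,
 * notify firmware with an "if change" up message, then run the full
 * open path.
 */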
13088 static int bnxt_open(struct net_device *dev)
13089 {
13090 struct bnxt *bp = netdev_priv(dev);
13091 int rc;
13092
13093 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13094 rc = bnxt_reinit_after_abort(bp);
13095 if (rc) {
13096 if (rc == -EBUSY)
13097 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
13098 else
13099 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
13100 return -ENODEV;
13101 }
13102 }
13103
13104 rc = bnxt_hwrm_if_change(bp, true);
13105 if (rc)
13106 return rc;
13107
13108 rc = __bnxt_open_nic(bp, true, true);
13109 if (rc) {
13110 bnxt_hwrm_if_change(bp, false);
13111 } else {
13112 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
13113 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13114 bnxt_queue_sp_work(bp,
13115 BNXT_RESTART_ULP_SP_EVENT);
13116 }
13117 }
13118
13119 return rc;
13120 }
13121
13122 static bool bnxt_drv_busy(struct bnxt *bp)
13123 {
13124 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
13125 test_bit(BNXT_STATE_READ_STATS, &bp->state));
13126 }
13127
13128 static void bnxt_get_ring_stats(struct bnxt *bp,
13129 struct rtnl_link_stats64 *stats);
13130
13131 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
13132 bool link_re_init)
13133 {
13134 /* Close the VF-reps before closing PF */
13135 if (BNXT_PF(bp))
13136 bnxt_vf_reps_close(bp);
13137
13138 /* Change device state to avoid TX queue wake-ups */
13139 bnxt_tx_disable(bp);
13140
13141 clear_bit(BNXT_STATE_OPEN, &bp->state);
13142 smp_mb__after_atomic();
13143 while (bnxt_drv_busy(bp))
13144 msleep(20);
13145
13146 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
13147 bnxt_clear_rss_ctxs(bp);
13148 /* Flush rings and disable interrupts */
13149 bnxt_shutdown_nic(bp, irq_re_init);
13150
13151 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
13152
13153 bnxt_debug_dev_exit(bp);
13154 bnxt_disable_napi(bp);
13155 timer_delete_sync(&bp->timer);
13156 bnxt_free_skbs(bp);
13157
13158 /* Save ring stats before shutdown */
13159 if (bp->bnapi && irq_re_init) {
13160 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
13161 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
13162 }
13163 if (irq_re_init) {
13164 bnxt_free_irq(bp);
13165 bnxt_del_napi(bp);
13166 }
13167 bnxt_free_mem(bp, irq_re_init);
13168 }
13169
13170 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
13171 {
13172 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13173 /* If we get here, it means firmware reset is in progress
13174 * while we are trying to close. We can safely proceed with
13175 * the close because we are holding netdev instance lock.
13176 * Some firmware messages may fail as we proceed to close.
13177 * We set the ABORT_ERR flag here so that the FW reset thread
13178 * will later abort when it gets the netdev instance lock
13179 * and sees the flag.
13180 */
13181 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
13182 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
13183 }
13184
13185 #ifdef CONFIG_BNXT_SRIOV
13186 if (bp->sriov_cfg) {
13187 int rc;
13188
13189 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
13190 !bp->sriov_cfg,
13191 BNXT_SRIOV_CFG_WAIT_TMO);
13192 if (!rc)
13193 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
13194 else if (rc < 0)
13195 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
13196 }
13197 #endif
13198 __bnxt_close_nic(bp, irq_re_init, link_re_init);
13199 }
13200
13201 static int bnxt_close(struct net_device *dev)
13202 {
13203 struct bnxt *bp = netdev_priv(dev);
13204
13205 bnxt_close_nic(bp, true, true);
13206 bnxt_hwrm_shutdown_link(bp);
13207 bnxt_hwrm_if_change(bp, false);
13208 return 0;
13209 }
13210
13211 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
13212 u16 *val)
13213 {
13214 struct hwrm_port_phy_mdio_read_output *resp;
13215 struct hwrm_port_phy_mdio_read_input *req;
13216 int rc;
13217
13218 if (bp->hwrm_spec_code < 0x10a00)
13219 return -EOPNOTSUPP;
13220
13221 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
13222 if (rc)
13223 return rc;
13224
13225 req->port_id = cpu_to_le16(bp->pf.port_id);
13226 req->phy_addr = phy_addr;
13227 req->reg_addr = cpu_to_le16(reg & 0x1f);
13228 if (mdio_phy_id_is_c45(phy_addr)) {
13229 req->cl45_mdio = 1;
13230 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13231 req->dev_addr = mdio_phy_id_devad(phy_addr);
13232 req->reg_addr = cpu_to_le16(reg);
13233 }
13234
13235 resp = hwrm_req_hold(bp, req);
13236 rc = hwrm_req_send(bp, req);
13237 if (!rc)
13238 *val = le16_to_cpu(resp->reg_data);
13239 hwrm_req_drop(bp, req);
13240 return rc;
13241 }
13242
13243 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
13244 u16 val)
13245 {
13246 struct hwrm_port_phy_mdio_write_input *req;
13247 int rc;
13248
13249 if (bp->hwrm_spec_code < 0x10a00)
13250 return -EOPNOTSUPP;
13251
13252 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
13253 if (rc)
13254 return rc;
13255
13256 req->port_id = cpu_to_le16(bp->pf.port_id);
13257 req->phy_addr = phy_addr;
13258 req->reg_addr = cpu_to_le16(reg & 0x1f);
13259 if (mdio_phy_id_is_c45(phy_addr)) {
13260 req->cl45_mdio = 1;
13261 req->phy_addr = mdio_phy_id_prtad(phy_addr);
13262 req->dev_addr = mdio_phy_id_devad(phy_addr);
13263 req->reg_addr = cpu_to_le16(reg);
13264 }
13265 req->reg_data = cpu_to_le16(val);
13266
13267 return hwrm_req_send(bp, req);
13268 }
13269
13270 /* netdev instance lock held */
13271 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13272 {
13273 struct mii_ioctl_data *mdio = if_mii(ifr);
13274 struct bnxt *bp = netdev_priv(dev);
13275 int rc;
13276
13277 switch (cmd) {
13278 case SIOCGMIIPHY:
13279 mdio->phy_id = bp->link_info.phy_addr;
13280
13281 fallthrough;
13282 case SIOCGMIIREG: {
13283 u16 mii_regval = 0;
13284
13285 if (!netif_running(dev))
13286 return -EAGAIN;
13287
13288 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
13289 &mii_regval);
13290 mdio->val_out = mii_regval;
13291 return rc;
13292 }
13293
13294 case SIOCSMIIREG:
13295 if (!netif_running(dev))
13296 return -EAGAIN;
13297
13298 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
13299 mdio->val_in);
13300
13301 default:
13302 /* do nothing */
13303 break;
13304 }
13305 return -EOPNOTSUPP;
13306 }
13307
13308 static void bnxt_get_ring_stats(struct bnxt *bp,
13309 struct rtnl_link_stats64 *stats)
13310 {
13311 int i;
13312
13313 for (i = 0; i < bp->cp_nr_rings; i++) {
13314 struct bnxt_napi *bnapi = bp->bnapi[i];
13315 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13316 u64 *sw = cpr->stats.sw_stats;
13317
13318 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
13319 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13320 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
13321
13322 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
13323 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
13324 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
13325
13326 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
13327 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
13328 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
13329
13330 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
13331 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
13332 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
13333
13334 stats->rx_missed_errors +=
13335 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
13336
13337 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
13338
13339 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
13340
13341 stats->rx_dropped +=
13342 cpr->sw_stats->rx.rx_netpoll_discards +
13343 cpr->sw_stats->rx.rx_oom_discards;
13344 }
13345 }
13346
13347 static void bnxt_add_prev_stats(struct bnxt *bp,
13348 struct rtnl_link_stats64 *stats)
13349 {
13350 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
13351
13352 stats->rx_packets += prev_stats->rx_packets;
13353 stats->tx_packets += prev_stats->tx_packets;
13354 stats->rx_bytes += prev_stats->rx_bytes;
13355 stats->tx_bytes += prev_stats->tx_bytes;
13356 stats->rx_missed_errors += prev_stats->rx_missed_errors;
13357 stats->multicast += prev_stats->multicast;
13358 stats->rx_dropped += prev_stats->rx_dropped;
13359 stats->tx_dropped += prev_stats->tx_dropped;
13360 }
13361
13362 static void
13363 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
13364 {
13365 struct bnxt *bp = netdev_priv(dev);
13366
13367 set_bit(BNXT_STATE_READ_STATS, &bp->state);
13368 /* Make sure bnxt_close_nic() sees that we are reading stats before
13369 * we check the BNXT_STATE_OPEN flag.
13370 */
13371 smp_mb__after_atomic();
13372 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13373 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13374 *stats = bp->net_stats_prev;
13375 return;
13376 }
13377
13378 bnxt_get_ring_stats(bp, stats);
13379 bnxt_add_prev_stats(bp, stats);
13380
13381 if (bp->flags & BNXT_FLAG_PORT_STATS) {
13382 u64 *rx = bp->port_stats.sw_stats;
13383 u64 *tx = bp->port_stats.sw_stats +
13384 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
13385
13386 stats->rx_crc_errors =
13387 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
13388 stats->rx_frame_errors =
13389 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
13390 stats->rx_length_errors =
13391 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
13392 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
13393 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
13394 stats->rx_errors =
13395 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
13396 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
13397 stats->collisions =
13398 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
13399 stats->tx_fifo_errors =
13400 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
13401 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
13402 }
13403 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
13404 }
13405
13406 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
13407 struct bnxt_total_ring_err_stats *stats,
13408 struct bnxt_cp_ring_info *cpr)
13409 {
13410 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
13411 u64 *hw_stats = cpr->stats.sw_stats;
13412
13413 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
13414 stats->rx_total_resets += sw_stats->rx.rx_resets;
13415 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
13416 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
13417 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
13418 stats->rx_total_ring_discards +=
13419 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
13420 stats->tx_total_resets += sw_stats->tx.tx_resets;
13421 stats->tx_total_ring_discards +=
13422 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
13423 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
13424 }
13425
13426 void bnxt_get_ring_err_stats(struct bnxt *bp,
13427 struct bnxt_total_ring_err_stats *stats)
13428 {
13429 int i;
13430
13431 for (i = 0; i < bp->cp_nr_rings; i++)
13432 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
13433 }
13434
13435 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
13436 {
13437 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13438 struct net_device *dev = bp->dev;
13439 struct netdev_hw_addr *ha;
13440 u8 *haddr;
13441 int mc_count = 0;
13442 bool update = false;
13443 int off = 0;
13444
13445 netdev_for_each_mc_addr(ha, dev) {
13446 if (mc_count >= BNXT_MAX_MC_ADDRS) {
13447 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13448 vnic->mc_list_count = 0;
13449 return false;
13450 }
13451 haddr = ha->addr;
13452 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
13453 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
13454 update = true;
13455 }
13456 off += ETH_ALEN;
13457 mc_count++;
13458 }
13459 if (mc_count)
13460 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13461
13462 if (mc_count != vnic->mc_list_count) {
13463 vnic->mc_list_count = mc_count;
13464 update = true;
13465 }
13466 return update;
13467 }
13468
13469 static bool bnxt_uc_list_updated(struct bnxt *bp)
13470 {
13471 struct net_device *dev = bp->dev;
13472 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13473 struct netdev_hw_addr *ha;
13474 int off = 0;
13475
13476 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
13477 return true;
13478
13479 netdev_for_each_uc_addr(ha, dev) {
13480 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
13481 return true;
13482
13483 off += ETH_ALEN;
13484 }
13485 return false;
13486 }
13487
13488 static void bnxt_set_rx_mode(struct net_device *dev)
13489 {
13490 struct bnxt *bp = netdev_priv(dev);
13491 struct bnxt_vnic_info *vnic;
13492 bool mc_update = false;
13493 bool uc_update;
13494 u32 mask;
13495
13496 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
13497 return;
13498
13499 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13500 mask = vnic->rx_mask;
13501 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
13502 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
13503 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
13504 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
13505
13506 if (dev->flags & IFF_PROMISC)
13507 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13508
13509 uc_update = bnxt_uc_list_updated(bp);
13510
13511 if (dev->flags & IFF_BROADCAST)
13512 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
13513 if (dev->flags & IFF_ALLMULTI) {
13514 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13515 vnic->mc_list_count = 0;
13516 } else if (dev->flags & IFF_MULTICAST) {
13517 mc_update = bnxt_mc_list_updated(bp, &mask);
13518 }
13519
13520 if (mask != vnic->rx_mask || uc_update || mc_update) {
13521 vnic->rx_mask = mask;
13522
13523 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
13524 }
13525 }
13526
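/* Reprogram the unicast filter table and the RX mask of the default
 * VNIC.  Falls back to promiscuous or all-multicast mode when exact
 * filtering cannot be configured.
 */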
13527 static int bnxt_cfg_rx_mode(struct bnxt *bp)
13528 {
13529 struct net_device *dev = bp->dev;
13530 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
13531 struct netdev_hw_addr *ha;
13532 int i, off = 0, rc;
13533 bool uc_update;
13534
13535 netif_addr_lock_bh(dev);
13536 uc_update = bnxt_uc_list_updated(bp);
13537 netif_addr_unlock_bh(dev);
13538
13539 if (!uc_update)
13540 goto skip_uc;
13541
13542 for (i = 1; i < vnic->uc_filter_count; i++) {
13543 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
13544
13545 bnxt_hwrm_l2_filter_free(bp, fltr);
13546 bnxt_del_l2_filter(bp, fltr);
13547 }
13548
13549 vnic->uc_filter_count = 1;
13550
13551 netif_addr_lock_bh(dev);
13552 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
13553 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13554 } else {
13555 netdev_for_each_uc_addr(ha, dev) {
13556 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
13557 off += ETH_ALEN;
13558 vnic->uc_filter_count++;
13559 }
13560 }
13561 netif_addr_unlock_bh(dev);
13562
13563 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
13564 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
13565 if (rc) {
13566 if (BNXT_VF(bp) && rc == -ENODEV) {
13567 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13568 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
13569 else
13570 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
13571 rc = 0;
13572 } else {
13573 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
13574 }
13575 vnic->uc_filter_count = i;
13576 return rc;
13577 }
13578 }
13579 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
13580 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
13581
13582 skip_uc:
13583 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
13584 !bnxt_promisc_ok(bp))
13585 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
13586 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13587 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
13588 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
13589 rc);
13590 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
13591 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
13592 vnic->mc_list_count = 0;
13593 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
13594 }
13595 if (rc)
13596 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
13597 rc);
13598
13599 return rc;
13600 }
13601
13602 static bool bnxt_can_reserve_rings(struct bnxt *bp)
13603 {
13604 #ifdef CONFIG_BNXT_SRIOV
13605 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
13606 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13607
13608 /* No minimum rings were provisioned by the PF. Don't
13609 * reserve rings by default when device is down.
13610 */
13611 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
13612 return true;
13613
13614 if (!netif_running(bp->dev))
13615 return false;
13616 }
13617 #endif
13618 return true;
13619 }
13620
13621 /* If the chip and firmware support RFS */
13622 static bool bnxt_rfs_supported(struct bnxt *bp)
13623 {
13624 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13625 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
13626 return true;
13627 return false;
13628 }
13629 /* 212 firmware is broken for aRFS */
13630 if (BNXT_FW_MAJ(bp) == 212)
13631 return false;
13632 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
13633 return true;
13634 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
13635 return true;
13636 return false;
13637 }
13638
13639 /* If runtime conditions support RFS */
13640 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
13641 {
13642 struct bnxt_hw_rings hwr = {0};
13643 int max_vnics, max_rss_ctxs;
13644
13645 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13646 !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
13647 return bnxt_rfs_supported(bp);
13648
13649 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
13650 return false;
13651
13652 hwr.grp = bp->rx_nr_rings;
13653 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
13654 if (new_rss_ctx)
13655 hwr.vnic++;
13656 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13657 max_vnics = bnxt_get_max_func_vnics(bp);
13658 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
13659
13660 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
13661 if (bp->rx_nr_rings > 1)
13662 netdev_warn(bp->dev,
13663 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
13664 min(max_rss_ctxs - 1, max_vnics - 1));
13665 return false;
13666 }
13667
13668 if (!BNXT_NEW_RM(bp))
13669 return true;
13670
13671 /* Do not reduce VNIC and RSS ctx reservations. There is a FW
13672 * issue that will mess up the default VNIC if we reduce the
13673 * reservations.
13674 */
13675 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13676 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13677 return true;
13678
13679 bnxt_hwrm_reserve_rings(bp, &hwr);
13680 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
13681 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
13682 return true;
13683
13684 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
13685 hwr.vnic = 1;
13686 hwr.rss_ctx = 0;
13687 bnxt_hwrm_reserve_rings(bp, &hwr);
13688 return false;
13689 }
13690
13691 static netdev_features_t bnxt_fix_features(struct net_device *dev,
13692 netdev_features_t features)
13693 {
13694 struct bnxt *bp = netdev_priv(dev);
13695 netdev_features_t vlan_features;
13696
13697 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
13698 features &= ~NETIF_F_NTUPLE;
13699
13700 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
13701 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13702
13703 if (!(features & NETIF_F_GRO))
13704 features &= ~NETIF_F_GRO_HW;
13705
13706 if (features & NETIF_F_GRO_HW)
13707 features &= ~NETIF_F_LRO;
13708
13709 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
13710 * turned on or off together.
13711 */
13712 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
13713 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
13714 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13715 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13716 else if (vlan_features)
13717 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13718 }
13719 #ifdef CONFIG_BNXT_SRIOV
13720 if (BNXT_VF(bp) && bp->vf.vlan)
13721 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
13722 #endif
13723 return features;
13724 }
13725
13726 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
13727 bool link_re_init, u32 flags, bool update_tpa)
13728 {
13729 bnxt_close_nic(bp, irq_re_init, link_re_init);
13730 bp->flags = flags;
13731 if (update_tpa)
13732 bnxt_set_ring_params(bp);
13733 return bnxt_open_nic(bp, irq_re_init, link_re_init);
13734 }
13735
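/* .ndo_set_features handler: translate netdev feature bits into
 * BNXT_FLAG_* values and decide whether the change can be applied with a
 * TPA update alone or needs a partial or full reopen of the NIC.
 */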
13736 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
13737 {
13738 bool update_tpa = false, update_ntuple = false;
13739 struct bnxt *bp = netdev_priv(dev);
13740 u32 flags = bp->flags;
13741 u32 changes;
13742 int rc = 0;
13743 bool re_init = false;
13744
13745 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
13746 if (features & NETIF_F_GRO_HW)
13747 flags |= BNXT_FLAG_GRO;
13748 else if (features & NETIF_F_LRO)
13749 flags |= BNXT_FLAG_LRO;
13750
13751 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
13752 flags &= ~BNXT_FLAG_TPA;
13753
13754 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13755 flags |= BNXT_FLAG_STRIP_VLAN;
13756
13757 if (features & NETIF_F_NTUPLE)
13758 flags |= BNXT_FLAG_RFS;
13759 else
13760 bnxt_clear_usr_fltrs(bp, true);
13761
13762 changes = flags ^ bp->flags;
13763 if (changes & BNXT_FLAG_TPA) {
13764 update_tpa = true;
13765 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
13766 (flags & BNXT_FLAG_TPA) == 0 ||
13767 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13768 re_init = true;
13769 }
13770
13771 if (changes & ~BNXT_FLAG_TPA)
13772 re_init = true;
13773
13774 if (changes & BNXT_FLAG_RFS)
13775 update_ntuple = true;
13776
13777 if (flags != bp->flags) {
13778 u32 old_flags = bp->flags;
13779
13780 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13781 bp->flags = flags;
13782 if (update_tpa)
13783 bnxt_set_ring_params(bp);
13784 return rc;
13785 }
13786
13787 if (update_ntuple)
13788 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
13789
13790 if (re_init)
13791 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
13792
13793 if (update_tpa) {
13794 bp->flags = flags;
13795 rc = bnxt_set_tpa(bp,
13796 (flags & BNXT_FLAG_TPA) ?
13797 true : false);
13798 if (rc)
13799 bp->flags = old_flags;
13800 }
13801 }
13802 return rc;
13803 }
13804
13805 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
13806 u8 **nextp)
13807 {
13808 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
13809 struct hop_jumbo_hdr *jhdr;
13810 int hdr_count = 0;
13811 u8 *nexthdr;
13812 int start;
13813
13814 /* Check that there are at most 2 IPv6 extension headers, no
13815 * fragment header, and each is <= 64 bytes.
13816 */
13817 start = nw_off + sizeof(*ip6h);
13818 nexthdr = &ip6h->nexthdr;
13819 while (ipv6_ext_hdr(*nexthdr)) {
13820 struct ipv6_opt_hdr *hp;
13821 int hdrlen;
13822
13823 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
13824 *nexthdr == NEXTHDR_FRAGMENT)
13825 return false;
13826 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
13827 skb_headlen(skb), NULL);
13828 if (!hp)
13829 return false;
13830 if (*nexthdr == NEXTHDR_AUTH)
13831 hdrlen = ipv6_authlen(hp);
13832 else
13833 hdrlen = ipv6_optlen(hp);
13834
13835 if (hdrlen > 64)
13836 return false;
13837
13838 /* The ext header may be a hop-by-hop header inserted for
13839 * big TCP purposes. This will be removed before sending
13840 * from the NIC, so do not count it.
13841 */
13842 if (*nexthdr == NEXTHDR_HOP) {
13843 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
13844 goto increment_hdr;
13845
13846 jhdr = (struct hop_jumbo_hdr *)hp;
13847 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
13848 jhdr->nexthdr != IPPROTO_TCP)
13849 goto increment_hdr;
13850
13851 goto next_hdr;
13852 }
13853 increment_hdr:
13854 hdr_count++;
13855 next_hdr:
13856 nexthdr = &hp->nexthdr;
13857 start += hdrlen;
13858 }
13859 if (nextp) {
13860 /* Caller will check inner protocol */
13861 if (skb->encapsulation) {
13862 *nextp = nexthdr;
13863 return true;
13864 }
13865 *nextp = NULL;
13866 }
13867 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
13868 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
13869 }
13870
13871 /* For UDP, we can only handle one VXLAN port and one Geneve port. */
13872 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
13873 {
13874 struct udphdr *uh = udp_hdr(skb);
13875 __be16 udp_port = uh->dest;
13876
13877 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
13878 udp_port != bp->vxlan_gpe_port)
13879 return false;
13880 if (skb->inner_protocol == htons(ETH_P_TEB)) {
13881 struct ethhdr *eh = inner_eth_hdr(skb);
13882
13883 switch (eh->h_proto) {
13884 case htons(ETH_P_IP):
13885 return true;
13886 case htons(ETH_P_IPV6):
13887 return bnxt_exthdr_check(bp, skb,
13888 skb_inner_network_offset(skb),
13889 NULL);
13890 }
13891 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
13892 return true;
13893 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
13894 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13895 NULL);
13896 }
13897 return false;
13898 }
13899
13900 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
13901 {
13902 switch (l4_proto) {
13903 case IPPROTO_UDP:
13904 return bnxt_udp_tunl_check(bp, skb);
13905 case IPPROTO_IPIP:
13906 return true;
13907 case IPPROTO_GRE: {
13908 switch (skb->inner_protocol) {
13909 default:
13910 return false;
13911 case htons(ETH_P_IP):
13912 return true;
13913 case htons(ETH_P_IPV6):
13914 fallthrough;
13915 }
13916 }
13917 case IPPROTO_IPV6:
13918 /* Check ext headers of inner ipv6 */
13919 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
13920 NULL);
13921 }
13922 return false;
13923 }
13924
13925 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
13926 struct net_device *dev,
13927 netdev_features_t features)
13928 {
13929 struct bnxt *bp = netdev_priv(dev);
13930 u8 *l4_proto;
13931
13932 features = vlan_features_check(skb, features);
13933 switch (vlan_get_protocol(skb)) {
13934 case htons(ETH_P_IP):
13935 if (!skb->encapsulation)
13936 return features;
13937 l4_proto = &ip_hdr(skb)->protocol;
13938 if (bnxt_tunl_check(bp, skb, *l4_proto))
13939 return features;
13940 break;
13941 case htons(ETH_P_IPV6):
13942 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
13943 &l4_proto))
13944 break;
13945 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
13946 return features;
13947 break;
13948 }
13949 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13950 }
13951
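/* Read num_words 32-bit words of chip registers starting at reg_off via
 * the HWRM_DBG_READ_DIRECT command, using a DMA slice of the request
 * buffer for the data.  Minimal usage sketch (the offset and buffer
 * below are hypothetical, for illustration only):
 *
 *	u32 buf[4];
 *	int rc = bnxt_dbg_hwrm_rd_reg(bp, 0x1000, ARRAY_SIZE(buf), buf);
 *
 * On success, buf[0..3] hold the register contents in CPU byte order.
 */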
13952 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
13953 u32 *reg_buf)
13954 {
13955 struct hwrm_dbg_read_direct_output *resp;
13956 struct hwrm_dbg_read_direct_input *req;
13957 __le32 *dbg_reg_buf;
13958 dma_addr_t mapping;
13959 int rc, i;
13960
13961 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
13962 if (rc)
13963 return rc;
13964
13965 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
13966 &mapping);
13967 if (!dbg_reg_buf) {
13968 rc = -ENOMEM;
13969 goto dbg_rd_reg_exit;
13970 }
13971
13972 req->host_dest_addr = cpu_to_le64(mapping);
13973
13974 resp = hwrm_req_hold(bp, req);
13975 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
13976 req->read_len32 = cpu_to_le32(num_words);
13977
13978 rc = hwrm_req_send(bp, req);
13979 if (rc || resp->error_code) {
13980 rc = -EIO;
13981 goto dbg_rd_reg_exit;
13982 }
13983 for (i = 0; i < num_words; i++)
13984 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
13985
13986 dbg_rd_reg_exit:
13987 hwrm_req_drop(bp, req);
13988 return rc;
13989 }
13990
13991 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
13992 u32 ring_id, u32 *prod, u32 *cons)
13993 {
13994 struct hwrm_dbg_ring_info_get_output *resp;
13995 struct hwrm_dbg_ring_info_get_input *req;
13996 int rc;
13997
13998 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
13999 if (rc)
14000 return rc;
14001
14002 req->ring_type = ring_type;
14003 req->fw_ring_id = cpu_to_le32(ring_id);
14004 resp = hwrm_req_hold(bp, req);
14005 rc = hwrm_req_send(bp, req);
14006 if (!rc) {
14007 *prod = le32_to_cpu(resp->producer_index);
14008 *cons = le32_to_cpu(resp->consumer_index);
14009 }
14010 hwrm_req_drop(bp, req);
14011 return rc;
14012 }
14013
14014 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
14015 {
14016 struct bnxt_tx_ring_info *txr;
14017 int i = bnapi->index, j;
14018
14019 bnxt_for_each_napi_tx(j, bnapi, txr)
14020 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
14021 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
14022 txr->tx_cons);
14023 }
14024
14025 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
14026 {
14027 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
14028 int i = bnapi->index;
14029
14030 if (!rxr)
14031 return;
14032
14033 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
14034 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
14035 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
14036 rxr->rx_sw_agg_prod);
14037 }
14038
14039 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
14040 {
14041 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
14042 int i = bnapi->index, j;
14043
14044 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
14045 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
14046 for (j = 0; j < cpr->cp_ring_count; j++) {
14047 cpr2 = &cpr->cp_ring_arr[j];
14048 if (!cpr2->bnapi)
14049 continue;
14050 netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
14051 i, j, cpr2->cp_ring_struct.fw_ring_id,
14052 cpr2->cp_raw_cons);
14053 }
14054 }
14055
14056 static void bnxt_dbg_dump_states(struct bnxt *bp)
14057 {
14058 int i;
14059 struct bnxt_napi *bnapi;
14060
14061 for (i = 0; i < bp->cp_nr_rings; i++) {
14062 bnapi = bp->bnapi[i];
14063 if (netif_msg_drv(bp)) {
14064 bnxt_dump_tx_sw_state(bnapi);
14065 bnxt_dump_rx_sw_state(bnapi);
14066 bnxt_dump_cp_sw_state(bnapi);
14067 }
14068 }
14069 }
14070
14071 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
14072 {
14073 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
14074 struct hwrm_ring_reset_input *req;
14075 struct bnxt_napi *bnapi = rxr->bnapi;
14076 struct bnxt_cp_ring_info *cpr;
14077 u16 cp_ring_id;
14078 int rc;
14079
14080 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
14081 if (rc)
14082 return rc;
14083
14084 cpr = &bnapi->cp_ring;
14085 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
14086 req->cmpl_ring = cpu_to_le16(cp_ring_id);
14087 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
14088 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
14089 return hwrm_req_send_silent(bp, req);
14090 }
14091
14092 static void bnxt_reset_task(struct bnxt *bp, bool silent)
14093 {
14094 if (!silent)
14095 bnxt_dbg_dump_states(bp);
14096 if (netif_running(bp->dev)) {
14097 bnxt_close_nic(bp, !silent, false);
14098 bnxt_open_nic(bp, !silent, false);
14099 }
14100 }
14101
14102 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
14103 {
14104 struct bnxt *bp = netdev_priv(dev);
14105
14106 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
14107 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
14108 }
14109
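/* Periodic firmware health check driven from bnxt_timer().  A stalled
 * heartbeat or an unexpected change of the firmware reset counter queues
 * BNXT_FW_EXCEPTION_SP_EVENT so that bnxt_sp_task() can begin recovery.
 */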
14110 static void bnxt_fw_health_check(struct bnxt *bp)
14111 {
14112 struct bnxt_fw_health *fw_health = bp->fw_health;
14113 struct pci_dev *pdev = bp->pdev;
14114 u32 val;
14115
14116 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14117 return;
14118
14119 /* Make sure it is enabled before checking the tmr_counter. */
14120 smp_rmb();
14121 if (fw_health->tmr_counter) {
14122 fw_health->tmr_counter--;
14123 return;
14124 }
14125
14126 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14127 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
14128 fw_health->arrests++;
14129 goto fw_reset;
14130 }
14131
14132 fw_health->last_fw_heartbeat = val;
14133
14134 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14135 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
14136 fw_health->discoveries++;
14137 goto fw_reset;
14138 }
14139
14140 fw_health->tmr_counter = fw_health->tmr_multiplier;
14141 return;
14142
14143 fw_reset:
14144 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
14145 }
14146
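/* Periodic driver timer.  It mostly schedules slow-path work items (stats
 * refresh, PHY retry, NTP filter aging, firmware health check, etc.) and
 * then re-arms itself with bp->current_interval.
 */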
14147 static void bnxt_timer(struct timer_list *t)
14148 {
14149 struct bnxt *bp = timer_container_of(bp, t, timer);
14150 struct net_device *dev = bp->dev;
14151
14152 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
14153 return;
14154
14155 if (atomic_read(&bp->intr_sem) != 0)
14156 goto bnxt_restart_timer;
14157
14158 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
14159 bnxt_fw_health_check(bp);
14160
14161 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
14162 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
14163
14164 if (bnxt_tc_flower_enabled(bp))
14165 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
14166
14167 #ifdef CONFIG_RFS_ACCEL
14168 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
14169 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14170 #endif /*CONFIG_RFS_ACCEL*/
14171
14172 if (bp->link_info.phy_retry) {
14173 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
14174 bp->link_info.phy_retry = false;
14175 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
14176 } else {
14177 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
14178 }
14179 }
14180
14181 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
14182 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
14183
14184 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
14185 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
14186
14187 bnxt_restart_timer:
14188 mod_timer(&bp->timer, jiffies + bp->current_interval);
14189 }
14190
14191 static void bnxt_lock_sp(struct bnxt *bp)
14192 {
14193 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
14194 * set. If the device is being closed, bnxt_close() may be holding
14195 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
14196 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
14197 * instance lock.
14198 */
14199 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14200 netdev_lock(bp->dev);
14201 }
14202
14203 static void bnxt_unlock_sp(struct bnxt *bp)
14204 {
14205 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14206 netdev_unlock(bp->dev);
14207 }
14208
14209 /* Only called from bnxt_sp_task() */
14210 static void bnxt_reset(struct bnxt *bp, bool silent)
14211 {
14212 bnxt_lock_sp(bp);
14213 if (test_bit(BNXT_STATE_OPEN, &bp->state))
14214 bnxt_reset_task(bp, silent);
14215 bnxt_unlock_sp(bp);
14216 }
14217
14218 /* Only called from bnxt_sp_task() */
14219 static void bnxt_rx_ring_reset(struct bnxt *bp)
14220 {
14221 int i;
14222
14223 bnxt_lock_sp(bp);
14224 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14225 bnxt_unlock_sp(bp);
14226 return;
14227 }
14228 /* Disable and flush TPA before resetting the RX ring */
14229 if (bp->flags & BNXT_FLAG_TPA)
14230 bnxt_set_tpa(bp, false);
14231 for (i = 0; i < bp->rx_nr_rings; i++) {
14232 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
14233 struct bnxt_cp_ring_info *cpr;
14234 int rc;
14235
14236 if (!rxr->bnapi->in_reset)
14237 continue;
14238
14239 rc = bnxt_hwrm_rx_ring_reset(bp, i);
14240 if (rc) {
14241 if (rc == -EINVAL || rc == -EOPNOTSUPP)
14242 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
14243 else
14244 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
14245 rc);
14246 bnxt_reset_task(bp, true);
14247 break;
14248 }
14249 bnxt_free_one_rx_ring_skbs(bp, rxr);
14250 rxr->rx_prod = 0;
14251 rxr->rx_agg_prod = 0;
14252 rxr->rx_sw_agg_prod = 0;
14253 rxr->rx_next_cons = 0;
14254 rxr->bnapi->in_reset = false;
14255 bnxt_alloc_one_rx_ring(bp, i);
14256 cpr = &rxr->bnapi->cp_ring;
14257 cpr->sw_stats->rx.rx_resets++;
14258 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14259 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
14260 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
14261 }
14262 if (bp->flags & BNXT_FLAG_TPA)
14263 bnxt_set_tpa(bp, true);
14264 bnxt_unlock_sp(bp);
14265 }
14266
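/* Quiesce the device after a fatal firmware error: stop TX and NAPI, free
 * the IRQs and disable the PCI device so that no further DMA can occur.
 */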
14267 static void bnxt_fw_fatal_close(struct bnxt *bp)
14268 {
14269 bnxt_tx_disable(bp);
14270 bnxt_disable_napi(bp);
14271 bnxt_disable_int_sync(bp);
14272 bnxt_free_irq(bp);
14273 bnxt_clear_int_mode(bp);
14274 pci_disable_device(bp->pdev);
14275 }
14276
14277 static void bnxt_fw_reset_close(struct bnxt *bp)
14278 {
14279 /* When firmware is in fatal state, quiesce device and disable
14280 * bus master to prevent any potential bad DMAs before freeing
14281 * kernel memory.
14282 */
14283 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
14284 u16 val = 0;
14285
14286 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
14287 if (val == 0xffff)
14288 bp->fw_reset_min_dsecs = 0;
14289 bnxt_fw_fatal_close(bp);
14290 }
14291 __bnxt_close_nic(bp, true, false);
14292 bnxt_vf_reps_free(bp);
14293 bnxt_clear_int_mode(bp);
14294 bnxt_hwrm_func_drv_unrgtr(bp);
14295 if (pci_is_enabled(bp->pdev))
14296 pci_disable_device(bp->pdev);
14297 bnxt_free_ctx_mem(bp, false);
14298 }
14299
14300 static bool is_bnxt_fw_ok(struct bnxt *bp)
14301 {
14302 struct bnxt_fw_health *fw_health = bp->fw_health;
14303 bool no_heartbeat = false, has_reset = false;
14304 u32 val;
14305
14306 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
14307 if (val == fw_health->last_fw_heartbeat)
14308 no_heartbeat = true;
14309
14310 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
14311 if (val != fw_health->last_fw_reset_cnt)
14312 has_reset = true;
14313
14314 if (!no_heartbeat && has_reset)
14315 return true;
14316
14317 return false;
14318 }
14319
14320 /* netdev instance lock is acquired before calling this function */
14321 static void bnxt_force_fw_reset(struct bnxt *bp)
14322 {
14323 struct bnxt_fw_health *fw_health = bp->fw_health;
14324 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14325 u32 wait_dsecs;
14326
14327 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
14328 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
14329 return;
14330
14331 /* we have to serialize with bnxt_refclk_read() */
14332 if (ptp) {
14333 unsigned long flags;
14334
14335 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14336 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14337 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14338 } else {
14339 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14340 }
14341 bnxt_fw_reset_close(bp);
14342 wait_dsecs = fw_health->master_func_wait_dsecs;
14343 if (fw_health->primary) {
14344 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
14345 wait_dsecs = 0;
14346 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
14347 } else {
14348 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
14349 wait_dsecs = fw_health->normal_func_wait_dsecs;
14350 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14351 }
14352
14353 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
14354 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
14355 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
14356 }
14357
14358 void bnxt_fw_exception(struct bnxt *bp)
14359 {
14360 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
14361 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
14362 bnxt_ulp_stop(bp);
14363 bnxt_lock_sp(bp);
14364 bnxt_force_fw_reset(bp);
14365 bnxt_unlock_sp(bp);
14366 }
14367
14368 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
14369 * < 0 on error.
14370 */
14371 static int bnxt_get_registered_vfs(struct bnxt *bp)
14372 {
14373 #ifdef CONFIG_BNXT_SRIOV
14374 int rc;
14375
14376 if (!BNXT_PF(bp))
14377 return 0;
14378
14379 rc = bnxt_hwrm_func_qcfg(bp);
14380 if (rc) {
14381 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
14382 return rc;
14383 }
14384 if (bp->pf.registered_vfs)
14385 return bp->pf.registered_vfs;
14386 if (bp->sriov_cfg)
14387 return 1;
14388 #endif
14389 return 0;
14390 }
14391
14392 void bnxt_fw_reset(struct bnxt *bp)
14393 {
14394 bnxt_ulp_stop(bp);
14395 bnxt_lock_sp(bp);
14396 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
14397 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
14398 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
14399 int n = 0, tmo;
14400
14401 /* we have to serialize with bnxt_refclk_read() */
14402 if (ptp) {
14403 unsigned long flags;
14404
14405 write_seqlock_irqsave(&ptp->ptp_lock, flags);
14406 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14407 write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
14408 } else {
14409 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14410 }
14411 if (bp->pf.active_vfs &&
14412 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
14413 n = bnxt_get_registered_vfs(bp);
14414 if (n < 0) {
14415 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
14416 n);
14417 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14418 netif_close(bp->dev);
14419 goto fw_reset_exit;
14420 } else if (n > 0) {
14421 u16 vf_tmo_dsecs = n * 10;
14422
14423 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
14424 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
14425 bp->fw_reset_state =
14426 BNXT_FW_RESET_STATE_POLL_VF;
14427 bnxt_queue_fw_reset_work(bp, HZ / 10);
14428 goto fw_reset_exit;
14429 }
14430 bnxt_fw_reset_close(bp);
14431 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
14432 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
14433 tmo = HZ / 10;
14434 } else {
14435 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
14436 tmo = bp->fw_reset_min_dsecs * HZ / 10;
14437 }
14438 bnxt_queue_fw_reset_work(bp, tmo);
14439 }
14440 fw_reset_exit:
14441 bnxt_unlock_sp(bp);
14442 }
14443
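/* P5+ chips only: find completion rings that have pending work but whose
 * consumer index has not advanced since the last check, query the firmware
 * ring state and account the event as a missed interrupt.
 */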
14444 static void bnxt_chk_missed_irq(struct bnxt *bp)
14445 {
14446 int i;
14447
14448 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14449 return;
14450
14451 for (i = 0; i < bp->cp_nr_rings; i++) {
14452 struct bnxt_napi *bnapi = bp->bnapi[i];
14453 struct bnxt_cp_ring_info *cpr;
14454 u32 fw_ring_id;
14455 int j;
14456
14457 if (!bnapi)
14458 continue;
14459
14460 cpr = &bnapi->cp_ring;
14461 for (j = 0; j < cpr->cp_ring_count; j++) {
14462 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
14463 u32 val[2];
14464
14465 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
14466 continue;
14467
14468 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
14469 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
14470 continue;
14471 }
14472 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
14473 bnxt_dbg_hwrm_ring_info_get(bp,
14474 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
14475 fw_ring_id, &val[0], &val[1]);
14476 cpr->sw_stats->cmn.missed_irqs++;
14477 }
14478 }
14479 }
14480
14481 static void bnxt_cfg_ntp_filters(struct bnxt *);
14482
14483 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
14484 {
14485 struct bnxt_link_info *link_info = &bp->link_info;
14486
14487 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
14488 link_info->autoneg = BNXT_AUTONEG_SPEED;
14489 if (bp->hwrm_spec_code >= 0x10201) {
14490 if (link_info->auto_pause_setting &
14491 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
14492 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14493 } else {
14494 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
14495 }
14496 bnxt_set_auto_speed(link_info);
14497 } else {
14498 bnxt_set_force_speed(link_info);
14499 link_info->req_duplex = link_info->duplex_setting;
14500 }
14501 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
14502 link_info->req_flow_ctrl =
14503 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
14504 else
14505 link_info->req_flow_ctrl = link_info->force_pause_setting;
14506 }
14507
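/* Echo back the data carried in a firmware echo request event so that the
 * firmware can confirm the driver is still responsive.
 */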
14508 static void bnxt_fw_echo_reply(struct bnxt *bp)
14509 {
14510 struct bnxt_fw_health *fw_health = bp->fw_health;
14511 struct hwrm_func_echo_response_input *req;
14512 int rc;
14513
14514 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
14515 if (rc)
14516 return;
14517 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
14518 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
14519 hwrm_req_send(bp, req);
14520 }
14521
14522 static void bnxt_ulp_restart(struct bnxt *bp)
14523 {
14524 bnxt_ulp_stop(bp);
14525 bnxt_ulp_start(bp, 0);
14526 }
14527
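/* Slow-path work handler.  Each event bit queued in bp->sp_event via
 * bnxt_queue_sp_work() is tested and cleared here; the reset handlers at
 * the end temporarily release BNXT_STATE_IN_SP_TASK (see bnxt_lock_sp()).
 */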
14528 static void bnxt_sp_task(struct work_struct *work)
14529 {
14530 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
14531
14532 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14533 smp_mb__after_atomic();
14534 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
14535 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14536 return;
14537 }
14538
14539 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
14540 bnxt_ulp_restart(bp);
14541 bnxt_reenable_sriov(bp);
14542 }
14543
14544 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
14545 bnxt_cfg_rx_mode(bp);
14546
14547 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
14548 bnxt_cfg_ntp_filters(bp);
14549 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
14550 bnxt_hwrm_exec_fwd_req(bp);
14551 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
14552 netdev_info(bp->dev, "Received PF driver unload event!\n");
14553 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
14554 bnxt_hwrm_port_qstats(bp, 0);
14555 bnxt_hwrm_port_qstats_ext(bp, 0);
14556 bnxt_accumulate_all_stats(bp);
14557 }
14558
14559 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
14560 int rc;
14561
14562 mutex_lock(&bp->link_lock);
14563 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
14564 &bp->sp_event))
14565 bnxt_hwrm_phy_qcaps(bp);
14566
14567 rc = bnxt_update_link(bp, true);
14568 if (rc)
14569 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
14570 rc);
14571
14572 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
14573 &bp->sp_event))
14574 bnxt_init_ethtool_link_settings(bp);
14575 mutex_unlock(&bp->link_lock);
14576 }
14577 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
14578 int rc;
14579
14580 mutex_lock(&bp->link_lock);
14581 rc = bnxt_update_phy_setting(bp);
14582 mutex_unlock(&bp->link_lock);
14583 if (rc) {
14584 netdev_warn(bp->dev, "update phy settings retry failed\n");
14585 } else {
14586 bp->link_info.phy_retry = false;
14587 netdev_info(bp->dev, "update phy settings retry succeeded\n");
14588 }
14589 }
14590 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
14591 mutex_lock(&bp->link_lock);
14592 bnxt_get_port_module_status(bp);
14593 mutex_unlock(&bp->link_lock);
14594 }
14595
14596 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
14597 bnxt_tc_flow_stats_work(bp);
14598
14599 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
14600 bnxt_chk_missed_irq(bp);
14601
14602 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
14603 bnxt_fw_echo_reply(bp);
14604
14605 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
14606 bnxt_hwmon_notify_event(bp);
14607
14608 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
14609 * must be the last functions to be called before exiting.
14610 */
14611 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
14612 bnxt_reset(bp, false);
14613
14614 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
14615 bnxt_reset(bp, true);
14616
14617 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
14618 bnxt_rx_ring_reset(bp);
14619
14620 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
14621 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
14622 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
14623 bnxt_devlink_health_fw_report(bp);
14624 else
14625 bnxt_fw_reset(bp);
14626 }
14627
14628 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
14629 if (!is_bnxt_fw_ok(bp))
14630 bnxt_devlink_health_fw_report(bp);
14631 }
14632
14633 smp_mb__before_atomic();
14634 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
14635 }
14636
14637 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14638 int *max_cp);
14639
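/* Illustrative use of bnxt_check_rings(), mirroring bnxt_setup_mq_tc()
 * further below:
 *
 *	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
 *			      sh, tc, bp->tx_nr_rings_xdp);
 *
 * A non-zero return means the requested ring counts cannot be reserved.
 */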
14640 /* Under netdev instance lock */
14641 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
14642 int tx_xdp)
14643 {
14644 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
14645 struct bnxt_hw_rings hwr = {0};
14646 int rx_rings = rx;
14647 int rc;
14648
14649 if (tcs)
14650 tx_sets = tcs;
14651
14652 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
14653
14654 if (max_rx < rx_rings)
14655 return -ENOMEM;
14656
14657 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14658 rx_rings <<= 1;
14659
14660 hwr.rx = rx_rings;
14661 hwr.tx = tx * tx_sets + tx_xdp;
14662 if (max_tx < hwr.tx)
14663 return -ENOMEM;
14664
14665 hwr.vnic = bnxt_get_total_vnics(bp, rx);
14666
14667 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
14668 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
14669 if (max_cp < hwr.cp)
14670 return -ENOMEM;
14671 hwr.stat = hwr.cp;
14672 if (BNXT_NEW_RM(bp)) {
14673 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
14674 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
14675 hwr.grp = rx;
14676 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
14677 }
14678 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
14679 hwr.cp_p5 = hwr.tx + rx;
14680 rc = bnxt_hwrm_check_rings(bp, &hwr);
14681 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
14682 if (!bnxt_ulp_registered(bp->edev)) {
14683 hwr.cp += bnxt_get_ulp_msix_num(bp);
14684 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
14685 }
14686 if (hwr.cp > bp->total_irqs) {
14687 int total_msix = bnxt_change_msix(bp, hwr.cp);
14688
14689 if (total_msix < hwr.cp) {
14690 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
14691 hwr.cp, total_msix);
14692 rc = -ENOSPC;
14693 }
14694 }
14695 }
14696 return rc;
14697 }
14698
14699 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
14700 {
14701 if (bp->bar2) {
14702 pci_iounmap(pdev, bp->bar2);
14703 bp->bar2 = NULL;
14704 }
14705
14706 if (bp->bar1) {
14707 pci_iounmap(pdev, bp->bar1);
14708 bp->bar1 = NULL;
14709 }
14710
14711 if (bp->bar0) {
14712 pci_iounmap(pdev, bp->bar0);
14713 bp->bar0 = NULL;
14714 }
14715 }
14716
14717 static void bnxt_cleanup_pci(struct bnxt *bp)
14718 {
14719 bnxt_unmap_bars(bp, bp->pdev);
14720 pci_release_regions(bp->pdev);
14721 if (pci_is_enabled(bp->pdev))
14722 pci_disable_device(bp->pdev);
14723 }
14724
14725 static void bnxt_init_dflt_coal(struct bnxt *bp)
14726 {
14727 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
14728 struct bnxt_coal *coal;
14729 u16 flags = 0;
14730
14731 if (coal_cap->cmpl_params &
14732 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
14733 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
14734
14735 /* Tick values in micro seconds.
14736 * 1 coal_buf x bufs_per_record = 1 completion record.
14737 */
14738 coal = &bp->rx_coal;
14739 coal->coal_ticks = 10;
14740 coal->coal_bufs = 30;
14741 coal->coal_ticks_irq = 1;
14742 coal->coal_bufs_irq = 2;
14743 coal->idle_thresh = 50;
14744 coal->bufs_per_record = 2;
14745 coal->budget = 64; /* NAPI budget */
14746 coal->flags = flags;
14747
14748 coal = &bp->tx_coal;
14749 coal->coal_ticks = 28;
14750 coal->coal_bufs = 30;
14751 coal->coal_ticks_irq = 2;
14752 coal->coal_bufs_irq = 2;
14753 coal->bufs_per_record = 1;
14754 coal->flags = flags;
14755
14756 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
14757 }
14758
14759 /* FW that pre-reserves 1 VNIC per function */
14760 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
14761 {
14762 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
14763
14764 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14765 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
14766 return true;
14767 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
14768 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
14769 return true;
14770 return false;
14771 }
14772
14773 static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
14774 {
14775 struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
14776 struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
14777 int rc;
14778
14779 bp->max_pfcwd_tmo_ms = 0;
14780 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
14781 if (rc)
14782 return;
14783 resp = hwrm_req_hold(bp, req);
14784 rc = hwrm_req_send_silent(bp, req);
14785 if (!rc)
14786 bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
14787 hwrm_req_drop(bp, req);
14788 }
14789
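/* Phase 1 of firmware init: query the firmware version (retrying through
 * bnxt_try_recover_fw() if the firmware is unresponsive), read the NVM
 * config version, reset the function and set the firmware time.
 */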
14790 static int bnxt_fw_init_one_p1(struct bnxt *bp)
14791 {
14792 int rc;
14793
14794 bp->fw_cap = 0;
14795 rc = bnxt_hwrm_ver_get(bp);
14796 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
14797 * so wait before continuing with recovery.
14798 */
14799 if (rc)
14800 msleep(100);
14801 bnxt_try_map_fw_health_reg(bp);
14802 if (rc) {
14803 rc = bnxt_try_recover_fw(bp);
14804 if (rc)
14805 return rc;
14806 rc = bnxt_hwrm_ver_get(bp);
14807 if (rc)
14808 return rc;
14809 }
14810
14811 bnxt_nvm_cfg_ver_get(bp);
14812
14813 rc = bnxt_hwrm_func_reset(bp);
14814 if (rc)
14815 return -ENODEV;
14816
14817 bnxt_hwrm_fw_set_time(bp);
14818 return 0;
14819 }
14820
14821 static int bnxt_fw_init_one_p2(struct bnxt *bp)
14822 {
14823 int rc;
14824
14825 /* Get the MAX capabilities for this function */
14826 rc = bnxt_hwrm_func_qcaps(bp);
14827 if (rc) {
14828 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
14829 rc);
14830 return -ENODEV;
14831 }
14832
14833 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
14834 if (rc)
14835 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
14836 rc);
14837
14838 if (bnxt_alloc_fw_health(bp)) {
14839 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
14840 } else {
14841 rc = bnxt_hwrm_error_recovery_qcfg(bp);
14842 if (rc)
14843 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
14844 rc);
14845 }
14846
14847 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
14848 if (rc)
14849 return -ENODEV;
14850
14851 rc = bnxt_alloc_crash_dump_mem(bp);
14852 if (rc)
14853 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
14854 rc);
14855 if (!rc) {
14856 rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
14857 if (rc) {
14858 bnxt_free_crash_dump_mem(bp);
14859 netdev_warn(bp->dev,
14860 "hwrm crash dump mem failure rc: %d\n", rc);
14861 }
14862 }
14863
14864 if (bnxt_fw_pre_resv_vnics(bp))
14865 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
14866
14867 bnxt_hwrm_pfcwd_qcaps(bp);
14868 bnxt_hwrm_func_qcfg(bp);
14869 bnxt_hwrm_vnic_qcaps(bp);
14870 bnxt_hwrm_port_led_qcaps(bp);
14871 bnxt_ethtool_init(bp);
14872 if (bp->fw_cap & BNXT_FW_CAP_PTP)
14873 __bnxt_hwrm_ptp_qcfg(bp);
14874 bnxt_dcb_init(bp);
14875 bnxt_hwmon_init(bp);
14876 return 0;
14877 }
14878
14879 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
14880 {
14881 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
14882 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
14883 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
14884 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
14885 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
14886 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
14887 bp->rss_hash_delta = bp->rss_hash_cfg;
14888 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
14889 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
14890 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
14891 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
14892 }
14893 }
14894
14895 static void bnxt_set_dflt_rfs(struct bnxt *bp)
14896 {
14897 struct net_device *dev = bp->dev;
14898
14899 dev->hw_features &= ~NETIF_F_NTUPLE;
14900 dev->features &= ~NETIF_F_NTUPLE;
14901 bp->flags &= ~BNXT_FLAG_RFS;
14902 if (bnxt_rfs_supported(bp)) {
14903 dev->hw_features |= NETIF_F_NTUPLE;
14904 if (bnxt_rfs_capable(bp, false)) {
14905 bp->flags |= BNXT_FLAG_RFS;
14906 dev->features |= NETIF_F_NTUPLE;
14907 }
14908 }
14909 }
14910
14911 static void bnxt_fw_init_one_p3(struct bnxt *bp)
14912 {
14913 struct pci_dev *pdev = bp->pdev;
14914
14915 bnxt_set_dflt_rss_hash_type(bp);
14916 bnxt_set_dflt_rfs(bp);
14917
14918 bnxt_get_wol_settings(bp);
14919 if (bp->flags & BNXT_FLAG_WOL_CAP)
14920 device_set_wakeup_enable(&pdev->dev, bp->wol);
14921 else
14922 device_set_wakeup_capable(&pdev->dev, false);
14923
14924 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
14925 bnxt_hwrm_coal_params_qcaps(bp);
14926 }
14927
14928 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
14929
14930 int bnxt_fw_init_one(struct bnxt *bp)
14931 {
14932 int rc;
14933
14934 rc = bnxt_fw_init_one_p1(bp);
14935 if (rc) {
14936 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
14937 return rc;
14938 }
14939 rc = bnxt_fw_init_one_p2(bp);
14940 if (rc) {
14941 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
14942 return rc;
14943 }
14944 rc = bnxt_probe_phy(bp, false);
14945 if (rc)
14946 return rc;
14947 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
14948 if (rc)
14949 return rc;
14950
14951 bnxt_fw_init_one_p3(bp);
14952 return 0;
14953 }
14954
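/* Execute one step of the host-driven firmware reset sequence: write the
 * value for @reg_idx to the register described by the error recovery
 * capability (config space, GRC window or BAR) and apply the optional
 * per-step delay.
 */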
14955 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
14956 {
14957 struct bnxt_fw_health *fw_health = bp->fw_health;
14958 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
14959 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
14960 u32 reg_type, reg_off, delay_msecs;
14961
14962 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
14963 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
14964 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
14965 switch (reg_type) {
14966 case BNXT_FW_HEALTH_REG_TYPE_CFG:
14967 pci_write_config_dword(bp->pdev, reg_off, val);
14968 break;
14969 case BNXT_FW_HEALTH_REG_TYPE_GRC:
14970 writel(reg_off & BNXT_GRC_BASE_MASK,
14971 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
14972 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
14973 fallthrough;
14974 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
14975 writel(val, bp->bar0 + reg_off);
14976 break;
14977 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
14978 writel(val, bp->bar1 + reg_off);
14979 break;
14980 }
14981 if (delay_msecs) {
14982 pci_read_config_dword(bp->pdev, 0, &val);
14983 msleep(delay_msecs);
14984 }
14985 }
14986
14987 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
14988 {
14989 struct hwrm_func_qcfg_output *resp;
14990 struct hwrm_func_qcfg_input *req;
14991 bool result = true; /* firmware will enforce if unknown */
14992
14993 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
14994 return result;
14995
14996 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
14997 return result;
14998
14999 req->fid = cpu_to_le16(0xffff);
15000 resp = hwrm_req_hold(bp, req);
15001 if (!hwrm_req_send(bp, req))
15002 result = !!(le16_to_cpu(resp->flags) &
15003 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
15004 hwrm_req_drop(bp, req);
15005 return result;
15006 }
15007
15008 static void bnxt_reset_all(struct bnxt *bp)
15009 {
15010 struct bnxt_fw_health *fw_health = bp->fw_health;
15011 int i, rc;
15012
15013 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15014 bnxt_fw_reset_via_optee(bp);
15015 bp->fw_reset_timestamp = jiffies;
15016 return;
15017 }
15018
15019 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
15020 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
15021 bnxt_fw_reset_writel(bp, i);
15022 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
15023 struct hwrm_fw_reset_input *req;
15024
15025 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
15026 if (!rc) {
15027 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
15028 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
15029 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
15030 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
15031 rc = hwrm_req_send(bp, req);
15032 }
15033 if (rc != -ENODEV)
15034 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
15035 }
15036 bp->fw_reset_timestamp = jiffies;
15037 }
15038
15039 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
15040 {
15041 return time_after(jiffies, bp->fw_reset_timestamp +
15042 (bp->fw_reset_max_dsecs * HZ / 10));
15043 }
15044
15045 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
15046 {
15047 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15048 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
15049 bnxt_dl_health_fw_status_update(bp, false);
15050 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
15051 netif_close(bp->dev);
15052 }
15053
15054 static void bnxt_fw_reset_task(struct work_struct *work)
15055 {
15056 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
15057 int rc = 0;
15058
15059 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15060 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
15061 return;
15062 }
15063
15064 switch (bp->fw_reset_state) {
15065 case BNXT_FW_RESET_STATE_POLL_VF: {
15066 int n = bnxt_get_registered_vfs(bp);
15067 int tmo;
15068
15069 if (n < 0) {
15070 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
15071 n, jiffies_to_msecs(jiffies -
15072 bp->fw_reset_timestamp));
15073 goto fw_reset_abort;
15074 } else if (n > 0) {
15075 if (bnxt_fw_reset_timeout(bp)) {
15076 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15077 bp->fw_reset_state = 0;
15078 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
15079 n);
15080 goto ulp_start;
15081 }
15082 bnxt_queue_fw_reset_work(bp, HZ / 10);
15083 return;
15084 }
15085 bp->fw_reset_timestamp = jiffies;
15086 netdev_lock(bp->dev);
15087 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
15088 bnxt_fw_reset_abort(bp, rc);
15089 netdev_unlock(bp->dev);
15090 goto ulp_start;
15091 }
15092 bnxt_fw_reset_close(bp);
15093 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
15094 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
15095 tmo = HZ / 10;
15096 } else {
15097 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15098 tmo = bp->fw_reset_min_dsecs * HZ / 10;
15099 }
15100 netdev_unlock(bp->dev);
15101 bnxt_queue_fw_reset_work(bp, tmo);
15102 return;
15103 }
15104 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
15105 u32 val;
15106
15107 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15108 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
15109 !bnxt_fw_reset_timeout(bp)) {
15110 bnxt_queue_fw_reset_work(bp, HZ / 5);
15111 return;
15112 }
15113
15114 if (!bp->fw_health->primary) {
15115 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
15116
15117 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15118 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
15119 return;
15120 }
15121 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
15122 }
15123 fallthrough;
15124 case BNXT_FW_RESET_STATE_RESET_FW:
15125 bnxt_reset_all(bp);
15126 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
15127 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
15128 return;
15129 case BNXT_FW_RESET_STATE_ENABLE_DEV:
15130 bnxt_inv_fw_health_reg(bp);
15131 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
15132 !bp->fw_reset_min_dsecs) {
15133 u16 val;
15134
15135 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
15136 if (val == 0xffff) {
15137 if (bnxt_fw_reset_timeout(bp)) {
15138 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
15139 rc = -ETIMEDOUT;
15140 goto fw_reset_abort;
15141 }
15142 bnxt_queue_fw_reset_work(bp, HZ / 1000);
15143 return;
15144 }
15145 }
15146 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
15147 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
15148 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
15149 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
15150 bnxt_dl_remote_reload(bp);
15151 if (pci_enable_device(bp->pdev)) {
15152 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
15153 rc = -ENODEV;
15154 goto fw_reset_abort;
15155 }
15156 pci_set_master(bp->pdev);
15157 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
15158 fallthrough;
15159 case BNXT_FW_RESET_STATE_POLL_FW:
15160 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
15161 rc = bnxt_hwrm_poll(bp);
15162 if (rc) {
15163 if (bnxt_fw_reset_timeout(bp)) {
15164 netdev_err(bp->dev, "Firmware reset aborted\n");
15165 goto fw_reset_abort_status;
15166 }
15167 bnxt_queue_fw_reset_work(bp, HZ / 5);
15168 return;
15169 }
15170 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
15171 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
15172 fallthrough;
15173 case BNXT_FW_RESET_STATE_OPENING:
15174 while (!netdev_trylock(bp->dev)) {
15175 bnxt_queue_fw_reset_work(bp, HZ / 10);
15176 return;
15177 }
15178 rc = bnxt_open(bp->dev);
15179 if (rc) {
15180 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
15181 bnxt_fw_reset_abort(bp, rc);
15182 netdev_unlock(bp->dev);
15183 goto ulp_start;
15184 }
15185
15186 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
15187 bp->fw_health->enabled) {
15188 bp->fw_health->last_fw_reset_cnt =
15189 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
15190 }
15191 bp->fw_reset_state = 0;
15192 /* Make sure fw_reset_state is 0 before clearing the flag */
15193 smp_mb__before_atomic();
15194 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15195 bnxt_ptp_reapply_pps(bp);
15196 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
15197 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
15198 bnxt_dl_health_fw_recovery_done(bp);
15199 bnxt_dl_health_fw_status_update(bp, true);
15200 }
15201 netdev_unlock(bp->dev);
15202 bnxt_ulp_start(bp, 0);
15203 bnxt_reenable_sriov(bp);
15204 netdev_lock(bp->dev);
15205 bnxt_vf_reps_alloc(bp);
15206 bnxt_vf_reps_open(bp);
15207 netdev_unlock(bp->dev);
15208 break;
15209 }
15210 return;
15211
15212 fw_reset_abort_status:
15213 if (bp->fw_health->status_reliable ||
15214 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
15215 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
15216
15217 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
15218 }
15219 fw_reset_abort:
15220 netdev_lock(bp->dev);
15221 bnxt_fw_reset_abort(bp, rc);
15222 netdev_unlock(bp->dev);
15223 ulp_start:
15224 bnxt_ulp_start(bp, rc);
15225 }
15226
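/* One-time PCI/board setup at probe time: enable the device, map BAR0 and
 * BAR4 (the doorbell BAR is mapped later), set the DMA mask, and initialize
 * the slow-path work items, locks and the periodic timer.
 */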
15227 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
15228 {
15229 int rc;
15230 struct bnxt *bp = netdev_priv(dev);
15231
15232 SET_NETDEV_DEV(dev, &pdev->dev);
15233
15234 /* enable device (incl. PCI PM wakeup), and bus-mastering */
15235 rc = pci_enable_device(pdev);
15236 if (rc) {
15237 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15238 goto init_err;
15239 }
15240
15241 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
15242 dev_err(&pdev->dev,
15243 "Cannot find PCI device base address, aborting\n");
15244 rc = -ENODEV;
15245 goto init_err_disable;
15246 }
15247
15248 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
15249 if (rc) {
15250 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15251 goto init_err_disable;
15252 }
15253
15254 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
15255 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
15256 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
15257 rc = -EIO;
15258 goto init_err_release;
15259 }
15260
15261 pci_set_master(pdev);
15262
15263 bp->dev = dev;
15264 bp->pdev = pdev;
15265
15266 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
15267 * determines the BAR size.
15268 */
15269 bp->bar0 = pci_ioremap_bar(pdev, 0);
15270 if (!bp->bar0) {
15271 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15272 rc = -ENOMEM;
15273 goto init_err_release;
15274 }
15275
15276 bp->bar2 = pci_ioremap_bar(pdev, 4);
15277 if (!bp->bar2) {
15278 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
15279 rc = -ENOMEM;
15280 goto init_err_release;
15281 }
15282
15283 INIT_WORK(&bp->sp_task, bnxt_sp_task);
15284 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
15285
15286 spin_lock_init(&bp->ntp_fltr_lock);
15287 #if BITS_PER_LONG == 32
15288 spin_lock_init(&bp->db_lock);
15289 #endif
15290
15291 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
15292 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
15293
15294 timer_setup(&bp->timer, bnxt_timer, 0);
15295 bp->current_interval = BNXT_TIMER_INTERVAL;
15296
15297 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
15298 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
15299
15300 clear_bit(BNXT_STATE_OPEN, &bp->state);
15301 return 0;
15302
15303 init_err_release:
15304 bnxt_unmap_bars(bp, pdev);
15305 pci_release_regions(pdev);
15306
15307 init_err_disable:
15308 pci_disable_device(pdev);
15309
15310 init_err:
15311 return rc;
15312 }
15313
15314 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
15315 {
15316 struct sockaddr *addr = p;
15317 struct bnxt *bp = netdev_priv(dev);
15318 int rc = 0;
15319
15320 netdev_assert_locked(dev);
15321
15322 if (!is_valid_ether_addr(addr->sa_data))
15323 return -EADDRNOTAVAIL;
15324
15325 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
15326 return 0;
15327
15328 rc = bnxt_approve_mac(bp, addr->sa_data, true);
15329 if (rc)
15330 return rc;
15331
15332 eth_hw_addr_set(dev, addr->sa_data);
15333 bnxt_clear_usr_fltrs(bp, true);
15334 if (netif_running(dev)) {
15335 bnxt_close_nic(bp, false, false);
15336 rc = bnxt_open_nic(bp, false, false);
15337 }
15338
15339 return rc;
15340 }
15341
15342 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
15343 {
15344 struct bnxt *bp = netdev_priv(dev);
15345
15346 netdev_assert_locked(dev);
15347
15348 if (netif_running(dev))
15349 bnxt_close_nic(bp, true, false);
15350
15351 WRITE_ONCE(dev->mtu, new_mtu);
15352
15353 /* MTU change may change the AGG ring settings if an XDP multi-buffer
15354 * program is attached. We need to set the AGG rings settings and
15355 * rx_skb_func accordingly.
15356 */
15357 if (READ_ONCE(bp->xdp_prog))
15358 bnxt_set_rx_skb_mode(bp, true);
15359
15360 bnxt_set_ring_params(bp);
15361
15362 if (netif_running(dev))
15363 return bnxt_open_nic(bp, true, false);
15364
15365 return 0;
15366 }
15367
15368 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
15369 {
15370 struct bnxt *bp = netdev_priv(dev);
15371 bool sh = false;
15372 int rc, tx_cp;
15373
15374 if (tc > bp->max_tc) {
15375 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
15376 tc, bp->max_tc);
15377 return -EINVAL;
15378 }
15379
15380 if (bp->num_tc == tc)
15381 return 0;
15382
15383 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
15384 sh = true;
15385
15386 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
15387 sh, tc, bp->tx_nr_rings_xdp);
15388 if (rc)
15389 return rc;
15390
15391 /* Needs to close the device and do hw resource re-allocations */
15392 if (netif_running(bp->dev))
15393 bnxt_close_nic(bp, true, false);
15394
15395 if (tc) {
15396 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
15397 netdev_set_num_tc(dev, tc);
15398 bp->num_tc = tc;
15399 } else {
15400 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
15401 netdev_reset_tc(dev);
15402 bp->num_tc = 0;
15403 }
15404 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
15405 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
15406 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
15407 tx_cp + bp->rx_nr_rings;
15408
15409 if (netif_running(bp->dev))
15410 return bnxt_open_nic(bp, true, false);
15411
15412 return 0;
15413 }
15414
15415 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
15416 void *cb_priv)
15417 {
15418 struct bnxt *bp = cb_priv;
15419
15420 if (!bnxt_tc_flower_enabled(bp) ||
15421 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
15422 return -EOPNOTSUPP;
15423
15424 switch (type) {
15425 case TC_SETUP_CLSFLOWER:
15426 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
15427 default:
15428 return -EOPNOTSUPP;
15429 }
15430 }
15431
15432 LIST_HEAD(bnxt_block_cb_list);
15433
15434 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
15435 void *type_data)
15436 {
15437 struct bnxt *bp = netdev_priv(dev);
15438
15439 switch (type) {
15440 case TC_SETUP_BLOCK:
15441 return flow_block_cb_setup_simple(type_data,
15442 &bnxt_block_cb_list,
15443 bnxt_setup_tc_block_cb,
15444 bp, bp, true);
15445 case TC_SETUP_QDISC_MQPRIO: {
15446 struct tc_mqprio_qopt *mqprio = type_data;
15447
15448 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
15449
15450 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
15451 }
15452 default:
15453 return -EOPNOTSUPP;
15454 }
15455 }
15456
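/* Hash index into the ntuple filter table: taken from the skb RX hash when
 * an skb is available, otherwise computed with the Toeplitz hash over the
 * flow keys using the default VNIC's RSS key.
 */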
15457 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
15458 const struct sk_buff *skb)
15459 {
15460 struct bnxt_vnic_info *vnic;
15461
15462 if (skb)
15463 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
15464
15465 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
15466 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
15467 }
15468
15469 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
15470 u32 idx)
15471 {
15472 struct hlist_head *head;
15473 int bit_id;
15474
15475 spin_lock_bh(&bp->ntp_fltr_lock);
15476 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
15477 if (bit_id < 0) {
15478 spin_unlock_bh(&bp->ntp_fltr_lock);
15479 return -ENOMEM;
15480 }
15481
15482 fltr->base.sw_id = (u16)bit_id;
15483 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
15484 fltr->base.flags |= BNXT_ACT_RING_DST;
15485 head = &bp->ntp_fltr_hash_tbl[idx];
15486 hlist_add_head_rcu(&fltr->base.hash, head);
15487 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
15488 bnxt_insert_usr_fltr(bp, &fltr->base);
15489 bp->ntp_fltr_count++;
15490 spin_unlock_bh(&bp->ntp_fltr_lock);
15491 return 0;
15492 }
15493
15494 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
15495 struct bnxt_ntuple_filter *f2)
15496 {
15497 struct bnxt_flow_masks *masks1 = &f1->fmasks;
15498 struct bnxt_flow_masks *masks2 = &f2->fmasks;
15499 struct flow_keys *keys1 = &f1->fkeys;
15500 struct flow_keys *keys2 = &f2->fkeys;
15501
15502 if (keys1->basic.n_proto != keys2->basic.n_proto ||
15503 keys1->basic.ip_proto != keys2->basic.ip_proto)
15504 return false;
15505
15506 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
15507 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
15508 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
15509 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
15510 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
15511 return false;
15512 } else {
15513 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
15514 &keys2->addrs.v6addrs.src) ||
15515 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
15516 &masks2->addrs.v6addrs.src) ||
15517 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
15518 &keys2->addrs.v6addrs.dst) ||
15519 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
15520 &masks2->addrs.v6addrs.dst))
15521 return false;
15522 }
15523
15524 return keys1->ports.src == keys2->ports.src &&
15525 masks1->ports.src == masks2->ports.src &&
15526 keys1->ports.dst == keys2->ports.dst &&
15527 masks1->ports.dst == masks2->ports.dst &&
15528 keys1->control.flags == keys2->control.flags &&
15529 f1->l2_fltr == f2->l2_fltr;
15530 }
15531
15532 struct bnxt_ntuple_filter *
15533 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
15534 struct bnxt_ntuple_filter *fltr, u32 idx)
15535 {
15536 struct bnxt_ntuple_filter *f;
15537 struct hlist_head *head;
15538
15539 head = &bp->ntp_fltr_hash_tbl[idx];
15540 hlist_for_each_entry_rcu(f, head, base.hash) {
15541 if (bnxt_fltr_match(f, fltr))
15542 return f;
15543 }
15544 return NULL;
15545 }
15546
15547 #ifdef CONFIG_RFS_ACCEL
15548 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
15549 u16 rxq_index, u32 flow_id)
15550 {
15551 struct bnxt *bp = netdev_priv(dev);
15552 struct bnxt_ntuple_filter *fltr, *new_fltr;
15553 struct flow_keys *fkeys;
15554 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
15555 struct bnxt_l2_filter *l2_fltr;
15556 int rc = 0, idx;
15557 u32 flags;
15558
15559 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
15560 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
15561 atomic_inc(&l2_fltr->refcnt);
15562 } else {
15563 struct bnxt_l2_key key;
15564
15565 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
15566 key.vlan = 0;
15567 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
15568 if (!l2_fltr)
15569 return -EINVAL;
15570 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
15571 bnxt_del_l2_filter(bp, l2_fltr);
15572 return -EINVAL;
15573 }
15574 }
15575 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
15576 if (!new_fltr) {
15577 bnxt_del_l2_filter(bp, l2_fltr);
15578 return -ENOMEM;
15579 }
15580
15581 fkeys = &new_fltr->fkeys;
15582 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
15583 rc = -EPROTONOSUPPORT;
15584 goto err_free;
15585 }
15586
15587 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
15588 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
15589 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
15590 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
15591 rc = -EPROTONOSUPPORT;
15592 goto err_free;
15593 }
15594 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
15595 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
15596 if (bp->hwrm_spec_code < 0x10601) {
15597 rc = -EPROTONOSUPPORT;
15598 goto err_free;
15599 }
15600 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
15601 }
15602 flags = fkeys->control.flags;
15603 if (((flags & FLOW_DIS_ENCAPSULATION) &&
15604 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
15605 rc = -EPROTONOSUPPORT;
15606 goto err_free;
15607 }
15608 new_fltr->l2_fltr = l2_fltr;
15609
15610 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
15611 rcu_read_lock();
15612 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
15613 if (fltr) {
15614 rc = fltr->base.sw_id;
15615 rcu_read_unlock();
15616 goto err_free;
15617 }
15618 rcu_read_unlock();
15619
15620 new_fltr->flow_id = flow_id;
15621 new_fltr->base.rxq = rxq_index;
15622 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
15623 if (!rc) {
15624 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
15625 return new_fltr->base.sw_id;
15626 }
15627
15628 err_free:
15629 bnxt_del_l2_filter(bp, l2_fltr);
15630 kfree(new_fltr);
15631 return rc;
15632 }
15633 #endif
15634
15635 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
15636 {
15637 spin_lock_bh(&bp->ntp_fltr_lock);
15638 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
15639 spin_unlock_bh(&bp->ntp_fltr_lock);
15640 return;
15641 }
15642 hlist_del_rcu(&fltr->base.hash);
15643 bnxt_del_one_usr_fltr(bp, &fltr->base);
15644 bp->ntp_fltr_count--;
15645 spin_unlock_bh(&bp->ntp_fltr_lock);
15646 bnxt_del_l2_filter(bp, fltr->l2_fltr);
15647 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
15648 kfree_rcu(fltr, base.rcu);
15649 }
15650
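/* Walk the aRFS filter hash table: program not-yet-committed filters into
 * the firmware and free filters whose flows the stack reports as expired
 * (filters marked BNXT_ACT_NO_AGING are left alone).
 */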
15651 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
15652 {
15653 #ifdef CONFIG_RFS_ACCEL
15654 int i;
15655
15656 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
15657 struct hlist_head *head;
15658 struct hlist_node *tmp;
15659 struct bnxt_ntuple_filter *fltr;
15660 int rc;
15661
15662 head = &bp->ntp_fltr_hash_tbl[i];
15663 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
15664 bool del = false;
15665
15666 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
15667 if (fltr->base.flags & BNXT_ACT_NO_AGING)
15668 continue;
15669 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
15670 fltr->flow_id,
15671 fltr->base.sw_id)) {
15672 bnxt_hwrm_cfa_ntuple_filter_free(bp,
15673 fltr);
15674 del = true;
15675 }
15676 } else {
15677 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
15678 fltr);
15679 if (rc)
15680 del = true;
15681 else
15682 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
15683 }
15684
15685 if (del)
15686 bnxt_del_ntp_filter(bp, fltr);
15687 }
15688 }
15689 #endif
15690 }
15691
15692 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
15693 unsigned int entry, struct udp_tunnel_info *ti)
15694 {
15695 struct bnxt *bp = netdev_priv(netdev);
15696 unsigned int cmd;
15697
15698 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15699 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
15700 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15701 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
15702 else
15703 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
15704
15705 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
15706 }
15707
15708 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
15709 unsigned int entry, struct udp_tunnel_info *ti)
15710 {
15711 struct bnxt *bp = netdev_priv(netdev);
15712 unsigned int cmd;
15713
15714 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
15715 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
15716 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
15717 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
15718 else
15719 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
15720
15721 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
15722 }
15723
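/* UDP tunnel port tables: one VXLAN and one GENEVE entry in the base table,
 * with an additional VXLAN-GPE entry in the bnxt_udp_tunnels_p7 variant.
 */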
15724 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
15725 .set_port = bnxt_udp_tunnel_set_port,
15726 .unset_port = bnxt_udp_tunnel_unset_port,
15727 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15728 .tables = {
15729 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15730 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15731 },
15732 }, bnxt_udp_tunnels_p7 = {
15733 .set_port = bnxt_udp_tunnel_set_port,
15734 .unset_port = bnxt_udp_tunnel_unset_port,
15735 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
15736 .tables = {
15737 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
15738 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
15739 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
15740 },
15741 };
15742
15743 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15744 struct net_device *dev, u32 filter_mask,
15745 int nlflags)
15746 {
15747 struct bnxt *bp = netdev_priv(dev);
15748
15749 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
15750 nlflags, filter_mask, NULL);
15751 }
15752
15753 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
15754 u16 flags, struct netlink_ext_ack *extack)
15755 {
15756 struct bnxt *bp = netdev_priv(dev);
15757 struct nlattr *attr, *br_spec;
15758 int rem, rc = 0;
15759
15760 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
15761 return -EOPNOTSUPP;
15762
15763 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15764 if (!br_spec)
15765 return -EINVAL;
15766
15767 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
15768 u16 mode;
15769
15770 mode = nla_get_u16(attr);
15771 if (mode == bp->br_mode)
15772 break;
15773
15774 rc = bnxt_hwrm_set_br_mode(bp, mode);
15775 if (!rc)
15776 bp->br_mode = mode;
15777 break;
15778 }
15779 return rc;
15780 }
15781
15782 int bnxt_get_port_parent_id(struct net_device *dev,
15783 struct netdev_phys_item_id *ppid)
15784 {
15785 struct bnxt *bp = netdev_priv(dev);
15786
15787 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
15788 return -EOPNOTSUPP;
15789
15790 /* The PF and its VF-reps only support the switchdev framework */
15791 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
15792 return -EOPNOTSUPP;
15793
15794 ppid->id_len = sizeof(bp->dsn);
15795 memcpy(ppid->id, bp->dsn, ppid->id_len);
15796
15797 return 0;
15798 }
15799
15800 static const struct net_device_ops bnxt_netdev_ops = {
15801 .ndo_open = bnxt_open,
15802 .ndo_start_xmit = bnxt_start_xmit,
15803 .ndo_stop = bnxt_close,
15804 .ndo_get_stats64 = bnxt_get_stats64,
15805 .ndo_set_rx_mode = bnxt_set_rx_mode,
15806 .ndo_eth_ioctl = bnxt_ioctl,
15807 .ndo_validate_addr = eth_validate_addr,
15808 .ndo_set_mac_address = bnxt_change_mac_addr,
15809 .ndo_change_mtu = bnxt_change_mtu,
15810 .ndo_fix_features = bnxt_fix_features,
15811 .ndo_set_features = bnxt_set_features,
15812 .ndo_features_check = bnxt_features_check,
15813 .ndo_tx_timeout = bnxt_tx_timeout,
15814 #ifdef CONFIG_BNXT_SRIOV
15815 .ndo_get_vf_config = bnxt_get_vf_config,
15816 .ndo_set_vf_mac = bnxt_set_vf_mac,
15817 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
15818 .ndo_set_vf_rate = bnxt_set_vf_bw,
15819 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
15820 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
15821 .ndo_set_vf_trust = bnxt_set_vf_trust,
15822 #endif
15823 .ndo_setup_tc = bnxt_setup_tc,
15824 #ifdef CONFIG_RFS_ACCEL
15825 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
15826 #endif
15827 .ndo_bpf = bnxt_xdp,
15828 .ndo_xdp_xmit = bnxt_xdp_xmit,
15829 .ndo_bridge_getlink = bnxt_bridge_getlink,
15830 .ndo_bridge_setlink = bnxt_bridge_setlink,
15831 .ndo_hwtstamp_get = bnxt_hwtstamp_get,
15832 .ndo_hwtstamp_set = bnxt_hwtstamp_set,
15833 };
15834
15835 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
15836 struct netdev_queue_stats_rx *stats)
15837 {
15838 struct bnxt *bp = netdev_priv(dev);
15839 struct bnxt_cp_ring_info *cpr;
15840 u64 *sw;
15841
15842 if (!bp->bnapi)
15843 return;
15844
15845 cpr = &bp->bnapi[i]->cp_ring;
15846 sw = cpr->stats.sw_stats;
15847
15848 stats->packets = 0;
15849 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
15850 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
15851 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
15852
15853 stats->bytes = 0;
15854 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
15855 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
15856 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
15857
15858 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
15859 }
15860
15861 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
15862 struct netdev_queue_stats_tx *stats)
15863 {
15864 struct bnxt *bp = netdev_priv(dev);
15865 struct bnxt_napi *bnapi;
15866 u64 *sw;
15867
15868 if (!bp->tx_ring)
15869 return;
15870
15871 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
15872 sw = bnapi->cp_ring.stats.sw_stats;
15873
15874 stats->packets = 0;
15875 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
15876 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
15877 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
15878
15879 stats->bytes = 0;
15880 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
15881 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
15882 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
15883 }
15884
15885 static void bnxt_get_base_stats(struct net_device *dev,
15886 struct netdev_queue_stats_rx *rx,
15887 struct netdev_queue_stats_tx *tx)
15888 {
15889 struct bnxt *bp = netdev_priv(dev);
15890
15891 rx->packets = bp->net_stats_prev.rx_packets;
15892 rx->bytes = bp->net_stats_prev.rx_bytes;
15893 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
15894
15895 tx->packets = bp->net_stats_prev.tx_packets;
15896 tx->bytes = bp->net_stats_prev.tx_bytes;
15897 }
15898
15899 static const struct netdev_stat_ops bnxt_stat_ops = {
15900 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
15901 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
15902 .get_base_stats = bnxt_get_base_stats,
15903 };
15904
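/* Allocate a standalone copy of RX ring @idx in @qmem: clone the ring
 * state, create new page pools, register an XDP rxq, and allocate the
 * RX, aggregation, and TPA ring memory.  No hardware state is touched
 * here; bnxt_queue_start() later swaps the clone into the live ring.
 */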
15905 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
15906 {
15907 struct bnxt_rx_ring_info *rxr, *clone;
15908 struct bnxt *bp = netdev_priv(dev);
15909 struct bnxt_ring_struct *ring;
15910 int rc;
15911
15912 if (!bp->rx_ring)
15913 return -ENETDOWN;
15914
15915 rxr = &bp->rx_ring[idx];
15916 clone = qmem;
15917 memcpy(clone, rxr, sizeof(*rxr));
15918 bnxt_init_rx_ring_struct(bp, clone);
15919 bnxt_reset_rx_ring_struct(bp, clone);
15920
15921 clone->rx_prod = 0;
15922 clone->rx_agg_prod = 0;
15923 clone->rx_sw_agg_prod = 0;
15924 clone->rx_next_cons = 0;
15925 clone->need_head_pool = false;
15926
15927 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
15928 if (rc)
15929 return rc;
15930
15931 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
15932 if (rc < 0)
15933 goto err_page_pool_destroy;
15934
15935 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
15936 MEM_TYPE_PAGE_POOL,
15937 clone->page_pool);
15938 if (rc)
15939 goto err_rxq_info_unreg;
15940
15941 ring = &clone->rx_ring_struct;
15942 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15943 if (rc)
15944 goto err_free_rx_ring;
15945
15946 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
15947 ring = &clone->rx_agg_ring_struct;
15948 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
15949 if (rc)
15950 goto err_free_rx_agg_ring;
15951
15952 rc = bnxt_alloc_rx_agg_bmap(bp, clone);
15953 if (rc)
15954 goto err_free_rx_agg_ring;
15955 }
15956
15957 if (bp->flags & BNXT_FLAG_TPA) {
15958 rc = bnxt_alloc_one_tpa_info(bp, clone);
15959 if (rc)
15960 goto err_free_tpa_info;
15961 }
15962
15963 bnxt_init_one_rx_ring_rxbd(bp, clone);
15964 bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
15965
15966 bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
15967 if (bp->flags & BNXT_FLAG_AGG_RINGS)
15968 bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
15969 if (bp->flags & BNXT_FLAG_TPA)
15970 bnxt_alloc_one_tpa_info_data(bp, clone);
15971
15972 return 0;
15973
15974 err_free_tpa_info:
15975 bnxt_free_one_tpa_info(bp, clone);
15976 err_free_rx_agg_ring:
15977 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
15978 err_free_rx_ring:
15979 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
15980 err_rxq_info_unreg:
15981 xdp_rxq_info_unreg(&clone->xdp_rxq);
15982 err_page_pool_destroy:
15983 page_pool_destroy(clone->page_pool);
15984 page_pool_destroy(clone->head_pool);
15985 clone->page_pool = NULL;
15986 clone->head_pool = NULL;
15987 return rc;
15988 }
15989
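/* Release everything allocated by bnxt_queue_mem_alloc() for the ring
 * copy in @qmem: buffers, TPA info, the XDP rxq registration, page
 * pools, ring memory, and the aggregation bitmap.
 */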
15990 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
15991 {
15992 struct bnxt_rx_ring_info *rxr = qmem;
15993 struct bnxt *bp = netdev_priv(dev);
15994 struct bnxt_ring_struct *ring;
15995
15996 bnxt_free_one_rx_ring_skbs(bp, rxr);
15997 bnxt_free_one_tpa_info(bp, rxr);
15998
15999 xdp_rxq_info_unreg(&rxr->xdp_rxq);
16000
16001 page_pool_destroy(rxr->page_pool);
16002 page_pool_destroy(rxr->head_pool);
16003 rxr->page_pool = NULL;
16004 rxr->head_pool = NULL;
16005
16006 ring = &rxr->rx_ring_struct;
16007 bnxt_free_ring(bp, &ring->ring_mem);
16008
16009 ring = &rxr->rx_agg_ring_struct;
16010 bnxt_free_ring(bp, &ring->ring_mem);
16011
16012 kfree(rxr->rx_agg_bmap);
16013 rxr->rx_agg_bmap = NULL;
16014 }
16015
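/* Transfer the ring memory (page table, vmem, and per-page arrays) from
 * the prepared @src ring into the live @dst ring.  The WARN_ONs sanity
 * check that both rings were created with identical geometry.
 */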
16016 static void bnxt_copy_rx_ring(struct bnxt *bp,
16017 struct bnxt_rx_ring_info *dst,
16018 struct bnxt_rx_ring_info *src)
16019 {
16020 struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
16021 struct bnxt_ring_struct *dst_ring, *src_ring;
16022 int i;
16023
16024 dst_ring = &dst->rx_ring_struct;
16025 dst_rmem = &dst_ring->ring_mem;
16026 src_ring = &src->rx_ring_struct;
16027 src_rmem = &src_ring->ring_mem;
16028
16029 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16030 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16031 WARN_ON(dst_rmem->flags != src_rmem->flags);
16032 WARN_ON(dst_rmem->depth != src_rmem->depth);
16033 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16034 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16035
16036 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16037 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16038 *dst_rmem->vmem = *src_rmem->vmem;
16039 for (i = 0; i < dst_rmem->nr_pages; i++) {
16040 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16041 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16042 }
16043
16044 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
16045 return;
16046
16047 dst_ring = &dst->rx_agg_ring_struct;
16048 dst_rmem = &dst_ring->ring_mem;
16049 src_ring = &src->rx_agg_ring_struct;
16050 src_rmem = &src_ring->ring_mem;
16051
16052 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
16053 WARN_ON(dst_rmem->page_size != src_rmem->page_size);
16054 WARN_ON(dst_rmem->flags != src_rmem->flags);
16055 WARN_ON(dst_rmem->depth != src_rmem->depth);
16056 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
16057 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
16058 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
16059
16060 dst_rmem->pg_tbl = src_rmem->pg_tbl;
16061 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
16062 *dst_rmem->vmem = *src_rmem->vmem;
16063 for (i = 0; i < dst_rmem->nr_pages; i++) {
16064 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
16065 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
16066 }
16067
16068 dst->rx_agg_bmap = src->rx_agg_bmap;
16069 }
16070
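/* Bring RX queue @idx back up using the memory prepared in @qmem:
 * adopt the clone's state, re-create the hardware rings via HWRM,
 * restart any shared TX queue, re-enable NAPI, and restore the VNIC
 * MRUs.  Any HWRM failure schedules a full reset.
 */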
16071 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
16072 {
16073 struct bnxt *bp = netdev_priv(dev);
16074 struct bnxt_rx_ring_info *rxr, *clone;
16075 struct bnxt_cp_ring_info *cpr;
16076 struct bnxt_vnic_info *vnic;
16077 struct bnxt_napi *bnapi;
16078 int i, rc;
16079 u16 mru;
16080
16081 rxr = &bp->rx_ring[idx];
16082 clone = qmem;
16083
16084 rxr->rx_prod = clone->rx_prod;
16085 rxr->rx_agg_prod = clone->rx_agg_prod;
16086 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
16087 rxr->rx_next_cons = clone->rx_next_cons;
16088 rxr->rx_tpa = clone->rx_tpa;
16089 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
16090 rxr->page_pool = clone->page_pool;
16091 rxr->head_pool = clone->head_pool;
16092 rxr->xdp_rxq = clone->xdp_rxq;
16093 rxr->need_head_pool = clone->need_head_pool;
16094
16095 bnxt_copy_rx_ring(bp, rxr, clone);
16096
16097 bnapi = rxr->bnapi;
16098 cpr = &bnapi->cp_ring;
16099
16100 /* All rings have been reserved and previously allocated.
16101 * Reallocating with the same parameters should never fail.
16102 */
16103 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
16104 if (rc)
16105 goto err_reset;
16106
16107 if (bp->tph_mode) {
16108 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
16109 if (rc)
16110 goto err_reset;
16111 }
16112
16113 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
16114 if (rc)
16115 goto err_reset;
16116
16117 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
16118 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16119 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
16120
16121 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
16122 rc = bnxt_tx_queue_start(bp, idx);
16123 if (rc)
16124 goto err_reset;
16125 }
16126
16127 bnxt_enable_rx_page_pool(rxr);
16128 napi_enable_locked(&bnapi->napi);
16129 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16130
16131 mru = bp->dev->mtu + VLAN_ETH_HLEN;
16132 for (i = 0; i < bp->nr_vnics; i++) {
16133 vnic = &bp->vnic_info[i];
16134
16135 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
16136 if (rc)
16137 return rc;
16138 }
16139 return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
16140
16141 err_reset:
16142 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
16143 rc);
16144 napi_enable_locked(&bnapi->napi);
16145 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
16146 bnxt_reset_task(bp, true);
16147 return rc;
16148 }
16149
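/* Quiesce RX queue @idx: zero the VNIC MRUs so no new packets are
 * steered to it, free the hardware rings, disable page pool direct
 * recycling, stop any shared TX queue and NAPI, then save the ring
 * state into @qmem for a later bnxt_queue_start().
 */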
16150 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
16151 {
16152 struct bnxt *bp = netdev_priv(dev);
16153 struct bnxt_rx_ring_info *rxr;
16154 struct bnxt_cp_ring_info *cpr;
16155 struct bnxt_vnic_info *vnic;
16156 struct bnxt_napi *bnapi;
16157 int i;
16158
16159 for (i = 0; i < bp->nr_vnics; i++) {
16160 vnic = &bp->vnic_info[i];
16161
16162 bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
16163 }
16164 bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
16165 /* Make sure NAPI sees that the VNIC is disabled */
16166 synchronize_net();
16167 rxr = &bp->rx_ring[idx];
16168 bnapi = rxr->bnapi;
16169 cpr = &bnapi->cp_ring;
16170 cancel_work_sync(&cpr->dim.work);
16171 bnxt_hwrm_rx_ring_free(bp, rxr, false);
16172 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
16173 page_pool_disable_direct_recycling(rxr->page_pool);
16174 if (bnxt_separate_head_pool(rxr))
16175 page_pool_disable_direct_recycling(rxr->head_pool);
16176
16177 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
16178 bnxt_tx_queue_stop(bp, idx);
16179
16180 /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
16181 * completion is handled in NAPI to guarantee no more DMA on that ring
16182 * after seeing the completion.
16183 */
16184 napi_disable_locked(&bnapi->napi);
16185
16186 if (bp->tph_mode) {
16187 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
16188 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
16189 }
16190 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
16191
16192 memcpy(qmem, rxr, sizeof(*rxr));
16193 bnxt_init_rx_ring_struct(bp, qmem);
16194
16195 return 0;
16196 }
16197
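/* Per-queue restart hooks for the netdev queue management API; these
 * are registered only when BNXT_SUPPORTS_QUEUE_API() is true (see
 * bnxt_init_one()).
 */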
16198 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
16199 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
16200 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
16201 .ndo_queue_mem_free = bnxt_queue_mem_free,
16202 .ndo_queue_start = bnxt_queue_start,
16203 .ndo_queue_stop = bnxt_queue_stop,
16204 };
16205
16206 static void bnxt_remove_one(struct pci_dev *pdev)
16207 {
16208 struct net_device *dev = pci_get_drvdata(pdev);
16209 struct bnxt *bp = netdev_priv(dev);
16210
16211 if (BNXT_PF(bp))
16212 __bnxt_sriov_disable(bp);
16213
16214 bnxt_rdma_aux_device_del(bp);
16215
16216 unregister_netdev(dev);
16217 bnxt_ptp_clear(bp);
16218
16219 bnxt_rdma_aux_device_uninit(bp);
16220
16221 bnxt_free_l2_filters(bp, true);
16222 bnxt_free_ntp_fltrs(bp, true);
16223 WARN_ON(bp->num_rss_ctx);
16224 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
16225 /* Flush any pending tasks */
16226 cancel_work_sync(&bp->sp_task);
16227 cancel_delayed_work_sync(&bp->fw_reset_task);
16228 bp->sp_event = 0;
16229
16230 bnxt_dl_fw_reporters_destroy(bp);
16231 bnxt_dl_unregister(bp);
16232 bnxt_shutdown_tc(bp);
16233
16234 bnxt_clear_int_mode(bp);
16235 bnxt_hwrm_func_drv_unrgtr(bp);
16236 bnxt_free_hwrm_resources(bp);
16237 bnxt_hwmon_uninit(bp);
16238 bnxt_ethtool_free(bp);
16239 bnxt_dcb_free(bp);
16240 kfree(bp->ptp_cfg);
16241 bp->ptp_cfg = NULL;
16242 kfree(bp->fw_health);
16243 bp->fw_health = NULL;
16244 bnxt_cleanup_pci(bp);
16245 bnxt_free_ctx_mem(bp, true);
16246 bnxt_free_crash_dump_mem(bp);
16247 kfree(bp->rss_indir_tbl);
16248 bp->rss_indir_tbl = NULL;
16249 bnxt_free_port_stats(bp);
16250 free_netdev(dev);
16251 }
16252
16253 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
16254 {
16255 int rc = 0;
16256 struct bnxt_link_info *link_info = &bp->link_info;
16257
16258 bp->phy_flags = 0;
16259 rc = bnxt_hwrm_phy_qcaps(bp);
16260 if (rc) {
16261 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
16262 rc);
16263 return rc;
16264 }
16265 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
16266 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
16267 else
16268 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
16269
16270 bp->mac_flags = 0;
16271 bnxt_hwrm_mac_qcaps(bp);
16272
16273 if (!fw_dflt)
16274 return 0;
16275
16276 mutex_lock(&bp->link_lock);
16277 rc = bnxt_update_link(bp, false);
16278 if (rc) {
16279 mutex_unlock(&bp->link_lock);
16280 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
16281 rc);
16282 return rc;
16283 }
16284
16285 /* Older firmware does not have supported_auto_speeds, so assume
16286 * that all supported speeds can be autonegotiated.
16287 */
16288 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
16289 link_info->support_auto_speeds = link_info->support_speeds;
16290
16291 bnxt_init_ethtool_link_settings(bp);
16292 mutex_unlock(&bp->link_lock);
16293 return 0;
16294 }
16295
16296 static int bnxt_get_max_irq(struct pci_dev *pdev)
16297 {
16298 u16 ctrl;
16299
16300 if (!pdev->msix_cap)
16301 return 1;
16302
16303 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
16304 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
16305 }
16306
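/* Compute the maximum usable RX, TX, and completion rings from the
 * firmware resource limits, accounting for MSI-X vectors and stat
 * contexts in use by the RoCE ULP, the aggregation-ring split, and
 * Nitro A0 quirks.
 */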
16307 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16308 int *max_cp)
16309 {
16310 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
16311 int max_ring_grps = 0, max_irq;
16312
16313 *max_tx = hw_resc->max_tx_rings;
16314 *max_rx = hw_resc->max_rx_rings;
16315 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
16316 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
16317 bnxt_get_ulp_msix_num_in_use(bp),
16318 hw_resc->max_stat_ctxs -
16319 bnxt_get_ulp_stat_ctxs_in_use(bp));
16320 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
16321 *max_cp = min_t(int, *max_cp, max_irq);
16322 max_ring_grps = hw_resc->max_hw_ring_grps;
16323 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
16324 *max_cp -= 1;
16325 *max_rx -= 2;
16326 }
16327 if (bp->flags & BNXT_FLAG_AGG_RINGS)
16328 *max_rx >>= 1;
16329 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
16330 int rc;
16331
16332 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
16333 if (rc) {
16334 *max_rx = 0;
16335 *max_tx = 0;
16336 }
16337 /* On P5_PLUS chips, the max_cp output parameter should be the available NQs */
16338 *max_cp = max_irq;
16339 }
16340 *max_rx = min_t(int, *max_rx, max_ring_grps);
16341 }
16342
16343 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
16344 {
16345 int rx, tx, cp;
16346
16347 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
16348 *max_rx = rx;
16349 *max_tx = tx;
16350 if (!rx || !tx || !cp)
16351 return -ENOMEM;
16352
16353 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
16354 }
16355
16356 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
16357 bool shared)
16358 {
16359 int rc;
16360
16361 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16362 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
16363 /* Not enough rings, try disabling agg rings. */
16364 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
16365 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
16366 if (rc) {
16367 /* set BNXT_FLAG_AGG_RINGS back for consistency */
16368 bp->flags |= BNXT_FLAG_AGG_RINGS;
16369 return rc;
16370 }
16371 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
16372 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16373 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
16374 bnxt_set_ring_params(bp);
16375 }
16376
16377 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
16378 int max_cp, max_stat, max_irq;
16379
16380 /* Reserve minimum resources for RoCE */
16381 max_cp = bnxt_get_max_func_cp_rings(bp);
16382 max_stat = bnxt_get_max_func_stat_ctxs(bp);
16383 max_irq = bnxt_get_max_func_irqs(bp);
16384 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
16385 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
16386 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
16387 return 0;
16388
16389 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
16390 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
16391 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
16392 max_cp = min_t(int, max_cp, max_irq);
16393 max_cp = min_t(int, max_cp, max_stat);
16394 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
16395 if (rc)
16396 rc = 0;
16397 }
16398 return rc;
16399 }
16400
16401 /* In the initial default shared ring setting, each shared ring must have
16402 * an RX/TX ring pair.
16403 */
16404 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
16405 {
16406 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
16407 bp->rx_nr_rings = bp->cp_nr_rings;
16408 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
16409 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16410 }
16411
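/* Choose the default ring counts: start from the default RSS queue
 * count (capped by the CPU count on multi-port cards and by firmware
 * limits), set aside MSI-X vectors for RoCE when enough are available,
 * and reserve the rings with firmware, re-reserving once if they were
 * trimmed.
 */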
16412 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
16413 {
16414 int dflt_rings, max_rx_rings, max_tx_rings, rc;
16415 int avail_msix;
16416
16417 if (!bnxt_can_reserve_rings(bp))
16418 return 0;
16419
16420 if (sh)
16421 bp->flags |= BNXT_FLAG_SHARED_RINGS;
16422 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
16423 /* Reduce default rings on multi-port cards so that total default
16424 * rings do not exceed CPU count.
16425 */
16426 if (bp->port_count > 1) {
16427 int max_rings =
16428 max_t(int, num_online_cpus() / bp->port_count, 1);
16429
16430 dflt_rings = min_t(int, dflt_rings, max_rings);
16431 }
16432 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
16433 if (rc)
16434 return rc;
16435 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
16436 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
16437 if (sh)
16438 bnxt_trim_dflt_sh_rings(bp);
16439 else
16440 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
16441 bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
16442
16443 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
16444 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
16445 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
16446
16447 bnxt_set_ulp_msix_num(bp, ulp_num_msix);
16448 bnxt_set_dflt_ulp_stat_ctxs(bp);
16449 }
16450
16451 rc = __bnxt_reserve_rings(bp);
16452 if (rc && rc != -ENODEV)
16453 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
16454 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16455 if (sh)
16456 bnxt_trim_dflt_sh_rings(bp);
16457
16458 /* Rings may have been trimmed, re-reserve the trimmed rings. */
16459 if (bnxt_need_reserve_rings(bp)) {
16460 rc = __bnxt_reserve_rings(bp);
16461 if (rc && rc != -ENODEV)
16462 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
16463 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16464 }
16465 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
16466 bp->rx_nr_rings++;
16467 bp->cp_nr_rings++;
16468 }
16469 if (rc) {
16470 bp->tx_nr_rings = 0;
16471 bp->rx_nr_rings = 0;
16472 }
16473 return rc;
16474 }
16475
16476 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
16477 {
16478 int rc;
16479
16480 if (bp->tx_nr_rings)
16481 return 0;
16482
16483 bnxt_ulp_irq_stop(bp);
16484 bnxt_clear_int_mode(bp);
16485 rc = bnxt_set_dflt_rings(bp, true);
16486 if (rc) {
16487 if (BNXT_VF(bp) && rc == -ENODEV)
16488 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16489 else
16490 netdev_err(bp->dev, "Not enough rings available.\n");
16491 goto init_dflt_ring_err;
16492 }
16493 rc = bnxt_init_int_mode(bp);
16494 if (rc)
16495 goto init_dflt_ring_err;
16496
16497 bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
16498
16499 bnxt_set_dflt_rfs(bp);
16500
16501 init_dflt_ring_err:
16502 bnxt_ulp_irq_restart(bp, rc);
16503 return rc;
16504 }
16505
16506 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
16507 {
16508 int rc;
16509
16510 netdev_ops_assert_locked(bp->dev);
16511 bnxt_hwrm_func_qcaps(bp);
16512
16513 if (netif_running(bp->dev))
16514 __bnxt_close_nic(bp, true, false);
16515
16516 bnxt_ulp_irq_stop(bp);
16517 bnxt_clear_int_mode(bp);
16518 rc = bnxt_init_int_mode(bp);
16519 bnxt_ulp_irq_restart(bp, rc);
16520
16521 if (netif_running(bp->dev)) {
16522 if (rc)
16523 netif_close(bp->dev);
16524 else
16525 rc = bnxt_open_nic(bp, true, false);
16526 }
16527
16528 return rc;
16529 }
16530
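/* Set the netdev MAC address: PFs use the MAC already read into
 * bp->pf.mac_addr; VFs use the PF-administered MAC if one is valid
 * (otherwise a random address) and then request approval from the PF.
 */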
16531 static int bnxt_init_mac_addr(struct bnxt *bp)
16532 {
16533 int rc = 0;
16534
16535 if (BNXT_PF(bp)) {
16536 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
16537 } else {
16538 #ifdef CONFIG_BNXT_SRIOV
16539 struct bnxt_vf_info *vf = &bp->vf;
16540 bool strict_approval = true;
16541
16542 if (is_valid_ether_addr(vf->mac_addr)) {
16543 /* overwrite netdev dev_addr with admin VF MAC */
16544 eth_hw_addr_set(bp->dev, vf->mac_addr);
16545 /* Older PF driver or firmware may not approve this
16546 * correctly.
16547 */
16548 strict_approval = false;
16549 } else {
16550 eth_hw_addr_random(bp->dev);
16551 }
16552 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
16553 #endif
16554 }
16555 return rc;
16556 }
16557
16558 static void bnxt_vpd_read_info(struct bnxt *bp)
16559 {
16560 struct pci_dev *pdev = bp->pdev;
16561 unsigned int vpd_size, kw_len;
16562 int pos, size;
16563 u8 *vpd_data;
16564
16565 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
16566 if (IS_ERR(vpd_data)) {
16567 pci_warn(pdev, "Unable to read VPD\n");
16568 return;
16569 }
16570
16571 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16572 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
16573 if (pos < 0)
16574 goto read_sn;
16575
16576 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16577 memcpy(bp->board_partno, &vpd_data[pos], size);
16578
16579 read_sn:
16580 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
16581 PCI_VPD_RO_KEYWORD_SERIALNO,
16582 &kw_len);
16583 if (pos < 0)
16584 goto exit;
16585
16586 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
16587 memcpy(bp->board_serialno, &vpd_data[pos], size);
16588 exit:
16589 kfree(vpd_data);
16590 }
16591
16592 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
16593 {
16594 struct pci_dev *pdev = bp->pdev;
16595 u64 qword;
16596
16597 qword = pci_get_dsn(pdev);
16598 if (!qword) {
16599 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
16600 return -EOPNOTSUPP;
16601 }
16602
16603 put_unaligned_le64(qword, dsn);
16604
16605 bp->flags |= BNXT_FLAG_DSN_VALID;
16606 return 0;
16607 }
16608
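/* Map the doorbell BAR (BAR 2) using the previously discovered doorbell
 * region size in bp->db_size.
 */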
16609 static int bnxt_map_db_bar(struct bnxt *bp)
16610 {
16611 if (!bp->db_size)
16612 return -ENODEV;
16613 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
16614 if (!bp->bar1)
16615 return -ENOMEM;
16616 return 0;
16617 }
16618
16619 void bnxt_print_device_info(struct bnxt *bp)
16620 {
16621 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
16622 board_info[bp->board_idx].name,
16623 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
16624
16625 pcie_print_link_status(bp->pdev);
16626 }
16627
16628 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16629 {
16630 struct bnxt_hw_resc *hw_resc;
16631 struct net_device *dev;
16632 struct bnxt *bp;
16633 int rc, max_irqs;
16634
16635 if (pci_is_bridge(pdev))
16636 return -ENODEV;
16637
16638 if (!pdev->msix_cap) {
16639 dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
16640 return -ENODEV;
16641 }
16642
16643 /* Clear any DMA transactions left pending by the crashed kernel
16644 * while loading the driver in the kdump (capture) kernel.
16645 */
16646 if (is_kdump_kernel()) {
16647 pci_clear_master(pdev);
16648 pcie_flr(pdev);
16649 }
16650
16651 max_irqs = bnxt_get_max_irq(pdev);
16652 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
16653 max_irqs);
16654 if (!dev)
16655 return -ENOMEM;
16656
16657 bp = netdev_priv(dev);
16658 bp->board_idx = ent->driver_data;
16659 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
16660 bnxt_set_max_func_irqs(bp, max_irqs);
16661
16662 if (bnxt_vf_pciid(bp->board_idx))
16663 bp->flags |= BNXT_FLAG_VF;
16664
16665 /* No devlink port registration in case of a VF */
16666 if (BNXT_PF(bp))
16667 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
16668
16669 rc = bnxt_init_board(pdev, dev);
16670 if (rc < 0)
16671 goto init_err_free;
16672
16673 dev->netdev_ops = &bnxt_netdev_ops;
16674 dev->stat_ops = &bnxt_stat_ops;
16675 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
16676 dev->ethtool_ops = &bnxt_ethtool_ops;
16677 pci_set_drvdata(pdev, dev);
16678
16679 rc = bnxt_alloc_hwrm_resources(bp);
16680 if (rc)
16681 goto init_err_pci_clean;
16682
16683 mutex_init(&bp->hwrm_cmd_lock);
16684 mutex_init(&bp->link_lock);
16685
16686 rc = bnxt_fw_init_one_p1(bp);
16687 if (rc)
16688 goto init_err_pci_clean;
16689
16690 if (BNXT_PF(bp))
16691 bnxt_vpd_read_info(bp);
16692
16693 if (BNXT_CHIP_P5_PLUS(bp)) {
16694 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
16695 if (BNXT_CHIP_P7(bp))
16696 bp->flags |= BNXT_FLAG_CHIP_P7;
16697 }
16698
16699 rc = bnxt_alloc_rss_indir_tbl(bp);
16700 if (rc)
16701 goto init_err_pci_clean;
16702
16703 rc = bnxt_fw_init_one_p2(bp);
16704 if (rc)
16705 goto init_err_pci_clean;
16706
16707 rc = bnxt_map_db_bar(bp);
16708 if (rc) {
16709 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
16710 rc);
16711 goto init_err_pci_clean;
16712 }
16713
16714 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16715 NETIF_F_TSO | NETIF_F_TSO6 |
16716 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16717 NETIF_F_GSO_IPXIP4 |
16718 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16719 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
16720 NETIF_F_RXCSUM | NETIF_F_GRO;
16721 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16722 dev->hw_features |= NETIF_F_GSO_UDP_L4;
16723
16724 if (BNXT_SUPPORTS_TPA(bp))
16725 dev->hw_features |= NETIF_F_LRO;
16726
16727 dev->hw_enc_features =
16728 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
16729 NETIF_F_TSO | NETIF_F_TSO6 |
16730 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
16731 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
16732 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
16733 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
16734 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
16735 if (bp->flags & BNXT_FLAG_CHIP_P7)
16736 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
16737 else
16738 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
16739
16740 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
16741 NETIF_F_GSO_GRE_CSUM;
16742 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
16743 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
16744 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
16745 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
16746 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
16747 if (BNXT_SUPPORTS_TPA(bp))
16748 dev->hw_features |= NETIF_F_GRO_HW;
16749 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
16750 if (dev->features & NETIF_F_GRO_HW)
16751 dev->features &= ~NETIF_F_LRO;
16752 dev->priv_flags |= IFF_UNICAST_FLT;
16753
16754 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
16755 if (bp->tso_max_segs)
16756 netif_set_tso_max_segs(dev, bp->tso_max_segs);
16757
16758 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
16759 NETDEV_XDP_ACT_RX_SG;
16760
16761 #ifdef CONFIG_BNXT_SRIOV
16762 init_waitqueue_head(&bp->sriov_cfg_wait);
16763 #endif
16764 if (BNXT_SUPPORTS_TPA(bp)) {
16765 bp->gro_func = bnxt_gro_func_5730x;
16766 if (BNXT_CHIP_P4(bp))
16767 bp->gro_func = bnxt_gro_func_5731x;
16768 else if (BNXT_CHIP_P5_PLUS(bp))
16769 bp->gro_func = bnxt_gro_func_5750x;
16770 }
16771 if (!BNXT_CHIP_P4_PLUS(bp))
16772 bp->flags |= BNXT_FLAG_DOUBLE_DB;
16773
16774 rc = bnxt_init_mac_addr(bp);
16775 if (rc) {
16776 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
16777 rc = -EADDRNOTAVAIL;
16778 goto init_err_pci_clean;
16779 }
16780
16781 if (BNXT_PF(bp)) {
16782 /* Read the adapter's DSN to use as the eswitch switch_id */
16783 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
16784 }
16785
16786 /* MTU range: 60 - FW defined max */
16787 dev->min_mtu = ETH_ZLEN;
16788 dev->max_mtu = bp->max_mtu;
16789
16790 rc = bnxt_probe_phy(bp, true);
16791 if (rc)
16792 goto init_err_pci_clean;
16793
16794 hw_resc = &bp->hw_resc;
16795 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
16796 BNXT_L2_FLTR_MAX_FLTR;
16797 /* Older firmware may not report these filters properly */
16798 if (bp->max_fltr < BNXT_MAX_FLTR)
16799 bp->max_fltr = BNXT_MAX_FLTR;
16800 bnxt_init_l2_fltr_tbl(bp);
16801 __bnxt_set_rx_skb_mode(bp, false);
16802 bnxt_set_tpa_flags(bp);
16803 bnxt_init_ring_params(bp);
16804 bnxt_set_ring_params(bp);
16805 bnxt_rdma_aux_device_init(bp);
16806 rc = bnxt_set_dflt_rings(bp, true);
16807 if (rc) {
16808 if (BNXT_VF(bp) && rc == -ENODEV) {
16809 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
16810 } else {
16811 netdev_err(bp->dev, "Not enough rings available.\n");
16812 rc = -ENOMEM;
16813 }
16814 goto init_err_pci_clean;
16815 }
16816
16817 bnxt_fw_init_one_p3(bp);
16818
16819 bnxt_init_dflt_coal(bp);
16820
16821 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
16822 bp->flags |= BNXT_FLAG_STRIP_VLAN;
16823
16824 rc = bnxt_init_int_mode(bp);
16825 if (rc)
16826 goto init_err_pci_clean;
16827
16828 /* No TC has been set yet and rings may have been trimmed due to
16829 * limited MSIX, so we re-initialize the TX rings per TC.
16830 */
16831 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
16832
16833 if (BNXT_PF(bp)) {
16834 if (!bnxt_pf_wq) {
16835 bnxt_pf_wq =
16836 create_singlethread_workqueue("bnxt_pf_wq");
16837 if (!bnxt_pf_wq) {
16838 dev_err(&pdev->dev, "Unable to create workqueue.\n");
16839 rc = -ENOMEM;
16840 goto init_err_pci_clean;
16841 }
16842 }
16843 rc = bnxt_init_tc(bp);
16844 if (rc)
16845 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
16846 rc);
16847 }
16848
16849 bnxt_inv_fw_health_reg(bp);
16850 rc = bnxt_dl_register(bp);
16851 if (rc)
16852 goto init_err_dl;
16853
16854 INIT_LIST_HEAD(&bp->usr_fltr_list);
16855
16856 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
16857 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
16858 if (BNXT_SUPPORTS_QUEUE_API(bp))
16859 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
16860 dev->request_ops_lock = true;
16861 dev->netmem_tx = true;
16862
16863 rc = register_netdev(dev);
16864 if (rc)
16865 goto init_err_cleanup;
16866
16867 bnxt_dl_fw_reporters_create(bp);
16868
16869 bnxt_rdma_aux_device_add(bp);
16870
16871 bnxt_print_device_info(bp);
16872
16873 pci_save_state(pdev);
16874
16875 return 0;
16876 init_err_cleanup:
16877 bnxt_rdma_aux_device_uninit(bp);
16878 bnxt_dl_unregister(bp);
16879 init_err_dl:
16880 bnxt_shutdown_tc(bp);
16881 bnxt_clear_int_mode(bp);
16882
16883 init_err_pci_clean:
16884 bnxt_hwrm_func_drv_unrgtr(bp);
16885 bnxt_free_hwrm_resources(bp);
16886 bnxt_hwmon_uninit(bp);
16887 bnxt_ethtool_free(bp);
16888 bnxt_ptp_clear(bp);
16889 kfree(bp->ptp_cfg);
16890 bp->ptp_cfg = NULL;
16891 kfree(bp->fw_health);
16892 bp->fw_health = NULL;
16893 bnxt_cleanup_pci(bp);
16894 bnxt_free_ctx_mem(bp, true);
16895 bnxt_free_crash_dump_mem(bp);
16896 kfree(bp->rss_indir_tbl);
16897 bp->rss_indir_tbl = NULL;
16898
16899 init_err_free:
16900 free_netdev(dev);
16901 return rc;
16902 }
16903
16904 static void bnxt_shutdown(struct pci_dev *pdev)
16905 {
16906 struct net_device *dev = pci_get_drvdata(pdev);
16907 struct bnxt *bp;
16908
16909 if (!dev)
16910 return;
16911
16912 rtnl_lock();
16913 netdev_lock(dev);
16914 bp = netdev_priv(dev);
16915 if (!bp)
16916 goto shutdown_exit;
16917
16918 if (netif_running(dev))
16919 netif_close(dev);
16920
16921 if (bnxt_hwrm_func_drv_unrgtr(bp)) {
16922 pcie_flr(pdev);
16923 goto shutdown_exit;
16924 }
16925 bnxt_ptp_clear(bp);
16926 bnxt_clear_int_mode(bp);
16927 pci_disable_device(pdev);
16928
16929 if (system_state == SYSTEM_POWER_OFF) {
16930 pci_wake_from_d3(pdev, bp->wol);
16931 pci_set_power_state(pdev, PCI_D3hot);
16932 }
16933
16934 shutdown_exit:
16935 netdev_unlock(dev);
16936 rtnl_unlock();
16937 }
16938
16939 #ifdef CONFIG_PM_SLEEP
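/* System suspend: stop the RoCE ULP, close the netdev if it is running,
 * unregister the driver with firmware, shut down PTP, and release PCI
 * and context memory so the device can power down.
 */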
16940 static int bnxt_suspend(struct device *device)
16941 {
16942 struct net_device *dev = dev_get_drvdata(device);
16943 struct bnxt *bp = netdev_priv(dev);
16944 int rc = 0;
16945
16946 bnxt_ulp_stop(bp);
16947
16948 netdev_lock(dev);
16949 if (netif_running(dev)) {
16950 netif_device_detach(dev);
16951 rc = bnxt_close(dev);
16952 }
16953 bnxt_hwrm_func_drv_unrgtr(bp);
16954 bnxt_ptp_clear(bp);
16955 pci_disable_device(bp->pdev);
16956 bnxt_free_ctx_mem(bp, false);
16957 netdev_unlock(dev);
16958 return rc;
16959 }
16960
16961 static int bnxt_resume(struct device *device)
16962 {
16963 struct net_device *dev = dev_get_drvdata(device);
16964 struct bnxt *bp = netdev_priv(dev);
16965 int rc = 0;
16966
16967 netdev_lock(dev);
16968 rc = pci_enable_device(bp->pdev);
16969 if (rc) {
16970 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
16971 rc);
16972 goto resume_exit;
16973 }
16974 pci_set_master(bp->pdev);
16975 if (bnxt_hwrm_ver_get(bp)) {
16976 rc = -ENODEV;
16977 goto resume_exit;
16978 }
16979 rc = bnxt_hwrm_func_reset(bp);
16980 if (rc) {
16981 rc = -EBUSY;
16982 goto resume_exit;
16983 }
16984
16985 rc = bnxt_hwrm_func_qcaps(bp);
16986 if (rc)
16987 goto resume_exit;
16988
16989 bnxt_clear_reservations(bp, true);
16990
16991 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
16992 rc = -ENODEV;
16993 goto resume_exit;
16994 }
16995 if (bp->fw_crash_mem)
16996 bnxt_hwrm_crash_dump_mem_cfg(bp);
16997
16998 if (bnxt_ptp_init(bp)) {
16999 kfree(bp->ptp_cfg);
17000 bp->ptp_cfg = NULL;
17001 }
17002 bnxt_get_wol_settings(bp);
17003 if (netif_running(dev)) {
17004 rc = bnxt_open(dev);
17005 if (!rc)
17006 netif_device_attach(dev);
17007 }
17008
17009 resume_exit:
17010 netdev_unlock(bp->dev);
17011 bnxt_ulp_start(bp, rc);
17012 if (!rc)
17013 bnxt_reenable_sriov(bp);
17014 return rc;
17015 }
17016
17017 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
17018 #define BNXT_PM_OPS (&bnxt_pm_ops)
17019
17020 #else
17021
17022 #define BNXT_PM_OPS NULL
17023
17024 #endif /* CONFIG_PM_SLEEP */
17025
17026 /**
17027 * bnxt_io_error_detected - called when PCI error is detected
17028 * @pdev: Pointer to PCI device
17029 * @state: The current pci connection state
17030 *
17031 * This function is called after a PCI bus error affecting
17032 * this device has been detected.
17033 */
17034 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
17035 pci_channel_state_t state)
17036 {
17037 struct net_device *netdev = pci_get_drvdata(pdev);
17038 struct bnxt *bp = netdev_priv(netdev);
17039 bool abort = false;
17040
17041 netdev_info(netdev, "PCI I/O error detected\n");
17042
17043 bnxt_ulp_stop(bp);
17044
17045 netdev_lock(netdev);
17046 netif_device_detach(netdev);
17047
17048 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
17049 netdev_err(bp->dev, "Firmware reset already in progress\n");
17050 abort = true;
17051 }
17052
17053 if (abort || state == pci_channel_io_perm_failure) {
17054 netdev_unlock(netdev);
17055 return PCI_ERS_RESULT_DISCONNECT;
17056 }
17057
17058 /* The link is no longer reliable if the state is pci_channel_io_frozen,
17059 * so disable bus mastering to prevent any potentially bad DMAs before
17060 * freeing kernel memory.
17061 */
17062 if (state == pci_channel_io_frozen) {
17063 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
17064 bnxt_fw_fatal_close(bp);
17065 }
17066
17067 if (netif_running(netdev))
17068 __bnxt_close_nic(bp, true, true);
17069
17070 if (pci_is_enabled(pdev))
17071 pci_disable_device(pdev);
17072 bnxt_free_ctx_mem(bp, false);
17073 netdev_unlock(netdev);
17074
17075 /* Request a slot reset. */
17076 return PCI_ERS_RESULT_NEED_RESET;
17077 }
17078
17079 /**
17080 * bnxt_io_slot_reset - called after the pci bus has been reset.
17081 * @pdev: Pointer to PCI device
17082 *
17083 * Restart the card from scratch, as if from a cold-boot.
17084 * At this point, the card has experienced a hard reset,
17085 * followed by fixups by BIOS, and has its config space
17086 * set up identically to what it was at cold boot.
17087 */
17088 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
17089 {
17090 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17091 struct net_device *netdev = pci_get_drvdata(pdev);
17092 struct bnxt *bp = netdev_priv(netdev);
17093 int retry = 0;
17094 int err = 0;
17095 int off;
17096
17097 netdev_info(bp->dev, "PCI Slot Reset\n");
17098
17099 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17100 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17101 msleep(900);
17102
17103 netdev_lock(netdev);
17104
17105 if (pci_enable_device(pdev)) {
17106 dev_err(&pdev->dev,
17107 "Cannot re-enable PCI device after reset.\n");
17108 } else {
17109 pci_set_master(pdev);
17110 /* Upon a fatal error, the device's internal logic that latches
17111 * the BAR values is reset and is restored only by rewriting
17112 * the BARs.
17113 *
17114 * Since pci_restore_state() does not rewrite a BAR whose value
17115 * matches the previously saved value, the driver writes the
17116 * BARs to 0 to force a restore after a fatal error.
17117 */
17118 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17119 &bp->state)) {
17120 for (off = PCI_BASE_ADDRESS_0;
17121 off <= PCI_BASE_ADDRESS_5; off += 4)
17122 pci_write_config_dword(bp->pdev, off, 0);
17123 }
17124 pci_restore_state(pdev);
17125 pci_save_state(pdev);
17126
17127 bnxt_inv_fw_health_reg(bp);
17128 bnxt_try_map_fw_health_reg(bp);
17129
17130 /* In some PCIe AER scenarios, firmware may take up to
17131 * 10 seconds to become ready in the worst case.
17132 */
17133 do {
17134 err = bnxt_try_recover_fw(bp);
17135 if (!err)
17136 break;
17137 retry++;
17138 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
17139
17140 if (err) {
17141 dev_err(&pdev->dev, "Firmware not ready\n");
17142 goto reset_exit;
17143 }
17144
17145 err = bnxt_hwrm_func_reset(bp);
17146 if (!err)
17147 result = PCI_ERS_RESULT_RECOVERED;
17148
17149 /* IRQ will be initialized later in bnxt_io_resume */
17150 bnxt_ulp_irq_stop(bp);
17151 bnxt_clear_int_mode(bp);
17152 }
17153
17154 reset_exit:
17155 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17156 bnxt_clear_reservations(bp, true);
17157 netdev_unlock(netdev);
17158
17159 return result;
17160 }
17161
17162 /**
17163 * bnxt_io_resume - called when traffic can start flowing again.
17164 * @pdev: Pointer to PCI device
17165 *
17166 * This callback is called when the error recovery driver tells
17167 * us that it's OK to resume normal operation.
17168 */
17169 static void bnxt_io_resume(struct pci_dev *pdev)
17170 {
17171 struct net_device *netdev = pci_get_drvdata(pdev);
17172 struct bnxt *bp = netdev_priv(netdev);
17173 int err;
17174
17175 netdev_info(bp->dev, "PCI Slot Resume\n");
17176 netdev_lock(netdev);
17177
17178 err = bnxt_hwrm_func_qcaps(bp);
17179 if (!err) {
17180 if (netif_running(netdev)) {
17181 err = bnxt_open(netdev);
17182 } else {
17183 err = bnxt_reserve_rings(bp, true);
17184 if (!err)
17185 err = bnxt_init_int_mode(bp);
17186 }
17187 }
17188
17189 if (!err)
17190 netif_device_attach(netdev);
17191
17192 netdev_unlock(netdev);
17193 bnxt_ulp_start(bp, err);
17194 if (!err)
17195 bnxt_reenable_sriov(bp);
17196 }
17197
17198 static const struct pci_error_handlers bnxt_err_handler = {
17199 .error_detected = bnxt_io_error_detected,
17200 .slot_reset = bnxt_io_slot_reset,
17201 .resume = bnxt_io_resume
17202 };
17203
17204 static struct pci_driver bnxt_pci_driver = {
17205 .name = DRV_MODULE_NAME,
17206 .id_table = bnxt_pci_tbl,
17207 .probe = bnxt_init_one,
17208 .remove = bnxt_remove_one,
17209 .shutdown = bnxt_shutdown,
17210 .driver.pm = BNXT_PM_OPS,
17211 .err_handler = &bnxt_err_handler,
17212 #if defined(CONFIG_BNXT_SRIOV)
17213 .sriov_configure = bnxt_sriov_configure,
17214 #endif
17215 };
17216
17217 static int __init bnxt_init(void)
17218 {
17219 int err;
17220
17221 bnxt_debug_init();
17222 err = pci_register_driver(&bnxt_pci_driver);
17223 if (err) {
17224 bnxt_debug_exit();
17225 return err;
17226 }
17227
17228 return 0;
17229 }
17230
17231 static void __exit bnxt_exit(void)
17232 {
17233 pci_unregister_driver(&bnxt_pci_driver);
17234 if (bnxt_pf_wq)
17235 destroy_workqueue(bnxt_pf_wq);
17236 bnxt_debug_exit();
17237 }
17238
17239 module_init(bnxt_init);
17240 module_exit(bnxt_exit);
17241